content (string, lengths 39 to 9.28k) | sha1 (string, length 40) | id (int64, 8 to 710k)
---|---|---
def calc_starting_row(page_num, rows_per_page=10):
    """
    Calculate the starting row for the Solr search results. We only retrieve one page at a time.
    :param page_num: Current page number
    :param rows_per_page: Number of rows per page
    :return: Tuple of (starting row, normalized page number)
    """
    page = 1
    try:
        page = int(page_num)
    except ValueError:
        pass
    if page < 1:
        page = 1
    elif page > 100000:  # @magic_number: arbitrary upper range
        page = 100000
    return rows_per_page * (page - 1), page
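# Usage sketch (illustrative values): page 3 with the default 10 rows per page
# starts at row 20; non-numeric input falls back to page 1.
# >>> calc_starting_row(3)
# (20, 3)
# >>> calc_starting_row("abc")
# (0, 1)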
|
e9e467f152c0daad2cd9f5431f6abf916893955b
| 542,121 |
def sanitize_doc(comment):
"""Substitute HTML breaks for new lines in comment text.
:param comment: The comment text
:returns: Sanitized comment text
"""
if isinstance(comment, list):
return sanitize_doc("\n".join(filter(None, comment)))
else:
return comment.replace("\n", "<br/>") if comment else comment
|
a57ed53aa9941b695d4dcf00f44c8b149881a96b
| 473,116 |
from collections import OrderedDict
def extend_vocab(current_vocab, new_vocab, max_tokens=10000):
"""Extends current vocabulary with words from vocab that are not
present in the current vocabulary. Adds up to max_tokens words.
# Arguments:
current_vocab: Current dictionary of tokens.
new_vocab: Vocabulary to be added. MUST have word_counts populated, i.e.
must have run count_all_words() previously.
max_tokens: Maximum number of words to be added.
# Returns:
How many new tokens have been added.
"""
if max_tokens < 0:
max_tokens = 10000
    # sort words by frequency (most frequent first)
    words = OrderedDict(
        sorted(new_vocab.word_counts.items(), key=lambda kv: kv[1], reverse=True)
    )
base_index = len(current_vocab.keys())
added = 0
for word in words:
if added >= max_tokens:
break
        if word not in current_vocab:
current_vocab[word] = base_index + added
added += 1
return added
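# Usage sketch; SimpleNamespace stands in for a hypothetical vocabulary object
# whose word_counts has already been populated (e.g. via count_all_words()):
# >>> from types import SimpleNamespace
# >>> current = {'the': 0, 'a': 1}
# >>> new = SimpleNamespace(word_counts={'the': 9, 'cat': 5, 'sat': 2})
# >>> extend_vocab(current, new, max_tokens=1)  # adds the most frequent unseen word
# 1
# >>> current['cat']
# 2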
|
1613699ac87f9bef361de48417116eb57b90b791
| 510,160 |
def tf_left_split(op):
"""Split the parameters of op for left recursion.
Args:
op: tf.Operation
Returns:
A tuple of the leftmost input tensor and a list of the
remaining arguments.
"""
if len(op.inputs) < 1:
return None, []
if op.type == "Concat":
return op.inputs[1], op.inputs[2:]
return op.inputs[0], op.inputs[1:]
|
e02335360329938100873bd2975735aa6330185d
| 511,607 |
def discrete_signal(signal0, step_size):
"""
SNIPPET 10.3 - SIZE DISCRETIZATION TO PREVENT OVERTRADING
Discretizes the bet size signal based on the step size given.
:param signal0: (pandas.Series) The signal to discretize.
:param step_size: (float) Step size.
:return: (pandas.Series) The discretized signal.
"""
signal1 = (signal0 / step_size).round() * step_size
signal1[signal1 > 1] = 1 # Cap
signal1[signal1 < -1] = -1 # Floor
return signal1
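# Usage sketch (illustrative values): signals snap to the nearest multiple of
# step_size and are clipped to [-1, 1].
# >>> import pandas as pd
# >>> discrete_signal(pd.Series([0.27, -1.4, 0.96]), 0.25).tolist()
# [0.25, -1.0, 1.0]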
|
279fc47068c706f7634d37e1e49d44cf291bd9dd
| 654,400 |
def create_demag_params(atol, rtol, maxiter):
"""
Helper function to create a dictionary with the given
demag tolerances and maximum iterations. This can be
directly passed to the Demag class in order to set
these parameters.
"""
demag_params = {
'absolute_tolerance': atol,
'relative_tolerance': rtol,
'maximum_iterations': int(maxiter),
}
return {'phi_1': demag_params, 'phi_2': demag_params}
|
9de9db696f5f022569ab31d8fe069e34b2d7dc08
| 105,016 |
def currency(value):
"""
Returns a string currency representation of a float
"""
return '${:,.2f}'.format(value)
|
c9ce50794d1e5c9ede66018b9ded5a25b7e69eaf
| 640,962 |
import re
def is_iso_time(time_string):
"""
Checks if string represents valid time in ISO format,
with the default delimiter.
Regex is somewhat convoluted, but general enough to last
at least until the 9999 AD.
:returns:
True if string matches the pattern.
False otherwise.
"""
    match = re.fullmatch(
        r'\d{4}-[01][0-9]-[0-3][0-9]T[0-2][0-9](:[0-5][0-9]){2}\.\d+Z',
        time_string)
    return bool(match)
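# Usage sketch (illustrative values):
# >>> is_iso_time('2021-06-01T12:30:45.123Z')
# True
# >>> is_iso_time('2021-06-01 12:30:45')
# False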
|
7df7824af03e3047eb8bfa50b2301e06c977b0d4
| 550,935 |
from typing import Tuple
def format_time(elapsed: float) -> Tuple[int, int]:
"""Convert a quantity of time, in seconds, to minutes and seconds.
:param elapsed: float
A quantity of time, in seconds.
:return: Tuple[int, int]
The number of minutes in `elapsed` and the remaining number of seconds, respectively.
"""
mins = int(elapsed / 60)
secs = int(elapsed - (mins * 60))
return mins, secs
|
6a1ed9a0009e4f219a1903deb1b8f61c4df14e63
| 496,004 |
def data_section(timestamps, data):
"""
Inputs:
timestamps - Nested list CLC formatted timestamps
data - Nested list of CLC formatted data
Outputs:
List of lists where each row in list corresponds to data rows
in the CLC file
"""
    data_section = []
    for timestamp, row in zip(timestamps, data):
        # each output row is the timestamp followed by that row's data values
        data_section.append([timestamp] + list(row))
    return data_section
|
f1cb9d96a9a187ec27ee79b2b66fc74ed320cc58
| 164,435 |
def FilterSet(df, annotSet="", annotSetArr=[], annotSetCompare="=", limit=0, negate=False):
"""Provide the ability to filter a Dataframe of AQAnnotations based on the value in the annotSet field.
Args:
df: Dataframe of AQAnnotations that will be filtered by the specified annotation set.
annotSet: String to filter against the annotSet field in the dataset of AQAnnotations.
annotSetArr: Array of Strings to filter against the annotSet field in the dataframe of AQAnnotations. An OR will be applied to the Strings. Only used if annotSet was not specified.
annotSetCompare: Comparison operator to use for the annotSet field in the dataframe of AQAnnotations. Default is '='. Possible values are '=' and '!='.
limit: Number of AQAnnotations to return.
negate: Whether to negate the entire query. Default is false.
Returns:
Dataframe of AQAnnotations
"""
query = ""
if annotSet != "":
query += ("annotSet " + annotSetCompare + " \"" + annotSet + "\"")
elif len(annotSetArr) > 0:
if annotSetCompare == "=":
query += ("annotSet in " + "('" + "','".join(map(str,annotSetArr)) + "')")
else:
query += ("annotSet not in " + "('" + "','".join(map(str,annotSetArr)) + "')")
if negate:
query = "!(" + query + ")"
results = df.filter(query)
if limit > 0:
results = results.limit(limit)
return results
|
e56f7fb5aba3efeba231a03f1be3817550ae7b4a
| 663,639 |
def valid_xss_content_type(http_res):
"""Check whether the returned content-type header allow javascript evaluation."""
# When no content-type is returned, browsers try to display the HTML
if "content-type" not in http_res.headers:
return True
# else only text/html will allow javascript (maybe text/plain will work for IE...)
if "text/html" in http_res.headers["content-type"]:
return True
return False
|
14d7148521d4eb0457c58db9728beb082ec87e75
| 549,479 |
import requests
def get_linked_status(gh_headers, linked_issue_info):
"""
Returns the open/closed status of the linked issue
"""
lorg, lrepo, lnum = linked_issue_info
print(f"::set-output name=linkedIssueInfo::Found a \
linked issue: {lorg} {lrepo} {lnum}")
issue_url = f"https://api.github.com/repos/{lorg}/{lrepo}/issues/{lnum}"
response = requests.get(issue_url, headers=gh_headers).json()
return response["state"]
|
1874833868cf8132457e75b95eb02ffe17078c54
| 575,401 |
def List_genomes (file_genomes_fasta):
"""
Convert the TXT file with the individual genomes into a list with the complete
names of each individual genome.
"""
# Create the list:
list_genomes = []
    # Open the file to read; the with-block ensures it is closed afterwards.
    with open(file_genomes_fasta, 'r') as genomes:
        for line in genomes:
            list_genomes.append(line.strip())
    return list_genomes
|
90becaec12da4278bd9dc77b73f3b1e9c1b41ae4
| 543,073 |
def pegtop_blending(rgba, norm_intensities):
""" Calculates image colors with the Pegtop Light shading of ImageMagick
See:
http://www.imagemagick.org/Usage/compose/#pegtoplight
Forked from Ran Novitsky's blog (no license found)
http://rnovitsky.blogspot.nl/2010/04/using-hillshade-image-as-intensity.html
:param rgba: [nrows, ncols, 3|4] RGB or RGBA array. The alpha layer will be ignored.
:param norm_intensities: normalized intensities
Returns 3D array that can be plotted with matplotlib.imshow(). The last dimension is RGB.
"""
# get rgb of normalized data based on cmap
rgb = rgba[:, :, :3]
    # form an rgb equivalent of intensity
d = norm_intensities.repeat(3).reshape(rgb.shape)
# simulate illumination based on pegtop algorithm.
return 2 * d * rgb + (rgb ** 2) * (1 - 2 * d)
|
878d1c09d9746b20d47284d931a5da0f4aa0cf29
| 593,272 |
def bitarray2fasm(bitarray):
""" Convert array of bits ('0', '1') into FASM value.
Note: index 0 is the LSB.
"""
bitstr = ''.join(bitarray[::-1])
return "{}'b{}".format(len(bitstr), bitstr)
|
b1a94ab76dc1569fb835d36f3c6754060c5f6114
| 408,274 |
def _shift_twelve(number):
"""Shifts the number by 12, if it is less than 0.
Parameters
----------
number : int
Returns
-------
int
"""
return number + 12 if number < 0 else number
|
cca6689a7caabbaae0e352b4e91b64ebb1f63ad7
| 686,070 |
def is_ref(value):
"""
Whether the value is a reference.
"""
    return isinstance(value, str) and value.startswith(('$', '@'))
|
454315286dce05d1bb95b237556b575b321f99a5
| 466,842 |
import re
def mk_rule_fn(rule, name):
"""Test helper for converting camelCase names to
underscore_function_names and returning a callable rule fn."""
parts = re.sub('(?!^)([A-Z][a-z]+)', r' \1', name).split()
parts.insert(0, 'with')
fn_name = '_'.join(map(lambda x: x.lower(), parts))
return getattr(rule, fn_name)
|
e6ba5e1c409077911e2b67878dba078dc2890c38
| 659,032 |
from typing import Union
def decrement_by_n(n: Union[int, float]):
"""Generates a function that
will decrement by n.
Args:
        n (int or float): number to decrement
            by.
>>> decrement_by_n(2)(2)
0
"""
def decrementor(base: Union[int, float]):
return base - n
return decrementor
|
77230babd08ffcb0643cdaf37ed4cbec79ce08ef
| 318,768 |
def preprocess_logic_form(lf, conversions: dict) -> list:
""" Preprocess a single logic form. """
new_lf = []
for term in lf:
if isinstance(term, list):
term = preprocess_logic_form(term, conversions)
elif isinstance(term, str):
            if term in conversions:
                term = conversions[term]
new_lf.append(term)
return new_lf
|
9a17a2edebbc5344f6dda4495016f182e1203fbb
| 155,297 |
def split_gates(inputs: int) -> tuple[int, int]:
    """Splits the number of inputs between the left and top of the Karnaugh map."""
    left = inputs // 2  # Smaller half for odd counts
    top = inputs - left
    return left, top
|
76e5eeae7e50b075eced8753da44c229e2b87578
| 74,478 |
import sys
import yaml
def yaml_loader(filepath, log):
    """Load a yaml file"""
    try:
        with open(filepath, "r") as myfile:
            return yaml.load(myfile, Loader=yaml.BaseLoader)
    except FileNotFoundError as e:
        log.error(e)
        sys.exit(1)
|
55de05c37aebb724e101b306f439a2836929d09f
| 573,175 |
import ast
def parse_comment(comment):
"""
Parse a comment of the form
# investigation_time=50.0, imt="PGA", ...
and returns it as pairs of strings:
>>> parse_comment('''path=('b1',), time=50.0, imt="PGA"''')
[('path', ('b1',)), ('time', 50.0), ('imt', 'PGA')]
"""
names, vals = [], []
pieces = comment.split('=')
for i, piece in enumerate(pieces):
        if i == 0:  # first piece contains only a name
            names.append(piece.strip())
        elif i == len(pieces) - 1:  # last piece contains only a value
vals.append(ast.literal_eval(piece))
else:
val, name = piece.rsplit(',', 1)
vals.append(ast.literal_eval(val))
names.append(name.strip())
return list(zip(names, vals))
|
9e46e817a7125a9f677064955653c81300d3e610
| 232,814 |
def get_val_unit(json_data, arg1, arg2, unit):
"""Extract value from JSON and concatenate with a unit."""
value = str(json_data[arg1][arg2])
return f"{value}{unit}"
|
de66fe4b87c43eccfb6e30e3e577e14613a855a7
| 577,672 |
def time_to_seconds(duration):
    """
    Convert a time string to seconds.
    :param duration: time string, e.g. "01:46:00"
    :return seconds: int
    """
    hours, minutes, seconds = duration.split(':')
    return int(hours) * 3600 + int(minutes) * 60 + int(seconds)
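# Usage sketch (illustrative value):
# >>> time_to_seconds("01:46:00")
# 6360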
|
fe13fbac838186a4803acbcf814b68a0c30abf8e
| 382,204 |
import struct
def find_coordinator_error(error):
"""
Encode an error'd out FindCoordinatorResponse v0.
:param error: Kafka errno
"""
return b"".join([ # FindCoordinator Response v0
struct.pack('>h', error), # Error Code
struct.pack('>i', -1), # Coordinator id
        struct.pack('>h', 0),   # The Coordinator host (empty string, length 0)
        struct.pack('>i', 0),   # The Coordinator port
])
|
e909411e07c143dbe13f77904bfc824e33626695
| 343,242 |
def is_uniform(x, epsilon=0.000000001):
"""Determine if the vector is uniform within epsilon tolerance. Useful to stop a simulation if the fitness landscape has become essentially uniform."""
x_0 = x[0]
for i in range(1, len(x)):
if abs(x[i] - x_0) > epsilon:
return False
return True
|
95441cbc7ad1c06461d1e029d2490250e491e020
| 585,030 |
def add(a: int, b: int) -> int:
"""
    Sum of a and b.
>>> add(3, 3)
6
>>> add(3, -3)
0
>>> add(99, 1)
100
"""
return a + b
|
8196b5e31564d293bd2fb7cceae3641986740e30
| 559,823 |
def gt(x, y):
"""Implement `gt`."""
return x > y
|
ceb5d4cf5812a587fc8aaea8f2c5e97b6ac7c973
| 65,610 |
def _is_int(value):
"""Use casting to check if value can convert to an `int`."""
try:
int(value)
    except (ValueError, TypeError):
return False
else:
return True
|
56e1b97651afe017fa4e9f0c8c5542754d112d70
| 106,365 |
import re
def validate_container_name(name):
"""Make sure the container name conforms to Azure's expectations
"""
    label = r'[a-z0-9]+(?:[a-z0-9\-]*[a-z0-9])*'
validate_name = re.compile('^' + label + '$')
return (
len(name) >= 3 and len(name) <= 63 and bool(validate_name.match(name))
)
|
f1ab6f30f22a259b812b784c831fbb9b5753b613
| 412,655 |
def array_find(arr, obj) -> int:
"""A helpher function which finds the index of an object in an array.
Instead of throwing an error when no index can be found it returns -1.
Args:
arr : the array to be searched
obj : the object whose index is to be found.
Returns:
int: The index of obj in the array. -1 if the object is not in the array
"""
    try:
        return arr.index(obj)
    except ValueError:
        return -1
|
df8733b073d24d47f7be8f3b01b3f9e2d78f51bc
| 683,247 |
def filter_dictionary(dictionary, keys):
"""Filters a dictionary from a list of keys."""
return {key: dictionary[key] for key in dictionary if key in keys}
|
ffc1d0591d69e27ff2d5acdd826c1ebe39b59c08
| 274,949 |
def _tf_tensorarray_append(target, element):
"""Overload of append that stages a TensorArray write at the last position."""
return target.write(target.size(), element)
|
3f064a1399839b9200a77a2ecce9dc540af3c440
| 543,059 |
import math
def slopeRadians(dist):
"""
Given the distance in meters find the slope in radians.
We assume the "road" is an undulating sine wave
with a constant frequency and amplitude. If amplitude
is 0 there is no undulations.
"""
    # cos(x) is the derivative of sin(x), i.e. the slope of the tangent to the sine curve at x
freq = 100.0 # 100 meters
amplitude = 1.0 / freq # 0.0 for level 1.0 for slight undulation
c = math.cos(2.0 * math.pi * (dist / freq)) * amplitude
return c
|
8b1f2303df259e499ef2f8e26472bd76ac0ee349
| 480,411 |
def _check90deg(pd):
"""Return True if the Euler angles are 0,90,180, etc"""
psi, phi, theta = int(pd['psi']), int(pd['phi']), int(pd['theta'])
    return psi % 90 == 0 and phi % 90 == 0 and theta % 90 == 0
|
f159d7f10e96b13b818ed51b2e0f0add731264a3
| 514,318 |
def _get_item(i, j, block):
"""
Returns a single item from the block. Coords must be in block space.
"""
return block[i, j]
|
45a12ecb3959a75ad8f026616242ba64174441fc
| 707,953 |
def v0_lstrip(iterable, strip_value):
"""Return iterable with strip_value items removed from beginning."""
stripped = []
is_beginning = True
for item in iterable:
if is_beginning:
if item != strip_value:
is_beginning = False
else:
continue
stripped.append(item)
return stripped
|
33c89d02614a12caad04530fa191bc2009810eca
| 232,430 |
def K(A,L=None):
"""
Function we are trying to maximize: sum_{i<j} A[L[i]][L[j]]
A is m x m matrix,
L is a subset of range(m) (or defaults to range(m) if no L given)
"""
    if L is None:
        L = range(len(A))
    return sum(A[L[i]][L[j]] for j in range(len(L)) for i in range(j))
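# Usage sketch (illustrative values): sums the entries A[L[i]][L[j]] with i < j.
# >>> A = [[0, 1, 2], [1, 0, 3], [2, 3, 0]]
# >>> K(A)
# 6
# >>> K(A, [0, 2])
# 2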
|
de8c83c695a8262131b4bef707d48255a3be89de
| 642,361 |
def get_novel_gene_ids(cursor):
""" Fetch IDs of all novel genes in the database """
query = """SELECT DISTINCT(gene_ID) FROM observed
LEFT JOIN gene_annotations AS ga ON ga.ID = observed.gene_ID
WHERE (ga.attribute = 'gene_status' AND ga.value = 'NOVEL')
"""
cursor.execute(query)
novel_genes = [x[0] for x in cursor.fetchall()]
return novel_genes
|
2f8d5b5c9cef77e7c289d03739897ef7af4a85a8
| 273,182 |
import re
def apply_lit_token(arg, macro):
"""Apply the LIT or ARG_LIT macro to a single token."""
# The macro must only be applied to a floating-point constant, not
# to an integer constant or lit_* value.
sign_re = r'[+-]?'
exp_re = r'([+-])?[0-9]+'
suffix_re = r'[lLfF]?'
dec_exp_re = r'[eE]' + exp_re
hex_exp_re = r'[pP]' + exp_re
dec_frac_re = r'(?:[0-9]*\.[0-9]+|[0-9]+\.)'
hex_frac_re = r'(?:[0-9a-fA-F]*\.[0-9a-fA-F]+|[0-9a-fA-F]+\.)'
dec_int_re = r'[0-9]+'
hex_int_re = r'[0-9a-fA-F]+'
dec_cst_re = r'(?:%s(?:%s)?|%s%s)' % (dec_frac_re, dec_exp_re,
dec_int_re, dec_exp_re)
hex_cst_re = r'0[xX](?:%s|%s)%s' % (hex_frac_re, hex_int_re, hex_exp_re)
fp_cst_re = r'(%s(?:%s|%s))%s\Z' % (sign_re, dec_cst_re, hex_cst_re,
suffix_re)
m = re.match(fp_cst_re, arg)
if m:
return '%s (%s)' % (macro, m.group(1))
else:
return arg
|
54bd3210298bc3bd693408f5aa2769a0d73b95a5
| 409,263 |
import imp
from importlib.machinery import SourceFileLoader
import six
def load_module_from_file(name, filepath):
"""Load a python module from a sourcefile.
Args:
name (str): Module name.
filepath (str): Python sourcefile.
Returns:
`module`: Loaded module.
"""
if six.PY2:
with open(filepath) as f:
return imp.load_source(name, filepath, f)
else:
return SourceFileLoader(name, filepath).load_module()
|
c9d066af0a66bdd80420e86fe81d889de91ff704
| 238,174 |
import re
def natural_sort(string):
"""
Natural sorting function which sorts by numerical value of a string,
rather than raw ASCII value.
"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string)]
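# Usage sketch (illustrative values): used as a sort key so numeric runs
# compare by value rather than lexicographically.
# >>> sorted(['item10', 'item2'], key=natural_sort)
# ['item2', 'item10']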
|
ebb771e4ffc877da74b8d4014eff4a7ab8e04fa5
| 116,390 |
import math
def softmax_mom(p, q, n = 5):
"""
Two-place softmax function.
Parameters
----------
    p : float
        one interpolant.
    q : float
        another interpolant.
    n : float
        scaling parameter.
"""
return 1 / n * math.log(1.01**(n * p) + 1.01**(n * q), 1.01)
|
a2749c2f573e1a1c8e79f9d29b6d9a152279b7dd
| 262,046 |
def is_ldap_user(user):
"""Check whether user is a LDAP user.
"""
    return user.source in ('LDAP', 'LDAPImport')
|
98631660abc1aa087f1e2ab9c6f6381b44757c41
| 470,358 |
def ensure_ascii(in_string):
"""Remove any non-ASCII characters from the input string
Parameters
----------
in_string : str
Input string
    Returns
    -------
    output_string : str
        String with non-ASCII characters removed
"""
encoded_string = in_string.encode('ascii', 'ignore')
return encoded_string.decode()
|
6440cb66d6bee0af7cd2e93052636598e553ec19
| 99,049 |
def lcm(x: int, y: int):
"""
Returns the least common multiple of two integers.
:param x: any integer
:param y: any integer
:return: integer z such that remainder(z/x) == 0 and remainder(z/y) == 0
Not necessarily faster solution, just a different solution using filter
"""
# if either number is 0 then lcm should be 0
if x == 0 or y == 0:
return 0
# simple algorithm is to return the first multiple of x that is also a multiple of y
# the built in filter function is great for such tasks
# note that we use multiples of the large number as it makes the generator length smaller
min_ = min(x, y)
max_ = max(x, y)
return next(filter(lambda i: i % min_ == 0, (max_ * j for j in range(1, min_ + 1))))
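# Usage sketch (illustrative values):
# >>> lcm(4, 6)
# 12
# >>> lcm(0, 5)
# 0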
|
7d0d1f28c805b4cc21baaba6adc105f442e1ee9f
| 548,585 |
def choosecat(text, classnames):
    """ Return the index of the first class name contained in the string, or None. """
    for i, name in enumerate(classnames):
        if name in text:
            return i
    return None
|
ad3ccefd63e92be774386209f114465dbc5d482e
| 346,859 |
import pkgutil
import importlib
def import_solver(package, day):
"""Import and return a solver module for a day of the Advent calendar.
Return None if the module can't be found. No ImportError is raised in this
situation in order to distinguish it from an error occurring during import.
"""
prefix = 'd' + day
for module_info in pkgutil.iter_modules(package.__path__):
if module_info.name.startswith(prefix):
name = package.__name__ + '.' + module_info.name
return importlib.import_module(name)
return None
|
2ce1fd094ef0b9373e603fe5a4e991bb8bf1a3ce
| 261,255 |
def get_union_metrics(metric_a, metric_b):
"""Union of metric_a and metric_b
:param metric_a: list
:param metric_b: list
:return: Union metrics list from metric_a and metric_b
"""
if metric_a is None and metric_b is None:
return None
elif metric_a is None:
return metric_b
elif metric_b is None:
return metric_a
else:
metric_list = list(set(metric_a).union(metric_b))
return metric_list
|
145ccd006ecf6ddb30045c7d9fd3647a6644d7cc
| 204,140 |
import json
from pathlib import Path
def get_total_epochs(save_path, run, last_gen):
"""
Compute the total number of performed epochs.
Parameters
----------
    save_path: str
        path where the objects needed to resume evolution are stored.
run : int
current evolutionary run
last_gen : int
count the number of performed epochs until the last_gen generation
Returns
-------
total_epochs : int
sum of the number of epochs performed by all trainings
"""
total_epochs = 0
for gen in range(0, last_gen+1):
        with open(Path('%s/run_%d/gen_%d.csv' % (save_path, run, gen))) as f:
            j = json.load(f)
        num_epochs = [elm['num_epochs'] for elm in j]
total_epochs += sum(num_epochs)
return total_epochs
|
a9f046640b2502ae5057ab9cfc88ea37d895863e
| 686,701 |
def str_rstrip(text):
"""Strips whitespaces from the end of the text."""
return text.rstrip()
|
95136f1153d3710703a5a22f34bc91e83d8da1d7
| 594,872 |
import functools
import shutil
def save_workspace(new_workspace):
"""Decorator to save a workspace to a new location.
If `new_workspace` already exists on disk, it will be recursively
removed.
Example usage with a test case::
import natcap.invest.testing
@natcap.invest.testing.save_workspace('/path/to/workspace')
def test_workspaces(self):
model.execute(self.args)
Note:
+ Target workspace folder must be saved to ``self.workspace_dir``
This decorator is only designed to work with test functions
from subclasses of ``unittest.TestCase`` such as
``natcap.invest.testing.GISTest``.
+ If ``new_workspace`` exists, it will be removed.
So be careful where you save things.
Args:
        new_workspace (string): a URI to where the workspace should be
            copied.
Returns:
A composed test case function which will execute and then save your
workspace to the specified location."""
# item is the function being decorated
def test_inner_func(item):
# this decorator indicates that this innermost function is wrapping up
# the function passed in as item.
@functools.wraps(item)
def test_and_remove_workspace(self, *args, **kwargs):
# This inner function actually executes the test function and then
# moves the workspace to the folder passed in by the user.
item(self)
# remove the contents of the old folder
try:
shutil.rmtree(new_workspace)
except OSError:
pass
# copy the workspace to the target folder
old_workspace = self.workspace_dir
shutil.copytree(old_workspace, new_workspace)
return test_and_remove_workspace
return test_inner_func
|
85312257d87450fa4eb10a8aac1fdfa7c69c4c48
| 210,005 |
def folder_contents_html(folder_path, files, folders):
"""Given files and folders generate html."""
html = "<!DOCTYPE html><html><body>{}</body></html>"
atag = '<a href="{}">{}</a>'
files_and_folders = ''
for folder in folders:
files_and_folders += '<h4>' + atag.format(folder_path + '/' + folder, folder) + '</h4>'
for file_name in files:
files_and_folders += '<h4>' + atag.format(folder_path + '/' + file_name, file_name) + '</h4>'
return html.format(files_and_folders)
|
6b6b37ca9452319d309a61c877ebf6d1fba201aa
| 698,954 |
def find_next_coefficient_c(k, previous_c, lamb, beta):
"""Return c_k = lamb^(k-1) * (1 - lamb)^(k * beta - k + 1) * \\binom{k * beta}{k - 1} / k.
To avoid large values of \\binom when 'k' is sufficiently big, and to decrease the runtime,
the value is computed as:
    c_1 = (1-lamb)^beta,
c_k = c_{k-1} * lamb * (1 - lamb)^(beta-1) * beta * \\prod_{j=1}^{beta-1} (beta * k - j) / (beta * k - k + 2 - j).
"""
if k == 1:
return (1. - lamb) ** beta
result = previous_c * lamb * (1. - lamb) ** (beta - 1) * beta
for j in range(1, beta):
result *= (beta * k - j) / (beta * k - k + 2. - j)
return result
|
3d9f68e222c2cf9320e2c9a1fdc5970e80f0a159
| 591,640 |
import math
def compute_fuel_mass(mass: int) -> int:
"""
Computes the fuel required for a specific mass, taking
into account that the fuel has mass itself.
"""
# Stopping condition
if mass <= 0:
return 0
fuel = (math.floor(mass/3)-2)
fuel = fuel if fuel > 0 else 0
return fuel + compute_fuel_mass(fuel)
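# Usage sketch (illustrative value): a module of mass 1969 needs 966 units of
# fuel once the fuel's own mass is accounted for.
# >>> compute_fuel_mass(1969)
# 966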
|
43c160d1344a79b2d5480a315a8fa6ac75ee54a4
| 562,560 |
import math
def isPrime(n):
"""
check if the input number n is a prime number or not
"""
if n <= 3:
return n > 1
if n % 6 != 1 and n % 6 != 5:
return False
sqrt = math.sqrt(n)
for i in range(5, int(sqrt)+1, 6):
if n % i == 0 or n % (i+2) == 0:
return False
return True
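# Usage sketch (illustrative values):
# >>> isPrime(97)
# True
# >>> isPrime(91)  # 7 * 13
# False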
|
91da5b13840181d039902e2db3efb8cc09609465
| 5,091 |
def DifferenceLists(entries):
"""
    Find the symmetric difference of one list with another.
    Useful for existing lists or complex searches.
    Inputs:
        entries (list) : list of two lists to
                         difference [[...],[...]]
    Outputs:
        diff (list) : symmetric difference of the two entry lists
    """
    if len(entries) != 2:
raise ValueError('Symmetric difference only works on two lists')
entryset = set(entries[0])
diff = list(entryset.symmetric_difference(entries[1]))
# Re-sort if necessary
diff = sorted(diff)
return diff
|
e9d0c9474d7e21e267f425c5409df472ace28b6d
| 506,345 |
def py(url, description, doc=''):
"""Make a markdown table entry for a .py file."""
if doc: doc = f'[documentation]({doc})'
return f'|[{url}](/blob/master/py/{url})|*{description}*|{doc}|'
|
a9353cbd206e39dec74593bd349600e886e65384
| 315,546 |
import re
def natural_sort_key(string_to_split):
""" Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
Can be used to implement "natural sort" order. See:
http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html
http://nedbatchelder.com/blog/200712.html#e20071211T054956
"""
def tryint(val):
try:
return int(val)
except ValueError:
return val
    return [tryint(chunk) for chunk in re.split(r'(\d+)', string_to_split)]
|
0ff6c6e1b522eb2b92c18164249aa17454afefb0
| 223,509 |
from typing import List
def pad_batch(context_tokens: List[List[int]], pad_id: int, pad_len: int):
"""
    pads each item in context_tokens with pad_id up to pad_len tokens,
    and returns the padded batch and the original (unpadded) lengths.
context_tokens: list of lists of tokens
pad_id: int, integer to use as padding token
pad_len: int, context length to be padded; all batch items will be padded to the same length
returns: tuple of padded context tokens and a list of unpadded token count
"""
context_lengths = []
for tokens in context_tokens:
context_length = len(tokens)
if context_length < pad_len:
tokens.extend([pad_id] * (pad_len - context_length))
elif context_length > pad_len:
raise ValueError("context_length is bigger than to be padded length")
context_lengths.append(context_length)
return context_tokens, context_lengths
|
f447ad215acb21fcd95c427dce043426d7224594
| 580,852 |
def upsample_method(request):
"""Fixture for parametrization of Grouper upsample methods."""
return request.param
|
9baed452ea6ac68b456ecbda6e7f9b56faf2f299
| 252,132 |
def kron_d(i, j):
""" The Kronecker delta. """
return 1 if i == j else 0
|
4b4bfe35bc4407ebde917b70d4052259c984c0d4
| 77,484 |
def calc_rank(someset, n):
"""
Calculates the rank of a subset `someset` of {1, 2, ..., `n`}
    in the ordering given by the Gray sequence of characteristic vectors.
"""
assoc_seq = [k + 1 in someset for k in range(n)]
bit = False
rank = 0
for k, x in enumerate(assoc_seq):
bit ^= x
rank += bit * 2**(n - k - 1)
return rank
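# Usage sketch (illustrative values, n=3): the characteristic vector of {1}
# is Gray code 100, which decodes to binary 111, i.e. rank 7.
# >>> calc_rank({1}, 3)
# 7
# >>> calc_rank(set(), 3)
# 0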
|
4b3a7a226d90a5431d7622788e1d84ab406b40c3
| 629,799 |
def aggregate_conversations(li, delta):
"""Aggregates the conversation count.
Keyword arguments:
li --- The list of tuples containing tweet creation date
and number of user mentions.
delta --- The timedelta object by which the conversations
should be aggregated.
"""
count = []
date = []
index = -1
for (d, c) in sorted(li):
if index == -1:
index += 1
count.append(c)
date.append(d)
else:
if d - date[index] <= delta:
count[index] += c
else:
date.append(d)
count.append(c)
index += 1
return (count, date)
|
06682bef2693b3a528074f1796e45d548d628425
| 408,636 |
def distribute_events_between_lines(n_events: int, n_lines: int) -> list[int]:
"""
Distribute evenly the specified number of events between the specified number of lines.
:param n_events:
number of events
:param n_lines:
number of lines
:return:
list of numbers of events in a particular line
"""
results = [n_events // n_lines for _ in range(n_lines)]
i = 0
while sum(results) < n_events:
results[i] += 1
i += 1
return results
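# Usage sketch (illustrative values): the remainder goes to the first lines.
# >>> distribute_events_between_lines(10, 3)
# [4, 3, 3]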
|
70add1c8e66c3fee10d5f4cd2b9ae342f8e8cd24
| 327,750 |
def isDatetimeObjTzAware(dt):
"""Find if the datetime object is timezone aware"""
return dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None
|
fe40e4a2814380d9672a76b24a4d38d728e36141
| 393,316 |
def cont(X):
"""Returns True for all non-categorical columns, False for the rest.
This is a helper function for OpenML datasets encoded as DataFrames simplifying the handling
of mixed data types. To build sklearn models on mixed data types, a ColumnTransformer is
required to process each type of columns separately.
This function allows transformations meant for continuous/numeric columns to access the
continuous/numeric columns given the dataset as DataFrame.
"""
if not hasattr(X, "dtypes"):
raise AttributeError("Not a Pandas DataFrame with 'dtypes' as attribute!")
return X.dtypes != "category"
|
9b4842936d82764f3e1c15aa2edeff4946fcda65
| 547,234 |
import tokenize
def is_name_token(token, name):
"""Check if a token is a NAME and has a specific string value
NAME tokens are returned by the ``tokenize`` module for identifiers
and keywords: ``if``, ``lambda``, ``a_variable``, ``is_name_token``, ...
Arguments:
token (tokenize.TokenInfo): The Token to check.
name (Union[str, Iterable[str]]): A token name or names to check
against; must be lowercase.
Returns:
Return ``True`` if token is a ``NAME`` token and case-insensitively
matches ``name``. If ``name`` is a tuple or set, return ``True`` if the
token matches any of the supplied names.
Return ``False`` otherwise.
"""
if isinstance(name, str):
name = set([name])
return token.exact_type == tokenize.NAME and token.string.lower() in name
|
a4e01855a59e9c86ff93b1a60239eeafad5caeae
| 167,068 |
def Confirm(text, default='N'):
"""Asks the user to confirm something.
Args:
text(str): the text of the question.
default(str): set the accepted value if user just hits Enter.
Possible values: 'Y' or 'N' (the default).
Returns:
bool: True if the user confirms, False otherwise.
"""
print(text)
user_choice = ''
if default == 'Y':
while user_choice not in ['y', 'n', '']:
user_choice = input('[Y/n]? ').lower()
# An empty user answer means 'y'
return user_choice in ['y', '']
elif default == 'N':
while user_choice not in ['y', 'n', '']:
user_choice = input('[y/N]? ').lower()
return user_choice == 'y'
else:
# Don't allow an empty answer
while user_choice not in ['y', 'n']:
user_choice = input('[y/N]? ').lower()
return user_choice == 'y'
|
57e55ad780b9a442d245b2e12863d52650a7aa56
| 614,628 |
def corr_row_i_row_j(row_i, row_j):
"""This function will compute the correlation between one row, i, and a second row, j"""
return row_i.corr(row_j)
|
2db4a1b1ce5c99b963a4fc6dea8573c62923c9fd
| 424,178 |
def clamp_short(x):
"""Clamps a signed short to be within its upper and lower bounds."""
return -32768 if x < -32768 else 32767 if x > 32767 else x
|
daab709a37e3689c1639bc0505721bd329c13126
| 519,250 |
def list_difference(list1, list2):
"""
Given two lists with alignments list1 and list2, return a new list
(new_list2) that contains all elements of list2 without elements of
list1.
"""
    # Build a set of alignment names in list1 to allow fast lookups.
    list1_names = {alignment.name for alignment in list1}
    # Copy only elements of list2 whose names are not in list1.
    new_list2 = list()
    for alignment in list2:
        if alignment.name not in list1_names:
            new_list2.append(alignment)
return new_list2
|
38821fead3f769c16721ad1f7e4f84a2af9238fa
| 514,751 |
def parse_filenames_and_sizes(list_file):
"""
Takes a file with tab-delimited filename/size pairs and returns a
filename-->size dict.
"""
filename_to_size = {}
with open(list_file,'r') as f:
for line in f:
if ('catalog.json' in line) or ('stac.json' in line):
continue
tokens = line.split('\t')
assert len(tokens) == 2
fn = tokens[0]
size_str = tokens[1]
size = int(size_str)
if size == 0:
continue
filename_to_size[fn] = size
# ...for each line
# ...with open()
return filename_to_size
|
ca029d217a6b0b6ba65acea63255542179a413c2
| 600,032 |
import random
def rationed_split(examples, train_ratio, val_ratio, test_ratio, shuffle):
"""
Splits a list of examples according to the given ratios and returns the
splits as a tuple of lists (train_examples, valid_examples, test_examples).
The list can also be randomly shuffled before splitting.
Parameters
----------
examples : list
A list of examples that is to be split according to the ratios.
train_ratio : float
The fraction of examples that should be put into the train split.
val_ratio : float
The fraction of examples that should be put into the valid split.
test_ratio : float
The fraction of examples that should be put into the test split.
shuffle : bool
Whether to shuffle the list before splitting.
Returns
-------
tuple
The train, valid and test splits, each as a list of examples.
Raises
------
ValueError
If the given split ratio is wrong in the sense that it would result
with at least one empty split.
"""
    # Create a random permutation of examples, then split them
    # by ratio x length slices for each of the train/valid/test splits
N = len(examples)
indices = list(range(N))
if shuffle:
random.shuffle(indices)
train_len = int(round(train_ratio * N))
# Due to possible rounding problems
if val_ratio is None:
if train_len == 0 or (N - train_len) == 0:
raise ValueError("Bad ratio: both splits should have at least 1 element.")
indices_tuple = (indices[:train_len], [], indices[train_len:])
else:
test_len = int(round(test_ratio * N))
val_len = N - train_len - test_len
if train_len * test_len * val_len == 0:
raise ValueError("Bad ratio: all splits should have at least 1 element.")
indices_tuple = (
indices[:train_len], # Train
indices[train_len : train_len + val_len], # Validation
indices[train_len + val_len :], # Test
)
# Create a tuple of 3 lists, the middle of which is empty if only the
# train and test ratios were provided
data = tuple([examples[idx] for idx in indices] for indices in indices_tuple)
return data
|
dd18c21c34f1d18bdede81c90592264517d91f1b
| 227,260 |
def num_spaces(src: str) -> int:
"""Count the number of spaces at the beginning of the string."""
return len(src) - len(src.lstrip())
|
da3889e968f40eaf106da3d3bfa67d148fb8dbf4
| 390,202 |
from pathlib import Path
def _get_uml_filename(module_filename) -> str:
"""
Return the UML file name, for a given Python module name.
:param module_filename: e.g. cylc.flow.ws_messages_proto.pb2.
:type module_filename: str
:return: UML file name (e.g. ws_messages_proto).
:rtype: str
"""
return Path(module_filename).stem
|
de3ef084c8d6896ac54962ad13320977db4d72f5
| 246,827 |
def make_aux_coord(cube, axis='Y'):
"""Make any given coordinate an Auxiliary Coordinate."""
coord = cube.coord(axis=axis)
cube.remove_coord(coord)
if cube.ndim == 2:
cube.add_aux_coord(coord, 1)
else:
cube.add_aux_coord(coord)
return cube
|
51c0ea85fca3397d8326ef6ae763f204baaaaa71
| 192,706 |
import six
def anykey(d):
"""
Return any key in dictionary.
:param d: dict: dictionary
:return object
"""
return next(six.iterkeys(d))
|
428e63d856ea2615d594b79013692e10baf0e80c
| 198,634 |
import torch
def flatten(lst):
"""
    Flattens a list or iterable of tensors. Note that this allocates new memory.
    Argument:
        lst (list or iterable): input tensors to be flattened
Returns:
one dimensional tensor with all elements of lst
"""
tmp = [i.contiguous().view(-1, 1) for i in lst]
return torch.cat(tmp).view(-1)
|
8a6d38c2e8d8e031f19bd7860237a32475709bea
| 654,310 |
def adjust_volume_gravity(vol, og, new_vol):
"""
Calculate the new gravity after boil off or dilution to ``new_vol``
This is unit independent and the volume can be used for liters
and or gallons.
Ending Gravity = (Beginning Volume * Beginning Gravity) / End Volume
:arg vol: Original volume of wort
:arg og: The current gravity of the wort
:arg new_vol: The new volume of the wort
:returns: The new gravity after boiloff or dilution
"""
og = (og - 1) * 1000
return 1 + ((vol * og) / new_vol) / 1000
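# Usage sketch (illustrative values): boiling 7 units of 1.050 wort down to
# 6 units raises the gravity to roughly 1.058.
# >>> round(adjust_volume_gravity(7, 1.050, 6), 3)
# 1.058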
|
df73322ae59f3551296756415889c5907d850e4a
| 427,195 |
def check_max_drawdown(
initial_balance: float, current_balance: float, max_drawdown: float
) -> bool:
"""
check if the loss exceed the max given drawdown
"""
percentage = 0.01
max_drawdown_percentage = max_drawdown * percentage
is_in_drawdown = False
if current_balance < (initial_balance - initial_balance * max_drawdown_percentage):
is_in_drawdown = True
return is_in_drawdown
|
8c0c57f95ac072795c654648b8721a9ed1f61676
| 463,595 |
def to_matrix_vector(transform):
"""Split a transform into it's matrix and vector components.
The tranformation must be represented in homogeneous coordinates
and is split into it's rotation matrix and translation vector
components.
Parameters
----------
transform : ndarray
NxM transform matrix in homogeneous coordinates representing an
affine transformation
from an (N-1)-dimensional space to an (M-1)-dimensional space
Example, a 4x4
transform representing rotations and translations in 3
dimensions. A 4x3 matrix can represent a 2-dimensional plane
embedded in 3 dimensional space.
Returns
-------
matrix, vector : ndarray
The matrix and vector components of the transform matrix. For
an NxM transform, matrix will be N-1xM-1 and vector will be
1xN-1.
See Also
--------
from_matrix_vector
"""
ndimin = transform.shape[0] - 1
ndimout = transform.shape[1] - 1
matrix = transform[0:ndimin, 0:ndimout]
vector = transform[0:ndimin, ndimout]
return matrix, vector
|
210a01d0d4fb53e69f9a18ffa6358a0de0aca415
| 571,817 |
def extract_turn_10_more(df):
"""
Given a concise conversation dataframe, extract those with 10 or more dialog turns.
Arg:
df: A conversation dataframe from a subreddit.
Return:
turn_10_more: A dataframe containing only those conversations with 10 or more turns.
"""
turn_dist = df.groupby('conversation id').size()
turn_dist_10_more_index = turn_dist[turn_dist >= 10].index
turn_10_more = df[df['conversation id'].isin(list(turn_dist_10_more_index))]
return turn_10_more
|
5f4559f023dfc85bf02d7e0ed6326fd1ffd1abce
| 102,947 |
import re
def replaceall(replace_dict, string):
"""
replaceall will take the keys in replace_dict and replace string with their corresponding values. Keys can be regular expressions.
"""
replace_dict = dict((re.escape(k), v) for k, v in list(replace_dict.items()))
pattern = re.compile("|".join(list(replace_dict.keys())))
return pattern.sub(lambda m: replace_dict[re.escape(m.group(0))], string)
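# Usage sketch (illustrative values):
# >>> replaceall({'cat': 'dog', 'blue': 'red'}, 'blue cat')
# 'red dog'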
|
66c6ec299476986011de21a5f28a613c44435d33
| 41,894 |
import re
def parse_url(url, quadkey_zoom):
"""Parse quadkey from url
"""
name = url.split('/')[-1]
match = re.findall(r'([0-3]{{{0}}})'.format(quadkey_zoom), name)
qk = match[-1]
return qk
|
d4cb66e5507af5f141a4c3c0b4fb661677130dc8
| 339,569 |
def compute_gradient(y, y_predicted, tx, N=1, regularization=0):
"""Computes gradient of a linear regression model with squared loss function
Parameters
----------
y : np.ndarray
Class labels
y_predicted : np.ndarray
Labels predicted by model
tx : np.ndarray
Data
N : int
The size of the dataset
regularization : int or np.ndarray
Returns
-------
np.ndarray
Gradient of a mean square loss for linear model
"""
return -tx.T.dot(y - y_predicted) / N + regularization
|
6b5b69ca3726f1d05010a0e73e12da9e84e7d50b
| 120,769 |
def parse_jcamp_line(line,f):
"""
Parse a single JCAMP-DX line
Extract the Bruker parameter name and value from a line from a JCAMP-DX
file. This may entail reading additional lines from the fileobj f if the
parameter value extends over multiple lines.
"""
# extract key= text from line
key = line[3:line.index("=")]
text = line[line.index("=")+1:].lstrip()
if "<" in text: # string
while ">" not in text: # grab additional text until ">" in string
text = text+"\n"+f.readline().rstrip()
value = text.replace("<","").replace(">","")
elif "(" in text: # array
num = int(line[line.index("..")+2:line.index(")")])+1
value = []
rline = line[line.index(")")+1:]
        # extract value from remainder of line
for t in rline.split():
if "." in t or "e" in t:
value.append(float(t))
else:
value.append(int(t))
# parse additional lines as necessary
while len(value) < num:
nline = f.readline().rstrip()
for t in nline.split():
if "." in t or "e" in t:
value.append(float(t))
else:
value.append(int(t))
elif text == "yes":
value = True
elif text == "no":
value = False
else: # simple value
if "." in text or "e" in text:
value = float(text)
else:
value = int(text)
return key,value
|
84061c3f4bc42a62e308d5f93877e5c55d85efc1
| 13,833 |
import pickle
def load_calib(filename):
""" Loads calibration parameters from '.pkl' file.
Parameters
----------
filename : str
Path to load file, must be '.pkl' extension
Returns
-------
calib_params : dict
Parameters for undistorting images.
"""
    # read python dict back from the file; the context manager guarantees
    # the file is closed even if unpickling fails
    with open(filename, 'rb') as pkl_file:
        try:
            calib_params = pickle.load(pkl_file)
        except (pickle.UnpicklingError, EOFError):
            raise IOError("File must be '.pkl' extension")
    return calib_params
|
93700abe123df3ebcd17bddf16a6acd1a42ea1a7
| 22,647 |
import re
def _expand(string):
""" Convert number ranges with hyphens and commas into list of numbers """
result = []
for element in re.split(', *', string):
# Expand 1-2 to [1, 2] or 1/3-4 to [1/3, 1/4] or 1/5-1/6 to [1/5, 1/6]
m = re.match(r'([0-9/]*?)(\d+)-\1?(\d+)', element)
if m:
for num in range(int(m.group(2)), int(m.group(3)) + 1):
result.append(m.group(1) + str(num))
else:
result.append(element)
return result
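# Usage sketch (illustrative values):
# >>> _expand('1-3, 5')
# ['1', '2', '3', '5']
# >>> _expand('1/3-4')
# ['1/3', '1/4']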
|
8eab6c2408b40457cbcda2c34641414a2dcceea5
| 198,458 |
import requests
from bs4 import BeautifulSoup
def get_url_paths(url, ext='ff'):
"""
Function to obtain the list of elements in ``url``.
Parameters
-------------
url : `str`
Path to the Internet address.
ext : `str`, optional
File extension to search for. This variable is set to ``ff`` by
default.
Returns
----------
    parent : list
        List of files in ``url`` that match ``ext`` extension.
"""
# Connecting to `url`
response = requests.get(url)
# Checking connection
if response.ok:
response_text = response.text
else:
return response.raise_for_status()
# Parsing HTML from website
soup = BeautifulSoup(response_text, 'html.parser')
# Extracting filenames that match the criteria of `ext`.
parent = [url + node.get('href') for node in soup.find_all('a') if node.get('href').endswith(ext)]
return parent
|
db0e4d12011d8394b5db4f6276e98e5b49cba479
| 167,967 |
def camelcase(var): # someVariable
"""
Camel case convention. Include an uppercase at every first element except the first.
:param var: Variable to transform
:type var: :py:class:`list`
:returns: **transformed**: (:py:class:`str`) - Transformed input in ``camelCase`` convention.
"""
result = ""
for i, element in enumerate(var):
element = list(element)
if i > 0:
element[0] = element[0].upper()
result += "".join(element)
return result
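# Usage sketch (illustrative values):
# >>> camelcase(['some', 'variable', 'name'])
# 'someVariableName'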
|
7591ba43aaa05c10892f9699687b0f7306fc22b3
| 327,494 |
def load_capcodes_dict(filename):
"""Load capcodes to dictionary."""
capcodes = {}
try:
print("Loading data from '{}'".format(filename))
with open(filename, "r") as csv_file:
csv_list = [
[val.strip() for val in r.split(",")] for r in csv_file.readlines()
]
(_, *header), *data = csv_list
for row in data:
key, *values = row
capcodes[key] = {key: value for key, value in zip(header, values)}
print("{} records loaded".format(len(capcodes)))
    except (KeyError, ValueError):
print(f"Could not parse file contents of: {filename}")
except OSError:
print(f"Could not open/read file: {filename}, ignore filter")
return capcodes
|
a0172a8bc58609604b3d457f088cf445367e41a4
| 111,901 |
def is_number(c):
"""
Checks if the given character is a number.
:param c: The character to check.
:return: True if the character is a number, False otherwise.
"""
return '0' <= c <= '9'
|
4c115405d46e878863de4bf84b058fce586df331
| 584,579 |
def fib(n):
"""Calculates the nth fibbonaci number. NOTE: Uses index counting"""
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib(n-1) + fib(n-2)
|
1662f9b826dc8a74eab9776f9d3032d2ffb88a38
| 596,589 |
def prompt_output(cli_input, converted=None):
"""Return expected output of simple_command, given a commandline cli_input string."""
return f'Opt: {cli_input}\n{converted or cli_input}\n'
|
beceecf80039452e68acb02005ab35ce939be735
| 685,957 |
import math
def translate_point(point, angle, distance):
"""Translate a point a distance in a direction (angle)."""
x, y = point
return (x + math.cos(angle) * distance, y + math.sin(angle) * distance)
|
a32c4209cad97fc670c18acb47c27ec7fbc8bc5c
| 47,228 |
def data_repeated(data):
"""
Generate many datasets.
Parameters
----------
data : fixture implementing `data`
Returns
-------
Callable[[int], Generator]:
A callable that takes a `count` argument and
returns a generator yielding `count` datasets.
"""
def gen(count):
for _ in range(count):
yield data
return gen
|
ace8addf548e07655584cc20f7c7038a4cc94579
| 212,053 |
def calculate_polynomial_term(coefficient, variable, order):
"""Calculates a term in a polynomial.
Args:
coefficient (float): The coefficient to use in
calculating the polynomial term.
variable (float): Value to plug in for the variable in the polynomial
term.
order (int): Integer to use as the order of the polynomial term.
Returns:
float: The result of coefficient * variable**(order)
Raises:
TypeError: A non-integer order is given.
"""
    if not isinstance(order, int):
        raise TypeError('Non-integer order in polynomial term')
    return coefficient * variable**(order)
|
1f7d657ded741c4ba840f6657fe469c6014d38d5
| 382,784 |