content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def sum_of_squares(x, y, fn):
    """
    Calculate the error between a training set and our approximation.

    Residual sum of squares: sum(|fn(x) - y|^2) for all (x, y)
    in the training set.

    :param x: iterable of inputs
    :param y: iterable of target values, aligned with ``x``
    :param fn: approximation function applied to each element of ``x``
    :return: float which represents the error
    """
    # Generator expression avoids materialising an intermediate list.
    return sum((fn(a) - b) ** 2 for a, b in zip(x, y))
|
99be28ba69a5abb99617221279b561c6a6d5df35
| 310,555 |
def _convert_None_to_str(value):
"""
returns input value or 'None' string value if input is None
The DictParser representation of a pygeometa MCF can't handle an
actual value of: None. All dict keys must be strings.
>>> _convert_None_to_str('foo')
'foo'
>>> _convert_None_to_str('None')
'None'
>>> _convert_None_to_str(None)
'None'
"""
if value is None:
return 'None'
return value
|
52408d30d9e2ba4de184289647b5991dc7032c21
| 463,066 |
def swap_element(sequence, idx, replacement):
    """
    Return a copy of *sequence* with the element at *idx* replaced.

    Useful for immutable sequences such as strings, which cannot be
    modified in place.

    :param sequence: the original immutable sequence.
    :param idx: index of the element to swap (a negative index counts from
        the last element of the sequence).
    :param replacement: value to put at position *idx*.
    :return: a copy of the sequence with the idx-th element replaced.
    :raises ValueError: if the sequence is empty.
    :raises IndexError: if the (normalised) index is out of range.
    """
    length = len(sequence)
    if length == 0:
        raise ValueError('cannot swap value in an empty sequence')
    # Normalise a negative index to its positive equivalent.
    pos = idx if idx >= 0 else length + idx
    if not 0 <= pos < length:
        raise IndexError('index out of range')
    if length == 1:
        # Single-element sequence: the result is just the replacement.
        return replacement
    if pos == 0:
        return replacement + sequence[1:]
    if pos == length - 1:
        return sequence[:pos] + replacement
    return sequence[:pos] + replacement + sequence[pos + 1:]
|
63b89eddb23ad748e820a0e314aeea29395b967c
| 408,547 |
def correlation_matrix(data):
    """Calculate the correlation matrix of a dataframe, ordered by
    degree of correlation.

    Args:
        data (pandas dataframe): The data object (must contain a "time"
            column, which is discarded before computing correlations).

    Returns:
        pandas series: The correlation matrix sorted with highest\
        correlation on the top."""
    frame = data.drop(["time"], axis=1)  # time is not a variable
    columns = frame.columns
    # Diagonal and lower-triangular pairs duplicate information,
    # so collect them for removal.
    redundant = {
        (columns[i], columns[j])
        for i in range(frame.shape[1])
        for j in range(i + 1)
    }
    pairs = frame.corr().unstack().drop(labels=redundant)
    # Sort by absolute value while keeping the sign of each correlation.
    return pairs.sort_values(ascending=False, key=lambda col: col.abs())
|
8d19cf7de1e7cb028e47dc6563b99a10fe7e7586
| 537,734 |
def squared_loss(x1: float, x2: float):
    """Return the squared difference between two numbers."""
    diff = x1 - x2
    return diff * diff
|
dcec9a0632e9b3c70aec0d6e1610286b8b6923cc
| 230,696 |
def _estimate_fgbio_defaults(avg_coverage):
"""Provide fgbio defaults based on input sequence depth and coverage.
For higher depth/duplication we want to use `--min-reads` to allow
consensus calling in the duplicates:
https://fulcrumgenomics.github.io/fgbio/tools/latest/CallMolecularConsensusReads.html
If duplicated adjusted depth leaves a coverage of 800x or higher
(giving us ~4 reads at 0.5% detection frequency),
then we use `--min-reads 2`, otherwise `--min-reads 1`
"""
out = {}
if avg_coverage >= 800:
out["--min-reads"] = 2
else:
out["--min-reads"] = 1
return out
|
c5d4e93c41e00f66d35935035a157054e66d69c9
| 54,400 |
def check_for_ticket_quantity_error(quantity):
    """
    Return an error message if the ticket's quantity is not between 1 and
    100; return False when there is no error.

    :param quantity: the ticket's quantity as a string
    :return: False if no error, else the error as a string message
    :raises ValueError: if ``quantity`` is not a valid integer string
    """
    qty = int(quantity)  # convert once instead of twice
    if qty < 1 or qty > 100:
        return "The quantity of the ticket must be between 1 and 100"
    return False
|
80001bbcaffe8193eaa158219f1747b18c4110a5
| 84,670 |
def get_year_month_day(node):
    """
    Return the values of the "year", "month" and "day" child elements.

    Parameters
    ----------
    node : lxml.etree.Element
        A date-like element containing "year", "month", "day" children.

    Returns
    -------
    tuple of strings
        ("YYYY", "MM", "DD"), each zero-padded to at least two digits.
    None
        If ``node`` is None.
    """
    if node is None:
        return None
    parts = []
    for field in ("year", "month", "day"):
        # findtext may return None for a missing child; treat as empty.
        text = node.findtext(field) or ""
        parts.append(text.zfill(2))
    return tuple(parts)
|
f16fcc8b54984c7f26b2ce3f37da0353d2e7542e
| 563,180 |
from typing import Sequence
from typing import Tuple
def get_default_powerup_distribution() -> Sequence[Tuple[str, int]]:
    """Return the standard set of powerups as (name, count) pairs."""
    names = ('triple_bombs', 'ice_bombs', 'punch', 'impact_bombs',
             'land_mines', 'sticky_bombs', 'shield', 'health', 'curse')
    counts = (3, 3, 3, 3, 2, 3, 2, 1, 1)
    return tuple(zip(names, counts))
|
1e125dfe64627b25e56e9f905d4fc8cc1a878684
| 35,863 |
def addHtmlBreaks(s, isXhtml=True):
    """Replace every newline by <br/> (XHTML) or <br> (HTML)."""
    if isXhtml:
        tag = '<br/>\n'
    else:
        tag = '<br>\n'
    return s.replace('\n', tag)
|
dc4fb9d39fc7a2fba8d8c795c3c8394630358425
| 656,849 |
def is_empty(module, container_name, facts):
    """
    Check whether a container can be removed safely.

    A container can be removed when no device is attached to it; the CVP
    facts are scanned for a device whose parent is ``container_name``.

    Parameters
    ----------
    module : AnsibleModule
        Object representing Ansible module structure with a CvpClient connection
    container_name : str
        Name of the container to look for.
    facts : dict
        Facts from CVP collected by cv_facts module

    Returns
    -------
    bool
        True when no device is attached to the container, False otherwise.
    """
    # NOTE(review): only devices are checked here -- despite the original
    # comment, attached child containers are not inspected.
    return not any(device['parentContainerName'] == container_name
                   for device in facts['devices'])
|
e91eb5dfa36c14d701082d0831d6be5147d88179
| 629,239 |
import copy
import itertools
def merge_cropobject_lists(*cropobject_lists):
    """Combine several CropObject lists into one.

    Each list's ``objid``s (and thus inlinks and outlinks) are shifted so
    that ids stay unique in the merged list. It is assumed the lists
    pertain to the same image. Uses deepcopy to avoid exposing the
    original lists to modification through the merged list.
    """
    lengths = [len(c) for c in cropobject_lists]
    # Offsets are running totals of the preceding list lengths;
    # itertools.accumulate computes them in O(n) instead of O(n^2).
    shift_by = [0] + list(itertools.accumulate(lengths[:-1]))
    output = []
    for clist, shift in zip(cropobject_lists, shift_by):
        for c in clist:
            new_c = copy.deepcopy(c)
            new_c.objid = c.objid + shift
            new_c.inlinks = [i + shift for i in c.inlinks]
            new_c.outlinks = [o + shift for o in c.outlinks]
            output.append(new_c)
    return output
|
7c92fd345d932746460034dcbf8fad4325933a0d
| 335,188 |
import re
def get_rna_filtered_reads(wildcards, config):
    """
    Get the clean-reads file for a sample, with the requested rRNA subset
    name inserted into the fastq file name.

    E.g. sample.clean.fastq -> sample.clean.SSU.fastq
    """
    clean_path = config["sample_data"][wildcards.sample]["clean"]
    subset = wildcards.rna
    # Insert the subset name just before the .fasta/.fastq extension.
    return re.sub(r"\.(fast[aq])$", r".{}.\1".format(subset), clean_path)
|
f70fe6282f1cc47ec90708fe5cd665dd41413305
| 236,532 |
def insertion_sort(array):
    """
    Sort *array* in place by insertion sort and return it.

    Complexity: O(N^2)
    """
    for end in range(len(array)):
        value = array[end]
        pos = end
        # Shift larger elements one slot to the right.
        while pos > 0 and value < array[pos - 1]:
            array[pos] = array[pos - 1]
            pos -= 1
        array[pos] = value
    return array
|
09a9e5be73e23f42ee04129f979516e9e556a908
| 664,724 |
def sqrt(number):
    """
    Calculate the floored square root of a number.

    Args:
        number(int): Number to find the floored square root of
    Returns:
        int: Floored square root, or None for None or negative input
    """
    # Check None first: `number < 0` on None would raise a TypeError.
    if number is None or number < 0:
        return None
    # Binary search for the largest r with r * r <= number, O(log n).
    # (The previous halving scheme was wrong, e.g. it returned 4 for 10.)
    low, high = 0, number
    while low <= high:
        mid = (low + high) // 2
        if mid * mid <= number:
            low = mid + 1
        else:
            high = mid - 1
    return high
|
d4235023cfbc56aa59d3709a3039914b1e808fb7
| 69,533 |
def format_duration(dur_ms):
    """Return a human-readable string for a duration in milliseconds."""
    if not dur_ms:
        return "0ms"
    remaining, ms = divmod(dur_ms, 1000)
    remaining, secs = divmod(remaining, 60)
    hrs, mins = divmod(remaining, 60)
    parts = []
    if hrs > 0:
        parts.append("%dh" % hrs)
    if mins > 0:
        parts.append("%dm" % mins)
    if secs > 0:
        parts.append("%ds" % secs)
    # Always emit at least the millisecond component.
    if ms > 0 or not parts:
        parts.append("%dms" % ms)
    return "".join(parts)
|
f552b52058ba104517d3082fe19839afb137ebe2
| 136,242 |
def parse_count(cell_value):
    """
    Parse a Halias data cell value to an integer (None when empty).
    """
    # Strip surrounding double quotes, if present.
    if cell_value.startswith('"') and cell_value.endswith('"'):
        cell_value = cell_value[1:-1]
    return int(cell_value) if cell_value else None
|
ad4a3062d06c8c81141ba28a5a9db497b2e65a0e
| 313,611 |
def bokeh_barbs(mpl_barb):
    """Convert matplotlib.quiver.Barbs to bokeh multiline/patches data."""
    xo, yo = mpl_barb.get_offsets().T
    # One (x, y) vertex-array pair per barb path.
    vertex_pairs = [path.vertices.T for path in mpl_barb.get_paths()]
    xs = [x for x, _ in vertex_pairs]
    ys = [y for _, y in vertex_pairs]
    return xo, yo, xs, ys
|
d5599adc187a8e47ae270c6d7e8afed0af7676b2
| 411,490 |
def decrypt(sk, c):
    """Decrypt a ciphertext bit using the provided secret key."""
    reduced = c % sk
    return reduced % 2
|
d3a96f66e1b449ffbd5b6ca1ba0827455b83f9e2
| 701,346 |
def interval_to_col_name(interval):
    """
    Return the timespan column name for a reporting interval.

    The lookup is case-insensitive; None is returned for unrecognised
    intervals (matching the original if/elif chain's fall-through).
    """
    mapping = {
        "yearly": "year",
        "monthly": "month",
        "weekly": "week",
        "daily": "day",
    }
    return mapping.get(interval.lower())
|
8fefc26f256b19a45317e608803c2fc1e8fcb06f
| 197,390 |
def create_empty_vapp(client, vdc, name, description):
    """Helper method to create an empty vApp.

    :param pyvcloud.vcd.client.Client client: a client that would be used
        to make ReST calls to vCD.
    :param pyvcloud.vcd.vdc.VDC vdc: the vdc in which the vApp will be
        created.
    :param str name: name of the new vApp.
    :param str description: description of the new vApp.
    :return: href of the created vApp.
    :rtype: str
    """
    # Kick off the (asynchronous) vApp creation task.
    vapp_sparse_resouce = vdc.create_vapp(
        name=name, description=description, accept_all_eulas=True)
    # Block until the creation task completes; raises on task failure.
    client.get_task_monitor().wait_for_success(
        vapp_sparse_resouce.Tasks.Task[0])
    return vapp_sparse_resouce.get('href')
|
7dcd5641ba00056b2045c57bbca3cc8529cf9f95
| 496,956 |
import textwrap
def indent(multiline_str: str, indented=4):
    """
    Indent every line of a multiline string.

    Args:
        multiline_str: string to be converted
        indented: number of spaces used for indentation

    Returns: Indented string
    """
    prefix = " " * indented
    return textwrap.indent(multiline_str, prefix)
|
9fd5f2310ade00071a57731040435428cff88557
| 698,815 |
def guard(f, *args, **kwargs):
    """
    Run ``f(*args, **kwargs)`` and capture any exception.

    Return (is_error, result): ``is_error`` is True when the call raised,
    in which case ``result`` is the exception instance; otherwise
    ``result`` is the function's return value.
    """
    try:
        result = f(*args, **kwargs)
    except Exception as exc:
        return (True, exc)
    return (False, result)
|
7b612dbc88a098c50a5f3b9cc2d2e8eeb617b160
| 34,919 |
import random
def random_models(ctx, model_class_name, min_count=0, max_count=3):
    """
    Get a random model identifier by class name. Example usage::
        # db/fixtures/Tag.yml
        {% for i in range(0, 10) %}
        tag{{ i }}:
          name: {{ faker.name() }}
        {% endfor %}
        # db/fixtures/Post.yml
        a_blog_post:
          tags: {{ random_models('Tag') }}
    Will render to something like the following::
        # db/fixtures/Post.yml (rendered)
        a blog_post:
          tags: ["Tag(tag2, tag5)"]
    :param ctx: The context variables of the current template (passed automatically)
    :param model_class_name: The class name of the models to get.
    :param min_count: The minimum number of models to return.
    :param max_count: The maximum number of models to return.
    """
    model_identifiers = ctx['model_identifiers'][model_class_name]
    num_models = random.randint(min_count, min(max_count, len(model_identifiers)))
    if num_models == 0:
        return '[]'
    # random.sample picks distinct identifiers directly, replacing the
    # original retry-until-unique loop (which degraded as num_models
    # approached the population size).
    chosen = random.sample(model_identifiers, num_models)
    return '["%s(%s)"]' % (model_class_name, ','.join(chosen))
|
d213733551c81bc71c9199fbb9c0045c55dcea5d
| 145,411 |
import math
def eq_A4_phi_parallel_corner(W_m, H_m, S_m, multiplier=1):
    """Equation A4 in BR 187 second edition (2014): view factor from a
    rectangle corner, parallel orientation.

    :param W_m: in m, width of emitter panel
    :param H_m: in m, height of emitter panel
    :param S_m: in m, separation distance from surface to surface
    :param multiplier: optional scale applied to the result
    :return phi: configuration factor
    """
    # Non-dimensionalise the panel size by the separation distance.
    X = W_m / S_m
    Y = H_m / S_m
    term_x = X / (1 + X ** 2) ** 0.5 * math.atan(Y / (1 + X ** 2) ** 0.5)
    term_y = Y / (1 + Y ** 2) ** 0.5 * math.atan(X / (1 + Y ** 2) ** 0.5)
    phi = (term_x + term_y) / (2 * math.pi)
    return phi * multiplier
|
d7bb8ef276cf76e553da88fe9b0dbc777577d559
| 545,061 |
def get_experiments_from_server_by_run_id(uploader, run_id,
                                          schema_namespace):
    """
    Query the server for all Experiments (run and project) with a matching
    run_id Parameter. Returns a list of integer object IDs.
    :param uploader: An uploader object instance
    :type uploader: MyTardisUploader
    :param run_id: The unique run ID (eg 150225_DMO177_0111_AHFNLKADXX)
    :type run_id: basestring
    :param schema_namespace: The namespace of the MyTardis schema (a URL)
    :type schema_namespace: basestring
    :return: Object IDs of all matching experiments.
    :rtype: list(int)
    """
    # using the custom API in the sequencing_facility app
    response = uploader.do_get_request(
        '%s_experiment' % uploader.tardis_app_name,
        {'schema_namespace': schema_namespace,
         'parameter_name': 'run_id',
         # 'parameter_type': 'string',
         'parameter_value': run_id})
    if response.ok:
        data = response.json()
        if 'objects' in data:
            return [o.get('id', None) for o in data['objects']]
        # NOTE(review): implicitly returns None when the response has no
        # 'objects' key -- confirm callers handle None.
    else:
        # NOTE(review): raise_for_status() raises on its own for non-ok
        # responses, so its return value (None) never reaches this outer
        # `raise` -- confirm the intended behaviour.
        raise response.raise_for_status()
|
18974960abac11dcde6ea7293db6c1dd8e775226
| 391,595 |
def _pkg_curated_namespace(package):
"""
Strips out the package name and returns its curated namespace.
"""
return f"curated-{package.split('/', 1)[0]}"
|
d7e8bd4a0e28f0aa46bdcf8eb4bddd02a7928450
| 342,940 |
def get_mean(iterable):
    """
    Return the mean of an iterable: the sum divided by the number of items.

    Accepts any iterable, including one-shot iterators (the previous
    version required a sized sequence because it called len() directly).

    :raises ZeroDivisionError: if the iterable is empty.
    """
    values = list(iterable)  # materialise so generators work too
    return sum(values) / len(values)
|
c1a5a81a7ac9c04991145a8b8245f8cd05af5fcb
| 573,414 |
import re
def remove_pattern(input_text, pattern):
    """
    Remove every match of *pattern* from a twitter post.

    Args:
        input_text: String representation of a twitter post
        pattern: Regex pattern to search for in twitter post
    Returns:
        String with all matches of the pattern stripped.
    """
    # The original fed each *matched string* back into re.sub, which broke
    # whenever a match contained regex metacharacters (and rescanned the
    # text once per match). Substituting the pattern directly is both
    # correct and a single pass.
    return re.sub(pattern, "", input_text)
|
7681566e64a136a3adb7fe02338dff2698cb9623
| 317,182 |
def load_file_lines(filepath):
    """
    Load the lines of the file passed.

    :param filepath: path to the file
    :type filepath: str
    :return: list of lines, each keeping its trailing newline
    """
    with open(filepath, 'r') as handle:
        return handle.readlines()
|
b7be92214098fb5ec323e20092c03cb1886e5575
| 437,651 |
def compose_line(title, file_stem):
    """ Compose a markdown index bullet linking *title* to <file_stem>.html. """
    return '* [{}]({}.html)\n'.format(title, file_stem)
|
a015bea14f365641ea89baa8fab8fed72fbfb4f9
| 101,022 |
def generateContentRange(tup):
    """Build a Content-Range header value.

    tup is (rtype, start, end, rlen); rlen can be None, in which case the
    total length is reported as '*'. When both start and end are None the
    range itself is reported as '*'.
    """
    rtype, start, end, rlen = tup
    # `is None` is the correct identity test; `== None` can be fooled by
    # objects overriding __eq__.
    if rlen is None:
        rlen = '*'
    else:
        rlen = int(rlen)
    if start is None and end is None:
        startend = '*'
    else:
        startend = '%d-%d' % (start, end)
    return '%s %s/%s' % (rtype, startend, rlen)
|
2e5bd5240a2ffe89a789a554f09a6b07ffc737b0
| 292,209 |
def idx2long(index):
    """Convert a possible index into a long int."""
    try:
        converted = int(index)
    except Exception:
        # Normalise any conversion failure into a TypeError.
        raise TypeError("not an integer type.")
    return converted
|
7b2064abd6210230c0e726191f7498e2e8ca0816
| 161,917 |
from typing import Tuple
def rgb_from_native(native_color: int) -> Tuple[int, int, int]:
    """
    Extract RGB values from a native (OS-dependent) color.
    Parameters
    ----------
    native_color : int
        Native color.
    Returns
    -------
    r, g, b : (int, int, int)
        RGB values between 0 and 255.
    """
    # NOTE(review): RPR is presumably the ReaScript API bridge defined at
    # module level (not visible in this excerpt) -- confirm before reuse.
    # The first returned value is discarded; the 0s are out-parameter
    # placeholders filled by the call.
    _, r, g, b = RPR.ColorFromNative(native_color, 0, 0, 0)  # type:ignore
    return r, g, b
|
9fc34dc84bbef13b498262158c27ed024b4bde81
| 166,879 |
def gcf(a, b, epsilon=1e-16):
    """Return the greatest common factor of a and b, using Euclidean algorithm.
    Arguments:
    a, b -- two numbers
    If both numbers are integers return an integer result,
    otherwise return a float result.
    epsilon -- floats less than this magnitude are considered to be zero
    (default: 1e-16)
    Examples:
    >>> gcf(12, 34)
    2
    >>> gcf(13.5, 4)
    0.5
    >>> gcf(-2, 4)
    2
    >>> gcf(5, 0)
    5
    By (a convenient) definition:
    >>> gcf(0, 0)
    0
    """
    larger, smaller = max(a, b), min(a, b)
    # Euclidean algorithm; stop when the remainder is (effectively) zero.
    while smaller and abs(smaller) > epsilon:
        larger, smaller = smaller, larger % smaller
    return abs(larger)
|
91bc03277495c8968486d5f8b059057f72f5d715
| 243,336 |
def get_input_tool_name(step_id, steps):
    """Get the string with the name of the tool that generated an input.

    Returns '' when the step id is unknown.
    """
    step_key = str(step_id)
    if step_key not in steps:
        return ''
    name = steps[step_key]['name']
    if 'Input dataset' in name:
        return "(%s)" % name
    return "(output of **%s** {%% icon tool %%})" % name
|
336926d59b4409d095feac36b4ffe374fcf56755
| 608,476 |
def knots_to_mps(vals):
    """Convert a speed in knots to meters per second.

    1 knot = 1852 m per 3600 s; works on scalars and array types alike.
    """
    meters = vals * 1852.0
    return meters / 3600.0
|
2ea67e9b33e32537d8f1db7197e3123801c2ce97
| 387,604 |
def meanRelativeSquaredError(xs, ys, func):
    """Return the mean relative squared error of func over (xs, ys).

    Each term is ((func(x) - y) / y) ** 2, so any y == 0 raises
    ZeroDivisionError; the mean is taken over len(xs).
    """
    # Generator expression avoids building an intermediate list.
    return sum(((func(x) - y) / y) ** 2 for x, y in zip(xs, ys)) / len(xs)
|
28ca788f51c9482cc7fb29532a967fb0f4123d9d
| 121,996 |
def cm_q(cm_i, l_t, mac):
    """ Compute the damping-in-pitch coefficient.
    Assumptions:
    None
    Source:
    J.H. Blakelock, "Automatic Control of Aircraft and Missiles"
    Wiley & Sons, Inc. New York, 1991, (pg 23)
    Inputs:
    cm_i [dimensionless]
    l_t [meters]
    mac [meters]
    Outputs:
    cm_q [dimensionless]
    Properties Used:
    N/A
    """
    # 1.1 is the empirical correction factor from the cited source.
    return 2. * 1.1 * cm_i * l_t / mac
|
e41ea49ce3727397ad9c7a48cecbb4391366209d
| 259,274 |
def make_xor(f, g):
    """
    Compose functions in a short-circuit version of xor using the following
    table:
    +--------+--------+-------+
    | A      | B      | A^B   |
    +--------+--------+-------+
    | truthy | truthy | not B |
    | truthy | falsy  | A     |
    | falsy  | truthy | B     |
    | falsy  | falsy  | B     |
    +--------+--------+-------+
    where A = f(...) and B = g(...).
    """
    def xor(*args, **kwargs):
        first = f(*args, **kwargs)
        if not first:
            # Falsy A: the result is whatever B is.
            return g(*args, **kwargs)
        second = g(*args, **kwargs)
        if second:
            return not second
        return first
    return xor
|
6cb0164594e7cd51c4e3aaa7d3c2988fbec93b45
| 448,419 |
from typing import OrderedDict
def filter_words(word_to_idx, vectors, vocabulary):
    """Restrict word-vector data to the given vocabulary.

    Returns a new word -> dense-index OrderedDict (indices renumbered
    consecutively) and the matching row subset of ``vectors``.
    """
    kept = OrderedDict()
    kept_rows = []
    for word, original_idx in word_to_idx.items():
        if word not in vocabulary:
            continue
        kept[word] = len(kept)
        kept_rows.append(original_idx)
    return kept, vectors[kept_rows]
|
dc545455d9a60b17191602d55866acabd8006d20
| 94,967 |
def read_txt_file(fname):
    """Read a text file; each line becomes a separate element in the
    returned list (newlines stripped).

    The file handle is closed deterministically via a context manager
    (the original relied on garbage collection to close it).
    """
    with open(fname) as handle:
        return handle.read().splitlines()
|
af0be81903c6ffa8a574b8e33b842e62ae4a5bc8
| 661,074 |
def _should_process_segment(seg, segname):
"""Check if we should process the specified segment."""
return segname.endswith('__DATA_CONST.__mod_init_func') or \
segname == '__DATA.__kmod_init'
|
c0fc59f25dc523f669a63c2bf026aff26bc42877
| 617,262 |
def format_time(seconds):
    """Format seconds as a short human-readable time string.

    Args:
        seconds: Number of seconds to format.
    Returns:
        The formatted time string.
    Raises:
        ValueError: If the input `seconds` is less than 0.
    """
    if seconds < 0:
        raise ValueError(f'Input `seconds` should be greater than or equal to '
                         f'0, but `{seconds}` is received!')
    # Sub-minute durations keep a fractional part.
    if seconds < 10:
        return f'{seconds:5.3f}s'
    if seconds < 60:
        return f'{seconds:5.2f}s'
    total = int(seconds + 0.5)  # round to the nearest whole second
    days, rem = divmod(total, 86400)
    hours, rem = divmod(rem, 3600)
    minutes, secs = divmod(rem, 60)
    if days:
        return f'{days:2d}d{hours:02d}h'
    if hours:
        return f'{hours:2d}h{minutes:02d}m'
    return f'{minutes:2d}m{secs:02d}s'
|
2d58918fa22cd7cd40eea531f6bc86116bee1748
| 238,773 |
def get_data(users, exclude=None):
    """
    Build {'EMAIL': ...} rows for a set of users.

    users: set of Django users
    exclude [optional]: collection used to filter out addresses
        (NOTE(review): the original doc said "set of Django users", but
        the membership test compares `u.email` against it -- confirm
        callers pass email strings.)
    returns: generator of {'EMAIL': u.email} for users not excluded
    """
    skip = exclude or set()
    return ({'EMAIL': user.email} for user in users if user.email not in skip)
|
f92ed5aef2873a09abd7612dfc767019909a9083
| 476,631 |
def generate_registers_riscv_plic0_threshold(hart, addr):
    """Generate xml string for riscv_plic0 threshold register for hart.

    :param hart: hart index, as a string
    :param addr: address offset, as a string
    :return: SVD <register> XML fragment
    """
    # f-string keeps the template readable compared to the original
    # chained string concatenation; the emitted text is unchanged.
    return f"""\
    <register>
      <name>threshold_{hart}</name>
      <description>PRIORITY THRESHOLD Register for hart {hart}</description>
      <addressOffset>{addr}</addressOffset>
    </register>
    """
|
822c17560e055a5652621176630d9437ca90f631
| 437,535 |
def aix_path_join(path_one, path_two):
    """Ensure a file path is built correctly for a remote UNIX system.

    :param path_one: string of the first file path
    :param path_two: string of the second file path
    :returns: a uniform path with exactly one '/' between the fragments
    """
    # rstrip/lstrip are no-ops when there is nothing to strip, so the
    # conditional endswith/startswith checks of the original are not needed.
    return path_one.rstrip('/') + '/' + path_two.lstrip('/')
|
073115ad92683dd6d22da6dc2d5dea6562e51e74
| 569,095 |
def noll_to_wss(zern):
    """
    Transform a Noll Zernike index into a JWST WSS framework Zernike index.

    :param zern: int; Noll Zernike index (1..11)
    :return: WSS Zernike index
    :raises KeyError: for indices outside 1..11
    """
    # Direct composition of the original noll->name and name->wss tables.
    noll_to_wss_map = {1: 1, 2: 2, 3: 3, 4: 5, 5: 4, 6: 6,
                       7: 8, 8: 7, 9: 10, 10: 11, 11: 9}
    return noll_to_wss_map[zern]
|
dede2e9057fe5418488532d0a763c64886372932
| 232,667 |
def filter_post_edits(dict_elem):
    """
    Keep only post-history events that modified the body of a post.

    :param dict_elem: dict with parsed XML attributes
    :return: True when PostHistoryTypeId is one of the body-modifying
        type ids (2, 5, 8)
    """
    body_edit_types = {2, 5, 8}
    return int(dict_elem['PostHistoryTypeId']) in body_edit_types
|
b8f567d6dceb0dd1deb6cffac631bfc44f0fb282
| 21,023 |
async def get_authenticated_user(*, app, logger):
    """Get information about the authenticated GitHub user.
    This function wraps the `GET /user
    <https://developer.github.com/v3/users/#get-the-authenticated-user>`_
    method.
    Parameters
    ----------
    app : `aiohttp.web.Application`
        The app instance.
    logger
        A `structlog` logger instance with bound context related to the
        Kafka event. Currently unused; accepted for interface
        consistency with sibling helpers.
    Returns
    -------
    response : `dict`
        The parsed JSON response body from GitHub.
    """
    # The shared gidgethub client is presumably stored on the root app by
    # startup code (not visible in this excerpt).
    ghclient = app["root"]["templatebot/gidgethub"]
    response = await ghclient.getitem("/user")
    return response
|
b7577fbe203b08080dd2c2e9c9f3af132095e4d7
| 79,140 |
from typing import List
def get_available_signatures() -> List[str]:
    """Return a list of available signatures.

    Returns
    -------
    List[str]
        List of all available signatures.
    """
    return [
        "high_q_freq", "high_q_dur", "low_q_freq", "low_q_dur",
        "zero_q_freq", "q95", "q5", "q_mean", "hfd_mean",
        "baseflow_index", "slope_fdc", "stream_elas", "runoff_ratio",
    ]
|
69c93c5d10cbf669a9959a7f8fb555c656da5a98
| 320,914 |
def get_missing_vars(aws):
    """
    Return a message for each required env var that is absent from `aws`.
    """
    required = ("AWS_SECRETS_NAME", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY")
    missing = []
    for var in required:
        if aws.get(var) is None:
            missing.append(f'Missing env variable "{var}"')
    return missing
|
8fd121c74a8771893627ce75ee4636ce612ca1d4
| 377,240 |
import time
def verify_token(timestamp):
    """Check whether a token is (close to) expired.

    :param timestamp: token expiry timestamp, seconds since the epoch
    :type timestamp: int
    :return: True when the expiry is within 2 hours of now (or already
        past), False otherwise.
    """
    # Lifetime threshold: 2 hours (7200 seconds).
    # NOTE(review): this returns True whenever LESS than 2 hours remain,
    # including timestamps far in the past -- confirm whether "about to
    # expire" (rather than "expired") is the intended meaning.
    if timestamp - int(time.time()) <= 7200:
        return True
    return False
|
2dd0fd629028e8d399f36dc95bf0f050af627301
| 516,088 |
def endswith(x, lst):
    """ Select the longest suffix from the provided list that matches x.

    :param x: input string
    :param lst: suffixes to compare with
    :return: longest suffix that matches the input string if available,
        otherwise None
    """
    matching = [suffix for suffix in lst if x.endswith(suffix)]
    return max(matching, key=len) if matching else None
|
875f213d3478a6c49956b195db9bd4fd5fbdfc7b
| 190,649 |
def parse_row(row):
    """
    Extract the cell text from a <tr> tag as a list of strings.

    Note: a cell whose .string is None yields the literal string 'None',
    matching the original str() conversion.
    """
    cells = row.find_all('td')
    return [str(cell.string) for cell in cells]
|
44b8eadcd11d79ede2ae3d0a0866bfa80ff5e36d
| 666,796 |
import re
def clean_output(output: str) -> str:
    """Strip the input and remove any ANSI escape sequences from it."""
    stripped = str(output).strip()
    ansi_escape = r"\x1B[@-_][0-?]*[ -/]*[@-~]"
    return re.sub(ansi_escape, "", stripped)
|
ba4aa3e90f53fa1f61a9c8b4264fd737c89d2553
| 343,428 |
def _resample_daily(data):
"""
Resample data using linear interpolation.
:param data: Pandas DataFrame or Series.
:return: Resampled daily data.
"""
return data.resample('D').interpolate(method='linear')
|
974eda3a0a0e842f478128ddfa87a765899c9d14
| 438,185 |
def _html_param_builder(params):
"""
Build a HTML params string out of a dictionary of the option names and their values. If a list is the value
(it is intended in a select case) the original list is returned instead of a string
:param dict params:
:return str | list: the composed parameters string or list in case list is the class of the value in the dict
"""
string = ""
for key, value in params.items():
if isinstance(value, list):
return value
string = string + key + "=" + str(value) + " "
return string.strip()
|
e483c1f43cd5dde78982bdf8026a3a5c1b9a73b2
| 428,956 |
def myRound(x, y=4):
    """Round x to the nearest multiple of 1/y (default: nearest quarter).

    Note: despite the original description, this does NOT round to a
    number of decimal places; round(x * y) / y snaps x onto a grid of
    1/y-sized steps (e.g. y=4 rounds to the nearest 0.25).
    """
    return round(x*y)/y
|
692ee4ee73f94acd025edc4464ce267c9cf9ae71
| 158,683 |
def is_a_number(x):
    """ Return True when x is exactly an int or a float.

    Exact type checks are used, so bool (a subclass of int) and other
    numeric subclasses are rejected, as in the original.
    """
    return type(x) in (int, float)
|
bacb75da546e551f0e4e2f7fcd3345ffd49e8b6e
| 354,715 |
def collectKwargs(label, inKwargs):
    """
    Collect kwargs named "<label>_*" and return them as a dict keyed by
    the part after the prefix.

    Parameters
    ----------
    label: str
    inKwargs: dict

    Returns
    -------
    Dict with the collected kwargs, keys stripped of the "<label>_" prefix.
    """
    prefix = label + "_"
    outKwargs = {}
    for key, value in inKwargs.items():
        # startswith fixes the original substring test, which also matched
        # (and mangled) keys merely *containing* the label mid-string.
        if key.startswith(prefix):
            outKwargs[key[len(prefix):]] = value
    return outKwargs
|
0e92c451dd2c3f9463da263f3b09a95a23c11543
| 304,665 |
def compute_prior_pseudocounts(vs, rv, pseudocount_base, pseudocount_match):
    """
    Make a pseudocounts dict for the prior.

    Input: vs a list of votes (domain of prior)
           rv a particular vote (e.g. the reported vote)
           pseudocount_base a prior pseudocount (e.g. 0.5)
           pseudocount_match a prior pseudocount (e.g. 50.0)
    Output: dict mapping each vote in vs to its prior pseudocount.

    Every entry equals pseudocount_base, except the entry for rv, which
    gets pseudocount_match. This reflects a belief that scanners are
    largely accurate (pseudocount_match large), while preserving symmetry
    among kinds of errors. rv need not be in vs, in which case all entries
    equal pseudocount_base.

    The vote ("-noCVR",) is treated specially: a reported ("-noCVR",) is
    NOT expected to yield the same actual vote, so it never receives the
    match pseudocount.
    """
    def prior_for(av):
        # Only an exact match with a non-noCVR reported vote is boosted.
        if av == rv and rv != ("-noCVR",):
            return pseudocount_match
        return pseudocount_base

    return {av: prior_for(av) for av in vs}
|
f190e8cdf9462f5d13d4846c6561810a35f0011c
| 596,587 |
def contains(element):
    """
    Build a predicate that tests whether its argument contains `element`.

    :param element: The element to check against.
    :return: A predicate returning True when ``element in argument``, and
        False when the containment test fails or raises.
    """
    def predicate(argument):
        try:
            return element in argument
        except Exception:
            # The original bare `except:` would also swallow
            # KeyboardInterrupt/SystemExit; `Exception` keeps the
            # permissive behaviour while letting those through.
            return False
    return predicate
|
901785df69b8ee3f07c01cc1d3a1ce1c31d204b2
| 682,523 |
import math
def py37_isqrt(n):
    """Return the largest integer r such that r^2 <= n.

    (Backport of math.isqrt for Python < 3.8.)

    :raises ValueError: for negative input.
    """
    # Check the sign first: previously, any negative n above -1e22 fell
    # into the math.sqrt fast path and raised a generic "math domain
    # error" instead of the intended message below.
    if n < 0:
        raise ValueError("square root not defined for negative numbers")
    if n == 0:
        return 0
    if n < 10000000000000000000000:  # trial and error: float sqrt is exact here
        return math.floor(math.sqrt(n))
    # Integer Newton's method; https://stackoverflow.com/a/53983683
    x = 1 << (n.bit_length() + 1 >> 1)
    while True:
        y = (x + n // x) >> 1
        if y >= x:
            return x
        x = y
|
7f5a93f8b8db5ecfc49c239343fb679063ea0534
| 593,596 |
def check_output_options(input,):
    """
    Validate the output_options dictionary, filling in defaults for
    missing keys.

    Recognised keys:
        'write_chain' (bool, default False): write MCMC chains for all
            parameters, fluxes, and luminosities to a FITS table. Off by
            default because the MCMC_chains.FITS file can become very
            large, especially when running multiple objects; it is only
            needed to reconstruct chains and histograms.
        'print_output' (bool, default True): print steps of the fitting
            process in Jupyter output.

    :param input: user-supplied options dict (may be empty or falsy)
    :return: dict with validated 'write_chain' and 'print_output' entries
    :raises TypeError: when a supplied value is not a bool (or int)
    """
    defaults = {'write_chain': False, 'print_output': True}
    if not input:
        # Nothing supplied: return a fresh copy of the defaults.
        return dict(defaults)
    output = {}
    # Single validation loop replaces the original copy-pasted per-key
    # blocks; messages are unchanged.
    for key, default in defaults.items():
        if key in input:
            value = input[key]
            if not isinstance(value, (bool, int)):
                raise TypeError('\n %s must be set to "True" or "False" \n' % key)
            output[key] = value
        else:
            output[key] = default
    return output
|
0bf69c7bc2d24cca315c1956ffb6cdfd34a4b5bd
| 173,506 |
import random
def init_neurons(count):
    """
    Initialize the weights of the neurons.

    :param count: number of neurons to initialize
    :return: list of neurons as [x, y] weight vectors, each coordinate
        drawn uniformly from [0.0, 1.0]
    """
    neurons = []
    for _ in range(count):
        x = random.uniform(0.0, 1.0)
        y = random.uniform(0.0, 1.0)
        neurons.append([x, y])
    return neurons
|
98be8fc1f621e2c768af3c93d1a0254ac42a4ffc
| 667,504 |
def _GetPercentageChange(value1, value2):
"""Returns the percentage change between the specified values."""
difference = value2 - value1
return 0 if value1 == 0 else difference/value1 * 100
|
987d2cd20456ad5fad72145adecdda96256b9162
| 584,109 |
def get_categories_of_column(data: list, column: str, delimiter=',') -> list:
    """Get the unique categories of a non-atomic column whose cells hold
    multiple categorical values joined by a delimiter.

    Parameters
    ----------
    data : list
        Data stored in list of dicts
    column : str
        Column name which contains multiple values
    delimiter : str, optional
        Delimiter of values in column, by default ','

    Returns
    -------
    list
        Unique list of categories in the column (order unspecified, as
        with the original set-based implementation).
    """
    categories = set()
    for row in data:
        # Splitting per row avoids the original quadratic concatenation of
        # every cell into one giant string.
        categories.update(str(row[column]).split(delimiter))
    return list(categories)
|
2890918e0f5a69546c23cecbe04ef452e3b39f7b
| 129,592 |
def _build_selector(selectors, separator):
"""
Build a selector defined by the selector and separator
:param selectors: a list of strings that are selector
:param separator: the char that separates the different selectors
:return: the resulting selector
"""
selector = ""
for i, sel in enumerate(selectors):
if i != 0:
selector += separator
selector += "$" + sel.replace(" ", "%20")
return selector
|
fb61d36f2d552e655e9f071cabacc8792217421f
| 566,374 |
import copy
def merge_vocabulary(vocab1, vocab2):
    """Merge two token->index vocabularies.

    Entries of ``vocab1`` keep their indices; tokens present only in
    ``vocab2`` are appended with the next free index.

    :param vocab1: base vocabulary (not modified; a deep copy is made)
    :param vocab2: vocabulary whose unseen tokens are appended
    :return: merged vocab
    """
    merged = copy.deepcopy(vocab1)
    for token in vocab2:
        if token not in merged:
            merged[token] = len(merged)
    return merged
|
c8d08c3d2ae722f59f95d4e8b7f5e97d31ad6a0b
| 219,398 |
def get_audit_ccs(assessment):
    """Returns audit CCs regarding assessment.

    Args:
      assessment: An instance of Assessment model.

    Returns:
      List of audit ccs (empty when the audit has no issue tracker entry
      or its cc_list is blank).
    """
    issue = assessment.audit.issuetracker_issue
    if issue is None or not issue.cc_list:
        return []
    return issue.cc_list.split(",")
|
0630f9c69acf0b2f61c362b2bed1dc35eb2bd8f2
| 59,374 |
import re
def filter_cmd_list(cmd_list, regex_to_include=".*", regex_to_exclude=None, dict_key='label'):
    """Filter a list of Benchpress commands.

    Parameters
    ----------
    cmd_list : list of dict
        The commands to filter.
    regex_to_include : str
        RegEx that match for inclusion.
    regex_to_exclude : str
        RegEx that match for exclusion.
    dict_key : str
        The dict-key in the command to search within.

    Returns
    -------
    new_cmd_list : list of dict
        New list of (shallow-copied) commands.
    """
    kept = []
    for cmd in cmd_list:
        text = cmd[dict_key]
        if re.search(regex_to_include, text) is None:
            continue
        if regex_to_exclude is not None and re.search(regex_to_exclude, text) is not None:
            continue
        kept.append(cmd.copy())
    return kept
|
77eafc02cb530f0f84a80d24e517c0104e524dcc
| 262,311 |
def determine_if_pb_should_be_filtered(row, min_junc_after_stop_codon):
    """PB should be filtered if NMD, a truncation, or protein classification
    is not likely protein coding (intergenic, antisense, fusion,...)

    Args:
        row (pandas Series): protein classification row
        min_junc_after_stop_codon (int): mininum number of junctions after stop
            codon a protein can have. used in NMD determination

    Returns:
        int: 1 if should be filtered, 0 if should not be filtered
    """
    pclass = str(row['protein_classification'])
    num_junc_after_stop_codon = int(row['num_junc_after_stop_codon'])
    pclass_base = str(row['protein_classification_base'])

    # NMD check: only pFSM/pNIC bases may exceed the junction-after-stop limit.
    if (pclass_base not in ('pFSM', 'pNIC')
            and num_junc_after_stop_codon > min_junc_after_stop_codon):
        return 1
    # Artifact / likely-noncoding classifications are filtered outright.
    noncoding_tags = ('trunc', 'intergenic', 'antisense', 'fusion', 'orphan', 'genic')
    if any(tag in pclass for tag in noncoding_tags):
        return 1
    return 0
|
29ab7ce53ac7569c4d8a29e8e8564eab33b3f545
| 2,631 |
import hashlib
import re
def _build_schedule_name(
job_body_data: bytes,
schedule: str,
pipeline_name: str,
display_name: str,
) -> str:
"""Generates the name for the schedule.
Args:
job_body_data: The serialized pipeline job.
schedule: Schedule in cron format.
pipeline_name: Full resource name of the pipeline in
projects/<project>/pipelineJobs/<pipeline_id> format.
display_name: Pipeline display name.
Returns:
Suggested schedule resource name.
"""
pipeline_name_part = 'pipeline'
if pipeline_name is not None:
# pipeline_name format: projects/<project>/pipelineJobs/<pipeline_id>
pipeline_id = pipeline_name.split('/')[-1]
# Limiting the length of the pipeline name part.
pipeline_name_part = pipeline_id[0:200]
elif display_name is not None:
pipeline_name_part = display_name
pipeline_hash_part = hashlib.sha256(job_body_data).hexdigest()[0:8]
schedule_part = (
schedule.replace('*/', 'div').replace('*', 'a').replace(' ', '-'))
job_name = '_'.join([
'pipeline',
pipeline_name_part,
pipeline_hash_part,
schedule_part,
])
job_name = re.sub('[^-_a-z0-9]', '_', job_name)
return job_name
|
740f22c4c78b588806e14df61ad10dcaf116e7e9
| 534,138 |
def Bin_minutes(val):
    """
    Map minutes to a time bin.

    Returns 0.5 when ``val`` is at least half an hour, else 0.0.
    """
    # Keep the original val/60 comparison so float rounding matches exactly.
    return 0.5 if val / 60. >= 0.5 else 0.0
|
67175df068e0b51e56ac7b4b99c734dfba5c44e9
| 129,441 |
import re
def custom_regex_removal(regex, text):
    """
    Delete every match of the given regex from the given text.

    :param regex: pattern whose matches are removed
    :param text: arbitrary string
    :return: clean string
    """
    # re.sub with an empty replacement removes all occurrences; the
    # pattern string is passed through unchanged.
    return re.sub(regex, '', text)
|
9c2ca934b5f0d3121d75a73b3f95d6bb242bb5bb
| 504,797 |
def checkNextString(StringList, index, storageString, rawlength):
    """
    Unfold an ICS line continuation starting at ``index``.

    Takes a parsed ICS string list, the current index being read, the
    string being accumulated, and the list length. Returns a tuple of
    (indexes consumed into the string, updated storage string).
    """
    ## indexes of continuation lines folded into the storage string
    consumed = []
    nxt = index + 1
    ## a continuation line is marked by a single leading whitespace
    if nxt < rawlength and StringList[nxt].startswith(" "):
        ## drop only the first space (via replace, not lstrip) so spaces
        ## that coincide with folds are preserved
        storageString += StringList[nxt].replace(" ", "", 1)
        consumed.append(nxt)
        deeper_indices, deeper_string = checkNextString(
            StringList, nxt, storageString, rawlength)
        ## only merge the deeper result if it actually changed the string
        if deeper_string != storageString:
            consumed.extend(deeper_indices)
            storageString = deeper_string
    return consumed, storageString
|
e061833b39d772f87bdfca1d8737f1787669bf1e
| 476,024 |
def doubleChar(words):
    """
    Function to repeat the chars.

    Given a string, return a string where for every char in the original,
    there are two chars.

    Args:
        words (String): String provided by user
    Return:
        result (String): String with characters dupplicated
    """
    return "".join(ch * 2 for ch in words)
|
2acff6db3a258cb7c90b7e5c92192a8aafce31a5
| 333,293 |
def get_providers(targets, provider, map_fn = None):
    """Returns the given provider (or a field) from each target in the list.

    The returned list may not be the same size as `targets` if some of the
    targets do not contain the requested provider. This is not an error.

    Args:
        targets: A list of targets.
        provider: The provider to retrieve.
        map_fn: A function that takes a single argument and returns a single
            value. If this is present, it will be called on each provider in
            the list and the result will be returned in the list returned by
            `get_providers`.

    Returns:
        A list of the providers requested from the targets.
    """
    found = [target[provider] for target in targets if provider in target]
    if map_fn:
        return [map_fn(item) for item in found]
    return found
|
81bda635566479d02ca9d21a096bc4caeaabda01
| 274,351 |
import itertools
def take(n, gen):
    """Take the first n items from the generator gen. Return a list of that
    many items and the resulting state of the generator."""
    # zip with range(n) pulls exactly n items (or fewer if gen runs out),
    # consuming the generator just like itertools.islice would.
    taken = [item for _, item in zip(range(n), gen)]
    return taken, gen
|
6254817eb20c14e8eed75d5051aedf865b28223e
| 448,005 |
def get_aF(r1_norm, r2_norm):
    """
    Computes the semi-major axis of the fundamental ellipse. This value is
    kept constant for all the problem as long as the boundary conditions
    are not changed.

    Parameters
    ----------
    r1_norm: float
        Norm of the initial vector position.
    r2_norm: float
        Norm of the final vector position.

    Returns
    -------
    a_F: float
        Semi-major axis of the fundamental ellipse.

    Notes
    -----
    No labeled equation (appears between [3] and [4]) from Avanzini's
    report [1].
    """
    # Simple arithmetic mean of the two position norms.
    semi_major = (r1_norm + r2_norm) / 2
    return semi_major
|
739a5c8d61ccde8db0eb7df10335940c338353c7
| 589,966 |
def preprocessed_test_metrics(expected_metrics):
    """Flatten per-file metric dicts into (file, metric, value) triples.

    In:
        expected_metrics:
            List[ Tuple[FileName, Dict[Metric1: Value1, Metric2: Value2, ...], ...] ]
    Out:
        res:
            List[ Tuple[FileName, Metric1, Value1], ... ]
    """
    return [
        (file_name, metric, value)
        for file_name, metrics in expected_metrics
        for metric, value in metrics.items()
    ]
|
ce70cf7fe342fd761d3669af512752ab0ba96231
| 216,313 |
def get_det_prefix(pv_info):
    """
    Determines which prefix will be passed into the detector init.

    Parameters
    ----------
    pv_info: ``str``
        The second element in a camview cfg line.

    Returns
    -------
    detector_prefix: ``str``
        The prefix to use in the detector init (always ends with ':').
    """
    parts = pv_info.split(';')
    if len(parts) >= 2:
        # Prefix explicitly provided after the ';'.
        detector_prefix = parts[1]
    else:
        # Not provided in config, guess by dropping the last ':'-segment
        # of the image base PV.
        detector_prefix = ':'.join(parts[0].split(':')[:-1])
    return detector_prefix + ':'
|
0b973808092f14b0eeeb6d20f5e3c207a83a7068
| 136,620 |
from typing import OrderedDict
def get_partial_state_dict(model_state_dict, modules,
                           init_from_decoder_asr=False,
                           init_from_decoder_mt=False):
    """Create state_dict with specified modules matching input model modules.

    Note that get_partial_lm_state_dict is used if a LM specified.

    Args:
        model_state_dict (OrderedDict): trained model state_dict
        modules (list): specified module list for transfer
        init_from_decoder_asr (bool): strip the "_asr" suffix from keys
        init_from_decoder_mt (bool): rename "decoder_mt" keys to "decoder"

    Return:
        new_state_dict (OrderedDict): the updated state_dict
    """
    new_state_dict = OrderedDict()
    for key, value in model_state_dict.items():
        if init_from_decoder_asr:
            mapped_key = key.replace("_asr", "")
        elif init_from_decoder_mt:
            mapped_key = key.replace("decoder_mt", "decoder")
        else:
            mapped_key = key
        # Keep only parameters that belong to one of the requested modules.
        if any(mapped_key.startswith(module) for module in modules):
            new_state_dict[mapped_key] = value
    return new_state_dict
|
4d83f22124dca13852451e4016f5a52a727fa031
| 610,750 |
def filter_by_type(lst, type_of):
    """Returns a list of elements with given type."""
    matches = []
    for element in lst:
        if isinstance(element, type_of):
            matches.append(element)
    return matches
|
8aa0c910ee7b098b2715cd612b434b37ea75e51a
| 329,285 |
def createExpressionList(string):
    """Creates a list of expression strings

    Keyword arguments:
    string -- String of semicolon-separated expressions

    Splits on ';' and drops empty fragments (e.g. produced by a
    trailing ';').
    Returns list of single expression strings
    """
    return [expr for expr in string.split(";") if expr != ""]
|
eb7424827a0508a169f2caa069331dd4ab0947b1
| 514,303 |
def redis_serialize_data(datum):
    """Serialize a data object for redis.

    The format is 'data:<pk>'
    """
    return "data:%s" % str(datum.pk)
|
5a9892264647f2cd1ab042a6a62640cd4f0f5397
| 508,434 |
import time
def parse_recurrence(time_string):
    """Parse recurrence data found in event entry.

    Keyword arguments:
      time_string: Value of entry's recurrence.text field.

    Returns:
      Tuple of (start_time, end_time, frequency). start_time and end_time
      are time.struct_time objects, and frequency is a dictionary mapping
      RFC 2445 RRULE parameters to their values.
      (http://www.ietf.org/rfc/rfc2445.txt, section 4.3.10)
    """
    # Google calendars uses a pretty limited section of RFC 2445, and I'm
    # abusing that here. This will probably break if Google ever changes how
    # they handle recurrence, or how the recurrence string is built.
    lines = time_string.split('\n')
    # Lines 0/1 carry DTSTART/DTEND; the timestamp follows the last ':'.
    start_time = time.strptime(lines[0].split(':')[-1], '%Y%m%dT%H%M%S')
    end_time = time.strptime(lines[1].split(':')[-1], '%Y%m%dT%H%M%S')
    # Line 2: 'RRULE:' prefix (6 chars) then ';'-separated KEY=VALUE pairs.
    freq = {}
    for prop in lines[2][6:].split(';'):
        key, value = prop.split('=')
        freq[key] = value
    return (start_time, end_time, freq)
|
f2a991c2f2846c46228f0aa67cb883310abfe267
| 410,913 |
import six
def to_unicodestr(string):
    """If input string is a byte string convert to Unicode string.

    Args:
        string: a ``str`` or ``bytes`` value.

    Returns:
        ``string`` decoded as UTF-8 when it is ``bytes``, otherwise the
        value unchanged.
    """
    # `six.binary_type` is exactly `bytes` on Python 3 (this file uses
    # py3-only annotations elsewhere), so the six shim is unnecessary.
    if isinstance(string, bytes):
        return string.decode('utf8')
    return string
|
c07a885a2d1ae4ee291a547b1e6d27de874c25bf
| 164,377 |
def checkBuildingType(line):
    """Checks if the building type is a house, or if the item is being bought or sold"""
    # Look for the trade markers first; anything else is a house.
    for marker, kind in ((" is buying ", "buy"), (" is selling ", "sell")):
        if marker in line:
            return kind
    return "house"
|
b18c2db78aba84820e4b489f5b8bad128e7c1860
| 399,030 |
def pass_min_coverage(dp, i, min_coverage, cov_metric):
    """
    if the DP flag does not meet minimum coverage, toss it.
    if the DP4 flag does not meet minimum coverage, toss it.

    :param dp:
        DP coverage allele depth
    :param i: string
        DP4 coverage allele depth (four comma-separated counts)
    :param min_coverage: int
        minimum coverage required (anything less will return False)
    :param cov_metric: string
        either 'DP' or 'DP4'
    :return passed: boolean
        True if the coverage is at least min_coverage, false otherwise.
    """
    fields = i.split(',')
    if cov_metric == 'DP':
        # Single depth value.
        return int(dp) >= min_coverage
    if cov_metric == 'DP4':
        # DP4 depth is the sum of the four strand/allele counts.
        return sum(int(fields[k]) for k in range(4)) >= min_coverage
    # Unknown metric: never filtered.
    return True
|
929852dd7635bfa6917ba5a1e8c1456b69db2bbf
| 427,149 |
def _get_smartcard_keychain(xml):
"""Get keychain items from the output of 'system_profiler SPSmartCardsDataType -xml'"""
keychain = [x for x in xml if x.get("_name", None) == "AVAIL_SMARTCARDS_KEYCHAIN"]
if len(keychain) == 0:
return []
keychain = keychain[0].get("_items", None)
return keychain
|
bf5c3414a0b333ad805458c9e6a36a4cfa918be8
| 543,067 |
def format_duration(duration: float):
    """Formats the duration (milliseconds) to a human readable way.

    :param duration: Duration in milliseconds
    :return: Duration as HH:MM:SS when at least an hour long, otherwise
        MM:SS. Example: 01:05:10
    """
    total_seconds = duration / 1000
    minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    if hours:
        return "{:02d}:{:02d}:{:02d}".format(int(hours), int(minutes), int(seconds))
    return "{:02d}:{:02d}".format(int(minutes), int(seconds))
|
20314e05294504a1b8d0979b48397d8b0563eb0d
| 464,620 |
def prime_sieve(n):
    """
    Return a list of all primes smaller than or equal to n.

    This algorithm uses a straightforward implementation of the
    Sieve of Eratosthenes. For more information, see
    https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes

    Algorithmic details
    -------------------
    Memory: O(n)
    Time: O(n * log(log(n)))

    where n is the input number.

    Examples
    --------
    >>> prime_sieve(2)
    [2]
    >>> prime_sieve(9)
    [2, 3, 5, 7]
    >>> prime_sieve(30)
    [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    """
    # Sieve over indices 0..n (length n + 1); 0 and 1 are not prime.
    is_prime = [False, False] + [True] * (n - 1)
    for i in range(2, int(n**0.5 + 1)):
        if not is_prime[i]:
            continue
        # i is prime: mark its multiples, starting at i*i because smaller
        # multiples were already marked by smaller primes.
        for c in range(i * i, n + 1, i):
            is_prime[c] = False
    return [num for num, prime in enumerate(is_prime) if prime]
|
180dc6a2ff1d3096630a6279ace841e4c5efdd83
| 661,389 |
def get_item(dictionary, key):
    """
    Given a dictionary and a key, return the key's value (None if absent)
    """
    try:
        return dictionary[key]
    except KeyError:
        return None
|
97bf041cb8afe160a5c1610e088dadb91d023e8f
| 97,582 |
def s2b(u, enc='UTF-8'):
    """
    Convert a string into a bytes object.
    """
    # bytes(str, encoding) is equivalent to str.encode(encoding).
    return bytes(u, enc)
|
e19c795722380bed50aa1f011b65dacfe92c13cc
| 557,091 |
def find_smallest(num_vars):
    """Find the smallest exponent of two that is greater than or equal to
    the number of variables.

    Parameters
    ----------
    num_vars : int
        Number of variables

    Returns
    -------
    x : int
        Smallest exponent x such that 2 ** x >= num_vars
    """
    # The original loop only tried exponents 0..9 and silently returned
    # None for num_vars > 512. The closed form below works for any size:
    # (num_vars - 1).bit_length() is the smallest x with 2**x >= num_vars.
    if num_vars <= 1:
        return 0
    return (num_vars - 1).bit_length()
|
ab773fbbad29c75df6350a426acc2db9c30d8137
| 190,100 |
def parse_task(line):
    """
    Receives a line and parses it to match the format `[subject] name`,
    where `subject` is a two-letter code for the subject and `name` is the
    name of the task. If `subject` is invalid, the user is re-prompted
    until a valid entry is given. Capitalisation is automatic.

    Args:
        line: A line to parse a task from.

    Returns:
        A string with the format `[subject] name`, explained above.
    """
    subject, name = line.split(" - ")
    print("Parsing {}...".format(line))
    # Re-prompt until the subject is exactly two uppercase letters.
    while not (subject.isupper() and len(subject) == 2):
        subject = input("The subject {} does not meet your standarised format. What should be the corrected term? ".format(subject)).upper()
    return "[{}] {}".format(subject, name)
|
e378c028089b44a8d6eca1123a15f71ce210f7cc
| 293,751 |
def get_blacklist_scans(subject_id, blacklist_path, new_id=None):
    """
    Finds all entries in <blacklist_path> that belong to the participant
    with ID <subject_id>. If <new_id> is given, it modifies the found lines
    to contain the new subject's ID.
    """
    entries = []
    try:
        handle = open(blacklist_path, 'r')
    except IOError:
        # Missing/unreadable blacklist is treated as "no entries".
        return entries
    with handle:
        for line in handle:
            if subject_id not in line:
                continue
            if new_id is not None:
                line = line.replace(subject_id, new_id)
            entries.append(line)
    return entries
|
ab7e1aca57cbc944da48d7e6fe36c5888429eeda
| 168,422 |
def remove_repeated_coords(sequence):
    """
    Removes coordinates repeated on a sequence, keeping first occurrences.

    Args:
        sequence (list): list of tuples
    Returns:
        List of unique tuples in original order
    """
    seen = set()
    unique = []
    for coord in sequence:
        if coord not in seen:
            seen.add(coord)
            unique.append(coord)
    return unique
|
361e2fb256a9b8299d3982569af10ac503d56ad6
| 335,129 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.