content (stringlengths 39–9.28k) | sha1 (stringlengths 40–40) | id (int64 8–710k)
---|---|---
def apply_split_linear(lin_module_1,
lin_module_2,
input1,
input2,
activation=None):
"""Returns a linear output of two inputs, run independently and summed."""
output_1 = lin_module_1(input1)
output_2 = lin_module_2(input2)
summed_output = output_1 + output_2
if activation is not None:
summed_output = activation(summed_output)
return summed_output
|
bd07e5e3b9f7833db66c37e9362eeb6ea6322bf7
| 229,936 |
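A minimal usage sketch for apply_split_linear, assuming PyTorch modules (any two callables returning summable tensors would do; the shapes here are illustrative):
import torch
import torch.nn as nn

lin_a = nn.Linear(4, 3)          # projects a 4-dim input to 3 dims
lin_b = nn.Linear(5, 3)          # different input width, same output width
x_a, x_b = torch.randn(2, 4), torch.randn(2, 5)

out = apply_split_linear(lin_a, lin_b, x_a, x_b, activation=torch.relu)
print(out.shape)                 # torch.Size([2, 3])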
def calc_disfrac(input_df, threshold=0.5):
"""Take a merged IUPRed input (multiple proteins) and calculate
the fraction of disordered residues according to a specific threshold for
each case. A pd.GroupBy object is first generated and then the function
applied.
Arguments:
input_df {string} -- [description]
Keyword Arguments:
threshold {float} -- [description] (default: {0.5})
Returns:
[type] -- [description]
"""
grouped = input_df.groupby(["protein_ac", "dataset", "disorder_predictor"])
# Aggregate IUPred scores using the disorder fraction formula
disfrac = grouped.agg({"score": lambda x: sum(x >= threshold) / len(x)})
# Rename score column to disorder fraction
disfrac = disfrac.rename(columns={"score": "disorder_fraction"})
disfrac.reset_index(inplace=True)
return disfrac
|
5809df39779248d69fee7a177c858e946def6c28
| 275,075 |
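A toy invocation of calc_disfrac, assuming one row per residue with the grouping columns the function expects (the column values here are made up):
import pandas as pd

df = pd.DataFrame({
    "protein_ac": ["P1"] * 4,
    "dataset": ["demo"] * 4,
    "disorder_predictor": ["iupred_long"] * 4,
    "score": [0.9, 0.7, 0.3, 0.1],
})
# 2 of 4 residues score >= 0.5, so disorder_fraction is 0.5
print(calc_disfrac(df, threshold=0.5))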
import random
def dummy_decision(board):
""" make a random dummy move
"""
legal_moves = list(board.legal_moves)
return random.choice(legal_moves)
|
8c44acf1aff245ed532e48c35e440440b6ddcaef
| 30,493 |
def add(a, b, c):
"""Take three inputs as integer and returns their sum."""
total = a+b+c
return total
|
2cd49714133511a0fe09b44d27c50dbe74af2507
| 249,978 |
def create_payment_msg(identifier, status):
"""Create a payment payload for the paymentToken."""
payment_msg = {'paymentToken': {'id': identifier, 'statusCode': status}}
return payment_msg
|
265401c5b8c9bbc0f71cfb718157101e3b48e8de
| 36,713 |
def get_coords(name, geo_coder):
"""
Returns the coordinates of the `name` location using the geo_coder.
"""
location = geo_coder(name + ', Canada')
# Default to province if geocoding failed
if location is None:
print(f"WARNING: Failed to geocode {name}.")
location = geo_coder(name.split(", ", 1)[1] + ', Canada')
return location.latitude, location.longitude
|
c036ba83ce521b9f6acaeb5965cebf9ecc41d58e
| 403,101 |
def reverse_and_complement(s):
"""Takes an RNA sequence and returns the reversed and complemented form.
Presumably `s` is formed by transcribing a DNA sequence. If the same DNA
sequence was transcribed in the opposite direction, you would get the RNA
sequence that this function returns.
"""
rna_list = list(s)
rna_list.reverse()
rev_c = []
complement = {'A' : 'U', 'C' : 'G', 'G': 'C', 'U': 'A'}
for i in rna_list:
rev_c.append(complement[i])
return ''.join(rev_c)
|
17e8a168015c5e96103c51b4df0f30bb8637e919
| 451,771 |
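A quick check of reverse_and_complement on a four-base RNA string:
# 'AUGC' reversed is 'CGUA'; complementing each base gives 'GCAU'
print(reverse_and_complement("AUGC"))  # GCAU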
def traverse_get(target, *args):
"""
Travel down through a dict
    >>> traverse_get({"one": {"two": 3}}, "one", "two")
3
"""
current = target
for level in args:
current = current[level]
return current
|
f9441bc83695b7bf7ed20f20c304188b96520cda
| 428,401 |
import unicodedata
def _is_punctuation(char):
"""
    Determine whether a character is a punctuation mark; this also covers
    symbols other than letters and digits.
:param char: char
:return: bool
"""
cp = ord(char)
if ((33 <= cp <= 47) or (58 <= cp <= 64) or
(91 <= cp <= 96) or (123 <= cp <= 126)):
return True
cate = unicodedata.category(char) # Pc, Pd, Pe, Pf, Pi, Po, Ps
if cate.startswith("P"):
return True
return False
|
6c47bf08e44ecbcd7cf6b0c550a222bad9949cf0
| 122,293 |
from typing import List
import random
def genfrags(sequence: str, number: int, size: int) -> List[str]:
"""
    Generate `number` random fragments, each of length `size`, from `sequence`,
    plus two extra fragments covering the very beginning and the very end of
    `sequence` (so `number` + 2 fragments are returned in total).
"""
random.seed()
starts = (random.randint(0, len(sequence) - size) for _ in range(number))
return [sequence[start : start + size] for start in starts] + [
sequence[:size],
sequence[len(sequence) - size :],
]
|
2fc82bb70c078aba8540a8a2ce9570672f479a39
| 432,051 |
import itertools
def merge_event_pages(event_pages):
"""
    Combines an iterable of event_pages into a single event_page.
Parameters
----------
    event_pages: Iterable
An iterable of event_pages
Returns
------
event_page : dict
"""
pages = list(event_pages)
if len(pages) == 1:
return pages[0]
array_keys = ['seq_num', 'time', 'uid']
return {'descriptor': pages[0]['descriptor'],
**{key: list(itertools.chain.from_iterable(
[page[key] for page in pages])) for key in array_keys},
'data': {key: list(itertools.chain.from_iterable(
[page['data'][key] for page in pages]))
for key in pages[0]['data'].keys()},
            # iterate each section's own keys; 'filled' may cover only a
            # subset of the data keys, so data keys would raise KeyError
            'timestamps': {key: list(itertools.chain.from_iterable(
                               [page['timestamps'][key] for page in pages]))
                           for key in pages[0]['timestamps'].keys()},
            'filled': {key: list(itertools.chain.from_iterable(
                           [page['filled'][key] for page in pages]))
                       for key in pages[0]['filled'].keys()}}
|
913b924d2f2218b32bfc34eb3fca86047b28071a
| 566,667 |
def sane_parser_name(name) -> bool:
"""
    Checks whether a given name is an acceptable parser name. Parser names
    must neither begin nor end with a double underscore '__'!
"""
return name and name[:2] != '__' and name[-2:] != '__'
|
b20b8bbd11baa63752129c7f6b1be5b407b9683a
| 512,267 |
def bare_name(msg_type):
"""
Get bare_name from <dir>/<bare_name>[x] format
"""
bare = msg_type
if '/' in msg_type:
# removing prefix
bare = (msg_type.split('/'))[1]
# removing suffix
return bare.split('[')[0]
|
8d57e55a4fb41e0ffda54489c38950382a8fab03
| 608,658 |
def get_a_policy(base, policy_id):
"""Utility function that gets a Senlin policy."""
res = base.client.get_obj('policies', policy_id)
return res['body']
|
ff707b225055e48ed8daf28bcda9b937460ce04c
| 521,490 |
import socket
def receive_all(conn: socket.socket, size: int) -> bytes:
"""Receive a given length of bytes from socket.
Args:
conn (socket.socket): Socket connection.
size (int): Length of bytes to receive.
Raises:
RuntimeError: If connection closed before chunk was read, it will raise an error.
Returns:
bytes: Received bytes.
"""
buffer = b""
while size > 0:
chunk = conn.recv(size)
if not chunk:
raise RuntimeError("connection closed before chunk was read")
buffer += chunk
size -= len(chunk)
return buffer
|
34ec809f6269fb4f9edcfb56c96381dc47c59bf8
| 64,715 |
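A self-contained check of the recv loop above, using socket.socketpair (assumed available on the platform); receive_all keeps calling recv until exactly `size` bytes have arrived:
import socket

a, b = socket.socketpair()
a.sendall(b"hello world")
print(receive_all(b, 11))  # b'hello world'
a.close()
b.close()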
def get_email_domain_part(address):
"""
Get the domain part from email
    someone@cd.com -> cd.com
"""
return address[address.find("@") + 1 :].strip().lower()
|
e7661e2ca99f858c848e28951e61469f35378684
| 276,407 |
import re
def unsubscribe_user(cur, phone, confcode):
"""
Adds a user to the unsubscribe table
Params:
cur: (psycopg2.extensions.cursor) A Cursor to the database
phone: (str) phone number in E.164 format
        confcode: (int) random 128-bit code
Returns:
(str) a response message to the user to be flashed on the webpage
"""
    assert re.match(r'\+\d{9}', phone), "Invalid phone number format: %r" % phone
assert type(confcode) == int, "Invalid confirmation code: %r" % confcode
print((phone, confcode))
cur.execute("INSERT INTO unsubscribe (phone, confcode) VALUES (%s, %s);", [phone, confcode])
return 'Unsubscribe requested for %s. You should receive a text message shortly to confirm unsubscribe' % (phone)
|
77141b28a6a9e43f26c2d8e55404a159579196d4
| 203,221 |
def month_difference(a, b):
"""
Calculates difference in months between two datetime objects
Args:
a (pandas datetime object) latest datetime object
b (pandas datetime object) earliest datetime object
Returns:
integer value of months between two dates
"""
return 12 * (a.year - b.year) + (a.month - b.month)
|
f6589c3bbe255938fa4825c5e02db440d787a89b
| 449,107 |
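Example with pandas timestamps; only the year and month fields matter, so the day of month is ignored:
import pandas as pd

# 12 * (2021 - 2020) + (3 - 11) = 4
print(month_difference(pd.Timestamp("2021-03-15"), pd.Timestamp("2020-11-30")))  # 4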
def aio_rpc_request(method):
"""
A method decorator announcing the method to be exposed as
a request handler.
This decorator assumes that the first parameter (after ``self``)
takes a BSONRpc/JSONRpc object reference as an argument, so that the method
will have an access to make RPC callbacks on the peer node (requests and
notifications) during its execution. From the second parameter onward the
parameters are exposed as-is to the peer node.
"""
method._request_handler = True
return method
|
032b54da46e343fdaf85ebda3d7ce93ca2df208e
| 107,725 |
def get_by_name(ast, name):
"""
Returns an object from the AST by giving its name.
"""
for token in ast.declarations:
if token.name == name:
return token
return None
|
caad2b27be6c25e7ed320847f75e265ac965755c
| 678,886 |
def index_of_last_non_zero(x):
"""Index of last non-zero item."""
for i, a in reversed(list(enumerate(x))):
if a != 0:
return i
return -1
|
d3b3cd7165601f494f847d8396d6c9cdf1df7f9c
| 383,138 |
def get_enz_remnant(enz):
"""Get enzyme recognition site remnant sequence"""
if enz.ovhg > 0:
remnant = enz.site[enz.fst3:]
return [remnant,remnant.replace('G','A')]
else:
remnant = enz.site[enz.fst5:]
return [remnant,remnant.replace('C','T')]
|
5b76c23b3b3a4b853927bdbc6ab267d20781b7ba
| 692,378 |
def Trace_finder(lines:list):
"""Finds traces index from data. And extract eact trace section from data
Args:
lines (list): A list of lines from '.ndf' file.
Returns:
trace_sections (list): A list of trace sections.
"""
    trace_index = []
    for i, line in enumerate(lines):
        # enumerate gives the true position; list.index() would return the
        # first match and mis-place any duplicated line
        if line.startswith('// Tracing'):
            trace_index.append(i)
    trace_sections = []
    for j, t in enumerate(trace_index):
        if j < len(trace_index) - 1:
            trace_sections.append(lines[t:trace_index[j + 1]])
        else:
            trace_sections.append(lines[t:])
    return trace_sections
|
6f01144811f6ab1699accf8addd41fbe8ccff4d6
| 510,287 |
def huffman_decoding(encoded_str, root):
"""Huffman decoding algorithm
Args:
        encoded_str: encoded string of '0's and '1's
root: root Node for Huffman Binary Tree
Returns:
        decoded: string decoded from the sequence
"""
decoded = []
node = root
for e in encoded_str:
if e == '1':
node = node.get_right_child()
elif e == '0':
node = node.get_left_child()
if node.is_leaf():
decoded.append(node.get_letter())
node = root
    decoded = ''.join(decoded)
return decoded
|
f8f6e9c897206b8db65d27652ece7754edec0d1d
| 81,187 |
def flatten(lst):
"""
Returns a list containing the items found in sublists of lst.
"""
return [item for sublist in lst for item in sublist]
|
88dddf23a5d1d2a6269561dc7a53672f5056a6fd
| 468,361 |
def is_field_allowed(name, field_filter=None):
"""
    Check if a field name is eligible for being split.
For example, '__str__' is not, but 'related__field' is.
"""
if field_filter in ["year", "month", "week", "day", "hour", "minute", "second"]:
return False
return isinstance(name, str) and not name.startswith('__') and not name.endswith('__') and '__' in name
|
8be38b79bab3aeb49219155db0159cc143c38111
| 701,666 |
def doTest(n):
"""Runs a test. returns score."""
score = 0
l = list(range(1,16))
for i in l:
if input("what is {} to the power of 3? ".format(i)) == str(i**3):
score += 1
print("Correct.")
else:
print("Wrong, the correct answer is {}".format(i**3))
return score
|
83f32bec718e7459218b8863e229d5ecbd479d2c
| 13,841 |
import socket
import errno
def connection_reset(e: Exception) -> bool:
"""
Return true if an error is a connection reset error.
"""
# For some reason we get 'error: [Errno 104] Connection reset by peer' where the
# English description suggests that errno is 54 (ECONNRESET) while the actual
# errno is listed as 104. To be safe, we check for both:
return isinstance(e, socket.error) and e.errno in (errno.ECONNRESET, 104)
|
1d0e5d0742772d1279c17db933dfc1f268121849
| 54,385 |
def count_simulation_problem_batches(n_simulation_problem_chunks, n_simulation_problems,
n_simulation_problem_batches_per_chunk):
"""
Count batches to be generated.
:param n_simulation_problem_chunks: number of chunks of simulation problems
:param n_simulation_problems: number of simulations to perform
:param n_simulation_problem_batches_per_chunk: number of batches to evenly split each chunk into
:return: number of batches
"""
return min(n_simulation_problem_chunks * n_simulation_problem_batches_per_chunk, n_simulation_problems)
|
953c5cedfbecb46040f74eb97c5a5e96d88ca73d
| 248,887 |
import itertools
def year_state_filter(years=(), states=()):
"""
Create filters to read given years and states from partitioned parquet dataset.
This function was the only dependency on the pudl repo, so I copied it from
pudl.outputs.epacems.py:year_state_filter
A subset of an Apache Parquet dataset can be read in more efficiently if files
    which don't need to be queried are avoided. Some datasets are partitioned based
on the values of columns to make this easier. The EPA CEMS dataset which we
publish is partitioned by state and report year.
This function takes a set of years, and a set of states, and returns a list of lists
of tuples, appropriate for use with the read_parquet() methods of pandas and dask
dataframes.
Args:
years (iterable): 4-digit integers indicating the years of data you would like
to read. By default it includes all years.
states (iterable): 2-letter state abbreviations indicating what states you would
like to include. By default it includes all states.
Returns:
list: A list of lists of tuples, suitable for use as a filter in the
read_parquet method of pandas and dask dataframes.
"""
year_filters = [("year", "=", year) for year in years]
state_filters = [("state", "=", state.upper()) for state in states]
if states and not years:
filters = [
[
tuple(x),
]
for x in state_filters
]
elif years and not states:
filters = [
[
tuple(x),
]
for x in year_filters
]
elif years and states:
filters = [list(x) for x in itertools.product(year_filters, state_filters)]
else:
filters = None
return filters
|
6967223f06cf96d85c2f69fdf4b2b08fc387fcac
| 167,331 |
import uuid
def generate_username(user, profile, client):
"""
Default function to generate usernames using the built in `uuid` library.
"""
return str(uuid.uuid4())[:30]
|
557728555db44321911de905b6e4ac1efbea0f14
| 538,435 |
from typing import Union
def to_n_cpus(cpu_str: str) -> Union[int, float]:
"""Convert cpu string to number of cpus
(e.g. '500m' -> 0.5, '2000000000n' -> 2)
"""
if cpu_str.endswith("n"):
return int(cpu_str.strip("n")) / 1000000000
elif cpu_str.endswith("u"):
return int(cpu_str.strip("u")) / 1000000
elif cpu_str.endswith("m"):
return int(cpu_str.strip("m")) / 1000
elif cpu_str.isnumeric():
return int(cpu_str)
else:
return 0
|
5107eeed19c7cf1635b4892ead3bbb23cf72543f
| 129,729 |
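Example conversions for the three Kubernetes-style CPU quantity suffixes the function handles, plus a bare core count:
print(to_n_cpus("500m"))          # 0.5  (millicores)
print(to_n_cpus("2000000000n"))   # 2.0  (nanocores)
print(to_n_cpus("250000u"))       # 0.25 (microcores)
print(to_n_cpus("4"))             # 4    (whole cores)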
def stationlist2filelist(slo):
"""
Input a stationlist object (slo). From this extract the station codes and
create a list of file names that represent long period data files
"""
site_list = slo.getStationList()
files = []
for sta in site_list:
fname = "%s" % (sta.scode)
files.append(fname)
return files
|
10a290ad52dab7fb561ce0c1b87dec9fe5fbd0cf
| 587,907 |
def multQuatLists(q0, q1):
"""Multiply two quaternions that are represented as lists."""
x0, y0, z0, w0 = q0
x1, y1, z1, w1 = q1
return [
w0 * x1 + x0 * w1 + y0 * z1 - z0 * y1,
w0 * y1 - x0 * z1 + y0 * w1 + z0 * x1,
w0 * z1 + x0 * y1 - y0 * x1 + z0 * w1,
w0 * w1 - x0 * x1 - y0 * y1 - z0 * z1,
]
|
0eb0597af457c27a775322f752c10b388fce38b4
| 169,814 |
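A sanity check using the (x, y, z, w) component order the function expects; multiplying by the identity quaternion returns the other operand unchanged:
identity = [0.0, 0.0, 0.0, 1.0]
q = [0.5, 0.5, 0.5, 0.5]  # a 120-degree rotation about the (1, 1, 1) axis
print(multQuatLists(identity, q))  # [0.5, 0.5, 0.5, 0.5]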
import re
def simple_preprocess(text_list:list) -> list:
"""Performs simple preprocessing on the list of texts
Args:
text_list (list): list of texts
Returns:
list: list of preprocessed texts in tokens format
"""
text_list = [x.lower() for x in text_list]
text_list = [re.sub(r'[^\w]', ' ', x) for x in text_list]
text_list = [x.split() for x in text_list]
return text_list
|
1fc7c55ec067666c7cf7f0617065100f4ec69a0c
| 113,796 |
def SfromL( L, nmax=25, epsilon= 2**(-50) ):
"""
Compute sequence of generalized inverses from given Schroder value L.
Args:
L (real): main arg, range 0.0 to 1.0
        nmax (integer): default 25. Max length to allow for S.
epsilon (real): smallest change in L you don't care about.
Returns:
Sequence as list of integers
Normally epsilon should be 2 ** -(number of significant bits in L), and for
IEEE 64-bit that's 52 bits (the rest being sign and exponent).
Fearing trouble with round-offs, I set the default to 50 bits.
If you're using some alternative form of real numbers, say a fixed point
format built for a fixed range like -1 to 1, or 0 to 1, then set epsilon
to something suitable for that type.
"""
# Prevent accidental use of negative L, or L too close to zero
# which can lead to infinite loop
if L<1e-22: # 1e-22 is a guess; no real thinking was done
return [73]
S = []
while len(S) <= nmax:
count = 0
while L < 0.5:
L = 2.0*L
count +=1
        S.append(count)
        if count > 52:
            break
if abs(L-0.5) < epsilon:
break
L = 1-L
if L<1e-22:
break
return S
|
3741751c12219483abb75a987ef605e0ff9bf16e
| 113,290 |
from typing import Iterable
from typing import Any
def allunique(iterable: Iterable[Any]) -> bool:
"""Check whether all items of an iterable are distinct.
Works for either hashable or unhashable items. If all items are
hashable, allunique_hashable() will be much faster.
Returns True for an empty iterable. Will not return if passed an
infinite iterator.
Arguments:
iterable: object to be checked
Returns:
True if all items of iterable are different, otherwise False
Examples:
>>> allunique(range(100))
True
>>> allunique(iter(range(100)))
True
>>> allunique(list(range(100)) + [9])
False
>>> allunique(['alice', 'bob', 'charlie'])
True
>>> allunique('hi ho')
False
>>> allunique([['this', 'object'], ['is'], ['not', 'hashable']])
True
>>> allunique([])
True
"""
seen = []
saw = seen.append
return not any(item in seen or saw(item) for item in iterable)
|
784ea8a3fff5c4b8599cd21b5f488f8b5ea5c416
| 427,841 |
def ceiling_cpm(cpm, ceiling = 1000):
"""Sets CPM to have ceiling
Args:
cpm (float): CPM of isoform
ceiling (int, optional): Maximum. Defaults to 1000.
Returns:
float: new cpm constrained to ceiling
"""
# gtf top score is 1000
if cpm > ceiling:
return ceiling
else:
return cpm
|
2e17522d7ae0edf0316c770477616ec312a1835e
| 228,374 |
def make_reveal_url(handler, content_id):
"""Produces a link to this reveal handler that, on success, redirects back
to the given 'target' URL with a signature for the given 'content_id'."""
return handler.get_url(
'/reveal', target=handler.request.path_qs, content_id=content_id)
|
09905675b0b41089f0c001cc402cfcff0b0add00
| 352,505 |
def image_group_object_factory(image_id, group_id):
"""Cook up a fake imagegroup json object from given ids."""
groupimage = {
'image_id': image_id,
'group_id': group_id
}
return groupimage
|
56572b5f902e4503ac3b62b023df0047bab14c57
| 329,244 |
def normalize_name(s):
"""Normalizes the name of a file. Used to avoid characters errors and/or to
get the name of the dataset from a config filename.
Args:
s (str): The name of the file.
Returns:
new_s (str): The normalized name.
"""
if s is None:
return ''
s = s.replace('.json', '')
new_s = ''
for c in s:
if c in ' \t\n':
new_s += '_'
else:
new_s += c
return new_s
|
a0921b12ea2d6c855e5443ab81df33ba83d587c8
| 650,179 |
def maxWithNone(a, b):
""" Returns maximal value from two values, if any of them is None, returns other one.
:param a: int, float or None value
:param b: int, float or None value
:return: class instance with given properties
>>> maxWithNone(None, 1)
1
"""
    if a is None:
        return b
    elif b is None:
        return a
else:
return max(a, b)
|
75493c40f86dad7fc09cd452ffb0f39b8d35dde0
| 287,712 |
def get_storage_status(dev):
"""Return storage status
"""
if dev:
return dev.get_storage_status()
else:
return None
|
4c86bb2acd254cb08e2cf7c2796f930113dfde0e
| 437,329 |
def batch_submit(func, batches, client, *args, **kwargs):
"""
Args:
func: function to call using client.
batches (list): list of input(s) per batch,
e.g. from split_dataframe or split_zip
client (concurrent.futures.Executor, dask.distributed.Client)
*args: arguments to pass to func.
**kwargs: keyword arguments to pass to func.
Returns:
future_list (list)
"""
future_list = []
for batch in batches:
future = client.submit(func, batch, *args, **kwargs)
future_list.append(future)
return future_list
|
daccce38932f9063ef37001b5c5a2f14cf413afc
| 474,605 |
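A minimal sketch with concurrent.futures.ThreadPoolExecutor; dask.distributed.Client exposes the same submit() interface, so either executor works here (the `total` helper and batch values are illustrative):
from concurrent.futures import ThreadPoolExecutor

def total(batch, scale=1):
    return scale * sum(batch)

with ThreadPoolExecutor(max_workers=2) as client:
    futures = batch_submit(total, [[1, 2], [3, 4], [5, 6]], client, scale=10)
    print([f.result() for f in futures])  # [30, 70, 110]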
def _union(groupA, groupB):
"""Returns the union of groupA and groupB"""
elems = set(groupA)
elems.update(groupB)
return list(elems)
|
f166ac1bf4cf913dde5711a35c7620573bfba9ef
| 189,416 |
import yaml
def load_yaml(filename):
"""
Load a YAML file.
Parameters
----------
filename : str
The name of the file to load
Returns
-------
dict
The contents of the file.
"""
with open(filename) as file:
return yaml.load(file, Loader=yaml.FullLoader)
|
68fd38e5312049af7c41bd2bfafc3345309ff166
| 602,752 |
def fake_call(command, **kwargs):
"""
Instead of shell.call, call a command whose output equals the command.
:param command: Command that will be echoed.
:return: Returns a tuple of (process output code, output)
"""
return (0, str(command))
|
9f9826bdb7d2f901f79d807dc59dbc11bf204de9
| 135,521 |
def argparse_textwrap_unwrap_first_paragraph(doc):
"""Join by single spaces all the leading lines up to the first empty line"""
index = (doc + "\n\n").index("\n\n")
lines = doc[:index].splitlines()
chars = " ".join(_.strip() for _ in lines)
alt_doc = chars + doc[index:]
return alt_doc
|
f7068c4b463c63d100980b743f8ed2d69b149a97
| 708,854 |
from pathlib import Path
def exists_fixture(filename: str) -> bool:
"""Check if a fixture exists."""
path = Path(Path(__file__).parent.joinpath("fixtures"), filename)
return path.exists()
|
c296f93a878a2d27154385e6ed11afc87f36aae7
| 551,132 |
def convert_xywh_to_xyxy(api_bbox):
"""
Converts an xywh bounding box to an xyxy bounding box.
Note that this is also different from the TensorFlow Object Detection API coords format.
Args:
api_bbox: bbox output by the batch processing API [x_min, y_min, width_of_box, height_of_box]
Returns:
bbox with coordinates represented as [x_min, y_min, x_max, y_max]
"""
x_min, y_min, width_of_box, height_of_box = api_bbox
x_max, y_max = x_min + width_of_box, y_min + height_of_box
return [x_min, y_min, x_max, y_max]
|
0781ac4a4f898724ef4b07646b04bf05f0235f83
| 533,741 |
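A worked example: the width and height are simply added to the minimum corner:
box_xywh = [10.0, 20.0, 30.0, 40.0]    # x_min, y_min, width, height
print(convert_xywh_to_xyxy(box_xywh))  # [10.0, 20.0, 40.0, 60.0]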
def validation_error_view(error, request):
"""
Return Marshmallow :class:`ValidationError`
as a JSON response with a proper response code.
:param error: Marshmallow error instance
:type error: ValidationError
:param request: Pyramid request
:type request: pyramid.request.Request
:return: Error messages
:rtype: dict[str,list]
"""
request.response.status_int = 400
return error.messages
|
655bd60ce3c6e6663dc2cd25d0e49dcc3d63c50f
| 198,107 |
def validate_option(option, min_value, max_value):
"""
Validates whether an option number is in the accepted interval (between
min_value and max_value)
"""
    if min_value <= option <= max_value:
return True
else:
print("\nNot a valid option!")
print(f'Only numbers between {min_value} and {max_value} are valid.\n')
return False
|
c31be5459504f602a715bd302e544a5ee0b08d1b
| 308,648 |
import platform
import shlex
def split_command_string(string):
"""Uses shlex.split() to split a string into a list according
to the operating system
"""
is_posix = platform.system() != 'Windows'
return shlex.split(string, posix=is_posix)
|
c556c9e0b4b45c902635d5e70d198184df7c0fd7
| 360,521 |
from typing import Dict
from typing import OrderedDict
def build_year_scoring_dict() -> Dict:
"""Returns an OrderedDict that will be used to populate
panelist scoring data"""
score_dict = OrderedDict(Jan=0, Feb=0, Mar=0, Apr=0,
May=0, Jun=0, Jul=0, Aug=0,
Sep=0, Oct=0, Nov=0, Dec=0)
return score_dict
|
c540f3974067cf91eae5a94890756849ef98db3e
| 559,650 |
def eval_expr(expr, dict_data):
"""
If expr is a string, will do a dict lookup using that string as a key.
If expr is a callable, will call it on the dict.
"""
if callable(expr):
return expr(dict_data)
else:
return dict_data.get(expr, None)
|
a1cb4af2a88d0636c24702124f0d31412da8de9a
| 566,783 |
import urllib3
def with_urllib3(url):
"""Get a streaming response for the given event feed using urllib3."""
http = urllib3.PoolManager()
return http.request('GET', url, preload_content=False)
|
047ed1e867f4b179f6f3799a1cf3cd3b7ffd8362
| 415,606 |
def subpx_bias(f, pos_columns=None):
"""Histogram the fractional part of the x and y position.
Parameters
----------
f : DataFrame
pos_columns : list of column names, optional
Notes
-----
    If subpixel accuracy is good, this should be flat. If it is depressed in
    the middle, try using a larger value for feature diameter."""
if pos_columns is None:
if 'z' in f:
pos_columns = ['x', 'y', 'z']
else:
pos_columns = ['x', 'y']
axlist = f[pos_columns].applymap(lambda x: x % 1).hist()
return axlist
|
cc69027b47616ac499b744cdc125d6adfe25f330
| 466,011 |
def get_all_dict_words(dict_):
"""
This function gets all the word entries of the dictionary.
:param dict_: Dictionary words and their definitions.
:type dict_: dict
:return: All the word entries.
:rtype: list
"""
words = set([])
for k, v in dict_.items():
for lev, meaning_words in v.items():
words = words.union(meaning_words)
return list(words)
|
ea0e336f20a2d51b72ca13c77f5696e3bf896039
| 72,071 |
def with_metaclass(mcls):
"""
For metaclass compatibility between Python 2 and 3.
cf. http://stackoverflow.com/questions/22409430/portable-meta-class-between-python2-and-python3
"""
def decorator(cls):
body = vars(cls).copy()
# clean out class body
body.pop('__dict__', None)
body.pop('__weakref__', None)
return mcls(cls.__name__, cls.__bases__, body)
return decorator
|
a7899592615a3bf5eb05e50ff81c880711efd4ba
| 629,178 |
def read_gene_cov_file(input_file):
"""Convert coverage mapping file into a dictionary data structure"""
d = {}
    with open(input_file, mode='rt') as f:
        for line in f:
            words = line.strip().split('\t')
            gene, coverage = words[0], int(words[1])
            d[gene] = coverage
return d
|
1ac7701beffb8e3f0aa6d0d1a7023e6557da9521
| 349,702 |
def filter_after_preprocess(processed_tokens, retained_dict):
"""
Removes any tokens not in the retained_dict from a list of processed tokens
    Argument(s): processed_tokens - a list of pre-processed tokens
retained_dict - a token-frequency dictionary of tokens retained
after removing tokens up to min_threshold frequency
Output: filtered - a list of tokens remaining after filtering out those that
are absent from retained_dict
"""
filtered = []
for token in processed_tokens:
        if token in retained_dict:
filtered.append(token)
return filtered
|
97d5800631cf6ffbe6a4365400b4a6f04626c499
| 497,564 |
def _read_one(stream):
"""Read a byte from the file (as an integer)
raises EOFError if the stream ends while reading bytes.
"""
c = stream.read(1)
if c == b'':
raise EOFError("Unexpected EOF while reading bytes")
return ord(c)
|
4a1df38ba144add9bc2cea748319ba2da7bab576
| 349,227 |
def mode(ary):
"""
    mode function
    find the value that appears most often in the given ary
    :param ary: input ary (some iterable object)
    :return: the value with the largest number of appearances
    """
    dct = {}
    for value in ary:
        if value in dct:
            dct[value] += 1
        else:
            dct[value] = 1
    return max(dct, key=lambda k: dct[k])
|
29cdbd665f0e23347498f69895c622797451409a
| 101,720 |
def class_as_string(obj):
"""Return the full class of an object as string"""
module = obj.__class__.__module__
if module is None or module == str.__class__.__module__:
return obj.__class__.__name__
return module + '.' + obj.__class__.__name__
|
3b87b2afd4af1a94f04e2ae2678babdc48e172a7
| 569,406 |
def linear_tri(frags, thr):
"""
Triangle (pyramid) of linear increase then linear decrease weights.
    Args:
        frags (list of list): [chrom,start,end] for each fragment in a GEM
        thr (int): maximum allowed span (end - start) for a merged entry;
            entries spanning thr or more are dropped (0 or less keeps all)
    Returns:
        lin_all (list of bed entries): bed entries in format [chrom,start,end]
"""
lin_all = []
n = len(frags)
for i in range(int(n/2)):
gem_cov = frags[i][0:2]
gem_cov.append(frags[n-i-1][2])
if thr > 0:
if (gem_cov[2]-gem_cov[1]) < thr:
lin_all.append(gem_cov)
else:
lin_all.append(gem_cov)
return lin_all
|
54587a8ade86cba968568b8c305f1e05663d120b
| 257,174 |
def _data_augmentation(text, labels, augmenter):
"""Use an augmentation method to expand a training set.
:param text: list of input text.
:param labels: list of corresponding labels.
:param augmenter: textattack.augmentation.Augmenter, augmentation scheme.
:return: augmented_text, augmented_labels. list of (augmented) input text and labels.
"""
aug_text = augmenter.augment_many(text)
# flatten augmented examples and duplicate labels
flat_aug_text = []
flat_aug_labels = []
for i, examples in enumerate(aug_text):
for aug_ver in examples:
flat_aug_text.append(aug_ver)
flat_aug_labels.append(labels[i])
return flat_aug_text, flat_aug_labels
|
31006ca10913f6d27d2949f7a6ae62d1eea41fb3
| 417,692 |
import json
def conv_to_json(my_string: str):
"""
Converts a valid string to a JSON object.
"""
try:
json_obj = json.loads(my_string)
return json_obj
except ValueError:
return None
|
78915f84507bbadd825074cab8c3dc7eca1c6754
| 139,989 |
def differential(f, x, deltax=1e-12):
"""
    Returns the slope of the given function at the point x
    using the central (symmetric) difference method. A deltax of
    1e-12 or 1e-11 works best.
"""
# the following formula is the average between the slopes using
# the right limit [(f(x + deltax) - f(x)) / deltax] and
# the left limit [(f(x) - f(x - deltax)) / deltax]
return (f(x + deltax) - f(x - deltax)) / (2 * deltax)
|
e0fffbda02edd8aec0f5b418278919aeee296156
| 312,052 |
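A worked check: the derivative of x**3 at x = 2 is 12, and the central difference lands close to it (the exact digits depend on floating-point rounding at this small deltax):
print(differential(lambda x: x**3, 2.0))  # approximately 12.0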
def get_lambda_timeout(lambda_context) -> int:
"""Gets the remaining lambda execution time in seconds."""
return int(lambda_context.get_remaining_time_in_millis()/1000)
|
b66f485bc5394151757f662e4d9b2be7c7e379f3
| 42,091 |
def count_words(tokens, n_words = 20):
"""
Count the number of comments containing each entry
Parameters
----------
tokens: list
a nested list containing lists of tokens or a list of spacy docs
    n_words: int
        The number of words to return. Defaults to 20, i.e. the top 20 words
Returns
-------
word_freq(most_common(n)): a list of tuples
a list of tuples containing words and frequencies
"""
# Flatten list and set to lower case
bag_of_words = [word.lower() for comment in tokens for word in comment]
unique_words = set(bag_of_words)
word_counts = []
for word in unique_words:
n = 0
for doc in tokens:
if word in doc:
n += 1
word_counts.append((word, n))
word_counts.sort(key = lambda x: x[1], reverse = True)
return word_counts[0:n_words]
|
5ea52c7fd101b8859345dae82d408c2a6bbd04ce
| 328,166 |
def split_function_name(fn):
"""
Given a method, return a tuple containing its fully-qualified
class name and the method name.
"""
qualname = fn.__qualname__
if '.' in qualname:
class_name, fn_name = qualname.rsplit('.', 1)
class_name = '%s.%s' % (fn.__module__, class_name)
else:
class_name = fn.__module__
fn_name = qualname
return (class_name, fn_name)
|
0f525d93afdf72269da303c13b69cc8f29aa0661
| 47,442 |
def MODULE_PATH(analysis_module):
"""Returns the "module_path" used as a key to look up analysis in ACE."""
return '{}:{}'.format(analysis_module.__module__,
analysis_module.__name__ if isinstance(analysis_module, type) else type(analysis_module).__name__)
|
fe0efb189899364d4141fc0d13ae1669bf950394
| 74,797 |
def R0(beta, gamma):
"""
R0 from model parameters.
"""
return beta / gamma
|
59bb954ec5ab48adf394d82c0ac2e678461eba35
| 257,748 |
def pipe_info(source, sink):
"""Return stream information header."""
# First line
if source is None:
source = ''
else:
source = f' from {repr(source)}'
if sink is None:
sink = ''
else:
sink = f' to {repr(sink)}'
return f"{source}{sink}"
|
9f610f915a25438dc4a36814e1a8707e54dcf504
| 251,241 |
def A11_2_d11(A11, SRM_ratio=4.04367):
"""
Convert Abundance to Delta notation.
Default SRM_ratio is NIST951 11B/10B
"""
return ((A11 / (1 - A11)) / SRM_ratio - 1) * 1000
|
ad257038b862d3260fa12cd2c8ce9966a9fa2dbd
| 96,846 |
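In standard delta notation, the function computes (with R_SRM defaulting to the NIST SRM 951 11B/10B ratio of 4.04367):

\delta^{11}\mathrm{B} = \left( \frac{A_{11} / (1 - A_{11})}{R_{\mathrm{SRM}}} - 1 \right) \times 1000

where A_{11}/(1 - A_{11}) recovers the sample's 11B/10B isotope ratio from its 11B abundance.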
def rule_passes(rule_regexp, product_repr):
"""
rule: rule(keep, drop)_A_B_C_D
product: A_B_C
"""
#we have altered the rules to have "*" as the 4th parameter (if split by _)
# so all the D's will pass
return rule_regexp.match(product_repr+"_")
|
94dc48a0c02282edfc7276c4b0fc7380701274ac
| 355,379 |
def _get_category_names(category_ids: list) -> list:
""" Transform the category id like '10_blogging' to a human-friendly name like 'Blogging'
If no name is found for an id the input value will be returned
"""
categories = {
"1_communication": "Social & Communication",
"2_entertainment": "Entertainment",
"6_news": "News & Weather",
"7_productivity": "Productivity",
"10_blogging": "Blogging",
"11_web_development": "Developer Tools",
"12_shopping": "Shopping",
"13_sports": "Sports",
"14_fun": "Fun",
"15_by-google": "By Google",
"22_accessibility": "Accessibility",
"28_photos": "Photos",
"38_search_tools": "Search Tools",
"69_office_applications": "Office Applications",
"71_online_documents_and_file_storage": "Online Documents & File Storage",
"83_online_videos": "Online Videos",
"87_task_management": "Task Management"
}
# Replace the category id with the category title, if not found use the category id as default
return [categories.get(category, category) for category in category_ids]
|
3f8745f65bd4f932f8df6f1868bc10eadc9d67cc
| 431,437 |
def getFile(file):
"""
Read out a file and return a list of string representing each line
"""
with open (file, "r") as out:
return out.readlines()
|
b869a7252fc45fdc6877f22975ee5281650877b9
| 30,083 |
def esc(code: int) -> str:
"""
Converts the integer code to an ANSI escape sequence
:param code: code
:return: escape sequence
"""
return f"\033[{code}m"
|
6bdc0679ba9b480220bc088bd09d6356dd539f1f
| 7,196 |
def aic(model):
"""Given a model, calculates an AIC score."""
k = model.num_of_params
L = model.lnlikelihood()
return 2*(k-L)
|
bd8d719537e4e3c2a1e7a6ec97f8237f870b6f5f
| 670,320 |
def find_subset_sum(values, target):
"""
Find a subset of the values that sums to the target number
This implements a dynamic programming approach to the subset sum problem
Implementation is taken from: https://github.com/saltycrane/subset-sum/blob/master/subsetsum/stackoverflow.py
@param values: List of integers
@param target: The sum that we need to find a subset to equal to it.
"""
def g(v, w, S, memo):
subset = []
id_subset = []
for i, (x, y) in enumerate(zip(v, w)):
# Check if there is still a solution if we include v[i]
if f(v, i + 1, S - x, memo) > 0:
subset.append(x)
id_subset.append(y)
S -= x
return subset, id_subset
def f(v, i, S, memo):
if i >= len(v):
return 1 if S == 0 else 0
if (i, S) not in memo: # <-- Check if value has not been calculated.
count = f(v, i + 1, S, memo)
count += f(v, i + 1, S - v[i], memo)
memo[(i, S)] = count # <-- Memoize calculated result.
return memo[(i, S)] # <-- Return memoized value.
memo = dict()
result, _ = g(values, values, target, memo)
return result
|
93b3062c710d617a20bd065e6d43c072dd7ad2fa
| 65,869 |
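A worked example: f counts how many subsets of the remaining values complete the target, and g greedily keeps each value whose inclusion still leaves at least one completion:
# 1 + 2 + 7 == 10, so one valid subset is returned
print(find_subset_sum([1, 2, 3, 7], 10))  # [1, 2, 7]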
def _temperature(temperature_register, farenheight=False):
"""Return the temperature in Celsius given the raw temperature register value.
    If `farenheight` is True, return the temperature in Fahrenheit instead.
"""
temp = (0xFF & (temperature_register >> 8)) + ((0x7 & (temperature_register >> 5)) / 8.0)
if farenheight:
temp = (((9 * temp) / 5.0) + 32)
return temp
|
6d70f12bf76a6bab1d19c8668941f569954d3282
| 71,900 |
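An illustrative register value, assuming the layout the bit masks imply (integer degrees in bits 15..8, eighths of a degree in bits 7..5):
reg = (25 << 8) | (4 << 5)  # 25 + 4/8 = 25.5 degrees Celsius
print(_temperature(reg))                    # 25.5
print(_temperature(reg, farenheight=True))  # 77.9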
import inspect
def get_method_arguments(method, exclude=None):
"""
Return the method's argument, associated with a boolean describing whether
or not the given argument is required.
"""
signature = inspect.getfullargspec(method)
args_count = len(signature.args) if signature.args else 0
default_args_count = len(signature.defaults) if signature.defaults else 0
result = {
arg: args_count - i > default_args_count
for i, arg in enumerate(signature.args)
}
if exclude:
for arg in exclude:
result.pop(arg)
return result
|
c44a82e173e32145dac91e7b6262b62553d55269
| 626,524 |
def mergeinfo_ranges_to_set(mergeinfo_ranges):
"""Convert compact ranges representation to python set object"""
result = set()
for r in mergeinfo_ranges:
        if isinstance(r, int):
result.add(r)
else:
result |= set(range(r[0], r[1]+1))
return result
|
e7587da43459f89902b9bea7d8cfd13d3e51e87d
| 560,662 |
import glob
def get_lines_from_file(folder_name, interface_name, file_string):
"""Open file with the path "folder_name/interface_name/filestring"
and return its contents.
"""
with open(glob.glob(
f"{folder_name}/{interface_name}/{file_string}")[0], "r") as file:
return file.readlines()
|
171437e75ac1bbac1b11f0d9b7563dae766036f3
| 100,051 |
import torch
def expand_dims(input, axis):
"""
Inserts a dimension of 1 into a tensor's shape.
Parameters
----------
input : tensor
A Tensor.
axis : int
0-D (scalar). Specifies the dimension index at which to expand the shape of input.
Must be in the range [-rank(input) - 1, rank(input)].
Returns
-------
A Tensor with the same data as input, but its shape has an additional dimension of size 1 added.
"""
return torch.unsqueeze(input, axis)
|
2e0ad21fab2c142b896552b980c88edf80c9fe8d
| 566,065 |
def rescue_default(callback, default=""):
"""Call callback. If there's an exception, return default.
It's convenient to use lambda to wrap the expression in order to
create a callback. Only catch IndexError, AttributeError, and ValueError.
"""
try:
return callback()
except (IndexError, AttributeError, ValueError):
return default
|
805bfdc5c15fef83d4bc4901e8d71654354a522e
| 109,052 |
def capacity_cost_rule(mod, g, p):
"""
The capacity cost of projects of the *gen_ret_bin* capacity type is its net
capacity (pre-specified capacity or zero if retired) times the per-mw
fixed cost for each of the project's operational periods.
"""
return mod.gen_ret_bin_fixed_cost_per_mw_yr[g, p] \
* mod.gen_ret_bin_capacity_mw[g, p] \
* (1 - mod.GenRetBin_Retire[g, p])
|
8e004e6926e14873b635ce68d9e87cbd26ab79cf
| 501,967 |
def sample_from_bam(bam):
"""Extract sample name from BAM"""
read_groups = bam.header["RG"]
samples = set([rg["SM"] for rg in read_groups])
assert len(samples) == 1, "BAM file {} must contain a single sample".format(
bam.filename
)
sample, = samples # Extract single value from set
return sample
|
9df75c80b15481870c04eefaa9aefc1ce3cfc989
| 251,606 |
from typing import List
from typing import Any
def is_int_array(array: List[Any]) -> bool:
"""Returns `True` is array contains integers or floats otherwise `False`.
Examples:
>>> assert not is_int_array("")
>>> assert not is_int_array(None)
>>> assert not is_int_array("-1")
>>> assert not is_int_array([1, 2, None])
>>> assert is_int_array([])
>>> assert is_int_array([1, 2, 3, 4])
>>> assert is_int_array([-11, -12, -13, -14])
>>> assert is_int_array([1.0, 2.0, 3.0])
"""
if array is None or isinstance(array, str):
return False
for item in array:
if not isinstance(item, (int, float)):
return False
if isinstance(item, float):
if not item.is_integer(): # e.g 3.001 is decimal
return False
return True
|
5e86a90b45ee1681fd47a93b152de12f045a757a
| 447,325 |
def _cid2c(cid):
"""Gets just the component portion of a cid string
e.g. main_page/axis_controls|x_column.value => x_column
"""
return cid.split("|")[-1].split(".")[0]
|
409f0ed212ae0453363618d684a07011ff5f9589
| 93,727 |
import math
def round_mult(val, multiple, direction='round'):
"""Rounds :val: to the nearest :multiple:. The argument :direction: should be either 'round', 'up', or 'down'."""
round_func = {'round': round, 'up': math.ceil, 'down': math.floor}
return round_func[direction](val / multiple) * multiple
|
53ba98f1c8a4c623c8831e831b21ff689483f58a
| 50,215 |
def get_tech_installed(enduse, fuel_switches):
"""Read out all technologies which are specifically switched
to of a specific enduse
Parameter
---------
enduse : str
enduse
fuel_switches : dict
All fuel switches where a share of a fuel
of an enduse is switched to a specific technology
Return
------
installed_tech : list
List with all technologies where a fuel share is switched to
crit_fuel_switch : bool
        Criterion for whether a switch is defined or not
"""
# Add technology list for every enduse with affected switches
    installed_tech = set()
    for switch in fuel_switches:
        if switch.enduse == enduse:
            installed_tech.add(switch.technology_install)
    crit_fuel_switch = len(installed_tech) > 0
    return list(installed_tech), crit_fuel_switch
|
4e84c574c9afd08010df653cf00c03daa38bab45
| 662,219 |
def underscore_ip(ip):
"""
Takes an ipv4 adress and replaces dots with underscores.
"""
if (ip is None):
return None
return ip.replace(".", "_")
|
be2d7f8ddedd7ffc43407ce15ae41edbab4f7d52
| 532,412 |
def is_avx512_instruction_form(instruction_form):
"""Indicates whether the instruction form belongs to AVX512 extensions"""
return instruction_form.isa_extensions and instruction_form.isa_extensions[0].name.startswith("AVX512")
|
d6538b119c38c2fe6040c1481ad17dda2c24e4c8
| 440,925 |
def upload_file_to_slack(slack_connection, file_name, channel_name, timestamp):
"""
Upload a file to Slack in a thread under the main message.
All channel members will have access to the file.
Only works for files <50MB.
"""
try:
file_response = slack_connection.files_upload(
file=file_name,
filename=file_name,
title=file_name,
channels=channel_name,
thread_ts=timestamp)
print(f'{file_name} successfully uploaded.')
except Exception as e:
print(e)
file_response = None
print('File failed to upload.')
return file_response
|
1922bf8d357881325fd861bc76827961c7ac4db8
| 30,566 |
def user_with_role(user, role):
""" Build user representation with attached role """
return {
'id': user.id,
'username': user.username,
'email': user.email,
'role': role
}
|
5c7e134c5df16ac6b987be1c3c31a8eeba785e15
| 199,266 |
def f(x):
"""Cubic function."""
return x**3
|
13832221de3490dbd92f4f1a26854baec7010023
| 1,300 |
def clean_data(df):
"""
:param df: DataFrame of messages and categories
:return: cleaned DataFrame of messages and categories, with duplicates removed
"""
return df.drop_duplicates(subset='id')
|
9dcfb5bf579bb89562df2ba19927a219d2967434
| 334,916 |
def tempo_execucao(start_time, end_time, num_el, show):
"""
    Computes the execution time of the code
    Parameters
    ----------
    start_time: float
        Start time
    end_time: float
        End time
    num_el: int
        Number of elements in the current run
    show: bool
        Print the time to the console
    Returns
    -------
    exec_time: float
        The execution time
    """
exec_time = round((end_time - start_time), 2)
if show:
print(f"Elementos: {num_el}|", end=" ")
if exec_time > 60:
print(f"{round(exec_time/60, 2)} minutos")
else:
print(f"{round(exec_time, 2)} segundos")
return exec_time
|
d8e908cf595695e9b35e70bce0a95989aa54705a
| 439,545 |