content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def alignment_patterns_table() -> dict[int, list[int]]:
    """Return QR alignment-pattern centre positions keyed by version.

    Returns:
        dict[int, list[int]]: Maps QR code version (2-40) to the list of
        alignment-pattern centre coordinates for that version.  Version 1
        has no alignment patterns and therefore no entry.
    """
    return {
        2: [18],
        3: [22],
        4: [26],
        5: [30],
        6: [34],
        7: [6, 22, 38],
        8: [6, 24, 42],
        9: [6, 26, 46],
        10: [6, 28, 50],
        11: [6, 30, 54],
        12: [6, 32, 58],
        13: [6, 34, 62],
        14: [6, 26, 46, 66],
        15: [6, 26, 48, 70],
        16: [6, 26, 50, 74],
        17: [6, 30, 54, 78],
        18: [6, 30, 56, 82],
        19: [6, 30, 58, 86],
        20: [6, 34, 62, 90],
        21: [6, 28, 50, 72, 94],
        22: [6, 26, 50, 74, 98],
        23: [6, 30, 54, 78, 102],
        24: [6, 28, 54, 80, 106],
        25: [6, 32, 58, 84, 110],
        26: [6, 30, 58, 86, 114],
        27: [6, 34, 62, 90, 118],
        28: [6, 26, 50, 74, 98, 122],
        29: [6, 30, 54, 78, 102, 126],
        30: [6, 26, 52, 78, 104, 130],
        31: [6, 30, 56, 82, 108, 134],
        32: [6, 34, 60, 86, 112, 138],
        33: [6, 30, 58, 86, 114, 142],
        34: [6, 34, 62, 90, 118, 146],
        35: [6, 30, 54, 78, 102, 126, 150],
        36: [6, 24, 50, 76, 102, 128, 154],
        37: [6, 28, 54, 80, 106, 132, 158],
        38: [6, 32, 58, 84, 110, 136, 162],
        39: [6, 26, 54, 82, 110, 138, 166],
        40: [6, 30, 58, 86, 114, 142, 170],
    }
|
86bf868cfd00b66b1175abe34f7824567e38808a
| 653,538 |
def get_video_id_by_number(number, results):
    """Look up a YouTube video id by its 1-based position in search results.

    Videos are numbered from 1 to maxResults (optional search parameter,
    5 by default, see
    https://developers.google.com/youtube/v3/docs/search/list)
    matching their order in the results list.

    Args:
        number (int): 1-based position of the requested video
        results (list): search results produced by search_by_keyword

    Returns:
        string: youtube video id
    """
    entry = results[number - 1]
    return entry['id']['videoId']
|
469649f723f205758f2fd55c8990e567361b425c
| 171,522 |
def boolean(value):
    """Coerce the given value to a boolean.

    ``bool`` instances and ``None`` are passed straight through
    ``bool()`` for convenience.  Any other value is treated as a string:
    it is True exactly when it case-insensitively matches one of the
    truthy spellings, and False otherwise.
    """
    if value is None or isinstance(value, bool):
        return bool(value)
    truthy = ('true', 't', 'yes', 'y', '1')
    return value.lower() in truthy
|
eb81f0e1139904dd19d9b452b99c589632196344
| 492,571 |
def format_day(value, format_string='%e'):
    """Format the day component of a datetime object via strftime.

    The default '%e' (space-padded day of month) is platform dependent.
    """
    formatted = value.strftime(format_string)
    return formatted
|
693131d1b3765efce7ac60d6c9158095ef09216f
| 466,829 |
def clean_specie_name(specie_str: str) -> str:
    """
    Clean up a species name from
    '<element>\xa0<roman number for oxidation state>' to '<element>'.

    Everything after the first non-breaking space is discarded.
    """
    element, _, _ = specie_str.partition("\xa0")
    return element
|
fd5f435982fca9471b08cf02b217fd849c38fee6
| 637,682 |
import glob
def get_all_vdirs(path):
    """Expand *path* with glob and return the matching paths.

    Args:
        path: a glob pattern.

    Returns:
        list: matching filesystem paths (possibly empty).
    """
    return glob.glob(path)
|
ce9a48976303257befc2776ff03a5caa189d6de2
| 365,427 |
from datetime import datetime
def string_to_datetime(date_str: str):
    """Parse a Twitter-style timestamp into a timezone-aware datetime.

    Example: 'Sun Jul 01 21:06:07 +0000 2018' becomes
    datetime.datetime(2018, 7, 1, 21, 6, 7, tzinfo=datetime.timezone.utc).
    """
    fmt = '%a %b %d %H:%M:%S %z %Y'
    return datetime.strptime(date_str, fmt)
|
5da02754b1ca967acbcb7f4a150afb2dca087aa0
| 270,298 |
def is_scalar_shape(shape):
    """Determine whether *shape* is the scalar (0-dimensional) shape.

    Args:
        shape (tuple): a shape tuple.

    Returns:
        bool: True exactly when the shape is the empty tuple ``()``.
    """
    # A direct comparison replaces the redundant
    # if ...: return True / return False pattern.
    return shape == ()
|
b36c5171423dc678f488a260478a1883e7ec4f43
| 225,860 |
def _partition_at_level(dendrogram, level) :
"""Return the partition of the nodes at the given level
A dendrogram is a tree and each level is a partition of the graph nodes.
Level 0 is the first partition, which contains the smallest snapshot_affiliations, and the best is len(dendrogram) - 1.
The higher the level is, the bigger are the snapshot_affiliations
"""
partition = dendrogram[0].copy()
for index in range(1, level + 1) :
for node, community in partition.items() :
partition[node] = dendrogram[index][community]
return partition
|
b179127076c386480c31a18a0956eb30d5f4ef2a
| 1,167 |
def chrom_header(sample_name: str) -> str:
    """Create the final VCF '#CHROM' header line for a single sample."""
    columns = ["#CHROM", "POS", "ID", "REF", "ALT", "QUAL",
               "FILTER", "INFO", "FORMAT", sample_name]
    return "\t".join(columns)
|
eade25f765d7730125dfc3f15b5c80659d6a981a
| 462,787 |
def indices(alist, value):
    """
    Return all positions at which *value* occurs in *alist*.

    Args:
        alist (list): sequence to scan.
        value: element to look for (compared with ``==``).

    Returns:
        list[int]: indices of every match, in ascending order.
    """
    # enumerate replaces the unidiomatic range(len(...)) indexing.
    return [i for i, item in enumerate(alist) if item == value]
|
db009b670aa70f5cb9087533a41c6e76854e1d22
| 100,810 |
def RGB_to_rgb(r, g, b):
    """
    Scale 8-bit RGB components to the unit interval.

    :param r,g,b: (0,255) range floats
    :return: a 3 element tuple of rgb values in the range (0, 1)
    """
    return tuple(float(channel) / 255 for channel in (r, g, b))
|
11651e46a627ac4ae4a9a2270baa64e3892c898f
| 260,172 |
def gt_polyp(g):
    """Return True if the sample genotype has a polyploid call.

    Parameters
    ----------
    g : str
        Sample genotype (only the first colon-separated field, the GT
        subfield, is inspected).

    Returns
    -------
    bool
        True when the GT field lists more than two alleles.

    Examples
    --------
    >>> gt_polyp('1')
    False
    >>> gt_polyp('0/1')
    False
    >>> gt_polyp('0/1/1')
    True
    >>> gt_polyp('1|0|1')
    True
    >>> gt_polyp('0/./1/1')
    True
    """
    gt, _, _ = g.partition(':')
    separator = '/' if '/' in gt else '|'
    return gt.count(separator) > 1
|
62649d2d38cb347c5ead3b85a20829264028b66c
| 300,262 |
import math
def find_frac_cut_point(entropies, fraction=0.995):
    """
    Calculate a cut-off point from a fraction of the entropy distribution.

    The use case is a native-word entropy distribution: loan-word
    entropies are generally greater than native ones, so thresholding at
    a high fraction yields low false negatives and high true positives.
    The returned cut point interpolates between the two sorted values
    bracketing the fractional index.
    """
    # Entropies are neither power-law nor Gaussian distributed, so a
    # fraction of the empirical distribution is a safer discriminator
    # between native and loan than a Gaussian z value.
    ordered = sorted(entropies)
    last = len(ordered) - 1
    idx = min(last * fraction, last)
    low, high = ordered[math.floor(idx)], ordered[math.ceil(idx)]
    return (low + high) / 2
|
0e4093c6c46c16562ca5a972ebbf5fb61707a3df
| 357,343 |
def dec_bytes(data: bytes, sep: str = " "):
    """Format a bytes() object as a decimal dump, one number per byte."""
    return sep.join(map(str, data))
|
37aa71c05ece662a4ea656ba55301504a035eeeb
| 401,055 |
def get_list_from_file(file_name):
    """Read the lines of a UTF-8 file into a list (newlines retained)."""
    with open(file_name, mode='r', encoding='utf-8') as handle:
        return handle.readlines()
|
a7ac0fdeb6eae012962ba1e96fe390d1ddddfabc
| 661,093 |
def is_valid_ascii(path):
    """Return True when the byte string *path* decodes as valid ASCII."""
    try:
        path.decode('ascii')
    except UnicodeDecodeError:
        return False
    return True
|
185f88b67eee43d45f5216d85f836e94985b7d56
| 171,552 |
import pathlib
def _get_artefacts_count_of_bootstrap_nodes(_: pathlib.Path) -> int:
"""Returns number of network bootstrap nodes.
"""
return 3
|
7c03491c47161fc65474bc39a59e3a8b6ecf34f2
| 628,411 |
def boxes_xyxy_rel_to_abs(bboxes, img_shape):
    """Convert boxes from relative to absolute coordinates (in place).

    bboxes: array of boxes in XYXY relative coordinates; modified and
        also returned.
    img_shape: tuple (height, width) used as the scale factors.
    """
    height, width = img_shape
    # Even-numbered columns hold x coordinates, odd-numbered hold y.
    bboxes[:, 0::2] = bboxes[:, 0::2] * width
    bboxes[:, 1::2] = bboxes[:, 1::2] * height
    return bboxes
|
ea6cc57794424d3bf8ab371c1ffaae25c8b8e9cf
| 474,746 |
def scale_to_total(value):
    """\
    Normalise a mapping of distinct quantities into a mapping of
    proportions of the total quantity (values sum to 1.0).
    """
    total = float(sum(value.values()))
    return {key: quantity / total for key, quantity in value.items()}
|
4ac2be24fa668b9b194749def3649253a413545a
| 600,600 |
import six
import re
def sanitize_hostname(hostname):
    """Return a hostname which conforms to RFC-952 and RFC-1123 specs.

    Text input is encoded to latin-1 (unmappable characters dropped);
    spaces and underscores become hyphens, any character outside
    [A-Za-z0-9_.-] is removed, and leading/trailing '.'/'-' are
    stripped.  The result is a bytes object.
    """
    if isinstance(hostname, six.text_type):
        hostname = hostname.encode('latin-1', 'ignore')
    hostname = re.sub(b'[ _]', b'-', hostname)
    # rb'...' fixes the invalid "\w" escape inside a plain bytes
    # literal (DeprecationWarning, SyntaxWarning on newer Pythons).
    hostname = re.sub(rb'[^\w.-]+', b'', hostname)
    hostname = hostname.lower()
    hostname = hostname.strip(b'.-')
    return hostname
|
67d99e231ddb2e3b7689ff4ef1257c9fd0a302ee
| 245,622 |
def get_encoded_len_fn(typename):
    """
    Get the name of the function that retrieves the encoded length of a
    type: 'sbp_<name>_t' maps to 'sbp_<name>_encoded_len'.
    """
    assert typename.startswith("sbp_")
    assert typename.endswith("_t")
    return typename[:-2] + "_encoded_len"
|
040d6bce6bf3f11789fb4f09fc26ca3f82e4763c
| 165,154 |
def S(Lt, l):
    """
    Calculates the S coefficient.

    Parameters
    ----------
    Lt : float
        The length way of liquid, [m]
    l : float
        The length way of liquid of one slot mix, [m]

    Returns
    -------
    S : float
        The S coefficient, [dimensionless]

    References
    ----------
    &&&&
    """
    ratio = Lt / l
    return ratio
|
0df571c0b597dd05c3e516919dd11a27f0451d3c
| 108,701 |
import re
def extract_infos(filename, patterns=None, transformers=None):
    """Extract information from *filename* via regular expressions.

    patterns: A dict of the form {key: pattern}. A collection of
              regular expressions to extract information from the
              filename: {key: re.match(pattern, filename)}
    transformers: A dict of the form {key: trafo}. A collection of
              transformers with signature trafo(ret), where ret is
              the object returned by re.match(). The default
              transformer returns the first capture group.

    The info will be returned as a dict {key: info}.

    Example:
        extract_initials = lambda ret: (ret.group(1)+ret.group(2)).upper()
        infos = extract_infos("walt_disney",
                              patterns={"surname": ".*_(.*)",
                                        "initials": "(.).*_(.).*"},
                              transformers={"initials": extract_initials})
        # infos = {'surname': 'disney', 'initials': 'WD'}
    """
    if patterns is None:
        patterns = {}
    # Copy so the caller's dict is not mutated when default transformers
    # are filled in (the original implementation wrote into it).
    transformers = dict(transformers) if transformers is not None else {}

    def default_trafo(ret):
        return ret.group(1)

    infos = {}
    for key, pattern in patterns.items():
        ret = re.match(pattern, filename)
        msg = "File pattern does not match filename: %s (%s)"
        assert ret is not None, (msg % (pattern, key))
        infos[key] = ret
        transformers.setdefault(key, default_trafo)
    msg = ("Can only transform information that was previously extracted. "
           "No pattern specified for trafo key '%s'")
    for key, trafo in transformers.items():
        assert key in patterns, msg % key
        infos[key] = trafo(infos[key])
    return infos
|
ed6065dcf390406f99929cab001d25ad2d327a7d
| 300,777 |
def bt_device_services(bt, address):
    """
    Return all the services and characteristics for the given device,
    or None when no bluetooth backend is supplied.
    """
    if not bt:
        return None
    return bt.get_device_services(address)
|
3497276da1e64ee24279e156107d284214e4a814
| 319,533 |
import tarfile
def is_tarfile(filename):
    """Tell whether *filename* is a tar ball (delegates to tarfile)."""
    return tarfile.is_tarfile(filename)
|
6aeb443d7be140ac914504208b992155639a4af0
| 121,231 |
def SUM(src_column):
    """
    Builtin sum aggregator for groupby.

    Example: Get the sum of the rating column for each user. If
    src_column is of array type and the arrays do not match in length,
    a NoneType is returned in the destination column.

    >>> sf.groupby("user",
    ...            {'rating_sum':tc.aggregate.SUM('rating')})
    """
    operator = "__builtin__sum__"
    return (operator, [src_column])
|
18adf3b81e7cd15f603ebc60878598dcfd56144f
| 633,395 |
def issubtype(tcon1, tcon2):
    """
    Check whether tcon1 <: tcon2, i.e. whether tcon2 appears among
    tcon1's supertypes.
    """
    supertypes = tcon1.supertypes
    return tcon2 in supertypes
|
ae72afab2cd77028ac61392c45a871b8c4c6a4e6
| 589,776 |
def remove_command_flag(command_string):
    """Remove the command flags from a command string.

    Parameters
    ----------
    command_string: str
        String containing commands, encased in {}.

    Returns
    -------
    translated_command: str
        Command with bracket flags removed.
    """
    # One translate() pass deletes both brackets; this replaces the
    # lambda assignment (PEP 8 E731) and dict-comprehension indirection.
    return command_string.translate(str.maketrans("", "", "{}"))
|
36c21cae18c8c05c1ceb4da2675a51c825ea1949
| 564,194 |
def divceil(divident, divisor):
    """Integer division with rounding up (ceiling division)."""
    # Negating both operand and result turns floor division into
    # ceiling division for integers.
    return -(-divident // divisor)
|
ec51fa6fc9c88e1549f3cdd804c8c6274b523413
| 625,131 |
from typing import Any
def NOT_EQUAL(left: Any, right: Any) -> str:
    """
    Creates an inequality assertion string.

    >>> NOT_EQUAL(2,2)
    '2!=2'
    """
    return f"{left}!={right}"
|
12d15ec69dd8ccc9eb4264a4c7fd48b9948ae8fa
| 287,818 |
def _AddNGrad(op, grad):
"""Copies the gradient to all inputs."""
# Not broadcasting.
return [grad] * len(op.inputs)
|
9cbeee4a863b44e45475b4161fdc7b94f576005a
| 691,097 |
from typing import Callable
def channel_is_open(func) -> Callable:
    """
    Decorator: check that the Producer channel is open before calling
    *func*; if it is closed, open a new channel and store it on the
    instance first.

    func (Callable): wrapped function.
    returns (Callable): the decorated function.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name and docstring
    def wrapper(ins):
        if not ins.channel.is_open:
            ins.channel = ins.open_new_channel()
        return func(ins)
    return wrapper
|
a5469e20fe5ead626886200f5261149471c9d2bc
| 272,575 |
def fit_parabola(x1, x2, x3, y1, y2, y3):
    """Return the parabola coefficients (a, b, c) with
    y(x) = a*x**2 + b*x + c passing through the three given points."""
    denom = (x1 - x2) * (x1 - x3) * (x2 - x3)
    a_num = x3 * (y2 - y1) + x2 * (y1 - y3) + x1 * (y3 - y2)
    b_num = x1**2 * (y2 - y3) + x3**2 * (y1 - y2) + x2**2 * (y3 - y1)
    c_num = (x2**2 * (x3 * y1 - x1 * y3) + x2 * (x1**2 * y3 - x3**2 * y1)
             + x1 * x3 * (x3 - x1) * y2)
    return a_num / denom, b_num / denom, c_num / denom
|
e077f5a895e353d5b980b15bee603be5c34d3ec4
| 702,218 |
def workaround(browser_check, replacement_code):
    """
    Decorator factory: if *browser_check* says the workaround applies,
    execute *replacement_code* instead of the original function.

    The replacement code receives the germanium instance as its first
    parameter and the original function as its second, so it can default
    to calling it and easily test whether the function was fixed
    upstream.

    :param browser_check:
    :param replacement_code:
    :return:
    """
    def wrapper(original):
        def original_aspect(*args, **kwargs):
            # Some workarounds are based on environment-variable checks
            # and are invoked without a germanium instance; tolerate an
            # empty argument list.
            germanium = args[0] if args else None
            if browser_check(germanium):
                return replacement_code(germanium, original, *args, **kwargs)
            return original(*args, **kwargs)
        return original_aspect
    return wrapper
|
fad298a027a5e4141796aa12e45885c7c6c523c9
| 277,188 |
def index(bytes, c, pos=0):
    """Return the position (0-based) of value *c* in *bytes*, or -1 if
    it is not found.

    pos: position at which the search starts (inclusive).

    Note: the previous implementation skipped index ``pos`` itself
    (it continued while ``i <= pos``), so with the default pos=0 a
    match at position 0 could never be found; the search now starts at
    ``pos`` as the docstring promises.
    """
    for i in range(max(pos, 0), len(bytes)):
        if bytes[i] == c:
            return i
    return -1
|
8808ee98a9c96aa1e3781153c47160cc87940288
| 520,251 |
import json
def json_load(file):
    """
    Load and return JSON data from the given file path (UTF-8).
    """
    with open(file, 'r', encoding='utf8') as handle:
        return json.load(handle)
|
9d2ef792a9d2b201a5608601057f4f76e7905662
| 24,804 |
def select_by_year(year, D):
    """Select the value whose year range covers *year*.

    *D* maps single years or (start, end) ranges — with ``...``/None
    meaning unbounded — to values; it may be a dict or a list of pairs.

    Examples:
        >>> spec = {(..., 1990): 'foo',
        ...         1991: 'bar',
        ...         (1992, 2000): 'foobar',
        ...         (2001, ...): 'blah'}
        >>> select_by_year(1990, spec)
        'foo'
        >>> select_by_year(1991, spec)
        'bar'
        >>> select_by_year(1999, spec)
        'foobar'
        >>> select_by_year(2010, spec)
        'blah'
    """
    pairs = list(D.items()) if hasattr(D, 'items') else D
    # Normalise every entry into a sorted (low, high, value) triple.
    intervals = []
    for key, value in pairs:
        low, high = key if isinstance(key, (tuple, list)) else (key, key)
        if low in (..., None):
            low = -float('inf')
        if high in (..., None):
            high = float('inf')
        intervals.append((low, high, value))
    intervals.sort()
    for low, high, value in intervals:
        if low <= year <= high:
            return value
    raise ValueError('no valid range for year %s: %s' % (year, intervals))
|
0c3eca376e4f06bf3d3f85308bf02c35ded21e9e
| 680,161 |
def NOT_IS(attribute, val):
    """
    Check if the two values compare unequal.
    """
    are_different = attribute != val
    return are_different
|
3567a3789bc3ab9176ee9ba4a5d1eb0a4c4dec48
| 76,299 |
def get_detector_type(meta):
    """
    Gets the IRIS detector type from a meta dictionary.

    FUV1 and FUV2 are both reported simply as "FUV".

    Parameters
    ----------
    meta: dict-like
        Dictionary-like object containing an entry for "detector type".

    Returns
    -------
    detector_type: `str`
        Detector type.
    """
    raw_type = meta["detector type"]
    return "FUV" if "FUV" in raw_type else raw_type
|
eb7ce98315f398701f3bf74c495b4f18b4c5ae36
| 554,989 |
from typing import Any
def is_sdict(x: Any) -> bool:
    """Check if an object is an `SDict` (a SAX S-dictionary)."""
    result = isinstance(x, dict)
    return result
|
15d6fa57b81f9e8e942383e09315d5c61783cc38
| 610,342 |
def split_txts(txts):
    """
    Split each text into single characters, replacing ' ' by '<SPACE>'.

    Args:
        txts: The texts that will be split into single characters.

    Returns:
        The split texts and a sorted list of all unique characters
        occurring in those texts.
    """
    all_split = []
    vocabulary = set()
    for text in txts:
        chars = ['<SPACE>' if ch == ' ' else ch for ch in text]
        all_split.append(chars)
        vocabulary.update(chars)
    return all_split, sorted(vocabulary)
|
f4ec15021053547bd38065fb4af8f9e7328a3f6e
| 216,549 |
def shear_bending_stress(V, Q, I, b):
    """Shear stress due to bending: tau = V*Q / (I*b).

    :param float V: Shear max_force in y direction
    :param float Q: first moment of in cross section in y direction
    :param float I: Area moment of inertia around the y axis
    :param float b: thickness
    :returns: Shear stress resulting from bending
    :rtype: float
    """
    numerator = V * Q
    return numerator / (I * b)
|
bac8c7de412d6300bb459a350256af4432557886
| 200,518 |
from typing import Any
def test(test_input: Any):
    """Test function for a simple Pythonian test.

    Arguments:
        test_input (Any): thing to be printed.

    Returns:
        1, always 1. For testing purposes.
    """
    message = str(test_input)
    print(message)
    return 1
|
0a97454c68a29413cb342706bb949dd52e5421bd
| 350,901 |
import re
def language(text, wtl):
    """Return the language of *text*: 'pt' when a known Portuguese
    keyword matches, otherwise whatever the *wtl* classifier predicts."""
    words = [
        "recomendo", "amei", "entrega", "otim[ao]", "excelente", "rapida", "celular", "gostei",
        "facil", "lindo", "bonito", "comprei", "legal", "perfume", "preco", "tela", "pra", "lento",
        "problema", "pelicula", "memoria", "cabelo", "ultima",
    ]
    pattern = rf'\b({"|".join(words)})\b'
    if re.search(pattern, text):
        return "pt"
    return wtl.predict_lang(text)
|
8b5ecd781825b3ad7217c144cf70796a6ebde8d3
| 687,475 |
import unicodedata
def len(s):
    """
    Calculates the length of a string, taking into account CJK
    (double-width) and zero-width characters.

    .. note::
        The string passed to this function is expected to have been
        :attr:`normalized <normalize>`.

    Parameters
    ----------
    s: :class:`str`
        The string to calculate the length of.

    Returns
    -------
    :class:`int`
        The calculated length of the string.
    """
    # NOTE: unicode characters 0001-0006, 0010-001A, 001C-001F appear
    # to be replaced in WT by 263A-263B, 2665-2666, 2663, 2660,
    # 25BA, 25C4, 2195, 203C, 00B6, 00A7, 25AC, 21A8, 2191, 2193,
    # 2192, 221F, 2194, 25B2, and 25BC respectively.
    total = 0
    for ch in s:
        code = ord(ch)
        # Control ranges 0000-001F and 007F-009F are special-cased to
        # zero width.
        if code <= 31 or 127 <= code <= 159:
            continue
        # Fullwidth/Wide east-asian characters count double.
        total += 2 if unicodedata.east_asian_width(ch) in "FW" else 1
    return total
|
853c56bbeb3504f68cc8f8ae5bb0f24348fd1ee7
| 298,708 |
import logging
def get_logger(name):
    """
    Return a multiprocess safe logger.

    :param str name: Name of the logger.
    :return: A multiprocess safe logger.
    :rtype: :py:class:`logging.Logger`.
    """
    logger = logging.getLogger(name)
    return logger
|
a5143cc3c4929f8a9c889f055b3b01d8fed2865e
| 658,635 |
import struct
import socket
import fcntl
def get_interface_IP(interface_name):
    """Retrieve IP address of the specified network interface.

    Args:
        interface_name (str): Name of the interface.

    Returns:
        str: Standard dotted-quad string representation of the IP address.
    """
    IFNAMSIZ = 16  # Maximum size of the interface name including zero character.
    SIOCGIFADDR = 0x8915  # IOCTL control code
    IFREQSIZ = 40  # sizeof(struct ifreq)
    # Pack interface name into ASCIIZ form while making sure
    # the interface name length is within the allowed range.
    interface_name_z = struct.pack(
        "%ds" % IFREQSIZ, bytes(interface_name[:IFNAMSIZ - 1], "utf-8"))
    # Use the UDP socket as a context manager so its file descriptor is
    # released even when the ioctl fails (the original leaked it).
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as udpSock:
        # Execute IOCTL.
        result = fcntl.ioctl(udpSock.fileno(), SIOCGIFADDR, interface_name_z)
    # Extract binary IP and convert to string.
    # The code below is an emulation of:
    #   ((struct sockaddr_in *) &ifr.ifr_addr)->sin_addr
    bin_IP = result[20:24]
    return socket.inet_ntoa(bin_IP)
|
0191e325447dfe9b6a7dcb330fdb3f2fe8329454
| 625,396 |
from typing import Dict
from typing import Any
def build_validation(validator) -> Dict[str, Any]:
    """Builds and returns a fake validator response object."""
    # Per-validator metadata payloads; unknown validators get {}.
    metadata_by_validator = {
        "sql": {"sql": "SELECT user_id FROM users"},
        "content": {
            "field_name": "view_a.dimension_a",
            "content_type": "dashboard",
            "space": "Shared",
        },
        "assert": {"test_name": "test_should_pass"},
    }
    metadata = metadata_by_validator.get(validator, {})
    return {
        "validator": validator,
        "status": "failed",
        "tested": [
            {"model": "ecommerce", "explore": "orders", "passed": True},
            {"model": "ecommerce", "explore": "sessions", "passed": True},
            {"model": "ecommerce", "explore": "users", "passed": False},
        ],
        "errors": [
            {
                "model": "ecommerce",
                "explore": "users",
                "message": "An error occurred",
                "metadata": metadata,
            }
        ],
    }
|
a344d97b2f80486a0c826099bc47fe4bdd60989a
| 46,064 |
import secrets
def random_select(var_names, count):
    """
    Sample *count* variables from *var_names* (in place).

    :param var_names: a name-bool pair from which to sample (mutated).
    :type var_names: dict
    :param count: the number of variables to sample.
    :type count: int
    :return: the variables pair with True if selected, else False.
    :rtype: dict
    """
    for name in var_names:
        var_names[name] = False
    rng = secrets.SystemRandom()
    # random.sample() requires a sequence since Python 3.11; passing
    # dict_keys directly raises TypeError there.
    selected = rng.sample(list(var_names), count)
    for name in selected:
        var_names[name] = True
    return var_names
|
99af04b2841f924cf7a2d373305b2e735c0ac7de
| 328,640 |
def join_field_w_lab(wq_single_gain, chl_lab_single_date, wq_site_fieldname, lab_site_fieldname):
    """
    Joins lab data to a single gain for a single date.

    :param wq_single_gain: field measurements for a single gain
    :param chl_lab_single_date: all the lab values for a single sampling date
    :param wq_site_fieldname: the name of the field storing the siteid in the gain file to join on
    :param lab_site_fieldname: the name of the field storing the siteid in the lab file to join on
    :return: dataframe with lab values joined by site to the appropriate field measurement
    """
    return wq_single_gain.merge(
        chl_lab_single_date,
        left_on=wq_site_fieldname,
        right_on=lab_site_fieldname,
        suffixes=('', '_y'),
    )
|
8a6f8012372d3699c7f46876814e50f4a50be1f5
| 198,420 |
def invert_txid(txid: bytes) -> str:
    """Return the byte-order-inverted hex string of a txid.

    :param txid: txid byte string from wallet (32 bytes)
    :type txid: bytes
    :return: inverted txid string
    :rtype: str
    :raises ValueError: if the hex form is not 64 characters long
    """
    tx = txid.hex()
    if len(tx) != 64:
        raise ValueError("txid %r length != 64" % tx)
    # Reversing the bytes directly replaces the original quadratic
    # string-building loop over hex-digit pairs.
    return txid[::-1].hex()
|
97996bb63027c2333aa47ea8315bc0e29305416f
| 438,648 |
def find_sum(a: int, b: int) -> int:
    """Return the sum of two numbers.

    Args:
        a (int): first argument
        b (int): second argument

    Returns:
        int: Result
    """
    total = a + b
    return total
|
04831848172d0b490fe22e354ee324074f48fee7
| 135,086 |
def _check_value(value):
"""Convert the provided value into a boolean, an int or leave it as it."""
if str(value).lower() in ["true"]:
value = True
elif str(value).lower() in ["false"]:
value = False
elif str(value).isdigit():
value = int(value)
return value
|
b5dbc8925de7ea383529a7131d5d65bb1f7e88fc
| 633,628 |
from typing import Mapping
from typing import Any
def is_nested(item: Mapping[Any, Any]) -> bool:
    """Returns whether 'item' is nested at least one level.

    Args:
        item: mapping to examine.

    Returns:
        bool: True when item is a Mapping with at least one value that
        is itself a Mapping.
    """
    if not isinstance(item, Mapping):
        return False
    return any(isinstance(value, Mapping) for value in item.values())
|
9b61d63f100e7cd4da1ba628d7637c3d52a82ce1
| 595,562 |
def arr2text(arr):
    """Format a numpy array into a text string.

    Vectors become space-separated values; matrices become one line per
    row, framed by leading and trailing newlines.  Anything else raises
    RuntimeError.
    """
    ndim = len(arr.shape)
    if ndim == 1:  # vector
        return " ".join(arr.astype(str))
    if ndim == 2:  # matrix
        rows = [arr2text(row) for row in arr]
        return "\n" + "\n".join(rows) + "\n"
    raise RuntimeError('arr2text can only convert vector or matrix.')
|
680162ca71b8fab5591c4fccff8e253e636bffa3
| 573,603 |
import six
def keys_to_string(data):
    """
    Function to convert all the unicode keys in string keys
    """
    # NOTE(review): on Python 3, six.string_types is (str,), so this
    # encodes every str key to *bytes* ("utf8", errors ignored) — the
    # opposite of a unicode->str conversion there.  Presumably written
    # for Python 2; confirm the target interpreter before reuse.
    if isinstance(data, dict):
        # Iterate over a snapshot of the keys because entries are
        # deleted and re-inserted while looping.
        for key in list(data.keys()):
            if isinstance(key, six.string_types):
                value = data[key]
                # Recurse first so nested dicts are converted as well.
                val = keys_to_string(value)
                del data[key]
                data[key.encode("utf8", "ignore")] = val
    return data
|
c4f88f052b8426866020783868906d3d0c1175f9
| 559,852 |
def select_captions(annotations, image_ids):
    """Select captions of the given image_ids and return them together
    with their image IDs (newlines stripped from each caption)."""
    wanted = set(image_ids)  # set gives O(1) membership tests
    captions = []
    caption_image_ids = []
    for annotation in annotations:
        img_id = annotation['image_id']
        if img_id not in wanted:
            continue
        captions.append(annotation['caption'].replace('\n', ''))
        caption_image_ids.append(img_id)
    return captions, caption_image_ids
|
8ac6fc4f96160b6669080432a864ae049d8eac91
| 490,242 |
def density(T, S, P):
    """
    Computes the density of seawater from Gill (1982)
    Computes the density of seawater using the equation of state in Gill
    (1982), *Ocean-Atmosphere Dynamics*, Academic Press, New York. The
    equations for this code are taken from Appendix B in Crounse (2000).
    Parameters
    ----------
    T : float
        temperature (K)
    S : float
        salinity (psu)
    P : float
        pressure (Pa)
    Returns
    -------
    rho : float
        seawater density (kg/m^3)
    """
    # Convert T to deg C and P to bar, the units used by the fit.
    T = T - 273.15
    P = P * 1.e-5
    # Compute the density at atmospheric pressure (polynomial fit in
    # temperature and salinity; coefficients from Crounse (2000) App. B)
    rho_sw_0 = (
        999.842594 + 6.793952e-2 * T - 9.095290e-3 * T**2
        + 1.001685e-4 * T**3 - 1.120083e-6 * T**4 + 6.536332e-9 * T**5
        + 8.24493e-1 * S - 5.72466e-3 * S**(3./2.) + 4.8314e-4 * S**2
        - 4.0899e-3 * T*S + 7.6438e-5 * T**2 * S - 8.2467e-7 * T**3 *
        S + 5.3875e-9 * T**4 * S + 1.0227e-4 * T * S**(3./2.)
        - 1.6546e-6 * T**2 * S**(3./2.)
    )
    # Compute the pressure correction coefficient (secant bulk modulus)
    K = (
        19652.21 + 148.4206 * T - 2.327105 * T**2 + 1.360477e-2 * T**3
        - 5.155288e-5 * T**4 + 3.239908 * P + 1.43713e-3 * T * P
        + 1.16092e-4 * T**2 * P - 5.77905e-7 * T**3 * P
        + 8.50935e-5 * P**2 - 6.12293e-6 * T * P**2
        + 5.2787e-8 * T**2 * P**2 + 54.6746 * S - 0.603459 * T * S
        + 1.09987e-2 * T**2 * S - 6.1670e-5 * T**3 * S
        + 7.944e-2 * S**(3./2.) + 1.64833e-2 * T * S**(3./2.)
        - 5.3009e-4 * T**2 * S**(3./2.) + 2.2838e-3 * P * S
        - 1.0981e-5 * T * P * S - 1.6078e-6 * T**2 * P * S
        + 1.91075e-4 * P * S**(3./2.) - 9.9348e-7 * P**2 * S
        + 2.0816e-8 * T * P**2 * S + 9.1697e-10 * T**2 * P**2 * S
    )
    # High-pressure equation of state: rho(T,S,P) = rho(T,S,0)/(1 - P/K)
    return rho_sw_0 / (1 - P / K)
|
f0a30b88c4da4b0ac3e3cc8b8e7abf2491c8b8a7
| 284,660 |
def common_bit(lines: list, column_number: int) -> int:
    """Tally the bits of one column over binary-number strings.

    Returns 0 when the column holds equally many 1s and 0s, a positive
    value when 1s dominate, and a negative value when 0s dominate.
    """
    cb = 0
    for bits in lines:
        # Compare against the characters '0'/'1': the entries are
        # strings, so the original comparison with the integers 0 and 1
        # never matched and the function always returned 0.
        if bits[column_number] == '0':
            cb -= 1
        elif bits[column_number] == '1':
            cb += 1
    return cb
|
c322ef48be3ff3a9c0d19e0049876a950032cacb
| 498,360 |
def task_flake8() -> dict:
    """Run flake8 check."""
    actions = ['flake8 setup.py wdom tests']
    return {'actions': actions}
|
bec087b8cc04b47c64d9e451e4bde5552f9036bd
| 570,461 |
from pathlib import Path
def get_world_paths() -> list:
    """
    Returns a list of absolute paths to the worlds on the server.

    A world is any directory directly under the server root (the parent
    of this module's package) that contains a ``level.dat`` file.
    """
    server_dir = Path(__file__).resolve().parents[1]
    # p.is_dir() must be *called*: the bare bound method is always
    # truthy, which silently disabled the directory check before.
    return [
        p.absolute()
        for p in server_dir.iterdir()
        if p.is_dir() and (p / "level.dat").is_file()
    ]
|
bf1c23c6a1c928dc66470db2e11b49ad2fc9e5d9
| 2,529 |
def calc_moving_average(data, n):
    """Calculates the average 'daily_deaths' over the last n entries.

    :param data: list of per-day dictionaries with a 'daily_deaths' key
    :param n: number of days used to calculate the moving average
    :return: integer (truncated) value of the average
    """
    recent_days = data[-n:]
    total = sum(day["daily_deaths"] for day in recent_days)
    return int(total / n)
|
5e370e01036924243730f7f29d4d3dcf7e33c36b
| 84,262 |
def to_list(value) -> list:
    """
    Wrap value in a list if it's not already a list.

    Arguments
    ---------
    value : Any
        Value to wrap in list.

    Returns
    -------
    list
        The value itself if it is a list, otherwise a one-element list
        containing it.
    """
    if isinstance(value, list):
        return value
    return [value]
|
16c690f9670d51f891d18d5f6a40abf9b2d6a3dc
| 358,657 |
def get_unlabled_last_index(
        num_unlabeled_samples,
        len_dataset,
        len_class):
    """
    Compute, within a class's array of sample indices, the last index of
    the unlabeled part.

    For example, CIFAR100 test data has len_dataset 10000 with 1000
    samples per class.  Requesting 9000 unlabeled samples gives a ratio
    of 9/10, hence 9/10 * len_class unlabeled samples per class.

    :param num_unlabeled_samples: number of unlabeled samples from the test set
    :param len_dataset: the total number of samples in the initial test set
    :param len_class: the number of samples for a given class
    :return: for the array of sample indices for the class, the last
        index for the unlabeled part

    >>> get_unlabled_last_index(9000, 10000, 100)
    90
    """
    unlabeled_ratio = num_unlabeled_samples / len_dataset
    return int(unlabeled_ratio * len_class)
|
45a955dfc37b765c6458e31f2d1c0d54075ec5fb
| 343,506 |
def _addsubimg(img, subimage, bbox):
"""
Add a ``subimage`` to ``img`` at the specified ``bbox`` bounding
box.
"""
x0, x1, y0, y1 = bbox
img[y0:y1, x0:x1] += subimage
return img
|
5e8a3b8b3d828f2ba25059769e56c7b4bce00394
| 502,856 |
def d4(dx, fc, i):
    """Fourth-order centered first derivative of *fc* at index *i*."""
    stencil = -fc[i + 2] + 8.0 * fc[i + 1] - 8.0 * fc[i - 1] + fc[i - 2]
    return stencil / (12.0 * dx)
|
e36538171d1d032ea0098a5997886885b6152d6b
| 575,907 |
import tarfile
def _safe_is_tarfile(path):
"""safe version of is_tarfile, return False on IOError"""
try:
return tarfile.is_tarfile(path)
except IOError:
return False
|
8335f4e18eb0ef49768703e2d20dd4a8c6396203
| 240,976 |
def _make_subplot(fig, subplotnum, title):
    """
    Generates a subplot on the given figure.
    Args:
        fig (matplotlib.Figure): The figure to add the subplot to.
        subplotnum (int): The options for the subplot. Needs to have 3 digits
            where the first digit stands for the total number of rows, the
            second for the number of columns and the third is the index for
            the subplot.
        title (str): The title for the plot.
    Returns:
        ax (matplotlib.Axes): The generated subplot object.
    """
    ax = fig.add_subplot(subplotnum)
    # Axis labels are fixed for this application: position in Bohr,
    # energy in Hartree.
    ax.set_xlabel("x [Bohr]")
    ax.set_ylabel("Energy [Hartree]")
    ax.set_title(title)
    return ax
|
6717f714c7662e91729385fd37b14f964e1ad803
| 536,339 |
def read_keys(file: str) -> dict:
    """
    Read a file containing the reddit credentials with the format:
    client_id:YOUR REDDIT'S CLIENT ID
    client_secret:YOUR REDDIT'S CLIENT SECRET
    password:YOUR REDDIT'S ACCOUNT PASSSWORD
    username:YOUR REDDIT'S USERNAME
    :param file: the path to the file
    :return: a dictionary containing four entries:
             client_id, client_secret, password, username
    """
    keys = {}
    with open(file, "r") as handle:
        for line in handle:
            # Split on the first ':' only, so values containing colons
            # (e.g. passwords) are kept intact; the original split on
            # every colon and silently truncated such values.
            name, _, value = line.rstrip('\n').partition(":")
            keys[name] = value
    return keys
|
a11ea5ea92c8d1d045e1e2d650bcbb2454b2c92b
| 315,096 |
import torch
def pdist(feature):
    """Compute pairwise distances of features.
    Args:
        feature (torch.Tensor): (batch_size, channels, num_features)
    Returns:
        distance (torch.Tensor): (batch_size, num_features, num_features)
    Notes:
        This method returns square distances, and is optimized for lower memory and faster speed.
        Sqaure sum is more efficient than gather diagonal from inner product.
    """
    # ||x_j||^2 for every feature column: (batch_size, 1, num_features)
    square_sum = torch.sum(feature ** 2, 1, keepdim=True)
    # ||x_i||^2 + ||x_j||^2 for every pair via broadcasting:
    # (batch_size, num_features, num_features)
    square_sum = square_sum.transpose(1, 2) + square_sum
    # Fused batched multiply-add subtracts 2 * <x_i, x_j>, yielding
    # ||x_i - x_j||^2 without materialising the full Gram diagonal.
    distance = torch.baddbmm(square_sum, feature.transpose(1, 2), feature, alpha=-2.0)
    return distance
|
88250b7c169cf6628afa936fafd6e44ce836c9b1
| 602,282 |
def _model_name(name):
    """
    Extracts the main component of a model name by repeatedly removing
    suffixes such as ``Classifier``, ``Regressor``, ``CV``, ``IC``,
    ``Transformer``.  Names starting with ``Select`` or ``Nu`` collapse
    to those prefixes.

    @param name string
    @return shorter string
    """
    if name.startswith("Select"):
        return "Select"
    if name.startswith("Nu"):
        return "Nu"
    suffixes = ('Classifier', 'Regressor', 'CV', 'IC', 'Transformer')
    stripped = True
    # Keep peeling suffixes until a full pass removes nothing.
    while stripped:
        stripped = False
        for suffix in suffixes:
            if name.endswith(suffix):
                name = name[:-len(suffix)]
                stripped = True
    return name
|
8b2f752063d310d0d8f7e8564d44a7fc72dba24e
| 510,854 |
def BuildReachabilityTree(dependency_mapping_files, file_open=open):
  """Builds a reachability tree using entries from dependency mapping files.

  Args:
    dependency_mapping_files: A comma separated list of J2ObjC-generated
        dependency mapping files.  Each line must look like 'entry:dep'.
    file_open: Reference to the builtin open function so it may be
        overridden for testing.

  Returns:
    A dict mapping J2ObjC-generated source files to the corresponding direct
    dependent source files.
  """
  tree = dict()
  for dependency_mapping_file in dependency_mapping_files.split(','):
    with file_open(dependency_mapping_file, 'r') as f:
      for line in f:
        # Split once per line (the original split every line twice).
        fields = line.strip().split(':')
        entry, dep = fields[0], fields[1]
        tree.setdefault(entry, []).append(dep)
  return tree
|
c9ccb4b6305fa5c51925724a67c8e4c281370f74
| 526,166 |
def hadamard_complex(x_re, x_im, y_re, y_im):
    """Elementwise (Hadamard) product of two complex vectors.

    Operands and result are given as separate real/imaginary parts,
    following (a+bi)(c+di) = (ac - bd) + (ad + bc)i.
    """
    real = x_re * y_re - x_im * y_im
    imag = x_im * y_re + x_re * y_im
    return real, imag
|
5aa08bf499fd4835d97d6f8fe4bfda7595c719d1
| 656,059 |
from typing import Any
def check_int(data: Any) -> int:
    """Return *data* unchanged after verifying it is an `int`.

    Raises:
        TypeError: if *data* is not an int.

    NOTE(review): ``bool`` subclasses ``int``, so True/False pass this
    check as well — confirm whether callers rely on that.
    """
    if isinstance(data, int):
        return data
    raise TypeError(data)
|
814155f2407cd0e8b580372679f4cecfcc087d9e
| 6,319 |
def webelement(self):
    """
    Return a webelement instance.

    Performs a fresh lookup on every call via ``self.webdriver`` using the
    XPath stored in ``self._locator_value`` (the result is not cached).

    :returns: webelement instance
    :rtype: :py:obj:`selenium.webdriver.remote.webelement.WebElement`
    :raises NoSuchElementException: if the element cannot be found
    :raises InvalidSelectorException: if the selector is invalid
        or doesn't select an element

    .. seealso::

        `selenium WebElement documentation`_ (external link)

    .. _`selenium WebElement documentation`: https://seleniumhq\
.github.io/selenium/docs/api/py/webdriver_remote/selenium\
.webdriver.remote.webelement.html
    """
    # NOTE(review): find_element_by_xpath was removed in Selenium 4;
    # migrate to find_element(By.XPATH, ...) when the project upgrades.
    return self.webdriver.find_element_by_xpath(self._locator_value)
|
206db114ba5fccc4f2d0838c7e3b55f7fcb0cd38
| 439,283 |
from typing import List
def split_chunks_on(str_: str, maxlen: int, split_char='\n') -> List[str]:
    """
    Split a long string along `split_char` such that every chunk is at most
    `maxlen` characters while packing as many lines per chunk as possible.

    Lines that individually exceed `maxlen` are kept whole (never split).
    """
    sep_len = len(split_char)
    chunks = []        # committed groups of lines
    current = []       # lines accumulated for the chunk being built
    current_len = 0
    for piece in str_.split(split_char):
        piece_len = len(piece) + sep_len  # account for the separator too
        if current_len + piece_len > maxlen:
            # Commit what we have; guard against an empty first chunk when
            # the very first line is already over the limit.
            if current:
                chunks.append(current)
            current = [piece]
            current_len = piece_len
        else:
            current.append(piece)
            current_len += piece_len
    chunks.append(current)  # flush the final, uncommitted chunk
    return [split_char.join(group) for group in chunks]
|
72e2702a1771d5bbc145234c736f62aeec42e7b5
| 96,133 |
def check_winner(board_state):
    """
    Determine the outcome of a tic-tac-toe position.

    Squares equal to 1 belong to one player and squares equal to 0 to the
    other; any other value marks an empty square.  (1 and 0 may stand for
    X or O, and either player may have moved first.)

    Returns 1 if the 1-player has completed a line, 0 if the 0-player has,
    -1 on a full-board tie, and None while the game is still in progress.
    """
    # All index triples that form a winning line on the 3x3 board.
    lines = {
        (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
        (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
        (0, 4, 8), (2, 4, 6),              # diagonals
    }
    ones = {i for i, v in enumerate(board_state) if v == 1}
    zeroes = {i for i, v in enumerate(board_state) if v == 0}
    for line in lines:
        cells = set(line)
        if cells <= ones:
            return 1
        if cells <= zeroes:
            return 0
    # No winner: tie when every square is occupied, else game continues.
    if len(ones) + len(zeroes) == len(board_state):
        return -1
    return None
|
62f02d235ed9f354b014fd1158ca52449cb8429c
| 246,725 |
from typing import Optional
def int_range_to_string(
    begin_optional: Optional[int] = None,
    end_optional: Optional[int] = None,
    unit: Optional[str] = None,
    default_begin_value: str = "",
    default_end_value: str = "",
) -> Optional[str]:
    """
    Change range int to string for TFDS.

    Parameters
    ----------
    begin_optional : Optional[int], optional, default=None
        Start value for string range.
    end_optional : Optional[int], optional, default=None
        End value for string range.
    unit : Optional[str], optional, default=None
        Unit appended to each explicitly-given bound (e.g. "%").
    default_begin_value : str, optional, default=""
        Value used for the begin side when begin_optional is None.
    default_end_value : str, optional, default=""
        Value used for the end side when end_optional is None.

    Returns
    -------
    Optional[str]
        Range string for TFDS load, or None when both bounds are None.

    Examples
    --------
    >>> int_range_to_string(begin_optional=30, unit="%")
    '30%:'
    >>> int_range_to_string(begin_optional=10, end_optional=50, unit="%")
    '10%:50%'
    >>> int_range_to_string(begin_optional=0, end_optional=50, unit="%")
    '0%:50%'
    >>> int_range_to_string(unit="%") is None
    True
    """
    # Use `is None` tests instead of truthiness: the old check silently
    # dropped a legitimate bound of 0.  It also compared strings with
    # `is not`, relying on object identity instead of equality.
    if begin_optional is None and end_optional is None:
        return None
    begin_string: str = (
        default_begin_value if begin_optional is None else str(begin_optional)
    )
    end_string: str = (
        default_end_value if end_optional is None else str(end_optional)
    )
    if unit:
        # The unit suffix decorates only explicitly-given bounds,
        # never the default placeholders.
        if begin_optional is not None:
            begin_string += unit
        if end_optional is not None:
            end_string += unit
    return "{}:{}".format(begin_string, end_string)
|
af209774b269c6baf54b8849867c4d112dae7723
| 55,523 |
def _Line(v_char, v_lengh):
    """Print a horizontal rule of ``v_lengh`` copies of ``v_char``.

    The rule is framed by a blank line above and below.  Returns None
    (the result of ``print``), matching the original behaviour.
    """
    # `str * int` replaces the quadratic character-by-character concatenation.
    return print("\n" + v_char * v_lengh + "\n")
|
c23278290febaa442e4c6659722a8568e356e771
| 447,213 |
def square_root_2param(t, a, b):
    """Two-parameter square-root model ``a * sqrt(t) + b``.

    `a` is the slope, `b` the vertical shift.
    """
    return b + a * t ** 0.5
|
d102f25bdbaec30c562b2c1ba698897df15d7270
| 513,734 |
def divides(i, j):
    """Return True if j divides i evenly, False otherwise.

    j == 0 never divides anything, so it always yields False.
    """
    # The original used `j is 0`, an identity check against an int literal
    # that relies on CPython small-int interning and raises a SyntaxWarning
    # on modern Pythons; compare by value instead.
    if j == 0:
        return False
    return i % j == 0
|
a4eb4b81d9a7f43a63b14dd34bf4a2c1cd51c901
| 601,761 |
def reversel(l, a=0, b=None):
    """Reverse a list in place over the half-open window [a, b).

    `a` and `b` default to the start and the end of the list.
    Returns the same (mutated) list object.
    """
    if b is None:
        b = len(l)
    assert a <= b <= len(l)
    # Reverse the window via slice assignment; this mutates l in place.
    window = l[a:b]
    window.reverse()
    l[a:b] = window
    return l
|
aa9f97049c6a431f52daf81c8f86d79c448c5aad
| 570,284 |
def singlechar_xor(input_bytes, key_value):
    """XOR every byte of *input_bytes* with *key_value* and return the result.

    Args:
        input_bytes (bytes): Data to transform.
        key_value (int): Single-byte key in the range 0-255.

    Returns:
        bytes: Transformed data, same length as the input.
    """
    # One-pass construction avoids the quadratic cost of repeatedly
    # concatenating immutable bytes objects.
    return bytes(b ^ key_value for b in input_bytes)
|
04b1f202b92df1e26f4bec114f166c07e0d25725
| 465,752 |
def format_currency(amount):
    """Format a number as a USD string with exactly 2 decimal places.

    Args:
        amount: int or float amount of money.

    Returns:
        str: e.g. ``'3.50 USD'``.
    """
    # The format spec handles every case the old str-splitting missed:
    # ints (which have no '.' and crashed), and floats with long decimal
    # expansions (e.g. 0.30000000000000004), which were emitted verbatim.
    return '{0:.2f} USD'.format(amount)
|
b90d064fe6b095227e5c590ad8d175f072e07957
| 701,823 |
def parse_method(name):
    """Parse hyperparameters from string name to make legend label.

    Parameters
    ----------
    name : str
        Name of method, e.g. ``"es_1_vm_0_alpha_0.1"``.

    Returns
    -------
    string : str
        Formatted label, e.g. ``r'ES, $\\alpha=0.1$'``.
    """
    tokens = []
    if name.split('es_')[1][0] == '1':
        tokens.append(r'ES')
    if name.split('vm_')[1][0] == '1':
        tokens.append(r'VM')
    alpha = name.split('alpha_')[1].split('_')[0]
    tokens.append(r'$\alpha=%s$' % alpha)
    # Joining with ", " reproduces the original's conditional separators.
    return r', '.join(tokens)
|
eb6824a6ab7ca126c924fa7acca2725f8b06379e
| 8,752 |
def _to_job_id(task_id):
    """Convert e.g. ``'task_201601081945_0005_m_000005'``
    or ``'application_201601081945_0005'`` to
    ``'job_201601081945_0005'``."""
    # Keep only the timestamp and sequence fields, re-rooted under 'job'.
    middle = task_id.split('_')[1:3]
    return '_'.join(['job'] + middle)
|
8a298e10bdafb3fed1b512a62b96cf252084b9a1
| 508,437 |
def __args_to_weka_options(args):
    """
    Build a weka-style command line option list from a dict.

    Keys become ``-key`` flags.  A truthy value follows its flag, an empty
    string value yields a bare flag, and any other falsy value (None, 0)
    drops the entry entirely.

    :param args: dictionary with command line input
    :return: list of command line arguments
    """
    result = []
    for key, value in args.items():
        flag = "-" + key
        if value:
            result.extend((flag, value))
        elif value == "":
            result.append(flag)
    return result
|
1d480ffaf840ae67d805d7845684eef24d3da583
| 700,724 |
import torch
def unwrap_packed_sequences_recursive(packed):
    """Recursively extract the tensors underlying `PackedSequence` objects.

    A `torch.nn.utils.rnn.PackedSequence` is replaced by its internal
    ``.data`` tensor (time-axis-first, no padding — unlike
    `torch.nn.utils.rnn.pad_packed_sequence`).  Tuples are processed
    element-wise; anything else is returned untouched.  The inverse
    operation is `wrap_packed_sequences_recursive`.

    Args:
        packed (object): Packed sequences (possibly nested in tuples).

    Returns:
        object: The same structure with every `PackedSequence` replaced by
        its underlying tensor.
    """
    if isinstance(packed, torch.nn.utils.rnn.PackedSequence):
        return packed.data
    if isinstance(packed, tuple):
        return tuple(map(unwrap_packed_sequences_recursive, packed))
    return packed
|
cd5ef3ee3a20ab59f67a43e633035b25316310dd
| 149,419 |
def _get_center(spot, length):
    """
    info: computes the starting coordinate that centers an object of the
    given length on ``spot`` (integer division, rounding the offset down)
    :param spot: int
    :param length: int: 0 - inf
    :return: int
    """
    half = length // 2
    return spot - half
|
75676d30e527bada773b6f4809ed2a6d82e64494
| 446,993 |
from functools import reduce
def foldl(func, start, iterable):
    """foldl: fold a function over an iterable from left to right.

    Equivalent to ``func(...func(func(start, x0), x1)..., xn)``; returns
    `start` unchanged for an empty iterable.

    func: the function to reduce with
    start: the initial starting value
    iterable: the iterable to reduce over
    """
    accumulator = start
    for element in iterable:
        accumulator = func(accumulator, element)
    return accumulator
|
c8fa84d1f26b274c179f0495b57cf3da5b5a7975
| 140,027 |
def double_qoute(s):
    """Wrap s in double quotes, as needed to produce C string literals.

    >>> double_qoute('program')
    '"program"'
    """
    return '"{}"'.format(s)
|
36d5409a44a4b3dd462d92e32258f10acaadcf33
| 193,511 |
def get_params(param_list, value_list=None):
    """Collect additional params to be used for XSL params

    :param list param_list: a list of params; usually specified in the migration module with `PARAM_LIST` constant
    :param list value_list: a list of values to be used when constructing the params dictionary; if this is not
        provided then the user will be prompted to enter a value for each param
    :return: a dictionary of params to be use in the XSL
    :rtype: dict
    :raises ValueError: if a non-empty value_list's length differs from param_list's
    """
    params = dict()
    if value_list:
        # Validate once up front.  The original re-checked inside the loop
        # via `assert`, which is stripped under `python -O` (leaving a bare
        # IndexError as the failure mode).
        if len(param_list) != len(value_list):
            raise ValueError("incompatible lengths for param_list and value_list; they should be equal")
        params = dict(zip(param_list, value_list))
    else:
        # Fall back to interactive entry when no values were supplied.
        for param in param_list:
            params[param] = input("{}: ".format(param))
    return params
|
974efbf55940fd26490fb140e987dbece33545ee
| 211,806 |
def get_code_start_line_num(source_lines):
    """
    Get the start code line number exclude comments.

    Scans for the index of the first line that is not blank, not a ``#``
    comment, and not part of a triple-quoted string opened at line start.

    Args:
        source_lines (list[str]): Split results of code.

    Returns:
        int, the start line number (0 if no such line is found).
    """
    # `stack` is non-empty while we believe we are inside an unterminated
    # triple-quoted string; it records which delimiter opened it.
    stack = []
    index = 0
    for i, line in enumerate(source_lines):
        line_strip = line.strip()
        if line_strip.startswith('#'):
            continue
        if line_strip.startswith('"""'):
            # A line that both starts and ends with the delimiter is treated
            # as a self-contained one-liner and pushes nothing.
            # NOTE(review): a line that is *exactly* '"""' also matches both
            # tests and is skipped without changing state — confirm intended.
            if not line_strip.endswith('"""'):
                stack.append('"""')
            continue
        if line_strip.startswith("'''"):
            # NOTE(review): a ''' line occurring *inside* an open """ string
            # pushes a second entry; mixed-delimiter docstrings may confuse
            # the tracking.
            if not line_strip.endswith("'''"):
                stack.append("'''")
            continue
        if line_strip.endswith('"""') or line_strip.endswith("'''"):
            # Assumed to close the most recently opened delimiter.
            # NOTE(review): pops unconditionally — a code line that merely
            # ends with a quote triple while no string is open would raise
            # IndexError here.
            stack.pop()
            continue
        if line_strip != '' and not stack:
            index = i
            break
    return index
|
d80247ea879758a0dfc0f5c025aefeab31da7343
| 595,747 |
def load_points(filename: str) -> set:
    """
    Loads a set of x,y points from the specified text file.

    Each point is a pair of comma separated integers on its own line.
    Invalid points, whitespace, and Python style comments are ignored;
    skipped lines are reported on stdout.

    :param filename: A string specifying the text file to read.
    :return: A standard set of points, excluding any duplicates read from the file.
    """
    points = set()
    try:
        with open(filename, "rt") as input_file:
            for line_num, raw_input_line in enumerate(input_file.readlines(), start=1):
                # Drop python-style line comments first, so a comment can't
                # hide data (and a comment-only line becomes blank).
                comment_pos = raw_input_line.find("#")
                if comment_pos != -1:
                    raw_input_line = raw_input_line[:comment_pos]
                # Silently skip lines that are blank after comment removal.
                if raw_input_line.strip() == "":
                    continue
                x_text, sep, y_text = raw_input_line.partition(",")
                if not sep:
                    # Lines should be "int, int" (ignoring whitespace and comments).
                    print("Skipping line {}. No comma separator found. Line reads: \"{}\"".format(line_num,
                                                                                                  raw_input_line.strip()))
                    continue
                try:
                    points.add((int(x_text), int(y_text)))
                except ValueError:
                    # One or both coordinates were not integers; skip and log.
                    print("Skipping line {}. Points must be two integers. Line reads: \"{}\"".format(line_num,
                                                                                                     raw_input_line.strip()))
    except FileNotFoundError:
        print("Could not open file \"{}\".".format(filename))
    return points
|
52d47436466120a1fe8bd8b16fe7574746e5dcf2
| 608,259 |
def _get_query_alignment_pos_in_inferred_read(clippings, is_reverse, inferred_read_length, query_alignment_pos):
    """
    Convert a query-alignment position (e.g. ``aln.query_alignment_start``)
    into a coordinate on the full-length forward read, hard clips included.

    For a forward alignment the position shifts right by the number of
    left hard-clipped bases; for a reverse alignment the coordinate is
    mirrored onto the forward strand.

    Args:
        clippings: tuple (l_hardclip, r_hardclip, l_softclip, r_softclip);
            only the left hard-clip length is used here.
        is_reverse: True when the alignment is on the reverse strand.
        inferred_read_length: full read length including hard clips.
        query_alignment_pos: position in query-alignment coordinates.

    ...doctest:
    >>> clippings = (7, 5, 3, 4)
    >>> [_get_query_alignment_pos_in_inferred_read(clippings, False, 51, i) for i in (3, 4, 10, 24, 25)]
    [10, 11, 17, 31, 32]
    >>> [_get_query_alignment_pos_in_inferred_read(clippings, True, 51, i) for i in (3, 4, 34)]
    [40, 39, 9]
    """
    l_hardclip = clippings[0]
    if is_reverse:
        pos = inferred_read_length - l_hardclip - query_alignment_pos - 1
    else:
        pos = l_hardclip + query_alignment_pos
    # Exclusive end (== length) and exclusive start (== -1) are tolerated.
    assert pos <= inferred_read_length
    assert pos >= -1
    return pos
|
e440e2246876af69aa07ab92b988e4f39708e6f7
| 453,337 |
def is_string_in_file(string, filepath):
    """
    Check if a string is in a file

    Args:
        :string: string to search for
        :filepath: path of file

    Returns:
        :is_found: True if found, False otherwise
    """
    # `with` guarantees the handle is closed; the original left the file
    # open until garbage collection.
    with open(filepath) as f:
        return string in f.read()
|
3705573f37c604f61f5f5da14e3726f13413de96
| 60,636 |
from typing import List
import csv
def parse_file(path: str, delimiter: str = ",") -> List[List[str]]:
    """Reads CSV file and returns Table representation for it.

    Args:
        path (str): File path.
        delimiter (str): CSV delimiter (default: `,`)

    Returns:
        List[List[str]]: List of lists with rows of data (still in str
        format), with surrounding whitespace stripped from every field.
    """
    with open(path, encoding="utf-8") as file:
        reader = csv.reader(file, delimiter=delimiter)
        rows = []
        for record in reader:
            rows.append([field.strip() for field in record])
        return rows
|
bf53181ca1165f8cf5ead0e1d92111c7b9f70bca
| 306,524 |
def is_base_pair(s1, s2):
    """ (str, str) -> bool

    Precondition: s1 and s2 both contain a single character from 'A', 'T', 'C'
    or 'G'.

    Return True iff s1 and s2 form a base pair (A-T or C-G, either order).

    >>> is_base_pair('A','T')
    True
    >>> is_base_pair('G','T')
    False
    """
    return (s1, s2) in {('A', 'T'), ('T', 'A'), ('C', 'G'), ('G', 'C')}
|
feb8d7ac23f610be077f9e898bd796b781a5d5ca
| 102,395 |
import json
def verify_json(filename):
    """ Checks that a JSON file is valid JSON """
    try:
        with open(filename) as jsonfile:
            # json.load parses straight from the file object; a parse
            # failure raises JSONDecodeError, a ValueError subclass.
            json.load(jsonfile)
    except ValueError:
        return False
    return True
|
ba3a0bce0acbb321b7f7071d008f2b92cbc56875
| 622,828 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.