content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k)
---|---|---|
import logging
def parse_logs_head(data: bytes) -> set[str]:
    """Retrieve object hashes from logs/HEAD file."""
    ret = set()
    lines = data.split(b"\n")
    for line in lines:
        fields = line.split(b" ")
        if len(fields) < 2:
            break
        for i in range(2):  # first two fields are commit hashes
            digest = fields[i].decode()
            if digest == "0000000000000000000000000000000000000000":
                continue
            ret.add(digest)
            logging.info(f"Commit {digest} found in logs/HEAD")
    return ret
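# Illustrative sketch (hypothetical reflog line, not part of the original entry):
# a logs/HEAD line has the form b"<old-sha> <new-sha> <committer> <timestamp> ...",
# so a first-commit line whose "old" hash is all zeros contributes only the new hash:
# parse_logs_head(b"0000000000000000000000000000000000000000 d6f00c5 A U Thor ...")
# -> {"d6f00c5"}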
|
03ffb95bef5951efb57d98d9adb68b66b044b797
| 570,250 |
def array_split(ary, n):
    """
    >>> data = [1,2,3,4,5]
    >>> array_split(data,3)
    [[1, 2], [3, 4], [5]]
    >>> grps = array_split(range(0,1121),8)
    >>> total_len = 0
    >>> for grp in grps: total_len += len(grp)
    >>> assert(total_len == len(range(0,1121)))
    """
    step = int(round(len(ary) / float(n)))
    if step == 0:
        step = 1
    ret = []
    idx = 0
    for ii in range(0, n):
        if ii == n - 1:
            app = ary[idx:]
        else:
            app = ary[idx:idx + step]
        if len(app) > 0:
            ret.append(app)
        idx = idx + step
    return ret
|
a36bb14778ba9b3ed8a2bd77990ad6baf0d14271
| 629,934 |
def get_message_type(msg_or_cls):
    """Returns ROS1 message type name, like "std_msgs/Header"."""
    return msg_or_cls._type
|
42db77fbb59ada627e66a5878247fcd030e4773a
| 196,437 |
def convert_to_undirected(G):
    """Return a new undirected representation of the graph G."""
    return G.to_undirected()
|
018e1a1a5773a025729560ad03a9eec85dd1e198
| 680,770 |
def convert_to_coordinate(raw_data: float, sec_delim: str, min_delim: str, deg_delim: str) -> str:
    """
    Description:
        Converts a float value to geographical data
        (for example, 27°16'37.9")
    Arguments:
        - raw_data : `float` value as degree
        - sec_delim : `str` delimiter char for seconds
        - min_delim : `str` delimiter char for minutes
        - deg_delim : `str` delimiter char for degrees
    Returns:
        `str` : geographical data as string
    """
    degrees = int(raw_data)
    minutes_float = (raw_data - degrees) * 60
    minutes = int(minutes_float)
    seconds_float = (minutes_float - minutes) * 60
    seconds = round(seconds_float, 1)
    return str(degrees) + deg_delim + str(minutes) + min_delim + str(seconds) + sec_delim
|
df1a7bde98f879f3dd1091f4d70e471627485084
| 596,145 |
def _is_valid_color(colors: dict, user_color: str):
    """Returns true if user_color is a graph color provided by matplotlib
    Positional Arguments:
        colors : dict
            - dict of all colors supported by matplotlib passed in
              by the graph_colors decorator
        user_color : str
            - a string representing the name of a color
    """
    try:
        colors[user_color]
    except KeyError:
        return False
    return True
|
95acb7270a5f0d6ebd8ba2b69a4e36b434251831
| 170,758 |
def truncate_int(tval, time_int):
    """Truncate tval down to the nearest multiple of time_int."""
    # floor division, not true division, so the quotient is truncated in Python 3
    return (int(tval) // time_int) * time_int
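# Worked example (the floor division is what performs the truncation):
# truncate_int(127, 60) -> 120, i.e. 127 rounded down to a whole minute.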
|
3c366f417ab9e84b5337814f78d7888fca4d0265
| 36,717 |
def convert_temperature(val):
    """ Convert temperature from Kelvin (unit of 1/16th K) to Celsius
    """
    return val * 0.0625 - 273.15
|
24b74df84c244c87aa86b2753f93dc6a587e61ce
| 398,989 |
import re
def output_to_dict(output):
    """
    Convert the ROUGE output into python dictionary for further
    processing.
    """
    # 0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
    pattern = re.compile(
        r"(\d+) (ROUGE-\S+) (Average_\w): (\d\.\d+) "
        r"\(95%-conf\.int\. (\d\.\d+) - (\d\.\d+)\)")
    results = {}
    for line in output.split("\n"):
        match = pattern.match(line)
        if match:
            sys_id, rouge_type, measure, result, conf_begin, conf_end = \
                match.groups()
            measure = {
                'Average_R': 'recall',
                'Average_P': 'precision',
                'Average_F': 'f_score'
            }[measure]
            rouge_type = rouge_type.lower().replace("-", '_')
            key = "{}_{}".format(rouge_type, measure)
            results[key] = float(result)
            results["{}_cb".format(key)] = float(conf_begin)
            results["{}_ce".format(key)] = float(conf_end)
    return results
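# Illustrative call, using the sample line from the comment above:
# output_to_dict("0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)")
# -> {'rouge_1_recall': 0.02632, 'rouge_1_recall_cb': 0.02632, 'rouge_1_recall_ce': 0.02632}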
|
671165127d22ead060c49fe835cba8d9130c8c27
| 433,961 |
def follow_abspath(json, abspath):
    """json: an arbitrarily nested python object, where each layer
    is a list, dict, or tuple.
    abspath: a list of keys or array indices to be followed.
    **Returns:** the child of json[abspath[0]][abspath[1]]...[abspath[-1]]"""
    out = json
    for elt in abspath:
        out = out[elt]
    return out
|
f870debbd3fe8ab05f93a9f0df979364706ece87
| 480,779 |
def tree_pop_fields(root, fields):
    """deletes given fields (as iterable of keys) from root and all its children (recursively)
    returns updated root """
    for f in fields:
        root.pop(f)
    if root['is_leaf']: return root
    for i in range(len(root['children'])):
        root['children'][i]['child'] = tree_pop_fields(root['children'][i]['child'], fields)
    return root
|
1dca88301219ad2a9c83642024ab0db08472b507
| 30,652 |
def short_version(version):
    """Get a shorter version, only with the major and minor version.
    :param version: The version.
    :type version: str
    :return 'major.minor' version number.
    :rtype float
    """
    return float('.'.join(version.split('.')[0:2]))
|
ab8c4cca8af8eb47c9ddc6ae117fc725ce7956e6
| 176,588 |
def to_ms(s):
    """Convert from seconds to milliseconds.
    Args:
        s (float, int): Value in seconds.
    Returns:
        int: Value in milliseconds.
    """
    return int(s * 1e3)
|
18b735962dbc4445dfc704e58e2c625d33b9d8f7
| 524,858 |
def iconv(idata, input):
    """
    compare idata and input, if it is the same return 1, if not return 0
    :param idata: (char1) char1 input
    :param input: (char1) char1 input
    :return: returns a 1 if the values are the same or a 0 if they are not the same
    """
    ret = 0
    if idata == input:
        ret = 1
    return int(ret)
|
1e42e22e8f4fa78f2f1a4244dc301f62fa7fef7c
| 472,710 |
def getabovethresh(inputlist, thresh):
    """creates a binary list, indicating comparison of
    inputlist entries to thresh value
    1 if above (exclusive), else 0"""
    abovethreshlist = [1 if i > thresh else 0
                       for i in inputlist]
    return abovethreshlist
|
a3b1161960a682d36d827c558b0c0a6d726b49a2
| 559,987 |
import base64
def convert_bytes_to_base64(data: bytes) -> str:
    """Convert bytes data to base64 for serialization."""
    return base64.b64encode(data).decode("ascii")
|
a810937ffaf0c40d318d02b3838accb294a5c226
| 640,512 |
import math
def text_clip_center(text, width):
    """
    Center-clip a string `text` to width `width`. That is, print starting from
    the center, out.
    """
    clip = 0
    if len(text) > width:
        clip = len(text) - width
    clip1 = math.ceil(clip / 2)
    clip2 = clip - clip1
    # avoid text[:-0], which returns an empty string when clip2 == 0
    return text[clip1:len(text) - clip2]
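# Worked example: text_clip_center("abcdefgh", 4) removes 4 chars, 2 from each
# side, giving "cdef". With the previous text[clip1:][:-clip2] form,
# text_clip_center("abcdefgh", 8) returned "" instead of "abcdefgh".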
|
e6cbad4d9ad1fc2ca5c7d1bbe5e022a14598f5f5
| 440,521 |
def merge_sort(arr):
    """
    time complexity: O(n*logn)
    space complexity: O(n)
    :param arr: list
    :return: list
    """
    if not isinstance(arr, list):
        raise TypeError
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    left = merge_sort(arr[:mid])
    right = merge_sort(arr[mid:])
    k, i, j = 0, 0, 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            arr[k] = left[i]
            i += 1
        else:
            arr[k] = right[j]
            j += 1
        k += 1
    if i < len(left):
        arr[k:] = left[i:]
    if j < len(right):
        arr[k:] = right[j:]
    return arr
|
7297e20fa3a01b85fa7f5cebef1921e9c17c7be9
| 70,172 |
import re
def newlines_to_spaces(text):
    """
    Strips newlines and any spacing surrounding the newlines and replaces them with a single space
    :param text: The text to parse
    :type text: str
    """
    # raw string so '\s' is passed to the regex engine rather than warned about
    newline_pattern = re.compile(r'\s*\n\s*')
    return newline_pattern.sub(' ', text)
|
ae331ded725e59b1cf1dea2025f17131636225fd
| 442,726 |
def getText(node):
    """ Get textual content of an xml node.
    """
    text = []
    for n in node.childNodes:
        if n.nodeType == n.TEXT_NODE: text.append(n.data)
    return ''.join(text)
|
e8dda800260ba7141bd59d0c77eb3c73ac2bac0a
| 113,005 |
def iso_week(date):
    """Return the week of ISO date 'date'."""
    return date[1]
|
dd1183d60f0ffa04784d882effcd969034b9a54a
| 365,873 |
def get_vgci_warnings(tc):
    """
    extract warnings from the parsed test case stderr
    """
    warnings = []
    if tc['stderr']:
        for line in tc['stderr'].split('\n'):
            # For each line
            if "WARNING" in line and "vgci" in line:
                # If it is a CI warning, keep it
                warnings.append(line.rstrip())
    return warnings
|
d167c66374754e6099cb20d05e49c5ad609b472b
| 651,936 |
def _parse_duration(duration: str) -> float:
    """
    Convert string value of time (duration: "25:36:59") to a float value of seconds (92219.0)
    """
    try:
        # {(1, "s"), (60, "m"), (3600, "h")}
        mapped_increments = zip([1, 60, 3600], reversed(duration.split(":")))
        seconds = sum(multiplier * int(time) for multiplier, time in mapped_increments)
        return float(seconds)
    # ! This usually occurs when the wrong string is mistaken for the duration
    except (ValueError, TypeError, AttributeError):
        return 0.0
|
ef2a4a14666695a226a34622e1f049dd841c2452
| 177,951 |
import requests
def s3_obj_exists(url):
    """Helper function to determine whether an s3 object exists
    Args:
        url (str): https URL for a public object stored in s3
    """
    resp = requests.head(url)
    return resp.status_code != 404
|
a77431992f8d19c13055a33cd86e96afb8cdfdf8
| 348,692 |
def create_norm_peptide_column_if_needed(row_meta_df, gcp_normalization_peptide_field, gcp_normalization_peptide_id):
    """If it doesn't already exist, create a new column indicating the norm
    peptide for each probe.
    Modifies row_meta_df in-place.
    Args:
        row_meta_df (pandas df):
        gcp_normalization_peptide_field (string): metadata field in
            GCT that indicates which norm peptide to use for each probe
        gcp_normalization_peptide_id (string): if
            gcp_normalization_peptide_field not in the GCT, a new column will
            be created and filled with just this peptide_id
    Returns:
        None
    """
    # If gcp_normalization_peptide_field is already in the row metadata, we'll
    # just use it
    if gcp_normalization_peptide_field not in row_meta_df.columns.values:
        assert gcp_normalization_peptide_id is not None, (
            "gcp_normalization_peptide_field not present in metadata headers, " +
            "so gcp_normalization_peptide_id must not be None. " +
            "gcp_normalization_peptide_field: {}, " +
            "gcp_normalization_peptide_id: {}").format(
                gcp_normalization_peptide_field, gcp_normalization_peptide_id)
        # Otherwise, create a new column and fill it with gcp_normalization_peptide_id
        row_meta_df[gcp_normalization_peptide_field] = gcp_normalization_peptide_id
    return None
|
2b499bb23a186a6c7bff46998a2377fa38b8f114
| 644,617 |
def job_id(config: dict) -> str:
    """The ID for the current job"""
    return config["job_id"]
|
b03a0b77f2794e70665d8599a572e97b5aab8b31
| 554,391 |
import numpy as np
def closest(v, L):
    """
    Returns the element in L (a sorted numpy array) which is closest to v
    >>> R = np.array([9,3,6])
    >>> R.sort()
    >>> R
    array([3, 6, 9])
    >>> [closest(i, R) for i in range(1,12)]
    [3, 3, 3, 3, 6, 6, 6, 9, 9, 9, 9]
    """
    i = L.searchsorted(v)
    return L[-1 if i == len(L) else (0 if i == 0 else (i if v - L[i - 1] > L[i] - v else i - 1))]
|
8930e49e36bb191c17e0dfe4e3cdd98da67f69d0
| 55,184 |
from typing import Tuple
def process_arguments(arguments: str) -> Tuple[str, str, int]:
    """
    Process the arguments given to !weather, dividing them into state, location and future
    Uses default of QLD, Brisbane and 0 if not given
    """
    args = arguments.split(" ") if arguments else []
    if args and args[-1].lstrip('-+').isnumeric():
        future = int(args.pop())
    else:
        future = 0
    # get location
    if args:
        if args[0].upper() in ["NSW", "ACT", "NT", "QLD", "SA", "TAS", "VIC", "WA"]:
            state = args.pop(0).upper()
        else:
            state = "QLD"
        location = " ".join(args)
    else:
        state = "QLD"
        location = "Brisbane"
    return state, location, future
|
8bd887bab7d3bbc002973e4a327610933130021a
| 31,411 |
def knight_tour(n, path, u, limit):
    """
    Conduct a knight's tour using DFS.
    Args:
        n: current depth of the search tree.
        path: a list of vertices visited up to this point.
        u: the vertex we wish to explore.
        limit: the number of nodes in the path.
    Returns:
        done (bool)
    """
    path.append(u)
    if n < limit:
        nbrList = list(u.get_connections())
        i = 0
        done = False
        while i < len(nbrList) and not done:
            # only recurse into neighbours not already on the path;
            # the original local `visited = set(u)` never tracked earlier
            # vertices and inverted the membership test
            if nbrList[i] not in path:
                done = knight_tour(n + 1, path, nbrList[i], limit)
            i = i + 1
        if not done:  # prepare to backtrack
            path.pop()
    else:
        done = True
    return done
|
f11a2da4e740183a85dbd2f281c65dda77237dad
| 16,021 |
def bitarray_to_int(b):
    """Convert a bitarray to an integer."""
    if not len(b):
        return 0
    return int(b.to01(), 2)
|
90c351e81b088a079d346313efd56f9c08344a3f
| 531,452 |
def binary_to_hex(hex_format, binary_string) -> str:
    """
    Convert a binary string to a hexadecimal string.
    :param hex_format: hexadecimal format - "{0:0>X}" or "{0:0>2X}" etc.
    :param binary_string: binary string - for example, '1110'.
    :return: hexadecimal string - for example '0E', 'F'.
    """
    return hex_format.format(int(binary_string, 2))
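# Illustrative usage, tying the docstring's format strings to their outputs:
# binary_to_hex("{0:0>2X}", "1110") -> "0E"   (zero-padded to width 2)
# binary_to_hex("{0:0>X}", "1111")  -> "F"    (no padding width given)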
|
e25507e09db96fbd8b60ba6e427d7f2799adfd4e
| 212,088 |
def spritecollideany(sprite, group, collided=None):
    """finds any sprites in a group that collide with the given sprite
    pygame.sprite.spritecollideany(sprite, group): return sprite
    Given a sprite and a group of sprites, this will return any single
    sprite that collides with the given sprite. If there are no
    collisions, then this returns None.
    If you don't need all the features of the spritecollide function, this
    function will be a bit quicker.
    Collided is a callback function used to calculate if two sprites are
    colliding. It should take two sprites as values and return a bool value
    indicating if they are colliding. If collided is not passed, then all
    sprites must have a "rect" value, which is a rectangle of the sprite area,
    which will be used to calculate the collision.
    """
    if collided:
        for s in group:
            if collided(sprite, s):
                return s
    else:
        # Special case old behaviour for speed.
        spritecollide = sprite.rect.colliderect
        for s in group:
            if spritecollide(s.rect):
                return s
    return None
|
4f80e045cec8e8641bc74de15f82adc65394da1a
| 39,113 |
from typing import Dict
def is_queue_not_empty(queue: Dict) -> bool:
    """Check if a data queue is not empty.
    Args:
        queue: Data queue
    Returns:
        False if queue is empty, else True.
    """
    return queue['messages'] > 0 or queue['messages_unacknowledged'] > 0
|
85b0c68645c4d301265672293e75c76d2e46e21e
| 413,376 |
def mask_sentinel_clouds(img):
    """
    Function that masks out clouds from the input Sentinel-2 image. The
    input image has to have the QA60 band in its bands list.
    Parameters
    ----------
    img : ee.image.Image
        Single Sentinel-2 Earth Engine Image that needs cloud-masking
    Returns
    -------
    ee.image.Image
        The cloud-masked image
    """
    try:
        # selecting the qa band
        qa = img.select('QA60')
        # Bit 10 belongs to clouds, bit 11 belongs to cirrus
        cloudBitMask = 1 << 10
        cirrusBitMask = 1 << 11
        # Setting flags to 0 to indicate no cloudy conditions
        mask = qa.bitwiseAnd(cloudBitMask).eq(0) \
            .And(qa.bitwiseAnd(cirrusBitMask).eq(0))
        return img.updateMask(mask).divide(10000)
    # The function will return an error message if the input is not
    # of type <class 'ee.image.Image'>
    except AttributeError:
        print("""
            Error: the input is {}. It needs to be a <class 'ee.image.Image'>
            """.format(str(type(img))))
        return None
|
83b5c231cf518e41eab6979f1354ed6e1372ca5e
| 524,736 |
def solution(A):
    """
    Codility -
    https://app.codility.com/demo/results/trainingKC23TS-77G/
    100%
    Idea is to maintain the start and end point marker for each disc/circle
    Sort by start position of disc/circle
    On each start position count the active circles/discs
    On each end position reduce the active circles/discs
    Count the total intersections using the active circles in each iteration
    Reference -
    https://www.youtube.com/watch?v=HV8tzIiidSw
    http://www.lucainvernizzi.net/blog/2014/11/21/codility-beta-challenge-number-of-disc-intersections/
    :param A:
    :return: total intersections
    """
    # contains start and end points of circle
    circle_points = []
    for i, a in enumerate(A):
        # store start points and end points of circle
        # as per problem statement -> The J-th disc is drawn with its center at (J, 0) and radius A[J].
        # example 0-1, 0+1 second 1-5, 1+5
        circle_points += [(i - a, True), (i + a, False)]
    print("Original disc positions " + str(circle_points))
    # Sort the array of discs, making sure that the start of a disc at a particular point P comes before the end of any disc at P.
    circle_points.sort(key=lambda x: (x[0], not x[1]))
    print("Sorted disc positions " + str(circle_points))
    intersections, active_circles = 0, 0
    # We now walk this array, keeping track of how thick the set of discs is at each new disc (below, the variable is called active_circles).
    # Furthermore, we increase the number of intersections by active_circles if a new disc starts.
    for _, is_beginning in circle_points:
        # active circle found, i.e. starting disc found
        if is_beginning:
            # counting intersections by active circles, i.e. each active circle must be intersecting the coming new circle;
            # we already know they are in sorted order by start position
            intersections += active_circles
            active_circles += 1
            print(
                "This is start disc -> intersections " + str(intersections) + " active_circles " + str(active_circles))
        # ending circle position found, now active circles should be reduced by one
        else:
            print("Closing disc found.....")
            print("Reduce active circle " + str(active_circles))
            print()
            active_circles -= 1
    # 10 ** 7 is 10 to the power of 7, the problem's overflow cutoff
    if intersections > 10 ** 7:
        return -1
    return intersections
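# Worked example (the sample from the Codility task statement, assumed here,
# not part of the original entry): A = [1, 5, 2, 1, 4, 0] has 11 intersecting
# pairs of discs, so solution([1, 5, 2, 1, 4, 0]) returns 11 (plus debug prints).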
|
ec2eae5c4af4b7e0c71b2487371f7b6b2aaa8f70
| 175,195 |
import re
import io
def get_version(filename):
    """
    Trivial parser to extract a __version__ variable from a source file.
    :param str filename: the file to extract __version__ from
    :returns str: the version string for the package
    """
    version_re = re.compile(r'(\d\.\d(\.\d+)?)')
    with io.open(filename, 'r') as source:
        for line_num, line in enumerate(source):
            if line.startswith('__version__'):
                match = version_re.search(line)
                if not match:
                    raise Exception(
                        'Invalid __version__ string found on '
                        'line %d of %s' % (line_num + 1, filename))
                return match.group(1)
    raise Exception('No __version__ line found in %s' % filename)
|
6abdd829a2067f148cb3f4e500d7881e8e0b3e60
| 107,900 |
def jsonify(records):
    """ Parse asyncpg record response into JSON format
    """
    return [{key: value for key, value in
             zip(r.keys(), r.values())} for r in records]
|
ae25304429f89d48f9261040dc0894833a3cd368
| 521,649 |
import csv
def parse_throughput_stats(fp):
    """
    Parse throughput statistics.
    :param fp: the file path that stores the statistics
    :returns the number of pairs of initiator and responder clients a server can handle per second
    """
    counter = 0
    with open(fp) as csvfile:
        csvreader = csv.DictReader(csvfile, delimiter=' ', fieldnames=['title', 'time'])
        for _ in csvreader:
            counter += 1
    return counter / float(10)  # The experiment lasted for 20 seconds
|
e9b2e3e6e29eccb315fbb7882be93c9b1c91c77e
| 223,957 |
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """
    Maximizes metric function over available gold spans. Because there can be
    multiple legal answers, we do not penalize the model by only testing on
    the first gold span.
    Args:
        metric_fn: Function to maximize over.
        prediction: Predicted answer span (string).
        ground_truths: List of true answer spans (each string).
    Returns:
        Max score.
    """
    scores_for_ground_truths = []
    for ground_truth in ground_truths:
        score = metric_fn(prediction, ground_truth)
        scores_for_ground_truths.append(score)
    return max(scores_for_ground_truths)
|
e5c46e08efdaedefdaaa5117a866a275554759d0
| 530,114 |
def decay_across_unit_interval(v, p, d):
    """ Generalized decay function over unit interval.
    Returns: initial value rescaled based on decay factor
    Parameters:
        v: Starting value
        p: Percent completed, must be in a unit interval [0,1]
        d: Decay trajectory, must be in a unit interval [0,1]
    Example values for d:
        d = 0.00 No decay, return starting value
        d = 0.25 Slow onset, decay slowly and then accelerate
        d = 0.50 Linear (45 degree) decay across interval
        d = 0.75 Fast onset, decay fast and then decelerate
        d = 1.00 Immediate decay, return 0
    Author: [email protected]
    License: GNU General Public License with attribution
    """
    # No decay
    if d == 0.0:
        return v
    # Immediate decay (checked up front; the original placed this branch
    # after the d > 0.5 return, making it unreachable)
    if d == 1.0:
        return 0
    # Linear decay (at d == 0.5 the slow-onset formula reduces to the same value)
    if d == 0.5:
        return v * (1 - p)
    # Slow onset
    if d < 0.5:
        return v * (1 - p ** (1.0 / (d * 2)))
    # Fast onset
    return v * (decay_across_unit_interval(1, p, 0.5)
                - (decay_across_unit_interval(1, 1 - p, 1 - d)
                   - decay_across_unit_interval(1, 1 - p, 0.5)))
|
42138e22103d6ec5733a70209d49611a167aae65
| 241,884 |
def unescape_json(s):
    """Unescape a string that was previously encoded into JSON.
    This unescapes forward slashes (optional in JSON standard),
    backslashes and double quotes"""
    # Note: the 'string_escape' codec (and str.decode) only exists in Python 2.
    return s.replace("\\/", '/').replace('\\"', '"').decode('string_escape')
|
cb2093aad453822ed8660bd0219a59472e765f56
| 179,814 |
from typing import List
import random
def sample_floats(low: float, high: float, k: int = 1) -> List[float]:
    """Return a k-length list of unique random floats in the range of low <= x <= high."""
    seen = set()
    for _ in range(k):
        x = random.uniform(low, high)
        while x in seen:
            x = random.uniform(low, high)
        seen.add(x)
    return list(seen)
|
9dcbef61809e1cfc3cc3748338137e4d48a95059
| 35,539 |
from datetime import datetime
def resample(dset, freq, weights=None, time_coord_name=None, method=None):
    """ Resample given dataset and compute the mean for the specified sampling time frequency
    Parameters
    ----------
    dset : xarray.Dataset
        The data on which to operate
    freq : str
        Time frequency alias. Accepted aliases:
        - ``mon``: for monthly means
        - ``ann``: for annual means
    weights : array_like, optional
        weights to use for each time period. This argument is supported for annual means only!
        If None and dataset doesn't have `time_bound` variable,
        every time period has equal weight of 1.
    time_coord_name : str
        Name for time coordinate to use
    method : str, optional
        Specify how the time values of the returned dataset are computed.
        If None, time values are computed as midpoints.
        Accepted values:
        - ``left``: for leftmost time values
        - ``right``: for rightmost time values
    Returns
    -------
    computed_dset : xarray.Dataset
        The resampled data with computed mean
    """
    accepted_freq = {'mon', 'ann'}
    if freq not in accepted_freq:
        raise ValueError(f'{freq} is not among supported frequency aliases={accepted_freq}')
    if freq == 'mon':
        ds = dset.esmlab.set_time(time_coord_name=time_coord_name).compute_mon_mean(method=method)
    else:
        ds = dset.esmlab.set_time(time_coord_name=time_coord_name).compute_ann_mean(
            weights=weights, method=method
        )
    new_history = f'\n{datetime.now()} esmlab.resample(<DATASET>, freq="{freq}")'
    ds.attrs['history'] = new_history
    return ds
|
8e817c6876837e77ffd6741e48532020a99f256c
| 504,384 |
from typing import Iterable
from typing import Union
def big_endian_digits_to_int(digits: Iterable[int], *,
                             base: Union[int, Iterable[int]]) -> int:
    """Returns the big-endian integer specified by the given digits and base.
    Examples:
        >>> cirq.big_endian_digits_to_int([0, 1], base=10)
        1
        >>> cirq.big_endian_digits_to_int([1, 0], base=10)
        10
        >>> cirq.big_endian_digits_to_int([1, 2, 3], base=[2, 3, 4])
        23
    Args:
        digits: Digits of the integer, with the least significant digit at the
            end.
        base: The base, or list of per-digit bases, to use when combining the
            digits into an integer. When a list of bases is specified, the last
            entry in the list is the base for the last entry of the digits list
            (i.e. the least significant digit). That is to say, the bases are
            also specified in big endian order.
    Returns:
        The integer.
    Raises:
        ValueError:
            One of the digits is out of range for its base.
            The base was specified per-digit (as a list) but the length of the
            bases list is different from the number of digits.
    """
    digits = tuple(digits)
    base = (base,) * len(digits) if isinstance(base, int) else tuple(base)
    if len(digits) != len(base):
        raise ValueError('len(digits) != len(base)')
    result = 0
    for d, b in zip(digits, base):
        if not (0 <= d < b):
            raise ValueError(
                'Out of range digit. Digit: {!r}, base: {!r}'.format(d, b))
        result *= b
        result += d
    return result
|
ba54567afa4eecd8756ad0f089bbe6045a2861e5
| 436,693 |
def cast_int(value):
    """
    Cast value to 32bit integer
    Usage:
        cast_int(1 << 31) == -2147483648
        (whereas: 1 << 31 == 2147483648)
    """
    value = value & 0xFFFFFFFF
    if value & 0x80000000:
        value = ~value + 1 & 0xFFFFFFFF
        return -value
    else:
        return value
|
1f843d4ac82b16f5d735f4e87fb79748b979cf43
| 206,875 |
def load_queries(path):
    """Loads queries into a dict of key: query id, value: query text."""
    queries = {}
    with open(path) as f:
        for i, line in enumerate(f):
            query_id, query = line.rstrip().split('\t')
            queries[query_id] = query
            if i % 100000 == 0:
                print('Loading queries {}'.format(i))
    return queries
|
c8b677965181970ee5dff90d3b611a9641a0b68d
| 562,387 |
def clean_uri(uri):
    """
    This method removes the url part of the URI in order to obtain just the property or class
    :param uri: An uri to be cleaned
    :return: The name of the property or the class
    """
    if uri.find('#') != -1:
        special_char = '#'
    else:
        special_char = '/'
    index = uri.rfind(special_char)
    return uri[index + 1:]
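# Illustrative usage:
# clean_uri("http://www.w3.org/1999/02/22-rdf-syntax-ns#type") -> "type"
# clean_uri("http://dbpedia.org/ontology/Person")              -> "Person"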
|
737540b4cf9381e2b315699a90dd31a7afebce08
| 512,315 |
def _tensor_max(*args):
    """Elementwise maximum of a sequence of tensors"""
    maximum, *rest = args
    for arg in rest:
        maximum = maximum.max(arg)
    return maximum
|
3046b6ae14368a7275f74ade42a1b179ae38b95e
| 37,710 |
def version_components(version):
    """Split version string into major1.major2.minor components."""
    components = version.split(".")
    num = len(components)
    if num < 1:
        raise ValueError("version should have at least one component.")
    major1 = components[0]
    if num >= 2:
        major2 = components[1]
    else:
        major2 = "0"
    if num >= 3:
        minor = components[2]
    else:
        minor = "0"
    return (major1, major2, minor)
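# Illustrative usage (missing components default to "0"):
# version_components("3.7.2") -> ("3", "7", "2")
# version_components("10")    -> ("10", "0", "0")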
|
fdd4d577d5140db2fa8068db6e4f1f8b1479c4c1
| 371,944 |
def most_common(lst):
    """Returns the most common element in a list
    Args:
        lst (list): list with any datatype
    Returns:
        an element of lst (any): the most common element;
        if the commonest elements tie, one is selected arbitrarily
    Raises:
        ValueError: if lst is None or empty list
    """
    if not lst:
        raise ValueError("input must be non-empty list")
    return max(set(lst), key=lst.count)
|
59723d3a20d04e1ec7a7f2db0d6a818d0a922135
| 394,243 |
def get_col_widths(table, tab_width=2, tab_level=0, col_padding=0):
    """Recursively get the list of max number of characters of each column in table.
    Parameters:
        table (dict): The table to check (has "fields" and "records" properties)
        tab_width (int): The amount of space to leave for tabs. By default, 2.
        tab_level (int): The number of tables this table is nested within. By default, 0.
        col_padding (int): How much space to pad columns with. By default, 0.
    Returns:
        list: List of ints corresponding to the maximum number of characters per table column.
    """
    col_widths = []
    for cell in table["fields"]:
        col_widths.append(len(cell["text"]) + col_padding)
    col_widths[0] += tab_width * tab_level
    # take max of this col_widths or col_widths returned by recursing on children
    for row in table["records"]:
        child_widths = get_col_widths(row, tab_width, tab_level + 1, col_padding)
        for i in range(0, len(col_widths)):
            col_widths[i] = max(col_widths[i], child_widths[i])
    return col_widths
|
9d68a1e314a5956be2595caee47b6a65db19b69b
| 517,010 |
from datetime import datetime
def subtract_dates(date1, date2):
    """
    Takes two dates %Y-%m-%d format. Returns date1 - date2, measured in days.
    """
    date_format = "%Y-%m-%d"
    a = datetime.strptime(date1, date_format)
    b = datetime.strptime(date2, date_format)
    delta = a - b
    # print(date1,"-",date2,"=",delta.days)
    return delta.days
|
2000019c76c42ba881ba9b25b89b6dea295d9e19
| 291,831 |
def _project(doc, projection):
    """Return new doc with items filtered according to projection."""
    def _include_key(key, projection):
        for k, v in projection.items():
            if key == k:
                if v == 0:
                    return False
                elif v == 1:
                    return True
                else:
                    raise ValueError('Projection value must be 0 or 1.')
        if projection and key != '_id':
            return False
        return True
    return {k: v for k, v in doc.items() if _include_key(k, projection)}
|
0f2cd190e73b39ceeec0f850054baab1dd357587
| 2,708 |
import heapq
def nlargest_indices(n, iterable):
    """Given an iterable, computes the indices of the n largest items.
    Parameters
    ----------
    n : int
        How many indices to retrieve.
    iterable : iterable
        The iterable from which to compute the n largest indices.
    Returns
    -------
    largest : list of int
        The n largest indices where largest[i] is the index of the i-th largest index.
    """
    nlargest = heapq.nlargest(n, enumerate(iterable),
                              key=lambda x: x[1])
    return [i[0] for i in nlargest]
|
e6609150f2b9f95721288ee0d410978aaa898951
| 451,936 |
def _get_projection(el):
    """
    Get coordinate reference system from non-auxiliary elements.
    Return value is a tuple of a precedence integer and the projection,
    to allow non-auxiliary components to take precedence.
    """
    return (int(el._auxiliary_component), el.crs) if hasattr(el, 'crs') else None
|
94da883c3aba5647d0619670bf1bbdc14fa36299
| 154,809 |
def find_longest(arr):
    """ Find the number with the most digits.
    If two numbers in the argument array have the same number of digits,
    return the first one in the array.
    """
    return max(arr, key=lambda x: len(str(x)))
|
340bfd92ab2e88bbec071ef0e9ea584cc4c18cf0
| 595,250 |
def unify_table(table):
    """Given a list of rows (i.e. a table), this function returns a new table
    in which all rows have an equal amount of columns. If a full column is
    empty (i.e. all rows have that field empty), the column is removed.
    """
    max_fields = max(map(lambda row: len(row), table))
    empty_cols = [True] * max_fields
    output = []
    for row in table:
        curr_len = len(row)
        if curr_len < max_fields:
            row += [''] * (max_fields - curr_len)
        output.append(row)
        # register empty columns (to be removed at the end)
        for i in range(len(row)):
            if row[i].strip():
                empty_cols[i] = False
    # remove empty columns from all rows
    table = output
    output = []
    for row in table:
        cols = []
        for i in range(len(row)):
            should_remove = empty_cols[i]
            if not should_remove:
                cols.append(row[i])
        output.append(cols)
    return output
|
80de4acb372513aef2a740d18a9530e4da2cbe41
| 448,855 |
def test_input_yes_no(value) -> str:
    """This function tests the input argument and returns the boolean result as a string."""
    return 'Yes' if value else 'No'
|
9ed8e6563cae9947986509469d71709e98dd50d4
| 282,915 |
def process_list(string):
    """ Create list from string with items separated by commas
    Helper for get_sorted_emails.
    """
    # Remove all spaces
    no_spaces = "".join(string.split())
    # Split at commas
    split_lst = no_spaces.split(",")
    # Remove empty strings and return
    return list(filter(None, split_lst))
|
4191c63cfb3ef87a08102235152c2d2a56696749
| 598,694 |
def select_dict_keys(dictionary, keys):
    """Filters a dict by only including certain keys.
    :param dictionary: dictionary
    :param keys: keys to be selected
    :return: dictionary with selected keys
    """
    key_set = set(keys) & set(dictionary.keys())
    return {key: dictionary[key] for key in key_set}
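# Illustrative usage (key order follows the set intersection, so it is not
# guaranteed; missing keys are silently dropped):
# select_dict_keys({"a": 1, "b": 2, "c": 3}, ["a", "c", "x"]) -> {"a": 1, "c": 3}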
|
4b3d07e340d599298453ef1c3a507c0b3f782d04
| 338,453 |
import re
def check_google_calendar_id(google_cal_id):
    """utility to verify a google calendar id passed as a parameter.
    Args:
        google_cal_id (str): string of the google calendar id
    Returns:
        bool: True if the string in parameter is a verified google calendar id, else returns False
    """
    return bool(
        re.match(r"esi\.dz_[a-z0-9]{26}@group\.calendar\.google\.com",
                 google_cal_id))
|
feffa4626b9031d199155ba43a5c48c1e01eab1b
| 211,529 |
def der_order(order: int) -> str:
    """Return correct suffix for order.
    >>> der_order(1)
    '1-st'
    >>> der_order(4)
    '4-th'
    """
    _suffix = ["st", "nd", "rd"]
    # the original `order - 1 > 3` sent order == 4 into the suffix lookup,
    # raising IndexError instead of returning "4-th"
    if order > 3:
        return f"{order}-th"
    else:
        return f"{order}-{_suffix[order - 1]}"
|
e073135851a8848c41fbdff75215cc0dfe3df7cd
| 648,524 |
def flatten(iterables, start_with=None):
    """Takes a list of lists ``iterables`` and returns a list containing elements of every list.
    If ``start_with`` is not ``None``, the result will start with ``start_with`` items, exactly as
    if ``start_with`` would be the first item of lists.
    """
    result = []
    if start_with:
        result.extend(start_with)
    for iterable in iterables:
        result.extend(iterable)
    return result
|
10ecc77cf5642a8ac236be1ab2e1e0a37351b523
| 171,255 |
def read_reco2vol(volumes_file):
    """Read volume scales for recordings.
    The format of volumes_file is <reco-id> <volume-scale>
    Returns a dictionary { reco-id : volume-scale }
    """
    volumes = {}
    with open(volumes_file) as volume_reader:
        for line in volume_reader.readlines():
            if len(line.strip()) == 0:
                continue
            parts = line.strip().split()
            if len(parts) != 2:
                raise RuntimeError("Unable to parse the line {0} in file {1}."
                                   "".format(line.strip(), volumes_file))
            volumes[parts[0]] = float(parts[1])
    return volumes
|
8914f0f8351f3340fa3382c7dad00c61ce0aa154
| 652,623 |
import re
def re_map(f, regexp, string):
    """Find regexp in string and map f on the matches.
    Replace every occurrence of regexp in string by the application of f
    to the match.
    >>> re_map(lambda x: '<%s>' % x.capitalize(), 'a|e|i|o|u', 'This was a triumph')
    'Th<I>s w<A>s <A> tr<I><U>mph'
    """
    match = re.search(regexp, string)
    while match:
        prefix = string[:match.span()[0]]
        suffix = string[match.span()[1]:]
        string = '%s%s%s' % (prefix, f(match.group()), suffix)
        match = re.search(regexp, string)
    return string
|
5f13ea82834910acb96d9dbf968419f2df4232e4
| 560,205 |
def average_tweets_per_user(tweets, users_with_freq):
    """
    Return the average number of tweets per user from a list of tweets.
    :param tweets: the list of tweets.
    :param users_with_freq: a Counter of usernames with the number of tweets in 'tweets' from each user.
    :return: float. average number of tweets per user
    """
    tweets_number = len(tweets)
    users_number = len(users_with_freq)
    return tweets_number / users_number
|
f2fc5b725003b39a5e429a4945007fbb16640b54
| 679,261 |
import math
def distance_formula(x1: float, y1: float, x2: float, y2: float) -> float:
    """
    Distance between two points is defined as the square root of (x2 - x1)^2 + (y2 - y1)^2
    :raises TypeError: Any of the values are non-numeric or None.
    """
    return math.sqrt(((x2 - x1) ** 2) + ((y2 - y1) ** 2))
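# Worked example (the classic 3-4-5 right triangle):
# distance_formula(0.0, 0.0, 3.0, 4.0) -> 5.0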
|
5c1a4706365a2347bc23d7efcc74caa003405c0e
| 28,965 |
def binary_search_recursive(lst, target):
    """
    Returns True if the target is found, else returns None
    (slicing discards the original index positions, so no index is returned)
    complexity: O(log n)
    """
    if len(lst) <= 0:
        return None
    midpoint = len(lst) // 2
    if lst[midpoint] == target:
        return True
    if lst[midpoint] < target:
        return binary_search_recursive(lst[midpoint + 1:], target)
    else:
        return binary_search_recursive(lst[:midpoint], target)
|
76cc158dc0a99a7b95b5ebfe60a5ca3731475c30
| 554,873 |
def remove_none_value(data):
    """remove item from dict if value is None.
    return new dict.
    """
    return dict((k, v) for k, v in data.items() if v is not None)
|
934690c406585dd71ce2785c169133160d319801
| 440,904 |
import ast
def parseTypeFromString(value):
    """
    Parse a string representation of a variable into a true, typed, python variable
    """
    return ast.literal_eval(value)
|
2a7664af015a60a9070e3090772c73af4ef76fb5
| 26,409 |
def build_interlock_status(device):
    """
    Builds an integer representation of the interlock bit-field.
    :param device: the lewis device
    :return: int representation of the bit field
    """
    interlocks = device.get_interlocks()
    bit = 1
    result = 0
    for ilk in interlocks.values():
        result += bit if ilk else 0
        bit *= 2
    return result
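# Illustrative sketch (assumes get_interlocks() returns an ordered mapping):
# interlocks {"door": True, "water": False, "temp": True} set bits 1 and 4,
# so build_interlock_status(device) would return 5.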
|
792994d5856978ab2d82a18c7eeeaebb0c67a5b9
| 182,854 |
def filter_data(data, filter_keys):
    """Applies a key filter to a dictionary in order to return a subset of
    the key-values. The insertion order of the filtered key-value pairs is
    determined by the filter key sequence.
    Parameters:
        data (dict): key-value pairs to be filtered.
        filter_keys (tuple): sequence of key names that are used to select
            key-value pairs that match on the key name.
    Returns:
        dict: filtered collection of key-value pairs.
    """
    return {key: data[key] for key in filter_keys if key in data.keys()}
    # record = {}
    # for key in filter_keys:
    #     if key in data.keys():
    #         record[key] = data[key]
    # return record
|
0c6c7985d09d5c6f7414335a92346f40186e6200
| 136,750 |
def get_change(budget: float, exchanging_value: float) -> float:
    """Calculate currency left after an exchange
    Args:
        budget (float): amount of money you own.
        exchanging_value (float): amount of your money you want to exchange now.
    Returns:
        float: amount left of your starting currency after exchanging.
    """
    return budget - exchanging_value
|
ccfec74ecb078459bfe440160d806d50ee702ac0
| 468,970 |
def format_temp_unit(value: float, unit: str) -> str:
    """
    returns formatted degrees value
    Format celsius or fahrenheit degrees to nice string output.
    :param value: celsius or fahrenheit degrees value
    :param unit: 'C' for celsius or 'F' for fahrenheit degrees
    :type value: float
    :type unit: str
    :returns: value with degrees symbol
    :rtype: str
    :Example:
    >>> a = format_temp_unit(-17.78, 'c')
    >>> print(a)
    -17.78℃
    """
    if unit.lower() == 'c':
        return f"{value}℃"
    elif unit.lower() == 'f':
        return f"{value}℉"
    else:
        return str(value)
|
67fbcac2e111fb4546734c63269eaa9c6ff752aa
| 176,647 |
def remove_prefix(text, prefix):
    """
    Remove prefix of a string.
    Ref: https://stackoverflow.com/questions/16891340/remove-a-prefix-from-a-string
    """
    if text.startswith(prefix):
        return text[len(prefix):]
    return text
|
716cff2f704c416055cf9d5d279b7d3ac2c4d4f2
| 362,742 |
def VShadowPathSpecGetStoreIndex(path_spec):
    """Retrieves the store index from the path specification.
    Args:
        path_spec (PathSpec): path specification.
    Returns:
        int: store index or None if not available.
    """
    store_index = getattr(path_spec, 'store_index', None)
    if store_index is None:
        location = getattr(path_spec, 'location', None)
        if location is None or not location.startswith('/vss'):
            return None
        store_index = None
        try:
            store_index = int(location[4:], 10) - 1
        except (TypeError, ValueError):
            pass
    if store_index is None or store_index < 0:
        return None
    return store_index
|
28bccc75a3fb2ec13733b03aad28b68765d1d453
| 371,068 |
def _slope_lstsq(x, y):
    """ Utility function which returns the slope of the linear
    regression between x and y.
    Parameters
    ----------
    x : ndarray, shape (n_times,)
    y : ndarray, shape (n_times,)
    Returns
    -------
    float
    """
    n_times = x.shape[0]
    sx2 = 0
    sx = 0
    sy = 0
    sxy = 0
    for j in range(n_times):
        sx2 += x[j] ** 2
        sx += x[j]
        sxy += x[j] * y[j]
        sy += y[j]
    den = n_times * sx2 - (sx ** 2)
    num = n_times * sxy - sx * sy
    return num / den
|
d523007e56ca16223b4aa2ab80aa6e6daa42839a
| 329,847 |
def is_error(data):
    """check for error indication in data"""
    return data[0] == 255
|
92e13d2803567467576c2c134cb14a4401c949f0
| 180,365 |
def extract_sparse(sparse_mat):
    """
    Extract a coordinate-level representation as a tuple for a PyTorch
    sparse tensor.
    Args:
        - sparse_mat (PyTorch sparse tensor) - The sparse matrix from which to
          extract coordinates and values.
    Returns:
        - A tuple of the indices (coordinates) of non-zero values and a
          vector of the corresponding values.
    """
    # Perform extraction
    indices = sparse_mat._indices().t().numpy()
    values = sparse_mat._values().numpy()
    # Return tuple
    return (indices, values)
|
aedfe8591d6856465094468a682e65ed6ee8f6a7
| 514,655 |
from typing import List
def chunk_sizes(total_size: int, num_chunks: int) -> List[int]:
    """Return list containing the sizes of chunks.
    Args:
        total_size: Total computation work.
        num_chunks: Maximum number of chunks the work will be split into.
    Returns:
        List of chunks with split work.
    """
    chunk_size = max(total_size // num_chunks, 1)
    if chunk_size == 1:
        sizes = total_size * [chunk_size]
    else:
        equal, rest = divmod(total_size, chunk_size)
        sizes = equal * [chunk_size]
        if rest != 0:
            sizes.append(rest)
    return sizes
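# Worked example: chunk_sizes(10, 3) computes chunk_size = 3 and
# divmod(10, 3) = (3, 1), so the result is [3, 3, 3, 1]
# (three full chunks plus the remainder).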
|
9f7f50abf022c25010d44e7e154e574831fcf965
| 293,506 |
def pt_segment_dist(A, B, P):
    """
    SUMMARY
        computes the distance of point P from segment AB
    PARAMETERS
        A: endpoint of segment AB
        B: other endpoint of segment AB
        P: point at some distance from AB
    RETURNS
        float
    """
    x1, y1 = A
    x2, y2 = B
    x3, y3 = P
    px = x2 - x1
    py = y2 - y1
    norm = px * px + py * py
    u = ((x3 - x1) * px + (y3 - y1) * py) / float(norm)
    if u > 1:
        u = 1
    elif u < 0:
        u = 0
    x = x1 + u * px
    y = y1 + u * py
    dx = x - x3
    dy = y - y3
    dist = (dx * dx + dy * dy) ** .5
    return dist
|
dd072a558a8a48d7e8052d1ec3f8a73abba54c0e
| 322,413 |
def merge_logfiles(log1, log2):
    """Merge two log files.
    If log1 overlaps with log2, the overlapping entries in log1
    are discarded.
    """
    first_in_2 = log2['time'][0]
    keep_from_1 = log1['time'] < first_in_2
    for key in log1.keys():
        log1[key] = log1[key][keep_from_1]
    log1.timeseries_append(log2)
    return log1
|
e169edd3cc7617a20bb96b9b41deea8748f3c56e
| 610,784 |
def dict_list_to_bins(dict_list: list, bin_key: str, comparison_key=None) -> dict:
    """
    A dictionary binning function which reduces a set of data
    to a set of bins.
    :param dict_list: a list of dictionaries
    :param bin_key: a key for binning
    :param comparison_key: a key for counting
    :return: a dictionary
    """
    dict_bins = {}
    for item in dict_list:
        bin_val = item[bin_key]
        # membership test rather than a truthiness check, so an accumulated
        # value of 0 (or 0.0) is not mistaken for a missing bin
        if bin_val in dict_bins:
            if comparison_key:
                dict_bins[bin_val] += float(item[comparison_key])
            else:
                dict_bins[bin_val] += 1
        else:
            if comparison_key:
                dict_bins[bin_val] = float(item[comparison_key])
            else:
                dict_bins[bin_val] = 1
    return dict_bins
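# Illustrative usage:
# rows = [{"city": "Oslo", "sales": "2.5"}, {"city": "Oslo", "sales": "1.0"},
#         {"city": "Bergen", "sales": "4.0"}]
# dict_list_to_bins(rows, "city")          -> {"Oslo": 2, "Bergen": 1}
# dict_list_to_bins(rows, "city", "sales") -> {"Oslo": 3.5, "Bergen": 4.0}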
|
2d56aa92d31e8a3921a805e5654764d1f267c291
| 306,018 |
import re
def _compose_comment(remote: str, sha: str, git_log: str) -> str:
    """Composes a comment describing the original (triggering) commit.
    Arguments:
        remote {str} -- a link to the remote git repo
        sha {str} -- the hash of the commit
        git_log {str} -- the git log for the commit
    Returns:
        str -- a comment describing the commit
    """
    match = re.match(r"(https://github.com/([^/]+)/([^/.]+).*?)(\.git)?", remote)
    if match:
        link = f"{match.group(1)}/commit/{sha}"
        source_repo = match.expand(r"\2/\3")
        lines = [
            git_log,
            f"Source-Repo: {source_repo}",
            f"Source-Sha: {sha}",
            f"Source-Link: {link}",
        ]
    else:
        lines = [git_log, f"Source-Repo: {remote}", f"Source-Sha: {sha}"]
    return "\n".join(lines)
|
d6aac13cbf23db2b5ea94e0b6cfe9600f1900283
| 643,319 |
def get_cube_data_info(cube):
    """Return short_name, and mip from the cube."""
    short_name = cube.var_name
    mip = cube.attributes['mip']
    return short_name, mip
|
fa1be8f072d3ce320225648c71c02e8fcb361fa6
| 502,613 |
def format_perm(model, action):
    """
    Format a permission string "app.verb_model" for the model and the
    requested action (add, change, delete).
    """
    return '{meta.app_label}.{action}_{meta.model_name}'.format(
        meta=model._meta, action=action)
|
12f532e28f685c2a38a638de63928f07039d44c8
| 668 |
import torch
def emb_2d_dropout(training, mask_perc, tensor):
    """2D dropout of tensor where entire row gets zero-ed out with prob
    mask_perc.
    Args:
        training: if the model is in train mode or not
        mask_perc: percent to dropout
        tensor: tensor to dropout
    Returns: tensor after 2D dropout
    """
    batch, M, K, dim = tensor.shape
    if training and mask_perc > 0:
        # reshape for masking
        tensor = tensor.contiguous().reshape(batch * M * K, dim)
        # randomly mask each entity embedding
        bern_prob = (torch.ones(batch * M * K, 1) * mask_perc).to(tensor.device)
        zero_mask = torch.bernoulli(bern_prob) > 0
        tensor = tensor.masked_fill(zero_mask, 0)
        tensor = tensor.contiguous().reshape(batch, M, K, dim)
    return tensor
|
b0742f385054b65be60b1dcbece7b31ff385bcca
| 425,050 |
def determine_type(api_version: str, kind: str):
    """Return name of Swagger model for this `api_version` and `kind`.
    Example: ('Extensions/v1beta1', 'Deployment') -> 'ExtensionsV1beta1Deployment'
    This is useful to convert the K8s Json response into a Swagger generated
    Python object. The `api_version` and `kind` should be copied verbatim
    (including capitalisation) from the manifests K8s returns.
    Input:
        api_version: str
            As returned by K8s API, eg `V1`, `Extensions/v1beta1` ...
        kind: str
            As returned by K8s API, eg `Deployment`, `Namespace` ...
    Returns:
        str: Name of Swagger model that can wrap the specified data.
    """
    # Split the API name by '/', eg Extensions/v1beta1 -> (Extensions,
    # v1beta1). Then capitalize the first character only, leaving all other
    # characters untouched. This is the scheme Swagger uses for its classes.
    words = api_version.split('/')
    words = [word[0].capitalize() + word[1:] for word in words]
    api = str.join('', words)
    # Capitalise the first character of the `kind` and compile the Swagger name.
    kind = kind[0].capitalize() + kind[1:]
    klass = f'{api}{kind}'
    # Capitalise trailing `list` to match the Swagger class name.
    if klass.endswith('list'):
        klass = str.join('', klass.rpartition('list')[0]) + 'List'
    return klass
|
a92dcd595b960e197248462a0021b13b3aa521da
| 470,736 |
import asyncio
from typing import Optional
def get_loop(running: bool = False, enforce_running: bool = False) -> asyncio.AbstractEventLoop:
    """Gracefully fetch a loop.
    The function tries to get an event loop via :func:`asyncio.get_event_loop`.
    On fail, returns a new loop using :func:`asyncio.new_event_loop`.
    Parameters
    ----------
    running: :class:`bool`
        Indicates if the function should get a loop that is already running.
    enforce_running: :class:`bool`
        If ``running`` is ``True``, indicates if :exc:`RuntimeError`
        should be raised if there is no current loop running.
    """
    loop: Optional[asyncio.AbstractEventLoop]
    if running:
        try:
            loop = asyncio._get_running_loop()
        except Exception:  # an error might occur actually
            loop = None
        if loop is not None:
            return loop
        if enforce_running:
            raise RuntimeError("No running event loop.")
    try:
        loop = asyncio.get_event_loop()
        if loop.is_running() and not running:
            # loop is running while we have to get the non-running one,
            # let us raise an error to go into <except> clause.
            raise ValueError("Current event loop is already running.")
        if loop.is_closed():
            # same here, fall into <except> clause if the loop is closed
            raise ValueError("Current event loop is closed.")
    except Exception:
        loop = asyncio.new_event_loop()
    return loop
|
bb387622ab1e0e50bc1c71abc2c3d2132ae085e3
| 533,591 |
def split(u, axis=0):
    """Split an array into a list of arrays on the specified axis.
    Split an array into a list of arrays on the specified axis. The
    length of the list is the shape of the array on the specified axis,
    and the corresponding axis is removed from each entry in the list.
    This function does not have the same behaviour as :func:`numpy.split`.
    Parameters
    ----------
    u : array_like
        Input array
    axis : int, optional (default 0)
        Axis on which to split the input array
    Returns
    -------
    v : list of ndarray
        List of arrays
    """
    # Convert negative axis to positive
    if axis < 0:
        axis = u.ndim + axis
    # Construct axis selection slice
    slct0 = (slice(None),) * axis
    return [u[slct0 + (k,)] for k in range(u.shape[axis])]
|
8e793853d39909838003cefd3e830c5c5088dd37
| 474,486 |
def h_bubble(heigth_layer, epsi_vapor):
    """
    Calculates the height of the bubble layer.
    Parameters
    ----------
    epsi_vapor : float
        The vapor content of the bubble layer, [dimensionless]
    heigth_layer : float
        The height of the light layer of the liquid, [m]
    Returns
    -------
    h_bubble : float
        The height of the bubble layer, [m]
    References
    ----------
    Dytnerskiy, page 242
    """
    return heigth_layer / (1 - epsi_vapor)
|
f713bd71553fce50461359fd5946113e26927937
| 621,593 |
def is_basic_scheme(all_tags):
    """
    Check if a basic tagging scheme is used. Return True if so.
    Args:
        all_tags: a list of NER tags
    Returns:
        True if the tagging scheme does not use B-, I-, etc, otherwise False
    """
    for tag in all_tags:
        if len(tag) > 2 and tag[:2] in ('B-', 'I-', 'S-', 'E-'):
            return False
    return True
|
6333fd74029bd3ab6d689ac9792cfffd841eb3fe
| 550,834 |
def transform_six_with_metaclass(node):
    """Check if the given class node is defined with *six.with_metaclass*
    If so, inject its argument as the metaclass of the underlying class.
    """
    call = node.bases[0]
    node._metaclass = call.args[0]
    return node
|
611fdd207334d2bebfacfceb5a467b483336f497
| 204,158 |
from functools import reduce
def escape(raw, *chars):
    """
    Prefix special characters with backslashes, suitable for encoding in a larger string
    delimiting those characters.
    Args:
        raw (str):
            Unsafe input string.
        chars (str list):
            Control characters to be escaped.
    Returns:
        str:
            Escaped input string.
    """
    args = (raw, r"\\", *chars)
    return reduce(lambda current, char: current.replace(char, r"\{}".format(char)), args)
|
d1db2b9a1bc6fc9941f8d46bcd53d6662dce7b14
| 433,943 |
def buzz(number):
    """ returns whether a number is divisible by 5"""
    return not number % 5
|
d7b076fc998ecd70e59d87a9f3bec8f48c92ede9
| 296,430 |
def unpad_list(listA, val=-1):
    """Unpad list of lists which have values equal to 'val'.
    Parameters
    ----------
    listA : list
        List of lists of equal sizes.
    val : number, optional
        Value to unpad the lists.
    Returns
    -------
    list
        A list of lists without the padding values.
    Examples
    --------
    Remove the padding values of a list of lists.
    >>> from dbcollection.utils.pad import unpad_list
    >>> unpad_list([[1,2,3,-1,-1],[5,6,-1,-1,-1]])
    [[1, 2, 3], [5, 6]]
    >>> unpad_list([[5,0,-1],[1,2,3,4,5]], 5)
    [[0, -1], [1, 2, 3, 4]]
    """
    # filter the padding value out of each sublist (or out of the flat list)
    assert isinstance(listA, list), 'Input must be a list. Got {}, expected {}' \
        .format(type(listA), type(list))
    if isinstance(listA[0], list):
        return [list(filter(lambda x: x != val, l)) for l in listA]
    else:
        return list(filter(lambda x: x != val, listA))
|
8354f3c78168aafa86a0a07997895d964d81cb31
| 675,015 |
def totaled_total_commits(cc, sql_time_specification):  # pragma: no cover
    """Counts all the git commits in a given timeframe
    Args:
        cc(cursor)
        sql_time_specification(str): a sql command to limit the dates of the
            returned results
    Return:
        result(int): a count of all the commits
    """
    cc.execute("""SELECT COUNT(*)
                  FROM git_commit
                  WHERE %s""" % sql_time_specification)
    result = cc.fetchone()
    return int(result[0])
|
7270b23bb5d78684989a9a9c14afb46aeaeae291
| 417,277 |
def to_camel_case(field: str, prefix: str) -> str:
    """Convert a STAR-fusion output column name
    - Remove the prefix (either "Left" or "Right")
    - Convert the first character to lower case
    """
    # Remove side from field name
    new_field = field[len(prefix):]  # noqa: E203
    # Convert to camelCase
    camel_case = new_field[0].lower() + new_field[1:]
    return camel_case
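# Illustrative usage (hypothetical STAR-Fusion column names, not from the source):
# to_camel_case("LeftBreakpoint", "Left") -> "breakpoint"
# to_camel_case("RightGeneName", "Right") -> "geneName"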
|
7e470110ccf180192931bd340b289278df94e706
| 384,820 |
def ipasser(inbox, i=0):
    """
    Passes the "i"-th input from inbox. By default passes the first input.
    Arguments:
        - i(``int``) [default: ``0``]
    """
    return inbox[i]
|
254ed050745bd0e57f209757a45ce049c5fbee21
| 229,226 |
def get_post_fields(request):
    """parse through a request, and return fields from post in a dictionary"""
    fields = dict()
    for field, value in request.form.items():
        fields[field] = value
    return fields
|
2a8e67ad05449c6a3d57fd434cce9859da3f0e94
| 209,447 |