content (stringlengths 39 – 9.28k) | sha1 (stringlengths 40) | id (int64, 8 – 710k) |
---|---|---|
def read_file(path):
"""Returns the entirety of a file's contents"""
with open(path, 'r', encoding="utf-8") as content:
return content.read()
|
f718df4503093f14359d7dd33e68921667e711d3
| 161,635 |
def linear_predictor(X):
"""A simple linear effect with features 'a' and 'b'."""
return X["a"] + X["b"]
|
588e2876ba87b85c61b69fc4568a757100f4cdaa
| 341,871 |
def getParameter(parameter, key, default = None):
"""Gets a parameter from a dict, returns default value if not defined
Arguments:
parameter (dict): parameter dictionary
key (object): key
    default (object): default return value if parameter not defined
Returns:
object: parameter value for key
"""
if not isinstance(parameter, dict):
return default
    if key in parameter:
return parameter[key]
else:
return default
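# Usage sketch (hypothetical values):
#   getParameter({'alpha': 1}, 'alpha')        # -> 1
#   getParameter({'alpha': 1}, 'beta', 0)      # -> 0
#   getParameter(None, 'alpha', 'fallback')    # -> 'fallback' (non-dict input)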
|
b2146c8964062e64b1a3ada64b6d69e2da978353
| 510,390 |
def get_links(_stories):
"""
function extract links from received stories
:param _stories: list of stories including story title, link, unique id, published time
:return: list of links
"""
# Get the stories titles
links = []
for story in _stories:
links.append(story['link'])
return links
|
9bbe677f9b8600b232efa3ce78fc4b3edbbe0403
| 365,803 |
def peaks_with_signal(peaks, signal_range):
"""Returns peaks with at least one peak in signal_range
Parameters
----------
peaks: 2D array of peaks
signal_range: tuple(float)
(min, max) of signal window
Returns
-------
peaks_with_signal: 2D np.array
"""
peaks_with_signal = []
for peak in peaks:
got_signal = False
for p in peak:
if p > signal_range[0] and p < signal_range[1]:
got_signal = True
if got_signal:
peaks_with_signal.append(peak)
return peaks_with_signal
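# Usage sketch (plain nested lists shown for brevity; a 2D np.array works the same):
#   peaks_with_signal([[0.5, 2.0], [5.0, 6.0]], (1.0, 3.0))  # -> [[0.5, 2.0]]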
|
4fa8018eb04ce7af62fcac493403e8fd4c5b0930
| 185,893 |
def write_file(file_path, data):
""" Write data to a specified file path by appending """
    try:
        with open(file_path, 'a+') as fp:
            fp.write(data)
        return True
    except Exception:
        # propagate the error with its original traceback
        raise
|
222ab5a288805f32613297cb3f4ca48696f0e41c
| 373,432 |
def get_db_login(path='../data/db_details.txt'):
"""Get login details for database from file."""
with open(path, 'r') as f:
lines = [line.strip() for line in f]
login_dict = {i.split(":")[0].strip(): i.split(":")[1].strip()
for i in lines}
return login_dict
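# Sketch of the expected file format (hypothetical contents), one "key: value" per line:
#   user: admin
#   password: hunter2
# get_db_login() would then return {'user': 'admin', 'password': 'hunter2'}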
|
01f2e25a351af6d19d3c93989c00cba7cf3161a8
| 595,982 |
import re
def PostUploadHook(cl, change, output_api):
"""git cl upload will call this hook after the issue is created/modified.
This hook adds extra try bots list to the CL description in order to run
tests on the Windows 10 try bot in addition to CQ try bots.
"""
rietveld_obj = cl.RpcServer()
issue = cl.issue
description = rietveld_obj.get_description(issue)
if re.search(r'^CQ_INCLUDE_TRYBOTS=.*', description, re.M | re.I):
return []
bots = [
'tryserver.chromium.win:win10_chromium_x64_rel_ng',
]
results = []
new_description = description
new_description += '\nCQ_INCLUDE_TRYBOTS=%s' % ';'.join(bots)
results.append(output_api.PresubmitNotifyResult(
'Automatically added Win10 bot to run on CQ.'))
if new_description != description:
rietveld_obj.update_description(issue, new_description)
return results
|
b909505b807c79f5a8c08b530cc82bccb9b96338
| 530,070 |
def date_search(df, unix_ts_date):
"""!
Create a filtered DataFrame that only contains entries that occurred on or
past the specified date.
@param df DataFrame: contains the readings from the sensor
@param unix_ts_date Integer: the universal unix time of the starting date from
which to examine the data
@return Return a Dataframe that only contains entries that occurred on or
past the specified date.
"""
filtered_df = df.loc[df['Ts'] >= unix_ts_date]
return filtered_df
|
5d3c0ef7bb47de5d5ce5126f7d564d686cae3dca
| 210,466 |
import re
def get_exclamations(text):
""" Count exclamation marks attached to words or standalone"""
ex_count = re.findall("[a-z#]*[!]+", text.lower())
return len(ex_count)
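# Example: each run of '!' counts once, whether attached to a word or standalone:
#   get_exclamations("Wow! Really!!")  # -> 2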
|
4f79268d1f868b447c6fac1fdf6985e132ca55af
| 99,630 |
def open_path(path, mode='r'):
""" Python < 3.6 doesn't support calling open on a Pathlib Path.
Parameters
----------
path : Path
Pathlib path to open
"""
return open(str(path), mode)
|
55d9a49fa5097df279b30ed29d5f82db9f335144
| 159,917 |
import ipaddress
def get_ip(request):
"""
Retrieves the remote IP address from the request data. If the user is
behind a proxy, they may have a comma-separated list of IP addresses, so
we need to account for that. In such a case, only the first IP in the
list will be retrieved. Also, some hosts that use a proxy will put the
REMOTE_ADDR into HTTP_X_FORWARDED_FOR. This will handle pulling back the
IP from the proper place.
"""
    # if neither header contains a value, just use local loopback
ip_address = request.META.get('HTTP_X_FORWARDED_FOR',
request.META.get('REMOTE_ADDR', '127.0.0.1'))
if ip_address:
# make sure we have one and only one IP
if ',' in ip_address:
ip_address = ip_address.split(',')[0].strip()
    # check if the IP address is valid
    try:
        ipaddress.ip_address(ip_address)
    except ValueError:
        ip_address = ip_address[:39]
return ip_address
|
d5e030ca60dfb7400dffab7ed02335e146e4b91f
| 480,775 |
def calculate_alpha(mu1: float, mu2: float, a: float) -> float:
"""
Args:
mu1: mean of the first Gaussian random variable :math:`Y_1`
mu2: mean of the second Gaussian random variable :math:`Y_2`
        a: this number is calculated using standard deviations and the coefficient of linear correlation
Note:
alpha can't be calculated in the case :math:`a=0`.
"""
return (mu2 - mu1) / a
|
f3857e7792a4afecb6887f62fbe3ea3ff9596dcd
| 484,328 |
def _expr_to_smtlib(e, daggify=True):
"""
Dump the symbol in its smt-format depending on its type
:param e: symbol to dump
    :param daggify: if True, use a linear-size representation with 'let'
                    operators that expresses the formula as a dag; if False, use a simpler
                    (but possibly exponential) representation that expands the formula as a tree
:return string: smt-lib representation of the symbol
"""
if e.is_symbol():
return "(declare-fun %s %s)" % (e.symbol_name(), e.symbol_type().as_smtlib())
else:
return "(assert %s)" % e.to_smtlib(daggify=daggify)
|
8afb21100585f48bc3ccea58e95b02cacfb67338
| 429,255 |
def is_windows_path(path):
"""Checks if the path argument is a Windows platform path."""
return '\\' in path or ':' in path or '|' in path
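# Examples (note the heuristic also flags any string containing ':'):
#   is_windows_path(r'C:\Users\me')  # -> True
#   is_windows_path('/usr/bin')      # -> False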
|
88d856751744f0400bd4db09013492215b363c2a
| 638,591 |
def no_validation(s):
"""anything goes"""
return s
|
7ab880b9e85577b600cf583a72d0680c339f06be
| 159,160 |
from pathlib import Path
def get_datapath_base(data_type: str, filename: str) -> Path:
"""Return the path to the footprints test data file"""
return Path(__file__).resolve(strict=True).parent.joinpath(f"../data/{data_type}/{filename}")
|
9bccb92b1c4a5dbaa625b2fa52b3c77163104c11
| 38,200 |
def ensure_chosen_alternatives_are_in_user_alt_ids(choice_col,
wide_data,
availability_vars):
"""
Ensures that all chosen alternatives in `wide_df` are present in the
`availability_vars` dict. Raises a helpful ValueError if not.
Parameters
----------
choice_col : str.
Denotes the column in `wide_data` that contains a one if the
alternative pertaining to the given row was the observed outcome for
the observation pertaining to the given row and a zero otherwise.
wide_data : pandas dataframe.
Contains one row for each observation. Should contain the specified
`choice_col` column.
availability_vars : dict.
There should be one key value pair for each alternative that is
observed in the dataset. Each key should be the alternative id for the
alternative, and the value should be the column heading in `wide_data`
that denotes (using ones and zeros) whether an alternative is
available/unavailable, respectively, for a given observation.
Alternative id's, i.e. the keys, must be integers.
Returns
-------
None.
"""
if not wide_data[choice_col].isin(availability_vars.keys()).all():
msg = "One or more values in wide_data[choice_col] is not in the user "
msg_2 = "provided alternative ids in availability_vars.keys()"
raise ValueError(msg + msg_2)
return None
|
73fe39be549de10a5eac9502ba9ad0e34aa633a8
| 310,372 |
def squeeze_generic(a, axes_to_keep):
"""squeeze_generic
Numpy squeeze implementation keeping <axes_to_keep> dimensions.
Parameters
----------
a: numpy.ndarray
array to be squeezed
axes_to_keep: tuple, range
tuple of axes to keep from original input
Returns
----------
numpy.ndarray
`axes_to_keep` from `a`
Example
----------
>>> a = np.random.rand(3,5,1)
>>> squeeze_generic(a, axes_to_keep=range(2)).shape
(3, 5)
Notes
----------
From: https://stackoverflow.com/questions/57472104/is-it-possible-to-squeeze-all-but-n-dimensions-using-numpy
"""
out_s = [s for i, s in enumerate(a.shape) if i in axes_to_keep or s != 1]
return a.reshape(out_s)
|
1aaadaa73b533256a6a5206b3629044ad62eb9d2
| 404,317 |
def legacySTAkwargs(**kwargs):
"""Provide support for legacy SpikeTrainArray
kwargs. This function is primarily intended to be
a helper for the new STA constructor, not for
general-purpose use.
kwarg: time <==> timestamps <==> abscissa_vals
kwarg: data <==> ydata
kwarg: unit_ids <==> series_ids
kwarg: unit_labels <==> series_labels
kwarg: unit_tags <==> series_tags
Examples
--------
sta = nel.SpikeTrainArray(time=..., )
sta = nel.SpikeTrainArray(timestamps=..., )
sta = nel.SpikeTrainArray(abscissa_vals=..., )
"""
def only_one_of(*args):
num_non_null_args = 0
out = None
for arg in args:
if arg is not None:
num_non_null_args += 1
out = arg
if num_non_null_args > 1:
            raise ValueError('multiple conflicting arguments received')
return out
# legacy STA constructor support for backward compatibility
abscissa_vals = kwargs.pop('abscissa_vals', None)
timestamps = kwargs.pop('timestamps', None)
time = kwargs.pop('time', None)
# only one of the above, otherwise raise exception
abscissa_vals = only_one_of(abscissa_vals, timestamps, time)
if abscissa_vals is not None:
kwargs['abscissa_vals'] = abscissa_vals
# Other legacy attributes
series_ids = kwargs.pop('series_ids', None)
unit_ids = kwargs.pop('unit_ids', None)
series_ids = only_one_of(series_ids, unit_ids)
kwargs['series_ids'] = series_ids
series_labels = kwargs.pop('series_labels', None)
unit_labels = kwargs.pop('unit_labels', None)
series_labels = only_one_of(series_labels, unit_labels)
kwargs['series_labels'] = series_labels
series_tags = kwargs.pop('series_tags', None)
unit_tags = kwargs.pop('unit_tags', None)
series_tags = only_one_of(series_tags, unit_tags)
kwargs['series_tags'] = series_tags
return kwargs
|
516f69c358d59f415f54ffa17ca585f7436e1531
| 323,521 |
import requests
import re
def get_urls(root_url, regex_str):
"""
Scrape all regex matches from a web page. Used to extract data from NOAA
servers since their html structure isn't amenable to beautiful soup.
"""
html = requests.get(root_url).text
date_re = re.compile(regex_str)
url_suffixes = set(date_re.findall(html))
url_suffixes = [x.encode('ascii') for x in url_suffixes]
return url_suffixes
|
0f76bcde6380b978cb6fa66c0dee99efa331d640
| 218,636 |
def calcul_distance_euclidienne(point_1, point_2):
"""
Calcul de la distance euclidienne au carré entre `points_1` et `point_2`.
Soient `point_1` = [x1, x2, ..., xn] et `point_2` = [y1, y2, ..., yn]
alors distance = somme de i=1 à n de (xi - yi)**2 .
Paramètres
----------
point_1 : array_like
Liste des coordonnées de `point_1` sans la classe.
point_2 : array_like
Liste des coordonnées de `point_2` sans la classe.
Retours
-------
distance : float
Distance euclidienne au carré entre `points_1` et `points_2`.
"""
nb_parametre = len(point_1)
distance = 0
for parametre in range(nb_parametre):
somme = (point_1[parametre] - point_2[parametre]) ** 2
distance += somme
return distance
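# Example: calcul_distance_euclidienne([0, 0], [3, 4])  # -> 25 (squared distance)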
|
8e51009db436a5187bbc1e0a76672f9680fe2115
| 147,112 |
from typing import List
def unity2bullet_position(unity_position: List[float]):
"""Converts from unity to bullet position.
Args:
unity_position: The xyz position in unity coordinates.
Returns:
bullet_position: The xyz position in bullet coordinates.
"""
x, y, z = unity_position
bullet_position = [-z, x, y]
return bullet_position
|
9bb221dcde0d222e269f9a741bb51913b2735acf
| 522,885 |
def create_link(url):
"""Create an html link for the given url"""
return (f'<a href = "{url}" target="_blank">{url}</a>')
|
77a5375369be2be140a69a4521c50a92cee2d5ed
| 704,787 |
def add_two(val):
"""
Add 2 to a number.
Parameters
----------
val: number
The number to which two should be added.
Returns
-------
The number plus two.
"""
return val + 2
|
a1c32a975841518a5fd70a745859839aad743635
| 201,134 |
def fillna_column(df, col_name, val):
"""Replace all N/As in a given column with a given value.
Doesn't mutate the original dataframe but returns a new dataframe.
"""
result = df.copy()
result[col_name] = result[col_name].fillna(val)
return result
|
20496f87cfe97a587872f0f932486e45557dd6c6
| 286,958 |
from typing import Tuple
def overlay_onto_background_image_bboxes_helper(
bbox: Tuple, overlay_size: float, x_pos: float, y_pos: float, **kwargs
) -> Tuple:
"""
The src image is overlaid on the dst image offset by (`x_pos`, `y_pos`) & with a
size of `overlay_size` (all relative to the dst image dimensions). So the bounding
box is also offset by (`x_pos`, `y_pos`) & scaled by `overlay_size`. It is also
possible that some of the src image will be cut off, so we take the max with 0/min
with 1 in order to crop the bbox if needed
"""
left_factor, upper_factor, right_factor, lower_factor = bbox
return (
max(0, left_factor * overlay_size + x_pos),
max(0, upper_factor * overlay_size + y_pos),
min(1, right_factor * overlay_size + x_pos),
min(1, lower_factor * overlay_size + y_pos),
)
|
c3ba4f1e53c0ddbd892dfd0de7c8880156cedc9c
| 376,729 |
from datetime import datetime
def read_log_file(path):
"""
Parse a LabView log file and return a dictionary of the parameters.
Parses the text of a file line-by-line according to how they're written. Several values present in only newer
log files are attempted and excepted as None. Failure to parse one of those lines will pass and return the
dictionary as-is. The output of this is designed to and must return a dictionary such that:
LogFile(**read_log_file(file)) creates a LogFile without error.
:param Path path: path to the file to be read.
:return dict: returns a dictionary containing all logged parameters
"""
data = path.read_text().split('\n')
logdata = {
'date': datetime.strptime(data[18].split('\t')[0], '%Y%j%H%M%S'),
'sample_time': data[0].split('\t')[1],
'sample_flow': data[1].split('\t')[1],
'sample_type': data[2].split('\t')[1],
'backflush_time': data[3].split('\t')[1],
'desorb_temp': data[4].split('\t')[1],
'flashheat_time': data[5].split('\t')[1],
'inject_time': data[6].split('\t')[1],
'bakeout_temp': data[7].split('\t')[1],
'bakeout_time': data[8].split('\t')[1],
'carrier_flow': data[9].split('\t')[1],
'sample_flow_act': data[20].split('\t')[1],
'sample_num': data[10].split('\t')[1],
'ads_trap': data[12].split('\t')[1],
'sample_p_start': data[13].split('\t')[1],
'sample_p_during': data[19].split('\t')[1],
'gcheadp_start': data[14].split('\t')[1],
'gcheadp_during': data[31].split('\t')[1],
'wt_sample_start': data[15].split('\t')[1],
'wt_sample_end': data[21].split('\t')[1],
'ads_a_sample_start': data[16].split('\t')[1],
'ads_b_sample_start': data[17].split('\t')[1],
'ads_a_sample_end': data[22].split('\t')[1],
'ads_b_sample_end': data[23].split('\t')[1],
'trap_temp_fh': data[24].split('\t')[1],
'trap_temp_inject': data[26].split('\t')[1],
'trap_temp_bakeout': data[28].split('\t')[1],
'battv_inject': data[27].split('\t')[1],
'battv_bakeout': data[29].split('\t')[1],
'gc_start_temp': data[25].split('\t')[1],
'gc_oven_temp': data[32].split('\t')[1],
'wt_hot_temp': data[30].split('\t')[1],
'sample_code': data[18].split('\t')[0],
'mfc1_ramp': None,
'trapheatout_flashheat': None,
'trapheatout_inject': None,
'trapheatout_bakeout': None
}
try:
logdata['mfc1_ramp'] = data[33].split('\t')[1]
logdata['trapheatout_flashheat'] = data[34].split('\t')[1]
logdata['trapheatout_inject'] = data[35].split('\t')[1]
logdata['trapheatout_bakeout'] = data[36].split('\t')[1]
except IndexError:
pass
return logdata
|
b9f060d4392267ef5862dd4ff44ea8a62be203ef
| 116,152 |
def game_over(player_decks):
"""
Determines if either of the player decks are empty
and if so, game is over.
Parameters:
player_decks -
Decks for each player
Returns:
True if either deck is empty, False otherwise
"""
return_value = False
for deck in player_decks:
if len(deck) == 0:
print("GAME OVER")
return_value = True
break
return return_value
|
7806a483b4c5decc5b7ab7ff3bd909547da7a3e4
| 104,078 |
def normalized_coords_from_coords(coords: tuple, shape: tuple) -> tuple:
"""Return the normalized coordinates in [0, 1] for coordinates in
[0, shape]. Note that a coordinate (normalized or otherwise) centered on
pixel 0 should be slightly larger than 0."""
return tuple([c/s for (c,s) in zip(coords, shape)])
|
72bf0dd456390c25a50bde0326be8b22d0a73ac7
| 247,545 |
def human_readable(millis):
"""
Take a number of milliseconds and turn it into a string with the format
"min:sec.millis".
"""
    if not isinstance(millis, int):
        raise TypeError('millis must be a non-negative integer')
    if millis < 0:
        raise ValueError('millis must be a non-negative integer')
    seconds, ms = divmod(millis, 1000)
    minutes, seconds = divmod(seconds, 60)
    return '{}:{:02d}.{:03d}'.format(minutes, seconds, ms)
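# Example (with zero-padded seconds and milliseconds):
#   human_readable(61005)  # -> '1:01.005'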
|
bf58816ea3cb31495268a440fd400abb58e5edba
| 397,515 |
def statusError(pkt):
"""
    Grabs the status (int) from an error packet and returns it. It returns -1
if the packet is not a status packet.
"""
if pkt[7] == 0x55:
return pkt[8]
return -1
|
03322c97e3b1563bc2dec9498317fc7672ca51c8
| 112,904 |
def value_left(self, right):
"""
Returns the value of the right type instance to use in an
operator method, namely when the method's instance is on the
left side of the expression.
"""
return right.value if isinstance(right, self.__class__) else right
|
f28c2f0548d3e004e3dd37601dda6c1ea5ab36f6
| 709,624 |
import mpmath
def invsf(p, loc=0, scale=1):
"""
Inverse survival function of the logistic distribution.
"""
with mpmath.extradps(5):
p = mpmath.mpf(p)
loc = mpmath.mpf(loc)
scale = mpmath.mpf(scale)
x = loc + scale*(mpmath.log1p(-p) - mpmath.log(p))
return x
|
a50fd557527d5a1c65892bb901f4fbe2851dc43f
| 498,032 |
from typing import List
from typing import Dict
from typing import Tuple
import collections
def get_dataset_stats(sample_list: List[str],
sample_to_study: Dict[str, str],
sample_metadata: dict,
sample_to_label: Dict[str, str]) -> Tuple[collections.Counter,
collections.Counter,
collections.Counter,
dict]:
"""
Calculate statistics about a list of samples
Arguments
---------
sample_list: A list of sample ids to calculate statistics for
sample_to_study: A mapping from sample ids to study ids
sample_metadata: A dictionary containing metadata about samples
sample_to_label: a mapping from sample ids to disease labels
Returns
-------
studies: The number of samples in each study id
platforms: The number of samples using each expression quantification platform
diseases: The number of samples labeled with each disease
study_disease_counts: The number of samples corresponding to each disease in each study
"""
studies = []
platforms = []
diseases = []
study_disease_counts = {}
for sample in sample_list:
study = sample_to_study[sample]
studies.append(study)
platform = sample_metadata[sample]['refinebio_platform'].lower()
platforms.append(platform)
disease = sample_to_label[sample]
diseases.append(disease)
if study in study_disease_counts:
study_disease_counts[study][disease] = study_disease_counts[study].get(disease, 0) + 1
else:
study_disease_counts[study] = {disease: 1}
studies = collections.Counter(studies)
platforms = collections.Counter(platforms)
diseases = collections.Counter(diseases)
return studies, platforms, diseases, study_disease_counts
|
4fb7eb10c8624ca028690fb48d7a6eb6099fb6e7
| 461,510 |
import math
def probability(ec, en, t):
"""
Probability function
:param ec: current energy
:param en: next energy
:param t: temperature ratio
:return: probability value
"""
return math.exp((ec - en) / t)
|
c055081cd93473ecf4abeab1c8b5cc36fb38f0a4
| 13,930 |
def selectNodeByAttributeValue(graph, attr_field, attr_value):
"""
    Returns a list of nodes filtered by the given attribute field and corresponding value.
Note: To select a single unique node. You must define a unique attribute value in your layer.
PARAMETER(S)
: graph : A networkx graph object.
: attr_field : Attribute field name of the feature.
: attr_value : Attribute value of the feature
RETURN(S)
    : nodes_by_attr_value : List of filtered nodes.
"""
    g = graph
    attr_field = str(attr_field)
nodes_data = g.nodes(data=True)
nodes_by_attr_value = []
for n in nodes_data:
if n[1]['geom']['properties'][attr_field] == attr_value:
nodes_by_attr_value.append(n)
return nodes_by_attr_value
|
41a89f72e9635c252bca0517878ad264e8876c05
| 527,673 |
def get_path(whereis_output):
"""Returns the full path of a command from the output of a whereis command."""
if len(whereis_output) == 1:
data = whereis_output[0]
else:
data = whereis_output[1]
more_data = data.split()
if len(more_data) < 2:
return ""
else:
return more_data[1]
|
5ebfe3a8271111e156153bc74a72eae920ed5d24
| 235,917 |
def get_submission_files(jobs):
"""Return the filenames of all jobs within a submission.
Arguments:
jobs: jobs retrieve filenames from
Returns:
array of all filenames within the jobs given"""
job_list = []
for job in jobs:
if job.filename not in job_list:
job_list.append(job.filename)
return job_list
|
b823202ae9afd6a8791f0d73e6d9a996e848e8e1
| 458,246 |
def VLOOKUP(table, **field_value_pairs):
"""
Vertical lookup. Searches the given table for a record matching the given `field=value`
arguments. If multiple records match, returns one of them. If none match, returns the special
empty record.
The returned object is a record whose fields are available using `.field` syntax. For example,
`VLOOKUP(Employees, EmployeeID=$EmpID).Salary`.
Note that `VLOOKUP` isn't commonly needed in Grist, since [Reference columns](col-refs.md) are the
best way to link data between tables, and allow simple efficient usage such as `$Person.Age`.
    `VLOOKUP` is exactly equivalent to `table.lookupOne(**field_value_pairs)`. See
[lookupOne](#lookupone).
For example:
```
VLOOKUP(People, First_Name="Lewis", Last_Name="Carroll")
VLOOKUP(People, First_Name="Lewis", Last_Name="Carroll").Age
```
"""
return table.lookupOne(**field_value_pairs)
|
2a865196cd528593b757aded7ca58fbbe0cd862a
| 106,382 |
import math
def determine_max_possible_base_size(h: int, w: int, crop_sz: int) -> int:
"""Given a crop size and original image dims for aspect ratio, determine
the max base_size that will fit within the crop.
"""
longer_size = max(h, w)
if longer_size == h:
scale = crop_sz / float(h)
base_size = math.floor(w * scale)
else:
scale = crop_sz / float(w)
base_size = math.floor(h * scale)
return base_size
|
eaf1a908d7217d398fd46ea93a7d6cea9c8e15b2
| 546,026 |
import secrets
def randbelow(exclusive_upper_bound: int) -> int:
"""Return a random int in the range [0, n)."""
return secrets.randbelow(exclusive_upper_bound)
|
06bf74858cb0ff057cae64351159d5198fd895b0
| 428,428 |
def rprpet_point(pet, snowmelt, avh2o_3, precip):
"""Calculate the ratio of precipitation to ref evapotranspiration.
The ratio of precipitation or snowmelt to reference
evapotranspiration influences agdefac and bgdefac, the above- and
belowground decomposition factors.
Parameters:
pet (float): derived, reference evapotranspiration
        snowmelt (float): derived, snowmelt occurring this month
avh2o_3 (float): derived, moisture in top two soil layers
precip (float): input, precipitation for this month
Returns:
rprpet, the ratio of precipitation or snowmelt to reference
evapotranspiration
"""
if snowmelt > 0:
rprpet = snowmelt / pet
else:
rprpet = (avh2o_3 + precip) / pet
return rprpet
|
94473f9dc1dab4dffaf8a7556ed4109a3a7497a1
| 660,122 |
import ast
def can_eval(s):
"""Returns True if the string can be evaluated."""
if not s:
return False
try:
ast.parse(s, mode="eval")
except SyntaxError:
return False
else:
return True
|
cca587373ad5c9d57a6f54b1e15d95928df39203
| 155,104 |
def digits_to_number(digits, running=0):
"""Convert a list of digits to an integer"""
if len(digits) == 0:
return running
else:
r = (running * 10) + int(digits[0])
return digits_to_number(digits[1:], r)
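# Example: digits_to_number([4, 2])  # -> 42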
|
85dc57d4611578dcf65d79615bce5ee7a449579f
| 487,367 |
def binary_search(val, grid):
"""
Binary search that returns the bin index of a value `val` given
grid `grid`
Some special cases:
`val` < min(`grid`) --> -1
`val` > max(`grid`) --> size of bins
`val` = a grid point --> bin location whose upper bound is `val`
    (-1 if val = min(grid))
"""
left = 0
right = len(grid) - 1
mid = -1
while left <= right:
mid = (int((left + right)/2))
if grid[mid] < val: left = mid + 1
else: right = mid - 1
return int(right)
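# Examples of the bin-index semantics:
#   binary_search(1.5, [0, 1, 2, 3])  # -> 1 (falls between grid[1] and grid[2])
#   binary_search(-1, [0, 1, 2])      # -> -1 (below the grid)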
|
0b26653c30ccf94eb062aa5fda685a220a30de8c
| 74,601 |
def parse_query(query_to_parse):
"""
Converts a comma or space-separated string of query terms into a list to use
as filters. The use of positional arguments on the CLI provides lists (which
we join and resplit to avoid any formatting issues), while strings passed
from pssh.py just get split since those should never come over as a list.
"""
if isinstance(query_to_parse, list):
_query = ','.join(query_to_parse)
else:
_query = query_to_parse.replace(' ',',')
split_query = _query.split(',')
### Pick up passed --region query from pssh.py
parsed_query = [x for x in split_query if not x.startswith('--region=')]
region_query = [x for x in split_query if x.startswith('--region')]
parsed_regions = ','.join([x.split('=')[1] for x in region_query])
return parsed_query, parsed_regions
|
edd1cb6b42cd895a2eeb5f4f024dc1f2c92743af
| 48,941 |
def compute_spiral_diagonal_sum(first_elem, loop_wh):
"""
Compute the sum of the four diagonal elements for the given loop
first_elem: First element (in the right-most, second-down element of this loop)
loop_wh: Width / height of the spiral's loop to compute the diag-sum
return: sum of the four diagonal elements
"""
lower_right = first_elem + loop_wh - 2
return 4 * lower_right + 6 * loop_wh - 6
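# Example: for the 3x3 loop of an Ulam-style spiral the first loop element is 2,
# and the diagonal corners are 3, 5, 7, 9:
#   compute_spiral_diagonal_sum(2, 3)  # -> 24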
|
42ec7f1d95afbcb84e8bf90415803e1f950a2c01
| 339,788 |
import calendar
def date_format(date_ymd: str) -> str:
"""Convert date to IMAP SEARCH Command acceptable format.
Args:
date_ymd (str): The date in `YYYY-MM-DD` format.
Returns:
str: The date in `DD-MM-YYYY` format and the month replaced with its
abbreviated form.
"""
date = date_ymd.split("-")[::-1]
return f"{date[0]}-{calendar.month_abbr[int(date[1])]}-{date[2]}"
|
49e494500e75460a79b52d5d9f95424e128923ae
| 570,337 |
def calculate_monthly_average(total: int, sales: list[list[str]]) -> float:
"""Calculate the monthly sale average."""
return total / len(sales)
|
e562fd6e7692cfc9ef7c72acce3867b21d804152
| 356,928 |
def mask_out_bits(segbits, mask, tags_to_mask=None):
"""
Given a set of bits and a list of tags to affect (optional) removes all
the bits from each tag that are present (and equal) in the masking set.
"""
if tags_to_mask is None:
tags_to_mask = segbits.keys()
# Mask out matching bits
for tag in tags_to_mask:
bits = segbits[tag]
bits = set(bits) - set(mask)
segbits[tag] = bits
return segbits
|
5d8358b225e4e28e668eab4d4a5f9b79fec3bd23
| 551,891 |
def has_recent_snapshot(webcam, latest_snapshot):
"""
Returns a tuple with a boolean telling whether the webcam image was updated
after latest_snapshot, and the latest of the two dates
"""
updated = int(webcam.updated_epoch)
return updated > latest_snapshot, max(updated, latest_snapshot)
|
def6ba367f3cde5494f66e24be8da75bf4ef7861
| 523,678 |
def query_split(msg):
"""Split a message's query up into (key, [*value]) pairs from a
?key=value&key2=value2 style Uri-Query options.
Keys without an `=` sign will have a None value, and all values are
expressed as an (at least 1-element) list of repetitions.
>>> m = aiocoap.Message(uri="coap://example.com/foo?k1=v1.1&k1=v1.2&obs")
>>> query_split(m)
{'k1': ['v1.1', 'v1.2'], 'obs': [None]}
"""
result = {}
for q in msg.opt.uri_query:
if '=' not in q:
k = q
# matching the representation in link_header
v = None
else:
k, v = q.split('=', 1)
result.setdefault(k, []).append(v)
return result
|
73ed6352364b4ce10d241b862f067fa48b26e146
| 543,685 |
def concatenate(*args):
"""
If no argument is None, then join all strings.
:param args: Can be strings, MyString objects or None
:return: None or string.
"""
for e in args:
if e is None:
return None
return ''.join([str(e) for e in args])
|
165147efc3b6c6c263214d6320cfc7a02a257368
| 401,423 |
import re
def re_lines(pat, text, match=True):
"""Return a list of lines selected by `pat` in the string `text`.
If `match` is false, the selection is inverted: only the non-matching
lines are included.
Returns a list, the selected lines, without line endings.
"""
assert len(pat) < 200, "It's super-easy to swap the arguments to re_lines"
return [l for l in text.splitlines() if bool(re.search(pat, l)) == match]
|
aa04cfdbf5dfa2c3aed6d22e3f3671f69ab4481f
| 81,829 |
def ImCrossTermID(atom_names):
"""
# From a list of 4 atom names, corresponding two a pair
# of angles between atoms# 3,2,1 and 3,2,4,
# and replaces the list of atoms with a canonical tuple
# which eliminates order ambiguity.
# If you swap the first and last atom (#1 and #4), then
# the -pair- of angles is the same. Hence if atom #1
# has a name which is lexicographically less than atom #4,
# swap atoms 1 and 4.
"""
if atom_names[0] <= atom_names[3]:
return (atom_names[0]+','+atom_names[1]+','+
atom_names[2]+','+atom_names[3])
else:
return (atom_names[3]+','+atom_names[1]+','+
atom_names[2]+','+atom_names[0])
|
119bc4e36d20eae6c2b2d1fac4eab5c60ebc59f2
| 350,904 |
def cast_to_date(session):
"""cast session.data datetime object to a date() instance
"""
if session.data is not None:
session.data = session.data.date()
return session.data
|
6c38c9a14ce6bdca68ba90f06dba9a5aa6254eec
| 211,909 |
import zipfile
import tempfile
def training_model_archive(filepath):
"""
Create zip archive file for training model.
:param filepath:
:return: temporary archive file
"""
if zipfile.is_zipfile(filepath):
return open(filepath, 'rb')
tmp_file = tempfile.NamedTemporaryFile(suffix='.zip')
with zipfile.ZipFile(tmp_file.name, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip:
new_zip.write(filepath)
tmp_file.seek(0)
return tmp_file
|
4d8dd1d6166c50d8c368cb34599126ab7d838333
| 61,459 |
import inspect
def sig_arg_names(fun):
"""Return argument names from function signature."""
sig = inspect.signature(fun)
return tuple(sig.parameters.keys())
|
dcf53ef09b7094242572f4c1b0d7950b776c597f
| 296,480 |
def add_delay_columns(data):
"""Compute edge durations and delays."""
data["event_delay"] = (
data["event_time_real"] - data["event_time_planned"]
).dt.total_seconds().astype("float32") / 60
data["edge_duration_planned"] = (
data["next_event_time_planned"] - data["event_time_planned"]
).dt.total_seconds().astype("float32") / 60
data["edge_duration_real"] = (
data["next_event_time_real"] - data["event_time_real"]
).dt.total_seconds().astype("float32") / 60
data["edge_delay"] = (
data["edge_duration_real"] - data["edge_duration_planned"]
).astype("float32")
return data
|
a9c3d623638a1506bc0b79cf2a356eecd8b20ee5
| 331,562 |
def GenerateAuthProviderCmdArgs(kind, cluster_id, location):
"""Generates command arguments for kubeconfig's authorization provider.
Args:
kind: str, kind of the cluster e.g. aws, azure.
cluster_id: str, ID of the cluster.
location: str, Google location of the cluster.
Returns:
The command arguments for kubeconfig's authorization provider.
"""
template = ('container {kind} clusters print-access-token '
'{cluster_id} --location={location}')
return template.format(kind=kind, cluster_id=cluster_id, location=location)
|
1f10d734d8392165360e33fc47cc4ef80498dff7
| 201,634 |
import math
def __even__(f):
"""
Help function to round each number to even.
:param f: given number
:return: even approximation of the given number
"""
return math.ceil(f / 2.) * 2
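# Examples: __even__(5)  # -> 6 (rounded up); __even__(4)  # -> 4 (already even)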
|
3faf2160cadaff9ab97660ffdac51aaeb2ca52af
| 412,104 |
def _check_flexibility_scenario(flex):
"""Checks the flexibility scenario input to
:py:func:`partition_flexibility_by_sector`.
:param str flex: The input flexibility scenario that will be checked. Can be any of:
*'Base'* or *'Enhanced'*.
    :return: (*str*) -- The reformatted (capitalized) flexibility scenario.
    :raises TypeError: if flex is not input as a str.
    :raises ValueError: if flex is not a valid flexibility scenario.
"""
# Check that the input is of an appropriate type
if not isinstance(flex, str):
raise TypeError("Flexibility scenario must be input as a str.")
# Reformat flex
flex = flex.capitalize()
# Check that flex is valid
if flex not in {"Base", "Enhanced"}:
raise ValueError(f"{flex} is not a valid flexibility scenario.")
# Return the reformatted flex
return flex
|
e7bfceec23f6e02e7d8895f0593b3704628a8e28
| 577,562 |
def add_keys(destdict, srclist, value=None):
"""
Nests keys from srclist into destdict, with optional value set on the final key.
:param dict destdict: the dict to update with values from srclist
:param list srclist: list of keys to add to destdict
:param value: final value to set
:return: destination dictionary
:rtype: dict
"""
if len(srclist) > 1:
        destdict[srclist[0]] = destdict.get(srclist[0], {})
add_keys(destdict[srclist[0]], srclist[1:], value)
else:
destdict[srclist[0]] = value
return destdict
|
b326da0380090b3a749dc2131161b94ed67ae7db
| 537,617 |
def compute_squared_bp(channels):
"""Compute band power estimates by squaring channel values."""
squared_channels = dict()
for channel in channels:
if channel == "Time":
squared_channels[channel] = channels[channel][:]
continue
squared_channel = [x**2 for x in channels[channel]]
squared_channels[channel] = squared_channel
return squared_channels
|
29f596a15b73beb857ad7f4fb4b07919b5602a79
| 151,597 |
def b_to_gb(value):
"""Bytes to Gibibytes"""
return round(int(value) / (1000**3), 3)
|
7f4abbd4db51e1a57917948405f0ce4affb61149
| 172,153 |
import torch
import math
def exp_sigmoid(x, exponent=10.0, max_value=2.0, threshold=1e-7):
"""Exponentiated Sigmoid pointwise nonlinearity.
Bounds input to [threshold, max_value] with slope given by exponent.
Args:
x: Input tensor.
exponent: In nonlinear regime (away from x=0), the output varies by this
factor for every change of x by 1.0.
max_value: Limiting value at x=inf.
        threshold: Limiting value at x=-inf. Stabilizes training when outputs are
pushed to 0.
Returns:
A tensor with pointwise nonlinearity applied.
"""
return max_value * torch.sigmoid(x)**math.log(exponent) + threshold
|
e0f4076a75d760194f6f78d4c38b319aa9bdc690
| 502,361 |
def sort_response(response_dict, *args):
"""
Used in tests to sort responses by two or more keys.
For example if response includes experimentKey and FeatureKey, the function
will sort by primary and secondary key, depending which one you put first.
The first param will be primary sorted, second secondary.
Can handle arbitrary number of arguments.
:param response_dict: response
:param args: usually experimentKey and featureKey
:return: sorted response
"""
return sorted(response_dict, key=lambda k: tuple(map(k.__getitem__, args)))
|
3e4993469071c79e107aaaee8979005e4161d37b
| 381,079 |
def distance_difference_calc(r2, s1, gap):
"""
Computes the necessary distance between mocks given geometrical
components of the survey.
Parameters
-----------
r2 : `float`
s1 : `float`
gap : `float`
Returns
----------
dist_diff : `float`
"""
# Converting to floats
r2 = float(r2)
s1 = float(s1)
gap = float(gap)
dist_diff = (((r2 + gap)**2 - (0.5 * s1)**2)**0.5) - r2
return dist_diff
|
499fbfddab00c2e3d2a81861b0e23ed07d4e0b5c
| 687,105 |
def point_dist(p0, p1):
"""
Calculate distance between two points in 2D.
:param p0: first point, array like coordinates
:param p1: second point, array like coordinates
:return: distance, float
"""
return ((p1[0] - p0[0])**2 + (p1[1] - p0[1])**2)**(1/2)
|
a7bed525e0256f180a2f0702697bdb19834c8b5f
| 196,625 |
from typing import Dict
from typing import Any
import yaml
def _mock_yaml_load() -> Dict[str, Any]:
"""Create a mock openAPI-specification dokument."""
with open("./tests/files/petstore.yaml", "r") as file:
_yaml = yaml.safe_load(file)
return _yaml
|
2093e6696a6eca3bff7a56f85bde582accf151aa
| 300,475 |
def get_class(class_name, module_name):
"""
Retrieves a class based off the module using __import__.
"""
if not isinstance(class_name, str) or not isinstance(module_name, str):
return None
try:
# requiring parameter `fromlist` to get specified module
module = __import__(module_name, fromlist=[''])
if hasattr(module, class_name):
return getattr(module, class_name)
except ImportError:
return None
|
4c0b46c0d56e05454dff8bd395e61541a5ff80f9
| 101,749 |
import random
def generate_random_key(seed_rule, start, end=None):
"""
依照指定的亂數長度範圍參數,產生隨機網址碼
Args:
seed_rule(str):亂數的種子規則 e.g abcABC1234+
start(int): 起始的長度
end(int): 最長的長度 (如果沒有提供則為 start 的固定長度)
Returns:
url_key(str): 隨機網址碼 (長度 15 - 20)
"""
if end:
url_length = random.randint(start, end)
else:
url_length = start
# 產生使用者的隨機碼
url_key = ''.join(random.sample(seed_rule, url_length))
# 是唯一的,沒有衝突
return url_key
|
dd8edd61c2458fcdba063f2ce591123adf727022
| 144,186 |
def remove_dups(list_):
"""Get new list without duplicates, order-preserving."""
seen = set()
seen_add = seen.add
return [x for x in list_ if not (x in seen or seen_add(x))]
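# Example: remove_dups(['b', 'a', 'b', 'c'])  # -> ['b', 'a', 'c']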
|
bbd0dc3e1bad8be4dc7b870e8de8c198bc470525
| 181,084 |
def build_variant_display_title(chrom, pos, ref, alt, sep='>'):
""" Builds the variant display title. """
return 'chr%s:%s%s%s%s' % (
chrom,
pos,
ref,
sep,
alt
)
|
3461166b459e9a5715353080f271ec9ee014bb34
| 633,530 |
def diverging_cmap(cmap='RdBu_r'):
"""Default diverging colormap"""
return cmap
|
7caf998c325b698c9b1a84c90bd91e2ad3ebc27b
| 265,874 |
import requests
def process_response(wrapped, instance, args, kwargs):
"""
Decorator to process requests.Response
Raises:
Exception: Service Unavailable
Returns:
dict: json data
"""
try:
resp = wrapped(*args, **kwargs)
except (requests.ConnectionError, requests.Timeout) as e:
raise Exception("Service Unavailable") from e
else:
resp.raise_for_status()
return resp.json()
|
f2d47dd156a8c38f154206639d2ab439459cbe83
| 193,529 |
def create_open_url_payload(url: str):
"""Create and return "openUrl" dictionary to send to the Plugin Manager.
Args:
url (str): Url to open in browser.
Returns:
dict: Dictionary with payload to open url in browser.
"""
return {
"event": "openUrl",
"payload": {
"url": url
}
}
|
3492031523a7c88a7fbd426f4f822658aee31558
| 314,022 |
def get_total_figure_number(axis_matrix):
"""
Gets the number of figures from an axis_matrix generated by pyplot. Used by the hash to place configs in the
correct part of the axis_matrix
Parameters:
axis_matrix (1D or 2D numpy array) = Array returned by pyplot once multiple figures are introduced into
a single plot
Returns:
total_figure_number (int) = Total number of figures
"""
# Get the number of figures, using the axis_matrix
try:
total_figure_number = axis_matrix.shape[0] * axis_matrix.shape[1]
except IndexError:
total_figure_number = axis_matrix.shape[0]
return total_figure_number
|
dcab29bd85735bee19756a084c182fb0a60e9549
| 616,040 |
def S(*regexps) -> str:
"""Just a shortcut to concatenate multiple regexps more easily"""
return ''.join(regexps)
|
38984899c7c3000c39dc38a66b3e790a3b36207d
| 511,673 |
import torch
def compress_weights(W, l):
"""Compress the weight matrix W of an inner product (fully connected) layer
using truncated SVD.
Parameters:
W: N x M weights matrix
l: number of singular values to retain
Returns:
Ul, L: matrices such that W \approx Ul*L
"""
# numpy doesn't seem to have a fast truncated SVD algorithm...
# this could be faster
U, s, V = torch.svd(W, some=True)
Ul = U[:, :l]
sl = s[:l]
Vl = V[:l, :]
L = torch.dot(torch.diag(sl), Vl)
return Ul, L
|
94b43e1bd4eff1a4e90eb13653de28c0fdfb72a9
| 568,547 |
def is_between(val, bound_1, bound_2):
"""
Return if a value falls between two boundary values
"""
if val > max([bound_1, bound_2]):
print("not between!")
return False
if val < min([bound_1, bound_2]):
print("not between!")
return False
return True
|
9fc710e947b389ad5412749f1a60b876ed04fb54
| 246,339 |
import math
def entropy(probabilities):
""" Calcola entropia della distribuzione di probabilità
:param probabilities: lista contenente una distribuzione di probabilità
:return: valore dell'entropia relativa alla distribuzione in input
"""
entropy = 0.0
for i in range(len(probabilities)):
entropy += probabilities[i] * math.log(probabilities[i], 2)
entropy = -(entropy)
return entropy
|
81b7f4b4b95664804a2ed17086c12a43125ac6b6
| 657,752 |
import tokenize
import codecs
def read_pyfile(filename):
"""Read and return the contents of a Python source file (as a
string), taking into account the file encoding."""
with open(filename, "rb") as pyfile:
encoding = tokenize.detect_encoding(pyfile.readline)[0]
with codecs.open(filename, "r", encoding=encoding) as pyfile:
source = pyfile.read()
return source
|
a6fce0f2acdb5156872ef572ab7e77ba6507873d
| 695,901 |
def to_unsigned(number, nb_bits):
"""Compute the unsigned value of the given number according to the given number of bits used for encoding.
>>> to_unsigned(5, 32)
5
>>> to_unsigned(-5, 8)
251
>>> to_unsigned(-5, 9)
507
:param int number: the number to convert.
:param int nb_bits: the number of bits considered.
:returns: an integer which is the unsigned value of the given number.
"""
if number < 0:
return (1 << nb_bits) + number
return number
|
6071519006a294a24c52381472ea683cbdc31663
| 218,844 |
import itertools
def adjacent_pairs(seq):
"""From e0, e1, e2, e3, ... produce (e0,e1), (e1,e2), (e2,e3), ..."""
return zip(seq, itertools.islice(seq, 1, None))
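# Example: list(adjacent_pairs([1, 2, 3, 4]))  # -> [(1, 2), (2, 3), (3, 4)]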
|
99382158526602f44e25f8d0611e5599afcd328e
| 153,941 |
def updateCharacterName(old_name: str, new_name: str) -> str:
"""Return a query to update a given character's name."""
return (f"UPDATE game_character "
f"SET name='{new_name}' "
f"WHERE name='{old_name}';"
)
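# Note: interpolating raw names invites SQL injection. A hedged alternative sketch
# using DB-API placeholders (the cursor object here is hypothetical):
#   cursor.execute("UPDATE game_character SET name=%s WHERE name=%s;",
#                  (new_name, old_name))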
|
eb829e6be49393baf1c007c0331fd45a50050af5
| 16,930 |
def str2num(string, decimal_sep='.'):
"""
A helper function that converts strings to numbers if possible
and replaces the float decimal separator with the given value.
eg. '1' => 1
'1.2' => 1.2
'1,2' => 1.2
'True' => True
"""
if not isinstance(string, str): return string
if string.isdigit(): return int(string)
if string=='True': return True
if string=='False': return False
    try:
        string_x = string.translate({ord(decimal_sep): 46})
        # map '_' to '#' so float() rejects it (PEP 515 allows '_' in numeric literals)
        string_x = string_x.translate({95: 35})
        return float(string_x)
    except ValueError:
        return string
|
1e677e7c9bad46266125373f56040e3f053b7d56
| 200,533 |
def sec2hms(sec):
"""
Convert seconds to hours, minutes and seconds.
"""
    hours = int(sec / 3600)
    minutes = int((sec - 3600*hours) / 60)
    seconds = int(sec - 3600*hours - 60*minutes)
    return hours, minutes, seconds
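# Example: sec2hms(3725)  # -> (1, 2, 5)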
|
efea3a641c5f13313adb571c201cc25d2895757e
| 3,492 |
import math
def next_halton(index, base):
"""Calculate next halton number """
result = 0.0
f = 1.0
i = index
while i > 0:
f = f / base
        result += f * (i % base)
i = math.floor(i / base)
return result
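# Examples of the base-2 Halton sequence:
#   next_halton(1, 2)  # -> 0.5
#   next_halton(3, 2)  # -> 0.75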
|
d9500c341a7438a388e54833434bdb761865786e
| 264,780 |
def get_treeview_action(self, **options):
"""Return the tree view action for `self`.
    If `self` is a singleton, return the view type 'form' so that it's
easier to see the full object.
:rtype: An action `dict` you can return to the web client.
`options` is used to override **almost any** key in the returned dict. An
interesting option is `target`.
"""
_, _, context = self.env.args
ids = self.ids
if not ids or len(ids) > 1:
vtype = "list"
active_id = None
else:
vtype = "form"
active_id = ids[0]
result = {
"type": "ir.actions.act_window",
"res_model": self._name,
"src_model": False,
"view_id": False,
"view_type": vtype,
"view_mode": "list,form",
"views": [(False, "list"), (False, "form")],
"target": "current",
"context": context,
}
if ids:
result["domain"] = [("id", "in", tuple(ids))]
if active_id:
result["active_id"] = active_id
result.update(options)
return result
|
dedbf9f3945afea72c05d4b2ad42b66e89aa7d0f
| 207,794 |
def norm_angle(a):
"""Normalizes an angle in degrees to -180 ~ +180 deg."""
    # a % 360 is always in [0, 360), so only the a > 180 case needs folding
    a = a % 360
    if a > 180:
        a -= 360
    return a
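# Examples: norm_angle(270)  # -> -90; norm_angle(-450)  # -> -90; norm_angle(180)  # -> 180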
|
0ee87049eda911bbd9307aa64464b1508791a16c
| 510,511 |
def _event_as_string(event):
"""Convert a midi event packet to a string representation."""
return '[id, status, cnum, cval, d1, d2] = %3d, %3d, %3d, %3d, %3d, %3d' % (
event.midiId, event.status, event.controlNum, event.controlVal, event.data1, event.data2)
|
95bc65cfa40664cad4c953b3d6eb09b9d14562eb
| 329,968 |
def get_cheat_sheet(cheat_sheet):
"""converts a cheat sheet from .json to string to display
Parameters
----------
:param dictionary cheat_sheet: dictionary that stores the content of given cheat sheet.
:return: a str representation of a cheat sheet.
"""
sheet = []
separator = '\n'
for data_type in cheat_sheet:
sheet.append(f'__**{data_type}**__')
for method in cheat_sheet[data_type]:
method_description = cheat_sheet[data_type][method]
sheet.append(f'**{method}** - {method_description}')
sheet.append('')
return separator.join(sheet)
|
b8d401e0c73c0f103cf0b3f404a2150b7bc28a36
| 36,887 |
def datetime_to_iso_8601(dt):
"""Format a datetime as an iso 8601 string - YYYY-MM-DDTHH:MM:SS with optional timezone +HH:MM."""
if dt:
return dt.replace(microsecond=0).isoformat()
else:
return None
|
1f1610b6d34409a00f4a1b0d8d47caa5ec8e8ad8
| 680,177 |
def fullname(clazz):
"""
Return fully qualified name of the given class.
>>> from collections import Counter
>>> fullname(Counter)
'collections.Counter'
"""
clazz_name = clazz.__name__ if hasattr(clazz, '__name__') else clazz._name
return f'{clazz.__module__}.{clazz_name}'
|
35262e7c2f9e5428745054aa3f0b4ff7107a7ef1
| 634,522 |
def _str_to_bool(text):
"""
convert a string to boolean. 'true', 'TRUE', 'TrUe', 'Yes', '1', 'y' and
similar returns True, otherwise returns False
:param text: text to convert to boolean
:return: the boolean representation of the text
"""
return text.lower() in ['true', 'yes', 'y', '1']
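# Examples: _str_to_bool('Yes')  # -> True; _str_to_bool('0')  # -> False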
|
e6fd0098c8605a56501211320931874c8a52f974
| 383,045 |
def create_empty(val=0x00):
"""Create an empty Userdata object.
val: value to fill the empty user data fields with (default is 0x00)
"""
user_data_dict = {}
for index in range(1, 15):
key = "d{}".format(index)
user_data_dict.update({key: val})
return user_data_dict
|
55e614059940b7eda5a0684dd105a60dbc7f1549
| 698,746 |
def get_group(parser, group_name, env_var_prefix=None):
"""
Return an argument group based on the group name.
This will return an existing group if it exists, or create a new one.
:argument parser: :py:class:`python3:argparse.ArgumentParser` to
add/retrieve the group to/from.
:argument group_name: Name of the argument group to be created/returned.
:argument env_var_prefix: Prefix to use for the environment variables support of this group. If set to None,
uses the title of the _ArgumentGroup that this is wrapping. If not set or set to False,
does not add any prefix. This argument MUST be a keyword argument.
"""
# We don't want to add an additional group if there's already a 'logging'
# group. Sadly, ArgumentParser doesn't provide an API for this, so we have
# to be naughty and access a private variable.
groups = [g.title for g in parser._action_groups] # pylint: disable=protected-access
if group_name in groups:
group = parser._action_groups[groups.index(group_name)] # pylint: disable=protected-access
else:
group = parser.add_argument_group(title=group_name, env_var_prefix=env_var_prefix)
return group
|
3ec99a16c34d438b4b3c66a890a7b63f1060deea
| 141,806 |
import functools
def set_if_true(func):
    """Decorate a setter to only set if the value is truthy"""
    @functools.wraps(func)
    def new_func(self, var):
        if var:
            func(self, var)
    return new_func
|
4afafe5e8ecd72cbebbc49fd5e619ccf33ec3b1f
| 475,644 |