def finding_the_percentage(n: int, arr: List[str], query_name: str) -> str:
"""
>>> finding_the_percentage(3, ['Krishna 67 68 69', 'Arjun 70 98 63',
... 'Malika 52 56 60'], 'Malika')
'56.00'
>>> finding_the_percentage(2, ['Harsh 25 26.5 28', 'Anurag 26 28 30'],
... 'Harsh')
'26.50'
"""
student_marks = {}
for i in range(n):
name, *line = arr[i].split()
scores = list(map(float, line))
student_marks[name] = sum(scores)/len(scores)
return '{:.2f}'.format(student_marks[query_name])
| 5,347,800 |
def mvg_logpdf_fixedcov(x, mean, inv_cov):
"""
Log-pdf of the multivariate Gaussian distribution where the determinant and inverse of the covariance matrix are
precomputed and fixed.
Note that this neglects the additive constant: -0.5 * (len(x) * log(2 * pi) + log_det_cov), because it is
irrelevant when comparing pdf values with a fixed covariance, but it means that this is not the normalised pdf.
Args:
x (1D numpy array): Vector value at which to evaluate the pdf.
mean (1D numpy array): Mean vector of the multivariate Gaussian distribution.
inv_cov (2D numpy array): Inverted covariance matrix.
Returns:
float: Log-pdf value.
"""
dev = x - mean
return -0.5 * (dev @ inv_cov @ dev)
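
# A minimal usage sketch with illustrative values (mean_vec, cov, x_val are
# assumed names): precompute the inverse covariance once, then evaluate the
# unnormalised log-pdf repeatedly.
import numpy as np

mean_vec = np.zeros(3)
cov = np.diag([1.0, 2.0, 0.5])
inv_cov = np.linalg.inv(cov)            # fixed, precomputed inverse
x_val = np.array([0.1, -0.2, 0.3])
logp = mvg_logpdf_fixedcov(x_val, mean_vec, inv_cov)  # scalar log-pdf (up to an additive constant)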
| 5,347,801 |
def noisy_image_data_generator(
dataset,
batch_size: int = 32,
min_value: float = 0.0,
max_value: float = 255.0,
min_noise_std: float = 0.01,
max_noise_std: float = 10.0,
random_invert: bool = False,
random_brightness: bool = False,
zoom_range: float = 0.25,
rotation_range: int = 90,
width_shift_range: float = 0.1,
height_shift_range: float = 0.1,
vertical_flip: bool = True,
horizontal_flip: bool = True):
"""
    Create a dataset generator flow that adds noise to a dataset
    :param dataset: input images to augment (currently must be a numpy array)
:param min_value: Minimum allowed value
:param max_value: Maximum allowed value
:param batch_size: Batch size
:param random_invert: Randomly (50%) invert the image
:param random_brightness: Randomly add offset or multiplier
:param zoom_range: Randomly zoom in (percentage)
:param rotation_range: Add random rotation range (in degrees)
:param min_noise_std: Min standard deviation of noise
:param max_noise_std: Max standard deviation of noise
:param horizontal_flip: Randomly horizontally flip image
:param vertical_flip: Randomly vertically flip image
:param height_shift_range: Add random vertical shift (percentage of image)
:param width_shift_range: Add random horizontal shift (percentage of image)
    :return: generator that yields (noisy_batch, noisy_batch) tuples
"""
# --- argument checking
    if dataset is None:
        raise ValueError("dataset cannot be None")
    if min_noise_std > max_noise_std:
        raise ValueError("min_noise_std must be <= max_noise_std")
    if min_value > max_value:
        raise ValueError("min_value must be <= max_value")
# --- variables setup
max_min_diff = (max_value - min_value)
# --- create data generator
if isinstance(dataset, np.ndarray):
data_generator = \
ImageDataGenerator(
zoom_range=zoom_range,
rotation_range=rotation_range,
width_shift_range=width_shift_range,
height_shift_range=height_shift_range,
vertical_flip=vertical_flip,
horizontal_flip=horizontal_flip,
zca_whitening=False,
featurewise_center=False,
featurewise_std_normalization=False)
else:
raise NotImplementedError()
# --- iterate over random batches
for x_batch in \
data_generator.flow(
x=dataset,
shuffle=True,
batch_size=batch_size):
# randomly invert batch
if random_invert:
if np.random.choice([False, True]):
x_batch = (max_value - x_batch) + min_value
# add random offset
if random_brightness:
if np.random.choice([False, True]):
offset = \
np.random.uniform(
low=0.0,
high=0.1 * max_min_diff)
x_batch = x_batch + offset
# adjust the std of the noise
# pick std between min and max std
if max_noise_std > 0.0:
if np.random.choice([False, True]):
std = \
np.random.uniform(
low=min_noise_std,
high=max_noise_std)
# add noise to create the noisy input
x_batch = \
x_batch + \
np.random.normal(0.0, std, x_batch.shape)
# clip all to be between min and max value
x_batch = np.clip(x_batch, a_min=min_value, a_max=max_value)
# return input, target
yield x_batch, x_batch
| 5,347,802 |
def app_dir(app_name: str = APP_NAME) -> Path:
"""Finds the application data directory for the current platform.
    If it does not exist, it creates the required directory tree.
Returns:
The path to the root app directory.
"""
if sys.platform == "win32":
path = Path.home() / "AppData" / "Local" / app_name
elif sys.platform == "darwin":
path = Path.home() / "Library" / "Application Support" / app_name
else:
path = Path.home() / f".{app_name}"
_create_tree(path)
return path
| 5,347,803 |
def make_frac_grid(frac_spacing, numrows=50, numcols=50, model_grid=None,
seed=0):
"""Create a grid that contains a network of random fractures.
Creates and returns a grid containing a network of random fractures, which
are represented as 1's embedded in a grid of 0's.
Parameters
----------
frac_spacing : int
Average spacing of fractures (in grid cells)
numrows : int, optional
Number of rows in grid (if model_grid parameter is given,
uses values from the model grid instead)
numcols : int, optional
Number of columns in grid (if model_grid parameter is given,
uses values from the model grid instead)
    model_grid : Landlab RasterModelGrid object, optional
RasterModelGrid to use for grid size
seed : int, optional
Seed used for random number generator
Returns
-------
m : Numpy array
Array containing fracture grid, represented as 0's (matrix) and 1's
(fractures). If model_grid parameter is given, returns a 1D array
corresponding to a node-based array in the model grid. Otherwise,
returns a 2D array with dimensions given by numrows, numcols.
"""
# Make an initial grid of all zeros. If user specified a model grid,
# use that. Otherwise, use the given dimensions.
if model_grid is not None:
numrows = model_grid.shape[0]
numcols = model_grid.shape[1]
m = zeros((numrows,numcols), dtype=int)
# Add fractures to grid
nfracs = (numrows + numcols) // frac_spacing
for i in range(nfracs):
(y, x) = calculate_fracture_starting_position((numrows, numcols), seed+i)
ang = calculate_fracture_orientation((y, x), seed+i)
(dy, dx) = calculate_fracture_step_sizes((y, x), ang)
trace_fracture_through_grid(m, (y, x), (dy, dx))
# If we have a model_grid, flatten the frac grid so it's equivalent to
# a node array.
if model_grid is not None:
m.shape = (m.shape[0]*m.shape[1])
return m
| 5,347,804 |
def generate_colors():
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
N = 30
brightness = 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
perm = [15, 13, 25, 12, 19, 8, 22, 24, 29, 17, 28, 20, 2, 27, 11, 26, 21, 4, 3, 18, 9, 5, 14, 1, 16, 0, 23, 7, 6, 10]
colors = [colors[idx] for idx in perm]
return colors
| 5,347,805 |
def secant_method(f, x0, x1, iterations):
"""Return the root calculated using the secant method."""
for i in range(iterations):
f_x1 = f(x1)
        x2 = x1 - f_x1 * (x1 - x0) / (f_x1 - f(x0) + 1e-9)
x0, x1 = x1, x2
return x2
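
# A short usage sketch: with plain Python floats the secant iteration
# converges to a root of f; here it approximates sqrt(2).
root = secant_method(lambda x: x * x - 2.0, 1.0, 2.0, 10)
# root is approximately 1.41421356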
| 5,347,806 |
def to_skopt_space(x):
"""converts the space x into skopt compatible space"""
if isinstance(x, list):
if all([isinstance(s, Dimension) for s in x]):
_space = Space(x)
elif len(x) == 1 and isinstance(x[0], tuple):
if len(x[0]) == 2:
if 'int' in x[0][0].__class__.__name__:
_space = Integer(low=x[0][0], high=x[0][1])
elif 'float' in x[0][0].__class__.__name__:
                    _space = Real(low=x[0][0], high=x[0][1])  # assumes skopt's Real is available for float bounds
else:
raise NotImplementedError
else:
raise NotImplementedError
elif all([s.__class__.__name__== "Apply" for s in x]):
_space = Space([skopt_space_from_hp_space(v) for v in x])
else:
raise NotImplementedError
elif isinstance(x, dict): # todo, in random, should we build Only Categorical space?
space_ = []
for k, v in x.items():
if isinstance(v, list):
s = space_from_list(v, k)
elif isinstance(v, Dimension):
# it is possible that the user has not specified the name so assign the names
# because we have keys.
if v.name is None or v.name.startswith('real_') or v.name.startswith('integer_'):
v.name = k
s = v
elif v.__class__.__name__== "Apply" or 'rv_frozen' in v.__class__.__name__:
s = skopt_space_from_hp_space(v, k)
elif isinstance(v, tuple):
s = Categorical(v, name=k)
elif isinstance(v, np.ndarray):
s = Categorical(v.tolist(), name=k)
else:
raise NotImplementedError(f"unknown type {v}, {type(v)}")
space_.append(s)
# todo, why converting to Space
_space = Space(space_) if len(space_) > 0 else None
elif 'rv_frozen' in x.__class__.__name__ or x.__class__.__name__== "Apply":
_space = Space([skopt_space_from_hp_space(x)])
else:
raise NotImplementedError(f"unknown type {x}, {type(x)}")
return _space
| 5,347,807 |
def build_response(status=OK, etag='etag', modified='modified', max_age=None):
"""Make a requests.Response object suitable for testing.
Args:
status: HTTP status
        max_age: value for the Cache-Control max-age header (set to a positive
            number of seconds for a fresh cache; defaults to None, i.e. stale)
etag: etag cache-control header
modified: last-modified cache-control header
Returns:
A Response instance populated according to the arguments.
"""
headers = {'last-modified': modified, 'etag': etag, 'Cache-Control':
'max-age={}'.format(max_age)}
test_response = requests.Response()
test_response.status_code = status
test_response.headers = headers
return test_response
| 5,347,808 |
def _do_outer_cv(searcher, X, y, outer_cv, scoring, error_score="raise", outfile=None):
"""Do outer cross-validation for nested CV
Parameters
----------
searcher : object
SearchCV object
X : numpy array
Containing features
y : numpy array
Target values or labels
outer_cv : int or CV splitter
Control the cv splitting
scoring : object
Scorer
error_score: str, float or numpy float
        Whether to raise the fit error or return a value
    outfile : str
        File path to store the results
"""
if error_score == "raise":
rval = cross_validate(
searcher,
X,
y,
scoring=scoring,
cv=outer_cv,
n_jobs=N_JOBS,
verbose=0,
error_score=error_score,
)
else:
warnings.simplefilter("always", FitFailedWarning)
with warnings.catch_warnings(record=True) as w:
try:
rval = cross_validate(
searcher,
X,
y,
scoring=scoring,
cv=outer_cv,
n_jobs=N_JOBS,
verbose=0,
error_score=error_score,
)
except ValueError:
pass
for warning in w:
print(repr(warning.message))
keys = list(rval.keys())
for k in keys:
if k.startswith("test"):
rval["mean_" + k] = np.mean(rval[k])
rval["std_" + k] = np.std(rval[k])
if k.endswith("time"):
rval.pop(k)
rval = pd.DataFrame(rval)
rval = rval[sorted(rval.columns)]
rval.to_csv(path_or_buf=outfile, sep="\t", header=True, index=False)
| 5,347,809 |
def split_list(big_list: List[T], delimiter: T) -> List[List[T]]:
"""Like string.split(foo), except for lists."""
cur_list: List[T] = []
parts: List[List[T]] = []
for item in big_list:
if item == delimiter:
if cur_list:
parts.append(cur_list)
cur_list = []
else:
cur_list.append(item)
if cur_list:
parts.append(cur_list)
return parts
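
# A quick usage sketch, analogous to str.split: empty segments are dropped.
parts = split_list([1, 0, 2, 3, 0, 0, 4], delimiter=0)
# parts == [[1], [2, 3], [4]]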
| 5,347,810 |
def generate_partitions(data):
"""
Generates a random nested partition for an array of integers
    :param data: list of items to partition
    :return: nested list representing the random nested partition
"""
if len(data) == 1:
return data
else:
        mask1 = np.random.choice(len(data), len(data) // 2, replace=False)
par1 = [data[i] for i in range(len(data)) if i in mask1]
par2 = [data[i] for i in range(len(data)) if i not in mask1]
return [generate_partitions(par1), generate_partitions(par2)]
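
# A minimal usage sketch; the exact nesting varies from run to run because
# the splits are drawn at random.
import numpy as np

nested = generate_partitions([1, 2, 3, 4])
# e.g. [[[3], [4]], [[1], [2]]]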
| 5,347,811 |
def adjust_shapefile_to_aoi(data_uri, aoi_uri, output_uri, \
empty_raster_allowed = False):
"""Adjust the shapefile's data to the aoi, i.e.reproject & clip data points.
Inputs:
- data_uri: uri to the shapefile to adjust
        - aoi_uri: uri to a single polygon shapefile
- output_uri: dataset that is clipped and/or reprojected to the
aoi if necessary.
- empty_raster_allowed: boolean flag that, if False (default),
causes the function to break if output_uri is empty, or return
an empty raster otherwise.
Returns: output_uri
"""
# Data and aoi are the same URIs, data is good as it is, return it.
if data_uri == aoi_uri:
return data_uri
# Split the path apart from the filename
head, tail = os.path.split(output_uri)
# Split the file basename from the file extension
base, _ = os.path.splitext(tail)
# Open URIs and get the projections
data = ogr.Open(data_uri)
message = "OGR Can't open " + data_uri
assert data is not None, message
aoi = ogr.Open(aoi_uri)
data_wkt = shapefile_wkt(data)
aoi_wkt = shapefile_wkt(aoi)
if projections_match([data_wkt, aoi_wkt]):
# Same projections, just clip
clip_datasource(aoi, data, output_uri)
else:
# Reproject the aoi to be in data's projection
projected_aoi_uri = os.path.join(head, base + '_projected_aoi')
# TODO: include this in raster utils
# Removing output_uri if it already exists
if os.path.isdir(projected_aoi_uri):
shutil.rmtree(projected_aoi_uri)
pygeoprocessing.geoprocessing.reproject_datasource(aoi, data_wkt, projected_aoi_uri)
# Clip all the shapes outside the aoi
out_uri = os.path.join(head, base + '_clipped')
clip_datasource(ogr.Open(projected_aoi_uri), data, out_uri)
# Convert the datasource back to the original projection (aoi's)
# Removing output_uri if it already exists
if os.path.isdir(output_uri):
shutil.rmtree(output_uri)
pygeoprocessing.geoprocessing.reproject_datasource(ogr.Open(out_uri), aoi_wkt, \
output_uri)
# Ensure the resulting file's 1st layer is not empty
out_shapefile = ogr.Open(output_uri)
out_layer = out_shapefile.GetLayer(0)
out_feature_count = out_layer.GetFeatureCount()
out_layer = None
out_shapefile = None
# Break if returning an empty raster is not allowed
if not empty_raster_allowed:
message = 'Error: first layer of ' + output_uri + ' is empty. Are ' + \
data_uri + ' and ' + aoi_uri + ' mis-aligned?'
assert out_feature_count > 0, message
return output_uri
| 5,347,812 |
def randomRectangularCluster(nRow, nCol, minL, maxL, mask=None):
"""
Create a random rectangular cluster neutral landscape model with
values ranging 0-1.
Parameters
----------
nRow : int
The number of rows in the array.
nCol : int
The number of columns in the array.
minL: int
The minimum possible length of width and height for each random
rectangular cluster.
maxL: int
The maximum possible length of width and height for each random
rectangular cluster.
mask : array, optional
2D array used as a binary mask to limit the elements with values.
Returns
-------
out : array
2D array.
"""
if mask is None:
mask = np.ones((nRow, nCol))
# Create an empty array of correct dimensions
array = np.zeros((nRow, nCol)) - 1
# Keep applying random clusters until all elements have a value
while np.min(array) == -1:
width = np.random.choice(range(minL, maxL))
height = np.random.choice(range(minL, maxL))
row = np.random.choice(range(-maxL, nRow))
col = np.random.choice(range(-maxL, nCol))
array[row:row + width, col:col + height] = np.random.random()
# Apply mask and rescale 0-1
maskedArray = maskArray(array, mask)
rescaledArray = linearRescale01(maskedArray)
return(rescaledArray)
| 5,347,813 |
def auth(credentials_file_path):
"""Shows basic usage of the Sheets API.
Prints values from a sample spreadsheet.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists(credentials_file_path+'token.pickle'):
with open(credentials_file_path+'token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
credentials_file_path+'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(credentials_file_path+'token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
return service
| 5,347,814 |
def add_arc():
"""
    :return: the left-hand and right-hand GArc objects
"""
l_hand = GArc(200, 200, 60, 150, x=480, y=270)
l_hand.filled = True
l_hand.fill_color = "#8eded9"
r_hand = GArc(200, 200, -30, 120, x=650, y=300)
r_hand.filled = True
r_hand.fill_color = "#8eded9"
return l_hand, r_hand
| 5,347,815 |
def get_data_item_or_add(results_dic, name, n_hid, epochs, horizon, timesteps):
""" Return or create a new DataItem in `results_dic` with the corresponding
metadata.
"""
if name not in results_dic:
results_dic[name] = []
found = False
for item in results_dic[name]:
if item.is_metadata(n_hid, epochs, horizon, timesteps):
found = True
return item
if not found:
results_dic[name].append(
DataItem(n_hid, epochs, horizon, timesteps))
return results_dic[name][-1]
| 5,347,816 |
def _split_compound_loc(compound_loc):
"""Split a tricky compound location string (PRIVATE).
>>> list(_split_compound_loc("123..145"))
['123..145']
>>> list(_split_compound_loc("123..145,200..209"))
['123..145', '200..209']
>>> list(_split_compound_loc("one-of(200,203)..300"))
['one-of(200,203)..300']
>>> list(_split_compound_loc("complement(123..145),200..209"))
['complement(123..145)', '200..209']
>>> list(_split_compound_loc("123..145,one-of(200,203)..209"))
['123..145', 'one-of(200,203)..209']
>>> list(_split_compound_loc("123..145,one-of(200,203)..one-of(209,211),300"))
['123..145', 'one-of(200,203)..one-of(209,211)', '300']
>>> list(_split_compound_loc("123..145,complement(one-of(200,203)..one-of(209,211)),300"))
['123..145', 'complement(one-of(200,203)..one-of(209,211))', '300']
>>> list(_split_compound_loc("123..145,200..one-of(209,211),300"))
['123..145', '200..one-of(209,211)', '300']
>>> list(_split_compound_loc("123..145,200..one-of(209,211)"))
['123..145', '200..one-of(209,211)']
>>> list(_split_compound_loc("complement(149815..150200),complement(293787..295573),NC_016402.1:6618..6676,181647..181905"))
['complement(149815..150200)', 'complement(293787..295573)', 'NC_016402.1:6618..6676', '181647..181905']
"""
if "one-of(" in compound_loc:
# Hard case
while "," in compound_loc:
assert compound_loc[0] != ","
assert compound_loc[0:2] != ".."
i = compound_loc.find(",")
part = compound_loc[:i]
compound_loc = compound_loc[i:] # includes the comma
while part.count("(") > part.count(")"):
assert "one-of(" in part, (part, compound_loc)
i = compound_loc.find(")")
part += compound_loc[:i + 1]
compound_loc = compound_loc[i + 1:]
if compound_loc.startswith(".."):
i = compound_loc.find(",")
if i == -1:
part += compound_loc
compound_loc = ""
else:
part += compound_loc[:i]
compound_loc = compound_loc[i:] # includes the comma
while part.count("(") > part.count(")"):
assert part.count("one-of(") == 2
i = compound_loc.find(")")
part += compound_loc[:i + 1]
compound_loc = compound_loc[i + 1:]
if compound_loc.startswith(","):
compound_loc = compound_loc[1:]
assert part
yield part
if compound_loc:
yield compound_loc
else:
# Easy case
for part in compound_loc.split(","):
yield part
| 5,347,817 |
def midcurve_atm_fwd_rate(asset: Asset, expiration_tenor: str, forward_tenor: str, termination_tenor: str,
benchmark_type: str = None,
floating_rate_tenor: str = None,
clearing_house: str = None, location: PricingLocation = None, *, source: str = None,
real_time: bool = False) -> Series:
"""
GS end-of-day atm forward rate for swaption vol matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
:param forward_tenor: relative date representation of swap's start date after option expiry e.g. 2y
:param termination_tenor: relative date representation of the instrument's expiration date e.g. 1y
:param benchmark_type: benchmark type e.g. LIBOR
:param floating_rate_tenor: floating index rate
:param clearing_house: Example - "LCH", "EUREX", "JSCC", "CME"
:param location: Example - "TKO", "LDN", "NYC"
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
    :return: midcurve at-the-money forward rate curve
"""
df = _get_swaption_measure(asset, benchmark_type=benchmark_type, floating_rate_tenor=floating_rate_tenor,
effective_date=forward_tenor, expiration_tenor=expiration_tenor,
termination_tenor=termination_tenor, clearing_house=clearing_house, source=source,
real_time=real_time, start=DataContext.current.start_date,
end=DataContext.current.end_date,
query_type=QueryType.MIDCURVE_ATM_FWD_RATE, location=location)
return _extract_series_from_df(df, QueryType.MIDCURVE_ATM_FWD_RATE)
| 5,347,818 |
def test_primary_2():
"""
>>> db = get_connection('sqlite://')
>>> db.metadata.drop_all()
>>> class Group(Model):
... name = Field(unicode, primary_key=True)
>>> class User(Model):
... username = Field(unicode, primary_key=True)
... year = Field(int, default=30)
... group = Reference('group')
>>> g = Group(name='group')
>>> g.save()
True
>>> u = User(username='guest', group=g)
>>> u.save()
True
>>> u1 = User.get('guest')
>>> u1
<User {'username':u'guest','year':30,'group':<ReferenceProperty:group>}>
>>> print u1.group
group
"""
| 5,347,819 |
def extrode_multiple_urls(urls):
""" Return the last (right) url value """
if urls:
return urls.split(',')[-1]
return urls
| 5,347,820 |
def save_sms_sjjs_data(data):
"""
    Save SMS receiving (sjjs) data.
    @params:
        data : data rows to save (required parameter)  list
"""
# return 'save_sms_sjjs_data success!'
db = MySqLHelper()
sql = """INSERT IGNORE INTO t_603_sms_sjjs (ip_addr, d_time, isp_ip, sjjs_1m)
VALUES (%s,%s,%s,%s)"""
try:
result = db.insertmany(sql, data)
logger.debug('save_sms_sjjs_data:{}'.format(result))
except Exception as e:
logger.error(e)
# send_to_axxnr.send_message('save_sms_sjjs_data:{}'.format(e))
| 5,347,821 |
def build_header(cp: Config) -> str:
"""Build the email header for a SMTP email message"""
header = '\n'.join([
'From: {}'.format(cp.sender),
'To: {}'.format(''.join(cp.receiver)),
'Subject: {}\n\n'.format(cp.subject)
])
return header
| 5,347,822 |
def apex_distance(r0, rc, Rc, uvec):
"""
Implements equation (E4) of TYH18
"""
R0 = rc + Rc * uvec - r0
return np.hypot(*R0)
| 5,347,823 |
def test_tracker_8():
"""
Test tracking over an empty frame.
"""
peaks = {"x" : numpy.array([1.0, 2.0, 3.0]),
"y" : numpy.array([1.0, 1.0, 1.0]),
"sum" : numpy.array([4.0, 4.0, 4.0])}
empty = {"x" : numpy.array([]),
"y" : numpy.array([]),
"sum" : numpy.array([])}
filename = "test_sa_hdf5.hdf5"
h5_name = storm_analysis.getPathOutputTest(filename)
storm_analysis.removeFile(h5_name)
# Write data.
with saH5Py.SAH5Py(h5_name, is_existing = False) as h5:
h5.addLocalizations(peaks, 0)
h5.addLocalizations(empty, 1)
h5.addLocalizations(peaks, 2)
h5.addMovieInformation(FakeReader(n_frames = 3))
# Track.
tracker.tracker(h5_name, descriptor = "111", radius = 0.1)
# Tracking.
with saH5Py.SAH5Py(h5_name) as h5:
assert(h5.getNTracks() == 6)
for t in h5.tracksIterator():
assert(numpy.allclose(numpy.ones(6), t["track_length"]))
| 5,347,824 |
def copy_attr(a, b, include=(), exclude=()):
"""Copy attributes from b to a, options to only include [...] and to
exclude [...]."""
for k, v in b.__dict__.items():
if (len(include) and k not in include) or k.startswith("_") or k in exclude:
continue
else:
setattr(a, k, v)
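
# A small usage sketch with throwaway classes: copy public attributes from
# src to dst, skipping anything listed in exclude.
class _Src:
    def __init__(self):
        self.lr = 0.01
        self.momentum = 0.9
        self._private = "hidden"

class _Dst:
    pass

src, dst = _Src(), _Dst()
copy_attr(dst, src, exclude=("momentum",))
# dst.lr == 0.01; dst has no 'momentum' and no '_private' attribute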
| 5,347,825 |
def test_span_char_start_and_char_end():
"""Test chart_start and char_end of TemporarySpan that comes from Ngrams.apply."""
ngrams = Ngrams()
sent = Sentence()
sent.text = "BC548BG"
sent.words = ["BC548BG"]
sent.char_offsets = [0]
sent.abs_char_offsets = [0]
result = list(ngrams.apply(sent))
assert len(result) == 1
assert result[0].get_span() == "BC548BG"
assert result[0].char_start == 0
assert result[0].char_end == 6
| 5,347,826 |
def train(env, make_policy, make_optimizer, *,
epochs=100,
gamma=.96,
lr_inner=1., # lr for the inner loop steps
lr_outer=1., # lr for the outer loop steps
lr_value=.1, # lr for the value function estimator
lr_om=.1, # lr for opponent modeling
n_agents=2,
n_inner_steps=1,
inner_asymm=True,
om_batch_size=64, # batch size used for fitting opponent models
om_epochs=5, # epochs per iteration to fit opponent models
value_batch_size=64, # batch size used for fitting the values
value_epochs=5, # epochs per iteration to fit value functions
use_baseline=True,
use_dice=True,
use_opp_modeling=False,
save_dir='.'):
"""The main training function."""
os.makedirs(save_dir, exist_ok=True)
# Build.
tf.reset_default_graph()
(policies, rollout_policies, pol_losses, val_losses, om_losses,
update_pol_ops, update_val_ops, update_om_ops) = build_graph(
env, make_policy, make_optimizer,
lr_inner=lr_inner, lr_outer=lr_outer, lr_value=lr_value, lr_om=lr_om,
n_agents=n_agents, n_inner_steps=n_inner_steps,
use_baseline=use_baseline, use_dice=use_dice,
use_opp_modeling=use_opp_modeling)
# Train.
acs_all = []
rets_all = []
params_all = []
params_om_all = []
times_all = []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Construct update functions.
update_funcs = {
'policy': [
get_update(
[policies[k]] + policies[k].opponents,
pol_losses[k], update_pol_ops[k], sess,
gamma=gamma)
for k in range(n_agents)],
'value': [
get_update(
[policies[k]],
val_losses[k], update_val_ops[k], sess,
gamma=gamma)
for k in range(n_agents)],
'opp': [
get_update(
policies[k].root.opponents,
om_losses[k], update_om_ops[k], sess,
gamma=gamma)
for k in range(n_agents)
] if om_losses else None,
}
root_policies = [pi.root for pi in policies]
# Train for a number of epochs.
for e in range(epochs):
times = []
# Model opponents.
if use_opp_modeling:
with U.elapsed_timer() as om_timer:
# Fit opponent models for several epochs.
om_losses = np.zeros((n_agents, n_agents - 1))
for om_ep in range(om_epochs):
traces = rollout(
env, root_policies, rollout_policies, sess,
gamma=gamma, parent_traces=[])
om_traces = [
[tr for j, tr in enumerate(traces) if j != k]
for k in range(n_agents)]
for k in range(n_agents):
update_om = update_funcs['opp'][k]
for trace_batch in gen_trace_batches(
om_traces[k], batch_size=om_batch_size):
update_om(trace_batch)
loss = update_om(om_traces[k])
om_losses[k] += np.asarray(loss)
om_losses /= om_epochs
times.append(om_timer())
else:
om_losses = np.array([])
# Fit value functions.
with U.elapsed_timer() as val_timer:
# Fit value functions for several epochs.
value_losses = np.zeros(n_agents)
for v_ep in range(value_epochs):
traces = rollout(
env, root_policies, rollout_policies, sess,
gamma=gamma, parent_traces=[])
for k in range(n_agents):
update_val = update_funcs['value'][k]
for trace_batch in gen_trace_batches(
[traces[k]], batch_size=value_batch_size):
update_val(trace_batch)
loss = update_val([traces[k]])
value_losses[k] += loss[0]
value_losses /= value_epochs
times.append(val_timer())
# Save parameters of the agents (for debug purposes).
params = sess.run([
tf.squeeze(pi.root.parameters[0])
for pi in policies])
params_all.append(params)
# Save parameters of the opponent models (for debug purposes).
params = [
sess.run([
tf.squeeze(opp.root.parameters[0])
for opp in pi.opponents])
for pi in policies]
params_om_all.append(params)
# Inner loop rollouts (lookahead steps).
with U.elapsed_timer() as inner_timer:
inner_traces = []
for k in range(n_agents):
parent_traces = []
for m in range(n_inner_steps):
policies_k = [policies[k].parents[m]] + [
opp.parents[m] for opp in policies[k].opponents]
traces = rollout(
env, policies_k, rollout_policies, sess,
gamma=gamma, parent_traces=parent_traces)
parent_traces.append(traces)
inner_traces.append(parent_traces)
times.append(inner_timer())
# Outer loop rollouts (each agent plays against updated opponents).
with U.elapsed_timer() as outer_timer:
outer_traces = []
for k in range(n_agents):
parent_traces = inner_traces[k]
policies_k = [policies[k]] + policies[k].opponents
traces = rollout(
env, policies_k, rollout_policies, sess,
gamma=gamma, parent_traces=parent_traces)
outer_traces.append(traces)
times.append(outer_timer())
# Updates.
update_time = 0
policy_losses = []
for k in range(n_agents):
# Policy
with U.elapsed_timer() as pol_upd_timer:
parent_traces = inner_traces[k]
update_pol = update_funcs['policy'][k]
loss = update_pol(
outer_traces[k], parent_traces=parent_traces)
policy_losses.append(loss)
update_time += pol_upd_timer()
# Logging.
if n_inner_steps > 0:
obs, acs, rets, vals, infos = list(zip(*inner_traces[0][0]))
else:
obs, acs, rets, vals, infos = list(zip(*outer_traces[0]))
times_all.append(times)
acs_all.append([ac.mean() for ac in acs])
rets_all.append([r.sum(axis=0).mean() * (1 - gamma) for r in rets])
# rets_all.append([r.sum(axis=0).mean() for r in rets])
print("Epoch:", e + 1, '-' * 60)
# print("Policy losses:", list(map(sum, policy_losses)))
print("Value losses:", value_losses.tolist())
print("OM losses:", om_losses.tolist())
print("Returns:", rets_all[-1])
print("Defection rate:", acs_all[-1])
# Save stuff
np.save(save_dir + '/acs.npy', acs_all)
np.save(save_dir + '/rets.npy', rets_all)
np.save(save_dir + '/params.npy', params_all)
np.save(save_dir + '/params_om.npy', params_om_all)
np.save(save_dir + '/times.npy', times_all)
| 5,347,827 |
def pixel2phase(data):
"""
    Converts each channel of the images in the data to the phase component of its 2-dimensional discrete Fourier transform.
:param data: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)
:return: numpy array with same shape as data
"""
channels = data.shape[-1]
return fourier(data)[:, :, :, channels:]
| 5,347,828 |
def take_last_while(predicate, list):
"""Returns a new list containing the last n elements of a given list, passing
each value to the supplied predicate function, and terminating when the
predicate function returns false. Excludes the element that caused the
predicate function to fail. The predicate function is passed one argument:
(value)"""
for i, e in enumerate(reversed(list)):
if not predicate(e):
            # return the last i elements (empty when the last element already fails)
            return list[len(list) - i:]
return list
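
# A brief usage sketch mirroring the docstring: keep trailing elements while
# the predicate holds, stopping at the first failure from the right.
tail = take_last_while(lambda v: v > 0, [3, -1, 2, 5, 7])
# tail == [2, 5, 7]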
| 5,347,829 |
def main():
"""
    Loads an image, blurs it five times, and shows both the original and the blurred result.
"""
old_img = SimpleImage("images/smiley-face.png")
old_img.show()
blurred_img = blur(old_img)
for i in range(4):
blurred_img = blur(blurred_img)
blurred_img.show()
| 5,347,830 |
def get_notes() -> str:
"""Scrape notes and disclaimers from dashboard."""
# As of 6/5/20, the only disclaimer is "Data update weekdays at 4:30pm"
with get_firefox() as driver:
notes = []
match = re.compile('disclaimers?', re.IGNORECASE)
driver.implicitly_wait(30)
driver.get(dashboard_url)
soup = BeautifulSoup(driver.page_source, 'html5lib')
has_notes = False
text = soup.get_text().splitlines()
for text_item in text:
if match.search(text_item):
notes.append(text_item.strip())
has_notes = True
if not has_notes:
raise FormatError(
"This dashboard url has changed. None of the <div> elements contains'Disclaimers' " + dashboard_url)
return '\n\n'.join(notes)
| 5,347,831 |
def func(num1, num2):
"""
    Divides the first number by the second and prints the result.
    :param num1: the first number
    :type num1: int
    :param num2: the second number
    :type num2: int
    :return: None; the quotient is printed rather than returned
    :rtype: None
"""
print (num1 / num2)
| 5,347,832 |
def signup_logout(request):
"""
Just wrapping the built in
"""
return logout_view(request, template_name='logged_out.html')
| 5,347,833 |
def stress_rotation(stress, angle):
"""
Rotates a stress vector against a given angle.
    This rotates the stress from the local to the global axis system.
Use a negative angle to rotate from global to local system.
The stress vector must be in Voigt notation and engineering stress is used.
Parameters
----------
stress : vector
The matrix that must be rotated.
angle : float
The rotation angle in degrees.
Returns
-------
stress_rot : vector
A rotated version of the matrix.
"""
angle = angle * np.pi/180 # convert to radians
m = np.cos(-angle)
n = np.sin(-angle)
T1_inv = np.matrix([[m**2, n**2, 2*m*n],
[n**2, m**2, -2*m*n],
[-m*n, m*n, m**2-n**2]])
stress_rot = T1_inv * stress
return stress_rot
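
# A minimal usage sketch with illustrative values: the Voigt stress vector
# must be a 3x1 column so it conforms with the 3x3 rotation matrix.
import numpy as np

stress_local = np.array([[100.0], [20.0], [5.0]])   # [sigma_x, sigma_y, tau_xy]
stress_global = stress_rotation(stress_local, 45.0)
# stress_global is a 3x1 matrix holding the rotated stress components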
| 5,347,834 |
def blue_noise(x=None, hue=None, data=None, dodge=False, orient='v', plot_width=None,
color='black', palette='tab10', size=3, centralized=False,
filename='', scaling=10):
""" Renders a *Blue Noise Plot* from the given data.
Args:
x (str in data): Variables that specify positions on the data-encoding axes.
hue (str in data): Optional. Grouping variable that will produce points with different
colors.
data (pandas.DataFrame): Input data structure. Long-form collection of vectors that can be
assigned to named variables.
        dodge (boolean): Optional. Whether to dodge the categorical classes of the plot.
Defaults to False.
orient ("v" | "h"): Optional. Orientation of the plot (vertical or horizontal).
Defaults to 'v'.
color (str): Color to use for markers, in case there is only one class (hue not given).
Defaults to 'black'.
palette (str): Method for choosing the colors to use when mapping the hue semantic.
String values are passed to color_palette(). List or dict values imply
categorical mapping, while a colormap object implies numeric mapping.
Defaults to 'tab10'.
size (float): The marker size in points**2.
        centralized (boolean): Optional. Whether the plot should be centralized or not.
Defaults to False.
plot_width (float): Optional. Width of the plot. This is a ratio, assuming the encoding axis
            is between 0 and 1. So, a plot_width of 0.2 would give you a plot that is
            5 times as wide in the encoding axis as in the non-encoding axis.
filename (str): Optional. Filename of the plot to render.
scaling (int): Optional. Scaling for the size of plot.
            Defaults to 10 for a 740 pixel plot (long side).
Returns:
List[List[[float, float]]] 2D-Array, relaxed points. Here the first dimension of the array
            encodes the classes in the data. So for a single-class blue noise plot,
len(blue_noise_plot) would be 1.
Each of these arrays contains arrays with points within this class.
"""
return __plot(x=x, hue=hue, data=data, dodge=dodge, orient=orient, plot_width=plot_width,
color=color, palette=palette, size=size, centralized=centralized,
filename=filename, scaling=scaling, method='blue_noise')
| 5,347,835 |
def get_repository(auth_user: check_auth, repository_id: hug.types.text):
"""
GET: /repository/{repository_id}
Returns the CLA repository requested by UUID.
"""
return cla.controllers.repository.get_repository(repository_id)
| 5,347,836 |
def _isDebug():
"""*bool* = "--debug" or "--debugger" """
return options is not None and (options.debug or options.debugger)
| 5,347,837 |
def _and(mat,other,obj,m):
"""
Can only be used with '&' operator not with 'and'
    Multi-column boolean matrices' values are compared with the 'and' operator, meaning that a single
    false value causes the whole row to be reduced to a false value
"""
if mat.BOOL_MAT:
if isinstance(other,obj):
if mat.dim!=other.dim:
raise ValueError("Dimensions of the matrices don't match")
if not other.BOOL_MAT:
raise TypeError("Can't compare bool matrix to non-bool matrix")
d0,d1 = mat.dim
o = other.matrix
true,false = mat.DEFAULT_BOOL[True],mat.DEFAULT_BOOL[False]
data = []
#Reduce multiple columns into one
#Remove rows with false boolean values
for i in range(d0):
mrow,orow = m[i],o[i]
if (false in mrow) or (false in orow):
data.append([false])
continue
data.append([true])
return obj(dim=[d0,1],
data=data,
features=mat.features[:1],
index=mat.index[:],
implicit=True,BOOL_MAT=True,DEFAULT_BOOL={True:true,False:false})
else:
d0,d1 = mat.dim
true,false = mat.DEFAULT_BOOL[True],mat.DEFAULT_BOOL[False]
data = []
if isinstance(other,obj):
if mat.dim!=other.dim:
raise ValueError("Dimensions of the matrices don't match")
if other.BOOL_MAT:
raise TypeError("Can't compare non-bool matrix to bool matrix")
o = other.matrix
for i in range(d0):
mrow,orow = m[i],o[i]
data.append([true if (bool(mrow[j]) and bool(orow[j])) else false for j in range(d1)])
elif isinstance(other,list):
if mat.d1!=len(other):
raise ValueError("Length of the list doesn't match matrix's column amount")
for i in range(d0):
mrow = m[i]
data.append([true if (bool(mrow[j]) and bool(other[j])) else false for j in range(d1)])
else:
for i in range(d0):
mrow = m[i]
data.append([true if (bool(mrow[j]) and bool(other)) else false for j in range(d1)])
return obj(dim=[d0,d1],
data=data,
features=mat.features[:],
index=mat.index[:],
implicit=True,BOOL_MAT=True,DEFAULT_BOOL={True:true,False:false})
| 5,347,838 |
def MAP (request, resource, optimize_already_mapped_nfs=True,
migration_handler_name=None, migration_coeff=None,
load_balance_coeff=None, edge_cost_coeff=None,
time_limit=None, mip_gap_limit=None, node_limit=None, logger=None,
**migration_handler_kwargs):
"""
Starts an offline optimization of the 'resource', which may contain NFs for
considering migration if optimize_already_mapped_nfs is set. 'request' should
be new NF-s to be mapped during the reoptimization of 'resource'.
If 'optimize_already_mapped_nfs' is set to false, 'request' should contain
    only NF-s which are not yet mapped to resource.
:param mip_gap_limit: termination optimality condition for the MILP
:param time_limit: termination execution time condition for the MILP
:param node_limit:
:param optimize_already_mapped_nfs:
:param request:
:param resource:
:param migration_handler_name:
:param migration_handler_kwargs:
    :return: the mapped NFFG
"""
# Make heuristic and MILP even in number of large object deepcopies
# This would also be required for correct behaviour (Maybe the mapping
# shouldn't change the input NFFG)
request = copy.deepcopy(request)
resource = copy.deepcopy(resource)
# overwrite logger object if we got one from the caller!
if logger is not None:
global log
log = logger
migration_handler = None
req_nf_ids = [nf.id for nf in request.nfs]
if optimize_already_mapped_nfs:
# This is a full reoptimization, add VNFs and everything from resource to
# request for reoptimization!
for vnf in resource.nfs:
if vnf.id not in req_nf_ids:
# log.debug("Adding NF %s to request for reoptimization."%vnf.id)
request.add_nf(vnf)
NFFGToolBox.recreate_all_sghops(resource)
for sg in resource.sg_hops:
if not request.network.has_edge(sg.src.node.id, sg.dst.node.id,
key=sg.id):
# log.debug("Adding SGHop %s to request from resource."%sg.id)
add_saps_if_needed_for_link(sg, request)
request.add_sglink(sg.src, sg.dst, hop=sg)
# reqs in the substrate (requirements satisfied by earlier mapping) needs
# to be respected by the reoptimization, and mogration can only be done
# if it is not violated!
log.debug("e2e reqs in request:%s, e2e reqs in resource, e.g: %s"%
([r.sg_path for r in request.reqs],
[r.sg_path for r in resource.reqs][:20]))
# log.debug("SAPs in resource: %s" % [s for s in resource.saps])
for req in resource.reqs:
# all possible SAPs should be added already!
if not request.network.has_edge(req.src.node.id, req.dst.node.id,
key=req.id):
# log.debug("Adding requirement link on path %s between %s and %s to request to preserve it "
# "during reoptimization"%(req.sg_path, req.src, req.dst))
add_saps_if_needed_for_link(req, request)
# bandwidth requirement of the already mapped SGHops are stored by
# the resource graph!
req.bandwidth = 0.0
request.add_req(req.src, req.dst, req=req)
# We have to deal with migration in this case only.
if migration_handler_name is not None and type(
migration_handler_name) is str:
migration_cls = eval("migration_costs." + migration_handler_name)
# This resource NFFG needs to include all VNFs, which may play any role in
# migration or mapping. Migration need to know about all of them for
# setting zero cost for not yet mapped VNFs
migration_handler = migration_cls(request, resource,
**migration_handler_kwargs)
else:
# No migration can happen! We just map the given request and resource
# with MILP.
# Fail if there is VNF which is mapped already!
for vnf in resource.nfs:
if vnf.id in req_nf_ids:
raise uet.BadInputException("If 'optimize_already_mapped_nfs' is set to "
"False, request shouldn't contain VNFs "
"from resource", "VNF %s is both in request "
"and resource!"%vnf.id)
mappedNFFG = convert_mip_solution_to_nffg([request], resource,
migration_handler=migration_handler,
migration_coeff=migration_coeff,
load_balance_coeff=load_balance_coeff,
edge_cost_coeff=edge_cost_coeff,
reopt=optimize_already_mapped_nfs,
time_limit=time_limit, mip_gap_limit=mip_gap_limit,
node_limit=node_limit)
if mappedNFFG is not None:
try:
mappedNFFG.calculate_available_node_res()
mappedNFFG.calculate_available_link_res([])
except RuntimeError as re:
log.error("MILP's resulting NFFG is invalid: %s"%re.message)
raise uet.InternalAlgorithmException("MILP's mapping is invalid!!")
return mappedNFFG
else:
raise uet.MappingException("MILP couldn't map the given service request.",
False)
| 5,347,839 |
def generate_terms(reg):
""" Generate the Terms layer """
layer_generator = terms.Terms(reg)
    print(NodeEncoder().encode(layer_generator.build()))
| 5,347,840 |
def _parcel_profile_helper(pressure, temperature, dewpt):
"""Help calculate parcel profiles.
Returns the temperature and pressure, above, below, and including the LCL. The
other calculation functions decide what to do with the pieces.
"""
# Find the LCL
press_lcl, temp_lcl = lcl(pressure[0], temperature, dewpt)
press_lcl = press_lcl.to(pressure.units)
# Find the dry adiabatic profile, *including* the LCL. We need >= the LCL in case the
# LCL is included in the levels. It's slightly redundant in that case, but simplifies
# the logic for removing it later.
press_lower = concatenate((pressure[pressure >= press_lcl], press_lcl))
temp_lower = dry_lapse(press_lower, temperature)
# If the pressure profile doesn't make it to the lcl, we can stop here
if _greater_or_close(np.nanmin(pressure.m), press_lcl.m):
return (press_lower[:-1], press_lcl, np.array([]) * press_lower.units,
temp_lower[:-1], temp_lcl, np.array([]) * temp_lower.units)
# Find moist pseudo-adiabatic profile starting at the LCL
press_upper = concatenate((press_lcl, pressure[pressure < press_lcl]))
temp_upper = moist_lapse(press_upper, temp_lower[-1]).to(temp_lower.units)
# Return profile pieces
return (press_lower[:-1], press_lcl, press_upper[1:],
temp_lower[:-1], temp_lcl, temp_upper[1:])
| 5,347,841 |
def write_readme_files(org_name, repo_name, filetype="rst", ignore=None, header=""):
"""Write readme files into the docs directory under their package names
"""
# We look for markdown files, as readmes on github for the strands
# repositories are written in markdown
readmes = get_repo_files(org_name, repo_name, match_ext=[".md"], match_filename=["readme"], ignore=ignore, header=header)
subpkg_readmes = files_to_subpackages(readmes)
# Get the default branch for the repo, to use later when we want to link to the original files
repo_rq = requests.get("https://api.github.com/repos/{0}/{1}".format(org_name, repo_name), headers=header)
default_branch = json.loads(repo_rq.text)["default_branch"]
for subpkg in subpkg_readmes.keys():
print("processing {0}".format(subpkg))
# The path we get in each item is something like
# strands_navigation/topological_rviz_tools/readme.md. When using
# mkdocs, this will generate the documentation in subheadings for each
# subdirectory, whereas we would prefer it to be grouped under
# strands_navigation. So, we will save the data in readme.md to
# strands_navigation/topological_rviz_tools.{filetype}. In the case of packages
# with multiple readmes, we will create a separate directory for them so
# they are in their own section.
base_path = os.path.join("docs", repo_name)
multiple = False
if len(subpkg_readmes[subpkg]) > 1:
# sometimes the top level may have multiple files, but we don't want
# to put them in a subdirectory
if subpkg != "index":
base_path = os.path.join(base_path, subpkg)
multiple = True
for readme in subpkg_readmes[subpkg]:
            # Get a filename for the new readme file based on where it was in the directory tree.
split_path = path_to_arr(os.path.dirname(readme[0]))
if multiple:
# There is more than one file in the subpackage
lower_fname = os.path.splitext(os.path.basename(readme[0]))[0].lower()
if len(split_path) <= 1:
# The file was at level 0 or 1 in the directory tree. If
# it was called readme, then we rename it to index.{filetype} so
# that it is used as a base page in the documentation.
# Otherwise, we keep its current name in lowercase.
if lower_fname == "readme":
fname = "index.{}".format(filetype)
else:
fname = lower_fname + ".{}".format(filetype)
else:
# The path is long, so the file was nested deeper than
# level 1 in the tree. We will rename it to the name of
# the directory that it was in.
print("path is long: {0}".format(split_path))
fname = split_path[-1] + ".{}".format(filetype)
else:
# There is only one file in the subpackage. If the split
# path length is zero, that means it was a toplevel readme,
# so rename it to index so it's parsed differently by the
# documentation code.
if len(split_path) == 0:
fname = "index.{}".format(filetype)
else:
# Otherwise, rename it to the name of the directory it
# was in.
fname = split_path[-1] + ".{}".format(filetype)
# make sure a directory exists for the files
path = os.path.join(base_path, fname)
print("Saving {0} to {1}".format(readme[1]["path"], path))
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
# Get the contents of the readme file from github and output them to a file
file_rq = json.loads(requests.get(readme[1]["url"], headers=header).text)
# decode and output the base64 string to file
with open(path, 'w') as f:
if filetype == "md":
f.write(base64.b64decode(file_rq["content"]))
else:
f.write(pypandoc.convert_text(base64.b64decode(file_rq["content"]),
filetype,
format="md").encode('utf-8'))
original_url = "https://github.com/{}/{}/blob/{}/{}".format(org_name, repo_name, default_branch, readme[0])
add_doc_footer(original_url, path)
| 5,347,842 |
def extract_subject(subj, mask_name, summary_func=np.mean,
residual=False, exp_name=None):
"""Extract timeseries from within a mask, summarizing flexibly.
Parameters
----------
subj : string
subject name
mask_name : string
name of mask in data hierarchy
summary_func : callable or None
callable to reduce data over voxel dimensions. can take an
``axis`` argument to operate over each frame, if this
argument does not exist the function will be called on the
n_tr x n_voxel array. if None, simply returns all voxels.
residual : boolean
If True, extract from the registered residual timecourse.
exp_name : string
experiment name, if not using the default experiment
Returns
-------
data : dict with ndarray
        data array is n_runs x n_timepoint x n_dimension,
data are not otherwise altered
"""
project = gather_project_info()
if exp_name is None:
exp_name = project["default_exp"]
# Get a path to the file where
cache_dir = op.join(project["analysis_dir"],
exp_name, subj, "evoked")
try:
os.makedirs(cache_dir)
except OSError:
pass
if summary_func is None:
func_name = ""
else:
func_name = summary_func.__name__
cache_fname = mask_name + "_" + func_name
cache_fname = cache_fname.strip("_") + ".npz"
cache_file = op.join(cache_dir, cache_fname)
# Get paths to the relevant files
mask_file = op.join(project["data_dir"], subj, "masks",
"%s.nii.gz" % mask_name)
ts_dir = op.join(project["analysis_dir"], exp_name, subj,
"reg", "epi", "unsmoothed")
n_runs = len(glob(op.join(ts_dir, "run_*")))
ftemp = op.join(ts_dir, "run_{:d}/{}_xfm.nii.gz")
fstem = "res4d" if residual else "timeseries"
ts_files = [ftemp.format(r_i, fstem) for r_i in range(1, n_runs+1)]
# Get the hash value for this extraction
cache_hash = hashlib.sha1()
cache_hash.update(mask_name)
cache_hash.update(str(op.getmtime(mask_file)))
for ts_file in ts_files:
cache_hash.update(str(op.getmtime(ts_file)))
cache_hash = cache_hash.hexdigest()
# If the file exists and the hash matches, return the data
if op.exists(cache_file):
with np.load(cache_file) as cache_obj:
if cache_hash == str(cache_obj["hash"]):
return dict(cache_obj.items())
# Otherwise, do the extraction
data = []
mask = nib.load(mask_file).get_data().astype(bool)
for run, ts_file in enumerate(ts_files):
ts_data = nib.load(ts_file).get_data()
roi_data = ts_data[mask].T
if summary_func is None:
data.append(roi_data)
continue
# Try to use the axis argument to summarize over voxels
try:
roi_data = summary_func(roi_data, axis=1)
# Catch a TypeError and just call the function
# This lets us do e.g. a PCA
except TypeError:
roi_data = summary_func(roi_data)
data.append(roi_data)
data = np.array(list(map(np.squeeze, data)))
# Save the results and return them
data_dict = dict(data=data, subj=subj, hash=cache_hash)
np.savez(cache_file, **data_dict)
return data_dict
| 5,347,843 |
def best_param_search(low=1, margin=1, func=None):
"""
Perform a binary search to determine the best parameter value.
In this specific context, the best
parameter is (the highest) value of the parameter (e.g. batch size)
that can be used to run a func(tion)
(e.g., training) successfully. Beyond a certain value,
the function fails to run for reasons such as out-of-memory.
param low: a starting low value to start searching from (defaults to 1).
param margin: denotes the margin allowed when choosing the
configuration parameter (and the optimal parameter).
param func: the function that is required to be run with the
configuration parameter.
"""
assert low > 0
assert margin > 0
assert func is not None
# Determine if the function succeeds to run at the starting (low) value.
# If not, keep lowering the value of low until the run succeeds.
try:
print(f"Trying with a parameter value of {low}.")
func(low)
success = True
except Exception:
success = False
print("Run failed! The starting value of the parameter is itself too high!\n")
while not success and low > 0:
try:
low = low // 2
print(f"Trying with a parameter value of {low}.")
func(low)
success = True
except Exception:
print("Run failed! Lowering the parameter value.\n")
if not success:
print("The function failed to run even at the lowest parameter value !")
return
# Set coarse limits on low (function succeeds to run) and
# high (function does not succeed running).
while success:
high = 2 * low
try:
print(f"Trying with a parameter value of {high}.")
func(high)
low = high
except Exception:
success = False
print("Run failed!\n")
print(
f"Low and high parameter values set to {low} and {high} respectively."
)
# Binary search to find the optimal value of low (within the margin).
current_margin = high - low
while current_margin > margin:
mid = (low + high) // 2
try:
print(f"Trying with a parameter value of {mid}.")
func(mid)
low = mid
except Exception:
high = mid
print("Run failed!\n")
print(f"Low and high parameter values set to {low} and {high} respectively.")
current_margin = high - low
print(f"Setting the parameter value to {low}\n")
return low
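
# A toy usage sketch: pretend any batch size above 100 runs out of memory.
# best_param_search then reports the largest workable value within the margin.
def _try_batch_size(bs):
    if bs > 100:
        raise RuntimeError("out of memory")

best = best_param_search(low=1, margin=4, func=_try_batch_size)
# best is close to (and never above) 100, within the margin of 4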
| 5,347,844 |
def configure_plugins_plugin_uninstall(request, pk):
"""
Disables a plugin from the system
:param request:
:param pk: The primary key of the plugin to be disabled
:return:
"""
# TODO: See about pulling this out into a common methods
plugin = get_object_or_404(Plugin, pk=pk)
action = reverse(
"api_dispatch_uninstall",
kwargs={"api_name": "v1", "resource_name": "plugin", "pk": pk},
)
ctx = RequestContext(
request,
{
"method": "DELETE",
"action": action,
"i18n": {
"title": ugettext_lazy(
"configure_plugins_plugin_uninstall.title"
), # 'Confirm Uninstall Plugin'
"confirmmsg": ugettext_lazy(
"configure_plugins_plugin_uninstall.messages.confirmmsg.singular"
)
% { # 'Are you sure you want to uninstall %(versionedName)s Plugin (%(id)s)?'
"id": str(pk),
"versionedName": plugin.versionedName(),
},
"submit": ugettext_lazy(
"configure_plugins_plugin_uninstall.action.submit"
), # 'Yes, Uninstall!'
"cancel": ugettext_lazy("global.action.modal.cancel"),
"submitmsg": ugettext_lazy(
"configure_plugins_plugin_uninstall.messages.submitmsg"
), # 'Now uninstalling, please wait.'
},
},
)
return render_to_response(
"rundb/configure/modal_confirm_plugin_uninstall.html", context_instance=ctx
)
| 5,347,845 |
def freeze_params(model: nn.Module) -> None:
"""Disable weight updates on given model.
Args:
model (nn.Module): model
"""
for par in model.parameters():
par.requires_grad = False
| 5,347,846 |
def get_supervised_timeseries_data_set(data, input_steps):
"""This function transforms a univariate timeseries into a supervised learning problem where the input consists
of sequences of length input_steps and the output is the prediction of the next step
"""
series = pd.Series(data)
data_set = pd.DataFrame({'t' : series, 't+1' : series.shift(-1)})
if input_steps > 1:
x_values = np.concatenate([data[i:i+input_steps]
.reshape(1, input_steps) for i in range(len(series) - input_steps)])
timesteps_df = pd.DataFrame(x_values[:,:-1], index=np.arange(input_steps - 1, input_steps - 1 + len(x_values)),
columns = ['t-' + str(input_steps - i) for i in range(1, input_steps)])
data_set = pd.concat([timesteps_df, data_set], axis=1, join='inner')
data_set = data_set.dropna()
X = data_set.drop('t+1', axis=1)
y = data_set.loc[:,'t+1']
return (X, y)
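
# A short usage sketch (the function itself relies on pandas being imported
# as pd in its module): turn a toy series into lagged inputs X and next-step
# targets y using 3 input steps.
import numpy as np

data = np.arange(10, dtype=float)
X, y = get_supervised_timeseries_data_set(data, input_steps=3)
# X has columns ['t-2', 't-1', 't'] and y holds the value one step ahead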
| 5,347,847 |
def _add_layer_metadata(map_layer, layer_cfg):
"""Add layer metadata.
Renders a jinja template to a temporary file location as a valid QGIS qmd
metadata file. This metadata then gets associated with the `map_layer` using
its `loadNamedMetadata` method. This metadata gets written to the project
file when the layer is added to the `project`.
"""
# Load/render the template.
template_path = os.path.join(ASSETS_DIR, 'templates', 'metadata.jinja')
with open(template_path, 'r') as f:
qmd_template_str = ' '.join(f.readlines())
# Set the layer's tooltip
tooltip = build_layer_description(layer_cfg)
map_layer.setAbstract(tooltip)
# Render the qmd template.
abstract = build_layer_abstract(layer_cfg)
layer_extent = map_layer.extent()
qmd_template = Template(qmd_template_str)
rendered_qmd = qmd_template.render(
abstract=abstract,
title=layer_cfg['title'],
minx=layer_extent.xMinimum(),
miny=layer_extent.yMinimum(),
maxx=layer_extent.xMaximum(),
maxy=layer_extent.yMaximum()
)
# Write the rendered tempalte to a temporary file
# location. `map_layer.loadNamedMetadata` expects a string URI corresponding
# to a file on disk.
with tempfile.NamedTemporaryFile('w') as temp_file:
temp_file.write(rendered_qmd)
temp_file.flush()
map_layer.loadNamedMetadata(temp_file.name)
| 5,347,848 |
def test_AbstractGrid_uniform_attributes(
attr,
type,
type_in_iter,
value,
abstract_grid_uniform,
):
"""
Tests that the attributes of AbstractGrid have the correct type and
values for the fixture abstract_grid_uniform.
"""
attr = getattr(abstract_grid_uniform, attr)
assert isinstance(attr, type)
# If the attribute is an iterable, check the type inside too
if type_in_iter is not None:
for elem in attr:
            assert isinstance(elem, type_in_iter)
# If an expected value is given, verify the attribute matches
if value is not None:
if isinstance(value, np.ndarray):
assert np.allclose(attr, value, rtol=0.1)
elif isinstance(value, (float, int)):
assert np.isclose(attr, value, rtol=0.1)
else:
assert attr == value
| 5,347,849 |
def _format_axes(ax):
"""
Adjust axis parameters.
Parameters
----------
ax : axis
Axis object (matplotlib).
"""
from matplotlib.ticker import AutoMinorLocator
# Draw major and minor tick marks inwards
ax.tick_params(
axis='both', which='both', direction='in',
bottom=True, top=True, left=True, right=True)
# Auto-adjust minor tick marks
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
return
| 5,347,850 |
def load_df(name):
"""Load a pandas dataframe from csv file at results/name."""
load_name = os.path.join(here, "..", "results", name)
df = pd.read_csv(load_name)
return df
| 5,347,851 |
def load_template_spectra_from_folder(parent_folder,
spectrum_identifier,
normalization=None):
"""
Load template spectrum data into a dictionary. This allows templates from
different folders to be loaded into different dictionaries.
Parameters:
-----------
parent_folder : string
Name of folder or path
spectrum_identifier : string
Radioactive source identifier. Ex: '235U'
normalization : string or boolean
Default = None
Accepts: 'normalheight', 'normalarea', None
How the dataset should be normalized.
Returns:
--------
temp_dict : Dictionary containing all template spectra from a folder.
"""
temp_dict = {}
def normalize_spectrum(ID):
"""
Normalizes the spectrum data.
Parameters:
-----------
ID : string
The ID key for the radioactive source in your spectrum.
Returns:
--------
temp_dict : Dictionary
Contains all normalized datasets.
"""
temp_spectrum = an.read_spectrum(
parent_folder + ID + spectrum_identifier)
if np.max(temp_spectrum) == 0:
print(ID + ' Contains no values')
if normalization is None:
return temp_spectrum
elif normalization == 'normalheight':
return temp_spectrum / np.max(temp_spectrum)
elif normalization == 'normalarea':
return temp_spectrum / np.sum(temp_spectrum)
for i in range(len(an.isotopes) - 3):
temp_dict[an.isotopes[i]] = normalize_spectrum(
an.isotopes_sources_GADRAS_ID[i])
return temp_dict
| 5,347,852 |
def remove_screenshot_from_object(request):
"""
Removes the screenshot from being associated with a top-level object.
:param request: The Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
analyst = request.user.username
obj = request.POST.get('obj', None)
oid = request.POST.get('oid', None)
sid = request.POST.get('sid', None)
result = delete_screenshot_from_object(obj, oid, sid, analyst)
return HttpResponse(json.dumps(result),
mimetype="application/json")
| 5,347,853 |
def read_csv_as_dicts(
filename,
newline="",
delimiter=",",
quotechar='"',
encoding="utf-8",
remove_prefix=True,
prefix="dv.",
json_cols=CSV_JSON_COLS,
false_values=["FALSE"],
true_values=["TRUE"],
):
"""Read in CSV file into a list of :class:`dict`.
This offers an easy import functionality of your data from CSV files.
See more at
`csv <https://docs.python.org/3/library/csv.html>`_.
CSV file structure:
1) The header row contains the column names.
2) A row contains one dataset
3) A column contains one specific attribute.
Recommendation: Name the column name the way you want the attribute to be
named later in your Dataverse object. See the
`pyDataverse templates <https://github.com/GDCC/pyDataverse_templates>`_
for this. The created :class:`dict` can later be used for the `set()`
function to create Dataverse objects.
Parameters
----------
filename : str
Filename with full path.
newline : str
Newline character.
delimiter : str
        Cell delimiter of CSV file. Defaults to ','.
quotechar : str
Quote-character of CSV file. Defaults to '"'.
encoding : str
Character encoding of file. Defaults to 'utf-8'.
Returns
-------
list
        List with one :class:`dict` per row. The keys of a :class:`dict` are
        named after the column names.
"""
assert isinstance(filename, str)
assert isinstance(newline, str)
assert isinstance(delimiter, str)
assert isinstance(quotechar, str)
assert isinstance(encoding, str)
with open(filename, "r", newline=newline, encoding=encoding) as csvfile:
reader = csv.DictReader(csvfile, delimiter=delimiter, quotechar=quotechar)
data = []
for row in reader:
data.append(dict(row))
data_tmp = []
for ds in data:
ds_tmp = {}
for key, val in ds.items():
if val in false_values:
ds_tmp[key] = False
elif val in true_values:
ds_tmp[key] = True
else:
ds_tmp[key] = val
data_tmp.append(ds_tmp)
data = data_tmp
if remove_prefix:
data_tmp = []
for ds in data:
ds_tmp = {}
for key, val in ds.items():
if key.startswith(prefix):
ds_tmp[key[len(prefix) :]] = val
else:
ds_tmp[key] = val
data_tmp.append(ds_tmp)
data = data_tmp
if len(json_cols) > 0:
data_tmp = []
for ds in data:
ds_tmp = {}
for key, val in ds.items():
if key in json_cols:
ds_tmp[key] = json.loads(val)
else:
ds_tmp[key] = val
data_tmp.append(ds_tmp)
data = data_tmp
return data
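
# Hedged usage sketch for read_csv_as_dicts: writes a tiny CSV and reads it back.
# The file name and the "dv." column prefix are illustrative; json_cols is passed
# explicitly so the example does not depend on the module-level CSV_JSON_COLS.
import csv

with open("datasets.csv", "w", newline="", encoding="utf-8") as f:
    writer = csv.writer(f)
    writer.writerow(["dv.title", "dv.published"])
    writer.writerow(["My Dataset", "TRUE"])

rows = read_csv_as_dicts("datasets.csv", json_cols=[])
# rows == [{"title": "My Dataset", "published": True}]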
| 5,347,854 |
def update_order():
""" endpoint for updating an existing order.
---
parameters:
- name: x-access-token
in: header
type: string
required: true
- name: order_id
in: path
type: integer
required: true
- name: meal_id
in: formData
type: integer
required: true
"""
| 5,347,855 |
def get_rewrite_outputs(wrapped_model: nn.Module,
model_inputs: Dict[str, Union[Tuple, List,
torch.Tensor]],
deploy_cfg: mmcv.Config,
run_with_backend: bool = True) -> Tuple[Any, bool]:
"""To get outputs of generated onnx model after rewrite.
Args:
wrapped_model (nn.Module): The input model.
model_inputs (dict): Inputs for model.
deploy_cfg (mmcv.Config): Deployment config.
run_with_backend (bool): Whether to run inference with backend.
Default is True.
Returns:
        List[torch.Tensor]: The outputs of the model.
        bool: A flag indicating the type of outputs. If the flag is True, the
        outputs come from the backend; otherwise they are outputs of the wrapped
        pytorch model.
"""
backend = get_backend(deploy_cfg)
with RewriterContext(
cfg=deploy_cfg, backend=backend.value, opset=11), torch.no_grad():
ctx_outputs = wrapped_model(**model_inputs)
ir_type = get_ir_config(deploy_cfg).get('type', None)
if ir_type == IR.TORCHSCRIPT.value:
ir_file_path = get_ts_model(wrapped_model, model_inputs, deploy_cfg)
else: # TODO onnx as default, make it strict when more IR types involved
ir_file_path = get_onnx_model(wrapped_model, model_inputs, deploy_cfg)
backend_outputs = None
if run_with_backend:
backend_outputs = get_backend_outputs(ir_file_path, model_inputs,
deploy_cfg)
if backend_outputs is None:
return ctx_outputs, False
else:
return backend_outputs, True
| 5,347,856 |
def _get_implied_dependencies(path: str) -> list:
""" Attempt to replace _get_requirements_from_file
Extracts import statements via regex.
Does not catch all import statements and its
use was rolled back.
Might still be overhauled and integrated again.
"""
_python_files = search_filename(
base_folder=path,
file_name="**/*.py",
recursive_flag=True
)
_tmp_project_path = tempfile.mkdtemp()
_tmp_file_path = _tmp_project_path + "/dependencies.py"
_tmp_file = open(_tmp_file_path, 'w')
for file in _python_files:
for _import in _get_imports(file):
_tmp_file.write(_import.strip()+'\n')
_tmp_file.close()
try:
_all_imports = pipreqs.get_all_imports(
path=_tmp_project_path,
encoding='utf-8'
)
except (IndentationError, SyntaxError):
return None
# Clean up tmp folder
if os.path.isfile(_tmp_file_path):
os.remove(_tmp_file_path)
if os.path.isdir(_tmp_project_path):
os.rmdir(_tmp_project_path)
_imports = _remove_local_dependencies(path, _all_imports)
return pipreqs.get_pkg_names(_imports)
| 5,347,857 |
def get_interface_breakout_param(dut,**kwargs):
"""
Author: Naveen Nag
email : [email protected]
:param dut:
:param interface:
:param fields:
:return: interface breakout speed
Usage:
port.get_interface_breakout_param(dut1, 'Ethernet4')
:return - ['4x10G', 'Completed']
"""
param_breakout = []
if 'interface' not in kwargs :
st.error("Mandatory argument \'interface\' is missing")
return False
if 'Eth' in kwargs['interface']:
st.log('Physical interface name is provided, mapping it to a port group')
res1 = get_interface_breakout_mode(dut, kwargs['interface'], 'port')
if res1:
kwargs['interface'] = 'port ' + res1[0]['port']
else:
st.error('Invalid interface, cannot get the status')
return False
output = st.show(dut, "show interface breakout {}".format(kwargs['interface']), type='klish')
if len(output) == 0:
st.error("Provided interface is not a breakout port")
return False
else:
param_breakout.append(str(output[0]['breakout_mode'].strip('G')))
param_breakout.append(output[0]['status'])
return param_breakout
| 5,347,858 |
def get_process_basic_window_enriched(cb, print_detail, window):
"""
    Search for enriched processes in the given time window and return a process GUID from the results.
Args:
cb (CBCloudAPI): API object
print_detail (bool): whether to print full info to the console, useful for debugging
window (str): period to search
Returns:
process_guid of the first process in the returned list
"""
print("\n----------------------------------------------------------")
print("API Calls:")
print("Start a Process Search (v2)")
print("Get the Status of a Process Search (v1)")
print("Retrieve Results for a Process Search (v2)\n")
process_query = cb.select(Process).where("enriched:true")
process_query.set_time_range(window=window)
matching_processes = [process for process in process_query]
print(f"There are {len(matching_processes)} found in {window} of processes")
if print_detail:
for process in matching_processes:
print("{0:16} {1:5} {2:20}".format(process.device_name, process.process_pids[0], process.process_guid))
try:
print(f"process guid being used is {matching_processes[6].process_guid}")
print("Test PASSED")
except IndexError:
print("Test FAILED")
print("----------------------------------------------------------")
return matching_processes[6].process_guid
| 5,347,859 |
def merge_bams(bams, output_file, sort_by="index"):
"""Merge a list of .bam files into a single sorted, indexed .bam file"""
if sort_by not in ["index", "name"]:
raise ValueError(
"sort_by must be one of ['name', 'index'], not %s" % sort_by
)
# Empty bams cause samtools error, exclude them
bams = [bam for bam in bams if not is_empty(bam)]
merge_args = ["cat"] + bams
command = samtools.__getitem__(merge_args)
if sort_by == "index":
command = command | samtools["sort"]
else:
command = command | samtools["sort", "-n"]
(command > output_file) & FG
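
# Hedged usage sketch for merge_bams (assumes samtools is installed and that the
# plumbum-based `samtools` command and the `is_empty` helper used above are
# importable from this module; the .bam file names are illustrative).
merge_bams(
    bams=["sample1.bam", "sample2.bam"],
    output_file="merged.sorted.bam",
    sort_by="index",  # use "name" for a name-sorted merge instead
)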
| 5,347,860 |
def home():
"""Home view"""
if flask.session.get('userid'):
leaderboard_players = rankedlist(
member=db.web.session.query(
models.Member).get(
flask.session['userid']))
member = db.web.session.query(
models.Member).get(
flask.session.get('userid'))
else:
leaderboard_players = rankedlist(
member=db.web.session.query(
models.Member).filter_by(
rank=3).first())
member = None
news = db.web.session.query(models.NewsArticle).order_by(
desc("date"))
return render_template('content_home.html', news=news, member=member,
leaderboard_players=leaderboard_players)
| 5,347,861 |
def is_number(string):
""" Tests if a string is valid float. """
try:
float(string)
return True
except ValueError:
return False
| 5,347,862 |
def recursive_load_gfx(path, accept=(".png", ".bmp", ".svg")):
"""
Load graphics files.
This operates on a one folder at a time basis.
Note: An empty string doesn't count as invalid,
since that represents a folder name.
"""
colorkey = c.UGLY_PURPLE
graphics = {}
for pic in os.listdir(path):
pic_path = os.path.join(path, pic)
name, ext = os.path.splitext(pic)
if ext.lower() in accept:
img = pygame.image.load(pic_path)
if img.get_alpha():
                # convert_alpha() returns a new surface, so keep the result
                img = img.convert_alpha()
else:
img = img.convert()
img.set_colorkey(colorkey)
graphics[name] = img
elif not ext:
pass
else:
print("Got unexpected gfx format\n" \
"Path: {}\n" \
"Name: {}\n" \
"Ext: {}\n".format(pic_path, name, ext))
return graphics
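
# Hedged usage sketch for recursive_load_gfx (assumes pygame is installed and a
# "resources/graphics" folder of images exists; convert()/convert_alpha() need a
# display surface, so a window is created first).
import pygame

pygame.init()
pygame.display.set_mode((640, 480))
graphics = recursive_load_gfx("resources/graphics")
player_image = graphics.get("player")  # surfaces keyed by file name without extension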
| 5,347,863 |
def inputs(filename, batch_size, n_read_threads = 3, num_epochs = None, image_width = 200, image_height=290):
"""
reads the paired images for comparison
input: name of the file to load from, parameters of the loading process
output: the two images and the label (a logit classifier for 2 class - yes or no)
"""
    with tf.device('/cpu:0'):  # we need to load using the CPU or it allocates a stupid amount of memory
x1, x2, y_ = pc.input_pipeline([filename], batch_size, n_read_threads, num_epochs=num_epochs, imgwidth = image_width, imgheight = image_height)
return x1, x2, y_
| 5,347,864 |
def combinedlogger(
log_name,
log_level=logging.WARN,
syslogger_format="%(levelname)s %(message)s",
consolelogger_format="%(asctime)s %(levelname)s %(message)s",
):
"""
Returns a combined SysLogHandler/StreamHandler logging instance
with formatters
"""
if "LOGLEVEL" in os.environ:
log_level = os.environ["LOGLEVEL"]
try:
log_level = int(log_level)
except ValueError:
pass
# for writing to syslog
newlogger = logging.getLogger(log_name)
if syslogger_format and os.path.exists("/dev/log"):
my_syslog_formatter = logging.Formatter(
fmt=" ".join((log_name, syslogger_format))
)
my_syslog_handler = logging.handlers.SysLogHandler(
address="/dev/log",
facility=SysLogHandler.LOG_DAEMON,
)
my_syslog_handler.setFormatter(my_syslog_formatter)
newlogger.addHandler(my_syslog_handler)
if consolelogger_format:
my_stream_formatter = logging.Formatter(fmt=consolelogger_format)
my_stream_handler = logging.StreamHandler()
my_stream_handler.setFormatter(my_stream_formatter)
newlogger.addHandler(my_stream_handler)
newlogger.setLevel(log_level)
return newlogger
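
# Hedged usage sketch for combinedlogger: builds a logger that writes to syslog
# (when /dev/log exists) and to the console; the logger name is illustrative.
import logging

app_log = combinedlogger("my-daemon", log_level=logging.INFO)
app_log.info("service started")
app_log.warning("disk usage above threshold")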
| 5,347,865 |
def abs_p_diff(predict_table, categA='sandwich', categB='sushi'):
"""Calculates the absolute distance between two category predictions
:param predict_table: as returned by `predict_table`
:param categA: the first of two categories to compare
    :param categB: the second of two categories to compare
:returns: series with the absolute difference between the predictions
:rtype: pandas Series
"""
return abs(predict_table['p_%s' % categA] - predict_table['p_%s' % categB])
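
# Hedged usage sketch for abs_p_diff; the DataFrame below is a minimal stand-in
# for a prediction table with the expected "p_<category>" columns.
import pandas as pd

preds = pd.DataFrame({"p_sandwich": [0.7, 0.2], "p_sushi": [0.3, 0.8]})
diffs = abs_p_diff(preds)  # pandas Series, approximately [0.4, 0.6]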
| 5,347,866 |
def _is_target_feature(column_names, column_mapping):
"""Assert that a feature only contains target columns if it contains any."""
column_names_set = set(column_names)
column_types = set(column['type']
for column_name, column in column_mapping.iteritems()
if column_name in column_names_set)
if 'target' in column_types:
assert len(column_types) == 1, (
'Features with target columns can only contain target columns.'
'Found column_types: %s for columns %s' % (column_types,
column_names))
return True
else:
return False
| 5,347,867 |
def server_upload_document(path: str, title: str, peer: int, document_type: str = "doc") -> \
typing.Tuple[bool, typing.Union[str, typing.Any]]:
""" Uploads document to the server and returns it (as document string). """
try:
# Trying to upload document.
# Getting api for the uploader.
server_docs_api = SERVER_API.get_api().docs # noqa
# Getting upload url.
if "upload_url" in (upload_server := server_docs_api.getMessagesUploadServer(type=document_type, peer_id=peer)):
            # The response contains the upload URL; extract it.
upload_url = upload_server["upload_url"]
else:
            # The upload URL is missing; report an error.
return False, "Upload Server Error" + str(upload_server)
# Posting file on the server.
request = json.loads(requests.post(upload_url, files={
"file": open(path, "rb")
}).text)
if "file" in request:
# If there is all fields in response.
# Saving file to the docs.
request = server_docs_api.save(file=request["file"], title=title, tags=[])
# Get fields.
document_id = request[document_type]["id"]
document_owner_id = request[document_type]["owner_id"]
# Returning document.
return True, f"doc{document_owner_id}_{document_id}"
        # Not all expected fields were returned; log a debug message.
debug_message(f"[Server] Error when uploading document (Request)! Request - {request}")
# Returning request as error.
return False, "Request Error" + str(request)
except Exception as exception: # noqa, pylint: disable=broad-except, redefined-outer-name
# If there is error.
# Debug message.
debug_message(f"Error when uploading document (Exception)! Exception: {exception}")
# Returning exception.
return False, "Exception Error" + str(exception)
| 5,347,868 |
def _default_mono_text_dataset_hparams():
"""Returns hyperparameters of a mono text dataset with default values.
See :meth:`texar.MonoTextData.default_hparams` for details.
"""
return {
"files": [],
"compression_type": None,
"vocab_file": "",
"embedding_init": Embedding.default_hparams(),
"delimiter": " ",
"max_seq_length": None,
"length_filter_mode": "truncate",
"pad_to_max_seq_length": False,
"bos_token": SpecialTokens.BOS,
"eos_token": SpecialTokens.EOS,
"other_transformations": [],
"variable_utterance": False,
"utterance_delimiter": "|||",
"max_utterance_cnt": 5,
"data_name": None,
"@no_typecheck": ["files"]
}
| 5,347,869 |
def validate_engine_mode(engine_mode):
"""
Validate database EngineMode for DBCluster
Property: DBCluster.EngineMode
"""
VALID_DB_ENGINE_MODES = (
"provisioned",
"serverless",
"parallelquery",
"global",
"multimaster",
)
if engine_mode not in VALID_DB_ENGINE_MODES:
raise ValueError(
"DBCluster EngineMode must be one of: %s" % ", ".join(VALID_DB_ENGINE_MODES)
)
return engine_mode
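
# Hedged usage sketch for validate_engine_mode: valid modes pass through
# unchanged, anything else raises ValueError.
validate_engine_mode("serverless")  # returns "serverless"
try:
    validate_engine_mode("standalone")
except ValueError as err:
    print(err)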
| 5,347,870 |
def GetBlameListForV2Build(build):
""" Uses gitiles_commit from the previous build and current build to get
blame_list.
Args:
build (build_pb2.Build): All info about the build.
Returns:
(list of str): Blame_list of the build.
"""
search_builds_response = buildbucket_client.SearchV2BuildsOnBuilder(
build.builder, build_range=(None, build.id), page_size=2)
previous_build = None
for search_build in search_builds_response.builds:
# TODO(crbug.com/969124): remove the loop when SearchBuilds RPC works as
# expected.
if search_build.id != build.id:
previous_build = search_build
break
if not previous_build:
logging.error(
'No previous build found for build %d, cannot get blame list.',
build.id)
return []
repo_url = git.GetRepoUrlFromV2Build(build)
return git.GetCommitsBetweenRevisionsInOrder(
previous_build.input.gitiles_commit.id, build.input.gitiles_commit.id,
repo_url)
| 5,347,871 |
def run_command(cmd, log_method=log.info):
"""Subprocess wrapper for capturing output of processes to logs
"""
if isinstance(cmd, str):
cmd = cmd.split(" ")
start = datetime.utcnow()
log_method("Starting run_command for: {}".format(" ".join([str(x) for x in cmd])))
p = sp.Popen(cmd, bufsize=0, stdout=sp.PIPE, stderr=sp.STDOUT)
ret_val = None
while True:
line = p.stdout.readline()
ret_val = p.poll()
        if not line and ret_val is not None:
break
log_method(line.decode())
log_method("Completed run_command in {} for: {}".format((datetime.utcnow() - start).total_seconds(), " ".join(cmd)))
return ret_val
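
# Hedged usage sketch for run_command; passing print as log_method avoids relying
# on the module-level `log` object, and the command itself is illustrative.
exit_code = run_command("ls -l /tmp", log_method=print)
print("exit code:", exit_code)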
| 5,347,872 |
def jar(state: State, fail: Fail):
"""
Store a function by a name
"""
(identifier, (code, rest)) = state.infinite_stack()
if identifier.tag != "atom":
fail(f"{identifier} is not an atom")
if code.tag not in ["code", "native"]:
fail(f"{code} is not code")
if code.tag == "code":
code = code.with_name(identifier.value)
return state.with_stack(rest).set_name(identifier.value, code)
| 5,347,873 |
def _write_info(hass, auth):
"""Write auth info for specified mode.
Pass in None for data to remove authentication for that mode.
"""
path = hass.config.path(AUTH_FILE)
mode = get_mode(hass)
if os.path.isfile(path):
with open(path) as file:
content = json.load(file)
else:
content = {}
if auth.is_logged_in:
content[mode] = {
'id_token': auth.cognito.id_token,
'access_token': auth.cognito.access_token,
'refresh_token': auth.cognito.refresh_token,
}
else:
content.pop(mode, None)
with open(path, 'wt') as file:
file.write(json.dumps(content, indent=4, sort_keys=True))
| 5,347,874 |
def insert_new_post(post_arg_set):
"""
insert new post into redis
"""
api, post_data, acct_data, page_id, config = post_arg_set
try:
post_id = post_data['id'] if post_data.has_key('id') else None
except Exception as e:
log.error( e )
else:
# parse date
if post_data.has_key('created_time') and post_data['created_time'] is not None:
dt = datetime.strptime(post_data['created_time'], FB_DATE_FORMAT)
date_time = tz_adj(dt, config)
time_bucket = round_datetime(date_time, config)
raw_timestamp = int(date_time.strftime("%s"))
else:
time_bucket = None
raw_timestamp = None
# extract message so we can find links within the msg if not in url
article_urls = [get_fb_link(post_data, config, unshorten=True)]
message = post_data['message'].encode('utf-8') if post_data.has_key('message') else None
message_urls = get_message_urls(article_urls, message, config)
# detect article links, unshorten and parse
article_urls = [
parse_url(unshorten_link(url, config)) \
for url in article_urls + message_urls
if url is not None
]
article_urls = [url for url in article_urls if is_article(url, config)]
if article_urls:
for article_url in set(article_urls):
# sluggify url
article_slug = sluggify(article_url)
# format data
post_value = {
'article_slug': article_slug,
'article_url': article_url,
'time_bucket': time_bucket,
'fb_post_created': raw_timestamp,
'raw_timestamp': raw_timestamp,
'fb_raw_link' : get_fb_link(post_data, config=config),
'fb_page_id': page_id,
'fb_post_id': post_id,
'fb_page_likes': acct_data['likes'] if acct_data.has_key('likes') else None,
'fb_page_talking_about': acct_data['talking_about_count'] if acct_data.has_key('talking_about_count') else None,
'fb_type': post_data['type'] if post_data.has_key('type') else None,
'fb_status_type': post_data['status_type'] if post_data.has_key('status_type') else None,
'fb_message': message
}
# always insert insights data
if is_insights(page_id, config):
log.info( "INSIGHTS\tAdding data from %s re: %s" % (page_id, article_slug) )
# fetch data
insights_value = get_insights_data(api, page_id, post_id)
# create datasource name
data_source = "facebook_insights_%s" % page_id
# upsert url
upsert_url(article_url, article_slug, data_source, config)
# insert id
db.sadd('facebook_post_ids', post_id)
# format time bucket
current_time_bucket = gen_time_bucket(config)
insights_value['time_bucket'] = current_time_bucket
post_value.pop('time_bucket', None)
value = json.dumps({
data_source : dict(post_value.items() + insights_value.items())
})
# upload data to redis
db.zadd(article_slug, current_time_bucket, value)
# only insert new posts
if not db.sismember('facebook_post_ids', post_id):
log.info( "FACEBOOK\tNew post %s\t%s" % (post_id, article_url) )
# insert id
db.sadd('facebook_post_ids', post_id)
# upsert url
data_source = "facebook_%s" % page_id
upsert_url(article_url, article_slug, data_source, config)
value = json.dumps( {data_source : post_value} )
# upload data to redis
db.zadd(article_slug, time_bucket, value)
| 5,347,875 |
def get_ideology_lookup(schema):
"""
getter function to enable ideology lookup for a concept
input: group schema
output: generator object of ideology-list(concept) pairs
(TODO: will require a check for correct format of group schema)
"""
for ideology, attribute in schema.items():
for concepts in attribute.values():
yield ideology, list(concepts.keys())
| 5,347,876 |
def existant_file(filepath:str):
"""Argparse type, raising an error if given file does not exists"""
if not os.path.exists(filepath):
raise argparse.ArgumentTypeError(
" file {} doesn't exists".format(C_FILE + filepath + C_ENDC)
)
return filepath
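
# Hedged usage sketch for existant_file as an argparse type: the parser rejects
# paths that do not exist on disk (the file name below is illustrative).
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("config", type=existant_file)
args = parser.parse_args(["settings.ini"])  # exits with an error if settings.ini is missing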
| 5,347,877 |
def get_daily_activity(p_sap_date: str) -> dict:
""" Returns activities on the given date """
fiori_url = config.CONSTANTS["ECZ_DAHA_DAILY_URL"] + "?date=" + p_sap_date
resp = requests.get(
fiori_url,
auth=HTTPBasicAuth(
config.CONSTANTS["ECZ_DAHA_USER"],
config.CONSTANTS["ECZ_DAHA_PASS"]))
resp_as_dict = json.loads(resp.text)
return resp_as_dict
| 5,347,878 |
def load_example_abc(title: Optional[str] = None) -> str:
"""Load a random example ABC if `title` not provided.
Case ignored in the title.
"""
if title is None:
import random
k = random.choice(list(examples))
else:
k = title.lower()
abc = examples.get(k)
if abc is None:
example_list = "\n".join(f" {t!r}" for t in examples)
raise ValueError("invalid tune title. Valid options are:\n" f"{example_list}")
return abc
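
# Hedged usage sketch for load_example_abc: with no title a random example tune is
# returned; the explicit title below is an assumption (unknown titles raise
# ValueError listing the valid options, case ignored).
random_tune = load_example_abc()
named_tune = load_example_abc("Greensleeves")  # assumed to be among the examples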
| 5,347,879 |
def load_config(config_file_path):
"""
Load the config ini, parse settings to WORC
Args:
config_file_path (String): path of the .ini config file
Returns:
settings_dict (dict): dict with the loaded settings
"""
if not os.path.exists(config_file_path):
e = f'File {config_file_path} does not exist!'
raise ae.WORCKeyError(e)
settings = configparser.ConfigParser()
settings.read(config_file_path)
settings_dict = {'ComBat': dict()}
# Convert settings
settings_dict['ComBat']['batch'] =\
[str(item).strip() for item in
settings['ComBat']['batch'].split(',')]
settings_dict['ComBat']['mod'] =\
[str(item).strip() for item in
settings['ComBat']['mod'].split(',')]
settings_dict['ComBat']['par'] =\
settings['ComBat'].getint('par')
settings_dict['ComBat']['eb'] =\
settings['ComBat'].getint('eb')
settings_dict['ComBat']['language'] =\
str(settings['ComBat']['language'])
settings_dict['ComBat']['matlab'] =\
str(settings['ComBat']['matlab'])
settings_dict['ComBat']['per_feature'] =\
int(settings['ComBat']['per_feature'])
settings_dict['ComBat']['excluded_features'] =\
[str(item).strip() for item in
settings['ComBat']['excluded_features'].split(',')]
return settings_dict
| 5,347,880 |
def get_last_confirmed() -> Dict:
"""
    This function gets the last day saved in MongoDB and
    shows the confirmed cases and the accumulated total.
- The country is the only needed path parameter.
"""
date = db.find({}, {"date": 1, "_id": 0}).sort("date", -1).limit(1)
date = list(date)
pipeline = [
{"$match": {"date": {"$eq": date[0]["date"]}}},
{
"$project": {
"cases": 1,
"cases_accumulated": 1,
"date": {"$dateToString": {"format": "%Y-%m-%d", "date": "$date"}},
}
},
{
"$group": {
"_id": "$date",
"cases_accumulated": {"$sum": "$cases_accumulated"},
"cases": {"$sum": "$cases"},
}
},
{"$project": {"date": "$_id", "cases": 1, "cases_accumulated": 1, "_id": 0}},
]
result = db.aggregate(pipeline)
return loads(json_util.dumps(list(result)[0]))
| 5,347,881 |
def plot_modes(Nsq, depth, nmodes, wmodes, pmodes, rmodes):
"""Plot Brunt-Vaisala (buoyancy) frequency profile and 3 sets of modes
(vertical velocity, horizontal velocity, and vertical density) in 4 panes.
:arg Nsq: Brunt-Vaisala (buoyancy) frequencies squared in [1/s^2]
:type Nsq: :class:`numpy.ndarray`
:arg depth: Depths in [m]
:type depth: :class:`numpy.ndarray`
:arg wmodes: Vertical velocity modes
:type wmodes: :class:`numpy.ndarray`
:arg pmodes: Horizontal velocity modes
:type pmodes: :class:`numpy.ndarray`
:arg rmodes: Vertical density modes
:type rmodes: :class:`numpy.ndarray`
:arg nmodes: Number of modes to calculate
:type nmodes: int
"""
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(2, 2, 1)
# Nsq
ax.plot(Nsq, -depth)
ax.ticklabel_format(style='sci', scilimits=(2, 2), axis='x')
ax.set_ylabel('z')
ax.set_xlabel('N^2')
# modes
mode_sets = [
# (values, subplot number, x-axis title)
(wmodes, 2, 'wmodes'),
(pmodes, 3, 'pmodes'),
(rmodes, 4, 'rmodes'),
]
for mode_set in mode_sets:
modes, subplot, title = mode_set
ax = fig.add_subplot(2, 2, subplot)
for i in xrange(nmodes):
ax.plot(modes[i], -depth, label='mode {}'.format(i + 1))
ax.ticklabel_format(style='sci', scilimits=(3, 3), axis='x')
ax.set_ylabel('z')
ax.set_xlabel(title)
ax.legend(loc='best')
| 5,347,882 |
def _newNode( cls, named ):
"""Construct new instance of cls, set proper color, and add to objects"""
if not scene.visible:
scene.visible = 1
if not [k for k in ('color','red','green','blue') if k in named]:
named['color'] = scene.foreground
if 'display' in named:
target = named['display']
del named['display'] # XXX fix when have backref added
else:
target = scene
if not target.visible:
target.visible = 1
node = cls(**named)
objs = target.objects
objs.append( node )
target.objects = objs
return node
| 5,347,883 |
def index_f_and_f(dump_pk, user_pk):
"""
    Run all plugins for a new index on dask
"""
dask_client = Client(settings.DASK_SCHEDULER_URL)
fire_and_forget(dask_client.submit(unzip_then_run, dump_pk, user_pk))
| 5,347,884 |
def MplJs():
"""
Serves the generated matplotlib javascript file. The content
is dynamically generated based on which toolbar functions the
user has defined. Call `FigureManagerWebAgg` to get its
content.
"""
js_content = FigureManagerWebAgg.get_javascript()
resp = make_response(js_content, 200)
resp.headers['Content-Type'] = 'application/javascript'
return resp
| 5,347,885 |
def load_jupyter_server_extension(nbapp):
"""Load the Jupyter server extension.
"""
config = JupyterLabGit(config=nbapp.config)
git = Git(nbapp.web_app.settings['contents_manager'], config)
nbapp.web_app.settings["git"] = git
setup_handlers(nbapp.web_app)
| 5,347,886 |
def VVSS2021_fig4_plot(data, model, sizes=fig_sizes, cmaps=colormaps):
"""
Create and save a plot of the results from the linear regression reaction time model
:param data: the data frame
:param model: the fitted reaction time model
:param cmaps: a dictionary of colormaps
:param sizes: a dictionary of sizes
    :return: None
"""
fig, axs = plt.subplots(1, 1, figsize=(sizes['width'], sizes['height']))
sampleIDs = [1, 2, 3, 4, 5, 6]
t_cm_discrete = cmaps['t_cm'](np.linspace(0, 1, len(sampleIDs)))
for col, c in zip(sampleIDs, t_cm_discrete):
tw = 'sampleProbHit_0{}'.format(col)
pred_rt = model.coefs.loc['(Intercept)', 'Estimate'] + model.coefs.loc[tw, 'Estimate'] * data.loc[:, tw]
axs.plot(data[tw], pred_rt, label='sample {}'.format(col), color=c, linewidth=5)
axs.legend(loc=(1, 0))
axs.set_ylabel('response time [s]')
axs.set_xlabel('normalized p[H]')
fig.savefig(path_figs + "Fig4_lmRTs.pdf", bbox_inches='tight')
return None
| 5,347,887 |
def end_db_session(error):
"""Commit any changes or rollback on failure."""
if hasattr(g, "db_session"):
db_session = g.db_session
try:
if error:
raise error
db_session.commit()
except SQLAlchemyError:
db_session.rollback()
LOGGER.exception("Error committing changes, rolling back.")
finally:
db_session.remove()
| 5,347,888 |
def stat(noten):
""" Berechne Mittelwert, Median, min, max, oberes und unteres Quantil """
minimum = round(min(noten), 2)
maximum = round(max(noten), 2)
_median = median(noten)
_mittelwert = mittelwert(noten)
[unteres_quartil, oberes_quartil] = quartile(noten)
return [minimum, unteres_quartil, _median, _mittelwert, oberes_quartil, maximum]
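
# Hedged usage sketch for stat, assuming the mittelwert/median/quartile helpers
# from this module (German for mean/median/quartiles) are available.
noten = [1.0, 1.7, 2.0, 2.3, 2.7, 3.0]  # grades
minimum, q1, med, mean, q3, maximum = stat(noten)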
| 5,347,889 |
def check_order_type_enabled(order_type):
"""Assert that the input order type is enabled in the settings
Parameters
----------
order_type: oseoserver.constants.OrderType
Enumeration value
"""
generic_config = utilities.get_generic_order_config(order_type)
if not generic_config.get("enabled", False):
logger.debug("Orders of type {0} are not enabled".format(order_type))
if order_type in (Order.PRODUCT_ORDER,
Order.MASSIVE_ORDER):
raise errors.ProductOrderingNotSupportedError()
elif order_type == Order.SUBSCRIPTION_ORDER:
raise errors.SubscriptionNotSupportedError()
else: # Order.TASKING_ORDER
raise errors.FutureProductNotSupportedError()
| 5,347,890 |
def translation_activate_block(function=None, language=None):
"""
Activate language only for one method or function
"""
def _translation_activate_block(function):
def _decorator(*args, **kwargs):
tmp_language = translation.get_language()
try:
translation.activate(language or settings.LANGUAGE_CODE)
return function(*args, **kwargs)
finally:
translation.activate(tmp_language)
return wraps(function)(_decorator)
if function:
return _translation_activate_block(function)
else:
return _translation_activate_block
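
# Hedged usage sketch for translation_activate_block as a decorator (assumes a
# configured Django project with the "de" locale available; the function below is
# illustrative).
@translation_activate_block(language="de")
def current_language_demo():
    from django.utils import translation
    return translation.get_language()  # "de" inside the block, previous language restored after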
| 5,347,891 |
def exportItems():
"""
Export the API items into form for use in another script.
"""
infile=open(api_item_filename,'w')
    # write the function lead-in
infile.write("# Auto-generated file (see get-api_items.py)\n#\n\ndef get_mapped_items():\n\tmapped_wiki_inline_code = dict()\n" )
for item in api_reference_items:
#Write out each API item to add
infile.write("\tmapped_wiki_inline_code['%s']='%s'\n" % (item, api_reference_items[item]) )
    # write the return function
infile.write("\treturn mapped_wiki_inline_code" )
infile.close()
| 5,347,892 |
def _loaded_schema_collections(schema_file_relative_dir) -> SchemaCollectionManager:
"""A loaded ``SchemaCollectionManager`` object, but this should never be modified. This object manages ``Schema``
objects corresponding to ``tests/{datasets,formats,licenses}.yaml``. Note that these are not necessarily the same as
the ones used in other schema fixtures, so please do not assume that it is equal to other schema fixtures. One
purpose of this fixture is to reduce repeated call in the test to the same function when ``loaded_schemata`` is
used. The other purpose is to provide other session-scoped fixtures access to the loaded schemata, because
session-scoped fixtures can't load function-scoped fixtures.
"""
return SchemaCollectionManager(datasets=DatasetSchemaCollection(schema_file_relative_dir / 'datasets.yaml'),
formats=FormatSchemaCollection(schema_file_relative_dir / 'formats.yaml'),
licenses=LicenseSchemaCollection(schema_file_relative_dir / 'licenses.yaml'))
| 5,347,893 |
def dag(name=None, child_tasks=None, edges=None, target=None):
"""
Create a DAG task
Args:
name (str): Name for the task
child_tasks (list [Task]): Child tasks within this dag
edges (list [tuple (Ref, Ref)]): List of tuples of ref(Task).
Each element denotes an edge from
first task to the second.
target (Ref): Target entity reference
Returns:
(Task): DAG task
"""
dag_edges = []
for edge in edges or []:
if len(edge) != 2:
raise ValueError("DAG edges require a tuple of two task references")
for task_ref in edge:
if not getattr(task_ref, "__kind__") == "app_ref":
raise ValueError("{} is not a valid task reference".format(task_ref))
from_ref = edge[0]
to_ref = edge[1]
dag_edges.append({"from_task_reference": from_ref, "to_task_reference": to_ref})
# This follows UI naming convention for runbooks
name = name or str(uuid.uuid4())[:8] + "_dag"
kwargs = {
"name": name,
"child_tasks_local_reference_list": [
task.get_ref() for task in child_tasks or []
],
"attrs": {"edges": dag_edges},
"type": "DAG",
}
if target:
kwargs["target_any_local_reference"] = target
return _task_create(**kwargs)
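
# Hedged usage sketch for dag(); `make_task` is a hypothetical stand-in for
# whatever task constructor this DSL provides and is not part of the original
# source. The edge runs t1 before t2.
t1 = make_task(name="provision")  # hypothetical task factory
t2 = make_task(name="configure")  # hypothetical task factory
pipeline = dag(
    name="deploy_dag",
    child_tasks=[t1, t2],
    edges=[(t1.get_ref(), t2.get_ref())],
)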
| 5,347,894 |
async def async_validate_pdf_signature(
embedded_sig: EmbeddedPdfSignature,
signer_validation_context: ValidationContext = None,
ts_validation_context: ValidationContext = None,
ac_validation_context: ValidationContext = None,
diff_policy: DiffPolicy = None,
key_usage_settings: KeyUsageConstraints = None,
skip_diff: bool = False) -> PdfSignatureStatus:
"""
.. versionadded:: 0.9.0
.. versionchanged: 0.11.0
Added ``ac_validation_context`` param.
Validate a PDF signature.
:param embedded_sig:
Embedded signature to evaluate.
:param signer_validation_context:
Validation context to use to validate the signature's chain of trust.
:param ts_validation_context:
Validation context to use to validate the timestamp's chain of trust
(defaults to ``signer_validation_context``).
:param ac_validation_context:
Validation context to use to validate attribute certificates.
If not supplied, no AC validation will be performed.
.. note::
:rfc:`5755` requires attribute authority trust roots to be specified
explicitly; hence why there's no default.
:param diff_policy:
Policy to evaluate potential incremental updates that were appended
to the signed revision of the document.
Defaults to
:const:`~pyhanko.sign.diff_analysis.DEFAULT_DIFF_POLICY`.
:param key_usage_settings:
A :class:`.KeyUsageConstraints` object specifying which key usages
must or must not be present in the signer's certificate.
:param skip_diff:
If ``True``, skip the difference analysis step entirely.
:return:
The status of the PDF signature in question.
"""
sig_object = embedded_sig.sig_object
if embedded_sig.sig_object_type != '/Sig':
raise SignatureValidationError("Signature object type must be /Sig")
# check whether the subfilter type is one we support
subfilter_str = sig_object.get('/SubFilter', None)
_validate_subfilter(
subfilter_str,
(SigSeedSubFilter.ADOBE_PKCS7_DETACHED, SigSeedSubFilter.PADES),
"%s is not a recognized SubFilter type in signatures."
)
if ts_validation_context is None:
ts_validation_context = signer_validation_context
embedded_sig.compute_integrity_info(
diff_policy=diff_policy, skip_diff=skip_diff
)
status_kwargs = embedded_sig.summarise_integrity_info()
ts_status_kwargs = await collect_timing_info(
embedded_sig.signer_info, ts_validation_context,
raw_digest=embedded_sig.external_digest
)
status_kwargs.update(ts_status_kwargs)
if 'signer_reported_dt' not in status_kwargs:
# maybe the PDF signature dictionary declares /M
signer_reported_dt = embedded_sig.self_reported_timestamp
if signer_reported_dt is not None:
status_kwargs['signer_reported_dt'] = signer_reported_dt
status_kwargs = await cms_basic_validation(
embedded_sig.signed_data, status_cls=PdfSignatureStatus,
raw_digest=embedded_sig.external_digest,
validation_context=signer_validation_context,
status_kwargs=status_kwargs, key_usage_settings=key_usage_settings
)
tst_validity = status_kwargs.get('timestamp_validity', None)
timestamp_found = (
tst_validity is not None
and tst_validity.valid and tst_validity.trusted
)
sv_update = report_seed_value_validation(
embedded_sig, status_kwargs['validation_path'], timestamp_found
)
status_kwargs.update(sv_update)
if ac_validation_context is not None:
ac_validation_context.certificate_registry.register_multiple(
embedded_sig.other_embedded_certs
)
status_kwargs.update(
await collect_signer_attr_status(
sd_attr_certificates=embedded_sig.embedded_attr_certs,
signer_cert=embedded_sig.signer_cert,
validation_context=ac_validation_context,
sd_signed_attrs=embedded_sig.signer_info['signed_attrs']
)
)
return PdfSignatureStatus(**status_kwargs)
| 5,347,895 |
def get_one_to_many_foreign_key_column_name(model, name):
"""
Returns the constituent column names for the foreign key on the remote
table of the one-to-many relationship specified by name.
Args:
model (class or object): The given model class or model instance.
name (string): The name of the attribute on `model` which is a
one-to-many relationship.
Return:
list: One-to-many foreign key column names as a list of strings.
"""
if not inspect.isclass(model):
return get_one_to_many_foreign_key_column_name(model.__class__, name)
attr = getattr(model, name, None)
if not attr:
# Unknown attribute.
return []
remote_columns = getattr(attr.property, 'remote_side', None)
if not remote_columns:
# This is not a one-to-many relationship.
return []
remote_tables = set(c.table.name for c in remote_columns)
if len(remote_tables) > 1:
# This is a many-to-many relationship with a cross reference table.
return []
foreign_key_column_names = []
for remote_column in remote_columns:
if getattr(remote_column, 'foreign_keys', False):
foreign_key_column_names.append(remote_column.name)
else:
remote_model = get_model_by_table(model, remote_column.table)
if remote_model:
# Quasi foreign keys don't actually have foreign_keys set,
# but they need to be treated as though they did.
foreign_keys = getattr(remote_model, 'quasi_foreign_keys', [])
if remote_column.name in foreign_keys:
foreign_key_column_names.append(remote_column.name)
return foreign_key_column_names
| 5,347,896 |
def parse_args():
""" parse the args from the cli """
logger.debug("parse_args()")
parser = argparse.ArgumentParser(description='Check the status of dnsmasq')
parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')
parser.add_argument('--url', default='www.redhat.com', help='site to be checked')
return parser.parse_args()
| 5,347,897 |
def parse_args(args):
"""Parse command line parameters
Args:
args ([str]): command line parameters as list of strings
Returns:
:obj:`argparse.Namespace`: command line parameters namespace
"""
parser = argparse.ArgumentParser(
description="Just a Fibonacci demonstration")
parser.add_argument(
"--version",
action="version",
version="bytespread {ver}".format(ver=__version__))
parser.add_argument(
"-d",
dest="directory",
required=True,
help="The directly to analyse")
parser.add_argument(
"-w",
dest="wildcard",
default="*",
required=False,
help="Wildcard for file match within the directory (default: *)")
parser.add_argument(
"-c",
dest="clusters",
default=32,
required=False,
type=int,
help="Number of clusters (default: 32)")
parser.add_argument(
"-b",
dest="bricks",
default=100,
required=False,
type=int,
help="Number bricks to show for the longest column (default: 100)")
parser.add_argument(
"-r",
dest="recursive",
action='store_true',
required=False,
help="Recursive within the provided folder (default: false)")
return parser.parse_args(args)
| 5,347,898 |
def screen_handler():
"""
    Prints to stdout in a well-formatted style:
[CALLER_PARENT/CALLER @ TIME]: text - Output created by writer program
"""
global to_print
while True:
if to_print != []:
if to_print[0][1] == "!stop":
print(r'Closing Print function...', end='\n')
break
if not muted:
try:
output.write(to_print[0][1], to_print[0][0], end=to_print[0][2])
except Exception as ex:
output.write(f'{type(ex)} --> {ex}', 'Print handler Error')
del to_print[0]
| 5,347,899 |