| content (string, 0–894k chars) | type (2 values) |
| --- | --- |
import json
import requests
import base64
import Crypto
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Hash import SHA
#from Crypto import Random
public_key = RSA.importKey(open('/tmp/duetpublickey.pem').read())
message = "1,27"
# default hash Algorithm is SHA1, mask generation function is MGF1, no label is specified
# https://pycryptodome.readthedocs.io/en/latest/src/cipher/oaep.html
cipher = PKCS1_OAEP.new(public_key)
encrypted_message = base64.b64encode(cipher.encrypt(message.encode())).decode()
data = { "value" : encrypted_message }
headers = { 'Content-type': 'application/json', 'Accept': 'application/json' }
requests.post('http://localhost:5000/insert', data=json.dumps(data), headers=headers)
#print(encrypted_message)
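# Illustrative sketch (not part of the original script): the matching decryption
# side. The private-key path below is hypothetical; PKCS1_OAEP.new() applies the
# same SHA-1/MGF1 defaults on both ends, so no extra parameters are needed.
def decrypt_message(encrypted_b64, private_key_path='/tmp/duetprivatekey.pem'):
    private_key = RSA.importKey(open(private_key_path).read())
    decipher = PKCS1_OAEP.new(private_key)
    return decipher.decrypt(base64.b64decode(encrypted_b64))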
|
python
|
#!/bin/python
import sys
import math
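# Added note: this appears to be a contest-style solution (Python 2: raw_input,
# print statement, integer division). For each query N, _pal2() yields 6-digit
# palindromes counting down from just above N/1000, and ifact() checks whether a
# candidate splits into two factors with the first >= 100 and the second <= 999;
# the main loop prints the largest such palindrome strictly below N.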
def _pal2(n):
h = n / 1000 + 1
while True:
p = str(h)
p += p[2::-1]
yield int(p)
h -= 1
def ifact(i):
f1 = int(math.sqrt(float(i))) + 1
while f1 > 99:
f2,r = divmod(i,f1)
if f2 > 999:
return None
if r == 0:
return (f1,f2)
f1 -= 1
return None
T = int(raw_input().strip())
for t in range(T):
N = int(raw_input().strip())
for x in _pal2(N):
if x >= N:
continue
facts = ifact(x)
if facts:
print x
break
|
python
|
#
# -*- coding: utf-8 -*-
#
# @Author: Arrack
# @Date: 2020-05-25 17:25:12
# @Last modified by: Arrack
# @Last Modified time: 2020-06-08 15:27:48
#
from wtforms import BooleanField
from wtforms import Form
from wtforms import PasswordField
from wtforms import StringField
from wtforms import SubmitField
from wtforms import TextAreaField
from wtforms import HiddenField
from wtforms.validators import DataRequired
from wtforms.validators import Length
class LoginForm(Form):
email = StringField(
label='Email',
validators=[DataRequired(), Length(1, 64)])
password = PasswordField(
label='Password',
validators=[DataRequired(), Length(1, 128)])
remember_me = BooleanField('Remember me.')
submit = SubmitField()
class TalkForm(Form):
content = TextAreaField(validators=[DataRequired()])
private = BooleanField()
submit = SubmitField()
class ArticleForm(Form):
title = StringField(validators=[DataRequired()])
content = TextAreaField(validators=[DataRequired()])
# time = StringField('datetime', validators=[DataRequired()])
    newTags = StringField()
    category = HiddenField()
    tags = HiddenField()
# url_name = StringField('urlName', validators=[DataRequired()])
# save_draft = SubmitField('save')
submit = SubmitField()
|
python
|
numero1 = float(input("primeiro numero: "))
numero2 = float(input("segundo numero: "))
numero3 = float(input("terceiro numero: "))
numero4 = float(input("quarto numero: "))
numero5 = float(input("quinto numero: "))
lista = [numero1,numero2,numero3,numero4,numero5]
soma = sum(lista)
print (soma)
|
python
|
"""
Python definitions used to help with plotting routines.
*Methods Overview*
-> geo_scatter(): Geographical scatter plot.
"""
import matplotlib.pyplot as plt
from .logging_util import warn
import numpy as np
def r2_lin(x, y, fit):
"""For calculating r-squared of a linear fit. Fit should be a python polyfit object."""
y_estimate = fit(x)
difference = (y - y_estimate) ** 2
y_mean = np.nanmean(y)
mean_square_deviation = (y - y_mean) ** 2
total_deviation = np.nansum(mean_square_deviation)
residual = np.nansum(difference)
correlation_coefficient = 1 - residual / total_deviation
return correlation_coefficient
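def _r2_lin_example():
    # Illustrative usage sketch (not part of the original module): build a
    # degree-1 fit with np.polyfit, wrap it in np.poly1d, and score it with
    # r2_lin. The data below are made up for demonstration only.
    x_demo = np.arange(10.0)
    y_demo = 2.0 * x_demo + 1.0
    fit_demo = np.poly1d(np.polyfit(x_demo, y_demo, 1))
    return r2_lin(x_demo, y_demo, fit_demo)  # ~1.0 for this exact line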
def scatter_with_fit(x, y, s=10, c="k", yex=True, dofit=True):
"""Does a scatter plot with a linear fit. Will also draw y=x for
comparison.
Parameters
----------
x : (array) Values for the x-axis
y : (array) Values for the y-axis
s : (float or array) Marker size(s)
c : (float or array) Marker colour(s)
yex : (bool) True to plot y=x
dofit : (bool) True to calculate and plot linear fit
Returns
-------
Figure and axis objects for further customisation
    Example Usage
-------
x = np.arange(0,50)
y = np.arange(0,50)/1.5
f,a = scatter_with_fit(x,y)
a.set_title('Example scatter with fit')
a.set_xlabel('Example x axis')
a.set_ylabel('Example y axis')
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
x = np.ma.masked_invalid(x)
y = np.ma.masked_invalid(y)
combined_mask = np.ma.mask_or(x.mask, y.mask)
x.mask = combined_mask
y.mask = combined_mask
xmax = np.ma.max(x)
xmin = np.ma.min(x)
ymax = np.ma.max(y)
ymin = np.ma.min(y)
axmax0 = np.max([xmax, ymax])
axmin0 = np.min([xmin, ymin])
axmin = axmin0 - 0.1 * np.abs(axmax0 - axmin0)
axmax = axmax0 + 0.1 * np.abs(axmax0 - axmin0)
if yex:
line_x = [axmin, axmax]
fit_yx = np.poly1d([1, 0])
ax.plot(line_x, fit_yx(line_x), c=[0.5, 0.5, 0.5], linewidth=1)
ax.scatter(x, y, c=c, s=s)
if dofit:
line_x = [axmin, axmax]
# Calculate data fit and cast to poly1d object
fit_tmp = np.ma.polyfit(x, y, 1)
fit = np.poly1d(fit_tmp)
ax.plot(line_x, fit(line_x), c=[1, 128 / 255, 0], linewidth=1.5)
r2 = r2_lin(x, y, fit)
ax.set_xlim(axmin, axmax)
ax.set_ylim(axmin, axmax)
ax.set_aspect("equal", adjustable="box")
ax.grid()
if dofit:
ax.text(
0.4, 0.125, "{} {:03.2f} {} {:03.2f}".format("y =", fit_tmp[0], "x +", fit_tmp[1]), transform=ax.transAxes
)
ax.text(0.4, 0.05, "{} {:03.2f} ".format("$R^2$ =", r2), transform=ax.transAxes)
return fig, ax
def create_geo_subplots(lonbounds, latbounds, n_r=1, n_c=1, figsize=(7, 7)):
"""
A routine for creating an axis for any geographical plot. Within the
specified longitude and latitude bounds, a map will be drawn up using
cartopy. Any type of matplotlib plot can then be added to this figure.
For example:
    Example Usage
    #############
    f,a = create_geo_subplots(lonbounds, latbounds)
sca = a.scatter(stats.longitude, stats.latitude, c=stats.corr,
vmin=.75, vmax=1,
edgecolors='k', linewidths=.5, zorder=100)
f.colorbar(sca)
a.set_title('SSH correlations \n Monthly PSMSL tide gauge vs CO9_AMM15p0',
fontsize=9)
* Note: For scatter plots, it is useful to set zorder = 100 (or similar
positive number)
"""
import cartopy.crs as ccrs # mapping plots
from cartopy.feature import NaturalEarthFeature
# If no figure or ax is provided, create a new one
# fig = plt.figure()
# fig.clf()
fig, ax = plt.subplots(
n_r, n_c, subplot_kw={"projection": ccrs.PlateCarree()}, sharey=True, sharex=True, figsize=figsize
)
land_color = [0.9, 0.9, 0.9]
coast_color = [0, 0, 0]
coast_width = 0.25
if n_r * n_c > 1:
ax = ax.flatten()
for rr in range(n_r * n_c):
coast = NaturalEarthFeature(category="physical", facecolor=land_color, name="coastline", scale="50m")
ax[rr].add_feature(coast, edgecolor=coast_color, linewidth=coast_width)
# ax.coastlines(facecolor=[0.8,0.8,0.8])
gl = ax[rr].gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=0.5, color="gray", linestyle="-")
gl.top_labels = False
gl.right_labels = False
if rr % n_c == 0:
gl.left_labels = True
else:
gl.left_labels = False
if np.abs(n_r * n_c - rr) <= n_c:
gl.bottom_labels = True
else:
gl.bottom_labels = False
ax[rr].set_xlim(lonbounds[0], lonbounds[1])
ax[rr].set_ylim(latbounds[0], latbounds[1])
ax[rr].set_aspect("auto")
ax = ax.reshape((n_r, n_c))
else:
coast = NaturalEarthFeature(category="physical", facecolor=land_color, name="coastline", scale="50m")
ax.add_feature(coast, edgecolor=coast_color, linewidth=coast_width)
# ax.coastlines(facecolor=[0.8,0.8,0.8])
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=0.5, color="gray", linestyle="-")
gl.top_labels = False
gl.right_labels = False
gl.left_labels = True
gl.bottom_labels = True
ax.set_xlim(lonbounds[0], lonbounds[1])
ax.set_ylim(latbounds[0], latbounds[1])
ax.set_aspect("auto")
plt.show()
return fig, ax
def create_geo_axes(lonbounds, latbounds):
"""
A routine for creating an axis for any geographical plot. Within the
specified longitude and latitude bounds, a map will be drawn up using
cartopy. Any type of matplotlib plot can then be added to this figure.
For example:
    Example Usage
#############
f,a = create_geo_axes(lonbounds, latbounds)
sca = a.scatter(stats.longitude, stats.latitude, c=stats.corr,
vmin=.75, vmax=1,
edgecolors='k', linewidths=.5, zorder=100)
f.colorbar(sca)
a.set_title('SSH correlations \n Monthly PSMSL tide gauge vs CO9_AMM15p0',
fontsize=9)
* Note: For scatter plots, it is useful to set zorder = 100 (or similar
positive number)
"""
import cartopy.crs as ccrs # mapping plots
from cartopy.feature import NaturalEarthFeature
# If no figure or ax is provided, create a new one
fig = plt.figure(1)
fig.clf()
ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())
coast = NaturalEarthFeature(category="physical", facecolor=[0.9, 0.9, 0.9], name="coastline", scale="50m")
ax.add_feature(coast, edgecolor="gray")
# ax.coastlines(facecolor=[0.8,0.8,0.8])
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=0.5, color="gray", linestyle="-")
gl.top_labels = False
gl.bottom_labels = True
gl.right_labels = False
gl.left_labels = True
ax.set_xlim(lonbounds[0], lonbounds[1])
ax.set_ylim(latbounds[0], latbounds[1])
ax.set_aspect("auto")
plt.show()
return fig, ax
def ts_diagram(temperature, salinity, depth):
fig = plt.figure(figsize=(10, 7))
ax = plt.scatter(salinity, temperature, c=depth)
cbar = plt.colorbar()
cbar.set_label("Depth (m)")
plt.title("T-S Diagram")
plt.xlabel("Salinity")
plt.ylabel("Temperature")
return fig, ax
def geo_scatter(
longitude,
latitude,
c=None,
s=None,
scatter_kwargs=None,
coastline_kwargs=None,
gridline_kwargs=None,
figure_kwargs={},
title="",
figsize=None,
): # TODO Some unused parameters here
"""
Uses CartoPy to create a geographical scatter plot with land boundaries.
Parameters
----------
    longitude : (array) Array of longitudes of marker locations
    latitude : (array) Array of latitudes of marker locations
    c : (array) Values used to colour the markers (optional)
    s : (array or float) Marker size(s) (optional)
    scatter_kwargs : (dict) Extra keyword arguments passed to the scatter call
    coastline_kwargs : (dict) Keyword arguments for the cartopy coastline feature
    title : (str) Plot title, to appear at top of figure
Returns
-------
Figure and axis objects for further customisation
"""
try:
import cartopy.crs as ccrs # mapping plots
from cartopy.feature import NaturalEarthFeature
except ImportError:
import sys
warn("No cartopy found - please run\nconda install -c conda-forge cartopy")
sys.exit(-1)
if coastline_kwargs is None:
coastline_kwargs = {"facecolor": [0.9, 0.9, 0.9], "name": "coastline", "scale": "50m"}
if scatter_kwargs is None:
scatter_kwargs = {}
fig = plt.figure(**figure_kwargs)
ax = plt.subplot(111, projection=ccrs.PlateCarree())
sca = ax.scatter(longitude, y=latitude, c=c, s=s, zorder=100, **scatter_kwargs)
coast = NaturalEarthFeature(category="physical", **coastline_kwargs)
ax.add_feature(coast, edgecolor="gray")
# ax.coastlines(facecolor=[0.8,0.8,0.8])
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=0.5, color="gray", linestyle="-")
gl.top_labels = False
gl.bottom_labels = True
gl.right_labels = False
gl.left_labels = True
plt.title(title)
if c is not None and "vmax" in scatter_kwargs.keys() and "vmin" in scatter_kwargs.keys():
extend_max = np.nanmax(c) > scatter_kwargs["vmax"]
extend_min = np.nanmin(c) < scatter_kwargs["vmin"]
extend = "neither"
if extend_max and extend_min:
extend = "both"
if extend_max and not extend_min:
extend = "max"
if not extend_max and extend_min:
extend = "min"
else:
extend = "neither"
plt.colorbar(sca, extend=extend)
ax.set_aspect("auto")
plt.show()
return fig, ax
def determine_colorbar_extension(color_data, vmin, vmax):
"""Can be used to automatically determine settings for colorbar
extension arrows. Color_data is the data used for the colormap, vmin
and vmax are the colorbar limits. Will output a string: "both", "max",
"min" or "neither", which can be inserted straight into a call to
matplotlib.pyplot.colorbar().
"""
extend_max = np.nanmax(color_data) > vmax
extend_min = np.nanmin(color_data) < vmin
if extend_max and extend_min:
return "both"
elif extend_max and not extend_min:
return "max"
elif not extend_max and extend_min:
return "min"
else:
return "neither"
def determine_clim_by_standard_deviation(color_data, n_std_dev=2.5):
"""Automatically determine color limits based on number of standard
deviations from the mean of the color data (color_data). Useful if there
are outliers in the data causing difficulties in distinguishing most of
the data. Outputs vmin and vmax which can be passed to plotting routine
or plt.clim().
"""
color_data_mean = np.nanmean(color_data)
color_data_std = np.nanstd(color_data)
vmin = color_data_mean - n_std_dev * color_data_std
vmax = color_data_mean + n_std_dev * color_data_std
return vmin, vmax
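def _colorbar_helpers_example():
    # Illustrative usage sketch (not part of the original module): combine the
    # two helpers above to pick colour limits and matching extension arrows.
    # The random data are for demonstration only.
    color_data = np.random.randn(500)
    vmin, vmax = determine_clim_by_standard_deviation(color_data, n_std_dev=2.5)
    extend = determine_colorbar_extension(color_data, vmin, vmax)
    fig, ax = plt.subplots()
    sca = ax.scatter(np.arange(color_data.size), color_data, c=color_data,
                     vmin=vmin, vmax=vmax)
    fig.colorbar(sca, extend=extend)
    return fig, ax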
|
python
|
from direct.directnotify import DirectNotifyGlobal
from src.connection.protocol import *
from direct.distributed.PyDatagram import PyDatagram
from direct.distributed.PyDatagramIterator import PyDatagramIterator
from src.messagedirector.ChannelWatcher import ChannelWatcher
class MDParticipant(ChannelWatcher):
notify = DirectNotifyGlobal.directNotify.newCategory("MessageDirectorParticipant")
notify.setInfo(True)
channelAllocator = None
canClearChannel = False
channelWatcher = ChannelWatcher()
def __init__(self, base_class):
self.base_class = base_class
def handleDatagram(self, dgi, connection):
messageType = dgi.getUint16()
if messageType == CONTROL_SET_CHANNEL:
self.registerChannel(dgi.getUint64(), connection)
elif messageType == CONTROL_REMOVE_CHANNEL:
self.unregisterChannel(dgi.getUint64(), connection)
elif messageType == CONTROL_MESSAGE:
self.base_class.routeMessageToChannel(dgi.getUint64(), dgi.getUint64(), dgi.getDatagram(), connection)
elif messageType == CONTROL_ADD_RANGE:
self.addChannelRange(dgi.getUint64())
elif messageType == CONTROL_REMOVE_RANGE:
self.removeChannelRange()
elif messageType == CONTROL_ADD_POST_REMOVE:
self.addPostRemove(dgi.getUint64())
elif messageType == CONTROL_CLEAR_POST_REMOVE:
            if self.canClearChannel:
self.clearPostRemove(dgi.getUint64())
else:
self.notify.warning("Participant was not authorized to remove channel: %s" % str(dgi.getUint64()))
return
else:
self.notify.warning("Could not handle incoming datagram: %s" % str(messageType))
return
def registerChannel(self, channel, connection):
if channel not in self.base_class.channels:
if channel is None:
self.notify.warning("Someone tried to register a channel but the channel value was null!")
return
else:
self.base_class.channels[channel] = connection
self.channelWatcher.subscribed_channel(channel)
else:
self.notify.warning("Channel: %s is already registered!" % str(channel))
return
def unregisterChannel(self, channel, connection):
if channel in self.base_class.channels:
del self.base_class.channels[channel]
self.channelWatcher.unsubscribed_channel(channel)
else:
self.notify.warning("Channel: %s was never registered!" % str(channel))
return
def addChannelRange(self, channelRange):
pass
def removeChannelRange(self):
pass
def addPostRemove(self, channel):
pass
def clearPostRemove(self, channel):
pass
|
python
|
# NOTE THAT 'data' IN THIS CODE IS TRANSPOSE, COMPARED WITH THE MANIFOLDER CODE
# data is shaped [13848,8] here ... ogm ...
# loads in data, dimlist, n_channels, npts, x
# file_location_in_data
# file_location_out_data
import numpy as np
# Cobweb3; if not installed, install with 'pip install -U concept_formation'
# https://github.com/cmaclell/concept_formation
from concept_formation.cobweb3 import Cobweb3Tree
from concept_formation.cluster import cluster
import time
import os
from matplotlib.pyplot import cm
import matplotlib.pyplot as plt
dim_list = ['p_speed', 'p_density', 'xhelicity', 'O7to6', 'residualE', 'absB', 'Z_Fe', 'Fe_to_O']
file_location_in_data = '/Users/jonathan/Documents/repos/MANIFOLDER/whitened_short_set/whitened_short_set.csv'
file_location_out_data = '/Users/jonathan/Documents/repos/MANIFOLDER/whitened_short_set/whitened_short_set_2.csv'
# read data from csv file
def load_data():
""" load the data from csv, and do initial parsing """
x = np.genfromtxt(file_location_in_data, delimiter=',')
data = x[:, :].astype('float64') # data points: 13848 x 8 numpy array: 13848 points, 8 channels
print('data.shape = ', data.shape)
#npts = data.shape[0]
#n_channels = data.shape[1]
return data
def cluster_cobweb3(data):
""" cluster the data, using cobweb3"""
npts = data.shape[0]
n_channels = data.shape[1]
    # convert data from np array to list of dictionaries
data_new = []
for i in range(npts):
pt = data[i, :]
pt_dict = {dim_list[j]: pt[j] for j in range(n_channels)}
data_new.append(pt_dict)
# perform cobweb3 clustering and get labels
print('starting cobweb3')
print('note, this can take some time ...')
start_time = time.time()
tree = Cobweb3Tree()
clusters = cluster(tree, data_new[:])[0]
print('# points:', len(clusters))
clust_names = [c for c in set(clusters)]
print(' cluster names:', clust_names)
clust_dict = {c: idx for idx, c in enumerate(clust_names)}
print(clust_dict)
lbs = [clust_dict[c] for c in clusters]
print('length of lbs:', len(lbs))
elapsed_time = time.time() - start_time
print('done, elapsed mins:', np.round(elapsed_time / 60, 2))
# append labels to csv file of data
lbs = np.asarray(lbs).reshape(len(lbs), 1)
print(lbs.shape)
new = np.concatenate((data, lbs), axis=1)
print(new.shape)
np.savetxt(file_location_out_data, new, delimiter=',')
print('done with cluster_cobweb3')
# main use of this function is to return the clusters, and the labels?
return clusters, lbs
def show_clusters_cobweb3(data, clusters, lbs, base_path='results/cobweb/'):
""" need clusters and lbs """
npts = data.shape[0]
n_channels = data.shape[1]
# get point indices for plotting
indices_lists = []
n_clusters = len(set(clusters))
for i in range(n_clusters):
indices = np.where(lbs == i)[0].tolist()
indices_lists.append(indices)
print('# clusters:', len(indices_lists))
npts_list = [len(i) for i in indices_lists]
print('# points in clusters: {}'.format(npts_list))
# sort indices_lists based on number of points in each sublist
npts_list_sorted = np.argsort(np.asarray(npts_list))
print('Sorted indices of lists of number of points:', npts_list_sorted)
indices_lists_sorted = [indices_lists[i] for i in npts_list_sorted]
indices_lists = indices_lists_sorted
    print('Sorted # points in clusters: {}'.format([len(i) for i in indices_lists]))
    npts_list = sorted(npts_list)
    print('Sorted # points in clusters: {}'.format(npts_list))
# plot clusters - each figure shows two channels
colors = cm.rainbow(np.linspace(0, 1, n_clusters))
plt.figure(figsize=(20, 4))
for k in range(n_channels):
for i in range(n_channels):
plt.subplot(2, 4, i + 1)
for indices, c in zip(indices_lists, colors):
c = c.reshape((1, -1)) # NOTE - for the warning, make into a single row?
plt.scatter(data[indices, k], data[indices, i], c=c, s=1) # c causes issues
#plt.scatter(data[indices, k], data[indices,i], s=1)
#plt.title('x:c{} y:c{}'.format(k, i))
plt.tight_layout()
plt.savefig('results/cobweb3/c{}_vs_all.pdf'.format(k)) #note save pdfs instead (slower, but better images)
#savefig('c{}_vs_all.png'.format(k))
#plt.show() # NOTE - code did not originally show()
plt.clf()
print('Figure c{} done.'.format(k))
plt.close()
print('Figures saved successfully')
# plot original data (channel=speed) in time
target_var = data[:, 0]
# plot 10 intervals of 1000 time points
interval = 1000
plt.figure(figsize=(20, 4))
for i in range(10):
begin = i * interval
end = (i+1) * interval
# find indices of points in each cluster
lb_indices = []
for indices in indices_lists:
indices_interval = [idx for idx in indices if idx >= begin and idx < end]
lb_indices.append(indices_interval)
# plot different colors for clusters
plt.subplot(2, 5, i + 1)
for indices, c in zip(lb_indices, colors):
c = c.reshape((1, -1)) # NOTE - for the warning, make into a single row?
plt.scatter(indices, target_var[indices], c=c, s=1)
plt.title('x in [{}, {}]'.format(begin, end - 1))
print('Plot: x in [{}, {}]'.format(begin, end - 1))
plt.tight_layout()
# savefig('speed_10examples.png') # TODO, port the figure save
plt.savefig('results/cobweb3/speed_10examples.pdf')
plt.close()
print('Figure saved successfully')
###
### Portion for DBScan
###
from sklearn.cluster import DBSCAN
def scan(data):
# perform dbscan
db = DBSCAN(eps=0.5, min_samples=20).fit(data)
#print gmm.means_
lbs = db.labels_
print(lbs)
lbs = lbs.reshape(lbs.shape[0], 1)
print(lbs.shape)
new = np.concatenate((data, lbs), axis=1)
print(new.shape)
np.savetxt('results/dbscan/whitened_short_set_labels_dbscan.csv', new, delimiter=',')
# read data and labels
# NOTE, this file was already generated ... maybe gets regenerated, from above?
#file_location_in_data = '/Users/jonathan/Documents/repos/MANIFOLDER/whitened_short_set/whitened_short_set_labels.csv'
# NOTE - maybe there is a better place for the file?
### WHAT???
    # original is whitened_short_set_labels.csv, not sure ...
file_location_in_data = '/Users/jonathan/Documents/repos/MANIFOLDER/whitened_short_set/cobweb3/whitened_short_set_labels.csv'
# looks like this is loaded in from the wrong location ... ???
data = np.genfromtxt(file_location_in_data, delimiter=',')
lbs = data[:, -1]
n_channels = data.shape[1] - 1
print('Reading from file done.')
return lbs # clusters not needed?
def show_clusters_dbscan(data, lbs):
npts = data.shape[0]
n_channels = data.shape[1]
# get point indices for plotting
indices_lists = []
num_clusters = 0
num_points_used = 0
while True:
indices = np.where(lbs == num_clusters)[0].tolist()
if len(indices) == 0:
break
indices_lists.append(indices)
num_points_used += len(indices)
num_clusters += 1
n_cluster = len(indices_lists)
print('# clusters:', n_cluster)
print('# points used:', num_points_used)
print('# outliers:', npts - num_points_used)
npts_list = [len(i) for i in indices_lists]
print('# points in clusters: {}'.format(npts_list))
# sort indices_lists based on number of points in each sublist
npts_list_sorted = np.argsort(np.asarray(npts_list))
print('Sorted indices of lists of number of points:', npts_list_sorted)
indices_lists_sorted = [indices_lists[i] for i in npts_list_sorted]
indices_lists = indices_lists_sorted
    print('Sorted # points in clusters: {}'.format([len(i) for i in indices_lists]))
    npts_list = sorted(npts_list)
    print('Sorted # points in clusters: {}'.format(npts_list))
# plot clusters - each figure shows two channels
colors = cm.rainbow(np.linspace(0, 1, n_cluster))
plt.figure(figsize=(20, 4))
for k in range(n_channels):
for i in range(n_channels):
plt.subplot(2, 4, i + 1)
for indices, c in zip(indices_lists, colors):
c = c.reshape((1, -1)) # NOTE - for the warning, make into a single row?
plt.scatter(data[indices, k], data[indices, i], c=c, s=1)
plt.title('x:c{} y:c{}'.format(k, i))
plt.tight_layout()
plt.savefig('results/dbscan/c{}_vs_all.pdf'.format(k))
plt.clf()
print('Figure c{} done.'.format(k))
print('Figures saved successfully')
# plot original data (channel=speed) in time
target_var = data[:, 0]
# plot 10 intervals of 1000 time points
interval = 1000
plt.figure(figsize=(20, 4))
for i in range(10):
begin = i * interval
end = (i+1) * interval
# find indices of points in each cluster
lb_indices = []
for indices in indices_lists:
indices_interval = [idx for idx in indices if idx >= begin and idx < end]
lb_indices.append(indices_interval)
# plot different colors for clusters
plt.subplot(2, 5, i + 1)
for indices, c in zip(lb_indices, colors):
c = c.reshape((1, -1)) # NOTE - for the warning, make into a single row?
plt.scatter(indices, target_var[indices], c=c, s=1)
plt.title('x in [{}, {}]'.format(begin, end - 1))
print('Plot: x in [{}, {}]'.format(begin, end - 1))
plt.tight_layout()
#savefig('speed_10examples.png')
plt.savefig('results/dbscan/speed_10examples.pdf')
plt.clf()
print('Figure saved successfully')
from matplotlib.legend_handler import HandlerLine2D
# plot colors
plt.figure(figsize=(4, 4))
colors = cm.rainbow(np.linspace(0, 1, n_cluster))
x = range(2)
for i, c in zip(range(n_cluster), colors):
y = [(1./n_cluster) * (i+1)] * 2
#c = c.reshape((1,-1)) # NOTE - for the warning, make into a single row?
plt.plot(x, y, c=c, label=str(npts_list[i]))
plt.legend()
plt.savefig('results/dbscan/colors and number of points.pdf')
plt.clf()
print('Plot colors and legend done.')
# if not installed, can 'pip install scikit-fuzzy'
#import skfuzzy_cluster as fcm
# looks like a newer version
import skfuzzy as fuzz
def fuzzy(data):
# jd - feel like this has been done?
# # read data from csv file
# x = np.genfromtxt('../whitened_short_set.csv', delimiter=',')
# data = x[:, :].astype('float64') # data points: 13848 x 8 numpy array: 13848 points, 8 channels
# dim_list = ['p_speed', 'p_density', 'xhelicity', 'O7to6', 'residualE', 'absB', 'Z_Fe', 'Fe_to_O']
# print('data.shape = ', data.shape)
# npts = data.shape[0]
# n_channels = data.shape[1]
n_channels = data.shape[1]
# perform fcm
n_clusters = 8
# cntr, u, u0, d, jm, p, fpc = fcm.cmeans(data.transpose(), n_clusters, 1.1, error=0.005, maxiter=1000)
cntr, u, u0, d, jm, p, fpc = fuzz.cmeans(data.transpose(), n_clusters, 1.1, error=0.005, maxiter=1000)
lbs = np.argmax(u, axis=0)
print('FCM done...')
print(lbs)
print(p)
# # append labels to csv file of data
# lbs = lbs.reshape(lbs.shape[0], 1)
# print lbs.shape
# new = np.concatenate((data, lbs), axis=1)
# print new.shape
# np.savetxt('whitened_short_set_labels.csv', new, delimiter=',')
# get point indices for plotting
indices_lists = []
for i in range(n_clusters):
indices = np.where(lbs == i)[0].tolist()
indices_lists.append(indices)
print('# clusters:', len(indices_lists))
npts_list = [len(i) for i in indices_lists]
print('# points in clusters: {}'.format(npts_list))
# sort indices_lists based on number of points in each sublist
npts_list_sorted = np.argsort(np.asarray(npts_list))
print('Sorted indices of lists of number of points:', npts_list_sorted)
indices_lists_sorted = [indices_lists[i] for i in npts_list_sorted]
indices_lists = indices_lists_sorted
    print('Sorted # points in clusters: {}'.format([len(i) for i in indices_lists]))
    npts_list = sorted(npts_list)
    print('Sorted # points in clusters: {}'.format(npts_list))
# plot clusters - each figure shows two channels
colors = cm.rainbow(np.linspace(0, 1, n_clusters))
plt.figure(figsize=(20, 4))
for k in range(n_channels):
for i in range(n_channels):
plt.subplot(2, 4, i + 1)
for indices, c in zip(indices_lists, colors):
c = c.reshape((1, -1)) # NOTE - for the warning, make into a single row?
plt.scatter(data[indices, k], data[indices, i], c=c, s=1)
plt.title('x:c{} y:c{}'.format(k, i))
plt.tight_layout()
plt.savefig('results/fcm/c{}_vs_all.pdf'.format(k))
plt.clf()
print('Figure c{} done.'.format(k))
plt.close()
print('Figures saved successfully')
# plot original data (channel=speed) in time
target_var = data[:, 0]
# plot 10 intervals of 1000 time points
interval = 1000
plt.figure(figsize=(20, 4))
for i in range(10):
begin = i * interval
end = (i+1) * interval
# find indices of points in each cluster
lb_indices = []
for indices in indices_lists:
indices_interval = [idx for idx in indices if idx >= begin and idx < end]
lb_indices.append(indices_interval)
# plot different colors for clusters
plt.subplot(2, 5, i + 1)
for indices, c in zip(lb_indices, colors):
c = c.reshape((1, -1)) # NOTE - for the warning, make into a single row?
plt.scatter(indices, target_var[indices], c=c, s=1)
plt.title('x in [{}, {}]'.format(begin, end - 1))
print('Plot: x in [{}, {}]'.format(begin, end - 1))
plt.tight_layout()
plt.savefig('results/fcm/speed_10examples.pdf')
plt.close()
print('Figure saved successfully')
from matplotlib.legend_handler import HandlerLine2D
# plot colors
plt.figure(figsize=(4, 4))
colors = cm.rainbow(np.linspace(0, 1, n_clusters))
x = range(2)
for i, c in zip(range(n_clusters), colors):
y = [(1./n_clusters) * (i+1)] * 2
plt.plot(x, y, c=c, label=str(npts_list[i]))
plt.legend()
plt.savefig('results/fcm/colors and number of points.pdf')
plt.close()
print('Plot colors and legend done.')
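# Illustrative driver (not part of the original script): the functions above
# suggest a load -> cluster -> plot pipeline. Assumes the hard-coded CSV paths
# and the 'results/cobweb3' output directory already exist.
if __name__ == '__main__':
    data = load_data()
    clusters, lbs = cluster_cobweb3(data)
    show_clusters_cobweb3(data, clusters, lbs)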
|
python
|
from django.conf import settings
from django.core.mail import send_mail
from chatbot.models import Notification
class NotificationProcessor:
def __init__(self, notification: Notification):
self.notification = notification
self.notification_map = {
'welcome': {
'subject': self.get_welcome_subject,
'message': self.get_welcome_message,
}
}
def get_welcome_subject(self) -> str:
"""Returns the subject for the welcome email"""
return 'Welcome {}'.format(self.notification.user.username)
def get_welcome_message(self) -> str:
"""Returns the message for the welcome email"""
return 'Lorem Ipsum is simply dummy text of the printing and typesetting industry. \
Lorem Ipsum has been the industry standard dummy text ever since the 1500s.'
def send(self) -> None:
"""Send the notification email"""
_type = self.notification.extra_data['type']
send_mail(
self.notification_map[_type]['subject'](),
self.notification_map[_type]['message'](),
settings.EMAIL_FROM,
[self.notification.user.email],
fail_silently=False,
)
self.notification.sent = True
self.notification.save()
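# Illustrative usage sketch (not part of the original module): the processor
# expects the notification's extra_data to carry a 'type' key matching one of
# the notification_map entries (currently only 'welcome').
def send_welcome_notification(notification: Notification) -> None:
    processor = NotificationProcessor(notification)
    processor.send()  # builds subject/message via the mapped callables, then emails the user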
|
python
|
from django.shortcuts import render
from django.views.generic import (
ListView,
)
from .models import (
Event,
EventPhoto,
)
class AllEventsView(ListView):
model = Event
queryset = Event.objects.all().filter(status = True)
template_name = "event/all_events.html"
context_object_name = "context_allevents"
class EventDetailView(ListView):
model = Event
template_name = "event/event.html"
context_object_name = "context_eventdetail"
def get_queryset(self, **kwargs):
pk = self.kwargs.get('pk', None)
queryset = self.model.objects.all()
return queryset.filter(id = pk)
def get_context_data(self, **kwargs):
context = super(EventDetailView, self).get_context_data(**kwargs)
pk = self.kwargs.get('pk', None)
# EventPhoto context
query_eventphotos = EventPhoto.objects.filter(status = True, event__pk = pk).select_related()
context['context_eventphotos'] = query_eventphotos
return context
|
python
|
from tests.utils.owtftest import OWTFCliTestCase
class OWTFCliExceptTest(OWTFCliTestCase):
categories = ['cli']
def test_except(self):
"""Run OWTF web plugins except one."""
self.run_owtf('-s', '-g', 'web', '-e', 'OWTF-WVS-006', "%s://%s:%s" % (self.PROTOCOL, self.IP, self.PORT))
self.assert_is_in_logs(
'All jobs have been done. Exiting.',
name='MainProcess',
msg='OWTF did not finish properly!')
self.assert_is_not_in_logs(
'Target: %s://%s:%s -> Plugin: Skipfish Unauthenticated' % (self.PROTOCOL, self.IP, self.PORT),
name='Worker',
msg='Skipfish plugin should not have been run!')
|
python
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 14})
mpl.rc('text', usetex=True)
#V1 = np.genfromtxt('Heat.txt')[::-1]
V2 = np.genfromtxt('Heat_b25.txt')[::-1]
'''
N = len(V1)
plt.figure(figsize=(7,7))
plt.title(r'$\beta=0.1$')
#plt.title('Potential $V(x,y)$')
plt.imshow(V1, interpolation='kaiser') #, cmap=mpl.cm.get_cmap('jet')
#plt.plot(0, 1, 'bo')
#plt.plot(-np.sqrt(3)/2,-0.5,'bo', label='Magnet')
#plt.plot( np.sqrt(3)/2,-0.5,'bo')
plt.xlabel(r'$y$')
plt.ylabel(r'$x$')
plt.tight_layout()
plt.gca().set_xticks(np.arange(-1 , len(V1), len(V1)/4));
plt.gca().set_yticks(np.arange(len(V1)-1, -1, -len(V1)/4));
plt.gca().set_xticklabels(np.arange(-2, 3, 1));
plt.gca().set_yticklabels(np.arange(-2, 3, 1));
#plt.show()
plt.savefig('heat_map_b1.pdf')
plt.cla()
plt.clf()
'''
N = len(V2)
plt.figure(figsize=(7,7))
plt.title(r'$\beta=0.25$')
#plt.title('Potential $V(x,y)$')
plt.imshow(V2, interpolation='kaiser') #, cmap=mpl.cm.get_cmap('jet')
#plt.plot(0, 1, 'bo')
#plt.plot(-np.sqrt(3)/2,-0.5,'bo', label='Magnet')
#plt.plot( np.sqrt(3)/2,-0.5,'bo')
plt.xlabel(r'$y$')
plt.ylabel(r'$x$')
plt.tight_layout()
plt.gca().set_xticks(np.arange(-1 , len(V2), len(V2)/4));
plt.gca().set_yticks(np.arange(len(V2)-1, -1, -len(V2)/4));
plt.gca().set_xticklabels(np.arange(-2, 3, 1));
plt.gca().set_yticklabels(np.arange(-2, 3, 1));
plt.colorbar()
#plt.show()
plt.savefig('heat_map_b25.pdf')
|
python
|
import time
import json
from random import choice, randrange, shuffle
class Citation:
"""
Notre objet Citation est composé de :
- Le contenu (un texte càd str)
- Un auteur (le nom de l'auteur càd str)
- Une origine (titre de l'oeuvre dont elle est extraitre càd str)
- Année de publication càd int
"""
def __init__ (self, dict): #constructeur de notre citation
self.__dict__.update(dict)
def version_texte(self):
if self.date == -1 and not self.origine:
return "« {0} » par {1}".format(self.contenu, self.auteur)
return "« {0} » par {1} en {2}, extrait de {3}".format(self.contenu, self.auteur, self.date, self.origine)
recueil = []
# read the data.json file and convert its contents into Citation objects
for json_citation in json.load(open('data.json')):
recueil.append(Citation(json_citation))
# remove duplicates (a set cannot contain duplicate elements)
set_auteurs = set()
for citation in recueil:
set_auteurs.add(citation.auteur)
score = 0
#game loop
for i in range (20):
    # pick a quote at random and remove it from the collection
index = randrange(len(recueil))
citation_a_trouver = recueil[index]
del recueil[index]
    # generate a list of authors and a formatted text of choices
six_authors = {citation_a_trouver.auteur}
while len(six_authors) < 6:
fake_author = choice(list(set_auteurs))
six_authors.add(fake_author)
result = list(six_authors)
shuffle(result)
right_author_index = result.index(citation_a_trouver.auteur)
designed_authors = "\n"
for author_index in range (len(result)):
designed_authors += "\n{0} - {1}".format(str(author_index+1), result[author_index])
    # question prompt
print("---[Question {0}/20]---".format(i+1))
print ("\n> Citation : « {0} »".format(citation_a_trouver.contenu))
print ("\n> Liste d'auteurs possibles :", designed_authors)
proposition = input("\n> Donne le numéro correspondant : ")
if not proposition:
print("Tu as sauté la question")
elif int(proposition)-1 == right_author_index:
print("Tu as trouvé, tu gagnes 1 point")
score += 1
else:
print("Tu t'es trompé, tu perds 2 points")
score -= 2
input("\nVoici la citation et ses informations complémentaires : {0}, pour passer à la question suivante appuie sur entrer !\n\n".format(citation_a_trouver.version_texte()))
# display the final message
print("Ton score est de:", str(score))
|
python
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe, json
no_cache = True
def get_context(context):
token = frappe.local.form_dict.token
if token:
paypal_express_payment = frappe.get_doc("Paypal Express Payment", token)
paypal_express_payment.status = "Verified"
paypal_express_payment.save(ignore_permissions=True)
frappe.db.commit()
context.token = token
context.data = json.loads(paypal_express_payment.data or "{}")
|
python
|
from tensorflow.keras import layers
from tensorflow.keras.layers import TimeDistributed, LayerNormalization
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2
import kapre
from kapre.composed import get_melspectrogram_layer
import tensorflow as tf
import os
def Conv1D(N_CLASSES=10, SR=16000, DT=1.0):
input_shape = (int(SR*DT), 1)
i = get_melspectrogram_layer(input_shape=input_shape,
n_mels=128,
pad_end=True,
n_fft=512,
win_length=400,
hop_length=160,
sample_rate=SR,
return_decibel=True,
input_data_format='channels_last',
output_data_format='channels_last')
x = LayerNormalization(axis=2, name='batch_norm')(i.output)
x = TimeDistributed(layers.Conv1D(8, kernel_size=(4), activation='tanh'), name='td_conv_1d_tanh')(x)
x = layers.MaxPooling2D(pool_size=(2,2), name='max_pool_2d_1')(x)
x = TimeDistributed(layers.Conv1D(16, kernel_size=(4), activation='relu'), name='td_conv_1d_relu_1')(x)
x = layers.MaxPooling2D(pool_size=(2,2), name='max_pool_2d_2')(x)
x = TimeDistributed(layers.Conv1D(32, kernel_size=(4), activation='relu'), name='td_conv_1d_relu_2')(x)
x = layers.MaxPooling2D(pool_size=(2,2), name='max_pool_2d_3')(x)
x = TimeDistributed(layers.Conv1D(64, kernel_size=(4), activation='relu'), name='td_conv_1d_relu_3')(x)
x = layers.MaxPooling2D(pool_size=(2,2), name='max_pool_2d_4')(x)
x = TimeDistributed(layers.Conv1D(128, kernel_size=(4), activation='relu'), name='td_conv_1d_relu_4')(x)
x = layers.GlobalMaxPooling2D(name='global_max_pooling_2d')(x)
x = layers.Dropout(rate=0.1, name='dropout')(x)
x = layers.Dense(64, activation='relu', activity_regularizer=l2(0.001), name='dense')(x)
o = layers.Dense(N_CLASSES, activation='softmax', name='softmax')(x)
model = Model(inputs=i.input, outputs=o, name='1d_convolution')
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def Conv2D(N_CLASSES=10, SR=16000, DT=1.0):
input_shape = (int(SR*DT), 1)
i = get_melspectrogram_layer(input_shape=input_shape,
n_mels=128,
pad_end=True,
n_fft=512,
win_length=400,
hop_length=160,
sample_rate=SR,
return_decibel=True,
input_data_format='channels_last',
output_data_format='channels_last')
x = LayerNormalization(axis=2, name='batch_norm')(i.output)
x = layers.Conv2D(8, kernel_size=(7,7), activation='tanh', padding='same', name='conv2d_tanh')(x)
x = layers.MaxPooling2D(pool_size=(2,2), padding='same', name='max_pool_2d_1')(x)
x = layers.Conv2D(16, kernel_size=(5,5), activation='relu', padding='same', name='conv2d_relu_1')(x)
x = layers.MaxPooling2D(pool_size=(2,2), padding='same', name='max_pool_2d_2')(x)
x = layers.Conv2D(16, kernel_size=(3,3), activation='relu', padding='same', name='conv2d_relu_2')(x)
x = layers.MaxPooling2D(pool_size=(2,2), padding='same', name='max_pool_2d_3')(x)
x = layers.Conv2D(32, kernel_size=(3,3), activation='relu', padding='same', name='conv2d_relu_3')(x)
x = layers.MaxPooling2D(pool_size=(2,2), padding='same', name='max_pool_2d_4')(x)
x = layers.Conv2D(32, kernel_size=(3,3), activation='relu', padding='same', name='conv2d_relu_4')(x)
x = layers.Flatten(name='flatten')(x)
x = layers.Dropout(rate=0.2, name='dropout')(x)
x = layers.Dense(64, activation='relu', activity_regularizer=l2(0.001), name='dense')(x)
o = layers.Dense(N_CLASSES, activation='softmax', name='softmax')(x)
model = Model(inputs=i.input, outputs=o, name='2d_convolution')
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def LSTM(N_CLASSES=10, SR=16000, DT=1.0):
input_shape = (int(SR*DT), 1)
i = get_melspectrogram_layer(input_shape=input_shape,
n_mels=128,
pad_end=True,
n_fft=512,
win_length=400,
hop_length=160,
sample_rate=SR,
return_decibel=True,
input_data_format='channels_last',
output_data_format='channels_last',
name='2d_convolution')
x = LayerNormalization(axis=2, name='batch_norm')(i.output)
x = TimeDistributed(layers.Reshape((-1,)), name='reshape')(x)
s = TimeDistributed(layers.Dense(64, activation='tanh'),
name='td_dense_tanh')(x)
x = layers.Bidirectional(layers.LSTM(32, return_sequences=True),
name='bidirectional_lstm')(s)
x = layers.concatenate([s, x], axis=2, name='skip_connection')
x = layers.Dense(64, activation='relu', name='dense_1_relu')(x)
x = layers.MaxPooling1D(name='max_pool_1d')(x)
x = layers.Dense(32, activation='relu', name='dense_2_relu')(x)
x = layers.Flatten(name='flatten')(x)
x = layers.Dropout(rate=0.2, name='dropout')(x)
x = layers.Dense(32, activation='relu',
activity_regularizer=l2(0.001),
name='dense_3_relu')(x)
o = layers.Dense(N_CLASSES, activation='softmax', name='softmax')(x)
model = Model(inputs=i.input, outputs=o, name='long_short_term_memory')
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
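# Illustrative usage sketch (not part of the original module): build one of the
# models above and print its layer summary. The arguments repeat the defaults
# (10 classes, 16 kHz sample rate, 1 s clips) for clarity.
if __name__ == '__main__':
    model = Conv2D(N_CLASSES=10, SR=16000, DT=1.0)
    model.summary()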
|
python
|
"""Stack Analysis Load test."""
import os
import datetime
import time
from requests_futures.sessions import FuturesSession
start_time = datetime.datetime.utcnow().strftime("%a %b %d %H:%M:%S %Z %Y")
print("TEST START TIME: {}".format(start_time))
three_scale_token = os.getenv('THREE_SCALE_PREVIEW_USER_KEY', '')
api_url = os.getenv('F8A_API_V2_URL')
api_suffix = ''
def close_fps(fp_arr):
"""Close all the file pointers."""
for file_p in fp_arr:
file_p.close()
fp_array = []
futures = []
ecosystem_file_mapping = {'npm': 'npmlist.json', 'pypi': 'pylist.json', 'maven': 'dependencies.txt'}
params = {'x-3scale-account-secret': three_scale_token}
session = FuturesSession(max_workers=3)
try:
for i in range(0, 15):
for ecosystem in ['npm', 'maven', 'pypi']:
file_name = ecosystem_file_mapping[ecosystem]
fp = open('data/{}'.format(file_name), 'rb')
fp_array.append(fp)
file_path = os.path.abspath(os.path.dirname('data/{}'.format(file_name)))
future = session.post('{}/api/v2/stack-analyses{}'.format(api_url, api_suffix),
files={'manifest': (file_name, fp)},
data={'file_path': file_path, 'ecosystem': ecosystem},
headers=params)
futures.append(future)
time.sleep(4)
i += 1
except Exception as e:
print(e)
pass
for future in futures:
print('The response details are {}'.format(future.result().text))
close_fps(fp_array)
|
python
|
from flask import render_template, flash
from flask_login import login_required
from mcadmin.config import CONFIG, _F_USE_JAR
from mcadmin.forms.config.version_form import SetVersionForm
from mcadmin.io.files.server_list import SERVER_LIST
from mcadmin.io.server.server import SERVER
from mcadmin.main import app
@app.route('/panel/configuration/versions', methods=['GET', 'POST'])
@login_required
def server_versions():
version_form = SetVersionForm()
versions = SERVER_LIST.versions()
if version_form.is_submitted() and version_form.validate():
# Update configuration with the new jar name
CONFIG.set_use_jar(version_form.jar_name.data)
        flash('Server executable set to be %s. It will be used next time the server boots.' % SERVER.jar)
return render_template('panel/config/server_versions.html',
current_jar=SERVER.jar,
version_form=version_form,
versions=versions)
|
python
|
import os
import math
import torch
import pickle
import argparse
# Data
from data.data import add_data_args
# Model
from model.model import get_model, get_model_id
from model.baseline import get_baseline
from survae.distributions import DataParallelDistribution
# Optim
from optim import get_optim, get_optim_id, add_optim_args
# Experiment
from experiment.student_experiment import StudentExperiment
from experiment.dropout_experiment import DropoutExperiment
from experiment.gp_experiment import GaussianProcessExperiment
###########
## Setup ##
###########
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default=None)
parser.add_argument('--model_type', type=str, default=None, choices=['flow', 'gp', 'dropout'])
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--device', type=str, default='cpu')  # assumed flag: device used when loading the checkpoint
eval_args = parser.parse_args()
path_args = '{}/args.pickle'.format(eval_args.model)
path_check = '{}/check/checkpoint.pt'.format(eval_args.model)
torch.manual_seed(eval_args.seed)
###############
## Load args ##
###############
with open(path_args, 'rb') as f:
args = pickle.load(f)
################
## Experiment ##
################
if eval_args.model_type == "flow":
student, teacher, data_id = get_model(args)
model_id = get_model_id(args)
args.dataset = data_id
optimizer, scheduler_iter, scheduler_epoch = get_optim(args, student.parameters())
optim_id = get_optim_id(args)
exp = StudentExperiment(args=args, data_id=data_id, model_id=model_id, optim_id=optim_id,
model=student,
teacher=teacher,
optimizer=optimizer,
scheduler_iter=scheduler_iter,
scheduler_epoch=scheduler_epoch)
else:
student, teacher, data_id = get_baseline(args)
model_id = get_model_id(args)
args.dataset = data_id
if args.baseline == "gp":
exp = GaussianProcessExperiment(args=args, data_id=data_id,model_id=model_id,
model=student,
teacher=teacher)
elif args.baseline == "dropout":
optimizer, scheduler_iter, scheduler_epoch = get_optim(args, student.parameters())
optim_id = get_optim_id(args)
exp = DropoutExperiment(args=args, data_id=data_id, model_id=model_id, optim_id=optim_id,
model=student,
teacher=teacher,
optimizer=optimizer,
scheduler_iter=scheduler_iter,
scheduler_epoch=scheduler_epoch)
# Load checkpoint
exp.checkpoint_load('{}/check/'.format(eval_args.model), device=eval_args.device)
##############
## Evaluate ##
##############
exp.eval_fn()
|
python
|
"""
Copyright 2020 Hype3808
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Mapping
__all__ = ['SearchError', 'QuotaExceededError']
class SearchError(Exception):
"""Raised when the API returns an error."""
def __init__(self, data: Mapping[str, str]) -> None:
super().__init__("[{code}: {status}] {message}".format(**data))
class QuotaExceededError(SearchError):
"""Raised when the active API key has run out of uses."""
def __init__(self) -> None:
Exception.__init__(self, "100 queries/day quota has been exceeded for this API key")
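# Illustrative usage sketch (not part of the original module): SearchError
# expects a mapping with 'code', 'status' and 'message' keys, matching the
# format string in its constructor; the values below are made up.
def _example_error() -> SearchError:
    return SearchError({"code": "403", "status": "dailyLimitExceeded",
                        "message": "Daily Limit Exceeded"})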
|
python
|
import os
from setuptools import setup
this_dir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_dir, "README.rst"), "r") as f:
long_description = f.read()
setup(
name="flake8parser",
description=(
"A public python API for flake8 created by parsing the command line output."
),
long_description=long_description,
version="0.1.1",
author="Alex M.",
author_email="[email protected]",
url="https://github.com/newAM/flake8parser",
license="MIT",
python_requires=">=3.6",
install_requires=["flake8>=3.8.2"],
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
packages=["flake8parser"],
)
|
python
|
import typing
import unittest
from m3c import mwb
from m3c import prefill
List = typing.List
class TestPrefill(unittest.TestCase):
def setUp(self):
self.olddb = [
prefill.db.add_organization,
prefill.db.find_organizations,
prefill.db.get_organization,
prefill.db.add_person,
prefill.get_person,
]
prefill.db.add_organization = add_organization
prefill.db.find_organizations = find_organizations
prefill.db.get_organization = get_organization
prefill.db.add_person = add_person
prefill.get_person = get_person
    def tearDown(self):
        (prefill.db.add_organization,
         prefill.db.find_organizations,
         prefill.db.get_organization,
         prefill.db.add_person,
         prefill.get_person) = self.olddb
del self.olddb
organizations.clear()
def test_monkeypatch(self):
expected = "The Corporation"
cursor = MockDatabaseConnection().cursor()
rec = make_record(institute=expected)
prefill.add_organizations(cursor, rec)
self.assertEqual(len(organizations), 1)
self.assertEqual(organizations[0], expected)
def test_strip_names(self):
expected = ["The Corporation", "College University"]
cursor = MockDatabaseConnection().cursor()
rec = make_record(institute="The Corporation; College University")
prefill.add_organizations(cursor, rec)
self.assertListEqual(organizations, expected)
def test_multiname_single_institute_department_lab(self):
expected = ["The Institute", "The Department", "The Lab"]
cursor = MockDatabaseConnection().cursor()
rec = make_record(institute=expected[0], department=expected[1],
laboratory=expected[2])
prefill.add_organizations(cursor, rec)
self.assertListEqual(organizations, expected)
def test_multiname_same_number_of_institutes_departments_and_labs(self):
cursor = MockDatabaseConnection().cursor()
rec = make_record(institute="UF ; FSU",
department="Chem; Chem",
laboratory="Smith;Jones")
prefill.add_organizations(cursor, rec)
expected = ["UF", "FSU", "Chem", "Chem", "Smith", "Jones"]
self.assertListEqual(organizations, expected)
def test_multiname_single_institute_and_dept_many_labs(self):
cursor = MockDatabaseConnection().cursor()
rec = make_record(institute="UF",
department="Computers",
laboratory="Teabeau; Clueknee")
prefill.add_organizations(cursor, rec)
expected = ["UF", "Computers", "Teabeau", "Clueknee"]
self.assertListEqual(organizations, expected)
def test_multiname_single_institute_same_number_of_depts_and_labs(self):
cursor = MockDatabaseConnection().cursor()
rec = make_record(institute="UF",
department="Chemistry; Taste and Smell",
laboratory="Smith;Akkbar")
prefill.add_organizations(cursor, rec)
expected = ["UF", "Chemistry", "Taste and Smell", "Smith", "Akkbar"]
self.assertListEqual(organizations, expected)
def test_multiname_fewer_depts_than_institutes_errors(self):
# Ambiguity: which institute does the department belong to?
rec = make_record(institute="UF;FSU",
department="Biology",
laboratory="Bobby")
cursor = MockDatabaseConnection().cursor()
with self.assertRaises(prefill.AmbiguityError):
prefill.add_organizations(cursor, rec)
def test_multiname_fewer_labs_than_departments_errors(self):
# Ambiguity: which department does the lab belong to?
rec = make_record(institute="UF",
department="Biology;Chem",
laboratory="Bobby")
cursor = MockDatabaseConnection().cursor()
with self.assertRaises(prefill.AmbiguityError):
prefill.add_organizations(cursor, rec)
def test_multiname_too_many_labs(self):
# Ambiguity: which department does the last lab belong to?
rec = make_record(institute="UF;FSU",
department="Biology;Chem",
laboratory="Bobby;Jones;Davis")
cursor = MockDatabaseConnection().cursor()
with self.assertRaises(prefill.AmbiguityError):
prefill.add_organizations(cursor, rec)
def test_multiname_too_many_departments(self):
# Ambiguity: which institute does the last department belong to?
rec = make_record(institute="UF;FSU",
department="Biology;Chem;Yo",
laboratory="Bobby;Jones;Davis")
cursor = MockDatabaseConnection().cursor()
with self.assertRaises(prefill.AmbiguityError):
prefill.add_organizations(cursor, rec)
def test_multiname_single_person(self):
rec = make_record(last_name="Bond", first_name="James")
cursor = MockDatabaseConnection().cursor()
actual = prefill.add_people(cursor, rec)
self.assertEqual(len(actual), 1)
self.assertEqual(people[0], "James Bond")
def test_multiname_too_few_surnames(self):
rec = make_record(last_name="Bond", first_name="James;Michael")
cursor = MockDatabaseConnection().cursor()
with self.assertRaises(prefill.AmbiguousNamesError):
prefill.add_people(cursor, rec)
def test_multiname_too_many_emails(self):
rec = make_record(last_name="Bond", first_name="James",
email="[email protected] and [email protected]")
cursor = MockDatabaseConnection().cursor()
prefill.add_people(cursor, rec)
self.assertEqual(emails[0], "")
organizations: List[str] = []
people: List[str] = []
emails: List[str] = []
def add_organization(cursor, type, name, parent_id=None):
organizations.append(name)
return len(organizations)
def add_person(cursor, first_name, last_name, email, phone):
people.append(f"{first_name} {last_name}")
emails.append(email)
return len(people)
def find_organizations(cursor):
return []
def get_organization(cursor, type, name, parent_id=None):
if not parent_id:
parent_id = 0
try:
return organizations.index((name, parent_id))+1
except ValueError:
return 0
def get_person(cursor, first_name, last_name, exclude_withheld=True):
return []
class MockDatabaseConnection:
def cursor(self):
return self
def __enter__(self):
pass
def __exit__(self, a, b, c):
pass
def make_record(psid="PR123", pstype=mwb.PROJECT, first_name="", last_name="",
institute="", department="", laboratory="", email="", phone=""
) -> mwb.NameRecord:
return mwb.NameRecord(psid, pstype, first_name, last_name, institute,
department, laboratory, email, phone)
if __name__ == "__main__":
unittest.main()
|
python
|
from abc import ABC, abstractmethod
from stateful_simulator.datatypes.DataTypes import FeatureVector
from typing import List
class StatelessModel(ABC):
@abstractmethod
def predict(self, fv: FeatureVector) -> float:
pass
@abstractmethod
def train(self, fvs: List[FeatureVector]):
pass
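# Illustrative sketch (not part of the original module): a minimal concrete
# subclass showing the contract. It assumes FeatureVector exposes an iterable
# `features` attribute, which may differ from the real datatype.
class MeanModel(StatelessModel):
    """Predicts the mean of all feature values seen during training."""
    def __init__(self):
        self._mean = 0.0
    def predict(self, fv: FeatureVector) -> float:
        return self._mean
    def train(self, fvs: List[FeatureVector]):
        values = [v for fv in fvs for v in fv.features]
        self._mean = sum(values) / len(values) if values else 0.0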
|
python
|
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from turtle import *
import os
import shutil
class Recorder(object):
def __init__(self, func, fps=30):
self.func = func
self.fps = fps
def __enter__(self):
self.record()
return self
def __exit__(self, type, value, traceback):
self.remove_temp()
def draw(self):
self.func()
ontimer(self.stop, 500)
def stop(self):
self.running = False
def save_eps(self, counter=[1]):
if not os.path.exists('tmp'):
os.mkdir('tmp')
getcanvas().postscript(file='./tmp/{0:03d}.eps'.format(counter[0]))
counter[0] += 1
if self.running:
ontimer(self.save_eps, int(1000 / self.fps))
def save_animation(self):
print('Capturing animation: Please close the window when the animation ends.')
self.running = True
self.save_eps()
ontimer(self.draw, 500)
done()
self.frames = len(os.listdir('./tmp'))
print('Captured frames: {}'.format(self.frames))
def load_animation(self):
import matplotlib.pyplot as plt
from matplotlib import animation
fig = plt.figure()
plt.axis('off')
plt.subplots_adjust(left=0, bottom=0, right=1, top=1)
img = plt.imread('./tmp/001.eps')
plt.imshow(img)
# plt.show()
def init():
img = plt.imread('./tmp/001.eps')
return img
def animate(i):
filename = './tmp/{0:03d}.eps'.format(i + 1)
plt.clf()
plt.axis('off')
plt.subplots_adjust(left=0, bottom=0, right=1, top=1)
img = plt.imread(filename)
plt.imshow(img)
return img
self.animation = animation.FuncAnimation(fig, animate, init_func=init, frames=self.frames, interval=1000 / self.fps)
print('Animation loaded. Prepare to generate video/gif...')
def remove_temp(self):
shutil.rmtree('./tmp')
def record(self):
self.save_animation()
self.load_animation()
# self.remove_temp()
def to_video(self, output):
print('Generating video...')
self.animation.save(output, fps=self.fps, extra_args=['-vcodec', 'libx264'])
# self.animation.save(output, fps=self.fps)
print('Video generated successfully.')
def to_gif(self, output):
print('Generating gif...')
self.animation.save(output, writer='imagemagick')
print('Gif generated successfully.')
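# Illustrative usage sketch (not part of the original module): record a simple
# turtle drawing and export it. 'square.gif' is an illustrative output name;
# the GIF export relies on imagemagick being installed.
def _demo():
    def draw_square():
        for _ in range(4):
            forward(100)
            left(90)
    with Recorder(draw_square, fps=10) as rec:
        rec.to_gif('square.gif')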
|
python
|
import unittest
from unittest import mock
from flumine.events import events
class BaseEventTest(unittest.TestCase):
def setUp(self) -> None:
self.mock_event = mock.Mock()
self.base_event = events.BaseEvent(self.mock_event)
def test_init(self):
mock_event = mock.Mock()
base_event = events.BaseEvent(mock_event)
self.assertIsNone(base_event.EVENT_TYPE)
self.assertIsNone(base_event.QUEUE_TYPE)
self.assertEqual(base_event.event, mock_event)
self.assertIsNotNone(base_event._time_created)
def test_elapsed_seconds(self):
self.assertGreaterEqual(self.base_event.elapsed_seconds, 0)
def test_str(self):
self.base_event = events.MarketBookEvent(None)
self.assertEqual(str(self.base_event), "<MARKET_BOOK [HANDLER]>")
|
python
|
import json
import requests
class Gen3FileError(Exception):
pass
class Gen3File:
"""For interacting with Gen3 file management features.
A class for interacting with the Gen3 file download services.
Supports getting presigned urls right now.
Args:
endpoint (str): The URL of the data commons.
auth_provider (Gen3Auth): A Gen3Auth class instance.
Examples:
This generates the Gen3File class pointed at the sandbox commons while
using the credentials.json downloaded from the commons profile page.
>>> endpoint = "https://nci-crdc-demo.datacommons.io"
... auth = Gen3Auth(endpoint, refresh_file="credentials.json")
... sub = Gen3File(endpoint, auth)
"""
def __init__(self, endpoint, auth_provider):
self._auth_provider = auth_provider
self._endpoint = endpoint
def get_presigned_url(self, guid, protocol="http"):
"""Generates a presigned URL for a file.
Retrieves a presigned url for a file giving access to a file for a limited time.
Args:
guid (str): The GUID for the object to retrieve.
protocol (:obj:`str`, optional): The protocol to use for picking the available URL for generating the presigned URL.
Examples:
>>> Gen3File.get_presigned_url(query)
"""
api_url = "{}/user/data/download/{}?protocol={}".format(
self._endpoint, guid, protocol
)
output = requests.get(api_url, auth=self._auth_provider).text
try:
data = json.loads(output)
        except ValueError:
return output
return data
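# Illustrative usage sketch (not part of the original module): ties the class
# docstring example together. The GUID below is made up, and Gen3Auth must be
# importable from the gen3 SDK for this to run.
def _example_download_url():
    from gen3.auth import Gen3Auth
    endpoint = "https://nci-crdc-demo.datacommons.io"
    auth = Gen3Auth(endpoint, refresh_file="credentials.json")
    files = Gen3File(endpoint, auth)
    return files.get_presigned_url("dg.4503/00000000-0000-0000-0000-000000000000")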
|
python
|
"""
setup.py: Install IsCAn
"""
import os
import sys
import re
import subprocess
from os.path import join as pjoin
from glob import glob
import setuptools
from distutils.extension import Extension
from distutils.core import setup
from Cython.Distutils import build_ext
import numpy
# ------------------------------------------------------------------------------
# HEADER
#
VERSION = "0.0.1"
ISRELEASED = False
DISABLE_CUDA = True
__author__ = "Frederic Poitevin"
__version__ = VERSION
metadata = {
'name': 'IsCAn',
'version': VERSION,
'author': __author__,
'author_email': '[email protected]',
'license': 'MIT',
'url': 'https://github.com/fredericpoitevin/IsCAn',
'download_url': 'https://github.com/fredericpoitevin/IsCAn',
'platforms': ['Linux', 'OSX'],
'description': "Component analysis and clustering of structural datasets",
'long_description': """IsCAn offers analysis tools for structural datasets."""}
# ------------------------------------------------------------------------------
# HELPER FUNCTIONS -- path finding, git, python version, readthedocs
#
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def print_warning(string):
print(bcolors.WARNING + string + bcolors.ENDC)
def find_in_path(name, path):
"Find a file in a search path"
    # adapted from http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
def get_numpy_include():
"""
Obtain the numpy include directory. This logic works across numpy versions.
"""
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
return numpy_include
def git_version():
"""
Return the git revision as a string.
Copied from numpy setup.py
"""
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout = subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
# -----------------------------------------------------------------------------
# INSTALL
metadata['packages'] = ['IsCAn']
metadata['package_dir'] = {'IsCAn' : 'src'}
metadata['ext_modules'] = []
metadata['scripts'] = [s for s in glob('scripts/*') if not s.endswith('__.py')]
#metadata['data_files'] = [('reference', glob('./reference/*'))]
#metadata['cmdclass'] = {'build_ext': custom_build_ext}
# ------------------------------------------------------------------------------
#
# Finally, print a warning at the *end* of the build if something fails
#
def print_warnings():
print("\n")
if __name__ == '__main__':
setup(**metadata) # ** will unpack dictionary 'metadata' providing the values as arguments
print_warnings()
|
python
|
from setuptools import find_packages, setup
setup(
name="typer", packages=find_packages(),
)
|
python
|
from textwrap import dedent
endc = "\033[0m"
bcolors = dict(
blue="\033[94m",
green="\033[92m",
orange="\033[93m",
red="\033[91m",
bold="\033[1m",
underline="\033[4m",
)
def _color_message(msg, style):
return bcolors[style] + msg + endc
def _message_box(msg, color="green", border="=" * 38, doprint=True, print_func=print):
# Prepare the message so the indentation is the same as the box
msg = dedent(msg)
# Color and create the box
border_colored = _color_message(border, color)
box = """
{border_colored}
{msg}
{border_colored}
"""
box = dedent(box).format(msg=msg, border_colored=border_colored)
if doprint is True:
print_func(box)
return box
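# Minimal usage sketch (editorial note: the message text is illustrative only):
#   _message_box("Build finished without errors.", color="green")
# prints the dedented message between two green '=' borders and also returns the box string.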
|
python
|
import sys
import subprocess
import gzip
from tqdm import tqdm
def get_lines_count(file_path):
if file_path[-3:] == ".gz":
ps = subprocess.Popen(
f"gzip -cd {file_path} | wc -l", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return int(ps.communicate()[0])
else:
return int(subprocess.getoutput('wc -l ' + file_path).split()[0])
if len(sys.argv) == 1:
exit("run: python fasta_to_fastq.py <seq1> <seq2> <seq3> ....")
for fasta_file in sys.argv[1:]:
no_seqs = get_lines_count(fasta_file) // 2
fastq_file = fasta_file[:-1] + "q"
read_len = int()
with open(fasta_file) as fastaReader:
next(fastaReader)
read_len = len(next(fastaReader).strip())
fakeQuality = "+\n" + "?"*read_len + "\n"
with open(fasta_file) as fastaReader, open(fastq_file, 'w') as fastqWriter:
for line in tqdm(fastaReader, total=no_seqs):
seq1_header = line.strip().replace(">", "@")[:-2] + "\n"
seq1_seq = next(fastaReader)
fastqWriter.write(seq1_header + seq1_seq + fakeQuality)
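# Illustrative sketch of one converted record (editorial note: assumes headers carry a
# two-character suffix such as "/1" that gets stripped, and "?" is used as fake quality):
#   input (file.fa):  >read1/1      output (file.fq):  @read1
#                     ACGT                             ACGT
#                                                      +
#                                                      ????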
|
python
|
# convert ERA-Interim data from (Claudia Wekerle's) netcdf files to ieee-be
# compute specific humidity from dew point temperature
# unfortunately precipitation and downward radiation are only available
# as daily averages, I don't know why
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset
import os
ncdir = '/work/ollie/clidyn/forcing/erai'
input_variables = ['precip', 'tdew', 'rad', 't_02', 'u_10', 'v_10']
years = range(2017, 2019)
def check_flds(fld):
print(fld.shape)
print(fld.dtype)
fmt='mean: %12.6e std: %12.6e min: %12.6e max: %12.6e'
print(fmt%(fld.mean(),fld.std(),fld.min(),fld.max()))
def specific_humidity(Td, P):
'''calculate specific humidity from
dew-point temperature and surface pressure
following ECMWF Equation:
http://www.ecmwf.int/sites/default/files/elibrary/2015/9211-part-iv-physical-processes.pdf,equations 7.4/7.5
data: data from NetCDF file
a1,a3,a4: Parameters according to Buck 1981
Rd,Rv: gas constants of dry air and water vapor
T0: reference temperature
'''
# Parameters according to Buck 1981 for saturation over water
a1 = 611.21 # (Pa) Pascal
a3 = 17.502 #
a4 = 32.19 # (K) Kelvin
# Gas constants
Rd = 287.06 # (J/(kg*K)) dry air
Rv = 461.53 # (J/(kg*K)) water vapor
T0 = 273.16 # (K) reference temperature
R = Rd/Rv
# (Pa) saturation water vapor pressure
e_sat = a1*np.exp( a3*( (Td -T0)/(Td -a4) ) )
q_sat = (R*e_sat) / (P - (1.0-R)*e_sat)
return q_sat
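# Rough sanity check for specific_humidity (editorial note, not part of the original script):
# at the triple point, Td = 273.16 K and P = 1e5 Pa give e_sat = 611.21 Pa and
# q_sat of roughly 3.8e-3 kg/kg.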
def writefield(fname, arr, dry_run=False):
    """Write a field to a big-endian (ieee-be) binary file, as stated in the script header."""
    # 'dry_run' replaces the original always-true guard that silently skipped the actual write
    print('writing ' + fname)
    if dry_run:
        return
    data = arr.data
    with open(fname, "wb") as fid:
        data.astype('>f4').tofile(fid)
for y in years:
print(y)
for invar in input_variables:
fname=os.path.join(ncdir,'erai.%s.%i.nc'%(invar,y))
if os.path.isfile(fname): print(fname+' exists and is a file')
ds = Dataset(fname,'r')
# flip direction of y-axis because the ERA convention is to have
# indices (0,0) at the top left corner of the field
if invar=='t_02':
outfld = ds['T_2_MOD'][:,::-1,:]
bfile = 't2m_ERAi_6hourly_'+str(y)
check_flds(outfld)
writefield(bfile,outfld)
elif invar=='u_10':
outfld = ds['U_10_MOD'][:,::-1,:]
bfile = 'u10_ERAi_6hourly_'+str(y)
check_flds(outfld)
writefield(bfile,outfld)
elif invar=='v_10':
outfld = ds['V_10_MOD'][:,::-1,:]
bfile = 'v10_ERAi_6hourly_'+str(y)
check_flds(outfld)
writefield(bfile,outfld)
elif invar=='tdew':
tdew = ds['d2m'][:,::-1,:]
slpname=os.path.join(ncdir,'erai.slp.%i.nc'%(y))
if os.path.isfile(slpname):
print(slpname+' exists and is a file')
dslp = Dataset(slpname,'r')
slp = dslp['SLP'][:,::-1,:]
else:
print(slpname+' does not exist, using slp = 1 bar')
slp = np.zeros(tdew.shape,dtype='float32') + 1e5
outfld = specific_humidity(tdew, slp)
bfile='q_ERAi_6hourly_'+str(y)
check_flds(outfld)
writefield(bfile,outfld)
elif invar=='precip':
rain = ds['RAIN'][:,::-1,:]
snow = ds['SNOW'][:,::-1,:]
outfld = rain+snow
bfile='tp_ERAi_6hourly_'+str(y)
check_flds(outfld)
writefield(bfile,outfld)
if invar=='rad':
swdw = ds['SWDW'][:,::-1,:]
bfile='ssrd_ERAi_6hourly_'+str(y)
check_flds(swdw)
writefield(bfile,swdw)
lwdw = ds['LWDW'][:,::-1,:]
bfile='strd_ERAi_6hourly_'+str(y)
check_flds(lwdw)
writefield(bfile,lwdw)
|
python
|
"""
Conversion functions between corresponding data structures.
"""
import json
import logging
from collections import Hashable, OrderedDict # pylint: disable=E0611,no-name-in-module # moved to .abc in Python 3
from copy import deepcopy
from tempfile import TemporaryDirectory
from typing import TYPE_CHECKING
from urllib.parse import urlparse
from owslib.wps import (
ComplexData,
Input as OWS_Input_Type,
Metadata as OWS_Metadata,
Output as OWS_Output_Type,
is_reference
)
from pywps import Process as ProcessWPS
from pywps.app.Common import Metadata as WPS_Metadata
from pywps.inout import BoundingBoxInput, BoundingBoxOutput, ComplexInput, ComplexOutput, LiteralInput, LiteralOutput
from pywps.inout.basic import BasicIO
from pywps.inout.formats import Format
from pywps.inout.literaltypes import ALLOWEDVALUETYPE, RANGECLOSURETYPE, AllowedValue, AnyValue
from pywps.validator.mode import MODE
from weaver import xml_util
from weaver.exceptions import PackageTypeError
from weaver.execute import (
EXECUTE_MODE_ASYNC,
EXECUTE_RESPONSE_DOCUMENT,
EXECUTE_TRANSMISSION_MODE_REFERENCE,
EXECUTE_TRANSMISSION_MODE_VALUE
)
from weaver.formats import (
CONTENT_TYPE_ANY,
CONTENT_TYPE_APP_JSON,
CONTENT_TYPE_TEXT_PLAIN,
get_cwl_file_format,
get_extension,
get_format
)
from weaver.processes.constants import (
CWL_REQUIREMENT_APP_WPS1,
PACKAGE_ARRAY_BASE,
PACKAGE_ARRAY_ITEMS,
PACKAGE_ARRAY_MAX_SIZE,
PACKAGE_ARRAY_TYPES,
PACKAGE_CUSTOM_TYPES,
PACKAGE_ENUM_BASE,
PACKAGE_LITERAL_TYPES,
PROCESS_SCHEMA_OGC,
PROCESS_SCHEMA_OLD,
WPS_BOUNDINGBOX,
WPS_COMPLEX,
WPS_COMPLEX_DATA,
WPS_INPUT,
WPS_LITERAL,
WPS_LITERAL_DATA_TYPE_NAMES,
WPS_OUTPUT,
WPS_REFERENCE
)
from weaver.utils import (
bytes2str,
fetch_file,
get_any_id,
get_sane_name,
get_url_without_query,
null,
str2bytes,
transform_json
)
from weaver.wps.utils import get_wps_client
if TYPE_CHECKING:
from typing import Any, Dict, List, Optional, Tuple, Type, Union
from urllib.parse import ParseResult
from pywps.app import WPSRequest
from owslib.wps import Process as ProcessOWS
from requests.models import Response
from weaver.typedefs import (
AnySettingsContainer,
AnyValueType,
CWL,
CWL_IO_EnumSymbols,
CWL_IO_Value,
CWL_Input_Type,
CWL_Output_Type,
JSON
)
# typing shortcuts
# pylint: disable=C0103,invalid-name
WPS_Input_Type = Union[LiteralInput, ComplexInput, BoundingBoxInput]
WPS_Output_Type = Union[LiteralOutput, ComplexOutput, BoundingBoxOutput]
WPS_IO_Type = Union[WPS_Input_Type, WPS_Output_Type]
OWS_IO_Type = Union[OWS_Input_Type, OWS_Output_Type]
JSON_IO_Type = JSON
JSON_IO_ListOrMap = Union[List[JSON], Dict[str, Union[JSON, str]]]
CWL_IO_Type = Union[CWL_Input_Type, CWL_Output_Type]
PKG_IO_Type = Union[JSON_IO_Type, WPS_IO_Type]
ANY_IO_Type = Union[CWL_IO_Type, JSON_IO_Type, WPS_IO_Type, OWS_IO_Type]
ANY_Format_Type = Union[Dict[str, Optional[str]], Format]
ANY_Metadata_Type = Union[OWS_Metadata, WPS_Metadata, Dict[str, str]]
# WPS object attribute -> all possible *other* naming variations (no need to repeat key name)
WPS_FIELD_MAPPING = {
"identifier": ["id", "ID", "Id", "Identifier"],
"title": ["Title", "Label", "label"],
"abstract": ["description", "Description", "Abstract"],
"version": ["processVersion", "Version"],
"metadata": ["Metadata"],
"keywords": ["Keywords"],
"allowed_values": ["AllowedValues", "allowedValues", "allowedvalues", "Allowed_Values", "Allowedvalues"],
"allowed_collections": ["AllowedCollections", "allowedCollections", "allowedcollections", "Allowed_Collections",
"Allowedcollections"],
"any_value": ["anyvalue", "anyValue", "AnyValue"],
"literal_data_domains": ["literalDataDomains"],
"default": ["default_value", "defaultValue", "DefaultValue", "Default", "data_format", "data"],
"supported_values": ["SupportedValues", "supportedValues", "supportedvalues", "Supported_Values"],
"supported_formats": ["SupportedFormats", "supportedFormats", "supportedformats", "Supported_Formats", "formats"],
"additional_parameters": ["AdditionalParameters", "additionalParameters", "additionalparameters",
"Additional_Parameters"],
"type": ["Type", "data_type", "dataType", "DataType", "Data_Type"],
"min_occurs": ["minOccurs", "MinOccurs", "Min_Occurs", "minoccurs"],
"max_occurs": ["maxOccurs", "MaxOccurs", "Max_Occurs", "maxoccurs"],
"max_megabytes": ["maximumMegabytes", "max_size"],
"mime_type": ["mimeType", "MimeType", "mime-type", "Mime-Type", "mimetype",
"mediaType", "MediaType", "media-type", "Media-Type", "mediatype"],
"range_minimum": ["minval", "minimum", "minimumValue"],
"range_maximum": ["maxval", "maximum", "maximumValue"],
"range_spacing": ["spacing"],
"range_closure": ["closure", "rangeClosure"],
"encoding": ["Encoding"],
"href": ["url", "link", "reference"],
}
# WPS fields that contain a structure corresponding to `Format` object
# - keys must match `WPS_FIELD_MAPPING` keys
# - fields are placed in order of relevance (prefer explicit format, then supported, and defaults as last resort)
WPS_FIELD_FORMAT = ["formats", "supported_formats", "supported_values", "default"]
# WPS 'type' string variations employed to indicate a Complex (file) I/O by different libraries
# for literal types, see 'any2cwl_literal_datatype' and 'any2wps_literal_datatype' functions
WPS_COMPLEX_TYPES = [WPS_COMPLEX, WPS_COMPLEX_DATA, WPS_REFERENCE]
# WPS 'type' string of all combinations (type of data / library implementation)
WPS_ALL_TYPES = [WPS_LITERAL, WPS_BOUNDINGBOX] + WPS_COMPLEX_TYPES
# default format if missing (minimal requirement of one)
DEFAULT_FORMAT = Format(mime_type=CONTENT_TYPE_TEXT_PLAIN)
DEFAULT_FORMAT_MISSING = "__DEFAULT_FORMAT_MISSING__"
setattr(DEFAULT_FORMAT, DEFAULT_FORMAT_MISSING, True)
INPUT_VALUE_TYPE_MAPPING = {
"bool": bool,
"boolean": bool,
"file": str,
"File": str,
"float": float,
"int": int,
"integer": int,
"str": str,
"string": str,
}
LOGGER = logging.getLogger(__name__)
def complex2json(data):
# type: (Union[ComplexData, Any]) -> Union[JSON, Any]
"""
Obtains the JSON representation of a :class:`ComplexData` or simply return the unmatched type.
"""
if not isinstance(data, ComplexData):
return data
# backward compat based on OWSLib version, field did not always exist
max_mb = getattr(data, "maximumMegabytes", None)
if isinstance(max_mb, str) and max_mb.isnumeric():
max_mb = int(max_mb)
return {
"mimeType": data.mimeType,
"encoding": data.encoding,
"schema": data.schema,
"maximumMegabytes": max_mb,
"default": False, # always assume it is a supported format/value, caller should override
}
def metadata2json(meta, force=False):
# type: (Union[ANY_Metadata_Type, Any], bool) -> Union[JSON, Any]
"""
Retrieve metadata information and generate its JSON representation.
Obtains the JSON representation of a :class:`OWS_Metadata` or :class:`pywps.app.Common.Metadata`.
Otherwise, simply return the unmatched type.
If requested, can enforce parsing a dictionary for the corresponding keys.
"""
if not force and not isinstance(meta, (OWS_Metadata, WPS_Metadata)):
return meta
title = get_field(meta, "title", search_variations=True, default=None)
href = get_field(meta, "href", search_variations=True, default=None)
role = get_field(meta, "role", search_variations=True, default=None)
rel = get_field(meta, "rel", search_variations=True, default=None)
# many remote servers do not provide the 'rel', but instead provide 'title' or 'role'
# build one by default to avoid failing schemas that expect 'rel' to exist
if not rel:
href_rel = urlparse(href).hostname
rel = str(title or role or href_rel).lower() # fallback to first available
rel = get_sane_name(rel, replace_character="-", assert_invalid=False)
return {"href": href, "title": title, "role": role, "rel": rel}
def ows2json_field(ows_field):
# type: (Union[ComplexData, OWS_Metadata, AnyValueType]) -> Union[JSON, AnyValueType]
"""
Obtains the JSON or raw value from an :mod:`owslib.wps` I/O field.
"""
if isinstance(ows_field, ComplexData):
return complex2json(ows_field)
if isinstance(ows_field, OWS_Metadata):
return metadata2json(ows_field)
return ows_field
def ows2json_io(ows_io):
# type: (OWS_IO_Type) -> JSON_IO_Type
"""
Converts I/O definition from :mod:`owslib.wps` to JSON.
"""
json_io = dict()
for field in WPS_FIELD_MAPPING:
value = get_field(ows_io, field, search_variations=True)
# preserve numeric values (ex: "minOccurs"=0) as actual parameters
# ignore undefined values represented by `null`, empty list, or empty string
if value or value in [0, 0.0]:
if isinstance(value, list):
# complex data is converted as is
                # metadata converted and preserved if it results in a minimally valid definition (otherwise dropped)
json_io[field] = [
complex2json(v) if isinstance(v, ComplexData) else
metadata2json(v) if isinstance(v, OWS_Metadata) else v
for v in value if not isinstance(v, OWS_Metadata) or v.url is not None
]
elif isinstance(value, ComplexData):
json_io[field] = complex2json(value)
elif isinstance(value, OWS_Metadata):
json_io[field] = metadata2json(value)
else:
json_io[field] = value
json_io["id"] = get_field(json_io, "identifier", search_variations=True, pop_found=True)
io_type = json_io.get("type")
# add 'format' if missing, derived from other variants
if io_type == WPS_COMPLEX_DATA:
fmt_default = False
if "default" in json_io and isinstance(json_io["default"], dict):
json_io["default"]["default"] = True # provide for workflow extension (internal), schema drops it (API)
fmt_default = True
# retrieve alternate format definitions
if "formats" not in json_io:
            # complex data 'formats' parsed by OWSLib during the initial fields loop can get stored in 'supported_values'
fmt_val = get_field(json_io, "supported_values", pop_found=True)
if fmt_val:
json_io["formats"] = fmt_val
else:
# search for format fields directly specified in I/O body
for field in WPS_FIELD_FORMAT:
fmt = get_field(json_io, field, search_variations=True)
if not fmt:
continue
if isinstance(fmt, dict):
fmt = [fmt]
fmt = filter(lambda f: isinstance(f, dict), fmt)
if not isinstance(json_io.get("formats"), list):
json_io["formats"] = []
for var_fmt in fmt:
# add it only if not exclusively provided by a previous variant
json_fmt_items = [j_fmt.items() for j_fmt in json_io["formats"]]
if any(all(var_item in items for var_item in var_fmt.items()) for items in json_fmt_items):
continue
json_io["formats"].append(var_fmt)
json_io.setdefault("formats", [])
# apply the default flag
for fmt in json_io["formats"]:
fmt["default"] = fmt_default and is_equal_formats(json_io["default"], fmt)
if fmt["default"]:
break
# NOTE:
# Don't apply 'minOccurs=0' as in below literal case because default 'format' does not imply that unspecified
# input is valid, but rather that given an input without explicit 'format' specified, that 'default' is used.
return json_io
# add value constraints in specifications
elif io_type in WPS_LITERAL_DATA_TYPE_NAMES:
domains = any2json_literal_data_domains(ows_io)
if domains:
json_io["literalDataDomains"] = domains
# fix inconsistencies of some process descriptions
# WPS are allowed to report 'minOccurs=1' although 'defaultValue' can also be provided
# (see https://github.com/geopython/pywps/issues/625)
if "defaultValue" in domains[0]:
json_io["min_occurs"] = 0
return json_io
# FIXME: add option to control auto-fetch, disable during workflow by default to avoid double downloads?
# (https://github.com/crim-ca/weaver/issues/183)
def ows2json_output_data(output, process_description, container=None):
# type: (OWS_Output_Type, ProcessOWS, Optional[AnySettingsContainer]) -> JSON
"""
Utility method to convert an :mod:`owslib.wps` process execution output data (result) to `JSON`.
    When a ``reference`` output of `JSON` content-type refers to a file that contains an array of URL references
    (used to simulate multiple outputs), this specific output gets expanded to contain both the original URL
    ``reference`` field and the loaded URL list under the ``data`` field for easier access from the response body.
Referenced file(s) are fetched in order to store them locally if executed on a remote process, such that they can
become accessible as local job result for following reporting or use by other processes in a workflow chain.
If the ``dataType`` details is missing from the data output (depending on servers that might omit it), the
:paramref:`process_description` is employed to retrieve the original description with expected result details.
:param output: output with data value or reference according to expected result for the corresponding process.
:param process_description: definition of the process producing the specified output following execution.
:param container: container to retrieve application settings (for request options during file retrieval as needed).
:return: converted JSON result data and additional metadata as applicable based on data-type and content-type.
"""
if not output.dataType:
for process_output in getattr(process_description, "processOutputs", []):
if getattr(process_output, "identifier", "") == output.identifier:
output.dataType = process_output.dataType
break
json_output = {
"identifier": output.identifier,
"title": output.title,
"dataType": output.dataType
}
# WPS standard v1.0.0 specify that either a reference or a data field has to be provided
if output.reference:
json_output["reference"] = output.reference
# Handle special case where we have a reference to a json array containing dataset reference
# Avoid reference to reference by fetching directly the dataset references
json_array = _get_multi_json_references(output, container)
if json_array and all(str(ref).startswith("http") for ref in json_array):
json_output["data"] = json_array
else:
# WPS standard v1.0.0 specify that Output data field has Zero or one value
json_output["data"] = output.data[0] if output.data else None
if (json_output["dataType"] == WPS_COMPLEX_DATA or "reference" in json_output) and output.mimeType:
json_output["mimeType"] = output.mimeType
return json_output
# FIXME: support metalink unwrapping (weaver #25)
# FIXME: reuse functions
# definitely can be improved and simplified with 'fetch_file' function
# then return parsed contents from that file
def _get_multi_json_references(output, container):
# type: (OWS_Output_Type, Optional[AnySettingsContainer]) -> Optional[List[JSON]]
"""
Obtains the JSON contents of a single output corresponding to multi-file references.
Since WPS standard does not allow to return multiple values for a single output,
a lot of process actually return a JSON array containing references to these outputs.
Because the multi-output references are contained within this JSON file, it is not very convenient to retrieve
the list of URLs as one always needs to open and read the file to get them. This function goal is to detect this
particular format and expand the references to make them quickly available in the job output response.
:return:
Array of HTTP(S) references if the specified output is effectively a JSON containing that, ``None`` otherwise.
"""
# Check for the json datatype and mime-type
if output.dataType == WPS_COMPLEX_DATA and output.mimeType == CONTENT_TYPE_APP_JSON:
try:
            # If the json data is referenced, read its content
if output.reference:
with TemporaryDirectory() as tmp_dir:
file_path = fetch_file(output.reference, tmp_dir, settings=container)
with open(file_path, "r") as tmp_file:
json_data_str = tmp_file.read()
# Else get the data directly
else:
                # process output data are appended into a list and
                # WPS standard v1.0.0 specifies that Output data field has zero or one value
if not output.data:
return None
json_data_str = output.data[0]
# Load the actual json dict
json_data = json.loads(json_data_str)
except Exception as exc: # pylint: disable=W0703
LOGGER.debug("Failed retrieval of JSON output file for multi-reference unwrapping", exc_info=exc)
return None
if isinstance(json_data, list):
return None if any(not is_reference(data_value) for data_value in json_data) else json_data
return None
def any2cwl_io(wps_io, io_select):
# type: (Union[JSON_IO_Type, WPS_IO_Type, OWS_IO_Type], str) -> Tuple[CWL_IO_Type, Dict[str, str]]
"""
Converts a `WPS`-like I/O to `CWL` corresponding I/O.
Because of `CWL` I/O of type `File` with `format` field, the applicable namespace is also returned.
:returns: converted I/O and namespace dictionary with corresponding format references as required
"""
def _get_cwl_fmt_details(wps_fmt):
# type: (ANY_Format_Type) -> Union[Tuple[Tuple[str, str], str, str], Tuple[None, None, None]]
_wps_io_fmt = get_field(wps_fmt, "mime_type", search_variations=True)
if not _wps_io_fmt:
return None, None, None
_cwl_io_ext = get_extension(_wps_io_fmt)
_cwl_io_ref, _cwl_io_fmt = get_cwl_file_format(_wps_io_fmt, must_exist=True, allow_synonym=False)
return _cwl_io_ref, _cwl_io_fmt, _cwl_io_ext
wps_io_type = get_field(wps_io, "type", search_variations=True)
wps_io_id = get_field(wps_io, "identifier", search_variations=True)
cwl_ns = dict()
cwl_io = {"id": wps_io_id} # type: CWL_IO_Type
if wps_io_type not in WPS_COMPLEX_TYPES:
cwl_io_type = any2cwl_literal_datatype(wps_io_type)
wps_allow = get_field(wps_io, "allowed_values", search_variations=True)
if isinstance(wps_allow, list) and len(wps_allow) > 0:
cwl_io["type"] = {"type": PACKAGE_ENUM_BASE, "symbols": wps_allow}
else:
cwl_io["type"] = cwl_io_type
# FIXME: BoundingBox not implemented (https://github.com/crim-ca/weaver/issues/51)
else:
cwl_io_fmt = None
cwl_io_ext = CONTENT_TYPE_ANY
cwl_io["type"] = "File"
# inputs are allowed to define multiple 'supported' formats
# outputs are allowed to define only one 'applied' format
for field in WPS_FIELD_FORMAT:
fmt = get_field(wps_io, field, search_variations=True)
if isinstance(fmt, dict):
cwl_io_ref, cwl_io_fmt, cwl_io_ext = _get_cwl_fmt_details(fmt)
if cwl_io_ref and cwl_io_fmt:
cwl_ns.update(cwl_io_ref)
break
if isinstance(fmt, list):
if len(fmt) == 1:
cwl_io_ref, cwl_io_fmt, cwl_io_ext = _get_cwl_fmt_details(fmt[0])
if cwl_io_ref and cwl_io_fmt:
cwl_ns.update(cwl_io_ref)
break
if io_select == WPS_OUTPUT and len(fmt) > 1:
break # don't use any format because we cannot enforce one
cwl_ns_multi = {}
cwl_fmt_multi = []
for fmt_i in fmt:
# FIXME: (?)
# when multiple formats are specified, but at least one schema/namespace reference can't be found,
                    # we must drop all since that unknown format is still allowed but cannot be validated,
                    # to avoid a potential validation error if that format was the one provided during execute...
# (see: https://github.com/crim-ca/weaver/issues/50)
cwl_io_ref_i, cwl_io_fmt_i, _ = _get_cwl_fmt_details(fmt_i)
if cwl_io_ref_i and cwl_io_fmt_i:
cwl_ns_multi.update(cwl_io_ref_i)
cwl_fmt_multi.append(cwl_io_fmt_i)
else:
# reset all since at least one format could not be mapped to an official schema
cwl_ns_multi = {}
cwl_fmt_multi = None
break
cwl_io_fmt = cwl_fmt_multi # all formats or none of them
cwl_ns.update(cwl_ns_multi)
break
if cwl_io_fmt:
cwl_io["format"] = cwl_io_fmt
        # for backward compatibility with deployed processes, consider text/plain as 'any' for glob pattern
cwl_io_txt = get_extension(CONTENT_TYPE_TEXT_PLAIN)
if cwl_io_ext == cwl_io_txt:
cwl_io_any = get_extension(CONTENT_TYPE_ANY)
LOGGER.warning("Replacing '%s' [%s] to generic '%s' [%s] glob pattern. "
"More explicit format could be considered for %s '%s'.",
CONTENT_TYPE_TEXT_PLAIN, cwl_io_txt, CONTENT_TYPE_ANY, cwl_io_any, io_select, wps_io_id)
cwl_io_ext = cwl_io_any
if io_select == WPS_OUTPUT:
# FIXME: (?) how to specify the 'name' part of the glob (using the "id" value for now)
cwl_io["outputBinding"] = {
"glob": "{}{}".format(wps_io_id, cwl_io_ext)
}
# FIXME: multi-outputs (https://github.com/crim-ca/weaver/issues/25)
# min/max occurs can only be in inputs, outputs are enforced min/max=1 by WPS
if io_select == WPS_INPUT:
wps_default = get_field(wps_io, "default", search_variations=True)
wps_min_occ = get_field(wps_io, "min_occurs", search_variations=True, default=1)
# field 'default' must correspond to a fallback "value", not a default "format"
is_min_null = wps_min_occ in [0, "0"]
if wps_default != null and not isinstance(wps_default, dict):
cwl_io["default"] = wps_default
# NOTE:
# Don't set any 'default' field here (neither 'null' string or 'None' type) if no value was provided
# since those are interpreted by CWL as literal string 'null' (for 'string' type) or null object.
# Instead, 'null' entry is added to 'type' to indicate drop/ignore missing input.
wps_max_occ = get_field(wps_io, "max_occurs", search_variations=True)
if wps_max_occ != null and (wps_max_occ == "unbounded" or wps_max_occ > 1):
cwl_array = {
"type": PACKAGE_ARRAY_BASE,
"items": cwl_io["type"]
}
# if single value still allowed, or explicitly multi-value array if min greater than one
if wps_min_occ > 1:
cwl_io["type"] = cwl_array
else:
cwl_io["type"] = [cwl_io["type"], cwl_array]
# apply default null after handling literal/array/enum type variants
# (easier to apply against their many different structures)
if is_min_null:
if isinstance(cwl_io["type"], list):
cwl_io["type"].insert(0, "null") # if min=0,max>1 (null, <type>, <array-type>)
else:
cwl_io["type"] = ["null", cwl_io["type"]] # if min=0,max=1 (null, <type>)
return cwl_io, cwl_ns
def wps2cwl_requirement(wps_service_url, wps_process_id):
# type: (Union[str, ParseResult], str) -> JSON
"""
Obtains the `CWL` requirements definition needed for parsing by a remote `WPS` provider as an `Application Package`.
"""
return OrderedDict([
("cwlVersion", "v1.0"),
("class", "CommandLineTool"),
("hints", {
CWL_REQUIREMENT_APP_WPS1: {
"provider": get_url_without_query(wps_service_url),
"process": wps_process_id,
}}),
])
def ows2json(wps_process, wps_service_name, wps_service_url, wps_provider_name=None):
# type: (ProcessOWS, str, Union[str, ParseResult], Optional[str]) -> Tuple[CWL, JSON]
"""
Generates the `CWL` package and process definitions from a :class:`owslib.wps.Process` hosted under `WPS` location.
"""
process_info = OrderedDict([
("id", wps_process.identifier),
("keywords", [wps_service_name] if wps_service_name else []),
])
if wps_provider_name and wps_provider_name not in process_info["keywords"]:
process_info["keywords"].append(wps_provider_name)
default_title = wps_process.identifier.capitalize()
process_info["title"] = get_field(wps_process, "title", default=default_title, search_variations=True)
process_info["description"] = get_field(wps_process, "abstract", default=None, search_variations=True)
process_info["version"] = get_field(wps_process, "version", default=None, search_variations=True)
process_info["metadata"] = []
if wps_process.metadata:
for meta in wps_process.metadata:
metadata = metadata2json(meta)
if metadata:
process_info["metadata"].append(metadata)
process_info["inputs"] = [] # type: List[JSON]
process_info["outputs"] = [] # type: List[JSON]
for wps_in in wps_process.dataInputs: # type: OWS_Input_Type
process_info["inputs"].append(ows2json_io(wps_in))
for wps_out in wps_process.processOutputs: # type: OWS_Output_Type
process_info["outputs"].append(ows2json_io(wps_out))
# generate CWL for WPS-1 using parsed WPS-3
cwl_package = wps2cwl_requirement(wps_service_url, wps_process.identifier)
for io_select in [WPS_INPUT, WPS_OUTPUT]:
io_section = "{}s".format(io_select)
cwl_package[io_section] = list()
for wps_io in process_info[io_section]:
cwl_io, cwl_ns = any2cwl_io(wps_io, io_select)
cwl_package[io_section].append(cwl_io)
if cwl_ns:
if "$namespaces" not in cwl_package:
cwl_package["$namespaces"] = dict()
cwl_package["$namespaces"].update(cwl_ns)
return cwl_package, process_info
def xml_wps2cwl(wps_process_response, settings):
# type: (Response, AnySettingsContainer) -> Tuple[CWL, JSON]
"""
Obtains the ``CWL`` definition that corresponds to a XML WPS-1 process.
    Converts a `WPS-1 ProcessDescription XML` tree structure to an equivalent `WPS-3 Process JSON` and builds the
associated `CWL` package in conformance to :data:`weaver.processes.wps_package.CWL_REQUIREMENT_APP_WPS1`.
:param wps_process_response: valid response (XML, 200) from a `WPS-1 ProcessDescription`.
:param settings: application settings to retrieve additional request options.
"""
def _tag_name(_xml):
# type: (Union[xml_util.XML, str]) -> str
"""
Obtains ``tag`` from a ``{namespace}Tag`` `XML` element.
"""
if hasattr(_xml, "tag"):
_xml = _xml.tag
return _xml.split("}")[-1].lower()
# look for `XML` structure starting at `ProcessDescription` (WPS-1)
xml_resp = xml_util.fromstring(str2bytes(wps_process_response.content))
xml_wps_process = xml_resp.xpath("//ProcessDescription") # type: List[xml_util.XML]
if not len(xml_wps_process) == 1:
raise ValueError("Could not retrieve a valid 'ProcessDescription' from WPS-1 response.")
process_id = None
for sub_xml in xml_wps_process[0]:
tag = _tag_name(sub_xml)
if tag == "identifier":
process_id = sub_xml.text
break
if not process_id:
raise ValueError("Could not find a match for 'ProcessDescription.identifier' from WPS-1 response.")
# transform WPS-1 -> WPS-3
wps = get_wps_client(wps_process_response.url, settings)
wps_service_url = urlparse(wps_process_response.url)
if wps.provider:
wps_service_name = wps.provider.name
else:
wps_service_name = wps_service_url.hostname
wps_process = wps.describeprocess(process_id, xml=wps_process_response.content)
cwl_package, process_info = ows2json(wps_process, wps_service_name, wps_service_url)
return cwl_package, process_info
def is_cwl_file_type(io_info):
# type: (CWL_IO_Type) -> bool
"""
    Identifies whether the provided `CWL` input/output corresponds to one, many, or potentially a ``File`` type.
When multiple distinct *atomic* types are allowed for a given I/O (e.g.: ``[string, File]``) and that one of them
is a ``File``, the result will be ``True`` even if other types are not ``Files``.
Potential ``File`` when other base type is ``"null"`` will also return ``True``.
"""
io_type = io_info.get("type")
if not io_type:
raise ValueError("Missing CWL 'type' definition: [{!s}]".format(io_info))
if isinstance(io_type, str):
return io_type == "File"
if isinstance(io_type, dict):
if io_type["type"] == PACKAGE_ARRAY_BASE:
return io_type["items"] == "File"
return io_type["type"] == "File"
if isinstance(io_type, list):
return any(typ == "File" or is_cwl_file_type({"type": typ}) for typ in io_type)
msg = "Unknown parsing of CWL 'type' format ({!s}) [{!s}] in [{}]".format(type(io_type), io_type, io_info)
raise ValueError(msg)
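# Illustrative expectations for is_cwl_file_type (editorial sketch, assuming PACKAGE_ARRAY_BASE == "array"):
#   is_cwl_file_type({"type": "File"})                               -> True
#   is_cwl_file_type({"type": ["null", "File"]})                     -> True
#   is_cwl_file_type({"type": {"type": "array", "items": "File"}})   -> True
#   is_cwl_file_type({"type": "string"})                             -> False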
def is_cwl_array_type(io_info):
# type: (CWL_IO_Type) -> Tuple[bool, str, MODE, Union[AnyValueType, List[Any]]]
"""
Verifies if the specified I/O corresponds to one of various CWL array type definitions.
:returns:
``tuple(is_array, io_type, io_mode, io_allow)`` where:
- ``is_array``: specifies if the I/O is of array type.
- ``io_type``: array element type if ``is_array`` is True, type of ``io_info`` otherwise.
- ``io_mode``: validation mode to be applied if sub-element requires it, defaults to ``MODE.NONE``.
- ``io_allow``: validation values to be applied if sub-element requires it, defaults to ``AnyValue``.
:raises PackageTypeError: if the array element doesn't have the required values and valid format.
"""
# use mapping to allow sub-function updates
io_return = {
"array": False,
"allow": AnyValue,
"type": io_info["type"],
"mode": MODE.NONE,
}
def _update_if_sub_enum(_io_item):
# type: (CWL_IO_Type) -> bool
"""
Updates the ``io_return`` parameters if ``io_item`` evaluates to a valid ``enum`` type.
Parameter ``io_item`` should correspond to field ``items`` of an array I/O definition.
Simple pass-through if the array item is not an ``enum``.
"""
_is_enum, _enum_type, _enum_mode, _enum_allow = is_cwl_enum_type({"type": _io_item}) # noqa: typing
if _is_enum:
LOGGER.debug("I/O [%s] parsed as 'array' with sub-item as 'enum'", io_info["name"])
io_return["type"] = _enum_type
io_return["mode"] = _enum_mode
io_return["allow"] = _enum_allow
return _is_enum
# optional I/O could be an array of '["null", "<type>"]' with "<type>" being any of the formats parsed after
    # i.e., the literal representation instead of the shorthand with '?'
if isinstance(io_info["type"], list) and any(sub_type == "null" for sub_type in io_info["type"]):
# we can ignore the optional indication in this case because it doesn't impact following parsing
io_return["type"] = list(filter(lambda sub_type: sub_type != "null", io_info["type"]))[0]
# array type conversion when defined as '{"type": "array", "items": "<type>"}'
# validate against 'Hashable' instead of 'dict' since 'OrderedDict'/'CommentedMap' can fail 'isinstance()'
if (
not isinstance(io_return["type"], str)
and not isinstance(io_return["type"], Hashable)
and "items" in io_return["type"]
and "type" in io_return["type"]
):
io_type = dict(io_return["type"]) # make hashable to allow comparison
if io_type["type"] != PACKAGE_ARRAY_BASE:
raise PackageTypeError("Unsupported I/O 'array' definition: '{}'.".format(repr(io_info)))
# parse enum in case we got an array of allowed symbols
is_enum = _update_if_sub_enum(io_type["items"])
if not is_enum:
io_return["type"] = io_type["items"]
if io_return["type"] not in PACKAGE_ARRAY_ITEMS:
raise PackageTypeError("Unsupported I/O 'array' definition: '{}'.".format(repr(io_info)))
LOGGER.debug("I/O [%s] parsed as 'array' with nested dict notation", io_info["name"])
io_return["array"] = True
# array type conversion when defined as string '<type>[]'
elif isinstance(io_return["type"], str) and io_return["type"] in PACKAGE_ARRAY_TYPES:
io_return["type"] = io_return["type"][:-2] # remove '[]'
if io_return["type"] in PACKAGE_CUSTOM_TYPES:
# parse 'enum[]' for array of allowed symbols, provide expected structure for sub-item parsing
io_item = deepcopy(io_info)
io_item["type"] = io_return["type"] # override corrected type without '[]'
_update_if_sub_enum(io_item)
if io_return["type"] not in PACKAGE_ARRAY_ITEMS:
raise PackageTypeError("Unsupported I/O 'array' definition: '{}'.".format(repr(io_info)))
LOGGER.debug("I/O [%s] parsed as 'array' with shorthand '[]' notation", io_info["name"])
io_return["array"] = True
return io_return["array"], io_return["type"], io_return["mode"], io_return["allow"]
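# Illustrative sketch for is_cwl_array_type (editorial note: assumes the PACKAGE_ARRAY_* constants
# cover the "array"/"string[]" notations and "string" items); both notations below are expected to
# yield is_array=True with an element type of "string":
#   is_cwl_array_type({"name": "values", "type": {"type": "array", "items": "string"}})
#   is_cwl_array_type({"name": "values", "type": "string[]"})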
def is_cwl_enum_type(io_info):
# type: (CWL_IO_Type) -> Tuple[bool, str, int, Optional[CWL_IO_EnumSymbols]]
"""
Verifies if the specified I/O corresponds to a CWL enum definition.
:returns:
``tuple(is_enum, io_type, io_allow)`` where:
- ``is_enum``: specifies if the I/O is of enum type.
- ``io_type``: enum base type if ``is_enum=True``, type of ``io_info`` otherwise.
- ``io_mode``: validation mode to be applied if input requires it, defaults to ``MODE.NONE``.
- ``io_allow``: validation values of the enum.
:raises PackageTypeError: if the enum doesn't have the required parameters and valid format.
"""
io_type = io_info["type"]
if not isinstance(io_type, dict) or "type" not in io_type or io_type["type"] not in PACKAGE_CUSTOM_TYPES:
return False, io_type, MODE.NONE, None
if "symbols" not in io_type:
raise PackageTypeError("Unsupported I/O 'enum' definition: '{!r}'.".format(io_info))
io_allow = io_type["symbols"]
if not isinstance(io_allow, list) or len(io_allow) < 1:
raise PackageTypeError("Invalid I/O 'enum.symbols' definition: '{!r}'.".format(io_info))
# validate matching types in allowed symbols and convert to supported CWL type
first_allow = io_allow[0]
for io_i in io_allow:
if type(io_i) is not type(first_allow):
raise PackageTypeError("Ambiguous types in I/O 'enum.symbols' definition: '{!r}'.".format(io_info))
if isinstance(first_allow, str):
io_type = "string"
elif isinstance(first_allow, float):
io_type = "float"
elif isinstance(first_allow, int):
io_type = "int"
else:
raise PackageTypeError("Unsupported I/O 'enum' base type: `{!s}`, from definition: `{!r}`."
.format(type(first_allow), io_info))
# allowed value validator mode must be set for input
return True, io_type, MODE.SIMPLE, io_allow
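# Illustrative sketch for is_cwl_enum_type (editorial note: assumes "enum" is part of PACKAGE_CUSTOM_TYPES):
#   is_cwl_enum_type({"type": {"type": "enum", "symbols": ["red", "green", "blue"]}})
#   is expected to return (True, "string", MODE.SIMPLE, ["red", "green", "blue"])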
def get_cwl_io_type(io_info):
# type: (CWL_IO_Type) -> Tuple[str, bool]
"""
Obtains the basic type of the CWL input and identity if it is optional.
    CWL allows multiple shorthand representations or combined type definitions.
The *base* type must be extracted in order to identify the expected data format and supported values.
Obtains real type if ``"default"`` or shorthand ``"<type>?"`` was in CWL, which
can also be defined as type ``["null", <type>]``.
CWL allows multiple distinct types (e.g.: ``string`` and ``int`` simultaneously), but not WPS inputs.
    WPS only allows a different amount of the *same type* through ``minOccurs`` and ``maxOccurs``.
    Considering WPS conversion, we can also have the following definition ``["null", <type>, <array-type>]`` (same type).
Whether single or array-like type, the base type can be extracted.
:param io_info: definition of the CWL input.
:return: tuple of guessed base type and flag indicating if it can be null (optional input).
"""
io_type = io_info["type"]
is_null = False
if isinstance(io_type, list):
if not len(io_type) > 1:
raise PackageTypeError("Unsupported I/O type as list cannot have only one base type: '{}'".format(io_info))
if "null" in io_type:
if len(io_type) == 1:
raise PackageTypeError("Unsupported I/O cannot be only 'null' type: '{}'".format(io_info))
LOGGER.debug("I/O parsed for 'default'")
is_null = True # I/O can be omitted since default value exists
io_type = [typ for typ in io_type if typ != "null"]
if len(io_type) == 1: # valid if other was "null" now removed
io_type = io_type[0]
else:
# check that many sub-type definitions all match same base type (no conflicting literals)
io_type_many = set()
io_base_type = None
for i, typ in enumerate(io_type):
sub_type = {"type": typ, "name": "{}[{}]".format(io_info["name"], i)}
is_array, array_elem, _, _ = is_cwl_array_type(sub_type)
is_enum, enum_type, _, _ = is_cwl_enum_type(sub_type)
# array base type more important than enum because later array conversion also handles allowed values
if is_array:
io_base_type = typ # highest priority (can have sub-literal or sub-enum)
io_type_many.add(array_elem)
elif is_enum:
io_base_type = io_base_type if io_base_type is not None else enum_type # less priority
io_type_many.add(enum_type)
else:
io_base_type = io_base_type if io_base_type is not None else typ # less priority
io_type_many.add(typ) # literal base type by itself (not array/enum)
if len(io_type_many) != 1:
raise PackageTypeError("Unsupported I/O with many distinct base types for info: '{!s}'".format(io_info))
io_type = io_base_type
LOGGER.debug("I/O parsed for multiple base types")
return io_type, is_null
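# Illustrative sketch of the optional-type resolution (editorial note, names are placeholders):
#   get_cwl_io_type({"name": "count", "type": ["null", "int"]})  -> ("int", True)
#   get_cwl_io_type({"name": "count", "type": "int"})            -> ("int", False)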
def cwl2wps_io(io_info, io_select):
# type:(CWL_IO_Type, str) -> WPS_IO_Type
"""
Converts input/output parameters from CWL types to WPS types.
:param io_info: parsed IO of a CWL file
:param io_select: :py:data:`WPS_INPUT` or :py:data:`WPS_OUTPUT` to specify desired WPS type conversion.
:returns: corresponding IO in WPS format
"""
is_input = False
is_output = False
# FIXME: BoundingBox not implemented (https://github.com/crim-ca/weaver/issues/51)
if io_select == WPS_INPUT:
is_input = True
io_literal = LiteralInput # type: Union[Type[LiteralInput], Type[LiteralOutput]]
io_complex = ComplexInput # type: Union[Type[ComplexInput], Type[ComplexOutput]]
# io_bbox = BoundingBoxInput # type: Union[Type[BoundingBoxInput], Type[BoundingBoxOutput]]
elif io_select == WPS_OUTPUT:
is_output = True
io_literal = LiteralOutput # type: Union[Type[LiteralInput], Type[LiteralOutput]]
io_complex = ComplexOutput # type: Union[Type[ComplexInput], Type[ComplexOutput]]
# io_bbox = BoundingBoxOutput # type: Union[Type[BoundingBoxInput], Type[BoundingBoxOutput]]
else:
raise PackageTypeError("Unsupported I/O info definition: '{!r}' with '{}'.".format(io_info, io_select))
# obtain base type considering possible CWL type representations
io_type, is_null = get_cwl_io_type(io_info)
io_info["type"] = io_type # override resolved multi-type base for more parsing
io_name = io_info["name"]
io_min_occurs = 0 if is_null else 1
io_max_occurs = 1 # unless array after
# convert array types
is_array, array_elem, io_mode, io_allow = is_cwl_array_type(io_info)
if is_array:
LOGGER.debug("I/O parsed for 'array'")
io_type = array_elem
io_max_occurs = PACKAGE_ARRAY_MAX_SIZE
# convert enum types
is_enum, enum_type, enum_mode, enum_allow = is_cwl_enum_type(io_info)
if is_enum:
LOGGER.debug("I/O parsed for 'enum'")
io_type = enum_type
io_allow = enum_allow
io_mode = enum_mode
# debug info for unhandled types conversion
if not isinstance(io_type, str):
LOGGER.debug("is_array: [%s]", repr(is_array))
LOGGER.debug("array_elem: [%s]", repr(array_elem))
LOGGER.debug("is_enum: [%s]", repr(is_enum))
LOGGER.debug("enum_type: [%s]", repr(enum_type))
LOGGER.debug("enum_allow: [%s]", repr(enum_allow))
LOGGER.debug("io_info: [%s]", repr(io_info))
LOGGER.debug("io_type: [%s]", repr(io_type))
LOGGER.debug("type(io_type): [%s]", type(io_type))
raise TypeError("I/O type has not been properly decoded. Should be a string, got: '{!r}'".format(io_type))
# literal types
if is_enum or io_type in PACKAGE_LITERAL_TYPES:
if io_type == "Any":
io_type = "anyvalue"
if io_type == "null":
io_type = "novalue"
if io_type in ["int", "integer", "long"]:
io_type = "integer"
if io_type in ["float", "double"]:
io_type = "float"
# keywords commonly used by I/O
kw = {
"identifier": io_name,
"title": io_info.get("label", ""),
"abstract": io_info.get("doc", ""),
"data_type": io_type,
"mode": io_mode,
}
if is_input:
            # avoid storing 'AnyValue' which becomes more problematic than
# anything later on when CWL/WPS merging is attempted
if io_allow is not AnyValue:
kw["allowed_values"] = io_allow
kw["default"] = io_info.get("default", None)
kw["min_occurs"] = io_min_occurs
kw["max_occurs"] = io_max_occurs
return io_literal(**kw)
# complex types
else:
# keywords commonly used by I/O
kw = {
"identifier": io_name,
"title": io_info.get("label", io_name),
"abstract": io_info.get("doc", ""),
}
if "format" in io_info:
io_formats = [io_info["format"]] if isinstance(io_info["format"], str) else io_info["format"]
kw["supported_formats"] = [get_format(fmt) for fmt in io_formats]
kw["mode"] = MODE.SIMPLE # only validate the extension (not file contents)
else:
# we need to minimally add 1 format, otherwise empty list is evaluated as None by pywps
# when "supported_formats" is None, the process's json property raises because of it cannot iterate formats
kw["supported_formats"] = [DEFAULT_FORMAT]
kw["mode"] = MODE.NONE # don't validate anything as default is only raw text
if is_output:
if io_type == "Directory":
kw["as_reference"] = True
if io_type == "File":
has_contents = io_info.get("contents") is not None
kw["as_reference"] = not has_contents
else:
# note:
# value of 'data_format' is identified as 'default' input format if specified with `Format`
# otherwise, `None` makes it automatically use the first one available in 'supported_formats'
kw["data_format"] = get_field(io_info, "data_format")
kw["data_format"] = json2wps_field(kw["data_format"], "supported_formats") if kw["data_format"] else None
kw.update({
"min_occurs": io_min_occurs,
"max_occurs": io_max_occurs,
})
return io_complex(**kw)
def cwl2json_input_values(data, schema=PROCESS_SCHEMA_OGC):
# type: (Dict[str, CWL_IO_Value], str) -> JSON
"""
Converts :term:`CWL` formatted :term:`Job` inputs to corresponding :term:`OGC API - Processes` format.
:param data: dictionary with inputs formatted as key-value pairs with relevant structure based on :term:`CWL` types.
:param schema: either ``OGC`` or ``OLD`` format respectively for mapping/listing representations.
:raises TypeError: if input data is invalid.
:raises ValueError: if any input value could not be parsed with expected schema.
:returns: converted inputs for :term:`Job` submission either in ``OGC`` or ``OLD`` format.
"""
if not isinstance(data, dict):
raise TypeError(f"Invalid CWL input values format must be a dictionary of keys to values. Got [{type(data)}].")
inputs = {}
for input_id, input_value in data.items():
# single file
if isinstance(input_value, dict) and input_value.get("class") == "File":
inputs[input_id] = {"href": input_value.get("path")}
# single literal value
elif isinstance(input_value, (str, int, float, bool)):
inputs[input_id] = {"value": input_value}
# multiple files
elif isinstance(input_value, list) and all(
isinstance(val, dict) and val.get("class") == "File" for val in input_value
):
inputs[input_id] = [{"href": val.get("path")} for val in input_value]
# multiple literal values
elif isinstance(input_value, list) and all(
isinstance(val, (str, int, float, bool)) for val in input_value
):
inputs[input_id] = [{"value": val} for val in input_value]
else:
raise ValueError(f"Input [{input_id}] value definition could not be parsed: {input_value!s}")
schema = schema.upper()
if schema == PROCESS_SCHEMA_OGC:
return inputs
if schema != PROCESS_SCHEMA_OLD:
raise NotImplementedError(f"Unknown conversion format of input values for schema: [{schema}]")
input_list = []
for input_id, input_value in inputs.items():
if isinstance(input_value, list):
input_key = list(input_value[0])[0]
input_list.extend([{"id": input_id, input_key: val[input_key]} for val in input_value])
else:
input_key = list(input_value)[0]
input_value = input_value[input_key]
input_list.append({"id": input_id, input_key: input_value})
return input_list
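# Illustrative sketch (editorial note: identifiers and paths below are placeholders, not part of the module):
#   cwl2json_input_values({"message": "hello", "data": {"class": "File", "path": "/tmp/data.txt"}})
#     -> {"message": {"value": "hello"}, "data": {"href": "/tmp/data.txt"}}             (OGC mapping)
#   the same call with schema=PROCESS_SCHEMA_OLD is expected to return the listing form
#     -> [{"id": "message", "value": "hello"}, {"id": "data", "href": "/tmp/data.txt"}]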
def repr2json_input_values(inputs):
# type: (List[str]) -> List[JSON]
"""
Converts inputs in string representation to corresponding :term:`JSON` values.
Expected format is as follows:
.. code-block:: text
input_id[:input_type]=input_value[;input_array]
Where:
- ``input_id`` represents the target identifier of the input
- ``input_type`` represents the conversion type, as required
(includes ``File`` for ``href`` instead of ``value`` key in resulting object)
- ``input_value`` represents the desired value subject to conversion by ``input_type``
- ``input_array`` represents any additional values for array-like inputs (``maxOccurs > 1``)
:param inputs: list of string inputs to parse.
:return: parsed inputs if successful.
"""
values = []
for str_input in inputs:
str_id, str_val = str_input.split("=")
str_id_typ = str_id.split(":")
if len(str_id_typ) == 2:
str_id, str_typ = str_id_typ
elif len(str_id_typ) != 1:
raise ValueError(f"Invalid input value ID representation. Must be 'ID[:TYPE]' for '{str_id!s}'.")
else:
str_typ = "string"
val_typ = any2cwl_literal_datatype(str_typ)
if not str_id or (val_typ is null and str_typ not in INPUT_VALUE_TYPE_MAPPING):
raise ValueError(f"Invalid input value ID representation. "
f"Missing or unknown 'ID[:type]' parts after resolution as '{str_id!s}:{str_typ!s}'.")
map_typ = val_typ if val_typ is not null else str_typ
arr_val = str_val.split(";")
arr_typ = INPUT_VALUE_TYPE_MAPPING[map_typ]
arr_val = [arr_typ(val) for val in arr_val]
val_key = "href" if str_typ in ["file", "File"] else "value"
values.append({"id": str_id, val_key: arr_val if ";" in str_val else arr_val[0]})
return values
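# Illustrative sketch of the expected parsing (editorial note: identifiers and paths are placeholders):
#   repr2json_input_values(["amount:integer=1;2;3", "data:File=/tmp/in.txt"])
#     -> [{"id": "amount", "value": [1, 2, 3]}, {"id": "data", "href": "/tmp/in.txt"}]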
def any2cwl_literal_datatype(io_type):
# type: (str) -> Union[str, Type[null]]
"""
Solves common literal data-type names to supported ones for `CWL`.
"""
if io_type in ["string", "date", "time", "dateTime", "anyURI"]:
return "string"
if io_type in ["scale", "angle", "float", "double"]:
return "float"
if io_type in ["integer", "long", "positiveInteger", "nonNegativeInteger"]:
return "int"
if io_type in ["bool", "boolean"]:
return "boolean"
LOGGER.warning("Could not identify a CWL literal data type with [%s].", io_type)
return null
def any2wps_literal_datatype(io_type, is_value):
# type: (AnyValueType, bool) -> Union[str, Type[null]]
"""
Solves common literal data-type names to supported ones for `WPS`.
Verification is accomplished by name when ``is_value=False``, otherwise with python ``type`` when ``is_value=True``.
"""
if isinstance(io_type, str):
if not is_value:
if io_type in ["string", "date", "time", "dateTime", "anyURI"]:
return "string"
if io_type in ["scale", "angle", "float", "double"]:
return "float"
if io_type in ["int", "integer", "long", "positiveInteger", "nonNegativeInteger"]:
return "integer"
if io_type in ["bool", "boolean"]:
return "boolean"
LOGGER.warning("Unknown named literal data type: '%s', using default 'string'. Should be one of: %s",
io_type, list(WPS_LITERAL_DATA_TYPE_NAMES))
return "string"
if is_value and isinstance(io_type, bool):
return "boolean"
if is_value and isinstance(io_type, int):
return "integer"
if is_value and isinstance(io_type, float):
return "float"
return null
def any2json_literal_allowed_value(io_allow):
# type: (Union[AllowedValue, JSON, str, float, int, bool]) -> Union[JSON, str, str, float, int, bool, Type[null]]
"""
Converts an ``AllowedValues`` definition from different packages into standardized JSON representation of `OGC-API`.
"""
if isinstance(io_allow, AllowedValue):
io_allow = io_allow.json
if isinstance(io_allow, dict):
wps_range = {}
for field, dest in [
("range_minimum", "minimumValue"),
("range_maximum", "maximumValue"),
("range_spacing", "spacing"),
("range_closure", "rangeClosure")
]:
wps_range_value = get_field(io_allow, field, search_variations=True, pop_found=True)
if wps_range_value is not null:
wps_range[dest] = wps_range_value
# in case input was a PyWPS AllowedValue object converted to JSON,
# extra metadata must be removed/transformed accordingly for literal value
basic_type = io_allow.pop("type", None)
allowed_type = io_allow.pop("allowed_type", None)
allowed_type = allowed_type or basic_type
allowed_value = io_allow.pop("value", None)
if allowed_value is not None:
# note: closure must be ignored for range compare because it defaults to 'close' even for a 'value' type
range_fields = ["minimumValue", "maximumValue", "spacing"]
if allowed_type == "value" or not any(field in io_allow for field in range_fields):
return allowed_value
if not io_allow: # empty container
return null
return io_allow
def any2json_literal_data_domains(io_info):
# type: (ANY_IO_Type) -> Union[Type[null], List[JSON]]
"""
    Extracts allowed value constraints from the input definition and generates the expected literal data domains.
The generated result, if applicable, corresponds to a list of a single instance of
schema definition :class:`weaver.wps_restapi.swagger_definitions.LiteralDataDomainList` with following structure.
.. code-block:: yaml
default: bool
defaultValue: float, int, bool, str
dataType: {name: string, <reference: url: string>}
uom: string
valueDefinition:
oneOf:
- string
- url-string
- {anyValue: bool}
- [float, int, bool, str]
- [{minimum, maximum, spacing, closure}]
"""
io_type = get_field(io_info, "type", search_variations=False)
if io_type in [WPS_BOUNDINGBOX, WPS_COMPLEX]:
return null
io_data_type = get_field(io_info, "type", search_variations=True, only_variations=True)
domain = {
"default": True, # since it is generated from convert, only one is available anyway
"dataType": {
"name": any2wps_literal_datatype(io_data_type, is_value=False), # just to make sure, simplify type
# reference: # FIXME: unsupported named-reference data-type (need example to test it)
}
# uom: # FIXME: unsupported Unit of Measure (need example to test it)
}
wps_allowed_values = get_field(io_info, "allowed_values", search_variations=True)
wps_default_value = get_field(io_info, "default", search_variations=True)
wps_value_definition = {"anyValue": get_field(io_info, "any_value", search_variations=True, default=False)}
if wps_default_value not in [null, None]:
domain["defaultValue"] = wps_default_value
if isinstance(wps_allowed_values, list) and len(wps_allowed_values) > 0:
wps_allowed_values = [any2json_literal_allowed_value(io_value) for io_value in wps_allowed_values]
wps_allowed_values = [io_value for io_value in wps_allowed_values if io_value is not null]
if wps_allowed_values:
wps_value_definition = wps_allowed_values
domain["valueDefinition"] = wps_value_definition
return [domain]
def json2wps_datatype(io_info):
# type: (JSON_IO_Type) -> str
"""
Converts a JSON input definition into the corresponding :mod:`pywps` parameters.
Guesses the literal data-type from I/O JSON information in order to allow creation of the corresponding I/O WPS.
Defaults to ``string`` if no suitable guess can be accomplished.
"""
io_type = get_field(io_info, "type", search_variations=False, pop_found=True)
if str(io_type).lower() == WPS_LITERAL:
io_type = null
io_guesses = [
(io_type, False),
(get_field(io_info, "type", search_variations=True), False),
(get_field(io_info, "default", search_variations=True), True),
(get_field(io_info, "allowed_values", search_variations=True), True),
(get_field(io_info, "supported_values", search_variations=True), True)
]
for io_guess, is_value in io_guesses:
if io_type:
break
if isinstance(io_guess, list) and len(io_guess):
io_guess = io_guess[0]
io_type = any2wps_literal_datatype(io_guess, is_value)
if not isinstance(io_type, str):
LOGGER.warning("Failed literal data-type guess, using default 'string' for I/O [%s].",
get_field(io_info, "identifier", search_variations=True))
return "string"
return io_type
def json2wps_field(field_info, field_category):
# type: (JSON, str) -> Any
"""
Converts an I/O field from a JSON literal data, list, or dictionary to corresponding WPS types.
:param field_info: literal data or information container describing the type to be generated.
:param field_category: one of ``WPS_FIELD_MAPPING`` keys to indicate how to parse ``field_info``.
"""
if field_category == "allowed_values":
return json2wps_allowed_values({"allowed_values": field_info})
elif field_category == "supported_formats":
if isinstance(field_info, dict):
return Format(**field_info)
if isinstance(field_info, str):
return Format(field_info)
elif field_category == "metadata":
if isinstance(field_info, WPS_Metadata):
return field_info
if isinstance(field_info, dict):
meta = metadata2json(field_info, force=True)
meta.pop("rel", None)
return WPS_Metadata(**meta)
if isinstance(field_info, str):
return WPS_Metadata(field_info)
elif field_category == "keywords" and isinstance(field_info, list):
return field_info
elif field_category in ["identifier", "title", "abstract"] and isinstance(field_info, str):
return field_info
LOGGER.warning("Field of type '%s' not handled as known WPS field.", field_category)
return None
def json2wps_allowed_values(io_info):
# type: (JSON_IO_Type) -> Union[Type[null], List[AllowedValue]]
"""
    Obtains the allowed values constraints for the literal data type from a JSON I/O definition.
Converts the ``literalDataDomains`` definition into ``allowed_values`` understood by :mod:`pywps`.
Handles explicit ``allowed_values`` if available and not previously defined by ``literalDataDomains``.
.. seealso::
Function :func:`any2json_literal_data_domains` defines generated ``literalDataDomains`` JSON definition.
"""
domains = get_field(io_info, "literal_data_domains", search_variations=True)
allowed = get_field(io_info, "allowed_values", search_variations=True)
if not domains and isinstance(allowed, list):
if all(isinstance(value, AllowedValue) for value in allowed):
return allowed
if all(isinstance(value, (float, int, str)) for value in allowed):
return [AllowedValue(value=value) for value in allowed]
if all(isinstance(value, dict) for value in allowed):
allowed_values = []
for value in allowed:
min_val = get_field(value, "range_minimum", search_variations=True, default=None)
max_val = get_field(value, "range_maximum", search_variations=True, default=None)
spacing = get_field(value, "range_spacing", search_variations=True, default=None)
closure = get_field(value, "range_closure", search_variations=True, default=RANGECLOSURETYPE.CLOSED)
literal = get_field(value, "value", search_variations=False, default=None)
if min_val or max_val or spacing:
allowed_values.append(AllowedValue(ALLOWEDVALUETYPE.RANGE,
minval=min_val, maxval=max_val,
spacing=spacing, range_closure=closure))
elif literal:
allowed_values.append(AllowedValue(ALLOWEDVALUETYPE.VALUE, value=literal))
# literalDataDomains could be 'anyValue', which is to be ignored here
return allowed_values
LOGGER.debug("Cannot parse literal I/O AllowedValues: %s", allowed)
raise ValueError("Unknown parsing of 'AllowedValues' for value: {!s}".format(allowed))
if domains:
for domain in domains:
values = domain.get("valueDefinition")
if values:
allowed = json2wps_allowed_values({"allowed_values": values})
# stop on first because undefined how to combine multiple
# no multiple definitions by 'any2json_literal_data_domains' regardless, and not directly handled by pywps
if allowed:
return allowed
return null
def json2wps_io(io_info, io_select):
# type: (JSON_IO_Type, str) -> WPS_IO_Type
"""
Converts an I/O from a JSON dict to PyWPS types.
:param io_info: I/O in JSON dict format.
:param io_select: :py:data:`WPS_INPUT` or :py:data:`WPS_OUTPUT` to specify desired WPS type conversion.
:return: corresponding I/O in WPS format.
"""
io_info["identifier"] = get_field(io_info, "identifier", search_variations=True, pop_found=True)
rename = {
"formats": "supported_formats",
"minOccurs": "min_occurs",
"maxOccurs": "max_occurs",
"dataType": "data_type",
"defaultValue": "default",
"supportedValues": "supported_values",
}
remove = [
"id",
"workdir",
"any_value",
"data_format",
"data",
"file",
"mimetype",
"mediaType",
"encoding",
"schema",
"asreference",
"additionalParameters",
]
replace_values = {"unbounded": PACKAGE_ARRAY_MAX_SIZE}
transform_json(io_info, rename=rename, remove=remove, replace_values=replace_values)
# convert allowed value objects
values = json2wps_allowed_values(io_info)
if values is not null:
if isinstance(values, list) and len(values) > 0:
io_info["allowed_values"] = values
else:
io_info["allowed_values"] = AnyValue # noqa
# convert supported format objects
formats = get_field(io_info, "supported_formats", search_variations=True, pop_found=True)
if formats is not null:
for fmt in formats:
fmt["mime_type"] = get_field(fmt, "mime_type", search_variations=True, pop_found=True)
fmt.pop("maximumMegabytes", None)
# define the 'default' with 'data_format' to be used if explicitly specified from the payload
if fmt.pop("default", None) is True:
if get_field(io_info, "data_format") != null: # if set by previous 'fmt'
raise PackageTypeError("Cannot have multiple 'default' formats simultaneously.")
# use 'data_format' instead of 'default' to avoid overwriting a potential 'default' value
# field 'data_format' is mapped as 'default' format
io_info["data_format"] = json2wps_field(fmt, "supported_formats")
io_info["supported_formats"] = [json2wps_field(fmt, "supported_formats") for fmt in formats]
# convert metadata objects
metadata = get_field(io_info, "metadata", search_variations=True, pop_found=True)
if metadata is not null:
io_info["metadata"] = [json2wps_field(meta, "metadata") for meta in metadata]
# convert literal fields specified as is
for field in ["identifier", "title", "abstract", "keywords"]:
value = get_field(io_info, field, search_variations=True, pop_found=True)
if value is not null:
io_info[field] = json2wps_field(value, field)
# convert by type, add missing required arguments and
# remove additional arguments according to each case
io_type = io_info.pop("type", WPS_COMPLEX) # only ComplexData doesn't have "type"
# attempt to identify defined data-type directly in 'type' field instead of 'data_type'
if io_type not in WPS_ALL_TYPES:
io_type_guess = any2wps_literal_datatype(io_type, is_value=False)
if io_type_guess is not null:
io_type = WPS_LITERAL
io_info["data_type"] = io_type_guess
if io_select == WPS_INPUT:
if ("max_occurs", "unbounded") in io_info.items():
io_info["max_occurs"] = PACKAGE_ARRAY_MAX_SIZE
if io_type in WPS_COMPLEX_TYPES:
if "supported_formats" not in io_info:
io_info["supported_formats"] = [DEFAULT_FORMAT]
io_info.pop("data_type", None)
io_info.pop("allowed_values", None)
io_info.pop("supported_values", None)
return ComplexInput(**io_info)
if io_type == WPS_BOUNDINGBOX:
io_info.pop("supported_formats", None)
io_info.pop("supportedCRS", None)
return BoundingBoxInput(**io_info)
if io_type == WPS_LITERAL:
io_info.pop("data_format", None)
io_info.pop("supported_formats", None)
io_info["data_type"] = json2wps_datatype(io_info)
allowed_values = json2wps_allowed_values(io_info)
if allowed_values:
io_info["allowed_values"] = allowed_values
else:
io_info.pop("allowed_values", None)
io_info.pop("literalDataDomains", None)
return LiteralInput(**io_info)
elif io_select == WPS_OUTPUT:
io_info.pop("min_occurs", None)
io_info.pop("max_occurs", None)
io_info.pop("allowed_values", None)
io_info.pop("data_format", None)
io_info.pop("default", None)
if io_type in WPS_COMPLEX_TYPES:
io_info.pop("supported_values", None)
return ComplexOutput(**io_info)
if io_type == WPS_BOUNDINGBOX:
io_info.pop("supported_formats", None)
return BoundingBoxOutput(**io_info)
if io_type == WPS_LITERAL:
io_info.pop("supported_formats", None)
io_info["data_type"] = json2wps_datatype(io_info)
allowed_values = json2wps_allowed_values(io_info)
if allowed_values:
io_info["allowed_values"] = allowed_values
else:
io_info.pop("allowed_values", None)
io_info.pop("literalDataDomains", None)
return LiteralOutput(**io_info)
raise PackageTypeError("Unknown conversion from dict to WPS type (type={0}, mode={1}).".format(io_type, io_select))
def wps2json_io(io_wps):
# type: (WPS_IO_Type) -> JSON_IO_Type
"""
Converts a PyWPS I/O into a dictionary based version with keys corresponding to standard names (WPS 2.0).
"""
if not isinstance(io_wps, BasicIO):
raise PackageTypeError("Invalid type, expected 'BasicIO', got: [{0!r}] '{1!r}'".format(type(io_wps), io_wps))
if not hasattr(io_wps, "json"):
raise PackageTypeError("Invalid type definition expected to have a 'json' property.")
io_wps_json = io_wps.json # noqa
rename = {
"identifier": "id",
"abstract": "description",
"supported_formats": "formats",
"mime_type": "mediaType",
"min_occurs": "minOccurs",
"max_occurs": "maxOccurs",
}
replace_values = {
PACKAGE_ARRAY_MAX_SIZE: "unbounded",
}
replace_func = {
"maxOccurs": str,
"minOccurs": str,
}
transform_json(io_wps_json, rename=rename, replace_values=replace_values, replace_func=replace_func)
# in some cases (Complex I/O), 'as_reference=True' causes "type" to be overwritten, revert it back
if "type" in io_wps_json and io_wps_json["type"] == WPS_REFERENCE:
io_wps_json["type"] = WPS_COMPLEX
# minimum requirement of 1 format object which defines mime-type
if io_wps_json["type"] == WPS_COMPLEX:
# FIXME: should we store 'None' in db instead of empty string when missing "encoding", "schema", etc. ?
if "formats" not in io_wps_json or not len(io_wps_json["formats"]):
io_wps_json["formats"] = [DEFAULT_FORMAT.json]
for io_format in io_wps_json["formats"]:
transform_json(io_format, rename=rename, replace_values=replace_values, replace_func=replace_func)
# set 'default' format if it matches perfectly, or if only mime-type matches and it is the only available one
        # (this avoids 'encoding' possibly not matching due to CWL not providing this information)
io_default = get_field(io_wps_json, "default", search_variations=True)
for io_format in io_wps_json["formats"]:
io_format["default"] = (io_default != null and is_equal_formats(io_format, io_default))
if io_default and len(io_wps_json["formats"]) == 1 and not io_wps_json["formats"][0]["default"]:
io_default_mime_type = get_field(io_default, "mime_type", search_variations=True)
io_single_fmt_mime_type = get_field(io_wps_json["formats"][0], "mime_type", search_variations=True)
io_wps_json["formats"][0]["default"] = (io_default_mime_type == io_single_fmt_mime_type)
elif io_wps_json["type"] == WPS_BOUNDINGBOX:
pass # FIXME: BoundingBox not implemented (https://github.com/crim-ca/weaver/issues/51)
else: # literal
domains = any2json_literal_data_domains(io_wps_json)
if domains:
io_wps_json["literalDataDomains"] = domains
return io_wps_json
def wps2json_job_payload(wps_request, wps_process):
# type: (WPSRequest, ProcessWPS) -> JSON
"""
Converts the input and output values of a :mod:`pywps` WPS ``Execute`` request to corresponding WPS-REST job.
The inputs and outputs must be parsed from XML POST payload or KVP GET query parameters, and converted to data
container defined by :mod:`pywps` based on the process definition.
"""
data = {
"inputs": [],
"outputs": [],
"response": EXECUTE_RESPONSE_DOCUMENT,
"mode": EXECUTE_MODE_ASYNC,
}
multi_inputs = list(wps_request.inputs.values())
for input_list in multi_inputs:
iid = get_any_id(input_list[0])
for input_value in input_list:
input_data = input_value.get("data")
input_href = input_value.get("href")
if input_data:
data["inputs"].append({"id": iid, "data": input_data})
elif input_href:
data["inputs"].append({"id": iid, "href": input_href})
output_ids = list(wps_request.outputs)
for output in wps_process.outputs:
oid = output.identifier
as_ref = isinstance(output, ComplexOutput)
if oid not in output_ids:
data_output = {"identifier": oid, "asReference": str(as_ref).lower()}
else:
data_output = wps_request.outputs[oid]
if as_ref:
data_output["transmissionMode"] = EXECUTE_TRANSMISSION_MODE_REFERENCE
else:
data_output["transmissionMode"] = EXECUTE_TRANSMISSION_MODE_VALUE
data_output["id"] = oid
data["outputs"].append(data_output)
return data
def get_field(io_object, field, search_variations=False, only_variations=False, pop_found=False, default=null):
# type: (Any, str, bool, bool, bool, Any) -> Any
"""
Gets a field by name from various I/O object types.
    The default value is :py:data:`null`, which is used in most situations to differentiate from a literal ``None``
    often employed as parameter default. The :class:`NullType` makes it possible to state explicitly that there was
    'no field' rather than 'no value' in an existing field. If another :paramref:`default` is provided, it is
    returned whenever no match is found within the input object.
    When :paramref:`search_variations` is enabled and :paramref:`field` could not be found within the object,
field lookup will employ the values under the :paramref:`field` entry within :data:`WPS_FIELD_MAPPING` as
additional field names to search for an existing property or key. Search continues until the first match is found,
respecting order within the variations listing, and finally uses :paramref:`default` if no match was found.
:param io_object: Any I/O representation, either as a class instance or JSON container.
:param field: Name of the field to look for, either as property or key name based on input object type.
:param search_variations: If enabled, search for all variations to the field name to attempt search until matched.
:param only_variations: If enabled, skip the first 'basic' field and start search directly with field variations.
:param pop_found: If enabled, whenever a match is found by field or variations, remove that entry from the object.
:param default: Alternative default value to return if no match could be found.
:returns: Matched value (including search variations if enabled), or ``default``.
"""
if not (search_variations and only_variations):
if isinstance(io_object, dict):
value = io_object.get(field, null)
if value is not null:
if pop_found:
io_object.pop(field)
return value
else:
value = getattr(io_object, field, null)
if value is not null:
return value
if search_variations and field in WPS_FIELD_MAPPING:
for var in WPS_FIELD_MAPPING[field]:
value = get_field(io_object, var, search_variations=False, only_variations=False, pop_found=pop_found)
if value is not null:
return value
return default
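# Illustrative behaviour (hypothetical payload; assumes "mimeType" is registered as a variation
# of "mime_type" in WPS_FIELD_MAPPING, as its use elsewhere in this module implies):
#   get_field({"mimeType": "text/plain"}, "mime_type")                          -> null
#   get_field({"mimeType": "text/plain"}, "mime_type", search_variations=True)  -> "text/plain"
#   get_field({}, "mime_type", default=None)                                    -> None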
def set_field(io_object, field, value, force=False):
# type: (Union[ANY_IO_Type, ANY_Format_Type], str, Any, bool) -> None
"""
Sets a field by name into various I/O object types.
Field value is set only if not ``null`` to avoid inserting data considered `invalid`.
If ``force=True``, verification of ``null`` value is ignored.
"""
if value is not null or force:
if isinstance(io_object, dict):
io_object[field] = value
return
setattr(io_object, field, value)
def _are_different_and_set(item1, item2):
# type: (Any, Any) -> bool
"""
    Verifies if two items are set and are of different "representative" value.
    Compares two value representations and returns ``True`` only if both are not ``null``, are of the same ``type``
    and have a different representative value. By "representative", we consider here the visual representation of
    byte/unicode strings rather than literal values to support XML/JSON and Python 2/3 implementations.
    Other non string-like types are verified with the literal (usual) equality method.
"""
if item1 is null or item2 is null:
return False
try:
# Note:
# Calling ``==`` will result in one defined item's type ``__eq__`` method calling a property to validate
# equality with the second. When compared to a ``null``, ``None`` or differently typed second item, the
        # missing property on the second item could raise an ``AssertionError`` depending on the ``__eq__``
        # implementation (e.g.: ``Format`` checking for ``item.mime_type``, etc.).
equal = item1 == item2
except AttributeError:
return False
if equal:
return False
# Note: check for both (str, bytes) for any python implementation that modifies its value
type1 = str if isinstance(item1, (str, bytes)) else type(item1)
type2 = str if isinstance(item2, (str, bytes)) else type(item2)
if type1 is str and type2 is str:
return bytes2str(item1) != bytes2str(item2)
return True
def is_equal_formats(format1, format2):
# type: (Union[Format, JSON], Union[Format, JSON]) -> bool
"""
    Verifies whether two formats match based on their mime-type and encoding.
"""
mime_type1 = get_field(format1, "mime_type", search_variations=True)
mime_type2 = get_field(format2, "mime_type", search_variations=True)
encoding1 = get_field(format1, "encoding", search_variations=True)
encoding2 = get_field(format2, "encoding", search_variations=True)
if (
mime_type1 == mime_type2 and encoding1 == encoding2
and all(f != null for f in [mime_type1, mime_type2, encoding1, encoding2])
):
return True
return False
def normalize_ordered_io(io_section, order_hints=None):
# type: (JSON_IO_ListOrMap, Optional[JSON_IO_ListOrMap]) -> List[JSON]
"""
Reorders and converts I/O from any representation (:class:`dict` or :class:`list`) considering given ordering hints.
First, converts I/O definitions defined as dictionary to an equivalent :class:`list` representation,
in order to work only with a single representation method. The :class:`list` is chosen over :class:`dict` because
sequences can enforce a specific order, while mapping have no particular order. The list representation ensures
that I/O order is preserved when written to file and reloaded afterwards regardless of each server and/or library's
implementation of the mapping container.
If this function fails to correctly order any I/O or cannot correctly guarantee such result because of the provided
parameters (e.g.: no hints given when required), the result will not break nor change the final processing behaviour
    of parsers. These are merely *cosmetic* adjustments that ease I/O readability and avoid constantly shuffling their order
across multiple :term:`Application Package` and :term:`Process` reporting formats.
The important result of this function is to provide the I/O as a consistent list of objects so it is less
cumbersome to compare/merge/iterate over the elements with all functions that will follow.
.. note::
When defined as a dictionary, an :class:`OrderedDict` is expected as input to ensure preserved field order.
Prior to Python 3.7 or CPython 3.5, preserved order is not guaranteed for *builtin* :class:`dict`.
In this case the :paramref:`order_hints` is required to ensure same order.
:param io_section: Definition contained under the ``inputs`` or ``outputs`` fields.
:param order_hints: Optional/partial I/O definitions hinting an order to sort unsorted-dict I/O.
:returns: I/O specified as list of dictionary definitions with preserved order (as best as possible).
"""
if isinstance(io_section, list):
return io_section
io_list = []
io_dict = OrderedDict()
if isinstance(io_section, dict) and not isinstance(io_section, OrderedDict) and order_hints and len(order_hints):
# convert the hints themselves to list if they are provided as mapping
if isinstance(order_hints, dict):
order_hints = [dict(id=key, **values) for key, values in order_hints.items()]
# pre-order I/O that can be resolved with hint when the specified I/O section is not ordered
io_section = deepcopy(io_section)
for hint in order_hints:
hint_id = get_field(hint, "identifier", search_variations=True)
if hint_id and hint_id in io_section: # ignore hint where ID could not be resolved
io_dict[hint_id] = io_section.pop(hint_id)
for hint in io_section:
io_dict[hint] = io_section[hint]
else:
io_dict = io_section
for io_id, io_value in io_dict.items():
# I/O value can be a literal type string or dictionary with more details at this point
# make it always detailed dictionary to avoid problems for later parsing
# this is also required to make the list, since all list items must have a matching type
if isinstance(io_value, str):
io_list.append({"type": io_value})
else:
io_list.append(io_value)
io_list[-1]["id"] = io_id
return io_list
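# Illustrative example (hypothetical I/O section; assumes "id" is a registered variation of
# "identifier" in WPS_FIELD_MAPPING):
#   normalize_ordered_io({"out": "File", "ref": {"type": "string"}},
#                        order_hints=[{"id": "ref"}, {"id": "out"}])
#   -> [{"type": "string", "id": "ref"}, {"type": "File", "id": "out"}]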
def merge_io_formats(wps_formats, cwl_formats):
# type: (List[ANY_Format_Type], List[ANY_Format_Type]) -> List[ANY_Format_Type]
"""
Merges I/O format definitions by matching ``mime-type`` field.
In case of conflict, preserve the WPS version which can be more detailed (for example, by specifying ``encoding``).
    Verifies if ``DEFAULT_FORMAT_MISSING`` was written to a single `CWL` format because no value was
    provided as input. In this case, *only* `WPS` formats are kept.
In the event that ``DEFAULT_FORMAT_MISSING`` was written to the `CWL` formats and that no `WPS` format was
specified, the :py:data:`DEFAULT_FORMAT` is returned.
:raises PackageTypeError: if inputs are invalid format lists
"""
if not (isinstance(wps_formats, (list, tuple, set)) and isinstance(cwl_formats, (list, tuple, set))):
raise PackageTypeError("Cannot merge formats definitions with invalid lists.")
if not len(wps_formats):
wps_formats = [DEFAULT_FORMAT]
if len(cwl_formats) == 1 and get_field(cwl_formats[0], DEFAULT_FORMAT_MISSING) is True:
return wps_formats
formats = []
cwl_fmt_dict = OrderedDict((get_field(fmt, "mime_type", search_variations=True), fmt) for fmt in cwl_formats)
wps_fmt_dict = OrderedDict((get_field(fmt, "mime_type", search_variations=True), fmt) for fmt in wps_formats)
for cwl_fmt in cwl_fmt_dict:
if cwl_fmt in wps_fmt_dict:
formats.append(wps_fmt_dict[cwl_fmt])
else:
formats.append(cwl_fmt_dict[cwl_fmt])
wps_fmt_only = set(wps_fmt_dict) - set(cwl_fmt_dict)
for wps_fmt in wps_fmt_only:
formats.append(wps_fmt_dict[wps_fmt])
return formats
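# Illustrative merge (hypothetical formats): a WPS format {"mime_type": "text/plain",
# "encoding": "UTF-8"} and a CWL format {"mime_type": "text/plain"} collapse into the more
# detailed WPS entry, while a CWL-only mime-type such as "application/json" is kept as-is.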
def merge_package_io(wps_io_list, cwl_io_list, io_select):
# type: (List[ANY_IO_Type], List[WPS_IO_Type], str) -> List[WPS_IO_Type]
"""
Merges corresponding parameters of different I/O definitions from CWL/WPS sources.
    Updates I/O definitions to be used for process creation and returned by ``GetCapabilities`` and ``DescribeProcess``.
    If WPS I/O definitions were provided during deployment, update the `CWL-to-WPS` converted I/O with the WPS I/O
complementary details. Otherwise, provide minimum field requirements that can be retrieved from CWL definitions.
Removes any deployment WPS I/O definitions that don't match any CWL I/O by ID.
Adds missing deployment WPS I/O definitions using expected CWL I/O IDs.
:param wps_io_list: list of WPS I/O (as json) passed during process deployment.
:param cwl_io_list: list of CWL I/O converted to WPS-like I/O for counter-validation.
:param io_select: :py:data:`WPS_INPUT` or :py:data:`WPS_OUTPUT` to specify desired WPS type conversion.
:returns: list of validated/updated WPS I/O for the process matching CWL I/O requirements.
"""
if not isinstance(cwl_io_list, list):
raise PackageTypeError("CWL I/O definitions must be provided, empty list if none required.")
if not wps_io_list:
wps_io_list = list()
wps_io_dict = OrderedDict((get_field(wps_io, "identifier", search_variations=True), deepcopy(wps_io))
for wps_io in wps_io_list)
cwl_io_dict = OrderedDict((get_field(cwl_io, "identifier", search_variations=True), deepcopy(cwl_io))
for cwl_io in cwl_io_list)
missing_io_list = [cwl_io for cwl_io in cwl_io_dict if cwl_io not in wps_io_dict] # preserve ordering
updated_io_list = list()
# WPS I/O by id not matching any converted CWL->WPS I/O are discarded
# otherwise, evaluate provided WPS I/O definitions and find potential new information to be merged
for cwl_id in cwl_io_dict:
cwl_io = cwl_io_dict[cwl_id]
updated_io_list.append(cwl_io)
if cwl_id in missing_io_list:
continue # missing WPS I/O are inferred only using CWL->WPS definitions
# enforce expected CWL->WPS I/O required parameters
cwl_io_json = cwl_io.json
wps_io_json = wps_io_dict[cwl_id]
cwl_identifier = get_field(cwl_io_json, "identifier", search_variations=True)
cwl_title = get_field(wps_io_json, "title", search_variations=True)
wps_io_json.update({
"identifier": cwl_identifier,
"title": cwl_title if cwl_title is not null else cwl_identifier
})
# apply type if WPS deploy definition was partial but can be retrieved from CWL
wps_io_json.setdefault("type", get_field(cwl_io_json, "type", search_variations=True))
# fill missing WPS min/max occurs in 'provided' json to avoid overwriting resolved CWL values by WPS default '1'
        # with 'default' field, this default '1' causes an erroneous result when 'min_occurs' should be "0"
        # with 'array' type, this default '1' causes an erroneous result when 'max_occurs' should be "unbounded"
cwl_min_occurs = get_field(cwl_io_json, "min_occurs", search_variations=True)
cwl_max_occurs = get_field(cwl_io_json, "max_occurs", search_variations=True)
wps_min_occurs = get_field(wps_io_json, "min_occurs", search_variations=True)
wps_max_occurs = get_field(wps_io_json, "max_occurs", search_variations=True)
if wps_min_occurs == null and cwl_min_occurs != null:
wps_io_json["min_occurs"] = cwl_min_occurs
if wps_max_occurs == null and cwl_max_occurs != null:
wps_io_json["max_occurs"] = cwl_max_occurs
wps_io = json2wps_io(wps_io_json, io_select)
# Retrieve any complementing fields (metadata, keywords, etc.) passed as WPS input.
# Enforce some additional fields to keep value specified by WPS if applicable.
        # These are only added here rather than in 'WPS_FIELD_MAPPING' to avoid erroneous detection by other functions.
# - Literal: 'default' value defined by 'data'
# - Complex: 'default' format defined by 'data_format'
# (see function 'json2wps_io' for detail)
for field_type in list(WPS_FIELD_MAPPING) + ["data", "data_format"]:
cwl_field = get_field(cwl_io, field_type)
wps_field = get_field(wps_io, field_type)
# override provided formats if different (keep WPS), or if CWL->WPS was missing but is provided by WPS
if _are_different_and_set(wps_field, cwl_field) or (wps_field is not null and cwl_field is null):
# list of formats are updated by comparing format items since information can be partially complementary
if field_type in ["supported_formats"]:
wps_field = merge_io_formats(wps_field, cwl_field)
# default 'data_format' must be one of the 'supported_formats'
# avoid setting something invalid in this case, or it will cause problem after
# note: 'supported_formats' must have been processed before
if field_type == "data_format":
wps_fmts = get_field(updated_io_list[-1], "supported_formats", search_variations=False, default=[])
if wps_field not in wps_fmts:
continue
set_field(updated_io_list[-1], field_type, wps_field)
return updated_io_list
|
python
|
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite.sequence.graphics"
__author__ = "Patrick Kunzmann"
__all__ = ["plot_dendrogram"]
import numpy as np
def plot_dendrogram(axes, tree, orientation="left", use_distances=True,
labels=None, label_size=None, color="black",
show_distance=True, **kwargs):
"""
Plot a dendrogram from a (phylogenetic) tree.
Parameters
----------
tree : Tree
The tree to be visualized
orientation : {'left', 'right', 'bottom', 'top'}, optional
The position of the root node in the plot
use_distances : bool, optional
If true, the `distance` attribute of the :class:`TreeNode`
        objects is used as the distance measure.
Otherwise the topological distance is used.
labels : list of str, optional
The leaf node labels.
The label of a leaf node is the entry at the position of its
`index` attribute.
label_size : float, optional
The font size of the labels
color : tuple or str, optional
        A *Matplotlib* compatible color that is used to draw the lines
of the dendrogram.
show_distance : bool, optional
If true, the distance from the root is shown on the
corresponding axis.
**kwargs
Additional parameters that are used to draw the dendrogram
lines.
"""
indices = tree.root.get_indices()
    # Map each leaf index to its position on the label axis
    leaf_dict = {indices[i]: i for i in range(len(indices))}
# Required for setting the plot limits
max_distance = 0
def _plot_node(node, distance):
"""
Draw the lines from the given node to its children.
Parameters
----------
        distance : float
            The distance of the node from the root
Returns
-------
pos : float
            The position of the node on the 'label' axis
"""
# The term 'distance'
# refers to positions along the 'distance' axis
# the term 'pos'
# refers to positions along the other axis
nonlocal max_distance
if max_distance < distance:
max_distance = distance
if node.is_leaf():
# No children -> no line can be drawn
return leaf_dict[node.index]
else:
children = node.children
if use_distances:
child_distances = [distance + c.distance for c in children]
else:
                # Use the topological distance of children to this node,
# which is always 1
child_distances = [distance + 1 for c in children]
child_pos = [
_plot_node(child, child_distance)
for child, child_distance in zip(children, child_distances)
]
# Position of this node is in the center of the child nodes
center_pos = sum(child_pos) / len(child_pos)
if orientation in ["left", "right"]:
                # Line connecting the children
axes.plot(
[distance, distance], [child_pos[0], child_pos[-1]],
color=color, marker="None", **kwargs
)
                # Lines depicting the distances of the children
for child_dist, pos in zip(child_distances, child_pos):
axes.plot(
[distance, child_dist], [pos, pos],
color=color, marker="None", **kwargs
)
elif orientation in ["bottom", "top"]:
                # Line connecting the children
axes.plot(
[child_pos[0], child_pos[-1]], [distance, distance],
color=color, marker="None", **kwargs
)
                # Lines depicting the distances of the children
for child_dist, pos in zip(child_distances, child_pos):
axes.plot(
[pos, pos], [distance, child_dist],
color=color, marker="None", **kwargs
)
else:
raise ValueError(f"'{orientation}' is not a valid orientation")
return center_pos
_plot_node(tree.root, 0)
if labels is not None:
# Sort labels using the order of indices in the tree
# A list cannot be directly indexed with a list,
# hence the conversion to a ndarray
labels = np.array(labels)[indices].tolist()
else:
labels = [str(i) for i in indices]
# The distance axis does not start at 0,
    # since the root line would not be properly rendered.
    # Hence the limit is set to a small fraction of the entire axis
    # beyond 0.
zero_limit = -0.01 * max_distance
if orientation == "left":
axes.set_xlim(zero_limit, max_distance)
axes.set_ylim(-1, len(indices))
axes.set_yticks(np.arange(0, len(indices)))
axes.set_yticklabels(labels)
axes.yaxis.set_tick_params(
left=False, right=False, labelleft=False, labelright=True,
labelsize=label_size
)
axes.xaxis.set_tick_params(
bottom=True, top=False, labelbottom=show_distance, labeltop=False,
labelsize=label_size
)
elif orientation == "right":
axes.set_xlim(max_distance, zero_limit)
axes.set_ylim(-1, len(indices))
axes.set_yticks(np.arange(0, len(indices)))
axes.set_yticklabels(labels)
axes.yaxis.set_tick_params(
left=False, right=False, labelleft=True, labelright=False,
labelsize=label_size
)
axes.xaxis.set_tick_params(
bottom=True, top=False, labelbottom=show_distance, labeltop=False,
labelsize=label_size
)
elif orientation == "bottom":
axes.set_ylim(zero_limit, max_distance)
axes.set_xlim(-1, len(indices))
axes.set_xticks(np.arange(0, len(indices)))
axes.set_xticklabels(labels)
axes.xaxis.set_tick_params(
bottom=False, top=False, labelbottom=False, labeltop=True,
labelsize=label_size
)
axes.yaxis.set_tick_params(
left=True, right=False, labelleft=show_distance, labelright=False,
labelsize=label_size
)
elif orientation == "top":
axes.set_ylim(max_distance, zero_limit)
axes.set_xlim(-1, len(indices))
axes.set_xticks(np.arange(0, len(indices)))
axes.set_xticklabels(labels)
axes.xaxis.set_tick_params(
bottom=False, top=False, labelbottom=True, labeltop=False,
labelsize=label_size
)
axes.yaxis.set_tick_params(
left=True, right=False, labelleft=show_distance, labelright=False,
labelsize=label_size
)
else:
raise ValueError(f"'{orientation}' is not a valid orientation")
axes.set_frame_on(False)
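# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). It assumes that
# `biotite.sequence.phylo.upgma` is available to build a guide tree from a
# symmetric distance matrix; adapt this to however the tree is actually created.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    from biotite.sequence.phylo import upgma
    distances = np.array([
        [0.0, 1.0, 3.0, 4.0],
        [1.0, 0.0, 3.0, 4.0],
        [3.0, 3.0, 0.0, 2.0],
        [4.0, 4.0, 2.0, 0.0],
    ])
    tree = upgma(distances)
    fig, ax = plt.subplots(figsize=(6.0, 3.0))
    plot_dendrogram(ax, tree, orientation="left", labels=["A", "B", "C", "D"])
    fig.tight_layout()
    plt.show()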
|
python
|
import boto3
from trp import Document
# Document
s3BucketName = "ki-textract-demo-docs"
documentName = "expense.png"
# Amazon Textract client
textract = boto3.client('textract')
# Call Amazon Textract
response = textract.analyze_document(
Document={
'S3Object': {
'Bucket': s3BucketName,
'Name': documentName
}
},
FeatureTypes=["TABLES"])
#print(response)
doc = Document(response)
def isFloat(value):
    try:
        float(value)
    except ValueError:
        return False
    return True
warning = ""
for page in doc.pages:
# Print tables
for table in page.tables:
for r, row in enumerate(table.rows):
itemName = ""
for c, cell in enumerate(row.cells):
print("Table[{}][{}] = {}".format(r, c, cell.text))
if(c == 0):
itemName = cell.text
elif(c == 4 and isFloat(cell.text)):
value = float(cell.text)
if(value > 1000):
warning += "{} is greater than $1000.".format(itemName)
if(warning):
print("\nReview needed:\n====================\n" + warning)
|
python
|
from itertools import chain
import attr
@attr.s(slots=True, cmp=False)
class KmerDataCollection(object):
_kmers_data = attr.ib()
num_colors = attr.ib(init=False)
_coverage = attr.ib(None)
_edges = attr.ib(None)
raw_kmer = attr.ib(None)
def __attrs_post_init__(self):
assert len(self._kmers_data) > 0
first = self._kmers_data[0]
assert all((first.kmer == k.kmer for k in self._kmers_data))
assert all((first.kmer_size == k.kmer_size for k in self._kmers_data))
self.num_colors = sum((k.num_colors for k in self._kmers_data))
@property
def kmer(self):
return self._kmers_data[0].kmer
@property
def kmer_size(self):
return self._kmers_data[0].kmer_size
@property
def coverage(self):
if self._coverage is None:
            coverage = list(chain.from_iterable(k.coverage for k in self._kmers_data))
            self._coverage = tuple(coverage)
return self._coverage
@property
def edges(self):
if self._edges is None:
self._edges = list(chain.from_iterable(k.edges for k in self._kmers_data))
return self._edges
def get_raw_kmer(self):
for kmer in self._kmers_data:
try:
return kmer.get_raw_kmer()
except AttributeError:
pass
raise ValueError('At least one kmer should have a raw kmer')
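# Usage sketch (illustrative only; `_FakeKmer` is a local stand-in carrying just the
# attributes accessed above, not a type from the original package):
if __name__ == "__main__":
    @attr.s(slots=True)
    class _FakeKmer(object):
        kmer = attr.ib()
        kmer_size = attr.ib()
        num_colors = attr.ib()
        coverage = attr.ib()
        edges = attr.ib()
    k1 = _FakeKmer(kmer="ACGTA", kmer_size=5, num_colors=1, coverage=[3], edges=["a"])
    k2 = _FakeKmer(kmer="ACGTA", kmer_size=5, num_colors=2, coverage=[1, 7], edges=["c", "g"])
    collection = KmerDataCollection([k1, k2])
    print(collection.num_colors)  # 3
    print(collection.coverage)    # (3, 1, 7)
    print(collection.edges)       # ['a', 'c', 'g']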
|
python
|
__author__ = "Frédéric BISSON"
__copyright__ = "Copyright 2022, Frédéric BISSON"
__credits__ = ["Frédéric BISSON"]
__license__ = "mit"
__maintainer__ = "Frédéric BISSON"
__email__ = "[email protected]"
from dietpdf.info.decode_objstm import decode_objstm
def create_stream():
return b"""11 0 12 54 13 107
<</Type/Font/Subtype/TrueType/FontDescriptor 12 0 R>>
<</Type/FontDescriptor/Ascent 891/FontFile2 22 0 R>>
<</Type/Font/Subtype/Type0/ToUnicode 10 0 R>>"""
def test_decode_objstm():
objects = decode_objstm(create_stream(), 18)
assert len(objects) == 3
assert objects[0].obj_num == 11
assert objects[1].obj_num == 12
assert objects[2].obj_num == 13
|
python
|
import json
import os
from unittest.mock import patch
from django.conf import settings
from django.utils import timezone as djangotime
from model_bakery import baker
from autotasks.models import AutomatedTask
from tacticalrmm.test import TacticalTestCase
class TestAPIv3(TacticalTestCase):
def setUp(self):
self.authenticate()
self.setup_coresettings()
self.agent = baker.make_recipe("agents.agent")
def test_get_checks(self):
url = f"/api/v3/{self.agent.agent_id}/checkrunner/"
# add a check
check1 = baker.make_recipe("checks.ping_check", agent=self.agent)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data["check_interval"], self.agent.check_interval) # type: ignore
self.assertEqual(len(r.data["checks"]), 1) # type: ignore
# override check run interval
check2 = baker.make_recipe(
"checks.ping_check", agent=self.agent, run_interval=20
)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data["check_interval"], 20) # type: ignore
self.assertEqual(len(r.data["checks"]), 2) # type: ignore
# Set last_run on both checks and should return an empty list
check1.last_run = djangotime.now()
check1.save()
check2.last_run = djangotime.now()
check2.save()
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data["check_interval"], 20) # type: ignore
self.assertFalse(r.data["checks"]) # type: ignore
# set last_run greater than interval
check1.last_run = djangotime.now() - djangotime.timedelta(seconds=200)
check1.save()
check2.last_run = djangotime.now() - djangotime.timedelta(seconds=200)
check2.save()
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data["check_interval"], 20) # type: ignore
        self.assertEqual(len(r.data["checks"]), 2)  # type: ignore
url = "/api/v3/Maj34ACb324j234asdj2n34kASDjh34-DESKTOPTEST123/checkrunner/"
r = self.client.get(url)
self.assertEqual(r.status_code, 404)
self.check_not_authenticated("get", url)
def test_sysinfo(self):
# TODO replace this with golang wmi sample data
url = "/api/v3/sysinfo/"
with open(
os.path.join(
settings.BASE_DIR, "tacticalrmm/test_data/wmi_python_agent.json"
)
) as f:
wmi_py = json.load(f)
payload = {"agent_id": self.agent.agent_id, "sysinfo": wmi_py}
r = self.client.patch(url, payload, format="json")
self.assertEqual(r.status_code, 200)
self.check_not_authenticated("patch", url)
def test_checkrunner_interval(self):
url = f"/api/v3/{self.agent.agent_id}/checkinterval/"
r = self.client.get(url, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(
r.json(),
{"agent": self.agent.pk, "check_interval": self.agent.check_interval},
)
# add check to agent with check interval set
check = baker.make_recipe(
"checks.ping_check", agent=self.agent, run_interval=30
)
r = self.client.get(url, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(
r.json(),
{"agent": self.agent.pk, "check_interval": 30},
)
# minimum check run interval is 15 seconds
check = baker.make_recipe("checks.ping_check", agent=self.agent, run_interval=5)
r = self.client.get(url, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(
r.json(),
{"agent": self.agent.pk, "check_interval": 15},
)
def test_run_checks(self):
# force run all checks regardless of interval
agent = baker.make_recipe("agents.online_agent")
baker.make_recipe("checks.ping_check", agent=agent)
baker.make_recipe("checks.diskspace_check", agent=agent)
baker.make_recipe("checks.cpuload_check", agent=agent)
baker.make_recipe("checks.memory_check", agent=agent)
baker.make_recipe("checks.eventlog_check", agent=agent)
for _ in range(10):
baker.make_recipe("checks.script_check", agent=agent)
url = f"/api/v3/{agent.agent_id}/runchecks/"
r = self.client.get(url)
self.assertEqual(r.json()["agent"], agent.pk)
self.assertIsInstance(r.json()["check_interval"], int)
self.assertEqual(len(r.json()["checks"]), 15)
@patch("apiv3.views.reload_nats")
def test_agent_recovery(self, reload_nats):
reload_nats.return_value = "ok"
r = self.client.get("/api/v3/34jahsdkjasncASDjhg2b3j4r/recover/")
self.assertEqual(r.status_code, 404)
agent = baker.make_recipe("agents.online_agent")
url = f"/api/v3/{agent.agent_id}/recovery/"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json(), {"mode": "pass", "shellcmd": ""})
reload_nats.assert_not_called()
baker.make("agents.RecoveryAction", agent=agent, mode="mesh")
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json(), {"mode": "mesh", "shellcmd": ""})
reload_nats.assert_not_called()
baker.make(
"agents.RecoveryAction",
agent=agent,
mode="command",
command="shutdown /r /t 5 /f",
)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(
r.json(), {"mode": "command", "shellcmd": "shutdown /r /t 5 /f"}
)
reload_nats.assert_not_called()
baker.make("agents.RecoveryAction", agent=agent, mode="rpc")
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json(), {"mode": "rpc", "shellcmd": ""})
reload_nats.assert_called_once()
def test_task_runner_get(self):
from autotasks.serializers import TaskGOGetSerializer
r = self.client.get("/api/v3/500/asdf9df9dfdf/taskrunner/")
self.assertEqual(r.status_code, 404)
# setup data
agent = baker.make_recipe("agents.agent")
script = baker.make_recipe("scripts.script")
task = baker.make("autotasks.AutomatedTask", agent=agent, script=script)
url = f"/api/v3/{task.pk}/{agent.agent_id}/taskrunner/" # type: ignore
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.assertEqual(TaskGOGetSerializer(task).data, r.data) # type: ignore
def test_task_runner_results(self):
from agents.models import AgentCustomField
r = self.client.patch("/api/v3/500/asdf9df9dfdf/taskrunner/")
self.assertEqual(r.status_code, 404)
# setup data
agent = baker.make_recipe("agents.agent")
task = baker.make("autotasks.AutomatedTask", agent=agent)
url = f"/api/v3/{task.pk}/{agent.agent_id}/taskrunner/" # type: ignore
# test passing task
data = {
"stdout": "test test \ntestest stdgsd\n",
"stderr": "",
"retcode": 0,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertTrue(AutomatedTask.objects.get(pk=task.pk).status == "passing") # type: ignore
# test failing task
data = {
"stdout": "test test \ntestest stdgsd\n",
"stderr": "",
"retcode": 1,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertTrue(AutomatedTask.objects.get(pk=task.pk).status == "failing") # type: ignore
# test collector task
text = baker.make("core.CustomField", model="agent", type="text", name="Test")
boolean = baker.make(
"core.CustomField", model="agent", type="checkbox", name="Test1"
)
multiple = baker.make(
"core.CustomField", model="agent", type="multiple", name="Test2"
)
# test text fields
task.custom_field = text # type: ignore
task.save() # type: ignore
        # test failing with stderr
data = {
"stdout": "test test \nthe last line",
"stderr": "This is an error",
"retcode": 1,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertTrue(AutomatedTask.objects.get(pk=task.pk).status == "failing") # type: ignore
# test saving to text field
data = {
"stdout": "test test \nthe last line",
"stderr": "",
"retcode": 0,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertEqual(AutomatedTask.objects.get(pk=task.pk).status, "passing") # type: ignore
self.assertEqual(AgentCustomField.objects.get(field=text, agent=task.agent).value, "the last line") # type: ignore
# test saving to checkbox field
task.custom_field = boolean # type: ignore
task.save() # type: ignore
data = {
"stdout": "1",
"stderr": "",
"retcode": 0,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertEqual(AutomatedTask.objects.get(pk=task.pk).status, "passing") # type: ignore
self.assertTrue(AgentCustomField.objects.get(field=boolean, agent=task.agent).value) # type: ignore
# test saving to multiple field with commas
task.custom_field = multiple # type: ignore
task.save() # type: ignore
data = {
"stdout": "this,is,an,array",
"stderr": "",
"retcode": 0,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertEqual(AutomatedTask.objects.get(pk=task.pk).status, "passing") # type: ignore
self.assertEqual(AgentCustomField.objects.get(field=multiple, agent=task.agent).value, ["this", "is", "an", "array"]) # type: ignore
        # test multiple with a single value
data = {
"stdout": "this",
"stderr": "",
"retcode": 0,
"execution_time": 3.560,
}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertEqual(AutomatedTask.objects.get(pk=task.pk).status, "passing") # type: ignore
self.assertEqual(AgentCustomField.objects.get(field=multiple, agent=task.agent).value, ["this"]) # type: ignore
|
python
|
"""
Copyright 2020 Vitaliy Zarubin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from sqlalchemy import or_
from sqlalchemy import and_
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, TIMESTAMP
from datetime import datetime, timedelta
from .model_user import ModelUser
Base = declarative_base()
class ModelUserToken(Base):
CONST_DAYS = 30
__tablename__ = 'users_tokens'
id = Column(Integer, primary_key=True)
user_id = Column(Integer)
token = Column(String)
message_token = Column(String)
language = Column(String)
uid = Column(String)
created_at = Column(TIMESTAMP)
updated_at = Column(TIMESTAMP)
@classmethod
def clear_old(cls, app):
app.db.execute('DELETE FROM {} WHERE updated_at < NOW() - INTERVAL {} DAY'.format(cls.__tablename__, cls.CONST_DAYS))
app.db.commit()
app.log.info('clear older tokens done')
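    # Illustrative effect (with the default CONST_DAYS = 30, the statement rendered above is):
    #   DELETE FROM users_tokens WHERE updated_at < NOW() - INTERVAL 30 DAY
    # i.e. any token row that has not been refreshed within the last 30 days is dropped.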
@classmethod
def find_by_day(cls, app, channel_id, days=15):
from .model_notification import ModelNotification
return app.db.query(ModelUserToken.user_id, ModelUserToken.message_token, ModelUserToken.language) \
.distinct(ModelUserToken.message_token) \
.filter(ModelUserToken.updated_at < (datetime.now() - timedelta(days=days))) \
.join(ModelUser, ModelUser.id == ModelUserToken.user_id) \
.outerjoin(ModelNotification, and_(ModelNotification.channel_id == channel_id, ModelNotification.user_id == ModelUserToken.user_id)) \
.filter(ModelUser.enabled == 1) \
.filter(ModelNotification.channel_id == None) \
.all()
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
class Connectable(object):
def __init__(self,
name=None,
auto_terminate=None):
self.uuid = uuid.uuid4()
if name is None:
self.name = "node_of_" + str(self.uuid)
else:
self.name = name
if auto_terminate is None:
self.auto_terminate = []
else:
self.auto_terminate = auto_terminate
self.connections = {}
self.out_proc = self
self.drop_empty_flowfiles = False
def connect(self, connections):
for rel in connections:
# Ensure that rel is not auto-terminated
if rel in self.auto_terminate:
del self.auto_terminate[self.auto_terminate.index(rel)]
# Add to set of output connections for this rel
if rel not in self.connections:
self.connections[rel] = []
self.connections[rel].append(connections[rel])
return self
def get_name(self):
return self.name
def set_name(self, name):
self.name = name
def get_uuid(self):
return self.uuid
def set_uuid(self, uuid):
self.uuid = uuid
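# Usage sketch (illustrative; the 'success'/'failure' relationship names below are
# arbitrary examples, not values mandated by this module):
if __name__ == "__main__":
    generate = Connectable(name="generate", auto_terminate=["failure"])
    consume = Connectable(name="consume")
    generate.connect({"success": consume, "failure": consume})
    # "failure" is no longer auto-terminated because it now has an outgoing connection
    print(generate.auto_terminate)        # []
    print(sorted(generate.connections))   # ['failure', 'success']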
|
python
|
class dtype:
def __eq__(self, other):
return self.__name__ == other.__name__
class complex128(dtype):
precedence = 10
class complex64(dtype):
precedence = 9
class float64(dtype):
precedence = 8
class float32(dtype):
precedence = 7
class float16(dtype):
precedence = 6
# class bfloat16(dtype):
# pass
class int64(dtype):
precedence = 5
class int32(dtype):
precedence = 4
class int16(dtype):
precedence = 3
class int8(dtype):
precedence = 2
class uint8(dtype):
precedence = 1
class bool(dtype):
precedence = 0
float = float32
double = float64
cfloat = complex64
cdouble = complex128
half = float16
short = int16
int = int32
long = int64
floatTypes = [float64, float32, float16]
floatDefault = float32
intDefault = int64
def maxDtype(*tensors):
maxPrecedence = -1
dtype = None
for tensor in tensors:
if isinstance(tensor.dtype, str):
raise Exception("tensor.dtype is " + tensor.dtype)
if tensor.dtype.precedence > maxPrecedence:
maxPrecedence = tensor.dtype.precedence
dtype = tensor.dtype
return dtype
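# Usage sketch (illustrative; `_Tensor` is a minimal stand-in exposing only the `dtype`
# attribute that maxDtype inspects):
if __name__ == "__main__":
    class _Tensor:
        def __init__(self, dtype):
            self.dtype = dtype
    result = maxDtype(_Tensor(int32), _Tensor(float32), _Tensor(int64))
    print(result is float32)  # True: precedence 7 beats int64 (5) and int32 (4)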
|
python
|
"""
Global mail object used to send notification emails.
"""
from flask_mail import Mail, Message
from flask import current_app
mail = Mail()
def send_mail(email, output):
"""sends email"""
header = "Your analysis is ready"
content = '''<div style='font-size:14px;'>\
Your requested analysis is ready and available at:<br />\
<a href={0}{1}{2}>{0}{1}{2}</a>\
</div>'''\
.format(current_app.config['APP_DOMAIN'], '/explore/biomarker/result/', output['analysis_id'][0])
footer = '''<div style='font-size:12px;'>\
Thank you for using PredictIO, powered by <a href=https://www.pmgenomics.ca/bhklab/>BHK Lab</a>.\
</div>'''
if(output["error"][0]):
header = "Error occurred during analysis"
content = '''<div style='font-size:14px;'>\
Error occurred during your analysis.<br />\
Please contact <b>[email protected]</b> by citing your analysis ID: {0}\
</div>'''.format(output['analysis_id'][0])
body = "<div style='font-family:arial;'>{0}<br /><br />{1}</div>".format(content, footer)
msg = Message("[PredictIO] " + header, sender='[email protected]', recipients=[email])
msg.html = body
print('sending email')
mail.send(msg)
print('mail sent')
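# Usage sketch (illustrative; the dictionary below only mimics the two fields accessed by
# send_mail, and the call must happen inside an application context with Flask-Mail configured):
#   output = {"analysis_id": ["abc123"], "error": [False]}
#   send_mail("[email protected]", output)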
|
python
|
import argparse
import sys
from typing import Callable
from typing import List
from typing import Optional
from . import audit
from . import baseline
from . import filters
from . import plugins
from . import scan
from ...settings import get_settings
from .common import initialize_plugin_settings
from detect_secrets.__version__ import VERSION
class ParserBuilder:
def __init__(self) -> None:
self._parser = argparse.ArgumentParser()
self._post_processors: List[Callable[[argparse.Namespace], None]] = [
initialize_plugin_settings,
]
self.add_default_options()
def add_default_options(self) -> 'ParserBuilder':
self._parser.add_argument(
'-v',
'--verbose',
action='count',
help='Verbose mode.',
)
self._parser.add_argument(
'--version',
action='version',
version=VERSION,
help='Display version information.',
)
self._parser.add_argument(
'-C',
metavar='<path>',
dest='custom_root',
nargs=1,
default=[''],
help=(
'Run as if detect-secrets was started in <path>, rather than in the current '
'working directory.'
),
)
self._parser.add_argument(
'-c',
'--cores',
dest='num_cores',
nargs=1,
type=int,
default=[None],
help=(
'Specify the number of cores to use for parallel processing. Defaults to '
'using the max cores on the current host.'
),
)
return self
def add_console_use_arguments(self) -> 'ParserBuilder':
subparser = self._parser.add_subparsers(dest='action')
self._post_processors = [_assert_action_is_specified, *self._post_processors]
parser = scan.add_scan_action(subparser)
# NOTE: This ordering is important.
# 1. Baselines will be handled accordingly, and the global settings object will
# be initialized with a certain state.
# 2. Scan options can override this (e.g. --force-use-all-plugins)
# 3. Plugin options can override this again (e.g. disabling plugins, or different configs)
#
# In a similar way, the filter options must come after the settings object is initialized.
self._post_processors.append(scan.parse_args)
self.add_plugin_options(parser, action_filter='scan')
self.add_filter_options(parser, action_filter='scan')
# NOTE: scan allows a baseline, but we need to override the first post_processor
self._post_processors[1] = baseline.parse_args
audit.add_audit_action(subparser)
self._post_processors.append(audit.parse_args)
return self
def add_pre_commit_arguments(self) -> 'ParserBuilder':
self._parser.add_argument(
'filenames',
nargs='*',
help='Filenames to check.',
)
self.add_baseline_options(
help=(
'Explicitly ignore secrets through a baseline generated by `detect-secrets scan`'
),
)
self.add_plugin_options()
self.add_filter_options()
return self
def add_baseline_options(self, help: str = '') -> 'ParserBuilder':
baseline.add_baseline_option(self._parser, help=help)
for index, processor in enumerate(self._post_processors):
if processor == initialize_plugin_settings:
self._post_processors[index] = baseline.parse_args
break
return self
def add_plugin_options(
self,
parser: Optional[argparse.ArgumentParser] = None,
action_filter: Optional[str] = None,
) -> 'ParserBuilder':
if not parser:
parser = self._parser
plugins.add_plugin_options(parser)
if action_filter:
self._post_processors.append(
_action_specific_post_processor(action_filter, plugins.parse_args),
)
else:
self._post_processors.append(plugins.parse_args)
return self
def add_filter_options(
self,
parser: Optional[argparse.ArgumentParser] = None,
action_filter: Optional[str] = None,
) -> 'ParserBuilder':
if not parser:
parser = self._parser
filters.add_filter_options(parser)
if action_filter:
self._post_processors.append(
_action_specific_post_processor(action_filter, filters.parse_args),
)
else:
self._post_processors.append(filters.parse_args)
return self
def parse_args(self, argv: Optional[List[str]] = None) -> argparse.Namespace:
args = self._parser.parse_args(argv)
try:
for processor in self._post_processors:
processor(args)
except argparse.ArgumentTypeError as e:
# TODO: Better help text?
self._parser.print_usage(sys.stderr)
print(f'error: {str(e)}', file=sys.stderr)
sys.exit(1)
args.custom_root = args.custom_root[0]
if args.custom_root:
# This filter assumes current working directory, which will fail if we're running
# from a different directory.
# TODO: Maybe adjust this so that it is directory agnostic?
get_settings().disable_filters('detect_secrets.filters.common.is_invalid_file')
# Abide by the Principle of Least Surprise, and have the default value be the
# custom root directory itself.
if args.path == ['.']:
args.path = [args.custom_root]
args.num_cores = args.num_cores[0]
return args
def _assert_action_is_specified(args: argparse.Namespace) -> None:
if not args.action:
raise argparse.ArgumentTypeError('Unspecified action.')
def _action_specific_post_processor(action: str, processor: Callable) -> Callable:
def wrapped(args: argparse.Namespace) -> None:
if args.action != action:
return
processor(args)
return wrapped
|
python
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""select_run"""
import numpy as np
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from tests.common.test_op import select
from tests.common.base import get_rtol_atol
from tests.common.gen_random import random_gaussian
def select_run(shape_cond, shape_x, dtype_cond, dtype_x, attrs=None):
"""select_run implementation"""
if attrs is None:
attrs = {}
mod = utils.op_build_test(select.select, [shape_cond, shape_x, shape_x], [dtype_cond, dtype_x, dtype_x],
kernel_name='select', op_attrs=[], attrs=attrs)
args, exp_output, cond, x1, x2 = gen_data(shape_cond, shape_x, dtype_cond, dtype_x)
acu_output = utils.mod_launch(mod, args, expect=exp_output)
# compare result
rtol, atol = get_rtol_atol("select", dtype_x)
testcase_result = compare_tensor(acu_output, exp_output, rtol=rtol, atol=atol, equal_nan=True)
return [cond, x1, x2], acu_output, exp_output, testcase_result
def gen_data(shape_cond, shape_x, dtype_cond, dtype_x):
# generate data
cond = np.random.randint(0, 2, shape_cond).astype(dtype_cond)
x1 = random_gaussian(shape_x, miu=10, sigma=0.3).astype(dtype_x)
x2 = random_gaussian(shape_x, miu=10, sigma=0.3).astype(dtype_x)
exp_output = np.where(cond, x1, x2)
# inputs and output to hold the data
output = np.full(shape_x, np.nan, dtype_x)
args = [cond, x1, x2, output]
return args, exp_output, cond, x1, x2
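# Worked illustration of the reference computation above (NumPy only, hypothetical values):
#   cond = np.array([[1, 0], [0, 1]], dtype="int8")
#   x1   = np.array([[1., 2.], [3., 4.]], dtype="float16")
#   x2   = np.array([[9., 9.], [9., 9.]], dtype="float16")
#   np.where(cond, x1, x2)  ->  [[1., 9.], [9., 4.]]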
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 9 02:35:02 2018
@author: elvex
"""
"""Boite à outils de manipulation des base de données de tweets. """
#import json
import pandas as pd
import txt_analysis as TA
from math import log10
from glob import glob
from os.path import abspath
from re import split
from math import pi
from numpy import cos, sin
import datetime
def json2pd(adr):
"""
    Converts a JSON file of tweets into a pandas DataFrame.
    Input: the path of the JSON file
    Output: the pandas DataFrame
"""
with open(adr, 'r') as f:
r = f.read()
bdd = pd.read_json(r, orient = 'records', lines = True)
    bdd = bdd['user'].apply(pd.Series).join(bdd.drop('user', axis=1),
                                            how="left", lsuffix="_profile", rsuffix="_tweet")
return bdd
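# Illustrative result (hypothetical record): for a tweet like
#   {"id": 2, "lang": "fr", "user": {"id": 1, "lang": "fr"}}
# the nested 'user' object is expanded into its own columns, and the overlapping names are
# suffixed, giving columns such as id_profile, lang_profile, id_tweet and lang_tweet.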
def filterBYlanguage(bdd, lan = 'fr'):
bdd = bdd[(bdd.lang_tweet == lan)]
return bdd
def keepNdropPD_txt(bdd):
bdd = bdd.loc[:, ["id_profile", "text"]]
return bdd
def aggregate_bddFiltered(bdd):
grp = bdd.groupby("id_profile")
bdd = grp.agg(["count", lambda x: "\n".join(x)])
bdd.columns = bdd.columns.droplevel(0)
bdd = bdd.rename(columns={ bdd.columns[0]: "counting", bdd.columns[1]: "text"})
return bdd
def json2bdd_agreg(json):
return aggregate_bddFiltered(keepNdropPD_txt(filterBYlanguage(json2pd(json))))
#bdd = aggregate_bddFiltered(keepNdropPD_txt(filterBYlanguage(json2pd(file))))
def concat_bdd_aggreg(bdd1, bdd2):
bdd21 = bdd1.counting.add(bdd2.counting, fill_value=0)
bdd22 = bdd1.text.add(bdd2.text, fill_value="")
bdd2 = pd.concat([bdd21, bdd22], axis=1)
return bdd2
def concat_dir(dirname):
path = abspath(dirname)
lst = glob(path+"/*.json")
bdd = json2bdd_agreg(lst[0])
for i in range(1, len(lst)):
try:
bdd2 = json2bdd_agreg(lst[i])
bdd = concat_bdd_aggreg(bdd, bdd2)
except ValueError as e:
print("Erreur '{}' sur l'étape {}".format(e, i))
continue
return bdd
def drop_profile(bdd, n = 2):
return bdd.loc[bdd["counting"] >= n, "text"]
def bdd2bow(bdd):
"""
    Turn a pandas DataFrame of tweets into a bag-of-words DataFrame,
    where each column corresponds to a specific word
    and each row to a user,
    with each cell holding the number of occurrences of the word in the user's tweets.
    Input: the pandas DataFrame
    Output: the bag-of-words DataFrame
"""
T = bdd["text"] if isinstance(bdd, pd.core.frame.DataFrame) else bdd
T = T.map(TA.formate_txt)
T = T.map(TA.bow)
bow = pd.DataFrame.from_dict(T.tolist())
bow = bow.fillna(0)
return bow
def filter_bow(bow, mini = 1):
"""
    Filter a bag-of-words DataFrame by requiring a minimum number
    of tweets in which each word must appear.
    Input:
        bow: pandas bag-of-words DataFrame
        mini: integer giving the minimum
    Output:
        bow_f: the filtered bag-of-words DataFrame
"""
test = (((bow > 0).sum()) >= mini).values
bow_f = bow.iloc[:, test]
return bow_f
def tf_idf(bow, lst = [], fonction = "idfi"):
"""
    Apply a tf-idf metric to a bag-of-words DataFrame to weight the word scores.
    Input:
        bow: bag-of-words DataFrame
        lst: list of words to keep in the DataFrame; if empty, all words are kept
        fonction: weighting function:
            idfn => no weighting
            idfi => accounts for the number of tweets and word usage frequency
            idfl => like idfi but guards against log10(0)
            idfs => like idfi but with a different guard against log10(0)
            idff => simply accounts for word usage frequency
            idfp => accounts for the number of tweets and word usage frequency
"""
dico = {"idfi" : idfi,
"idfn" : idfn,
"idfl" : idfl,
"idfp" : idfp,
"idff" : idff,
"idfs" : idfs}
D, df = len(bow), (bow > 0).sum()
    f_poids = dico.get(fonction, idfi)  # default to the idfi function itself, not the string "idfi"
idf = bow * f_poids(D, df)
    if len(lst) > 0: idf = intersection(idf, lst)  # keep only the requested words, preserving the weights
return idf
def intersection(bdd, lst):
"""Renvoie les colonnes d'une bdd pandas qui correspondent aux mots entrés.
Entrées :
bdd : panda dataframe
lst : liste de mots
Sortie :
nouvelle dataframe pandas
"""
s = set(map(str.lower, lst))
s = s.intersection(set(bdd.columns.values.tolist()))
return bdd.loc[:, list(s)]
def idfi(D, df):
return (D/df).apply(log10)
def idfn(D, df):
return 1
def idfl(D, df):
return (D/df + 1).apply(log10)
def idff(D, df):
return 1/df
def idfp(D, df):
return ((D - df) / df).apply(log10)
def idfs (D, df):
return (((D + 1) / df).apply(log10)) ** 2
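# Illustrative sketch (toy data, not from any real corpus): mirrors the idfi weighting
# that tf_idf() applies internally, i.e. bow * log10(D / df).
def _tf_idf_example():
    toy_bow = pd.DataFrame({"bonjour": [2, 0, 1], "merci": [1, 1, 0]})
    D, df = len(toy_bow), (toy_bow > 0).sum()
    return toy_bow * idfi(D, df)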
def df2np(df):
"""Convertit un dataframe panda en matrice, renvoie cette matrice et le vecteur d'indice.
Entrée :
df, panda dataframe
Sortie :
idx : numpy array des indices de la dataframe
mtx : numpy array des valeurs de la dataframe
"""
mtx = df.values
idx = df.index.values
return (idx, mtx)
def dateBDD(bdd):
dico_month = {1 : 31, 2 : 28, 3 : 31, 4 : 30, 5 : 31, 6 : 30, 7 : 31,
                  8 : 31, 9 : 30, 10 : 31, 11 : 30, 12 : 31}
bdd = bdd.loc[:, ['id_tweet', 'created_at_tweet']].set_index('id_tweet')
bdd.created_at_tweet = bdd.created_at_tweet.apply(lambda x: list(map(int, split('[: -]', str(x)))))
bdd["hour"] = bdd.created_at_tweet.apply(lambda lst: (lst[-3] + lst[-2] / 60 + lst[-1] / (60**2)) * (pi/12))
bdd["hour_X"] = bdd.hour.apply(cos)
bdd["hour_Y"] = bdd.hour.apply(sin)
bdd["day_X"] = bdd.created_at_tweet.apply(lambda x: cos(x[2] * pi / 6))
bdd["day_Y"] = bdd.created_at_tweet.apply(lambda x: sin(x[2] * pi / 6))
bdd["dayweek"] = bdd.created_at_tweet.apply(lambda x: datetime.date(x[0], x[1], x[2]).weekday())
bdd["dayweek_X"] = bdd.dayweek.apply(lambda x: cos(x * 2 * pi / 7))
bdd["dayweek_Y"] = bdd.dayweek.apply(lambda x: sin(x * 2 * pi / 7))
bdd["month_X"] = bdd.created_at_tweet.apply(lambda x: cos(x[1] * pi / dico_month[x[2]]))
bdd["month_Y"] = bdd.created_at_tweet.apply(lambda x: sin(x[1] * pi / dico_month[x[2]]))
bdd["year"] = bdd.created_at_tweet.apply(lambda x: x[0])
bdd.drop(labels = ["created_at_tweet", "hour", "dayweek"], axis = 1, inplace = True)
return bdd
def json2dateBDD(json):
return dateBDD(filterBYlanguage(json2pd(json)))
def date_dir(dirname):
path = abspath(dirname)
lst = glob(path+"/*.json")
bdd = json2dateBDD(lst[0])
for i in range(1, len(lst)):
try:
bdd2 = json2dateBDD(lst[i])
bdd = pd.concat([bdd, bdd2], axis=0)
except ValueError as e:
print("Erreur '{}' sur l'étape {}".format(e, i))
continue
return bdd
def print_means_words(km, col, lim = 10):
means = km.means
D = pd.DataFrame(means, columns=col)
for i in range(km.nb_cluster):
lst = list(D.sort_values(by = i, axis = 1).iloc[i, :lim].index.values)
n = km.data.index[(km.grp[:, 1] == i)].size
print("Les {} mots représentatif du groupe {} composé de {} individus sont :\n\t {}".format(lim, i, n, ' - '.join(lst)))
return None
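# Hedged end-to-end sketch (hypothetical directory name): a typical pipeline using the helpers above.
#   bdd = concat_dir("tweets_json")               # aggregate every *.json in the folder, per user
#   texts = drop_profile(bdd, n=2)                # keep only users with at least 2 tweets
#   weights = tf_idf(filter_bow(bdd2bow(texts)))  # bag of words, filtered then idfi-weighted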
|
python
|
from jmetal.algorithm.singleobjective.simulated_annealing import SimulatedAnnealing
from jmetal.operator import PolynomialMutation
from jmetal.problem.bbob import bbob
from jmetal.util.observer import ProgressBarObserver
from jmetal.util.termination_criterion import StoppingByEvaluations
if __name__ == '__main__':
max_evaluations = 1000000
    termination_criterion = StoppingByEvaluations(max=max_evaluations)
problem = bbob.BBOB()
algorithm = SimulatedAnnealing(
problem=problem,
mutation=PolynomialMutation(probability=0.1, distribution_index=20.0),
        termination_criterion=termination_criterion
)
    progress = ProgressBarObserver(max=max_evaluations)
algorithm.observable.register(progress)
algorithm.run()
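    # Hedged follow-up (assumes jMetalPy's standard get_result() accessor on single-objective algorithms):
    result = algorithm.get_result()
    print('Best objective value: {}'.format(result.objectives[0]))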
# Random Trials
|
python
|
"""
Packaging setup for ledcontroller
"""
# pylint: disable=line-too-long
import os.path
from codecs import open as codecs_open
from setuptools import setup
with codecs_open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
setup(
name='ledcontroller',
version='1.3.0',
description='Controller library for limitlessled/easybulb/milight Wi-Fi LEDs',
long_description=LONG_DESCRIPTION,
url='https://github.com/ojarva/python-ledcontroller',
author='Olli Jarva',
author_email='[email protected]',
license='BSD',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Topic :: Home Automation',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: PyPy',
],
keywords='applight applamp dekolight easybulb ilight limitlessled led ledme milight wifi',
packages=["ledcontroller"],
install_requires=[],
test_suite="tests",
extras_require={
'dev': ['twine', 'wheel'],
},
)
|
python
|
'''
Experiment: RTC real-time clock
Version: v1.0
Date: 2020.12
Author: 01Studio
Description: display the time on the LCD
Community: www.01studio.org
'''
# Import the required modules
import pyb
from tftlcd import LCD43M
# Define commonly used colors
RED = (255,0,0)
GREEN = (0,255,0)
BLUE = (0,0,255)
BLACK = (0,0,0)
WHITE = (255,255,255)
########################
# Build and initialize the 4.3-inch LCD object
########################
d = LCD43M(portrait=1)  # Default orientation
d.fill(WHITE)  # Fill with white
# Initialize the RTC
rtc = pyb.RTC()
# Define display string lists for the weekday and the time (hours/minutes/seconds)
week = ['Mon', 'Tues', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun']
time = ['', '', '']
# Display the title
d.printStr('01Studio RTC', 100, 10, BLACK, size=4)
# Configure the time on first power-up; the fields are, in order: year, month, day, weekday, hours, minutes, seconds, subseconds. A simple check is done here: if the current year is wrong, the time is reset. Adjust this to your own needs.
if rtc.datetime()[0] != 2019:
rtc.datetime((2019, 4, 1, 1, 0, 0, 0, 0))
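# Alternative sketch (not used below): zero-pad hours/minutes/seconds in one
# str.format call instead of the manual "0"-prefix loop inside the main loop.
def format_hms(dt):
    return '{:02d}:{:02d}:{:02d}'.format(dt[4], dt[5], dt[6])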
while True:
    datetime = rtc.datetime()  # Get the current time
    # Display the date; strings can be concatenated directly with "+"
d.printStr(str(datetime[0]) + '-' + str(datetime[1]) + '-' + str(datetime[2]) + ' ' + week[(datetime[3] - 1)], 10, 100, BLACK, size=4)
    # When displaying the time, check whether hours/minutes/seconds are less than 10; if so, prepend a "0" for a nicer display
for i in range(4, 7):
if datetime[i] < 10:
time[i - 4] = "0"
else:
time[i - 4] = ""
    # Display the time
d.printStr(time[0] + str(datetime[4]) + ':' + time[1] + str(datetime[5]) + ':' + time[2] + str(datetime[6]), 10, 200, BLACK, size=4)
    pyb.delay(300)  # Delay 300 ms
|
python
|
from __future__ import division, absolute_import, print_function
from .jdx import jdx_reader, jdx_file_reader, JdxFile
__all__ = ["jdx"]
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, shakeel vaim and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Add New Properties')
class TestAddNewProperties(unittest.TestCase):
pass
|
python
|
from datetime import datetime
from city_scrapers_core.spiders import CityScrapersSpider
from dateutil.parser import parse as dateparse
from city_scrapers.mixins.wayne_commission import WayneCommissionMixin
class WayneBuildingAuthoritySpider(WayneCommissionMixin, CityScrapersSpider):
name = "wayne_building_authority"
agency = "Wayne County Government"
start_urls = ["https://www.waynecounty.com/boards/buildingauthority/meetings.aspx"]
meeting_name = "Building Authority"
# Override the mixin for any unique attributes.
location = {
"name": "6th Floor, Guardian Building",
"address": "500 Griswold St, Detroit, MI 48226",
}
def _parse_entries(self, response):
current_year = datetime.now().year
current_year_non_empty_rows = response.xpath(
'//section[contains(.,"%s")]//tbody/tr[child::td/text()]' % current_year
)
return current_year_non_empty_rows
def _parse_start(self, item):
"""
Parse start date and time.
"""
# Strong text indicates a replacement meeting date
strong_text = item.xpath(".//td[2]/strong/text()").extract_first()
if strong_text is not None:
date_str = strong_text
else:
date_str = item.xpath(".//td[2]/text()").extract_first()
time_str = item.xpath(".//td[3]/text()").extract_first()
return dateparse("{0} {1}".format(date_str, time_str))
|
python
|
"""Base segment definitions.
Here we define:
- BaseSegment. This is the root class for all segments, and is
designed to hold other subsegments.
- RawSegment. This is designed to be the root segment, without
any children, and the output of the lexer.
- UnparsableSegment. A special wrapper to indicate that the parse
function failed on this block of segments and to prevent further
analysis.
These are the fundamental building blocks of the rest of the parser.
"""
import logging
from io import StringIO
from benchit import BenchIt
from .match import MatchResult, curtail_string, join_segments_raw
from ..errors import SQLLintError
def verbosity_logger(msg, verbosity=0, level='info', v_level=3):
"""Log or print based on configuration."""
if verbosity >= v_level:
print(msg)
else:
# Should be mostly equivalent to logging.info(msg)
getattr(logging, level)(msg)
def parse_match_logging(grammar, func, msg, parse_context, v_level, **kwargs):
"""Log in a particular consistent format for use while matching."""
# If we can avoid this, bank the performance increase
if parse_context.verbosity <= 1:
return
# Otherwise carry on...
symbol = kwargs.pop('symbol', '')
s = "[PD:{0} MD:{1}]\t{2:<50}\t{3:<20}\t{4:<4}".format(
parse_context.parse_depth, parse_context.match_depth,
('.' * parse_context.match_depth) + str(parse_context.match_segment),
"{0}.{1} {2}".format(grammar, func, msg),
symbol
)
if kwargs:
s += "\t[{0}]".format(
', '.join(
"{0}={1}".format(
k,
repr(v) if isinstance(v, str) else v
) for k, v in kwargs.items()
)
)
verbosity_logger(s, parse_context.verbosity, v_level=v_level)
def frame_msg(msg):
"""Frame a message with hashes so that it covers five lines."""
return "###\n#\n# {0}\n#\n###".format(msg)
def check_still_complete(segments_in, matched_segments, unmatched_segments):
"""Check that the segments in are the same as the segments out."""
initial_str = join_segments_raw(segments_in)
current_str = join_segments_raw(
matched_segments + unmatched_segments
)
if initial_str != current_str:
raise RuntimeError(
"Dropped elements in sequence matching! {0!r} != {1!r}".format(
initial_str, current_str))
class ParseBlacklist:
"""Acts as a cache to stop unnecessary matching."""
def __init__(self):
self._blacklist_struct = {}
def _hashed_version(self):
return {
k: {hash(e) for e in self._blacklist_struct[k]}
for k in self._blacklist_struct
}
def check(self, seg_name, seg_tuple):
"""Check this seg_tuple against this seg_name.
Has this seg_tuple already been matched
unsuccessfully against this segment name.
"""
if seg_name in self._blacklist_struct:
if seg_tuple in self._blacklist_struct[seg_name]:
return True
return False
def mark(self, seg_name, seg_tuple):
"""Mark this seg_tuple as not a match with this seg_name."""
if seg_name in self._blacklist_struct:
self._blacklist_struct[seg_name].add(seg_tuple)
else:
self._blacklist_struct[seg_name] = {seg_tuple}
def clear(self):
"""Clear the blacklist struct."""
self._blacklist_struct = {}
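# Illustrative sketch (hypothetical segment names): how the blacklist short-circuits repeated failed matches.
def _blacklist_example():
    bl = ParseBlacklist()
    seg_tuple = ('raw', 'SELECT')
    assert not bl.check('select_statement', seg_tuple)   # nothing recorded yet
    bl.mark('select_statement', seg_tuple)               # record the failed match
    assert bl.check('select_statement', seg_tuple)       # subsequent checks now hit the cache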
class ParseContext:
"""The context for parsing. It holds configuration and rough state.
We expect that an object (or copy of this object) will be passed
around rather than the individual variables for parse and match depth
as before.
"""
__slots__ = ['match_depth', 'parse_depth', 'verbosity', 'dialect', 'match_segment', 'recurse', 'blacklist']
def __init__(self, dialect=None, verbosity=0, match_depth=0, parse_depth=0, match_segment=None, recurse=True, blacklist=None):
# Write all the variables in a DRY way. Yes it's a bit convoluted. Sorry.
for k in self.__slots__:
setattr(self, k, locals()[k])
# Initialise a blacklist struct if one is not present.
if getattr(self, 'blacklist') is None:
setattr(self, 'blacklist', ParseBlacklist())
def copy(self, incr=None, decr=None, **kwargs):
"""Make a copy of the parse context, optionally with some edited variables."""
current_vals = {k: getattr(self, k) for k in self.__slots__}
current_vals.update(kwargs or {})
# Increment
if isinstance(incr, str):
current_vals[incr] += 1
elif incr:
for k in incr:
current_vals[k] += 1
# Decrement
if isinstance(decr, str):
current_vals[decr] -= 1
elif decr:
for k in decr:
current_vals[k] -= 1
# Return
return self.__class__(**current_vals)
@classmethod
def from_config(cls, config):
"""Construct a `ParseContext` from a `FluffConfig`."""
return cls(dialect=config.get('dialect_obj'), recurse=config.get('recurse'))
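# Illustrative sketch: copy a context while bumping the match depth, as grammars do when recursing.
def _parse_context_example():
    ctx = ParseContext(verbosity=0)
    deeper = ctx.copy(incr='match_depth')
    return ctx.match_depth, deeper.match_depth  # -> (0, 1)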
class BaseSegment:
"""The base segment element.
This defines the base element which drives both Lexing, Parsing and Linting.
A large chunk of the logic which defines those three operations are centered
here. Much of what is defined in the BaseSegment is also used by it's many
subclasses rather than directly here.
For clarity, the `BaseSement` is mostly centered around a segment which contains
other subsegments. For segments which don't have *children*, refer to the `RawSegment`
class (which still inherits from this one).
Segments are used both as instances to hold chunks of text, but also as classes
themselves where they function a lot like grammars, and return instances of themselves
when they match. The many classmethods in this class are usually to serve their
purpose as a matcher.
"""
# `type` should be the *category* of this kind of segment
type = 'base'
parse_grammar = None
match_grammar = None
grammar = None
comment_seperate = False
is_whitespace = False
    optional = False  # NB: See the sequence grammar for details
is_segment = True
_name = None
_func = None # Available for use by subclasses (e.g. the LambdaSegment)
is_meta = False
@property
def name(self):
"""The name of this segment.
The reason for two routes for names is that some subclasses
        might want to override the name rather than just deriving it from
        the class name.
Name should be specific to this kind of segment, while `type`
should be a higher level descriptor of the kind of segment.
For example, the name of `+` is 'plus' but the type might be
'binary_operator'.
"""
return self._name or self.__class__.__name__
@property
def is_expandable(self):
"""Return true if it is meaningful to call `expand` on this segment.
We need to do this recursively because even if *this* segment doesn't
        need expanding, maybe one of its children does.
"""
if self._parse_grammar():
return True
elif self.segments and any(s.is_expandable for s in self.segments):
return True
else:
return False
@classmethod
def simple(cls, parse_context):
"""Does this matcher support an uppercase hash matching route?"""
return False
@property
def is_code(self):
"""Return True if this segment contains any code."""
return any(seg.is_code for seg in self.segments)
@property
def is_comment(self):
"""Return True if this is entirely made of comments."""
return all(seg.is_comment for seg in self.segments)
@classmethod
def is_optional(cls):
"""Return True if this segment is optional.
This is used primarily in sequence matching, where optional
segments can be skipped.
"""
return cls.optional
@classmethod
def _match_grammar(cls):
"""Return the `match_grammar` attribute if present, or the `grammar` attribute if not."""
if cls.match_grammar:
return cls.match_grammar
else:
return cls.grammar
@classmethod
def _parse_grammar(cls):
"""Return the `parse_grammar` attribute if present, or the `grammar` attribute if not."""
if cls.parse_grammar:
return cls.parse_grammar
else:
return cls.grammar
def validate_segments(self, text="constructing", validate=True):
"""Validate the current set of segments.
Check the elements of the `segments` attribute are all
themselves segments, and that the positions match up.
        `validate` confirms whether we should check contiguousness.
"""
# Placeholder variables for positions
start_pos = None
end_pos = None
prev_seg = None
for elem in self.segments:
if not isinstance(elem, BaseSegment):
raise TypeError(
"In {0} {1}, found an element of the segments tuple which"
" isn't a segment. Instead found element of type {2}.\nFound: {3}\nFull segments:{4}".format(
text,
type(self),
type(elem),
elem,
self.segments
))
# While applying fixes, we shouldn't validate here, because it will fail.
if validate:
# If we have a comparison point, validate that
if end_pos and elem.get_start_pos_marker() != end_pos:
raise TypeError(
"In {0} {1}, found an element of the segments tuple which"
" isn't contigious with previous: {2} > {3}. End pos: {4}."
" Prev String: {5!r}".format(
text,
type(self),
prev_seg,
elem,
end_pos,
prev_seg.raw
))
start_pos = elem.get_start_pos_marker()
end_pos = elem.get_end_pos_marker()
prev_seg = elem
if start_pos.advance_by(elem.raw) != end_pos:
raise TypeError(
"In {0} {1}, found an element of the segments tuple which"
" isn't self consistent: {2}".format(
text,
type(self),
elem
))
def get_end_pos_marker(self):
"""Return the pos marker at the end of this segment."""
return self.segments[-1].get_end_pos_marker()
def get_start_pos_marker(self):
"""Return the pos marker at the start of this segment."""
return self.segments[0].get_start_pos_marker()
def __init__(self, segments, pos_marker=None, validate=True):
if len(segments) == 0:
raise RuntimeError(
"Setting {0} with a zero length segment set. This shouldn't happen.".format(
self.__class__))
if hasattr(segments, 'matched_segments'):
# Safely extract segments from a match
self.segments = segments.matched_segments
elif isinstance(segments, tuple):
self.segments = segments
elif isinstance(segments, list):
self.segments = tuple(segments)
else:
raise TypeError(
"Unexpected type passed to BaseSegment: {0}".format(
type(segments)))
# Check elements of segments:
self.validate_segments(validate=validate)
if pos_marker:
self.pos_marker = pos_marker
else:
# If no pos given, it's the pos of the first segment
# Work out if we're dealing with a match result...
if hasattr(segments, 'initial_match_pos_marker'):
self.pos_marker = segments.initial_match_pos_marker()
elif isinstance(segments, (tuple, list)):
self.pos_marker = segments[0].pos_marker
else:
raise TypeError(
"Unexpected type passed to BaseSegment: {0}".format(
type(segments)))
def parse(self, parse_context=None):
"""Use the parse grammar to find subsegments within this segment.
A large chunk of the logic around this can be found in the `expand` method.
Use the parse setting in the context for testing, mostly to check how deep to go.
True/False for yes or no, an integer allows a certain number of levels.
"""
if not parse_context.dialect:
raise RuntimeError("No dialect provided to {0!r}!".format(self))
        # Clear the blacklist cache to avoid missteps
if parse_context:
parse_context.blacklist.clear()
# the parse_depth and recurse kwargs control how deep we will recurse for testing.
if not self.segments:
# This means we're a root segment, just return an unmutated self
return self
# Get the Parse Grammar
g = self._parse_grammar()
if g is None:
# No parse grammar, go straight to expansion
logging.debug("{0}.parse: no grammar. Going straight to expansion".format(self.__class__.__name__))
else:
# Use the Parse Grammar (and the private method)
# NOTE: No match_depth kwarg, because this is the start of the matching.
m = g._match(
segments=self.segments,
parse_context=parse_context.copy(
match_segment=self.__class__.__name__
)
)
if not isinstance(m, MatchResult):
raise TypeError(
"[PD:{0}] {1}.match. Result is {2}, not a MatchResult!".format(
parse_context.parse_depth, self.__class__.__name__, type(m)))
# Basic Validation, that we haven't dropped anything.
check_still_complete(self.segments, m.matched_segments, m.unmatched_segments)
if m.has_match():
if m.is_complete():
# Complete match, happy days!
self.segments = m.matched_segments
else:
# Incomplete match.
# For now this means the parsing has failed. Lets add the unmatched bit at the
# end as something unparsable.
# TODO: Do something more intelligent here.
self.segments = m.matched_segments + (UnparsableSegment(
segments=m.unmatched_segments, expected="Nothing..."),)
else:
# If there's no match at this stage, then it's unparsable. That's
                # a problem at this stage so wrap it in an unparsable segment and carry on.
self.segments = (UnparsableSegment(
segments=self.segments,
expected=g.expected_string(dialect=parse_context.dialect)),) # NB: tuple
# Validate new segments
self.validate_segments(text="parsing")
bencher = BenchIt() # starts the timer
bencher("Parse complete of {0!r}".format(self.__class__.__name__))
# Recurse if allowed (using the expand method to deal with the expansion)
logging.debug(
"{0}.parse: Done Parse. Plotting Recursion. Recurse={1!r}".format(
self.__class__.__name__, parse_context.recurse))
parse_depth_msg = "###\n#\n# Beginning Parse Depth {0}: {1}\n#\n###\nInitial Structure:\n{2}".format(
parse_context.parse_depth + 1, self.__class__.__name__, self.stringify())
if parse_context.recurse is True:
logging.debug(parse_depth_msg)
self.segments = self.expand(
self.segments,
parse_context=parse_context.copy(
incr='parse_depth', match_depth=0, recurse=True
)
)
elif isinstance(parse_context.recurse, int):
if parse_context.recurse > 1:
logging.debug(parse_depth_msg)
self.segments = self.expand(
self.segments,
parse_context=parse_context.copy(decr='recurse', incr='parse_depth')
)
# Validate new segments
self.validate_segments(text="expanding")
return self
def __repr__(self):
return "<{0}: ({1})>".format(
self.__class__.__name__,
self.pos_marker)
def _reconstruct(self):
"""Make a string from the segments of this segment."""
return "".join(seg.raw for seg in self.segments)
@property
def raw(self):
"""Make a string from the segments of this segment."""
return self._reconstruct()
@property
def raw_upper(self):
"""Make an uppercase string from the segments of this segment."""
return self._reconstruct().upper()
@staticmethod
def _suffix():
"""Return any extra output required at the end when logging.
        NB Override this for specific subclasses if we want extra output.
"""
return ""
def _preface(self, ident, tabsize, pos_idx, raw_idx):
"""Returns the preamble to any logging."""
preface = (' ' * (ident * tabsize))
if self.is_meta:
preface += "[META] "
preface += self.__class__.__name__ + ":"
preface += (' ' * max(pos_idx - len(preface), 0))
if self.pos_marker:
preface += str(self.pos_marker)
else:
preface += '-'
sfx = self._suffix()
if sfx:
return preface + (' ' * max(raw_idx - len(preface), 0)) + sfx
else:
return preface
@property
def _comments(self):
"""Returns only the comment elements of this segment."""
return [seg for seg in self.segments if seg.type == 'comment']
@property
def _non_comments(self):
"""Returns only the non-comment elements of this segment."""
return [seg for seg in self.segments if seg.type != 'comment']
def stringify(self, ident=0, tabsize=4, pos_idx=60, raw_idx=80, code_only=False):
"""Use indentation to render this segment and it's children as a string."""
buff = StringIO()
preface = self._preface(ident=ident, tabsize=tabsize, pos_idx=pos_idx, raw_idx=raw_idx)
buff.write(preface + '\n')
if not code_only and self.comment_seperate and len(self._comments) > 0:
if self._comments:
buff.write((' ' * ((ident + 1) * tabsize)) + 'Comments:' + '\n')
for seg in self._comments:
buff.write(seg.stringify(ident=ident + 2, tabsize=tabsize, pos_idx=pos_idx,
raw_idx=raw_idx, code_only=code_only))
if self._non_comments:
buff.write((' ' * ((ident + 1) * tabsize)) + 'Code:' + '\n')
for seg in self._non_comments:
buff.write(seg.stringify(ident=ident + 2, tabsize=tabsize, pos_idx=pos_idx,
raw_idx=raw_idx, code_only=code_only))
else:
for seg in self.segments:
# If we're in code_only, only show the code segments, otherwise always true
if not code_only or seg.is_code:
buff.write(seg.stringify(ident=ident + 1, tabsize=tabsize, pos_idx=pos_idx,
raw_idx=raw_idx, code_only=code_only))
return buff.getvalue()
@staticmethod
def segs_to_tuple(segs, **kwargs):
"""Return a tuple structure from an iterable of segments."""
return tuple(seg.to_tuple(**kwargs) for seg in segs)
def to_tuple(self, **kwargs):
"""Return a tuple structure from this segment.
        NB: If the segment is a meta segment, i.e. it's an indent or dedent,
then it will never be returned from here!
"""
# works for both base and raw
code_only = kwargs.get('code_only', False)
show_raw = kwargs.get('show_raw', False)
if show_raw and not self.segments:
result = (self.type, self.raw)
elif code_only:
result = (self.type, tuple(seg.to_tuple(**kwargs) for seg in self.segments if seg.is_code and not seg.is_meta))
else:
result = (self.type, tuple(seg.to_tuple(**kwargs) for seg in self.segments if not seg.is_meta))
return result
@classmethod
def structural_simplify(cls, elem):
"""Simplify the structure recursively so it serializes nicely in json/yaml."""
if isinstance(elem, tuple):
# Does this look like an element?
if len(elem) == 2 and isinstance(elem[0], str):
# This looks like a single element, make a dict
elem = {elem[0]: cls.structural_simplify(elem[1])}
elif isinstance(elem[0], tuple):
# This looks like a list of elements.
keys = [e[0] for e in elem]
# Any duplicate elements?
if len(set(keys)) == len(keys):
                    # No, we can use a mapping type (a dict)
elem = {e[0]: cls.structural_simplify(e[1]) for e in elem}
else:
# Yes, this has to be a list :(
elem = [cls.structural_simplify(e) for e in elem]
return elem
def as_record(self, **kwargs):
"""Return the segment as a structurally simplified record.
This is useful for serialization to yaml or json.
kwargs passed to to_tuple
"""
return self.structural_simplify(self.to_tuple(**kwargs))
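    # Illustrative sketch (hypothetical segment tuple): structural_simplify() turns nested
    # (type, value) tuples into dicts/lists for serialization, e.g.
    #   ('statement', (('keyword', 'SELECT'), ('column_reference', 'foo')))
    #   -> {'statement': {'keyword': 'SELECT', 'column_reference': 'foo'}}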
@classmethod
def match(cls, segments, parse_context):
"""Match a list of segments against this segment.
Note: Match for segments is done in the ABSTRACT.
When dealing with concrete then we're always in parse.
Parse is what happens during expand.
Matching can be done from either the raw or the segments.
This raw function can be overridden, or a grammar defined
on the underlying class.
"""
if cls._match_grammar():
# Call the private method
m = cls._match_grammar()._match(segments=segments, parse_context=parse_context.copy(incr='match_depth'))
# Calling unify here, allows the MatchResult class to do all the type checking.
if not isinstance(m, MatchResult):
raise TypeError(
"[PD:{0} MD:{1}] {2}.match. Result is {3}, not a MatchResult!".format(
parse_context.parse_depth, parse_context.match_depth, cls.__name__,
type(m)))
# Once unified we can deal with it just as a MatchResult
if m.has_match():
return MatchResult((cls(segments=m.matched_segments),), m.unmatched_segments)
else:
return MatchResult.from_unmatched(segments)
else:
raise NotImplementedError("{0} has no match function implemented".format(cls.__name__))
@classmethod
def _match(cls, segments, parse_context):
"""A wrapper on the match function to do some basic validation and logging."""
parse_match_logging(
cls.__name__[:10], '_match', 'IN', parse_context=parse_context,
v_level=4, ls=len(segments))
if isinstance(segments, BaseSegment):
            segments = (segments,)  # Make into a tuple for compatibility
if not isinstance(segments, tuple):
logging.warning(
"{0}.match, was passed {1} rather than tuple or segment".format(
cls.__name__, type(segments)))
if isinstance(segments, list):
# Let's make it a tuple for compatibility
segments = tuple(segments)
if len(segments) == 0:
logging.info("{0}._match, was passed zero length segments list".format(cls.__name__))
m = cls.match(segments, parse_context=parse_context)
if not isinstance(m, tuple) and m is not None:
logging.warning(
"{0}.match, returned {1} rather than tuple".format(
cls.__name__, type(m)))
parse_match_logging(
cls.__name__[:10], '_match', 'OUT',
parse_context=parse_context, v_level=4, m=m)
# Validation is skipped at a match level. For performance reasons
# we match at the parse level only
# check_still_complete(segments, m.matched_segments, m.unmatched_segments)
return m
@staticmethod
def expand(segments, parse_context):
"""Expand the list of child segments using their `parse` methods."""
segs = ()
for stmt in segments:
try:
if not stmt.is_expandable:
verbosity_logger(
"[PD:{0}] Skipping expansion of {1}...".format(parse_context.parse_depth, stmt),
verbosity=parse_context.verbosity)
segs += (stmt,)
continue
except Exception as err:
# raise ValueError("{0} has no attribute `is_expandable`. This segment appears poorly constructed.".format(stmt))
logging.error("{0} has no attribute `is_expandable`. This segment appears poorly constructed.".format(stmt))
raise err
if not hasattr(stmt, 'parse'):
raise ValueError("{0} has no method `parse`. This segment appears poorly constructed.".format(stmt))
parse_depth_msg = "Parse Depth {0}. Expanding: {1}: {2!r}".format(
parse_context.parse_depth, stmt.__class__.__name__,
curtail_string(stmt.raw, length=40))
verbosity_logger(frame_msg(parse_depth_msg), verbosity=parse_context.verbosity)
res = stmt.parse(parse_context=parse_context)
if isinstance(res, BaseSegment):
segs += (res,)
else:
# We might get back an iterable of segments
segs += tuple(res)
# Basic Validation
check_still_complete(segments, segs, ())
return segs
def raw_list(self):
"""Return a list of raw elements, mostly for testing or searching."""
buff = []
for s in self.segments:
buff += s.raw_list()
return buff
def iter_raw_seg(self):
"""Iterate raw segments, mostly for searching."""
for s in self.segments:
for seg in s.iter_raw_seg():
yield seg
def iter_unparsables(self):
"""Iterate through any unparsables this segment may contain."""
for s in self.segments:
for u in s.iter_unparsables():
yield u
def type_set(self):
"""Return a set of the types contained, mostly for testing."""
typs = {self.type}
for s in self.segments:
typs |= s.type_set()
return typs
def __eq__(self, other):
# Equal if type, content and pos are the same
# NB: this should also work for RawSegment
return (type(self) is type(other)
and (self.raw == other.raw)
and (self.pos_marker == other.pos_marker))
def __len__(self):
"""Implement a len method to make everyone's lives easier."""
return 1
def is_raw(self):
"""Return True if this segment has no children."""
return len(self.segments) == 0
@classmethod
def expected_string(cls, dialect=None, called_from=None):
"""Return the expected string for this segment.
This is never going to be called on an _instance_
but rather on the class, as part of a grammar, and therefore
as part of the matching phase. So we use the match grammar.
"""
return cls._match_grammar().expected_string(dialect=dialect, called_from=called_from)
@classmethod
def as_optional(cls):
"""Construct a copy of this class, but with the optional flag set true.
Used in constructing grammars, will make an identical class
but with the optional argument set to true. Used in constructing
sequences.
"""
# Now lets make the classname (it indicates the mother class for clarity)
classname = "Optional_{0}".format(cls.__name__)
# This is the magic, we generate a new class! SORCERY
newclass = type(classname, (cls, ),
dict(optional=True))
# Now we return that class in the abstract. NOT INSTANTIATED
return newclass
def apply_fixes(self, fixes):
"""Apply an iterable of fixes to this segment.
Used in applying fixes if we're fixing linting errors.
If anything changes, this should return a new version of the segment
rather than mutating the original.
Note: We need to have fixes to apply AND this must have children. In the case
of raw segments, they will be replaced or removed by their parent and
so this function should just return self.
"""
# Let's check what we've been given.
if fixes and isinstance(fixes[0], SQLLintError):
logging.error("Transforming `fixes` from errors into a list of fixes")
# We've got linting errors, let's aggregate them into a list of fixes
buff = []
for err in fixes:
buff += err.fixes
# Overwrite fixes
fixes = buff
if fixes and not self.is_raw():
# Get a reference to self to start with, but this will rapidly
# become a working copy.
r = self
# Make a working copy
seg_buffer = []
todo_buffer = list(self.segments)
while True:
if len(todo_buffer) == 0:
break
else:
seg = todo_buffer.pop(0)
# We don't apply fixes to meta segments
if seg.is_meta:
seg_buffer.append(seg)
continue
fix_buff = fixes.copy()
unused_fixes = []
while fix_buff:
f = fix_buff.pop()
if f.anchor == seg:
if f.edit_type == 'delete':
# We're just getting rid of this segment.
seg = None
elif f.edit_type in ('edit', 'create'):
# We're doing a replacement (it could be a single segment or an iterable)
if isinstance(f.edit, BaseSegment):
seg_buffer.append(f.edit)
else:
for s in f.edit:
seg_buffer.append(s)
if f.edit_type == 'create':
# in the case of a creation, also add this segment on the end
seg_buffer.append(seg)
else:
raise ValueError(
"Unexpected edit_type: {0!r} in {1!r}".format(
f.edit_type, f))
# We've applied a fix here. Move on, this also consumes the fix
# TODO: Maybe deal with overlapping fixes later.
break
else:
# We've not used the fix so we should keep it in the list for later.
unused_fixes.append(f)
else:
seg_buffer.append(seg)
                # Switch over to the unused list
fixes = unused_fixes + fix_buff
# Then recurse (i.e. deal with the children) (Requeueing)
seg_queue = seg_buffer
seg_buffer = []
for seg in seg_queue:
s, fixes = seg.apply_fixes(fixes)
seg_buffer.append(s)
# Reform into a new segment
r = r.__class__(
segments=tuple(seg_buffer),
pos_marker=r.pos_marker,
validate=False
)
# Lastly, before returning, we should realign positions.
# Note: Realign also returns a copy
return r.realign(), fixes
else:
return self, fixes
def realign(self):
"""Realign the positions in this segment.
Returns:
a copy of this class with the pos_markers realigned.
Note: this is used mostly during fixes.
Realign is recursive. We will assume that the pos_marker of THIS segment is
truthful, and that during recursion it will have been set by the parent.
        This function will align the pos markers of its direct children; we then
recurse to realign their children.
"""
seg_buffer = []
todo_buffer = list(self.segments)
running_pos = self.pos_marker
while True:
if len(todo_buffer) == 0:
# We're done.
break
else:
# Get the first off the buffer
seg = todo_buffer.pop(0)
# We'll preserve statement indexes so we should keep track of that.
                # When recreating, we use the DELTA of the index so that's what matters...
idx = seg.pos_marker.statement_index - running_pos.statement_index
if seg.is_meta:
# It's a meta segment, just update the position
seg = seg.__class__(
pos_marker=running_pos
)
elif len(seg.segments) > 0:
                    # It's a compound segment, so keep track of its children
child_segs = seg.segments
# Create a new segment of the same type with the new position
seg = seg.__class__(
segments=child_segs,
pos_marker=running_pos
)
# Realign the children of that class
seg = seg.realign()
else:
# It's a raw segment...
# Create a new segment of the same type with the new position
seg = seg.__class__(
raw=seg.raw,
pos_marker=running_pos
)
# Update the running position with the content of that segment
running_pos = running_pos.advance_by(
raw=seg.raw, idx=idx
)
# Add the buffer to my new segment
seg_buffer.append(seg)
# Create a new version of this class with the new details
return self.__class__(
segments=tuple(seg_buffer),
pos_marker=self.pos_marker
)
class RawSegment(BaseSegment):
"""This is a segment without any subsegments."""
type = 'raw'
_is_code = False
_is_comment = False
_template = '<unset>'
_case_sensitive = False
_raw_upper = None
@property
def is_expandable(self):
"""Return true if it is meaningful to call `expand` on this segment."""
return False
@property
def is_code(self):
"""Return True if this segment is code."""
return self._is_code
@property
def is_comment(self):
"""Return True if this segment is a comment."""
return self._is_comment
def __init__(self, raw, pos_marker):
self._raw = raw
self._raw_upper = raw.upper()
# pos marker is required here
self.pos_marker = pos_marker
@property
def raw_upper(self):
"""Make an uppercase string from the segments of this segment."""
return self._raw_upper
def iter_raw_seg(self):
"""Iterate raw segments, mostly for searching."""
yield self
@property
def segments(self):
"""Return an empty list of child segments.
This is in case something tries to iterate on this segment.
"""
return []
def raw_list(self):
"""Return a list of the raw content of this segment."""
return [self.raw]
def _reconstruct(self):
"""Return a string of the raw content of this segment."""
return self._raw
def __repr__(self):
return "<{0}: ({1}) {2!r}>".format(
self.__class__.__name__,
self.pos_marker,
self.raw)
def stringify(self, ident=0, tabsize=4, pos_idx=60, raw_idx=80, code_only=False):
"""Use indentation to render this segment and it's children as a string."""
preface = self._preface(ident=ident, tabsize=tabsize, pos_idx=pos_idx, raw_idx=raw_idx)
return preface + '\n'
def _suffix(self):
"""Return any extra output required at the end when logging.
        NB Override this for specific subclasses if we want extra output.
"""
return "{0!r}".format(self.raw)
@classmethod
def make(cls, template, case_sensitive=False, name=None, **kwargs):
"""Make a subclass of the segment using a method."""
# Let's deal with the template first
if case_sensitive:
_template = template
else:
_template = template.upper()
# Use the name if provided otherwise default to the template
name = name or _template
# Now lets make the classname (it indicates the mother class for clarity)
classname = "{0}_{1}".format(name, cls.__name__)
# This is the magic, we generate a new class! SORCERY
newclass = type(classname, (cls, ),
dict(_template=_template, _case_sensitive=case_sensitive,
_name=name, **kwargs))
# Now we return that class in the abstract. NOT INSTANTIATED
return newclass
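    # Illustrative usage sketch (hypothetical subclass): a dialect can generate keyword matchers via
    #   SelectKeyword = KeywordSegment.make('select', name='select_keyword')
    # where KeywordSegment is assumed to be a RawSegment subclass defined elsewhere.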
def edit(self, raw):
"""Create a new segment, with exactly the same position but different content.
Returns:
A copy of this object with new contents.
Used mostly by fixes.
"""
return self.__class__(
raw=raw,
pos_marker=self.pos_marker
)
def get_end_pos_marker(self):
"""Return the pos marker at the end of this segment."""
return self.pos_marker.advance_by(self.raw)
def get_start_pos_marker(self):
"""Return the pos marker at the start of this segment."""
return self.pos_marker
class UnparsableSegment(BaseSegment):
"""This is a segment which can't be parsed. It indicates a error during parsing."""
type = 'unparsable'
    # From here down, comments are printed separately.
comment_seperate = True
_expected = ""
def __init__(self, *args, **kwargs):
self._expected = kwargs.pop('expected', "")
super(UnparsableSegment, self).__init__(*args, **kwargs)
def _suffix(self):
"""Return any extra output required at the end when logging.
        NB Override this for specific subclasses if we want extra output.
"""
return "!! Expected: {0!r}".format(self._expected)
def iter_unparsables(self):
"""Iterate through any unparsables.
As this is an unparsable, it should yield itself.
"""
yield self
|
python
|
"""客户端查询排行榜"""
from upload import uploading
def rank():
pass
|
python
|
#!/usr/bin/env python
#pylint: disable=C0103
"""
This module provides business object class to interact with
DATASET_ACCESS_TYPES table.
"""
from WMCore.DAOFactory import DAOFactory
from dbs.utils.dbsExceptionHandler import dbsExceptionHandler
class DBSDatasetAccessType:
"""
DatasetAccessType business object class
"""
def __init__(self, logger, dbi, owner):
daofactory = DAOFactory(package='dbs.dao', logger=logger,
dbinterface=dbi, owner=owner)
self.logger = logger
self.dbi = dbi
self.owner = owner
self.datasetAccessType = daofactory(classname="DatasetType.List")
def listDatasetAccessTypes(self, dataset_access_type=""):
"""
List dataset access types
"""
if isinstance(dataset_access_type, basestring):
try:
dataset_access_type = str(dataset_access_type)
except:
dbsExceptionHandler('dbsException-invalid-input', 'dataset_access_type given is not valid : %s' %dataset_access_type)
else:
dbsExceptionHandler('dbsException-invalid-input', 'dataset_access_type given is not valid : %s' %dataset_access_type)
conn = self.dbi.connection()
try:
plist = self.datasetAccessType.execute(conn, dataset_access_type.upper())
result = [{}]
if plist:
t = []
for i in plist:
for k, v in i.iteritems():
t.append(v)
result[0]['dataset_access_type'] = t
return result
finally:
if conn:
conn.close()
|
python
|
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/python/ReconstructionSystem/sensors/realsense_recorder.py
# pyrealsense2 is required.
# Please see instructions in https://github.com/IntelRealSense/librealsense/tree/master/wrappers/python
import pyrealsense2 as rs
import numpy as np
import cv2
import argparse
from os import makedirs
from os.path import exists, join
import shutil
import json
from enum import IntEnum
try:
# Python 2 compatible
input = raw_input
except NameError:
pass
class Preset(IntEnum):
Custom = 0
Default = 1
Hand = 2
HighAccuracy = 3
HighDensity = 4
MediumDensity = 5
def make_clean_folder(path_folder):
if not exists(path_folder):
makedirs(path_folder)
else:
user_input = input("%s not empty. Overwrite? (y/n) : " % path_folder)
if user_input.lower() == 'y':
shutil.rmtree(path_folder)
makedirs(path_folder)
else:
exit()
def save_intrinsic_as_json(filename, frame):
intrinsics = frame.profile.as_video_stream_profile().intrinsics
with open(filename, 'w') as outfile:
        json.dump(
{
'width':
intrinsics.width,
'height':
intrinsics.height,
'intrinsic_matrix': [
intrinsics.fx, 0, 0, 0, intrinsics.fy, 0, intrinsics.ppx,
intrinsics.ppy, 1
]
},
outfile,
indent=4)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=
"Realsense Recorder. Please select one of the optional arguments")
parser.add_argument("--output_folder",
default='../dataset/realsense/',
help="set output folder")
parser.add_argument("--record_rosbag",
action='store_true',
help="Recording rgbd stream into realsense.bag")
parser.add_argument(
"--record_imgs",
action='store_true',
help="Recording save color and depth images into realsense folder")
parser.add_argument("--playback_rosbag",
action='store_true',
help="Play recorded realsense.bag file")
args = parser.parse_args()
if sum(o is not False for o in vars(args).values()) != 2:
parser.print_help()
exit()
path_output = args.output_folder
path_depth = join(args.output_folder, "depth")
path_color = join(args.output_folder, "color")
if args.record_imgs:
make_clean_folder(path_output)
make_clean_folder(path_depth)
make_clean_folder(path_color)
path_bag = join(args.output_folder, "realsense.bag")
if args.record_rosbag:
if exists(path_bag):
user_input = input("%s exists. Overwrite? (y/n) : " % path_bag)
if user_input.lower() == 'n':
exit()
# Create a pipeline
pipeline = rs.pipeline()
#Create a config and configure the pipeline to stream
# different resolutions of color and depth streams
config = rs.config()
if args.record_imgs or args.record_rosbag:
        # note: depth is streamed at 1280 x 720 here; a 640 x 480 depth resolution produces smoother depth boundaries
# using rs.format.bgr8 for color image format for OpenCV based image visualization
config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
if args.record_rosbag:
config.enable_record_to_file(path_bag)
if args.playback_rosbag:
config.enable_device_from_file(path_bag, repeat_playback=True)
# Start streaming
profile = pipeline.start(config)
depth_sensor = profile.get_device().first_depth_sensor()
# Using preset HighAccuracy for recording
if args.record_rosbag or args.record_imgs:
depth_sensor.set_option(rs.option.visual_preset, Preset.HighAccuracy)
# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_scale = depth_sensor.get_depth_scale()
# We will not display the background of objects more than
# clipping_distance_in_meters meters away
clipping_distance_in_meters = 3 # 3 meter
clipping_distance = clipping_distance_in_meters / depth_scale
# Create an align object
# rs.align allows us to perform alignment of depth frames to others frames
# The "align_to" is the stream type to which we plan to align depth frames.
align_to = rs.stream.color
align = rs.align(align_to)
# Streaming loop
frame_count = 0
try:
while True:
# Get frameset of color and depth
frames = pipeline.wait_for_frames()
# Align the depth frame to color frame
aligned_frames = align.process(frames)
# Get aligned frames
aligned_depth_frame = aligned_frames.get_depth_frame()
color_frame = aligned_frames.get_color_frame()
# Validate that both frames are valid
if not aligned_depth_frame or not color_frame:
continue
depth_image = np.asanyarray(aligned_depth_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())
if args.record_imgs:
if frame_count == 0:
save_intrinsic_as_json(
join(args.output_folder, "camera_intrinsic.json"),
color_frame)
cv2.imwrite("%s/%06d.png" % \
(path_depth, frame_count), depth_image)
cv2.imwrite("%s/%06d.jpg" % \
(path_color, frame_count), color_image)
print("Saved color + depth image %06d" % frame_count)
frame_count += 1
# Remove background - Set pixels further than clipping_distance to grey
grey_color = 153
#depth image is 1 channel, color is 3 channels
depth_image_3d = np.dstack((depth_image, depth_image, depth_image))
bg_removed = np.where((depth_image_3d > clipping_distance) | \
(depth_image_3d <= 0), grey_color, color_image)
# Render images
depth_colormap = cv2.applyColorMap(
cv2.convertScaleAbs(depth_image, alpha=0.09), cv2.COLORMAP_JET)
images = np.hstack((bg_removed, depth_colormap))
cv2.namedWindow('Recorder Realsense', cv2.WINDOW_AUTOSIZE)
cv2.imshow('Recorder Realsense', images)
key = cv2.waitKey(1)
# if 'esc' button pressed, escape loop and exit program
if key == 27:
cv2.destroyAllWindows()
break
finally:
pipeline.stop()
|
python
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^job-meta/',views.job_meta, name='job_meta'),
url(r'^job-success-failure',views.job_success_failure_ratio, name='job_success_failure_ratio'),
url(r'^$',views.dashboard, name='dashboard'),
]
|
python
|
# Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-member,maybe-no-member
import ipaddress
from tortuga.cli.tortugaCli import TortugaCli
from tortuga.exceptions.invalidCliRequest import InvalidCliRequest
from tortuga.wsapi.networkWsApi import NetworkWsApi
class NetworkCli(TortugaCli):
"""
Base network command line interface class.
"""
def __init__(self):
super(NetworkCli, self).__init__()
# Initialize api instance
self._networkApi = None
def setupDefaultOptions(self):
"""
Set up default command-line options for all attributes in network...
used by update and add operations
"""
cmdline_grpname = _('Command-line')
self.addOptionGroup(cmdline_grpname, None)
# Simple common Options
self.addOptionToGroup(cmdline_grpname, '--network',
help=_('Network in XXX.XXX.XXX.XXX/YY or'
' XXX.XXX.XXX.XXX/YYY.YYY.YYY.YYY'
' format'))
self.addOptionToGroup(cmdline_grpname, '--address', dest='address',
help=_('Network address'))
self.addOptionToGroup(cmdline_grpname, '--netmask', dest='netmask',
help=_('Network mask'))
self.addOptionToGroup(cmdline_grpname, '--suffix', dest='suffix',
help=_('Network suffix'))
self.addOptionToGroup(cmdline_grpname, '--gateway', dest='gateway',
help=_('Network gateway'))
self.addOptionToGroup(cmdline_grpname, '--options', dest='options',
help=_('Network options'))
self.addOptionToGroup(cmdline_grpname, '--name', dest='name',
help=_('Network name'))
self.addOptionToGroup(cmdline_grpname, '--start-ip', dest='startIp',
help=_('Network starting IP address'))
self.addOptionToGroup(cmdline_grpname, '--type', dest='type',
help=_('Network type'))
self.addOptionToGroup(cmdline_grpname, '--increment',
dest='increment',
help=_('Network increment'), type=int)
self.addOptionToGroup(cmdline_grpname,
'--dhcp', dest='usingDhcp',
action='store_true',
help=_('Network addresses assigned via DHCP'))
self.addOptionToGroup(cmdline_grpname, '--static', dest='usingDhcp',
action='store_false',
help=_('Network addresses assigned'
' statically'))
self.addOptionToGroup(cmdline_grpname, '--vlan-id', dest='vlanId',
help=_('VLAN ID.'))
self.addOptionToGroup(cmdline_grpname, '--vlan-parent-network',
dest='vlanParentNetwork',
help=_('Parent network of the VLAN network'))
# Or an xml file can be passed in
xml_grpname = _('From XML file')
self.addOptionGroup(xml_grpname, None)
self.addOptionToGroup(xml_grpname, '--xml-file', dest='xmlFile',
help=_('XML file containing network'
' definition'))
def assertIp(self, ip, parameterName, errorMsg=None): \
# pylint: disable=no-self-use
"""
        Convenience function for testing IPs and raising a configurable
exception if the IP is invalid.
"""
if errorMsg is None:
errorMsg = _('The %s parameter must be a valid IP address.') % (
parameterName)
try:
ipaddress.IPv4Address(str(ip))
except ipaddress.AddressValueError:
raise InvalidCliRequest(errorMsg)
def updateNetwork(self, network):
"""
Update a passed in network tortuga object with the values passed
in on the command line.
"""
# Check for conflicting command-line options
if (self.getArgs().netmask or self.getArgs().address) and \
self.getArgs().network:
self.getParser().error(
                'Specify the network using --address/--netmask or --network')
if self.getArgs().network:
            # Use the 'ipaddress' module to validate the network spec
parsed_network, parsed_netmask = \
self.parseNetworkParameter(self.getArgs().network)
network.setAddress(parsed_network)
network.setNetmask(parsed_netmask)
else:
if self.getArgs().address is not None:
self.assertIp(self.getArgs().address, '--address')
network.setAddress(self.getArgs().address)
if self.getArgs().netmask is not None:
self.assertIp(self.getArgs().netmask, '--netmask')
network.setNetmask(self.getArgs().netmask)
if self.getArgs().suffix is not None:
network.setSuffix(self.getArgs().suffix)
if self.getArgs().gateway is not None:
self.assertIp(self.getArgs().gateway, '--gateway')
network.setGateway(self.getArgs().gateway)
if self.getArgs().name is not None:
network.setName(self.getArgs().name)
if self.getArgs().startIp is not None:
self.assertIp(self.getArgs().startIp, '--start-ip')
network.setStartIp(self.getArgs().startIp)
if self.getArgs().type is not None:
network.setType(self.getArgs().type)
if self.getArgs().increment is not None:
network.setIncrement(self.getArgs().increment)
optionsString = network.getOptions()
optionsDict = {}
if optionsString:
# VLAN info may already exist for this network
optionsList = optionsString.split(';')
for originalOption in optionsList:
key, value = originalOption.split('=')
optionsDict[key] = value
vlanIdFound = self.getArgs().vlanId is not None or \
'vlan' in optionsDict
vlanParentNetworkFound = \
self.getArgs().vlanParentNetwork is not None or \
'vlanparent' in optionsDict
if (vlanIdFound and not vlanParentNetworkFound) or \
(not vlanIdFound and vlanParentNetworkFound):
raise InvalidCliRequest(
_('--vlan-id and --vlan-parent-network must be used'
' together.'))
if self.getArgs().vlanId:
optionsDict['vlan'] = self.getArgs().vlanId
if self.getArgs().vlanParentNetwork:
# Match the given parent network to a network in the DB
networkAddr, subnetMask = self.parseNetworkParameter(
self.getArgs().vlanParentNetwork)
existingNetworkList = self.getNetworkApi().getNetworkList()
matchingNetworkId = None
for existingNetwork in existingNetworkList:
if existingNetwork.getAddress() == networkAddr and \
existingNetwork.getNetmask() == subnetMask:
matchingNetworkId = existingNetwork.getId()
if not matchingNetworkId:
raise InvalidCliRequest(
_('Network [%s] not found') % (
self.getArgs().vlanParentNetwork))
optionsDict['vlanparent'] = matchingNetworkId
newOptions = ''
if self.getArgs().vlanId or self.getArgs().vlanParentNetwork:
for entry in list(optionsDict.items()):
optionKey, optionValue = entry
newOptions += '%s=%s;' % (optionKey, optionValue)
# Take off the last semicolon
newOptions = newOptions[:-1]
if self.getArgs().options:
if newOptions:
newOptions = '%s;%s' % (newOptions, self.getArgs().options)
else:
newOptions = self.getArgs().options
if self.getArgs().options or self.getArgs().vlanId or \
self.getArgs().vlanParentNetwork:
network.setOptions(newOptions)
if self.getArgs().usingDhcp is not None:
network.setUsingDhcp(self.getArgs().usingDhcp)
def getNetworkFromXml(self):
"""
If the xmlFile option is present attempt to create a Network
object from the xml. Otherwise return None
"""
network = None
if self.getArgs().xmlFile:
# An XML file was provided as input...start with that...
f = open(self.getArgs().xmlFile, 'r')
try:
xmlString = f.read()
finally:
f.close()
try:
from tortuga.objects.network import Network
network = Network.getFromXml(xmlString)
except Exception as ex: # pylint: disable=W0703
self._logger.debug('Error parsing xml %s' % ex)
if network is None:
raise InvalidCliRequest(
_('File [%s] does not contain a valid network.') % (
self.getArgs().xmlFile))
return network
def getNetworkApi(self):
"""
Caching method for getting a networkApi instance.
"""
if self._networkApi is None:
self._networkApi = self.configureClient(NetworkWsApi)
return self._networkApi
def parseNetworkParameter(self, network): \
# pylint: disable=no-self-use
"""
Validator for the --network parameter.
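        e.g. parseNetworkParameter('10.0.0.0/24') -> ('10.0.0.0', '255.255.255.0')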
"""
try:
result = ipaddress.IPv4Network(str(network))
except ipaddress.AddressValueError:
# Invalid argument to --network specified
raise InvalidCliRequest(
_('--network argument must be formatted as '
' XXX.XXX.XXX.XXX/YY or XXX.XXX.XXX.XXX/YYY.YYY.YYY.YYY'))
return result.network_address.exploded, result.netmask.exploded
def validateNetwork(self, network): # pylint: disable=no-self-use
"""
Verify a network object has the minimum populated fields needed to
add it to the database
"""
if not network.getAddress():
raise InvalidCliRequest(_('Network address must be specified.'))
if not network.getNetmask():
raise InvalidCliRequest(_('Subnet mask must be specified.'))
if not network.getType():
raise InvalidCliRequest(_('Network type must be specified.'))
if network.getUsingDhcp() is None:
raise InvalidCliRequest(
_('Address allocation must be specified as DHCP or'
' static.'))
if network.getIncrement():
increment = network.getIncrement()
try:
value = int(increment)
if value < 1:
raise InvalidCliRequest(
_('Increment must be positive.'))
except ValueError:
raise InvalidCliRequest(
_('Increment must be a positive integer.'))
def get_network_from_cmdline(self, retrieve_network=True):
"""
If 'retrieve_network' is True, return Network object matching network
specification (either --address/--netmask or --network), otherwise
return None.
Raises:
NetworkNotFound
"""
# Get network from XML if an xml file was passed in
network = self.getNetworkFromXml()
if network:
return network
# If we didn't have xml but network load the network from the
# api...otherwise error
if self.getArgs().address is None and \
self.getArgs().network is None or \
((self.getArgs().address or self.getArgs().netmask) and
self.getArgs().network):
self.getParser().error(
'--address/--netmask OR --network must be specified')
if self.getArgs().network:
_network, _netmask = self.parseNetworkParameter(
self.getArgs().network)
else:
_network = self.getArgs().address
_netmask = self.getArgs().netmask
if _netmask is None:
self.getParser().error('--netmask must be specified')
if not retrieve_network:
return None
return self.getNetworkApi().getNetwork(_network, _netmask)
|
python
|
import tweepy # To consume Twitter's API
import pandas as pd # To handle data
import numpy as np # For number computing
from textblob import TextBlob # for sentiment analysis
import re
# For plotting and visualization:
from IPython.display import display
# For display use only:
# import matplotlib.pyplot as plt
import seaborn as sns
# We import our access keys:
from keys.twitter_keys import * # This will allow us to use the keys as variables
# We import our access keys:
# optional from credentials import * # This will allow us to use the keys as variables
# API's setup:
def twitter_setup():
"""
Utility function to setup the Twitter's API
with our access keys provided.
"""
# Authentication and access using keys:
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
# Return API with authentication:
api = tweepy.API(auth)
return api
# We create an extractor object:
extractor = twitter_setup()
# We create a tweet list as follows:
# tweets = extractor.user_timeline(screen_name="realDonaldTrump", count=200)
## search by hashtag
tweets = extractor.user_timeline(screen_name="cnnbrk", count=10)
# We create a pandas dataframe as follows:
data = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['Tweets'])
# We display the first 10 elements of the dataframe:
#display(data.head(10))
# We add relevant data:
data['created_at'] = np.array([tweet.created_at for tweet in tweets])
data['len'] = np.array([len(tweet.text) for tweet in tweets])
data['ID'] = np.array([tweet.id for tweet in tweets])
data['Date'] = np.array([tweet.created_at for tweet in tweets])
data['Source'] = np.array([tweet.source for tweet in tweets])
data['Likes'] = np.array([tweet.favorite_count for tweet in tweets])
data['RTs'] = np.array([tweet.retweet_count for tweet in tweets])
### Below for sentiment analysis
def clean_tweet(tweet):
'''
Utility function to clean the text in a tweet by removing
links and special characters using regex.
'''
return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
def analize_sentiment(tweet):
'''
Utility function to classify the polarity of a tweet
using textblob.
'''
analysis = TextBlob(clean_tweet(tweet))
if analysis.sentiment.polarity > 0:
return 1
elif analysis.sentiment.polarity == 0:
return 0
else:
return -1
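# Illustrative examples (assumed behaviour): analize_sentiment("I love this")
# typically returns 1, analize_sentiment("I hate this") returns -1, and a
# neutral string such as "table" returns 0.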
try:
# We create a column with the result of the analysis:
data['SA'] = np.array([ analize_sentiment(tweet) for tweet in data['Tweets'] ])
#api.send("sentimentPreview","Rules:\n" + data (10));
#display (data (10))
myString = data.to_csv()
display ( myString )
except Exception as inst:
display ( "errors" + str(inst))
|
python
|
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey, Table
from sqlalchemy.orm import relationship, backref
from models import db_base as base
import os
import json
__author__ = "zadjii"
class Issue(base):
__tablename__ = "issue"
"""
Represents a single issue
"""
id = Column(Integer, primary_key=True)
raw_data = Column(String)
number = Column(Integer)
def __init__(self, api_obj):
self.number = api_obj.number
self.raw_data = json.dumps(api_obj._rawData)
|
python
|
port = 8888
logging = 'info'
log_file_prefix = "tivid-error.log"
redis_host = 'localhost'
redis_port = 6379
redis_db = 0
java_source = "http://www.importnew.com/all-posts"
python_source = "http://python.jobbole.com/all-posts/"
|
python
|
from threading import Thread
def async_func(f):
def wrapper(*args, **kwargs):
thr = Thread(target = f, args = args, kwargs = kwargs)
thr.start()
return wrapper
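# Illustrative usage (hypothetical function): the decorated call returns
# immediately and the work runs on a background thread (non-daemon by
# default); note that the wrapper discards the wrapped function's return
# value.
#
# @async_func
# def send_report(recipient):
#     ...  # long-running I/O
#
# send_report("ops@example.com")  # returns without blocking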
|
python
|
from __future__ import unicode_literals
from django.apps import AppConfig
class SequencerConfig(AppConfig):
name = 'sequencer'
|
python
|
import os
import sys
import datetime
from glob import iglob
from skimage import io
import cv2
import tensorflow as tf
import numpy as np
import utils as ut
import training as tr
class VAE2predict:
def __init__(self, use_sampling=False):
self.use_sampling = use_sampling
self._build_model()
def _build_model(self):
in_image = tf.keras.layers.Input(shape=(144, 144, 3), name='in_image')
out_encoder = tr.Encoder()(in_image)
x = tf.keras.layers.Dense(512)(out_encoder)
# x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('elu', name='out_latent_1')(x)
self.z_mean = tf.keras.layers.Dense(512, name='z_mean')(x)
self.z_log_var = tf.keras.layers.Dense(512, name='z_logvar')(x)
if self.use_sampling:
self.z_latent = tf.keras.layers.Lambda(tr.sampling, output_shape=(512,),
name='z_sampling')([self.z_mean, self.z_log_var])
else:
self.z_latent = tf.keras.layers.Lambda(lambda x: x[0], output_shape=(512,),
name='z_sampling')([self.z_mean, self.z_log_var])
out_image_pre = tr.Decoder()(self.z_latent, 'out_image_pre', 3)
out_mask = tr.DecoderMask()(self.z_latent, 'out_mask', 1)
# Tidy the image to use only the face regions of the estimated output and join with the original background
x = tf.keras.layers.Multiply()([out_image_pre, out_mask])
x_bg = tf.keras.layers.Multiply()([in_image, 1. - out_mask])
out_image = tf.keras.layers.Add(name='out_image')([x, x_bg])
self.model = tf.keras.models.Model(in_image, [out_image, out_mask, out_image_pre])
def load_weights(self, modelpath=None, ckpt_dir=None):
if ckpt_dir is None and modelpath is None:
            raise ValueError('Not possible to load the model')
if ckpt_dir is not None:
fpaths_weights = list(iglob(os.path.join(ckpt_dir, 'w*.h5')))
fpaths_weights.sort()
self.modelpath = fpaths_weights[-1]
else:
self.modelpath = modelpath
self.model.load_weights(self.modelpath)
def predict(self, X):
if len(X.shape) == 3:
X = X[None]
return self.model.predict(X)
def predict_path(self, paths):
if isinstance(paths, str):
paths = [paths]
X = np.stack([ut.load_img(p) for p in paths])
return self.predict(X)
class VAENoMask2predict(VAE2predict):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _build_model(self):
in_image = tf.keras.layers.Input(shape=(144, 144, 3), name='in_image')
out_encoder = tr.Encoder()(in_image)
x = tf.keras.layers.Dense(512)(out_encoder)
# x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('elu', name='out_latent_1')(x)
self.z_mean = tf.keras.layers.Dense(512, name='z_mean')(x)
self.z_log_var = tf.keras.layers.Dense(512, name='z_logvar')(x)
if self.use_sampling:
self.z_latent = tf.keras.layers.Lambda(tr.sampling, output_shape=(512,),
name='z_sampling')([self.z_mean, self.z_log_var])
else:
self.z_latent = tf.keras.layers.Lambda(lambda x: x[0], output_shape=(512,),
name='z_sampling')([self.z_mean, self.z_log_var])
self.out_image = tr.Decoder()(self.z_latent, 'out_image', 3)
self.model = tf.keras.models.Model(in_image, self.out_image)
def save_predictions(preds, org_dim=(144, 144)):
now_timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
now_timestamp = '.'
dir_save = os.path.join('cache', now_timestamp)
if not os.path.exists(dir_save):
os.makedirs(dir_save)
n_samples = preds[0].shape[0]
for i in range(n_samples):
for j, name in enumerate(['reconst', 'mask']):
jpath = os.path.join(dir_save, '{}_{}.png'.format(i, name))
Xsave = preds[j][i]
if j == 1:
# Round the mask pixels
Xsave = Xsave.round()
Xsave = cv2.resize(Xsave, org_dim)
io.imsave(jpath, Xsave)
if __name__ == '__main__':
impath = sys.argv[1]
im = io.imread(impath) / 255
org_dim = im.shape[:-1]
im = ut.resize_imx144(im)
vae = VAE2predict()
vae.load_weights(modelpath=ut.modelpath_best_predict)
X_pred = vae.predict(im)
save_predictions(X_pred, org_dim=org_dim)
|
python
|
from django import template
register = template.Library()
from urlparse import urlparse
def domain_only(full_url):
    netloc = urlparse(full_url).netloc
    # str.lstrip removes a set of characters, not a prefix, so strip "www." explicitly
    return netloc[4:] if netloc.startswith("www.") else netloc
register.filter('domain_only', domain_only)
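# Illustrative template usage (field name is hypothetical):
# {{ item.url|domain_only }} renders e.g. "example.com" for
# "https://www.example.com/page".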
|
python
|
print('spam = 40')
spam = 40
print('eggs = 2')
eggs = 2
print('spam + eggs')
a = spam + eggs
print(a)
# Variable naming convention
# small then capital or sparated by _
# varA
|
python
|
#
# This file is part of m.css.
#
# Copyright © 2017, 2018, 2019 Vladimír Vondruš <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import copy
import math
import os
import sys
import unittest
from distutils.version import LooseVersion
from python import default_templates
from . import BaseInspectTestCase
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../plugins'))
import m.sphinx
class String(BaseInspectTestCase):
def test(self):
self.run_python({
'LINKS_NAVBAR1': [
('Modules', 'modules', []),
('Classes', 'classes', [])],
})
self.assertEqual(*self.actual_expected_contents('inspect_string.html'))
self.assertEqual(*self.actual_expected_contents('inspect_string.another_module.html'))
self.assertEqual(*self.actual_expected_contents('inspect_string.Foo.html'))
self.assertEqual(*self.actual_expected_contents('inspect_string.FooSlots.html'))
self.assertEqual(*self.actual_expected_contents('inspect_string.DerivedException.html'))
self.assertEqual(*self.actual_expected_contents('inspect_string.Specials.html'))
self.assertEqual(*self.actual_expected_contents('classes.html'))
self.assertEqual(*self.actual_expected_contents('modules.html'))
class Object(BaseInspectTestCase):
def test(self):
# Reuse the stuff from inspect_string, but this time reference it via
# an object and not a string
sys.path.append(os.path.join(os.path.dirname(self.path), 'inspect_string'))
import inspect_string
self.run_python({
'LINKS_NAVBAR1': [
('Modules', 'modules', []),
('Classes', 'classes', [])],
'INPUT_MODULES': [inspect_string]
})
# The output should be the same as when inspecting a string
self.assertEqual(*self.actual_expected_contents('inspect_string.html', '../inspect_string/inspect_string.html'))
self.assertEqual(*self.actual_expected_contents('inspect_string.another_module.html', '../inspect_string/inspect_string.another_module.html'))
self.assertEqual(*self.actual_expected_contents('inspect_string.Foo.html', '../inspect_string/inspect_string.Foo.html'))
self.assertEqual(*self.actual_expected_contents('inspect_string.FooSlots.html', '../inspect_string/inspect_string.FooSlots.html'))
self.assertEqual(*self.actual_expected_contents('inspect_string.DerivedException.html', '../inspect_string/inspect_string.DerivedException.html'))
self.assertEqual(*self.actual_expected_contents('inspect_string.Specials.html', '../inspect_string/inspect_string.Specials.html'))
self.assertEqual(*self.actual_expected_contents('classes.html', '../inspect_string/classes.html'))
self.assertEqual(*self.actual_expected_contents('modules.html', '../inspect_string/modules.html'))
class AllProperty(BaseInspectTestCase):
def test(self):
self.run_python()
self.assertEqual(*self.actual_expected_contents('inspect_all_property.html'))
class Annotations(BaseInspectTestCase):
def test(self):
self.run_python()
self.assertEqual(*self.actual_expected_contents('inspect_annotations.html'))
self.assertEqual(*self.actual_expected_contents('inspect_annotations.Foo.html'))
self.assertEqual(*self.actual_expected_contents('inspect_annotations.FooSlots.html'))
# This should not list any internal stuff from the typing module
self.assertEqual(*self.actual_expected_contents('inspect_annotations.AContainer.html'))
# https://github.com/python/cpython/pull/13394
@unittest.skipUnless(LooseVersion(sys.version) >= LooseVersion('3.7.4'),
"signature with / for pow() is not present in 3.6, "
"3.7.3 and below has a different docstring")
def test_math(self):
# From math export only pow() so we have the verification easier, and
# in addition log() because it doesn't provide any signature metadata
assert not hasattr(math, '__all__')
math.__all__ = ['pow', 'log']
self.run_python({
'INPUT_MODULES': [math]
})
del math.__all__
assert not hasattr(math, '__all__')
self.assertEqual(*self.actual_expected_contents('math.html'))
# https://github.com/python/cpython/pull/13394
@unittest.skipUnless(LooseVersion(sys.version) < LooseVersion('3.7.4') and LooseVersion(sys.version) >= LooseVersion('3.7'),
"signature with / for pow() is not present in 3.6, "
"3.7.3 and below has a different docstring")
def test_math373(self):
# From math export only pow() so we have the verification easier, and
# in addition log() because it doesn't provide any signature metadata
assert not hasattr(math, '__all__')
math.__all__ = ['pow', 'log']
self.run_python({
'INPUT_MODULES': [math]
})
del math.__all__
assert not hasattr(math, '__all__')
self.assertEqual(*self.actual_expected_contents('math.html', 'math373.html'))
@unittest.skipUnless(LooseVersion(sys.version) < LooseVersion('3.7'),
"docstring for log() is different in 3.7")
def test_math36(self):
# From math export only pow() so we have the verification easier, and
# in addition log() because it doesn't provide any signature metadata
assert not hasattr(math, '__all__')
math.__all__ = ['log']
self.run_python({
'INPUT_MODULES': [math]
})
del math.__all__
assert not hasattr(math, '__all__')
self.assertEqual(*self.actual_expected_contents('math.html', 'math36.html'))
class NameMapping(BaseInspectTestCase):
def test(self):
self.run_python({
'NAME_MAPPING': {
'inspect_name_mapping._sub.bar._NameThatGetsOverridenExternally': 'yay.ThisGotOverridenExternally'
}
})
self.assertEqual(*self.actual_expected_contents('inspect_name_mapping.html'))
self.assertEqual(*self.actual_expected_contents('inspect_name_mapping.Class.html'))
self.assertEqual(*self.actual_expected_contents('inspect_name_mapping.submodule.html'))
class Recursive(BaseInspectTestCase):
def test(self):
self.run_python()
self.assertEqual(*self.actual_expected_contents('inspect_recursive.html'))
self.assertEqual(*self.actual_expected_contents('inspect_recursive.first.html'))
self.assertEqual(*self.actual_expected_contents('inspect_recursive.a.html'))
class TypeLinks(BaseInspectTestCase):
def test(self):
self.run_python({
'PLUGINS': ['m.sphinx'],
'INPUT_DOCS': ['docs.rst'],
'INPUT_PAGES': ['index.rst'],
'M_SPHINX_INVENTORIES': [
('../../../doc/documentation/python.inv', 'https://docs.python.org/3/', [], ['m-doc-external'])]
})
self.assertEqual(*self.actual_expected_contents('index.html'))
self.assertEqual(*self.actual_expected_contents('inspect_type_links.html'))
self.assertEqual(*self.actual_expected_contents('inspect_type_links.Foo.html'))
self.assertEqual(*self.actual_expected_contents('inspect_type_links.first.html'))
self.assertEqual(*self.actual_expected_contents('inspect_type_links.first.Foo.html'))
self.assertEqual(*self.actual_expected_contents('inspect_type_links.first.Foo.Foo.html'))
self.assertEqual(*self.actual_expected_contents('inspect_type_links.first.sub.html'))
self.assertEqual(*self.actual_expected_contents('inspect_type_links.first.sub.Foo.html'))
self.assertEqual(*self.actual_expected_contents('inspect_type_links.second.html'))
self.assertEqual(*self.actual_expected_contents('inspect_type_links.second.Foo.html'))
self.assertEqual(*self.actual_expected_contents('inspect_type_links.second.FooSlots.html'))
self.assertEqual(*self.actual_expected_contents('inspect_type_links.second.FooSlotsInvalid.html'))
class CreateIntersphinx(BaseInspectTestCase):
def test(self):
self.run_python({
'PLUGINS': ['m.sphinx'],
'INPUT_PAGES': ['page.rst'],
'M_SPHINX_INVENTORIES': [
# Nothing from here should be added to the output
('../../../doc/documentation/python.inv', 'https://docs.python.org/3/', [], ['m-doc-external'])],
'M_SPHINX_INVENTORY_OUTPUT': 'things.inv',
'PYBIND11_COMPATIBILITY': True
})
with open(os.path.join(self.path, 'output/things.inv'), 'rb') as f:
self.assertEqual(m.sphinx.pretty_print_intersphinx_inventory(f), """
# Sphinx inventory version 2
# Project: X
# Version: 0
# The remainder of this file is compressed using zlib.
inspect_create_intersphinx.Class.a_property py:attribute 2 inspect_create_intersphinx.Class.html#a_property -
inspect_create_intersphinx.Class py:class 2 inspect_create_intersphinx.Class.html -
inspect_create_intersphinx.Class.CLASS_DATA py:data 2 inspect_create_intersphinx.Class.html#CLASS_DATA -
inspect_create_intersphinx.MODULE_DATA py:data 2 inspect_create_intersphinx.html#MODULE_DATA -
inspect_create_intersphinx.Enum py:enum 2 inspect_create_intersphinx.html#Enum -
inspect_create_intersphinx.Enum.ENUM_VALUE py:enumvalue 2 inspect_create_intersphinx.html#Enum-ENUM_VALUE -
inspect_create_intersphinx.Class.class_method py:function 2 inspect_create_intersphinx.Class.html#class_method -
inspect_create_intersphinx.Class.method py:function 2 inspect_create_intersphinx.Class.html#method -
inspect_create_intersphinx.Class.static_method py:function 2 inspect_create_intersphinx.Class.html#static_method -
inspect_create_intersphinx.function py:function 2 inspect_create_intersphinx.html#function -
inspect_create_intersphinx.pybind.overloaded_function py:function 2 inspect_create_intersphinx.pybind.html#overloaded_function -
inspect_create_intersphinx py:module 2 inspect_create_intersphinx.html -
inspect_create_intersphinx.pybind py:module 2 inspect_create_intersphinx.pybind.html -
page std:doc 2 page.html -
index std:special 2 index.html -
modules std:special 2 modules.html -
classes std:special 2 classes.html -
pages std:special 2 pages.html -
""".lstrip())
# Yes, above it should say A documentation page, but it doesn't
try:
import attr
except ImportError:
attr = None
class Attrs(BaseInspectTestCase):
@unittest.skipUnless(attr, "the attr package was not found")
def test(self):
self.run_python({
'PLUGINS': ['m.sphinx'],
'INPUT_DOCS': ['docs.rst'],
'ATTRS_COMPATIBILITY': True
})
self.assertEqual(*self.actual_expected_contents('inspect_attrs.MyClass.html'))
self.assertEqual(*self.actual_expected_contents('inspect_attrs.MyClassAutoAttribs.html'))
self.assertEqual(*self.actual_expected_contents('inspect_attrs.MySlotClass.html'))
class Underscored(BaseInspectTestCase):
def test(self):
self.run_python({
'PLUGINS': ['m.sphinx'],
'INPUT_DOCS': ['docs.rst'],
'M_SPHINX_PARSE_DOCSTRINGS': True
})
self.assertEqual(*self.actual_expected_contents('inspect_underscored.html'))
self.assertEqual(*self.actual_expected_contents('inspect_underscored.Class.html'))
class ValueFormatting(BaseInspectTestCase):
def test(self):
self.run_python({})
self.assertEqual(*self.actual_expected_contents('inspect_value_formatting.html'))
class DuplicateClass(BaseInspectTestCase):
def test(self):
self.run_python({})
self.assertEqual(*self.actual_expected_contents('inspect_duplicate_class.html'))
self.assertEqual(*self.actual_expected_contents('inspect_duplicate_class.sub.html'))
self.assertEqual(*self.actual_expected_contents('inspect_duplicate_class.Bar.html'))
|
python
|
from django.apps import AppConfig
class MagiclinkConfig(AppConfig):
name = 'magiclink'
|
python
|
from flask import Flask
import os
app = Flask(__name__)
healthy = True
@app.route('/')
def hello():
global healthy
if healthy:
return f"Hello from {os.environ['HOST']}!\n"
else:
return "Unhealthy", 503
@app.route('/healthy')
def set_healthy():
global healthy
healthy = True
return f"[{os.environ['HOST']}] Set to healthy\n", 201
@app.route('/unhealthy')
def unhealthy():
global healthy
healthy = False
return f"[{os.environ['HOST']}] Set to unhealthy\n", 201
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000, debug=False)
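# Illustrative manual check (assumes the app is reachable on port 8000):
#   curl localhost:8000/            -> "Hello from <HOST>!"
#   curl localhost:8000/unhealthy   -> subsequent requests to "/" return 503
#   curl localhost:8000/healthy     -> "/" responds normally again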
|
python
|
'''
Integration tests for states.
'''
import unittest as ut
import numpy as np
import dynamite_test_runner as dtr
from dynamite.states import State
class RandomSeed(dtr.DynamiteTestCase):
def test_generation(self):
'''
Make sure that different processors get the same random seed.
'''
from dynamite import config
config.initialize()
from petsc4py import PETSc
comm = PETSc.COMM_WORLD.tompi4py()
seed = State.generate_time_seed()
all_seeds = comm.gather(seed, root = 0)
if comm.rank == 0:
self.assertTrue(all(s == seed for s in all_seeds))
class ToNumpy(dtr.DynamiteTestCase):
def setUp(self):
from petsc4py import PETSc
self.v = PETSc.Vec().create()
self.v.setSizes(PETSc.COMM_WORLD.size)
self.v.setFromOptions()
self.v.set(-1)
self.v[PETSc.COMM_WORLD.rank] = PETSc.COMM_WORLD.rank
self.v.assemblyBegin()
self.v.assemblyEnd()
def test_to_zero(self):
from petsc4py import PETSc
npvec = State._to_numpy(self.v)
if PETSc.COMM_WORLD.rank == 0:
            for i in range(PETSc.COMM_WORLD.size):
self.assertTrue(npvec[i] == i)
else:
self.assertIs(npvec, None)
def test_to_all(self):
from petsc4py import PETSc
npvec = State._to_numpy(self.v, to_all = True)
        for i in range(PETSc.COMM_WORLD.size):
self.assertTrue(npvec[i] == i)
class PetscMethods(dtr.DynamiteTestCase):
'''
Tests that the methods directly included from PETSc function as intended.
'''
def test_norm(self):
state = State()
start, end = state.vec.getOwnershipRange()
state.vec[start:end] = np.array([1]*(end-start))
state.vec.assemblyBegin()
state.vec.assemblyEnd()
self.assertAlmostEqual(state.norm()**2, state.subspace.get_dimension())
def test_normalize(self):
state = State()
start, end = state.vec.getOwnershipRange()
state.vec[start:end] = np.array([1]*(end-start))
state.vec.assemblyBegin()
state.vec.assemblyEnd()
state.normalize()
self.assertTrue(state.norm() == 1)
def test_copy_preallocate(self):
state1 = State()
state2 = State()
start, end = state1.vec.getOwnershipRange()
state1.vec[start:end] = np.arange(start, end)
state1.vec.assemblyBegin()
state1.vec.assemblyEnd()
result = np.ndarray((end-start,), dtype=np.complex128)
state1.copy(state2)
result[:] = state2.vec[start:end]
self.assertTrue(np.array_equal(result, np.arange(start, end)))
def test_copy_exception_L(self):
state1 = State()
state2 = State(L=state1.subspace.L+1)
with self.assertRaises(ValueError):
state1.copy(state2)
def test_copy_nopreallocate(self):
state1 = State()
start, end = state1.vec.getOwnershipRange()
state1.vec[start:end] = np.arange(start, end)
state1.vec.assemblyBegin()
state1.vec.assemblyEnd()
result = np.ndarray((end-start,), dtype=np.complex128)
state2 = state1.copy()
result[:] = state2.vec[start:end]
self.assertTrue(np.array_equal(result, np.arange(start, end)))
def test_scale(self):
vals = [2, 3.14]
for val in vals:
with self.subTest(val=val):
state = State(state='random')
start, end = state.vec.getOwnershipRange()
pre_values = np.ndarray((end-start,), dtype=np.complex128)
pre_values[:] = state.vec[start:end]
state *= val
for i in range(start, end):
self.assertEqual(state.vec[i], val*pre_values[i-start])
def test_scale_divide(self):
val = 3.14
state = State(state='random')
start, end = state.vec.getOwnershipRange()
pre_values = np.ndarray((end-start,), dtype=np.complex128)
pre_values[:] = state.vec[start:end]
state /= val
for i in range(start, end):
self.assertEqual(state.vec[i], (1/val)*pre_values[i-start])
def test_scale_exception_ary(self):
val = np.array([3.1, 4])
state = State()
with self.assertRaises(TypeError):
state *= val
def test_scale_exception_vec(self):
state1 = State()
state2 = State()
with self.assertRaises(TypeError):
state1 *= state2
# TODO: check state setting. e.g. setting an invalid state should fail (doesn't for Full subspace)
if __name__ == '__main__':
dtr.main()
|
python
|
import requests
import os
from dotenv import load_dotenv
from datetime import datetime
load_dotenv()
def send_to_slack(msg: str) -> None:
URL = os.getenv("SLACK_WEBHOOK")
headers = {"content-type": "application/json"}
payload = {
"attachments": [
{
"fallback": "Plain-text summary of the attachment.",
"color": "#fff",
"title": "🚨 Important notification",
"text": msg,
"title_link": f"https://medium.com/@fabianbosler/membership",
"footer": "Made by Fabian with ❤️",
"footer_icon": "https://image.flaticon.com/icons/png/512/2097/2097443.png",
"ts": datetime.utcnow().timestamp(),
}
]
}
requests.post(URL, json=payload, headers=headers)
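# Illustrative usage (assumes SLACK_WEBHOOK is defined in the environment or a
# local .env file):
#
# send_to_slack("Nightly job finished without errors")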
|
python
|
from flask import abort, request
from . import app
from .helpers import render_error_template
import logging
# Catch all route for everything not matched elsewhere
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path): # pragma: no cover
abort(404, "Not Found")
@app.errorhandler(400)
def bad_request(e): # pragma: no cover
logging.exception('An error occurred during a request due to bad request error: %s', request.path)
return render_error_template(error=e, status_code=400)
@app.errorhandler(404)
def page_not_found(e):
return render_error_template(error=e, status_code=404)
@app.errorhandler(500)
def handle_internal_server_error(e): # pragma: no cover
logging.exception('An error occurred during a request due to internal server error: %s', request.path)
return render_error_template(error=e, status_code=500)
@app.errorhandler(502)
def handle_bad_gateway(e): # pragma: no cover
logging.exception('An error occurred during a request due to bad gateway: %s', request.path)
return render_error_template(error=e, status_code=502)
|
python
|
'''
Created on Jun 21, 2016
@author: MarcoXZh
'''
import sys, re
import xml.etree.ElementTree as ET
from colormath.color_objects import sRGBColor, LabColor
from colormath.color_conversions import convert_color
from colormath.color_diff import delta_e_cie2000
from PIL import Image
from ImageComparison import calcSSIM
def sameColor(color1, color2):
'''
@param color1: {String} rgb string such as "rgb(0,0,0)"
@param color2: {String} rgb string such as "rgb(0,0,0)"
@return: {Boolean} True if the two colors are the same; False otherwise
'''
if color1 == "transparent" and color2 == "transparent":
return True
if color1 != "transparent" and color2 != "transparent":
if "," in color1:
rgb1 = re.split(r"\D+", color1)[1:-1]
rgb1 = sRGBColor(int(rgb1[0]), int(rgb1[1]), int(rgb1[2]), is_upscaled=True)
else:
rgb1 = sRGBColor.new_from_rgb_hex(color1)
if "," in color2:
rgb2 = re.split(r"\D+", color2)[1:-1]
rgb2 = sRGBColor(int(rgb2[0]), int(rgb2[1]), int(rgb2[2]), is_upscaled=True)
else:
rgb2 = sRGBColor.new_from_rgb_hex(color2)
return delta_e_cie2000(convert_color(rgb1, LabColor), convert_color(rgb2, LabColor)) < 4.65
pass # if color1 != "transparent" and color2 != "ransparent"
return False
pass # def sameColor(color1, color2)
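# Illustrative examples (assumed behaviour): sameColor("rgb(0,0,0)", "#000000")
# returns True (delta E of 0), while sameColor("rgb(255,0,0)", "rgb(0,0,255)")
# returns False because the CIE2000 difference far exceeds the 4.65 threshold.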
def sameImage(img1, img2):
empty1 = (img1 == "none" or img1 == "")
empty2 = (img2 == "none" or img2 == "")
if empty1 and empty2:
return True
    if empty1 or empty2:  # exactly one of the two is empty at this point
return False
if img1 == img2:
return True
# return False
return calcSSIM(Image.open(img1), Image.open(img2)) < 0.4
pass # def sameImage(img1, img2)
def normalizedHausdorffDistance(node1, node2):
def normailizedDistance_AtoB(nodeA, nodeB):
leftA = 1.0 * int(nodeA.location["x"])
topA = 1.0 * int(nodeA.location["y"])
rightA = 1.0 * int(nodeA.location["x"]) + int(nodeA.size["width"])
bottomA = 1.0 * int(nodeA.location["y"]) + int(nodeB.size["height"])
leftB = 1.0 * int(nodeB.location["x"])
topB = 1.0 * int(nodeB.location["y"])
rightB = 1.0 * int(nodeB.location["x"]) + int(nodeB.size["width"])
bottomB = 1.0 * int(nodeB.location["y"]) + int(nodeB.size["height"])
widthA, widthB = abs(rightA - leftA), abs(rightB - leftB);
heightA, heightB = abs(bottomA - topA), abs(bottomB - topB);
centerXA, centerYA = leftA + 0.5 * widthA, topA + 0.5 * heightA;
centerXB, centerYB = leftB + 0.5 * widthB, topB + 0.5 * heightB
if leftA >= leftB and rightA <= rightB and topA >= topB and bottomA <= bottomB:
return 0.0
if leftA >= leftB and rightA <= rightB:
return (abs(topB - topA) if centerYA < centerYB else abs(bottomA - bottomB)) / heightA
if topA >= topB and bottomA <= bottomB:
return (abs(leftB - leftA) if centerXA < centerXB else abs(rightA - rightB)) / widthA
deltaX = leftB - leftA if centerXA < centerXB else rightA - rightB
deltaY = topB - topA if centerYA < centerYB else bottomA - bottomB
return (deltaX ** 2.0 + deltaY ** 2.0) ** 0.5 / (widthA ** 2.0 + heightA ** 2.0) ** 0.5
pass # def normailizedDistance_AtoB(nodeA, nodeB)
return max(normailizedDistance_AtoB(node1, node2), normailizedDistance_AtoB(node2, node1))
pass # def normalizedHausdorffDistance(node1, node2)
def MergeNodeByGestaltLaws(elements, parent, CSS, debug=False):
'''
@param elements: {List} contains all sibling WebElements, both visible and invisible
@param parent: {ET.Element} the parent of the newly created BT nodes
@param CSS: {List} contains all supported CSS properties
@param debug: {Boolean} (Optional) True to display debugging information; False not
@return : {Tuple} the BT nodes created, as well as the BT-DT map list
'''
children = []
for e in elements:
if e.is_displayed() and int(e.size["height"]) != 0 and int(e.size["width"]) != 0.0:
children.append(e)
pass # for - if
elements = children
if len(elements) == 0:
return [], []
nhds, sames = [], []
for i, sibling in enumerate(elements):
if i == len(elements)-1:
break
node1, node2 = sibling, elements[i+1]
same = (node1.value_of_css_property("position") == node2.value_of_css_property("position")) # Common fate
if not same: # Continuity
same = (int(node1.location["x"]) == int(node2.location["x"]) or \
int(node1.location["y"]) == int(node2.location["y"]) or \
int(node1.location["x"]) + int(node1.size["width"]) == \
int(node2.location["x"]) + int(node2.size["width"]) or \
int(node1.location["y"]) + int(node1.size["height"]) == \
int(node2.location["y"]) + int(node2.size["height"]))
if not same: # Similarity
idx = 0
while idx < len(CSS):
css1 = node1.value_of_css_property(CSS[idx]).strip()
css2 = node2.value_of_css_property(CSS[idx]).strip()
if "color" in CSS[idx] and not sameColor(css1, css2):
break
if "image" in CSS[idx] and not sameImage(css1, css2):
break
if css1 != css2:
break
idx += 1
pass # while idx < len(CSS)
same = (idx >= len(CSS))
pass # if not same
sames.append(same)
nhds.append(normalizedHausdorffDistance(node1, node2)) # Proximity
pass # for i, sibling in enumerate(elements)
if debug and (len(sames) != len(elements) - 1 or len(nhds) != len(elements) - 1):
print "Error: NHDs and SAMEs size issue"
btNodeMapList, btNodes = [], []
curNodeMapList, curNodes = [0], [elements[0]]
if len(elements) != 1:
avg = 1.0 * sum(nhds) / len(nhds)
for i in range(len(nhds)):
if nhds[i] <= avg or sames[i]:
curNodeMapList.append(i+1)
curNodes.append(elements[i+1])
else:
btNodeMapList.append(curNodeMapList)
curNodeMapList = [i+1]
btNodes.append(curNodes)
curNodes = [elements[i+1]]
pass # else - if nhds[i] <= avg or sames[i]
pass # for i in range(len(nhds))
if len(curNodeMapList) > 0:
btNodeMapList.append(curNodeMapList)
btNodes.append(curNodes)
pass # if len(curNodeMapList) > 0
pass # if len(elements) != 1
pXpath = parent.attrib["xpath"] + "/"
for i, nodes in enumerate(btNodes):
btNode = ET.SubElement(parent, "DIV")
node_name = "[%s]" % (",".join(str(x) for x in btNodeMapList[i]))
btNode.set("node_name", node_name)
btNode.set("xpath", pXpath + node_name)
left, top, right, bottom = sys.maxint, sys.maxint, -1, -1
for node in nodes:
l, r = int(node.location["x"]), int(node.location["x"]) + int(node.size["width"])
t, b = int(node.location["y"]), int(node.location["y"]) + int(node.size["height"])
if l < left: left = l
if t < top: top = t
if r > right: right = r
if b > bottom: bottom = b
pass # for node in nodes
btNode.set("left", "%d" % left)
btNode.set("top", "%d" % top)
btNode.set("right", "%d" % right)
btNode.set("bottom", "%d" % bottom)
for style in CSS:
v = nodes[0].value_of_css_property(style)
btNode.set("css_" + style, v)
pass # for style in CSS
btNodes[i] = btNode
pass # for i, nodes in enumerate(btNodes)
return btNodeMapList, btNodes
pass # def MergeNodeByGestaltLaws(elements, parent, CSS, debug=False)
|
python
|
#!/usr/bin/env python3
# Xilinx CoolRunner II XC2C64A characteristics
bits_of_address = 7
bits_of_data = 274
bytes_of_data = (bits_of_data + 7) // 8
bits_in_program_row = bits_of_address + bits_of_data
address_sequence = (0x00, 0x40, 0x60, 0x20, 0x30, 0x70, 0x50, 0x10, 0x18, 0x58, 0x78, 0x38, 0x28, 0x68, 0x48, 0x08, 0x0c, 0x4c, 0x6c, 0x2c, 0x3c, 0x7c, 0x5c, 0x1c, 0x14, 0x54, 0x74, 0x34, 0x24, 0x64, 0x44, 0x04, 0x06, 0x46, 0x66, 0x26, 0x36, 0x76, 0x56, 0x16, 0x1e, 0x5e, 0x7e, 0x3e, 0x2e, 0x6e, 0x4e, 0x0e, 0x0a, 0x4a, 0x6a, 0x2a, 0x3a, 0x7a, 0x5a, 0x1a, 0x12, 0x52, 0x72, 0x32, 0x22, 0x62, 0x42, 0x02, 0x03, 0x43, 0x63, 0x23, 0x33, 0x73, 0x53, 0x13, 0x1b, 0x5b, 0x7b, 0x3b, 0x2b, 0x6b, 0x4b, 0x0b, 0x0f, 0x4f, 0x6f, 0x2f, 0x3f, 0x7f, 0x5f, 0x1f, 0x17, 0x57, 0x77, 0x37, 0x27, 0x67, 0x47, 0x07, 0x05, 0x45,)
def values_list_line_wrap(values):
line_length = 16
return [' '.join(values[n:n+line_length]) for n in range(0, len(values), line_length)]
def dec_lines(bytes):
return values_list_line_wrap(['%d,' % n for n in bytes])
def hex_lines(bytes):
return values_list_line_wrap(['0x%02x,' % n for n in bytes])
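# Illustrative example: hex_lines([1, 2, 255]) returns ['0x01, 0x02, 0xff,'],
# i.e. one wrapped line per 16 values.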
def reverse_bits(n, bit_count):
byte_count = (bit_count + 7) >> 3
# n = int(bytes.hex(), 16)
n_bits = bin(n)[2:].zfill(bit_count)
n_bits_reversed = n_bits[::-1]
n_reversed = int(n_bits_reversed, 2)
return n_reversed.to_bytes(byte_count, byteorder='little')
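# Illustrative example: reverse_bits(0b00001011, 8) mirrors the 8-bit value to
# 0b11010000 and returns it as little-endian bytes, i.e. b'\xd0'.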
def extract_addresses(block):
return tuple([row['address'] for row in block])
def extract_data(block):
return tuple([row['data'] for row in block])
def extract_mask(block):
return tuple([row['mask'] for row in block])
def equal_blocks(block1, block2, mask):
block1_data = extract_data(block1)
block2_data = extract_data(block2)
assert(len(block1_data) == len(block2_data))
assert(len(block1_data) == len(mask))
    for row1, row2, row_mask in zip(block1_data, block2_data, mask):
        differences = (row1 ^ row2) & row_mask
if differences != 0:
return False
return True
def dump_block(rows, endian='little'):
data_bytes = (bits_of_data + 7) >> 3
for row in rows:
print('%02x %s' % (row['address'], row['data'].to_bytes(data_bytes, byteorder=endian).hex()))
def extract_programming_data(commands):
ir_map = {
0x01: 'idcode',
0xc0: 'conld',
0xe8: 'enable',
0xea: 'program',
0xed: 'erase',
0xee: 'verify',
0xf0: 'init',
0xff: 'bypass',
# Other instructions unimplemented and if encountered, will cause tool to crash.
}
ir = None
program = []
verify = []
for command in commands:
if command['type'] == 'xsir':
ir = ir_map[command['tdi']['data'][0]]
if ir == 'program':
program.append([])
if ir == 'verify':
verify.append([])
elif ir == 'verify' and command['type'] == 'xsdrtdo':
tdi_length = command['tdi']['length']
end_state = command['end_state']
if tdi_length == bits_of_address and end_state == 1:
address = int(command['tdi']['data'].hex(), 16)
verify[-1].append({'address': address})
elif tdi_length == bits_of_data and end_state == 0:
mask = int(command['tdo_mask']['data'].hex(), 16)
expected = int(command['tdo_expected']['data'].hex(), 16)
verify[-1][-1]['data'] = expected
verify[-1][-1]['mask'] = mask
elif ir == 'program' and command['type'] == 'xsdrtdo':
tdi_length = command['tdi']['length']
end_state = command['end_state']
if tdi_length == bits_in_program_row and end_state == 0:
tdi = int(command['tdi']['data'].hex(), 16)
address = (tdi >> bits_of_data) & ((1 << bits_of_address) - 1)
data = tdi & ((1 << bits_of_data) - 1)
program[-1].append({
'address': address,
'data': data
})
return {
'program': program,
'verify': verify,
}
def validate_programming_data(programming_data):
# Validate program blocks:
# There should be two extracted program blocks. The first contains the
# the bitstream with done bit(s) not asserted. The second updates the
# "done" bit(s) to finish the process.
assert(len(programming_data['program']) == 2)
# First program phase writes the bitstream to flash (or SRAM) with
# special bit(s) not asserted, so the bitstream is not yet valid.
assert(extract_addresses(programming_data['program'][0]) == address_sequence)
# Second program phase updates a single row to finish the programming
# process.
assert(len(programming_data['program'][1]) == 1)
assert(programming_data['program'][1][0]['address'] == 0x05)
# Validate verify blocks:
# There should be two extracted verify blocks.
assert(len(programming_data['verify']) == 2)
# The two verify blocks should match.
assert(programming_data['verify'][0] == programming_data['verify'][1])
# Check the row address order of the second verify block.
assert(extract_addresses(programming_data['verify'][0]) == address_sequence)
assert(extract_addresses(programming_data['verify'][1]) == address_sequence)
# Checks across programming and verification:
# Check that program data matches data expected during verification.
assert(equal_blocks(programming_data['program'][0], programming_data['verify'][0], extract_mask(programming_data['verify'][0])))
assert(equal_blocks(programming_data['program'][0], programming_data['verify'][1], extract_mask(programming_data['verify'][1])))
def make_sram_program(program_blocks):
program_sram = list(program_blocks[0])
program_sram[-2] = program_blocks[1][0]
return program_sram
#######################################################################
# Command line argument parsing.
#######################################################################
import argparse
parser = argparse.ArgumentParser()
action_group = parser.add_argument_group(title='outputs')
action_group.add_argument('--checksum', action='store_true', help='Print bitstream verification CRC32 value')
action_group.add_argument('--hackrf-data', type=str, help='C data file for HackRF bitstream loading/programming/verification')
action_group.add_argument('--portapack-data', type=str, help='C++ data file for PortaPack bitstream loading/programming/verification')
parser.add_argument('--crcmod', action='store_true', help='Use Python crcmod library instead of built-in CRC32 code')
parser.add_argument('--debug', action='store_true', help='Enable debug output')
parser.add_argument('--xsvf', required=True, type=str, help='HackRF Xilinx XC2C64A CPLD XSVF file containing erase/program/verify phases')
args = parser.parse_args()
#######################################################################
# Generic XSVF parsing phase, produces a tree of commands performed
# against the CPLD.
#######################################################################
with open(args.xsvf, "rb") as f:
from xsvf import XSVFParser
commands = XSVFParser().parse(f, debug=args.debug)
programming_data = extract_programming_data(commands)
validate_programming_data(programming_data)
#######################################################################
# Patch the second programming phase into the first for SRAM
# programming.
#######################################################################
verify_blocks = programming_data['verify']
program_blocks = programming_data['program']
#######################################################################
# Calculate CRC of data read from CPLD during the second verification
# pass, which is after the "done" bit is set. Mask off insignificant
# bits (turning them to zero) and extending rows to the next full byte.
#######################################################################
if args.checksum:
if args.crcmod:
# Use a proper CRC library
import crcmod
crc = crcmod.predefined.Crc('crc-32')
else:
# Use my home-grown, simple, slow CRC32 object to avoid additional
# Python dependencies.
from dumb_crc32 import DumbCRC32
crc = DumbCRC32()
verify_block = verify_blocks[1]
    for row in verify_block:
        valid_data = row['data'] & row['mask']
        row_bytes = valid_data.to_bytes(bytes_of_data, byteorder='little')
        crc.update(row_bytes)
print('0x%s' % crc.hexdigest().lower())
if args.hackrf_data:
program_sram = make_sram_program(program_blocks)
verify_block = verify_blocks[1]
verify_masks = tuple(frozenset(extract_mask(verify_block)))
verify_mask_index = dict([(k, v) for v, k in enumerate(verify_masks)])
verify_mask_row_index = [verify_mask_index[row['mask']] for row in verify_block]
result = []
result.extend((
'/* WARNING: Auto-generated file. Do not edit. */',
'',
'#include <cpld_xc2c.h>',
'',
'const cpld_xc2c64a_program_t cpld_hackrf_program_sram = { {',
))
data_lines = [', '.join(['0x%02x' % n for n in row['data'].to_bytes(bytes_of_data, byteorder='little')]) for row in program_sram]
result.extend(['\t{ { %s } },' % line for line in data_lines])
result.extend((
'} };',
'',
'const cpld_xc2c64a_verify_t cpld_hackrf_verify = {',
'\t.mask = {',
))
verify_mask_lines = [', '.join(['0x%02x' % n for n in mask.to_bytes(bytes_of_data, byteorder='little')]) for mask in verify_masks]
result.extend(['\t\t{ { %s } },' % line for line in verify_mask_lines])
result.extend((
        '\t},',
        '\t.mask_index = {',
))
result.extend(['\t\t%s' % line for line in dec_lines(verify_mask_row_index)])
result.extend((
'\t}',
'};',
'',
'const cpld_xc2c64a_row_addresses_t cpld_hackrf_row_addresses = { {',
))
result.extend(['\t%s' % line for line in hex_lines(address_sequence)])
result.extend((
'} };',
'',
))
with open(args.hackrf_data, 'w') as f:
f.write('\n'.join(result))
if args.portapack_data:
program_sram = make_sram_program(program_blocks)
verify_block = verify_blocks[1]
verify_masks = extract_mask(verify_block)
result = []
result.extend((
'/*',
' * WARNING: Auto-generated file. Do not edit.',
'*/',
'#include "hackrf_cpld_data.hpp"',
'namespace hackrf {',
'namespace one {',
'namespace cpld {',
'const ::cpld::xilinx::XC2C64A::verify_blocks_t verify_blocks { {',
))
data_lines = [', '.join(['0x%02x' % n for n in row['data'].to_bytes(bytes_of_data, byteorder='big')]) for row in program_sram]
mask_lines = [', '.join(['0x%02x' % n for n in mask.to_bytes(bytes_of_data, byteorder='big')]) for mask in verify_masks]
lines = ['{ 0x%02x, { { %s } }, { { %s } } }' % data for data in zip(address_sequence, data_lines, mask_lines)]
result.extend('\t%s,' % line for line in lines)
result.extend((
'} };',
'} /* namespace hackrf */',
'} /* namespace one */',
'} /* namespace cpld */',
'',
))
with open(args.portapack_data, 'w') as f:
f.write('\n'.join(result))
|
python
|
# Copyright 2020 reinforced_scinet (https://github.com/hendrikpn/reinforced_scinet)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from analyzer import AnalyzerSubGridWorld
PLOT_LATENT = False # plot the latent variable's behavior
PLOT_RESULTS = False # plot the performance of the RL agent
PLOT_RESULTS_LOSS = True # plot the performance of pretrainer
PLOT_FIGURE = False # plot the figure from the whitepaper
ENV_ID = 'env2' # the environment id to be used (usually not relevant)
if __name__ == "__main__":
analyzer = AnalyzerSubGridWorld(ENV_ID, load_model=PLOT_LATENT)
if PLOT_LATENT:
analyzer.plot_latent_space()
if PLOT_RESULTS:
analyzer.plot_results_figure(avg_mod=200)
if PLOT_RESULTS_LOSS:
analyzer.plot_loss_figure(avg_mod=100)
if PLOT_FIGURE:
analyzer.plot_selection_figure()
|
python
|
#!/usr/bin/env python
import os
import sys
from chromedriver_py import binary_path
print(
"""This command will fail if you have not run the setup.py script AND "source environment/env.sh" first.\n\n\nIt is installing a headless chrome web browser driver to allow making an image out of the big demo script sessions. It's not required to run the s/w, but nice to have"""
)
os.chdir(f"{os.environ['RGBW_CC_ROOT']}/bin/")
cmd = f"cp {binary_path} {os.environ['RGBW_CC_ROOT']}/bin/chrome; chmod a+x ./chrome; ln -s chrome chromium;"
os.system(cmd)
|
python
|
import unittest
from users import User
class TestUser(unittest.TestCase):
'''
    Test class that defines test cases for the User class behaviours.
    Args:
        unittest.TestCase: TestCase class that helps in creating test cases
'''
# Items up here .......
def setUp(self):
'''
Set up method to run before each test cases.
'''
self.new_users = User("Frank","23456789") # create contact object
def test_init(self):
'''
test_init test case to test if the object is initialized properly
'''
self.assertEqual(self.new_users.username,"Frank")
self.assertEqual(self.new_users.password,"23456789")
def test_save_users(self):
'''
test_save_user test case to test if the users object is saved into
the user list
'''
self.new_users.save_users() # saving the new contact
self.assertEqual(len(User.user_list),1)
# Items up here...
# setup and class creation up here
def tearDown(self):
'''
tearDown method that does clean up after each test case has run.
'''
User.user_list = []
# other test cases here
def test_save_multiple_users(self):
'''
        test_save_multiple_users to check if we can save multiple user
        objects to our user_list
'''
self.new_users.save_users()
test_users = User("users","23456789") # new contact
test_users.save_users()
self.assertEqual(len(User.user_list),2)
# More tests above
def test_delete_users(self):
'''
        test_delete_users to test if we can remove a user from our user list
'''
self.new_users.save_users()
test_users = User("users","23456789") # new contact
test_users.save_users()
self.new_users.delete_users()# Deleting a contact object
self.assertEqual(len(User.user_list),1)
def test_find_users_by_username(self):
'''
test to check if we can find a user by username and display information
'''
self.new_users.save_users()
test_users = User("frank","23456789") # new contact
test_users.save_users()
found_user = User.find_by_username("frank")
self.assertEqual(found_user,test_users)
def test_users_exists(self):
'''
test to check if we can return a Boolean if we cannot find the users.
'''
self.new_users.save_users()
test_users = User("frank","23456789") # new contact
test_users.save_users()
users_exists = User.users_exist("frank")
self.assertTrue(users_exists)
def test_display_all_users(self):
'''
method that returns a list of all users saved
'''
self.assertEqual(User.display_users(),User.user_list)
if __name__ == '__main__':
unittest.main()
|
python
|
import random
import time
import os
import discord
import triggers
import data
import cmd
import tools
################################################################################
lurker_data = dict()
lurker_data['emoji'] = '👀'
lurker_data['min_chance'] = 1
lurker_data['max_chance'] = 10
data.NewGuildEnvAdd('lurker_data', lurker_data)
def GetLurkerData(local_env):
return local_env['lurker_data']
################################################################################
async def OnMessage(local_env, message, normalised_text):
lurker = GetLurkerData(local_env)
min_chance = lurker['min_chance']
max_chance = lurker['max_chance']
emoji = lurker['emoji']
chance = random.randint(min_chance, max_chance)
if tools.Success(chance):
await message.add_reaction(emoji)
await message.remove_reaction(emoji, message.guild.me)
triggers.on_message.append(OnMessage)
################################################################################
async def cmd_chance(ctx, args):
local_env = data.GetGuildEnvironment(ctx.guild)
lurker = GetLurkerData(local_env)
if len(args) != 2: raise RuntimeError("Incorrect number of arguments (min_chance max_chance expected)")
min_chance = int(args[0])
max_chance = int(args[1])
if min_chance < 0 or min_chance > 100: raise RuntimeError("Minimal chance must be within (0,100)")
if max_chance < min_chance or max_chance > 100: raise RuntimeError("Maximal chance must be within (min_chance, 100)")
lurker['min_chance'] = min_chance
lurker['max_chance'] = max_chance
async def cmd_emoji(ctx, args):
local_env = data.GetGuildEnvironment(ctx.guild)
lurker = GetLurkerData(local_env)
if len(args) != 1: raise RuntimeError("Incorrect number of arguments (emoji expected)")
emoji = args[0]
try:
await ctx.message.add_reaction(emoji)
await ctx.message.remove_reaction(emoji, ctx.guild.me)
except Exception as e:
raise RuntimeError(f"Cannot add emoji {emoji}")
lurker['emoji'] = emoji
async def cmd_settings(ctx, args):
local_env = data.GetGuildEnvironment(ctx.guild)
lurker = GetLurkerData(local_env)
output = "Lurker settings:\n" + f"Minimal chance: {lurker['min_chance']}\n" + f"Maximal chance: {lurker['max_chance']}\n" + f"Emoji: {lurker['emoji']}\n"
await ctx.message.reply(output, mention_author=False)
return True
################################################################################
parser = cmd.Parser()
cmd.Add(parser, "chance", cmd_chance, "", "", discord.Permissions.all())
cmd.Add(parser, "emoji", cmd_emoji, "", "", discord.Permissions.all())
cmd.Add(parser, "settings", cmd_settings, "", "")
cmd.Add(cmd.parser, "lurker", parser, "Setup lurker", "")
################################################################################
|
python
|
from enum import auto
from mstrio.utils.enum_helper import AutoName
class RefreshPolicy(AutoName):
ADD = auto()
DELETE = auto()
UPDATE = auto()
UPSERT = auto()
REPLACE = auto()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
from __future__ import unicode_literals, division, absolute_import, print_function
# to work around tk_chooseDirectory not properly returning unicode paths on Windows
# need to use a dialog that can be hacked up to actually return full unicode paths
# originally based on AskFolder from EasyDialogs for Windows but modified to fix it
# to actually use unicode for path
# The original license for EasyDialogs is as follows
#
# Copyright (c) 2003-2005 Jimmy Retzlaff
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""
AskFolder(...) -- Ask the user to select a folder Windows specific
"""
import ctypes
from ctypes.wintypes import LPCWSTR
import ctypes.wintypes as wintypes
__all__ = ['AskFolder']
# Load required Windows DLLs
ole32 = ctypes.windll.ole32
shell32 = ctypes.windll.shell32
user32 = ctypes.windll.user32
# Windows Constants
BFFM_INITIALIZED = 1
BFFM_SETOKTEXT = 1129
BFFM_SETSELECTIONA = 1126
BFFM_SETSELECTIONW = 1127
BIF_EDITBOX = 16
BS_DEFPUSHBUTTON = 1
CB_ADDSTRING = 323
CB_GETCURSEL = 327
CB_SETCURSEL = 334
CDM_SETCONTROLTEXT = 1128
EM_GETLINECOUNT = 186
EM_GETMARGINS = 212
EM_POSFROMCHAR = 214
EM_SETSEL = 177
GWL_STYLE = -16
IDC_STATIC = -1
IDCANCEL = 2
IDNO = 7
IDOK = 1
IDYES = 6
MAX_PATH = 260
OFN_ALLOWMULTISELECT = 512
OFN_ENABLEHOOK = 32
OFN_ENABLESIZING = 8388608
OFN_ENABLETEMPLATEHANDLE = 128
OFN_EXPLORER = 524288
OFN_OVERWRITEPROMPT = 2
OPENFILENAME_SIZE_VERSION_400 = 76
PBM_GETPOS = 1032
PBM_SETMARQUEE = 1034
PBM_SETPOS = 1026
PBM_SETRANGE = 1025
PBM_SETRANGE32 = 1030
PBS_MARQUEE = 8
PM_REMOVE = 1
SW_HIDE = 0
SW_SHOW = 5
SW_SHOWNORMAL = 1
SWP_NOACTIVATE = 16
SWP_NOMOVE = 2
SWP_NOSIZE = 1
SWP_NOZORDER = 4
VER_PLATFORM_WIN32_NT = 2
WM_COMMAND = 273
WM_GETTEXT = 13
WM_GETTEXTLENGTH = 14
WM_INITDIALOG = 272
WM_NOTIFY = 78
# Windows function prototypes
BrowseCallbackProc = ctypes.WINFUNCTYPE(ctypes.c_int, wintypes.HWND, ctypes.c_uint, wintypes.LPARAM, wintypes.LPARAM)
# Windows types
LPCTSTR = ctypes.c_char_p
LPTSTR = ctypes.c_char_p
LPVOID = ctypes.c_voidp
TCHAR = ctypes.c_char
class BROWSEINFO(ctypes.Structure):
_fields_ = [
("hwndOwner", wintypes.HWND),
("pidlRoot", LPVOID),
("pszDisplayName", LPTSTR),
("lpszTitle", LPCTSTR),
("ulFlags", ctypes.c_uint),
("lpfn", BrowseCallbackProc),
("lParam", wintypes.LPARAM),
("iImage", ctypes.c_int)
]
# Utilities
def CenterWindow(hwnd):
desktopRect = GetWindowRect(user32.GetDesktopWindow())
myRect = GetWindowRect(hwnd)
x = width(desktopRect) // 2 - width(myRect) // 2
y = height(desktopRect) // 2 - height(myRect) // 2
user32.SetWindowPos(hwnd, 0,
desktopRect.left + x,
desktopRect.top + y,
0, 0,
SWP_NOACTIVATE | SWP_NOSIZE | SWP_NOZORDER
)
def GetWindowRect(hwnd):
rect = wintypes.RECT()
user32.GetWindowRect(hwnd, ctypes.byref(rect))
return rect
def width(rect):
return rect.right-rect.left
def height(rect):
return rect.bottom-rect.top
def AskFolder(
message=None,
version=None,
defaultLocation=None,
location=None,
windowTitle=None,
actionButtonLabel=None,
cancelButtonLabel=None,
multiple=None):
"""Display a dialog asking the user for select a folder.
modified to use unicode strings as much as possible
returns unicode path
"""
def BrowseCallback(hwnd, uMsg, lParam, lpData):
if uMsg == BFFM_INITIALIZED:
if actionButtonLabel:
label = unicode(actionButtonLabel, errors='replace')
user32.SendMessageW(hwnd, BFFM_SETOKTEXT, 0, label)
if cancelButtonLabel:
label = unicode(cancelButtonLabel, errors='replace')
cancelButton = user32.GetDlgItem(hwnd, IDCANCEL)
if cancelButton:
user32.SetWindowTextW(cancelButton, label)
if windowTitle:
                title = unicode(windowTitle, errors='replace')
user32.SetWindowTextW(hwnd, title)
if defaultLocation:
user32.SendMessageW(hwnd, BFFM_SETSELECTIONW, 1, defaultLocation.replace('/', '\\'))
if location:
x, y = location
desktopRect = wintypes.RECT()
user32.GetWindowRect(0, ctypes.byref(desktopRect))
user32.SetWindowPos(hwnd, 0,
desktopRect.left + x,
desktopRect.top + y, 0, 0,
SWP_NOACTIVATE | SWP_NOSIZE | SWP_NOZORDER)
else:
CenterWindow(hwnd)
return 0
# This next line is needed to prevent gc of the callback
callback = BrowseCallbackProc(BrowseCallback)
browseInfo = BROWSEINFO()
browseInfo.pszDisplayName = ctypes.c_char_p('\0' * (MAX_PATH+1))
browseInfo.lpszTitle = message
browseInfo.lpfn = callback
pidl = shell32.SHBrowseForFolder(ctypes.byref(browseInfo))
if not pidl:
result = None
else:
path = LPCWSTR(u" " * (MAX_PATH+1))
shell32.SHGetPathFromIDListW(pidl, path)
ole32.CoTaskMemFree(pidl)
result = path.value
return result
|
python
|
from django.apps import AppConfig
class EsgConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'esg'
|
python
|
from .base import * # noqa: F403,F401
DEBUG = True
INSTALLED_APPS += [ # noqa ignore=F405
'debug_toolbar',
]
MIDDLEWARE += [ # noqa ignore=F405
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
ALLOWED_HOSTS = [
'0.0.0.0',
'127.0.0.1',
'art-backend.herokuapp.com'
]
INTERNAL_IPS = [
'0.0.0.0',
'127.0.0.1'
]
|
python
|
# Version 3.1; Erik Husby; Polar Geospatial Center, University of Minnesota; 2019
from __future__ import division
import math
import os
import sys
import traceback
from warnings import warn
import numpy as np
from osgeo import gdal_array, gdalconst
from osgeo import gdal, ogr, osr
gdal.UseExceptions()
class RasterIOError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
class UnsupportedDataTypeError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
class InvalidArgumentError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
class UnsupportedMethodError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
#############
# Raster IO #
#############
# Legacy; Retained for quick instruction of useful GDAL raster information extraction methods.
def oneBandImageToArrayZXY_projRef(rasterFile):
"""
Opens a single-band raster image as a NumPy 2D array [Z] and returns it along
with [X, Y] coordinate ranges of pixels in the raster grid as NumPy 1D arrays
and the projection definition string for the raster dataset in OpenGIS WKT format.
"""
if not os.path.isfile(rasterFile):
raise RasterIOError("No such rasterFile: '{}'".format(rasterFile))
ds = gdal.Open(rasterFile, gdal.GA_ReadOnly)
proj_ref = ds.GetProjectionRef()
gt = ds.GetGeoTransform()
xmin, ymax = gt[0], gt[3]
dx, dy = gt[1], gt[5]
X = xmin + np.arange(ds.RasterXSize) * dx
Y = ymax + np.arange(ds.RasterYSize) * dy
Z = ds.GetRasterBand(1).ReadAsArray()
return Z, X, Y, proj_ref
def openRaster(file_or_ds, target_EPSG=None):
"""
Open a raster image as a GDAL dataset object.
Parameters
----------
    file_or_ds : str (file path) or osgeo.gdal.Dataset
        File path of the raster image to open as a GDAL dataset object,
        or the GDAL dataset itself.
    target_EPSG : None or int
        If provided, EPSG code of the spatial reference system the opened
        dataset is reprojected into before being returned.
Returns
-------
ds : osgeo.gdal.Dataset
The raster image as a GDAL dataset.
Notes
-----
    If `file_or_ds` is already a GDAL dataset and `target_EPSG` is None,
    it is returned without modification.
"""
ds = None
if type(file_or_ds) == gdal.Dataset:
ds = file_or_ds
elif isinstance(file_or_ds, str):
if not os.path.isfile(file_or_ds):
raise RasterIOError("No such rasterFile: '{}'".format(file_or_ds))
try:
ds = gdal.Open(file_or_ds, gdal.GA_ReadOnly)
except RuntimeError:
print("RuntimeError when opening file/dataset: {}".format(file_or_ds))
raise
else:
raise InvalidArgumentError("Invalid input type for `file_or_ds`: {}".format(
type(file_or_ds)))
if target_EPSG is not None:
target_sr = osr.SpatialReference()
target_sr.ImportFromEPSG(target_EPSG)
ds = reprojectGDALDataset(ds, target_sr, 'nearest')
return ds
def reprojectGDALDataset(ds_in, sr_out, interp_str):
# FIXME: Finish this function.
# dtype_gdal, promote_dtype = dtype_np2gdal(Z.dtype)
# if promote_dtype is not None:
# Z = Z.astype(promote_dtype)
interp_gdal = interp_str2gdal(interp_str)
mem_drv = gdal.GetDriverByName('MEM')
sr_in = osr.SpatialReference()
# ds_in = mem_drv.Create('', X.size, Y.size, 1, dtype_gdal)
# ds_in.SetGeoTransform((X[0], X[1]-X[0], 0,
# Y[0], 0, Y[1]-Y[0]))
# ds_in.GetRasterBand(1).WriteArray(Z)
ds_out = mem_drv.Create('', ds_in.RasterXSize, ds_in.RasterYSize, 1)
gdal.ReprojectImage(ds_in, ds_out, '', '', interp_gdal)
return ds_out
def gdalReadAsArraySetsmSceneBand(raster_band, make_nodata_nan=False):
scale = raster_band.GetScale()
offset = raster_band.GetOffset()
if scale is None:
scale = 1.0
if offset is None:
offset = 0.0
if scale == 1.0 and offset == 0.0:
array_data = raster_band.ReadAsArray()
if make_nodata_nan:
nodata_val = raster_band.GetNoDataValue()
if nodata_val is not None:
array_data[array_data == nodata_val] = np.nan
else:
if raster_band.DataType != gdalconst.GDT_Int32:
raise RasterIOError(
"Expected GDAL raster band with scale!=1.0 or offset!=0.0 to be of Int32 data type"
" (scaled int LERC_ZSTD-compressed 50cm DEM), but data type is {}".format(
gdal.GetDataTypeName(raster_band.DataType)
)
)
if scale == 0.0:
raise RasterIOError(
"GDAL raster band has invalid parameters: scale={}, offset={}".format(scale, offset)
)
nodata_val = raster_band.GetNoDataValue()
array_data = raster_band.ReadAsArray(buf_type=gdalconst.GDT_Float32)
adjust_where = (array_data != nodata_val) if nodata_val is not None else True
if scale != 1.0:
np.multiply(array_data, scale, out=array_data, where=adjust_where)
if offset != 0.0:
np.add(array_data, offset, out=array_data, where=adjust_where)
if make_nodata_nan:
array_nodata = np.logical_not(adjust_where, out=adjust_where)
array_data[array_nodata] = np.nan
del adjust_where
if array_data is None:
raise RasterIOError("`raster_band.ReadAsArray()` returned None")
return array_data
def getCornerCoords(gt, shape):
"""
Retrieve the georeferenced corner coordinates of a raster image.
The corner coordinates of the raster are calculated from
the rasters's geometric transformation specifications and
the dimensions of the raster.
Parameters
----------
gt : numeric tuple `(top_left_x, dx_x, dx_y, top_left_y, dy_x, dy_y)`
The affine geometric transformation ("geotransform" or "geo_trans")
describing the relationship between pixel coordinates and
georeferenced coordinates.
Pixel coordinates start at `(0, 0)` [row, col] for the top left pixel
in the raster image, increasing down rows and right across columns.
Georeferenced coordinates `(x_geo, y_geo)` are calculated for pixels
in the image by the pixel coordinates `(pix_row, pix_col)` as follows:
        `x_geo = top_left_x + pix_col*dx_x + pix_row*dx_y`
        `y_geo = top_left_y + pix_col*dy_x + pix_row*dy_y`
shape : tuple of positive int, 2 elements
Dimensions of the raster image in (num_rows, num_cols) format.
Returns
-------
corner_coords : ndarray (5, 2)
Georeferenced corner coordinates of the raster image,
in (x, y) coordinate pairs, starting and ending at the
top left corner, clockwise.
"""
top_left_x = np.full((5, 1), gt[0])
top_left_y = np.full((5, 1), gt[3])
top_left_mat = np.concatenate((top_left_x, top_left_y), axis=1)
ysize, xsize = shape
raster_XY_size_mat = np.array([
[0, 0],
[xsize, 0],
[xsize, ysize],
[0, ysize],
[0, 0]
])
gt_mat = np.array([
[gt[1], gt[4]],
[gt[2], gt[5]]
])
return top_left_mat + np.dot(raster_XY_size_mat, gt_mat)
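# Worked example (illustrative values, not from the original source): for a
# north-up raster with geo_trans = (1000.0, 2.0, 0.0, 500.0, 0.0, -2.0) and
# shape = (100, 200) [rows, cols], getCornerCoords returns the clockwise ring
#     (1000.0, 500.0) -> (1400.0, 500.0) -> (1400.0, 300.0) -> (1000.0, 300.0) -> (1000.0, 500.0)
# since x spans 200 px * 2.0 m = 400 m and y spans 100 px * -2.0 m = -200 m.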
def coordsToWkt(point_coords):
"""
Retrieve a WKT polygon representation of an ordered list of
point coordinates.
Parameters
----------
point_coords : 2D sequence of floats/ints like ndarray
of shape (npoints, ndim)
Ordered list of points, each represented by a list of
coordinates that define its position in space.
Returns
-------
wkt : str
WKT polygon representation of `point_coords`.
"""
return 'POLYGON (({}))'.format(
','.join([" ".join([str(c) for c in xy]) for xy in point_coords])
)
def wktToCoords(wkt):
"""
Create an array of point coordinates from a WKT polygon string.
Parameters
----------
wkt : str
WKT polygon representation of points with coordinate data
to be extracted.
Returns
-------
point_coords : ndarray of shape (npoints, ndim)
Ordered list of point coordinates extracted from `wkt`.
"""
coords_list = eval(
wkt.replace('POLYGON ','').replace('(','[').replace(')',']').replace(',','],[').replace(' ',',')
)
return np.array(coords_list)
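# Round-trip sketch (illustrative values): coordsToWkt and wktToCoords are
# inverses for simple rings, e.g.
#     wkt = coordsToWkt([[0, 0], [10, 0], [10, 10], [0, 0]])
#     # -> 'POLYGON ((0 0,10 0,10 10,0 0))'
#     ring = wktToCoords(wkt)
#     # -> array([[ 0,  0], [10,  0], [10, 10], [ 0,  0]])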
def extractRasterData(rasterFile_or_ds, *params):
"""
Extract information from a single-band raster image file.
Parameters
----------
rasterFile_or_ds : str (file path) or osgeo.gdal.Dataset
File path of the raster image to open as a GDAL dataset object,
or the GDAL dataset itself.
params : str
Names of parameters to be extracted from the raster dataset.
'array'/'z' ------ matrix of image pixel values as ndarray (2D)
'shape'----------- pixel shape of image as tuple (nrows, ncols)
'x' -------------- georeferenced grid coordinates corresponding to
each column of pixels in image as ndarray (1D)
'y' -------------- georeferenced grid coordinates corresponding to
each row of pixels in image as ndarray (1D)
'dx' ------------- x length of each pixel in georeferenced pixel-grid coordinates,
corresponding to x[1] - x[0] from 'x' param (dx may be negative)
'dy' ------------- y length of each pixel in georeferenced pixel-grid coordinates,
corresponding to y[1] - y[0] from 'y' param (dy may be negative)
'res' ------------ (absolute) resolution of square pixels in image
(NaN if pixels are not square)
'geo_trans' ------ affine geometric transformation
(see documentation for `getCornerCoords`)
'corner_coords' -- georeferenced corner coordinates of image extent
(see documentation for `getCornerCoords`)
'proj_ref' ------- projection definition string in OpenGIS WKT format
(None if projection definition is not available)
'spat_ref' ------- spatial reference as osgeo.osr.SpatialReference object
(None if spatial reference is not available)
'geom' ----------- polygon geometry of image extent as osgeo.ogr.Geometry object
'geom_sr' -------- polygon geometry of image extent as osgeo.ogr.Geometry object
with spatial reference assigned (if available)
'nodata_val' ----- pixel value that should be interpreted as "No Data"
'dtype_val' ------ GDAL type code for numeric data type of pixel values (integer)
'dtype_str' ------ GDAL type name for numeric data type of pixel values (string)
Returns
-------
value_list : list
List of parameter data with length equal to the number
of parameter name arguments given in the function call.
The order of returned parameter data corresponds directly to
the order of the parameter name arguments.
If only one parameter name argument is provided, the single
datum is returned itself, not in a list.
Examples
--------
>>> f = 'my_raster.tif'
>>> image_data, resolution = extractRasterData(f, 'array', 'res')
>>> resolution
2
>>> extractRasterData(f, 'dy')
-2
"""
ds = openRaster(rasterFile_or_ds)
pset = set(params)
invalid_pnames = pset.difference({'ds', 'shape', 'z', 'array', 'x', 'y',
'dx', 'dy', 'res', 'geo_trans', 'corner_coords',
'proj_ref', 'spat_ref', 'geom', 'geom_sr',
'nodata_val', 'dtype_val', 'dtype_str'})
if invalid_pnames:
raise InvalidArgumentError("Invalid parameter(s) for extraction: {}".format(invalid_pnames))
if pset.intersection({'z', 'array', 'nodata_val', 'dtype_val', 'dtype_str'}):
band = ds.GetRasterBand(1)
if pset.intersection({'z', 'array'}):
try:
array_data = gdalReadAsArraySetsmSceneBand(band)
except RasterIOError as e:
traceback.print_exc()
print("Error reading raster: {}".format(rasterFile_or_ds))
raise
if pset.intersection({'shape', 'x', 'y', 'corner_coords', 'geom', 'geom_sr'}):
shape = (ds.RasterYSize, ds.RasterXSize) if 'array_data' not in vars() else array_data.shape
if pset.intersection({'x', 'y', 'dx', 'dy', 'res', 'geo_trans', 'corner_coords', 'geom', 'geom_sr'}):
geo_trans = ds.GetGeoTransform()
if pset.intersection({'proj_ref', 'spat_ref', 'geom_sr'}):
proj_ref = ds.GetProjectionRef()
if pset.intersection({'corner_coords', 'geom', 'geom_sr'}):
corner_coords = getCornerCoords(geo_trans, shape)
if pset.intersection({'spat_ref', 'geom_sr'}):
spat_ref = osr.SpatialReference(proj_ref) if proj_ref is not None else None
if pset.intersection({'geom', 'geom_sr'}):
geom = ogr.Geometry(wkt=coordsToWkt(corner_coords))
if pset.intersection({'nodata_val'}):
nodata_val = band.GetNoDataValue()
if pset.intersection({'dtype_val', 'dtype_str'}):
dtype_val = band.DataType
if pset.intersection({'dtype_str'}):
dtype_str = gdal.GetDataTypeName(dtype_val)
value_list = []
for pname in params:
pname = pname.lower()
value = None
if pname == 'ds':
value = ds
elif pname == 'shape':
value = shape
elif pname in ('z', 'array'):
value = array_data
elif pname == 'x':
value = geo_trans[0] + np.arange(shape[1]) * geo_trans[1]
elif pname == 'y':
value = geo_trans[3] + np.arange(shape[0]) * geo_trans[5]
elif pname == 'dx':
value = abs(geo_trans[1])
elif pname == 'dy':
value = abs(geo_trans[5])
elif pname == 'res':
value = abs(geo_trans[1]) if abs(geo_trans[1]) == abs(geo_trans[5]) else np.nan
elif pname == 'geo_trans':
value = geo_trans
elif pname == 'corner_coords':
value = corner_coords
elif pname == 'proj_ref':
value = proj_ref
elif pname == 'spat_ref':
value = spat_ref
elif pname == 'geom':
value = geom
elif pname == 'geom_sr':
value = geom.Clone() if 'geom' in params else geom
if spat_ref is not None:
value.AssignSpatialReference(spat_ref)
else:
warn("Spatial reference could not be extracted from raster dataset, "
"so extracted geometry has not been assigned a spatial reference.")
elif pname == 'nodata_val':
value = nodata_val
elif pname == 'dtype_val':
value = dtype_val
elif pname == 'dtype_str':
value = dtype_str
value_list.append(value)
if len(value_list) == 1:
value_list = value_list[0]
return value_list
# Legacy; Retained for a visual aid of equivalences between NumPy and GDAL data types.
# Use gdal_array.NumericTypeCodeToGDALTypeCode to convert from NumPy to GDAL data type.
def dtype_np2gdal_old(dtype_in, form_out='gdal', force_conversion=False):
"""
Converts between input NumPy data type (dtype_in may be either
NumPy 'dtype' object or already a string) and output GDAL data type.
If form_out='numpy', the corresponding NumPy 'dtype' object will be
returned instead, allowing for quick lookup by string name.
If the third element of a dtype_dict conversion tuple is zero,
that conversion of NumPy to GDAL data type is not recommended. However,
the conversion may be forced with the argument force_conversion=True.
"""
dtype_dict = { # ---GDAL LIMITATIONS---
'bool' : (np.bool, gdal.GDT_Byte, 0), # GDAL no bool/logical/1-bit
'int8' : (np.int8, gdal.GDT_Byte, 1), # GDAL byte is unsigned
'int16' : (np.int16, gdal.GDT_Int16, 1),
'int32' : (np.int32, gdal.GDT_Int32, 1),
'intc' : (np.intc, gdal.GDT_Int32, 1), # np.intc ~= np.int32
'int64' : (np.int64, gdal.GDT_Int32, 0), # GDAL no int64
'intp' : (np.intp, gdal.GDT_Int32, 0), # intp ~= np.int64
'uint8' : (np.uint8, gdal.GDT_Byte, 1),
'uint16' : (np.uint16, gdal.GDT_UInt16, 1),
'uint32' : (np.uint32, gdal.GDT_UInt32, 1),
'uint64' : (np.uint64, gdal.GDT_UInt32, 0), # GDAL no uint64
'float16' : (np.float16, gdal.GDT_Float32, 1), # GDAL no float16
'float32' : (np.float32, gdal.GDT_Float32, 1),
'float64' : (np.float64, gdal.GDT_Float64, 1),
'complex64' : (np.complex64, gdal.GDT_CFloat32, 1),
'complex128': (np.complex128, gdal.GDT_CFloat64, 1),
}
errmsg_unsupported_dtype = "Conversion of NumPy data type '{}' to GDAL is not supported".format(dtype_in)
try:
dtype_tup = dtype_dict[str(dtype_in).lower()]
except KeyError:
raise UnsupportedDataTypeError("No such NumPy data type in lookup table: '{}'".format(dtype_in))
if form_out.lower() == 'gdal':
if dtype_tup[2] == 0:
if force_conversion:
print(errmsg_unsupported_dtype)
else:
raise UnsupportedDataTypeError(errmsg_unsupported_dtype)
dtype_out = dtype_tup[1]
elif form_out.lower() == 'numpy':
dtype_out = dtype_tup[0]
else:
raise UnsupportedDataTypeError("The following output data type format is not supported: '{}'".format(form_out))
return dtype_out
def dtype_np2gdal(dtype_np):
# TODO: Write docstring.
if dtype_np == np.bool:
promote_dtype = np.uint8
elif dtype_np == np.int8:
promote_dtype = np.int16
elif dtype_np == np.float16:
promote_dtype = np.float32
else:
promote_dtype = None
if promote_dtype is not None:
warn("NumPy array data type ({}) does not have equivalent GDAL data type and is not "
"supported, but can be safely promoted to {}".format(dtype_np, promote_dtype(1).dtype))
dtype_np = promote_dtype
dtype_gdal = gdal_array.NumericTypeCodeToGDALTypeCode(dtype_np)
if dtype_gdal is None:
raise InvalidArgumentError("NumPy array data type ({}) does not have equivalent "
"GDAL data type and is not supported".format(dtype_np))
return dtype_gdal, promote_dtype
def interp_str2gdal(interp_str):
# TODO: Write docstring.
interp_choices = ('nearest', 'linear', 'cubic', 'spline', 'lanczos', 'average', 'mode')
interp_dict = {
'nearest' : gdal.GRA_NearestNeighbour,
'linear' : gdal.GRA_Bilinear,
'bilinear' : gdal.GRA_Bilinear,
'cubic' : gdal.GRA_Cubic,
'bicubic' : gdal.GRA_Cubic,
'spline' : gdal.GRA_CubicSpline,
'lanczos' : gdal.GRA_Lanczos,
'average' : gdal.GRA_Average,
'mode' : gdal.GRA_Mode,
}
if interp_str not in interp_dict:
raise UnsupportedMethodError("`interp` must be one of {}, but was '{}'".format(interp_choices, interp_str))
return interp_dict[interp_str]
def saveArrayAsTiff(array, dest,
X=None, Y=None, proj_ref=None, geotrans_rot_tup=(0, 0),
nodata_val='like_raster', dtype_out=None, nbits=None, co_args='compress',
like_raster=None):
"""
Save a NumPy 2D array as a single-band raster image in GeoTiff format.
Parameters
----------
array : ndarray, 2D
Array containing the values of pixels to be saved in the image,
one value per pixel.
dest : str (file path)
File path where the raster image will be saved.
If a file already exists at this path, it will be overwritten.
X : None or (ndarray, 1D)
Grid coordinates corresponding to all columns in the raster image,
from left to right, such that `X[j]` specifies the x-coordinate for
all pixels in `array[:, j]`.
If None, `like_raster` must be provided.
Y : None or (ndarray, 1D)
Grid coordinates corresponding to all rows in the raster image,
from top to bottom, such that `Y[i]` specifies the y-coordinate for
all pixels in `array[i, :]`
If None, `like_raster` must be provided.
proj_ref : None, str (WKT or Proj4), or osr.SpatialReference
Projection reference of the raster image to be saved, specified as
either a WKT/Proj4 string or an osr.SpatialReference object.
If None, `like_raster` must be provided.
geotrans_rot_tup : None or tuple (2 floats)
The third and fifth elements of the geometric transformation tuple
that specify rotation from north-up of the raster image to be saved.
If a north-up output is desired, let both elements be zero.
See documentation for `getCornerCoords` for more information on the
geometric transformation tuple.
If None, `like_raster` must be provided.
nodata_val : 'like_raster', None, or int/float
Non-NaN value in `array` that will be classified as "no data" in the
output raster image.
If 'like_raster', allow this value to be set equal to the nodata value
of `like_raster`.
dtype_out : data type as str (e.g. 'uint16'), NumPy data type
(e.g. np.uint16), or numpy.dtype object (e.g. from arr.dtype)
Numeric type of values in the output raster image.
        If 'nbits', write output raster image in an unsigned integer GDAL
data type with ['NBITS=n'] option in driver, where n is set to `nbits`
if `nbits` is not None. If `nbits` is None, n is calculated to be only
as large as necessary to capture the maximum value of `array`, and the
output array data type is unsigned integer of minimal bitdepth.
nbits : None or 1 <= int <= 32
Only applies when `dtype_out='nbits'`.
co_args : None, 'compress', or list of '[ARG_NAME]=[ARG_VALUE]' strings
Creation Option arguments to pass to the `Create` method of the GDAL
Geotiff driver that instantiates the output raster dataset.
If 'compress', the following default arguments are used:
'BIGTIFF=IF_SAFER'
'COMPRESS=LZW'
'TILED=YES'
The 'NBITS=X' argument may not be used -- that is set by the `nbits`
argument for this function.
A list of Creation Option arguments may be found here: [1].
like_raster : None, str (file path), or osgeo.gdal.Dataset
File path or GDAL dataset for a raster image of identical dimensions,
geographic location/extent, spatial reference, and nodata value as
the raster image that will be saved.
If provided, `X`, `Y`, `proj_ref`, and `geotrans_rot_tup` should not
be provided, as these metrics will be taken from the like raster.
Returns
-------
None
Notes
-----
The OSGeo `gdal_translate` program [1] must be callable by name
from the current working directory at the time this function is called.
References
----------
.. [1] https://www.gdal.org/frmt_gtiff.html
"""
spat_ref = None
projstr_wkt = None
projstr_proj4 = None
if proj_ref is None:
pass
elif type(proj_ref) == osr.SpatialReference:
spat_ref = proj_ref
elif isinstance(proj_ref, str):
spat_ref = osr.SpatialReference()
if proj_ref.lstrip().startswith('PROJCS'):
projstr_wkt = proj_ref
spat_ref.ImportFromWkt(projstr_wkt)
elif proj_ref.lstrip().startswith('+proj='):
projstr_proj4 = proj_ref
spat_ref.ImportFromProj4(projstr_proj4)
else:
raise InvalidArgumentError("`proj_ref` of string type has unknown format: '{}'".format(proj_ref))
else:
raise InvalidArgumentError("`proj_ref` must be a string or osr.SpatialReference object, "
"but was of type {}".format(type(proj_ref)))
dtype_is_nbits = (dtype_out is not None and type(dtype_out) is str and dtype_out == 'nbits')
if co_args is not None and co_args != 'compress':
if type(co_args) != list:
raise InvalidArgumentError("`co_args` must be a list of strings, but was {}".format(co_args))
if dtype_is_nbits:
for arg in co_args:
if arg.startswith('NBITS='):
raise InvalidArgumentError("`co_args` cannot include 'NBITS=X' argument. "
"Please use this function's `nbits` argument.")
shape = array.shape
dtype_gdal = None
if like_raster is not None:
ds_like = openRaster(like_raster)
if shape[0] != ds_like.RasterYSize or shape[1] != ds_like.RasterXSize:
raise InvalidArgumentError("Shape of `like_rasterFile` '{}' ({}, {}) does not match "
"the shape of `array` {}".format(
like_raster, ds_like.RasterYSize, ds_like.RasterXSize, shape)
)
geo_trans = extractRasterData(ds_like, 'geo_trans')
if proj_ref is None:
spat_ref = extractRasterData(ds_like, 'spat_ref')
if nodata_val == 'like_raster':
nodata_val = extractRasterData(ds_like, 'nodata_val')
if dtype_out is None:
dtype_gdal = extractRasterData(ds_like, 'dtype_val')
else:
if shape[0] != Y.size or shape[1] != X.size:
raise InvalidArgumentError("Lengths of [`Y`, `X`] grid coordinates ({}, {}) do not match "
"the shape of `array` ({})".format(Y.size, X.size, shape))
geo_trans = (X[0], X[1]-X[0], geotrans_rot_tup[0],
Y[0], geotrans_rot_tup[1], Y[1]-Y[0])
if nodata_val == 'like_raster':
nodata_val = None
if dtype_out is not None:
if dtype_is_nbits:
if nbits is None:
nbits = int(math.floor(math.log(float(max(1, np.max(array))), 2)) + 1)
elif type(nbits) != int or nbits < 1:
raise InvalidArgumentError("`nbits` must be an integer in the range [1,32]")
if nbits <= 8:
dtype_gdal = gdal.GDT_Byte
elif nbits <= 16:
dtype_gdal = gdal.GDT_UInt16
elif nbits <= 32:
dtype_gdal = gdal.GDT_UInt32
else:
raise InvalidArgumentError("Output array requires {} bits of precision, "
"but GDAL supports a maximum of 32 bits")
else:
if type(dtype_out) is str:
dtype_out = eval('np.{}'.format(dtype_out.lower()))
dtype_gdal = gdal_array.NumericTypeCodeToGDALTypeCode(dtype_out)
if dtype_gdal is None:
raise InvalidArgumentError("Output array data type ({}) does not have equivalent "
"GDAL data type and is not supported".format(dtype_out))
dtype_in = array.dtype
dtype_in_gdal, promote_dtype = dtype_np2gdal(dtype_in)
if promote_dtype is not None:
array = array.astype(promote_dtype)
dtype_in = promote_dtype(1).dtype
if dtype_out is not None:
if dtype_is_nbits:
if not np.issubdtype(dtype_in, np.unsignedinteger):
warn("Input array data type ({}) is not unsigned and may be incorrectly saved "
"with n-bit precision".format(dtype_in))
elif dtype_in != dtype_out:
warn("Input array NumPy data type ({}) differs from output "
"NumPy data type ({})".format(dtype_in, dtype_out(1).dtype))
elif dtype_gdal is not None and dtype_gdal != dtype_in_gdal:
warn("Input array GDAL data type ({}) differs from output "
"GDAL data type ({})".format(gdal.GetDataTypeName(dtype_in_gdal),
gdal.GetDataTypeName(dtype_gdal)))
if dtype_gdal is None:
dtype_gdal = dtype_in_gdal
sys.stdout.write("Saving Geotiff {} ...".format(dest))
sys.stdout.flush()
# Create the output raster dataset in memory.
if co_args is None:
co_args = []
if co_args == 'compress':
co_args = []
co_args.extend(['BIGTIFF=IF_SAFER']) # Will create BigTIFF
# if the resulting file *might* exceed 4GB.
co_args.extend(['COMPRESS=LZW']) # Do LZW compression on output image.
co_args.extend(['TILED=YES']) # Force creation of tiled TIFF files.
if dtype_is_nbits:
co_args.extend(['NBITS={}'.format(nbits)])
if spat_ref is not None:
if projstr_wkt is None:
projstr_wkt = spat_ref.ExportToWkt()
if projstr_proj4 is None:
projstr_proj4 = spat_ref.ExportToProj4()
sys.stdout.write(" GDAL data type: {}, NoData value: {}, Creation Options: {}, Projection (Proj4): {} ...".format(
gdal.GetDataTypeName(dtype_gdal), nodata_val, ' '.join(co_args) if co_args else None, projstr_proj4.strip())
)
sys.stdout.flush()
sys.stdout.write(" creating file ...")
sys.stdout.flush()
driver = gdal.GetDriverByName('GTiff')
ds_out = driver.Create(dest, shape[1], shape[0], 1, dtype_gdal, co_args)
ds_out.SetGeoTransform(geo_trans)
if projstr_wkt is not None:
ds_out.SetProjection(projstr_wkt)
band = ds_out.GetRasterBand(1)
if nodata_val is not None:
band.SetNoDataValue(nodata_val)
sys.stdout.write(" writing array values ...")
sys.stdout.flush()
band.WriteArray(array)
# Write the output raster dataset to disk.
sys.stdout.write(" finishing file ...")
sys.stdout.flush()
ds_out = None # Dereference dataset to initiate write to disk of intermediate image.
sys.stdout.write(" done!\n")
sys.stdout.flush()
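# Hedged usage sketch (illustrative values, not from the original source):
# saving a small float32 array on a 2 m grid with LZW compression. The EPSG
# code and output file name below are placeholders.
#
#     Z = np.zeros((100, 200), dtype=np.float32)
#     X = 1000.0 + np.arange(200) * 2.0
#     Y = 500.0 - np.arange(100) * 2.0
#     sr = osr.SpatialReference()
#     sr.ImportFromEPSG(3413)
#     saveArrayAsTiff(Z, 'example_out.tif', X=X, Y=Y, proj_ref=sr,
#                     nodata_val=-9999, dtype_out=np.float32, co_args='compress')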
|
python
|
import random
print('====================== \033[35mBEM-VINDO AO JOGO DA ADIVINHAÇÃO\033[m ======================')
print('Tente adivinhar o número entre 0 e 10 que eu estou pensando')
computador = random.randint(0, 10)
palpites = 0
acertou = False
while not acertou:
jogador = int(input('Qual é a sua tentativa? '))
palpites += 1
if jogador == computador:
acertou = True
else:
if jogador < computador:
print('Mais... Tente mais uma vez.')
elif jogador > computador:
print('Menos... Tente mais uma vez.')
print('\033[32mVocê venceu, PARABÉNS\033[m')
print('Foram necessárias \033[37m{}\033[m tentativas para me vencer '.format(palpites))
|
python
|
# -*- coding: utf-8 -*-
# Imports
import json
import discord
import random
import datetime
import asyncio
client = discord.Client()
# Readiness Indicator
@client.event
async def on_ready():
print("The bot is ready!")
await client.change_presence(game=discord.Game(name="roulette with your money"))
# Reminder Message
# CURRENTLY NOT WORKING AS INTENDED
# Intended to send the message every Monday
@client.event
async def background_loop():
await client.wait_until_ready()
while not client.is_closed:
        if datetime.datetime.today().weekday() == 0:
channel = client.get_channel("397349083318059010")
message = "Don't forget downtime!"
await client.send_message(channel, message)
await asyncio.sleep(604800)
# Main Functionality
@client.event
async def on_message(message):
with open('banks2.txt') as bankin:
bank = json.load(bankin)
# Banking Functions
if message.content.startswith('/bank'):
user = str(message.author)
operation = message.content.split()[1]
if operation in ['add', 'subtract']:
metal = message.content.split()[2]
amount = message.content.split()[3]
if user in bank.keys():
if operation == 'add':
if metal == 'gold':
bank[user][metal] += int(amount)
elif metal == 'silver':
bank[user][metal] += int(amount)
elif metal == 'copper':
bank[user][metal] += int(amount)
await client.send_message(message.channel,
f'You have deposited {amount} {metal}. You now have {bank[user][metal]} '
f'{metal} in your account.')
elif operation == 'subtract':
if metal == 'gold':
bank[user][metal] -= int(amount)
elif metal == 'silver':
bank[user][metal] -= int(amount)
elif metal == 'copper':
bank[user][metal] -= int(amount)
await client.send_message(message.channel,
f'You have withdrawn {amount} {metal}. You now have {bank[user][metal]} '
f'{metal} in your account.')
elif operation == ('balance'):
await client.send_message(message.channel, f'Your balance is {bank[user]["gold"]} gold, '
f'{bank[user]["silver"]} silver, and {bank[user]["copper"]}'
f' copper.')
elif operation == ('clear'):
bank[user]["gold"] = 0
bank[user]["silver"] = 0
bank[user]["copper"] = 0
await client.send_message(message.channel, 'You have cleared your balance.')
elif operation == ('condense'):
silver, copper = divmod(bank[user]["copper"], 10)
bank[user]["silver"] += silver
bank[user]["copper"] = copper
gold, silver = divmod(bank[user]["silver"], 10)
bank[user]["gold"] += gold
bank[user]["silver"] = silver
await client.send_message(message.channel,
f'Your balance has been condensed to {bank[user]["gold"]} gold, '
f'{bank[user]["silver"]} silver, and {bank[user]["copper"]} copper.')
else:
bank.update({user: {'gold': 0, 'silver': 0, 'copper': 0}})
await client.send_message(message.channel,
'You did not have an account. You now have an account with a balance of 0')
with open('banks2.txt', 'w') as bankout:
json.dump(bank, bankout)
# Dice Rolling Functions
if message.content.startswith('/roll'):
if "-" in message.content:
operator = "-"
elif "+" in message.content:
operator = "+"
else:
operator = str()
if "-" in message.content:
bonus = 0 - int(message.content.split('-')[1])
elif "+" in message.content:
bonus = 0 + int(message.content.split('+')[1])
else:
bonus = int(0)
if operator != "":
sidesEnd = message.content.find(operator)
elif operator == "":
sidesEnd = len(message.content)
numberofDice = message.content[message.content.find('/roll') + 5:message.content.find('d')]
numberofSides = message.content[message.content.find('d') + 1:sidesEnd]
rolls = 0
rawrolls = []
bonusRolls = []
dice = 0
try:
dice = int(numberofDice)
except ValueError:
dice = 1
pass
while dice > rolls:
rawrolls.append(random.randint(1, int(numberofSides)))
rolls += 1
for r in rawrolls:
bonusRolls.append(r + bonus)
await client.send_message(message.channel,
f'You rolled **{rawrolls}**. Your bonus of **[{bonus}]** brings that to **{bonusRolls}'
f'**.')
elif message.content.startswith('/r'):
if "-" in message.content:
operator = "-"
elif "+" in message.content:
operator = "+"
else:
operator = str()
if "-" in message.content:
bonus = 0 - int(message.content.split('-')[1])
elif "+" in message.content:
bonus = 0 + int(message.content.split('+')[1])
else:
bonus = int(0)
if operator != "":
sidesEnd = message.content.find(operator)
elif operator == "":
sidesEnd = len(message.content)
numberofDice = message.content[message.content.find('/r') + 2:message.content.find('d')]
numberofSides = message.content[message.content.find('d') + 1:sidesEnd]
rolls = 0
rawrolls = []
bonusRolls = []
dice = 0
try:
dice = int(numberofDice)
except ValueError:
dice = 1
pass
while dice > rolls:
rawrolls.append(random.randint(1, int(numberofSides)))
rolls += 1
for r in rawrolls:
bonusRolls.append(r + bonus)
await client.send_message(message.channel,
f'You rolled **{rawrolls}**. Your bonus of **[{bonus}]** brings that to **{bonusRolls}'
f'**.')
# Help Section
if message.content.startswith('/help'):
with open('helps.txt') as file:
helps = json.load(file)
for h in helps:
await client.send_message(message.channel, f'**{h}** - {helps[h]}\n')
# Command List
if message.content.startswith('/commands'):
with open('commands.txt') as file:
commands = json.load(file)
for c in commands:
await client.send_message(message.channel, f'**{c}** - {commands[c]}\n')
# Communal Banking
if message.content.startswith('/communal'):
with open ('communalbank.txt') as communalIn:
communal = json.load(communalIn)
operation = message.content.split()[1]
if operation in ['add', 'subtract']:
metal = message.content.split()[2]
amount = message.content.split()[3]
if operation == 'add':
if metal == 'gold':
communal[metal] += int(amount)
elif metal == 'silver':
communal[metal] += int(amount)
elif metal == 'copper':
communal[metal] += int(amount)
await client.send_message(message.channel, f'You have deposited {amount} {metal} in the communal account')
elif operation == ('balance'):
await client.send_message(message.channel, f'The Communal Balance is {communal["gold"]} gold, {communal["silver"]} silver, and {communal["copper"]} copper')
elif operation == ('condense'):
silver, copper = divmod(communal["copper"], 10)
communal["silver"] += silver
communal["copper"] = copper
gold, silver = divmod(communal["silver"], 10)
communal["gold"] += gold
communal["silver"] = silver
await client.send_message(message.channel, f'The Communal Balance is {communal["gold"]} gold, {communal["silver"]} silver, and {communal["copper"]} copper')
with open ('communalbank.txt', 'w') as communalOut:
json.dump(communal, communalOut)
# Run the bot
token = 'Your Token'
client.run(token)
|
python
|
import re
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
from markdown import markdown
register = template.Library()
@register.filter("markdown")
@stringfilter
def markdown_filter(value):
return mark_safe(markdown(value))
@register.tag(name="markdown")
def do_markdown(parser, token):
nodelist = parser.parse(("endmarkdown",))
parser.delete_first_token()
m = re.search(r"as (?P<var_name>\w+)$", token.contents)
var_name = None
if m:
var_name = m.group("var_name")
return MarkdownNode(nodelist, var_name)
class MarkdownNode(template.Node):
def __init__(self, nodelist, var_name=None):
self.nodelist = nodelist
self.var_name = var_name
def render(self, context):
value = markdown_filter(self.nodelist.render(context))
if self.var_name:
context[self.var_name] = value
return ""
return value
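# Hedged usage sketch (assumes this module is registered as a template tag
# library and loaded in the template, e.g. {% load <library_name> %}; the
# library name depends on the file's location and is not shown here):
#
#     {{ post.body|markdown }}
#
#     {% markdown %}
#     # Heading
#     Some *markdown* text.
#     {% endmarkdown %}
#
#     {% markdown as rendered %}...{% endmarkdown %} stores the HTML in `rendered`.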
|
python
|
"""
This file is part of Advent of Code 2019.
Coded by: Samuel Michaels ([email protected])
11 December 2019
NO COPYRIGHT
This work is dedicated to the public domain. All rights have been
waived worldwide under copyright law, including all related and
neighboring rights, to the extent allowed by law.
You may copy, modify, distribute, and perform the work, even for
commercial purposes, all without asking permission. See the
accompanying COPYRIGHT document.
"""
import io
import sys
import unittest
from day9 import ElfCPU, InvalidInstructionError, ProtectionFaultError, InputInterrupt, OutputInterrupt, OutputOverflow
from day9 import InputOverflow
from unittest.mock import patch
class TestElfCPU(unittest.TestCase):
def test_load_string_types(self):
"""
Checks for TypeError
"""
e = ElfCPU()
with self.assertRaises(TypeError):
# noinspection PyTypeChecker
e.load_string(0)
e.load_string('1,2,3,4')
def test_peek(self):
"""
Tests address range for peek
"""
e = ElfCPU()
e.load_string('0,1,2,3,4,5,6,7,8,9')
# TypeError
with self.assertRaises(TypeError):
# noinspection PyTypeChecker
e.peek('x')
# Above memory range
with self.assertRaises(ValueError):
e.peek(2**65)
# Below memory range
with self.assertRaises(ValueError):
e.peek(-1)
self.assertEqual(e.peek(0), 0)
self.assertEqual(e.peek(9), 9)
def test_poke(self):
"""
Tests address range and data for poke
"""
e = ElfCPU()
e.load_string('0,1,2,3,4,5,6,7,8,9')
# TypeError
with self.assertRaises(TypeError):
# noinspection PyTypeChecker
e.poke('x', 2)
# Above memory range
with self.assertRaises(ValueError):
e.poke(2**65, 2)
# Below memory range
with self.assertRaises(ValueError):
e.poke(-1, 2)
# Value
with self.assertRaises(ValueError):
e.poke(0, 2**64+1)
self.assertEqual(e.poke(0, 99), 99)
self.assertEqual(e.poke(9, 88), 88)
self.assertEqual(e.peek(0), 99)
self.assertEqual(e.peek(9), 88)
def test_invalid_instr(self):
"""
Tests for invalid op code
"""
e = ElfCPU()
e.load_string('123456789')
with self.assertRaises(InvalidInstructionError):
e.execute()
def test_op_add(self):
"""
Tests ADD op code
[dst]:=[a]+[b]
"""
e = ElfCPU()
# Invalid address 123456789 for a
e.load_string('1,123456789,0,0')
with self.assertRaises(ProtectionFaultError):
e.step()
# Invalid address 123456789 for b
e.load_string('1,0,123456789,0')
with self.assertRaises(ProtectionFaultError):
e.step()
# Invalid address 123456789 for dst
e.load_string('1,0,0,123456789')
with self.assertRaises(ProtectionFaultError):
e.step()
# 1 + 1 = 2 @ address 0
e.load_string('1,0,0,0,99')
e.step()
self.assertEqual(e.peek(0), 2)
# 2**64 + 1 = 1 @ address 0 (overflow and wrap)
#e.load_string('1,5,6,0,99,'+str(2**64)+',1')
#e.step()
#self.assertEqual(e.peek(0), 1)
# [dst]:=a+[b]
e.load_string('101,44,5,6,99,2,6')
e.execute()
self.assertEqual(e.peek(6), 46)
# [dst]:=[a]+b
e.load_string('1001,5,50,6,99,2,6')
e.execute()
self.assertEqual(e.peek(6), 52)
# [dst]:=a+b
e.load_string('1101,5,5,6,99,2,6')
e.execute()
self.assertEqual(e.peek(6), 10)
# [dst]:=r[a]+b
e.load_string('109,10,1201,0,5,7,99,7,3,3,5')
e.execute()
self.assertEqual(e.peek(7), 10)
# [dst]:=a+r[b]
e.load_string('109,10,2101,20,0,7,99,7,3,3,10')
e.execute()
self.assertEqual(e.peek(7), 30)
# [dst]:=a+r[b]
e.load_string('109,10,2101,20,0,7,99,7,3,3,10')
e.execute()
self.assertEqual(e.peek(7), 30)
# r[dst]:=a+b
e.load_string('109,10,21101,16,16,0,99,7,3,3,7')
e.execute()
self.assertEqual(e.peek(10), 32)
# dst:=a+b INVALID
#e.load_string('11101,32,32,1,99')
#e.execute()
#self.assertEqual(e.peek(1), 64)
def test_op_mul(self):
"""
Tests MUL op code
[dst]:=[a]*[b]
"""
e = ElfCPU()
# Invalid address 123456789 for a
e.load_string('2,123456789,0,0')
with self.assertRaises(ProtectionFaultError):
e.step()
# Invalid address 123456789 for b
e.load_string('2,0,123456789,0')
with self.assertRaises(ProtectionFaultError):
e.step()
# Invalid address 123456789 for dst
e.load_string('2,0,0,123456789')
with self.assertRaises(ProtectionFaultError):
e.step()
# [dst]:=[a]*[b]
e.load_string('2,0,0,0,99')
e.step()
self.assertEqual(e.peek(0), 4)
# [dst]:=a*[b]
e.load_string('102,44,5,6,99,2,6')
e.execute()
self.assertEqual(e.peek(6), 88)
# [dst]:=[a]*b
e.load_string('1002,5,50,6,99,2,6')
e.execute()
self.assertEqual(e.peek(6), 100)
# [dst]:=a*b
e.load_string('1102,5,5,6,99,2,6')
e.execute()
self.assertEqual(e.peek(6), 25)
# [dst]:=r[a]*b
e.load_string('109,10,1202,0,4,7,99,7,3,3,4')
e.execute()
self.assertEqual(e.peek(7), 16)
# [dst]:=a*r[b]
e.load_string('109,10,2102,7,0,7,99,7,3,3,2')
e.execute()
self.assertEqual(e.peek(7), 14)
# [dst]:=r[a]*r[b]
e.load_string('109,10,2202,0,1,7,99,7,3,3,2,6')
e.execute()
self.assertEqual(e.peek(7), 12)
# dst:=a*b
e.load_string('11102,6,6,0,99')
e.execute()
self.assertEqual(e.peek(0), 36)
# r[dst]:=a*b
e.load_string('109,7,21102,8,3,0,99,1')
e.execute()
self.assertEqual(e.peek(7), 24)
def test_op_input(self):
"""
Tests input op code
Use unittest.mock.patch to fake the input value
"""
e = ElfCPU()
# Interrupts off
e.load_string('103,3,99,-1')
e.interrupts = False
with patch('builtins.input', return_value='1234'):
e.execute()
self.assertEqual(e.peek(3), 1234)
# Interrupts on IMMEDIATE MODE
e.load_string('103,5,103,5,99,-1')
e.interrupts = True
with self.assertRaises(InputInterrupt):
e.step()
# Should be back at pc = 0
self.assertEqual(e.pc, 0)
# Load input
e.input_buffer = 567
# Loading again overflows
with self.assertRaises(InputOverflow):
e.input_buffer = 123
# Execute the input instruction
e.step()
self.assertEqual(e.peek(5), 567)
# Exec next input instruction
with self.assertRaises(InputInterrupt):
e.step()
e.input_buffer = 987
# Execute until end
e.execute()
self.assertEqual(e.peek(5), 987)
######################################################
# Interrupts on RELATIVE MODE
e.load_string('109,10,203,0,203,1,203,-1,99,102,100,101')
e.interrupts = True
# step past the relative base op code
e.step()
with self.assertRaises(InputInterrupt):
e.step()
# Should be back at pc = 2 (after relative base op code)
self.assertEqual(e.pc, 2)
# Load input
e.input_buffer = 567
# Loading again overflows
with self.assertRaises(InputOverflow):
e.input_buffer = 123
# Execute the input instruction
e.step()
self.assertEqual(e.peek(10), 567)
# Exec next input instruction
with self.assertRaises(InputInterrupt):
e.step()
e.input_buffer = 987
# Step to execute this input
e.step()
self.assertEqual(e.peek(11), 987)
# Exec next input instruction
with self.assertRaises(InputInterrupt):
e.step()
e.input_buffer = 456
# Execute until end
e.execute()
self.assertEqual(e.peek(9), 456)
######################################################
# Interrupts on POSITIONAL MODE
e.load_string('3,7,3,8,3,9,99,1,3,5')
e.interrupts = True
with self.assertRaises(InputInterrupt):
e.step()
# Should be back at pc = 0
self.assertEqual(e.pc, 0)
# Load input
e.input_buffer = 345
# Loading again overflows
with self.assertRaises(InputOverflow):
e.input_buffer = 123
# Execute the input instruction
e.step()
self.assertEqual(e.peek(7), 345)
# Exec next input instruction
with self.assertRaises(InputInterrupt):
e.step()
e.input_buffer = 765
# Step to execute this input
e.step()
self.assertEqual(e.peek(8), 765)
# Exec next input instruction
with self.assertRaises(InputInterrupt):
e.step()
e.input_buffer = 555
# Execute until end
e.execute()
self.assertEqual(e.peek(9), 555)
def test_op_output(self):
"""
Tests output op code
Use io.StringIO() to capture the output
"""
e = ElfCPU()
# Interrupts off
e.load_string('4,5,104,66,99,55,5')
e.interrupts = False
result = None
with patch('sys.stdout', new=io.StringIO()) as output:
e.execute()
result = output.getvalue()
result = result.splitlines()
# First is a reference to memory address 5
self.assertEqual(result[0].strip(), '55')
# Second is an immediate value
self.assertEqual(result[1].strip(), '66')
# Interrupts on
e.load_string('4,5,104,66,99,55,5')
e.interrupts = True
with self.assertRaises(OutputInterrupt):
e.execute()
self.assertEqual(e.output_buffer, 55)
# Don't clear buffer
with self.assertRaises(OutputOverflow):
e.execute()
# Restart test
e.reset()
e.load_string('4,5,104,66,99,55,5')
e.interrupts = True
with self.assertRaises(OutputInterrupt):
e.execute()
self.assertEqual(e.output_buffer, 55)
# Clear buffer
del e.output_buffer
with self.assertRaises(OutputInterrupt):
e.execute()
self.assertEqual(e.output_buffer, 66)
###############################################
# Interrupts on RELATIVE MODE
# Restart test
e.reset()
e.load_string('109,5,204,1,99,6,1234')
e.interrupts = True
with self.assertRaises(OutputInterrupt):
e.execute()
self.assertEqual(e.output_buffer, 1234)
def test_op_jmp_true(self):
"""
Tests jump if true op code
"""
e = ElfCPU()
"""
Tests address 8 (which is 1) if it is non-zero. Since this is
true, it jumps to the value of address 9 (which is 7). This
terminates the program.
"""
e.load_string('5,8,9,1101,1,1,8,99,1,7')
e.execute()
self.assertEqual(e.peek(8), 1)
"""
Tests immediate value 8 if it is non-zero. Since it is
true, jump to immediate address 7 which terminates.
"""
e.load_string('1105,8,7,1101,1,1,8,99,1,7')
e.execute()
self.assertEqual(e.peek(8), 1)
"""
Tests address 8 (which is 0) if it is non-zero. Since this is
false it does not jump and instead adds 1+1 to address 8.
"""
e.load_string('5,8,9999,11101,1,1,8,99,0,7')
e.execute()
self.assertEqual(e.peek(8), 2)
"""
Tests immediate value 0 if it is non-zero. Since it is
false it does not jump and instead adds 1+1 to address 8.
"""
e.load_string('1105,0,9999,11101,1,1,8,99,0,7')
e.execute()
self.assertEqual(e.peek(8), 2)
def test_op_cmp_lessthan(self):
"""
Tests compare less than op code
"""
e = ElfCPU()
"""
Tests if value of address 8 (5) is less than value of
address 9 (10). Since this is true write 1 to address 10.
"""
e.load_string('7,8,9,10,99,5,10,-1,5,10,7')
e.execute()
self.assertEqual(e.peek(10), 1)
"""
        Tests if value of address 8 (10) is less than value of
        address 9 (5). Since this is false write 0 to address 10.
"""
e.load_string('7,8,9,10,99,5,10,-1,10,5,7')
e.execute()
self.assertEqual(e.peek(10), 0)
"""
Tests if immediate value of 5 is less than immediate value of
10. Since this is true write 1 to address 7.
"""
e.load_string('1107,5,10,7,99,0,0,-1')
e.execute()
self.assertEqual(e.peek(7), 1)
"""
Tests if immediate value of 10 is less than immediate value of
5. Since this is false write 0 to address 7.
"""
e.load_string('11107,10,5,7,99,0,0,-1')
e.execute()
self.assertEqual(e.peek(7), 0)
"""
if r[a] < r[b]
r[dst]:=1
else
r[dst]:=0
"""
e.load_string('109,10,22207,0,1,2,99,222,222,222,100,50,1')
e.execute()
self.assertEqual(e.peek(12), 0)
"""
if r[a] < r[b]
r[dst]:=1
else
r[dst]:=0
"""
e.load_string('109,10,22207,0,1,2,99,222,222,222,50,100,1')
e.execute()
self.assertEqual(e.peek(12), 1)
def test_op_eq(self):
"""
Tests equals op code
"""
e = ElfCPU()
"""
        Tests if value of address 8 (10) is equal to value
        of address 9 (10). Since this is true, write 1 to
address 10.
"""
e.load_string('8,8,9,10,99,10,10,-1,10,10,7')
e.execute()
self.assertEqual(e.peek(10), 1)
"""
        Tests if value of address 8 (5) is equal to value
        of address 9 (6). Since this is false, write 0 to
address 10.
"""
e.load_string('8,8,9,10,99,10,0,-1,5,6,7')
e.execute()
self.assertEqual(e.peek(10), 0)
"""
Tests if immediate value 10 is equal to immediate value
10. Since this is true, write 1 to address 7.
"""
e.load_string('1108,10,10,7,99,2,3,-1')
e.execute()
self.assertEqual(e.peek(7), 1)
"""
Tests if immediate value of 0 is equal to immediate value
10. Since this is false, write 0 to address 7.
"""
e.load_string('1108,0,10,7,99,2,3,-1')
e.execute()
self.assertEqual(e.peek(7), 0)
"""
if r[a] = r[b]
r[dst]:=1
else
r[dst]:=0
"""
e.load_string('109,10,22208,0,1,2,99,222,222,222,555,555,1')
e.execute()
self.assertEqual(e.peek(12), 1)
"""
        if r[a] = r[b]
r[dst]:=1
else
r[dst]:=0
"""
e.load_string('109,10,22208,0,1,2,99,222,222,222,-500,100,1')
e.execute()
self.assertEqual(e.peek(12), 0)
def test_halt(self):
"""
Tests for the halt op code
"""
e = ElfCPU()
e.load_string('1,0,0,0,99')
e.step()
self.assertFalse(e.is_halted)
e.step()
self.assertTrue(e.is_halted)
def test_reset(self):
"""
Tests for CPU reset
"""
e = ElfCPU()
e.load_string('1,0,0,0,99')
e.execute()
e.reset()
# Halted gets cleared
self.assertFalse(e.is_halted)
# Program counter goes to 0
self.assertEqual(e.pc, 0)
# Memory gets wiped so address 1 becomes invalid
with self.assertRaises(ValueError):
e.peek(1)
def test_gpf(self):
"""
Tests for a general protection fault by allowing the program counter to
go past the end of the memory.
"""
e = ElfCPU()
# Jump to 2**20, the last memory address
e.load_string('1106,0,1048576')
with self.assertRaises(ProtectionFaultError):
e.execute()
def test_op_relative_base(self):
"""
Tests the relative base mode op code
"""
e = ElfCPU()
# Position
e.load_string('9,5,204,1,99,6,7,777')
e.interrupts = True
# Step over relative mode op
e.step()
with self.assertRaises(OutputInterrupt):
e.execute()
self.assertEqual(e.output_buffer, 777)
# Immediate
e.reset()
e.load_string('109,5,204,1,99,444,777')
e.interrupts = True
# Step over relative mode op
e.step()
with self.assertRaises(OutputInterrupt):
e.execute()
self.assertEqual(e.output_buffer, 777)
# Relative
e.reset()
e.load_string('209,9,209,6,204,-2,99,5,333,4,6')
e.interrupts = True
e.debug = True
with self.assertRaises(OutputInterrupt):
e.execute()
self.assertEqual(e.output_buffer, 333)
# EOF
|
python
|
# -*- coding: utf-8 -*-
from flask import Flask
from flask_restful import Resource, Api
from controller.TestSuit import TestSuit
from model.TestSuit import db as TestSuitDB
app = Flask(__name__)
api = Api(app)
api.add_resource(TestSuit, '/testsuit')
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///example.sqlite'
TestSuitDB.init_app(app)
if __name__ == '__main__':
with app.app_context():
TestSuitDB.create_all()
app.run(debug=True)
|
python
|
# Compact version
# ---------------
rule = (('0'*8 + bin(30)[2:])[-8:])[::-1]
cells = list('0'*40 + '1' + '0'*40)
for epoch in range(40):
print(''.join(cells).replace("0"," ").replace("1","█"))
cells = [cells[0]] + [rule[eval('0b' + cells[i-1]+cells[i]+cells[i+1])]
for i in range(1,len(cells)-1)] + [cells[-1]]
"""
# Readable version
# ----------------
# Rule transformation into ascii/binary representation
rule = 30
rule = ('0'*8 + bin(rule)[2:])[-8:]
rule = rule[::-1]
# Cells
p = 40
cells = '0'*p + '1' + '0'*p
# Iteration over epoch
n = 40
for epoch in range(n):
# Display
t = ''.join(cells)
t = t.replace("0", " ")
t = t.replace("1", "█")
print(t)
# Iteration over local neighborood
_cells = []
for i in range(1,len(cells)-1):
code = cells[i-1]+cells[i]+cells[i+1]
code = eval('0b' + code)
_cells.append(rule[code])
cells = [cells[0]] + _cells + [cells[-1]]
"""
|
python
|
import io
import os
from google.cloud import vision
from google.cloud.vision import types
from google.protobuf.json_format import MessageToJson
class GoogleVisionApi:
def __init__(self):
# Instantiates a client
self.client = vision.ImageAnnotatorClient()
self.requestsCache = {}
def request(self, imagePath):
# Loads the image into memory
with io.open(imagePath, 'rb') as image_file:
content = image_file.read()
image = types.Image(content=content)
# Performs label detection on the image file
self.requestsCache[imagePath] = self.client.document_text_detection(image=image)
response = self.requestsCache[imagePath]
jsonText = MessageToJson(response)
return jsonText
def clear(self,requestName):
if requestName in self.requestsCache:
del self.requestsCache[requestName]
def clearAll(self):
self.requestsCache = {}
|
python
|
from os import getenv, \
path
class Config(object):
API_KEY = getenv('API_KEY')
DEBUG = getenv('DEBUG', False)
SQLALCHEMY_DATABASE_URI = getenv('DATABASE_URL', 'sqlite:///' + path.dirname(__file__) + '/app/app.db').replace('mysql2:', 'mysql:')
SQLALCHEMY_ECHO = getenv('SQLALCHEMY_ECHO', False)
SQLALCHEMY_POOL_RECYCLE = 60
SQLALCHEMY_TRACK_MODIFICATIONS = False
STRIP_WWW_PREFIX = True
TESTING = False
|
python
|
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='main', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataset', default='mag', choices=['mag', 'amazon'])
parser.add_argument('--num_motif', default=50, type=int)
parser.add_argument('--eta', default=2.0, type=float)
args = parser.parse_args()
dataset = args.dataset
topM = args.num_motif
eta = args.eta
labels = []
with open(f'../{dataset}_data/labels.txt') as fin:
for line in fin:
data = line.strip()
labels.append('TERM_'+data)
label2emb = {}
word2idx = {}
idx2word = {}
word2emb = {}
with open(f'{dataset}.emb') as fin:
idx = 0
for line in fin:
data = line.strip().split()
if len(data) != 101:
continue
word = data[0]
emb = np.array([float(x) for x in data[1:]])
emb = emb / np.linalg.norm(emb)
word2idx[word] = idx
idx2word[idx] = word
word2emb[word] = emb
idx += 1
if word in labels:
label2emb[word] = emb
word2kappa = {}
with open(f'{dataset}.kappa') as fin:
for line in fin:
data = line.strip().split()
if len(data) != 2:
continue
word = data[0]
kappa = float(data[1])
word2kappa[word] = kappa
embMat = np.zeros((len(idx2word), 100))
for idx in range(len(idx2word)):
embMat[idx] = word2emb[idx2word[idx]]
with open(f'{dataset}_motifs.txt', 'w') as fout:
for label in labels:
l_emb = word2emb[label]
res = np.dot(embMat, l_emb)
idx_sorted = list(np.argsort(-res))
expanded = []
k = 0
kappa_l = word2kappa[label]
while len(expanded) < topM and k < len(idx_sorted):
word = idx2word[idx_sorted[k]]
if word2kappa[word] >= eta*kappa_l:
expanded.append(word)
k += 1
fout.write(label+'\t'+'\t'.join(expanded)+'\n')
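# Note (sketch of the ranking logic above): the embeddings are L2-normalized
# when loaded, so np.dot(embMat, l_emb) ranks words by cosine similarity to the
# label, and a candidate word is kept only if its concentration parameter kappa
# is at least eta (default 2.0) times the label's own kappa.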
|
python
|
from infi.pyutils.lazy import cached_method
from ..inquiry import InquiryException
from logging import getLogger
logger = getLogger(__name__)
class InfiniBoxVolumeMixin(object):
@cached_method
def _is_volume_mapped(self):
"""In race condition between a rescan and volume unmap operation, the device may still exist while the volume
is already unampped. This method returns True if a volume is mapped to the device."""
standard_inquiry = self.device.get_scsi_standard_inquiry()
# spc4r30 section 6.4.2 tables 140 + 141, peripheral device type 0 is disk, 31 is unknown or no device
return standard_inquiry.peripheral_device.type == 0
@cached_method
def get_volume_id(self):
""" Returns the volume id within the InfiniBox """
try:
return self._get_key_from_json_page('vol_entity_id', 0xc6)
except InquiryException:
return self._get_key_from_json_page('vol_entity_id')
@cached_method
def get_volume_name(self):
""" Returns the volume name inside the Infinibox, or None if not a volume """
return self._get_volume_name_from_json_page()
@cached_method
def get_volume_type(self):
""" Returns the volume type, or None if it is not a volume """
raise NotImplementedError()
def _get_volume_name_from_json_page(self):
try:
return self.get_string_data(0xc7)
except InquiryException:
return self._get_key_from_json_page('vol')
def _send_null_write(self, device):
from infi.asi.cdb.write import Write10Command
from infi.asi.coroutines.sync_adapter import sync_wait
cdb = Write10Command(0, '') # empty write
with device.asi_context() as asi:
sync_wait(cdb.execute(asi))
def _is_null_write_returns_write_protected_check_condition(self, device):
from infi.asi.errors import AsiCheckConditionError
try:
self._send_null_write(device)
return False
except AsiCheckConditionError as error:
if error.sense_obj.sense_key == "DATA_PROTECT":
return True
raise
def check_if_write_protected(self):
from infi.storagemodel.linux.native_multipath import LinuxNativeMultipathBlockDevice
if isinstance(self.device, LinuxNativeMultipathBlockDevice):
# on linux, device-mapper swallows the I/Os and doesn't pass them to the device, so we bypass it
return self._is_null_write_returns_write_protected_check_condition(self.device.get_paths()[0])
else:
return self._is_null_write_returns_write_protected_check_condition(self.device)
|
python
|
import discord
from discord.ext import commands
import random, string
from asyncio import sleep
class Fun(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def ascii(self, ctx, amount: int=1):
await ctx.message.delete()
for i in range(amount):
text=''
for i in range(2000):
text=text+chr(random.randrange(13000))
await ctx.send(content=text)
@commands.command()
async def hack(self, ctx, user:discord.User):
perc=0
while(perc < 100):
await ctx.message.edit(content=f'**Получение почты `{user}`... {perc}%**')
perc+=random.randint(1, 15)
await ctx.message.edit(content='**:white_check_mark: Почта получена!**')
await sleep(5)
perc=0
while(perc < 100):
await ctx.message.edit(content=f'**Получение пароля `{user}`... {perc}%**')
perc+=random.randint(1, 10)
await ctx.message.edit(content='**:white_check_mark: Пароль был получен!**')
await sleep(5)
perc=0
while(perc < 100):
await ctx.message.edit(content=f'**Обход защиты... {perc}%**')
perc+=random.randint(1, 5)
await ctx.message.edit(content=f'**:white_check_mark: Успешно вошёл в аккаунт `{user}`**')
@commands.command()
async def rainbow(self, ctx):
emojis=['🟧', '🟦', '🟥', '🟪', '🟩', '🟨']
while True:
text=''
for i in range(300):
text=text+''.join(random.choice(emojis))
await ctx.message.edit(content=text)
await ctx.message.delete()
@commands.command()
async def ghoul(self, ctx):
await ctx.message.edit(content='```Я гуль...```')
a=1000
while a>6:
await ctx.send(f'**{a}-7={a-7}**')
a-=7
@commands.command()
async def boom(self, ctx):
await ctx.message.edit(content="**Данный чат будет взорван через 5 секунд...**")
await sleep(1)
await ctx.message.edit(content="**Данный чат будет взорван через 4 секунды...**")
await sleep(1)
await ctx.message.edit(content="**Данный чат будет взорван через 3 секунды...**")
await sleep(1)
await ctx.message.edit(content="**Данный чат будет взорван через 2 секунды...**")
await sleep(1)
await ctx.message.edit(content="**Данный чат будет взорван через 1 секунду...**")
await sleep(1)
await ctx.message.delete()
message=await ctx.send("**Boom!**", file=discord.File("Resources/boom.gif"))
await sleep(1)
await ctx.send("⠀" + "\n"*1998 + "⠀")
await message.delete()
def setup(bot):
bot.add_cog(Fun(bot))
|
python
|
import requests
import lib.RModule as rmodule
import lib.RAudiostream as raudiostream
import lib.RAtmosphere as ratmosphere
import lib.RMonitoring as rmonitoring
# Assumed import (follows the lib.R* naming pattern above); required by the
# neopixel module loader further down, which references `rneopixel`.
import lib.RNeopixel as rneopixel
class Project:
project_id = None
switchboards = []
neopixels = []
audiostream = None
monitoring = None
def __init__(self, project_id, broker):
self.project_id = project_id
# STARTING MONITORING SERVICE
self.monitoring = rmonitoring.Monitoring(self.project_id, broker)
# LOADING SWITCHBOARDS
retrieve_switchboards_request = "http://rhapsody.hestiaworkshop.net/rest/switchboards/get_switchboards/" + self.project_id
try:
r = requests.get(retrieve_switchboards_request)
result = r.json()
for switchboard in result:
new_switchboard = rmodule.Module("switchboards", switchboard['switchboard_id'], switchboard['switchboard_mqtt_topic'], project_id, broker)
self.switchboards.append(new_switchboard)
except Exception as e:
self.monitoring.send("ERROR", "project_initialization -> while loading switchboards", str(e))
# LOADING NEOPIXELS
retrieve_neopixels_request = "http://rhapsody.hestiaworkshop.net/rest/neopixels/get_neopixels/" + self.project_id
try:
r = requests.get(retrieve_neopixels_request)
result = r.json()
for neopixel in result:
new_neopixel = rmodule.Module("neopixels", neopixel['neopixel_id'], neopixel['neopixel_mqtt_topic'], project_id, broker)
self.neopixels.append(new_neopixel)
        except Exception as e:
            self.monitoring.send("ERROR", "project_initialization -> while loading neopixels", str(e))
# MODULES
# LOADING AUDIOSTREAM MODULE
try:
self.audiostream = raudiostream.Audiostream("13041983", self.project_id, broker)
except Exception as e:
self.monitoring.send("ERROR", "project_initialization -> while loading audiostream module", str(e))
# LOADING NEOPIXEL MODULE
try:
self.neopixel = rneopixel.Neopixel("01101974", self.project_id, broker)
except Exception as e:
self.monitoring.send("ERROR", "project_initialization -> while loading neopixel module", str(e))
# SERVICES
# LOADING ATMOSPHERES SERVICE
#atmosphere = ratmosphere.Atmosphere(self.project_id)
def begin(self):
        # Starts switchboard modules
try:
for switchboard in self.switchboards:
switchboard.start()
except Exception as e:
self.monitoring.send("ERROR", "starting_project -> starting switchboards", str(e))
# Starts neopixel modules
try:
for neopixel in self.neopixels:
neopixel.start()
except Exception as e:
self.monitoring.send("ERROR", "starting_project -> starting neopixels", str(e))
# Starts audiostream module
try:
self.audiostream.start()
except Exception as e:
self.monitoring.send("starting_project -> starting audiostream", str(e))
|
python
|
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble, svm,datasets
import brica1
# Randomforest Component Definition
class RandomForestClassifierComponent(brica1.Component):
def __init__(self, n_in):
super(RandomForestClassifierComponent, self).__init__()
self.classifier = ensemble.RandomForestClassifier()
self.make_in_port("in0", n_in)
self.make_out_port("out0", 1)
def fire(self):
x = self.inputs["in0"]
z = self.classifier.predict([x])
self.results["out0"] = z
def fit(self, X, y):
self.classifier.fit(X, y)
# SVM Component Definition
class SVMComponent(brica1.Component):
def __init__(self, n_in):
super(SVMComponent, self).__init__()
self.classifier = svm.LinearSVC(C=1.0)
self.make_in_port("in0", n_in)
self.make_out_port("out0", 1)
def fire(self):
x = self.inputs["in0"]
z = self.classifier.predict([x])
self.results["out0"] = z
def fit(self, X, y):
self.classifier.fit(X, y)
# SVM vs RFC Component Definition
class SVMvsRFC_Component(brica1.Component):
def __init__(self, n_in):
super(SVMvsRFC_Component, self).__init__()
self.make_in_port("in0",n_in)
self.make_in_port("in1",n_in)
self.make_out_port("out0", 1)
def fire(self):
x = self.inputs["in0"]
y = self.inputs["in1"]
self.results["out0"] = (x==y)
# Load iris dataset
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# Setup data feeder component
feeder = brica1.ConstantComponent()
feeder.make_out_port("out0", 2)
# Setup components
svm = SVMComponent(2)
svm.fit(X, y)
RFC = RandomForestClassifierComponent(2)
RFC.fit(X,y)
SR =SVMvsRFC_Component(1)
# Connect the components
brica1.connect((feeder, "out0"), (svm, "in0"))
brica1.connect((feeder, "out0"), (RFC, "in0"))
brica1.connect((svm, "out0"), (SR, "in0"))
brica1.connect((RFC, "out0"), (SR, "in1"))
# Add components to module
mod = brica1.Module()
mod.add_component("feeder", feeder)
mod.add_component("svm", svm)
mod.add_component("RFC",RFC)
mod.add_component("SR", SR)
# Setup scheduler and agent
a = brica1.Agent()
a.add_submodule("mod", mod)
s = brica1.VirtualTimeSyncScheduler(a)
# Test the classifier
svm_result=[]
RFC_result=[]
svm_vs_RFC=[]
for i in range(len(X)):
feeder.set_state("out0", X[i]) # Set data feeder to training data i
s.step() # Execute prediction
svm_result.append(svm.get_out_port("out0").buffer[0])
RFC_result.append(RFC.get_out_port("out0").buffer[0])
s.step()
svm_vs_RFC.append(SR.get_out_port("out0").buffer[0])
for i in range(len(X)):
    print("SVM: {}\tRFC: {}\tRESULT: {}".format(svm_result[i], RFC_result[i], svm_vs_RFC[i]))
|
python
|
""" Parent class to inception models """
import tensorflow as tf
from . import TFModel
from .layers import conv_block
class Inception(TFModel):
""" The base class for all inception models
**Configuration**
body : dict
layout : str
a sequence of blocks in the network:
- b - inception block for v1 and v3 models
- r - reduction block
- f - factorization_block for Inception_v3 model (see :meth:`.factorization_block`)
- m - mixed_block for Inception_v3 model (see :meth:`.mixed_block`)
- e - expanded_block for Inception_v3 model (see :meth:`.expanded_block`)
- A - inception block A for Inception_v4 model (see :meth:`.inception_a_block`)
- B - inception block B for Inception_v4 model (see :meth:`.inception_b_block`)
- C - Inception block C for Inception_v4 model (see :meth:`.inception_c_block`)
- G - grid-reduction block for Inception_v4 model (see :meth:`.reduction_grid_block`)
arch : dict
parameters for each block:
key : str
block's short name
value : dict
specific parameters (e.g. filters)
"""
@classmethod
def body(cls, inputs, name='body', **kwargs):
""" Base layers.
Parameters
----------
inputs : tf.Tensor
input tensor
layout : str
a sequence of blocks
arch : dict
parameters for each block
name : str
scope name
Returns
-------
tf.Tensor
"""
kwargs = cls.fill_params('body', **kwargs)
arch, layout = cls.pop(['arch', 'layout'], kwargs)
with tf.variable_scope(name):
x, inputs = inputs, None
layout_dict = {}
for block in layout:
if block not in layout_dict:
layout_dict[block] = [-1, 0]
layout_dict[block][1] += 1
for i, block in enumerate(layout):
layout_dict[block][0] += 1
block_no = layout_dict[block][0]
block_args = {**kwargs, **arch[block]}
filters = block_args.pop('filters', None)
if isinstance(filters, list):
filters = filters[block_no]
if block == 'b':
x = cls.block(x, filters=filters, name='block-%d'%i, **block_args)
elif block == 'r':
x = cls.reduction_block(x, filters=filters, name='reduction_block-%d'%i, **block_args)
elif block == 'f':
x = cls.factorization_block(x, filters=filters, name='factorization_block-%d'%i, **block_args)
elif block == 'm':
x = cls.mixed_block(x, filters=filters, name='mixed_block-%d'%i, **block_args)
elif block == 'e':
x = cls.expanded_block(x, filters=filters, name='expanded_block-%d'%i, **block_args)
elif block == 'A':
x = cls.inception_a_block(x, filters=filters, name='inception_a_block-%d'%i, **block_args)
elif block == 'B':
x = cls.inception_b_block(x, filters=filters, name='inception_b_block-%d'%i, **block_args)
elif block == 'C':
x = cls.inception_c_block(x, filters=filters, name='inception_c_block-%d'%i, **block_args)
elif block == 'G':
x = cls.reduction_grid_block(x, filters=filters, name='reduction_grid_block-%d'%i, **block_args)
return x
@classmethod
def reduction_block(cls, inputs, filters, layout='cna', name='reduction_block', **kwargs):
""" Reduction block.
For details see figure 10 in the article.
Parameters
----------
inputs : tf.Tensor
input tensor
filters : tuple of 3 ints
number of output filters
name : str
scope name
Returns
-------
tf.Tensor
"""
with tf.variable_scope(name):
branch_3 = conv_block(inputs, layout, filters[3], 3, name='conv_3', strides=2, padding='valid', **kwargs)
branch_1_3 = conv_block(inputs, layout*2, [filters[0]]+[filters[1]], [1, 3], name='conv_1_3', **kwargs)
branch_1_3_3 = conv_block(branch_1_3, layout, filters[2], 3, name='conv_1_3_3', strides=2,
padding='valid', **kwargs)
branch_pool = conv_block(inputs, layout='p', pool_size=3, pool_strides=2, name='max_pooling',
padding='valid', **kwargs)
axis = cls.channels_axis(kwargs['data_format'])
output = tf.concat([branch_3, branch_1_3_3, branch_pool], axis, name='output')
return output
|
python
|
# Generated by Django 3.0.4 on 2020-03-20 04:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('places', '0024_submittedplace'),
]
operations = [
migrations.RemoveField(
model_name='submittedplace',
name='website',
),
migrations.AddField(
model_name='submittedplace',
name='email',
field=models.EmailField(blank=True, max_length=254, null=True),
),
]
|
python
|
"""Tests for claim_line model and the associated functions."""
from claims_to_quality.analyzer.models import claim_line
def test_str_method():
"""Test that claim lines are represented in a readable format."""
line = claim_line.ClaimLine(
{'clm_line_hcpcs_cd': 'code', 'mdfr_cds': ['GQ'], 'clm_pos_code': '24', 'clm_line_num': 1}
)
assert line.__str__() == 'ClaimLine - line_number: 1'
|
python
|
#!/usr/bin/env python3
"""
Script to preprocess OCR output for Tesseract
Usage:
python3 preprocess.py /path/to/input/dir \
/path/to/output/dir
"""
from glob import glob
import os
import shutil
import sys
import cv2
import numpy as np
def preprocess(img):
"""Takes a given image and returns the preprocessed version for
tesseract.
Args:
img (cv2 image): The image to preprocess
Returns:
dict of cv2 image: The preprocessed images with the keys:
(`gray`, `gray_inv`, `thresh`) for the respective
images.
"""
# gray scale
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# gray scale inverted
img_gray_inv = cv2.bitwise_not(img_gray)
# statistical flag for white versus black ID: median
flag_background = np.median(img_gray)
# initial idea: 128-ish is half the size of the RGB scale so:
if flag_background > 128:
print('White Number Detected!')
_, threshold = cv2.threshold(img_gray, 220, 255, cv2.THRESH_BINARY_INV)
else:
print('Black Number Detected!')
_, threshold = cv2.threshold(img_gray, 128, 255, cv2.THRESH_BINARY)
    # Return the processed images
return { 'gray': img_gray, 'gray_inv': img_gray_inv, 'thresh': threshold }
def main():
assert len(sys.argv) - 1 >= 2, "Must provide two arguments (in_dir, out_dir)"
in_dir = sys.argv[1]
    assert in_dir is not None, "Missing input directory (argv[1])"
    out_dir = sys.argv[2]
    assert out_dir is not None, "Missing output directory (argv[2])"
if not os.path.exists(out_dir):
os.makedirs(out_dir)
for file in glob("%s/*.jpg" % in_dir):
print("Processing '%s' for thresholding..." % file)
img = cv2.imread(file)
image_id = os.path.splitext(os.path.basename(file))[0]
preprocessed = [v for v in preprocess(img).values()]
for i, ppimg in enumerate(preprocessed):
out_jpeg_file = ("%s/%s.pp%s.jpg" % (out_dir, image_id, i))
cv2.imwrite(out_jpeg_file, ppimg)
for file in glob("%s/*.json" % in_dir):
image_id = os.path.splitext(os.path.basename(file))[0]
out_json_file = ("%s/%s.json" % (out_dir, image_id))
shutil.copy(file, out_json_file)
if __name__ == '__main__':
main()
|
python
|
from .davis import vis
|
python
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CERN.
# Copyright (C) 2019 Northwestern University.
#
# Invenio App RDM is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio Records Permissions API."""
from elasticsearch_dsl.query import Q
from flask import current_app
from invenio_search.api import DefaultFilter, RecordsSearch
from .factories import record_read_permission_factory
def rdm_records_filter():
"""Records filter."""
# TODO: Implement with new permissions metadata
try:
perm_factory = current_app.config["RECORDS_REST_ENDPOINTS"]["recid"][
"read_permission_factory_imp"
]() # noqa
except KeyError:
perm_factory = record_read_permission_factory
# FIXME: this might fail if factory returns None, meaning no "query_filter"
    # was implemented in the generators. However, IfPublic should always be
# there.
filters = perm_factory.query_filters
if filters:
qf = None
for f in filters:
qf = qf | f if qf else f
return qf
else:
return Q()
# TODO: Move this to invenio-rdm-records and
# * have it provide the permissions OR
# * rely on app's current_search for tests
class RecordsSearch(RecordsSearch):
"""Search class for RDM records."""
class Meta:
"""Default index and filter for frontpage search."""
index = "records"
doc_types = None
default_filter = DefaultFilter(rdm_records_filter)
|
python
|
#! python3
# aoc_13.py
# Advent of code:
# https://adventofcode.com/2021/day/13
# https://adventofcode.com/2021/day/13#part2
#
def part_one(input) -> int:
    coords = []
    with open(input, 'r') as inp:
        lines = inp.readlines()
    for line in lines:
        line = line.strip()
        coords.append([int(line.split(',')[1]), int(line.split(',')[0])])
R = 447*2+1 #max([_[0] for _ in coords])+1
C = 655*2+1 #max([_[1] for _ in coords])+1
print('R:',R,'C:',C)
dmap = [[0 for columns in range(C)] for rows in range(R)]
for l in coords:
dmap[l[0]][l[1]] = 1
dmap = xfold(dmap,655)
dmap = yfold(dmap,447)
dmap = xfold(dmap,327)
dmap = yfold(dmap,223)
dmap = xfold(dmap,163)
dmap = yfold(dmap,111)
dmap = xfold(dmap,81)
dmap = yfold(dmap,55)
dmap = xfold(dmap,40)
dmap = yfold(dmap,27)
dmap = yfold(dmap,13)
dmap = yfold(dmap,6)
for line in dmap:
print(line)
return sum([sum(i) for i in dmap])
def yfold(m, yf):
    # Fold the grid upward across row yf: each kept cell is ORed with its mirror row below the fold.
    nmap = []
    for y in range(yf):
        nmap.append([a or b for a, b in zip(m[y], m[-y - 1])])
    return nmap
def xfold(m, xf):
    # Fold the grid leftward across column xf: each kept cell is ORed with its mirror column to the right.
    nmap = []
    for row in m:
        nmap.append([a or b for a, b in zip(row[:xf], row[:xf:-1])])
    return nmap
def part_two(input) -> int:
return 0
if __name__ == "__main__":
# ex_folds =
# inp_folds =
example_path = "./aoc_13_example.txt"
input_path = "./aoc_13_input.txt"
print("---Part One---")
# print(part_one(example_path))
print(part_one(input_path))
print("---Part Two---")
# print(part_two(input_path))
#fold along x=655
#fold along y=447
#fold along x=327
#fold along y=223
#fold along x=163
#fold along y=111
#fold along x=81
#fold along y=55
#fold along x=40
#fold along y=27
#fold along y=13
#fold along y=6
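# A hedged sketch of how the "fold along ..." instructions listed above could be
# parsed from the puzzle input instead of being hard-coded; the input layout
# (coordinates, a blank line, then fold instructions) is assumed from the puzzle
# description rather than from this script.
def parse_folds(path):
    """Return a list of (axis, position) tuples such as ('x', 655)."""
    folds = []
    with open(path, 'r') as inp:
        for line in inp:
            line = line.strip()
            if line.startswith('fold along'):
                axis, pos = line.split()[-1].split('=')
                folds.append((axis, int(pos)))
    return folds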
|
python
|
# Copyright (C) 2018 Shriram Bhat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Character map for unicode Kannada script with Latin."""
charmap_iso15919 = {
"Knda": [
u"ಀ", u"ಁ", u"ಂ", u"ಃ", u"಄", u"ಅ", u"ಆ", u"ಇ", u"ಈ", u"ಉ", u"ಊ", u"ಋ", u"ಌ", u"", u"ಎ", u"ಏ",
u"ಐ", u"", u"ಒ", u"ಓ", u"ಔ", u"ಕ", u"ಖ", u"ಗ", u"ಘ", u"ಙ", u"ಚ", u"ಛ", u"ಜ", u"ಝ", u"ಞ", u"ಟ",
u"ಠ", u"ಡ", u"ಢ", u"ಣ", u"ತ", u"ಥ", u"ದ", u"ಧ", u"ನ", u"", u"ಪ", u"ಫ", u"ಬ", u"ಭ", u"ಮ", u"ಯ",
u"ರ", u"ಱ", u"ಲ", u"ಳ", u"", u"ವ", u"ಶ", u"ಷ", u"ಸ", u"ಹ", u"", u"", u"಼", u"ಽ", u"ಾ", u"ಿ",
u"ೀ", u"ು", u"ೂ", u"ೃ", u"ೄ", u"", u"ೆ", u"ೇ", u"ೈ", u"", u"ೊ", u"ೋ", u"ೌ", u"್", u"", u"",
u"", u"", u"", u"", u"", u"ೕ", u"ೖ", u"", u"", u"", u"", u"", u"", u"ೝ", u"ೞ", u"",
u"ೠ", u"ೡ", u"ೢ", u"ೣ", u"", u"", u"೦", u"೧", u"೨", u"೩", u"೪", u"೫", u"೬", u"೭", u"೮", u"೯",
u"", u"ೱ", u"ೲ", u"ೳ", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u""
],
"Latn": [
u"", u"m̐", u"ṁ", u"ḥ", u"", u"a", u"ā", u"i", u"ī", u"u", u"ū", u"ṛ", u"ḷ", u"ê", u"e", u"ē",
u"ai", u"ô", u"o", u"ō", u"au", u"ka", u"kha", u"ga", u"gha", u"ṅa", u"ca", u"cha", u"ja", u"jha", u"ña", u"ṭa",
u"ṭha", u"ḍa", u"ḍha", u"ṇa", u"ta", u"tha", u"da", u"dha", u"na", u"ṉa", u"pa", u"pha", u"ba", u"bha", u"ma", u"ya",
u"ra", u"ṟa", u"la", u"ḷa", u"ḻa", u"va", u"śa", u"ṣa", u"sa", u"ha", u"", u"", u"", u"'", u"ā", u"i",
u"ī", u"u", u"ū", u"ṛ", u"ṝ", u"ê", u"e", u"ē", u"ai", u"ô", u"o", u"ō", u"au", u"", u"", u"",
u"oṃ", u"", u"", u"", u"", u"", u"", u"", u"qa", u"ḵẖa", u"ġ", u"za", u"ṛa", u"ṛha", u"fa", u"ẏa",
u"ṝ", u"ḹ", u"ḷ", u"ḹ", u".", u"..", u"0", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9",
u"…", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u""
],
}
|
python
|