content (string, lengths 0–894k) | type (string, 2 classes)
---|---|
#!/usr/bin/env python
from multi_circle_2 import Multi_circle_2
if __name__ == '__main__':
multi_circle_2 = Multi_circle_2(
[
# x, y, z, yaw, sleep
[0.0, 0.0, 1.0, 0, 8],
[0.0, 0.0, 1.0, 0, 3],
[-0.3, -1.4, 0.0, 0, 0],
]
)
multi_circle_2.run()
|
python
|
# Given a column title as appear in an Excel sheet, return its corresponding column number.
# For example:
# A -> 1
# B -> 2
# C -> 3
# ...
# Z -> 26
# AA -> 27
# AB -> 28
class Solution:
# @param s, a string
# @return an integer
def titleToNumber(self, s):
d = {'A':1, 'B':2, 'C':3, 'D':4, 'E':5, 'F':6, 'G':7, 'H':8, 'I':9,
'J':10, 'K':11, 'L':12, 'M':13, 'N':14, 'O':15, 'P':16, 'Q':17,
'R':18, 'S':19, 'T':20, 'U':21, 'V':22, 'W':23, 'X':24, 'Y':25, 'Z':26}
num = 0
while(len(s)):
num += d[s[0]] * pow(26, len(s) - 1)
s = s[1:]
return num
if __name__ == '__main__':
s = Solution()
print(s.titleToNumber('ABC'))
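# An equivalent, more compact formulation using ord() instead of a lookup table
# (illustrative alternative, not part of the original solution):
def title_to_number(s):
    num = 0
    for ch in s:
        num = num * 26 + (ord(ch) - ord('A') + 1)
    return num
assert title_to_number('ABC') == 731  # same result as Solution().titleToNumber('ABC')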
|
python
|
"""
Clustered/Convolutional/Variational autoencoder, including demonstration of
training such a network on MNIST, CelebNet and the film, "Sita Sings The Blues"
using an image pipeline.
Copyright Yida Wang, May 2017
"""
import matplotlib
import tensorflow as tf
import numpy as np
from scipy.misc import imsave
import os
import csv
from libs.dataset_utils import create_input_pipeline
from libs.vae import VAE
# This must be set when running over SSH on servers without a display
matplotlib.use('Agg')
def generate_vae(
files_train,
input_shape=[None, 784],
output_shape=[None, 784],
batch_size=128,
n_examples=6,
crop_shape=[64, 64],
crop_factor=1,
n_filters=[100, 100, 100, 100],
n_hidden=256,
n_code=50,
n_clusters=12,
convolutional=True,
fire=True,
variational=True,
metric=False,
filter_sizes=[3, 3, 3, 3],
dropout=True,
keep_prob=1.0,
activation=tf.nn.relu,
output_path="result",
ckpt_name="vae.ckpt"):
"""General purpose training of a (Variational) (Convolutional) (Clustered)
Autoencoder.
Supply a list of file paths to images, and this will do everything else.
Parameters
----------
files_train : list of strings
List of paths to images for training.
input_shape : list
Must define what the input image's shape is.
batch_size : int, optional
Batch size.
n_examples : int, optional
Number of examples to use while demonstrating the current training
iteration's reconstruction. Creates a square montage, so:
n_examples**2 = 16, 25, 36, ... 100.
crop_shape : list, optional
Size to centrally crop the image to.
crop_factor : float, optional
Resize factor to apply before cropping.
n_filters : list, optional
Same as VAE's n_filters.
n_hidden : int, optional
Same as VAE's n_hidden.
n_code : int, optional
Same as VAE's n_code.
convolutional : bool, optional
Use convolution or not.
fire: bool, optional
Use fire module or not.
variational : bool, optional
Use variational layer or not.
filter_sizes : list, optional
Same as VAE's filter_sizes.
dropout : bool, optional
Use dropout or not
keep_prob : float, optional
Percent of keep for dropout.
activation : function, optional
Which activation function to use.
img_step : int, optional
How often to save training images showing the manifold and
reconstruction.
save_step : int, optional
How often to save checkpoints.
output_path : str, optional
Define a path for saving results and sample images.
ckpt_name : str, optional
Checkpoints will be named as this, e.g. 'model.ckpt'
"""
# These should be defined before we finalize the graph
batch_train = create_input_pipeline(
files=files_train,
batch_size=batch_size,
n_epochs=1,
crop_shape=crop_shape,
crop_factor=crop_factor,
input_shape=input_shape,
output_shape=output_shape,
shuffle=False)
ae = VAE(input_shape=[None] + crop_shape + [input_shape[-1]],
output_shape=[None] + crop_shape + [output_shape[-1]],
convolutional=convolutional,
variational=variational,
fire=fire,
metric=metric,
n_filters=n_filters,
n_hidden=n_hidden,
n_code=n_code,
n_clusters=n_clusters,
dropout=dropout,
filter_sizes=filter_sizes,
activation=activation)
# Create a session with an explicit config (e.g. to cap GPU memory usage)
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.3
sess = tf.Session(config=config)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
# This will handle our threaded image pipeline
coord = tf.train.Coordinator()
# Ensure no more changes to graph
tf.get_default_graph().finalize()
# Start up the queues for handling the image pipeline
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
if (
os.path.exists(output_path + '/' + ckpt_name + '.index') or
os.path.exists(ckpt_name)
):
saver.restore(sess, output_path + '/' + ckpt_name)
print("Model restored.")
else:
print("No model, train at first!")
# Get the number of training samples
with open(files_train, "r") as f:
reader = csv.reader(f, delimiter=",")
data = list(reader)
n_files_train = len(data)
batch_i = 0
try:
while not coord.should_stop() and batch_i < n_files_train/batch_size:
batch_xs, batch_ts, batch_ys = sess.run(batch_train)
batch_xs /= 255.0
batch_ts /= 255.0
# Plot example reconstructions
input_x, recon, target = sess.run(
[ae['x'], ae['y'], ae['t']],
feed_dict={
ae['x']: batch_xs,
ae['t']: batch_ts,
ae['train']: False,
ae['keep_prob']: 1.0})[:]
# Add a small constant to avoid zeros in the denominators
depth_recon = recon[:, :, :, -1] + 0.001
depth_target = target[:, :, :, -1]
for sam_id in range(target.shape[0]):
name_img = data[batch_i*batch_size+sam_id][0]
label_start = name_img.rfind('_') + 1
label_end = name_img.rfind('.')
imsave(
arr=input_x[sam_id, :],
name=output_path+'/generated/input_' +
name_img[label_start:label_end] + '.png')
imsave(
arr=np.squeeze(target[sam_id, :]),
name=output_path+'/generated/target_' +
name_img[label_start:label_end] + '.png')
imsave(
arr=np.squeeze(recon[sam_id, :]),
name=output_path+'/generated/recon_' +
name_img[label_start:label_end] + '.png')
# Evaluation for depth images
valid_pos = np.nonzero(depth_target)
delta1 = (np.count_nonzero(
np.maximum(
1.25 - np.maximum(
depth_recon[valid_pos]/depth_target[valid_pos],
depth_target[valid_pos]/depth_recon[valid_pos]),
0)) / depth_target[valid_pos].size)
delta2 = (np.count_nonzero(
np.maximum(
1.25**2 - np.maximum(
depth_recon[valid_pos]/depth_target[valid_pos],
depth_target[valid_pos]/depth_recon[valid_pos]),
0)) / depth_target[valid_pos].size)
delta3 = (np.count_nonzero(
np.maximum(
1.25**3 - np.maximum(
depth_recon[valid_pos]/depth_target[valid_pos],
depth_target[valid_pos]/depth_recon[valid_pos]),
0)) / depth_target[valid_pos].size)
rel = (np.mean(
np.abs(depth_recon[valid_pos] - depth_target[valid_pos]) /
depth_target[valid_pos]))
print(
'rel:', rel,
', delta 1:', delta1,
', delta 2:', delta2,
', delta 3:', delta3)
batch_i += 1
except tf.errors.OutOfRangeError:
print('Done.')
finally:
# One of the threads has issued an exception. So let's tell all the
# threads to shutdown.
coord.request_stop()
# Wait until all threads have finished.
coord.join(threads)
# Clean up the session.
sess.close()
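# Hypothetical usage sketch: the CSV file name, image shapes and output path below
# are assumptions for illustration; they depend on how the dataset was prepared.
if __name__ == '__main__':
    generate_vae(
        files_train='list_train.csv',   # CSV with one image path (and label) per row
        input_shape=[None, 112, 112, 3],
        output_shape=[None, 112, 112, 1],
        crop_shape=[64, 64],
        batch_size=64,
        output_path='result',
        ckpt_name='vae.ckpt')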
|
python
|
"""
This provides wrappers for FMUs so they are easy to assemble in a short
script, support for writing data in HDF5 format using PyTables, and a
library of FMUs ready to be assembled.
The structure of the file starts with the FMU utilities, goes on to define
modules and simulations, and ends with a list of FMUs.
The end result is a simulation that is easy to specify in Python, and
self-described data stored in HDF5 files which can be read with either
Python or MATLAB. This does not work with Octave, however.
"""
import tables as tb
import re
import os
import platform
import numpy as np
from collections import OrderedDict
import datetime as dtime
import time # this might be redundant
import sys
import subprocess
import timeit
import shutil as shutil
import tempfile as tempfile
from collections import Counter
def patch_string(s):
special = "[=~!@\$%\^&\.\(\)-\+\\\|\]\}\[\{';:/\?<>]"
return re.sub(special, "_", s)
def platform_path():
if 'linux' in sys.platform:
return 'dymola_models/Linux/'
elif 'win' in sys.platform:
return 'dymola_models/Windows/'
else:
print('Unsupported platform ' + sys.platform)
sys.exit(1)
class module:
""" Base class for FMUs Everything and the kitchen sink.
This contains enough info to hide all the complexity of constructing
the command line for the execution environment.
Also, there is information here that goes into the HDF5 datafile.
That information is not mandatory but useful. No effort is made at
this time to parse modelDescription.xml files to gather UUID and other
good info.
"""
def __init__(self, filename, name, connectors=None, inputs=None,
outputs=None, parameters=None, host="tcp://localhost",
prefixcmd=[],
debug_level=0, port = None, description="",
aliases={}, attributes={}
):
self.id = None # for fmigo's command line
self.name=name
self.description=description
self.filename = filename # the fmu itself
self.connectors=connectors #kinematic couplings: variable lists
self.inputs = inputs # input ports
self.outputs = outputs # outs
if hasattr(self, "parameters") and parameters:
self.parameters.update(parameters)
else:
self.parameters=parameters
self.id = None # unique id assigned from simulation
self.port = port # will be assigned by simulation if on local host
self.host = host # which computer this runs on
self.prefixcmd = prefixcmd # special extra commands like optirun or
# xterm etc.
self.debug_level=debug_level
self.attributes = attributes
self.aliases = aliases
def make_aliases(self):
""" this is to make identical names for corresponding variables"""
if hasattr(self, "inputs") and self.inputs:
self.aliases.update(self.inputs)
if hasattr(self, "outputs") and self.outputs and type(self.outputs)==type({}):
self.aliases.update(self.outputs)
if hasattr(self, "connectors") and self.connectors:
for k,v in self.connectors.items():
self.aliases["phi_"+k] = v[0]
self.aliases["w_" +k] = v[1]
self.aliases["a_" +k] = v[2]
self.aliases["tau_"+k] = v[3]
# now construct the reverse map
self.raliases = {}
for k,v in self.aliases.items():
self.raliases[v] = k
# construct command line for parameters
def make_pars(self):
s=""
if self.parameters:
for k in self.parameters:
s +=":"+str(self.id)+","+k+","+self.parameters[k]
return ['-p', s[1:]]
else:
return []
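# For example (illustrative values): with id=0 and parameters
# {"mass": "1.5", "k": "10"}, make_pars() returns
# ['-p', '0,mass,1.5:0,k,10'] (ordering follows dict iteration order).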
def uri(self, mode="tcp"):
if mode == "tcp":
return self.host+":"+str(self.port)
elif mode=="mpi":
return self.filename
else:
print("Only mpi and tcp modes are available but %s was requested." %mode)
sys.exit(-1)
class simulation:
""" Here is all the runtime support for both TCP and MPI. Modules are
initialized and then sorted for the connections can be delivered to the
master directly from the dictionary of connections"""
def __init__(self, fmus,
couplings=(),
signals=(),
cleanup=None,
annotations=(),
START_PORT=3000,
extra_args=[],
debug_level=0 ,
name="",
variant="",
description="some simulation"
):
self.fmus = self.remove_name_clashes(fmus)
self.couplings = couplings # maybe this doesn't need to be stored
self.signals = signals # same
self.cleanup = cleanup
self.cmd_pre = []
self.cmd = []
self.cmd_post = []
self.cmd_pars = []
self.cmd_couplings = []
self.cmd_signals = []
self.ports = []
self.processes = [] # for when we use TCP
self.port = START_PORT
self.uris = None
self.extra_args = extra_args
self.debug_level = debug_level
self.is_kinematic = False
self.wall_time = 0
self.name = name
self.description = description
self.annotations = annotations
self.variant = variant
## Assign an ordering to the FMUs from 0
## This id is relevant only to fmigo-{master|mpi}
## We also take care of the parameters here --- could be done when
## launching simulation
def init_fmus(self, comm_step):
id = 0
self.cmd_pars = []
for k,f in self.fmus.items():
f.id = id
if hasattr(f, "setup"):
f.setup(comm_step)
id += 1
s=""
if f.parameters:
for k in f.parameters:
s +=":"+str(f.id)+","+k+","+f.parameters[k]
self.cmd_pars += ['-p', s[1:]]
f.make_aliases()
return
# assuming that no work has been done so far, one might have simply a
# dictionary of pairs and connector tags which then generates the
# command line bits. This really belongs in this class.
def init_couplings(self):
c = []
self.is_kinematic = True if self.couplings else False
for i in self.couplings:
if len(i) == 4:
c += ["-C", "shaft,%d,%d,%s,%s" %
(self.fmus[i[0]].id, self.fmus[i[2]].id,
",".join(self.fmus[i[0]].connectors[i[1]]),
",".join(self.fmus[i[2]].connectors[i[3]]))]
else:
c+= ["-C", "shaft,%d,%d,%s,%s,%s,%s,%s,%s,%s,%s"
%(self.fmus[i[0]].id, self.fmus[i[5]].id,
i[1], i[2], i[3], i[4], i[6], i[7], i[8], i[9])]
self.cmd_couplings = c
def init_signals(self):
c = []
if self.signals:
for i in self.signals:
c+= ["-c", "%d,%s,%d,%s" % (self.fmus[i[0]].id,
self.fmus[i[0]].outputs[i[1]]
if hasattr(self.fmus[i[0]], "outputs") and i[1] in self.fmus[i[0]].outputs else i[1],
self.fmus[i[2]].id,
self.fmus[i[2]].inputs[i[3]]
if hasattr(self.fmus[i[2]], "inputs") and i[3] in self.fmus[i[2]].inputs else i[3]
)
]
self.cmd_signals = c
# find the runtime resources dependencies and paths, which would
# include the AGX paths and environment variables.
def find_external_dependencies(self):
pass
# Construct command line for fmigo-server processes, start these,
# report both process ID and URI
def launch_tcp_servers(self):
self.uris = []
self.processes = []
for n,i in self.fmus.items():
cmd = []
if i.prefixcmd:
cmd = i.prefixcmd
cmd += ["fmigo-server"]
if i.debug_level:
cmd += ['-l', str(i.debug_level)]
cmd += ["--port", str(self.port), i.filename]
# TODO: this needs work since a given FMU might work on a different
# host but that's beyond the current scope
self.uris = self.uris +['tcp://localhost:%d' %self.port]
self.ports = self.ports + [self.port]
self.port += 1
self.processes = self.processes + [subprocess.Popen(cmd)]
return cmd
## Given an already assembled command line for connections and parameters,
## run the simulation and a cleanup command as needed.
## There's a possibility to override the filename here
def simulate(self,tend, dt, datafile, mode="tcp", holonomic=True,
parallel=True, max_samples=None, convertonly=False,
extra_args = None):
if extra_args:
self.extra_args = extra_args
self.datafile = datafile
self.tend = tend
self.dt = dt
self.mode = mode
self.annotations += ("mode", mode),
if datafile:
self.datafile = datafile
self.init_fmus( dt)
self.init_couplings()
self.init_signals()
self.parallel = parallel
self.mode = mode
processes = []
opts = ["-t", str(self.tend), "-d", str(self.dt), '-H']
if max_samples:
opts += ['-S', str(max_samples)]
if self.is_kinematic:
self.parallel = True
self.annotations += ("coupling", "kinematic"),
self.annotations += ("coupling_type", "%s" % "holonomic" if holonomic else "nonholonomic"),
if not holonomic:
opts += ["-N"]
else:
self.annotations += ("coupling", "signals"),
self.annotations += ("parallel", str(self.parallel)),
opts += ["-m", "%s" % ("jacobi" if self.parallel else "gs")]
opts += self.extra_args
if self.datafile:
opts += ['-o', self.datafile + ".csv" ]
opts += self.cmd_pars + self.cmd_couplings + self.cmd_signals
if self.debug_level:
opts += ['-l', str(self.debug_level)]
if self.mode == "tcp":
cmd = ['fmigo-master']+ opts
if not convertonly:
self.launch_tcp_servers()
cmd = cmd + self.uris
if not convertonly:
time.sleep(1)
elif self.mode == "mpi":
cmd = ['mpiexec', '-oversubscribe', '-np', str(1),'fmigo-mpi'] + opts
for n,i in self.fmus.items():
cmd += [":", "-np", str(1)]
if i.prefixcmd:
cmd += i.prefixcmd
cmd += ["fmigo-mpi"]
if i.debug_level:
cmd += ['-l', str(i.debug_level)]
cmd += [i.filename]
else:
print("Only mpi and tcp modes are available but %s was requested." %mode)
sys.exit(-1)
print(' '.join(cmd))
ts = dtime.datetime.now()
if not convertonly:
proc = subprocess.Popen(cmd)
self.wall_time = str(dtime.datetime.now() -ts)
self.annotations += ("wall_time", self.wall_time),
self.annotations += ("fail",
"true" if convertonly or proc.wait() != 0 else "success"),
csv = self.datafile + ".csv"
self.fix_csv(csv)
self.pack_FMU_CSV_H5()
# raise a signal if things went wrong
if not convertonly and proc.wait() != 0:
sys.exit(1)
return cmd
def fix_csv(self, csv):
## truncate file if there are bad lines
infile = open(csv, "r")
infile.seek(0)
l = infile.readline()
N = len(re.findall(",", l))
infile.close()
if N > 0:
outfile = tempfile.NamedTemporaryFile()
infile = open(csv, "r")
infile.seek(0)
for l in infile:
if N == len(re.findall(",", l)):
outfile.write(l.encode())
else:
break
# close and reopen to overwrite
outfile.seek(0)
infile.close()
infile = open(csv, "wb")
shutil.copyfileobj(outfile, infile)
outfile.close()
infile.close()
#
# This provides the operations necessary to take a csv file containing
# data from several different modules and repack them in a well structured
# hdf5 file with annotation.
#
# It is also possible to append the full simulation to an existing HDF5
# file.
#
# Because there are many operations involved in converting csv files to
# decent HDF5 ones, we make a class and split the various tricks into
# methods which can be tested individually.
#
def pack_FMU_CSV_H5(self):
"""
List of FMU names, the csv file to read from, the h5 file to write to, the
communication step, a tuple of (name, value) pairs for attributes, and an
additional description string.
The HDF5 files have a structure as follows:
/
./simulation0001 ./simulation0002 ... ./simulationXXXX
Numbering is done automatically for each new simulation dumped in a file.
Simulation groups have the structure
./simulationXXXX
./global_data ./fmu1 ./fmu2 .../fmuN
and the group for each fmu group is the name of the FMU as given in the
module definitions. The global_data group contains a pytable with data
collected by the master which does not belong to any FMU.
Each fmu group then contains a pytable.
The simulation groups also contain the following attributes:
-- date
-- start (TODO: this should be as accurate as possible)
-- stop
-- comm_step
-- N (number of steps)
-- kinematic (value is no, holonomic, nonholonomic)
-- OS on which simulation ran
-- mode (mpi or tcp/ip)
-- parallel (yes or no, depending on whether we used Jacobi or sequential)
"""
## short hands
csv = self.datafile+".csv"
hdf = self.datafile+".h5"
H = self.dt
fmus = list(self.fmus.items())
### Utility functions follow
## Replace special characters in `names' in csv files otherwise they
## get deleted.
def patch_string(s):
special = "[=~!@\$%\^&\.\(\)-\+\\\|\]\}\[\{';:/\?<>]"
return re.sub(special, "_", s)
## Replace special characters in `names' in csv files otherwise they
## get deleted.
def patch_names(file):
f = open(file, "r+")
line = f.readline()
n = len(line)
line = patch_string(line)
if len(line) <= n: # here we can do this in place.
f.seek(0)
f.write(line)
f.close()
else: # here we have to move all the data
tf = tempfile.NamedTemporaryFile(delete=False)
tf.write(line.encode())
for l in f:
tf.write(l.encode())
f.close()
tf.close()
shutil.move(tf.name, file)
def set_attr(g, name, attr):
g._f_setattr(patch_string(name), attr)
# Works on a numpy structured array and replaces non-descriptive names with
# sensible ones, so that `fmu_XXX_something' becomes `something'.
# This is all put under a group called fmu_XXX in the end.
# Here, x contains all the names corresponding to one given FMU, i.e.,
# fmu1_x, fmu1_y, fmu1_z, ..., fmu1_...
def rename(x):
names = ()
for i in x.dtype.names:
v = re.search('fmu([^_]+)_*',i)
if v:
i = re.sub("fmu[^_]+_", "", i)
names = names + (i,)
x.dtype.names = names
# remove bad characters in the names in the data file.
patch_names(csv)
# read
data = np.genfromtxt(csv, delimiter=",", names=True)
# split the columns according to fmu0_*, fmu1_* etc
# and the rest
# get all keys starting with fmu[[:digits:]]+_ and replace with
# fmu_[[:digits:]]
# sorted makes the list [fmu1, fmu2, fmu3 ... ]
keys = sorted(list(OrderedDict.fromkeys(
re.sub("(fmu[^_]+)_[^ ]*", "\g<1>",
" ".join(data.dtype.names)).split())))
# global simulation data: whatever does not start with fmu
global_cols = sorted(re.sub("fmu[^ ]+", "", " ".join(keys)).split())
# modules is what's left over
fmu_cols = sorted(list(set(keys) - set(global_cols)))
# Time to get an output file
dfile = tb.open_file(hdf, mode = "a")
## TODO: something smart to organize data
n = len(dfile.list_nodes("/")) +1
g = dfile.create_group("/", 'simulation%04d' % n , 'Simulation')
set_attr(g,"date", dtime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))
time_units = [["s", 1], ["ms", 1e3],
["mus", 1e6]]
time_scale = 2
set_attr(g,"comm_step", str(self.dt*time_units[time_scale][1]))
set_attr(g,"time_unit", str(time_units[time_scale][0]))
set_attr(g,"time_factor", str(time_units[time_scale][1]))
set_attr(g,"tend", str(self.tend))
set_attr(g, "os", platform.uname().system)
set_attr(g, "machine", platform.uname().node)
set_attr(g, "release", platform.uname().release)
if self.name:
set_attr(g, "name", self.name)
if self.variant:
set_attr(g, "variant", self.variant)
for i in self.annotations:
set_attr(g, i[0], i[1])
if self.description:
set_attr(g, "description", self.description)
data_map = {}
# store all columns fmuX_* in a separate group, name the group
# according to a dictionary entry in fmus
for i in fmu_cols:
# get the FMU id
ix = int(re.sub("fmu([^ ]*)", "\g<1>", i))
rx = re.compile("%s_[^ ]*" % i)
# get the list of columns
c = re.findall(rx, " ".join(data.dtype.names))
# put the corresponding table in a dictionary
data_map[fmus[ix][0]] = data[c]
# now time for suitable names
rename(data_map[fmus[ix][0]])
# Here's where we insert the data
# TODO: the group should have additional attributes from the tuple
# of duple.
table = dfile.create_table(g, fmus[ix][0], data_map[fmus[ix][0]], "FMU data")
set_attr(table, "name", fmus[ix][1].name)
## attributes are
if hasattr(fmus[ix][1], "attributes"):
for k,v in fmus[ix][1].attributes.items():
set_attr(table, k, v)
if hasattr(fmus[ix][1], "parameters") and fmus[ix][1].parameters:
for k,v in fmus[ix][1].parameters.items():
set_attr(table, k, v)
if hasattr(fmus[ix][1], "description"):
set_attr(table, "description", fmus[ix][1].description)
if hasattr(fmus[ix][1], "aliases"):
set_attr(table, "aliases", fmus[ix][1].aliases)
if hasattr(fmus[ix][1], "raliases"):
set_attr(table, "raliases", fmus[ix][1].raliases)
data_map["stepper"] = data[global_cols]
table = dfile.create_table(g, "simulation", data_map["stepper"], "Stepper data")
dfile.close()
return dfile
## It is possible for a given FMU to appear more than once in a simulation so this
## function will rename as needed.
def remove_name_clashes(self, fmus):
"""
fmus is a dictionary containing objects which are assumed to have a
"name" attribute
"""
#1) collect names from the dictionary into a list
names = []
for k,v in fmus.items():
names += [v.name]
# Counter creates a dictionary with the names as key and incidence
# count as value.
counts = Counter(names)
# Here we take care of elements with incidence count higher than 1.
# For each of these, names.index(s) returns the index of the first element
# with that name. Since that name is being modified, we consume all
# duplicates and append an integer suffix to each.
for s,num in counts.items():
if num > 1:
for suffix in range(1, num+1):
names[names.index(s)] += "%03d" % suffix
# Now replace the names in the original dictionary,
# consuming the elements in names as we go along. The sequence of the
# dictionary traversal is the same as before.
for k,v in fmus.items():
v.name = names.pop(0)
return fmus
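# Hypothetical usage sketch: FMU file names, connector tags and parameters below
# are invented for illustration; a real assembly depends on the available FMUs.
if __name__ == "__main__":
    engine = module("engine.fmu", "engine",
                    connectors={"shaft": ["phi", "w", "a", "tau"]},
                    parameters={"J": "1.0"})
    gearbox = module("gearbox.fmu", "gearbox",
                     connectors={"input_shaft": ["phi", "w", "a", "tau"]})
    sim = simulation({"engine": engine, "gearbox": gearbox},
                     couplings=(("engine", "shaft", "gearbox", "input_shaft"),),
                     name="driveline_demo",
                     description="two FMUs coupled through a kinematic shaft")
    sim.simulate(tend=1.0, dt=1e-3, datafile="driveline_demo", mode="mpi")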
|
python
|
import numpy as np
from jitcdde import input as system_input
from symengine import exp
from ..builder.base.constants import EXC, INH, LAMBDA_SPEED
from ..builder.base.network import SingleCouplingExcitatoryInhibitoryNode
from ..builder.base.neural_mass import NeuralMass
from .model_input import OrnsteinUhlenbeckProcess, ZeroInput
TCR_DEFAULT_PARAMS = {
"tau": 20.0, # ms
"Q_max": 400.0e-3, # 1/ms
"theta": -58.5, # mV
"sigma": 6.0,
"C1": 1.8137993642,
"C_m": 1.0, # muF/cm^2
"gamma_e": 70.0e-3, # 1/ms
"gamma_r": 100.0e-3, # 1/ms
"g_L": 1.0, # AU
"g_GABA": 1.0, # ms
"g_AMPA": 1.0, # ms
"g_LK": 0.018, # mS/cm^2
"g_T": 3.0, # mS/cm^2
"g_h": 0.062, # mS/cm^2
"E_AMPA": 0.0, # mV
"E_GABA": -70.0, # mV
"E_L": -70.0, # mV
"E_K": -100.0, # mV
"E_Ca": 120.0, # mV
"E_h": -40.0, # mV
"alpha_Ca": -51.8e-6, # nmol
"tau_Ca": 10.0, # ms
"Ca_0": 2.4e-4,
"k1": 2.5e7,
"k2": 4.0e-4,
"k3": 1.0e-1,
"k4": 1.0e-3,
"n_P": 4.0,
"g_inc": 2.0,
"ext_current": 0.0,
"lambda": LAMBDA_SPEED,
}
TRN_DEFAULT_PARAMS = {
"tau": 20.0, # ms
"Q_max": 400.0e-3, # 1/ms
"theta": -58.5, # mV
"sigma": 6.0,
"C1": 1.8137993642,
"C_m": 1.0, # muF/cm^2
"gamma_e": 70.0e-3, # 1/ms
"gamma_r": 100.0e-3, # 1/ms
"g_L": 1.0, # AU
"g_GABA": 1.0, # ms
"g_AMPA": 1.0, # ms
"g_LK": 0.018, # mS/cm^2
"g_T": 2.3, # mS/cm^2
"E_AMPA": 0.0, # mV
"E_GABA": -70.0, # mV
"E_L": -70.0, # mV
"E_K": -100.0, # mV
"E_Ca": 120.0, # mV
"ext_current": 0.0,
"lambda": LAMBDA_SPEED,
}
# matrix as [to, from], masses as (TCR, TRN)
THALAMUS_NODE_DEFAULT_CONNECTIVITY = np.array([[0.0, 5.0], [3.0, 25.0]])
class ThalamicMass(NeuralMass):
"""
Base for thalamic neural populations
Reference:
Costa, M. S., Weigenand, A., Ngo, H. V. V., Marshall, L., Born, J.,
Martinetz, T., & Claussen, J. C. (2016). A thalamocortical neural mass
model of the EEG during NREM sleep and its response to auditory stimulation.
PLoS computational biology, 12(9).
"""
name = "Thalamic mass"
label = "THLM"
def _get_firing_rate(self, voltage):
return self.params["Q_max"] / (
1.0 + exp(-self.params["C1"] * (voltage - self.params["theta"]) / self.params["sigma"])
)
# synaptic currents
def _get_excitatory_current(self, voltage, synaptic_rate):
return self.params["g_AMPA"] * synaptic_rate * (voltage - self.params["E_AMPA"])
def _get_inhibitory_current(self, voltage, synaptic_rate):
return self.params["g_GABA"] * synaptic_rate * (voltage - self.params["E_GABA"])
# intrinsic currents
def _get_leak_current(self, voltage):
return self.params["g_L"] * (voltage - self.params["E_L"])
def _get_potassium_leak_current(self, voltage):
return self.params["g_LK"] * (voltage - self.params["E_K"])
def _get_T_type_current(self, voltage, h_T_value):
return (
self.params["g_T"]
* self._m_inf_T(voltage)
* self._m_inf_T(voltage)
* h_T_value
* (voltage - self.params["E_Ca"])
)
class ThalamocorticalMass(ThalamicMass):
"""
Excitatory mass representing thalamocortical relay neurons in the thalamus.
"""
name = "Thalamocortical relay mass"
label = "TCR"
mass_type = EXC
num_state_variables = 10
num_noise_variables = 1
coupling_variables = {9: f"r_mean_{EXC}"}
required_couplings = ["node_exc_exc", "node_exc_inh", "network_exc_exc"]
state_variable_names = [
"V",
"Ca",
"h_T",
"m_h1",
"m_h2",
"s_e",
"s_i",
"ds_e",
"ds_i",
"r_mean",
]
required_params = [
"tau",
"Q_max",
"theta",
"sigma",
"C1",
"C_m",
"gamma_e",
"gamma_r",
"g_L",
"g_GABA",
"g_AMPA",
"g_LK",
"g_T",
"g_h",
"E_AMPA",
"E_GABA",
"E_L",
"E_K",
"E_Ca",
"E_h",
"alpha_Ca",
"tau_Ca",
"Ca_0",
"k1",
"k2",
"k3",
"k4",
"n_P",
"g_inc",
"ext_current",
"lambda",
]
noise_input = [OrnsteinUhlenbeckProcess(mu=0.0, sigma=0.0, tau=5.0)]
def __init__(self, params=None):
super().__init__(params=params or TCR_DEFAULT_PARAMS)
def _initialize_state_vector(self):
"""
Initialize state vector.
"""
self.initial_state = [
self.params["E_L"],
self.params["Ca_0"],
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
def _get_anomalous_rectifier_current(self, voltage, m_h1_value, m_h2_value):
return self.params["g_h"] * (m_h1_value + self.params["g_inc"] * m_h2_value) * (voltage - self.params["E_h"])
def _m_inf_T(self, voltage):
return 1.0 / (1.0 + exp(-(voltage + 59.0) / 6.2))
def _h_inf_T(self, voltage):
return 1.0 / (1.0 + exp((voltage + 81.0) / 4.0))
def _tau_h_T(self, voltage):
return (30.8 + (211.4 + exp((voltage + 115.2) / 5.0)) / (1.0 + exp((voltage + 86.0) / 3.2))) / 3.7371928
def _m_inf_h(self, voltage):
return 1.0 / (1.0 + exp((voltage + 75.0) / 5.5))
def _tau_m_h(self, voltage):
return 20.0 + 1000.0 / (exp((voltage + 71.5) / 14.2) + exp(-(voltage + 89.0) / 11.6))
def _P_h(self, ca_conc):
return (self.params["k1"] * ca_conc ** self.params["n_P"]) / (
self.params["k1"] * ca_conc ** self.params["n_P"] + self.params["k2"]
)
def _derivatives(self, coupling_variables):
(
voltage,
ca_conc,
h_T,
m_h1,
m_h2,
syn_ext,
syn_inh,
dsyn_ext,
dsyn_inh,
firing_rate,
) = self._unwrap_state_vector()
# voltage dynamics
d_voltage = -(
self._get_leak_current(voltage)
+ self._get_excitatory_current(voltage, syn_ext)
+ self._get_inhibitory_current(voltage, syn_inh)
+ self.params["ext_current"]
) / self.params["tau"] - (1.0 / self.params["C_m"]) * (
self._get_potassium_leak_current(voltage)
+ self._get_T_type_current(voltage, h_T)
+ self._get_anomalous_rectifier_current(voltage, m_h1, m_h2)
)
# calcium concentration dynamics
d_ca_conc = (
self.params["alpha_Ca"] * self._get_T_type_current(voltage, h_T)
- (ca_conc - self.params["Ca_0"]) / self.params["tau_Ca"]
)
# channel dynamics: T-type and rectifier current
d_h_T = (self._h_inf_T(voltage) - h_T) / self._tau_h_T(voltage)
d_m_h1 = (
(self._m_inf_h(voltage) * (1.0 - m_h2) - m_h1) / self._tau_m_h(voltage)
- self.params["k3"] * self._P_h(ca_conc) * m_h1
+ self.params["k4"] * m_h2
)
d_m_h2 = self.params["k3"] * self._P_h(ca_conc) * m_h1 - self.params["k4"] * m_h2
# synaptic dynamics
d_syn_ext = dsyn_ext
d_syn_inh = dsyn_inh
d_dsyn_ext = (
self.params["gamma_e"] ** 2
* (
coupling_variables["node_exc_exc"]
+ coupling_variables["network_exc_exc"]
+ system_input(self.noise_input_idx[0])
- syn_ext
)
- 2 * self.params["gamma_e"] * dsyn_ext
)
d_dsyn_inh = (
self.params["gamma_r"] ** 2 * (coupling_variables["node_exc_inh"] - syn_inh)
- 2 * self.params["gamma_r"] * dsyn_inh
)
# firing rate as dummy dynamical variable with infinitely fast
# fixed-point dynamics
firing_rate_now = self._get_firing_rate(voltage)
d_firing_rate = -self.params["lambda"] * (firing_rate - firing_rate_now)
return [
d_voltage,
d_ca_conc,
d_h_T,
d_m_h1,
d_m_h2,
d_syn_ext,
d_syn_inh,
d_dsyn_ext,
d_dsyn_inh,
d_firing_rate,
]
class ThalamicReticularMass(ThalamicMass):
"""
Inhibitory mass representing thalamic reticular nuclei neurons in the
thalamus.
"""
name = "Thalamic reticular nuclei mass"
label = "TRN"
mass_type = INH
num_state_variables = 7
num_noise_variables = 1
coupling_variables = {6: f"r_mean_{INH}"}
required_couplings = ["node_inh_exc", "node_inh_inh", "network_inh_exc"]
state_variable_names = [
"V",
"h_T",
"s_e",
"s_i",
"ds_e",
"ds_i",
"r_mean",
]
required_params = [
"tau",
"Q_max",
"theta",
"sigma",
"C1",
"C_m",
"gamma_e",
"gamma_r",
"g_L",
"g_GABA",
"g_AMPA",
"g_LK",
"g_T",
"E_AMPA",
"E_GABA",
"E_L",
"E_K",
"E_Ca",
"ext_current",
"lambda",
]
noise_input = [ZeroInput()]
def __init__(self, params=None):
super().__init__(params=params or TRN_DEFAULT_PARAMS)
def _m_inf_T(self, voltage):
return 1.0 / (1.0 + exp(-(voltage + 52.0) / 7.4))
def _h_inf_T(self, voltage):
return 1.0 / (1.0 + exp((voltage + 80.0) / 5.0))
def _tau_h_T(self, voltage):
return (85.0 + 1.0 / (exp((voltage + 48.0) / 4.0) + exp(-(voltage + 407.0) / 50.0))) / 3.7371928
def _initialize_state_vector(self):
"""
Initialize state vector.
"""
self.initial_state = [
self.params["E_L"],
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
def _derivatives(self, coupling_variables):
(
voltage,
h_T,
syn_ext,
syn_inh,
dsyn_ext,
dsyn_inh,
firing_rate,
) = self._unwrap_state_vector()
# voltage dynamics
d_voltage = -(
self._get_leak_current(voltage)
+ self._get_excitatory_current(voltage, syn_ext)
+ self._get_inhibitory_current(voltage, syn_inh)
+ self.params["ext_current"]
) / self.params["tau"] - (1.0 / self.params["C_m"]) * (
self._get_potassium_leak_current(voltage) + self._get_T_type_current(voltage, h_T)
)
# channel dynamics: T-type
d_h_T = (self._h_inf_T(voltage) - h_T) / self._tau_h_T(voltage)
# synaptic dynamics
d_syn_ext = dsyn_ext
d_syn_inh = dsyn_inh
d_dsyn_ext = (
self.params["gamma_e"] ** 2
* (
coupling_variables["node_inh_exc"]
+ coupling_variables["network_inh_exc"]
+ system_input(self.noise_input_idx[0])
- syn_ext
)
- 2 * self.params["gamma_e"] * dsyn_ext
)
d_dsyn_inh = (
self.params["gamma_r"] ** 2 * (coupling_variables["node_inh_inh"] - syn_inh)
- 2 * self.params["gamma_r"] * dsyn_inh
)
# firing rate as dummy dynamical variable with infinitely fast
# fixed-point dynamics
firing_rate_now = self._get_firing_rate(voltage)
d_firing_rate = -self.params["lambda"] * (firing_rate - firing_rate_now)
return [
d_voltage,
d_h_T,
d_syn_ext,
d_syn_inh,
d_dsyn_ext,
d_dsyn_inh,
d_firing_rate,
]
class ThalamicNode(SingleCouplingExcitatoryInhibitoryNode):
"""
Thalamic mass model network node with 1 excitatory (TCR) and 1 inhibitory
(TRN) population, following Costa et al.
"""
name = "Thalamic mass model node"
label = "THLMnode"
default_network_coupling = {"network_exc_exc": 0.0, "network_inh_exc": 0.0}
default_output = f"r_mean_{EXC}"
output_vars = [f"r_mean_{EXC}", f"r_mean_{INH}", f"V_{EXC}", f"V_{INH}"]
def __init__(
self,
tcr_params=None,
trn_params=None,
connectivity=THALAMUS_NODE_DEFAULT_CONNECTIVITY,
):
"""
:param tcr_params: parameters for the excitatory (TCR) mass
:type tcr_params: dict|None
:param trn_params: parameters for the inhibitory (TRN) mass
:type trn_params: dict|None
:param connectivity: local connectivity matrix
:type connectivity: np.ndarray
"""
tcr_mass = ThalamocorticalMass(params=tcr_params)
tcr_mass.index = 0
trn_mass = ThalamicReticularMass(params=trn_params)
trn_mass.index = 1
super().__init__(
neural_masses=[tcr_mass, trn_mass],
local_connectivity=connectivity,
# within thalamic node there are no local delays
local_delays=None,
)
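# Illustrative only (relies on the surrounding package's builder API): a node
# with default parameters and default TCR/TRN connectivity could be built as
#
#   node = ThalamicNode()
#
# and then wired into a network like any other
# SingleCouplingExcitatoryInhibitoryNode.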
|
python
|
import os
import shutil
os_license = '{{ cookiecutter.license }}'
notebooks = "{{ cookiecutter['jupyter notebooks'] }}"
containers = '{{ cookiecutter.containers }}'
if os_license == "No license file":
os.remove("LICENSE")
if notebooks == "No":
shutil.rmtree("src/notebooks")
if containers == "one":
shutil.rmtree('containers/exploration')
elif containers == "several":
os.remove("containers/Dockerfile")
os.remove("containers/environment.yml")
|
python
|
"""Basic functionality sanity tests"""
from urllib.parse import urljoin
import pytest
# Various sub-urls for the main page
PAGE_URL_LIST = ["", "about/", "docs/", "scls/"]
# Various sub-urls for a collection
SCL_URL_LIST = ["", "edit/", "coprs/", "repos/", "acl/", "review_req/"]
@pytest.mark.django_db
def test_scl_page(client):
"""Selected SCL page contains expected content"""
response = client.get("/en/scls/hhorak/rpmquality/")
assert b"check-content-instructions" in response.content
assert b"check-content-description" in response.content
assert b"yum install centos-release-scl" in response.content
@pytest.mark.parametrize("url_tail", PAGE_URL_LIST)
@pytest.mark.django_db
def test_top_page_accessible(client, url_tail):
"""Top-level sub-page is accessible."""
url = urljoin("/en/", url_tail)
response = client.get(url)
assert response.status_code == 200
@pytest.mark.parametrize("url_tail", SCL_URL_LIST)
@pytest.mark.django_db
def test_scl_page_accessible(admin_client, url_tail):
"""SCL-level sub-page is accessible."""
url = urljoin("/en/scls/hhorak/rpmquality/", url_tail)
response = admin_client.get(url)
assert response.status_code == 200
@pytest.mark.parametrize(
"endpoint",
[
pytest.param("/scls/-/live"),
pytest.param("/scls/-/ready", marks=pytest.mark.django_db),
],
)
def test_scl_health_checks_is_never_cached(client, endpoint):
"""The health check indicator instructs clients to never cache the results"""
response = client.get(endpoint, follow=True)
assert 200 <= response.status_code < 300
assert "no-cache" in response["Cache-Control"]
|
python
|
from collections import OrderedDict
import jsonschema
from django.db.models import Q
from jsonschema import Draft4Validator
from rest_framework import serializers
from architecture_tool_django.modeling.models import Edgetype, Nodetype
from ..models import Edge, Node
class EdgeSerializer(serializers.ModelSerializer):
edge_type = serializers.CharField(source="edge_type.edgetype")
class Meta:
model = Edge
fields = ["edge_type", "target"]
def to_representation(self, instance):
ret = super().to_representation(instance)
# Filter out null values and create a new dictionary.
# We use an OrderedDict like in the original method.
return OrderedDict([(key, ret[key]) for key in ret if ret[key] is not None])
class NodeSerializer(serializers.ModelSerializer):
outbound_edges = EdgeSerializer(many=True, read_only=True)
class Meta:
model = Node
fields = ["key", "nodetype", "attributeSet", "outbound_edges"]
def to_representation(self, instance):
ret = super().to_representation(instance)
# Filter out null values and create a new dictionary.
# We use an OrderedDict like in the original method.
return OrderedDict([(key, ret[key]) for key in ret if ret[key] is not None])
def validate_attributeSet(self, value):
nodetype = self.get_initial().get("nodetype")
v = Draft4Validator(
Nodetype.objects.get(pk=nodetype).attribute_schema.schema,
format_checker=jsonschema.FormatChecker(),
)
errors = []
for error in v.iter_errors(value):
errors.append(error.message)
if errors:
raise serializers.ValidationError(errors)
return value
# def validate_outbound_edges(self, value):
# return value
def create(self, validated_data):
try:
node = Node.objects.create(**validated_data)
for edge in self.initial_data["outbound_edges"]:
target_node = Node.objects.get(key=edge["target"])
edge_type = Edgetype.objects.get(
Q(source_nodetype=node.nodetype)
& Q(target_nodetype=target_node.nodetype)
& Q(edgetype=edge["edge_type"])
)
node.add_edge(target_node, edge_type)
node.save()
except Exception as error:
raise serializers.ValidationError(error)
return node
def update(self, instance, validated_data):
try:
instance.remove_all_edges()
for edge in self.initial_data["outbound_edges"]:
target_node = Node.objects.get(key=edge["target"])
edge_type = Edgetype.objects.get(
Q(source_nodetype=instance.nodetype)
& Q(target_nodetype=target_node.nodetype)
& Q(edgetype=edge["edge_type"])
)
instance.add_edge(target_node, edge_type)
instance.attributeSet = validated_data.get(
"attributeSet", instance.attributeSet
)
instance.save()
except Exception as error:
raise serializers.ValidationError(error)
return instance
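# Illustrative payload shape accepted by NodeSerializer.create()/update() above
# (field names come from the serializer; the concrete values are invented):
#
# {
#     "key": "SRV-001",
#     "nodetype": 1,
#     "attributeSet": {"title": "Some system"},
#     "outbound_edges": [{"edge_type": "depends_on", "target": "SRV-002"}]
# }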
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from patsy import dmatrix
from patsy import DesignMatrix, DesignInfo
from patsy import LookupFactor,ModelDesc,Term
X = [[1, 10], [1, 20], [1, -2]]
print(dmatrix(X))
design_info = DesignInfo(["Intercept!", "Not intercept!"])
X_dm = DesignMatrix(X, design_info)
print(dmatrix(X_dm))
def add_predictors(base_formula, extra_predictors):
desc = ModelDesc.from_formula(base_formula)
# Using LookupFactor here ensures that everything will work correctly even
# if one of the column names in extra_columns is named like "weight.in.kg"
# or "sys.exit()" or "LittleBobbyTables()".
desc.rhs_termlist += [Term([LookupFactor(p)]) for p in extra_predictors]
return desc
extra_predictors = [f"x{i}" for i in range(10)]
desc = add_predictors("np.log(y) ~ a*b + c:d", extra_predictors)
print(desc.describe())
|
python
|
table_file_template = """
import argparse
from deriva.core import ErmrestCatalog, AttrDict, get_credential
import deriva.core.ermrest_model as em
from deriva.core import tag as chaise_tags
from deriva.utils.catalog.manage.update_catalog import CatalogUpdater, parse_args
{groups}
table_name = '{table_name}'
schema_name = '{schema_name}'
{column_annotations}
{column_defs}
{table_annotations}
{key_defs}
{fkey_defs}
{table_def}
def main(catalog, mode, replace=False, really=False):
updater = CatalogUpdater(catalog)
table_def['column_annotations'] = column_annotations
table_def['column_comment'] = column_comment
updater.update_table(mode, schema_name, table_def, replace=replace, really=really)
if __name__ == "__main__":
host = {host!r}
catalog_id = {catalog_id}
mode, replace, host, catalog_id = parse_args(host, catalog_id, is_table=True)
catalog = ErmrestCatalog('https', host, catalog_id=catalog_id, credentials=get_credential(host))
main(catalog, mode, replace)
"""
schema_file_template = """
import argparse
from deriva.core import ErmrestCatalog, AttrDict, get_credential
import deriva.core.ermrest_model as em
from deriva.core import tag as chaise_tags
from deriva.utils.catalog.manage.update_catalog import CatalogUpdater, parse_args
{groups}
schema_name = '{schema_name}'
{table_names}
{annotations}
{acls}
{comments}
schema_def = em.Schema.define(
'{schema_name}',
comment=comment,
acls=acls,
annotations=annotations,
)
def main(catalog, mode, replace=False):
updater = CatalogUpdater(catalog)
updater.update_schema(mode, schema_def, replace=replace)
if __name__ == "__main__":
host = {host!r}
catalog_id = {catalog_id}
mode, replace, host, catalog_id = parse_args(host, catalog_id)
catalog = ErmrestCatalog('https', host, catalog_id=catalog_id, credentials=get_credential(host))
main(catalog, mode, replace)
"""
catalog_file_template = """
import argparse
from deriva.core import ErmrestCatalog, AttrDict, get_credential
from deriva.utils.catalog.manage.update_catalog import CatalogUpdater, parse_args
from deriva.core import tag as chaise_tags
import deriva.core.ermrest_model as em
{groups}
{tag_variables}
{annotations}
{acls}
def main(catalog, mode, replace=False):
updater = CatalogUpdater(catalog)
updater.update_catalog(mode, annotations, acls, replace=replace)
if __name__ == "__main__":
host = {host!r}
catalog_id = {catalog_id}
mode, replace, host, catalog_id = parse_args(host, catalog_id, is_catalog=True)
catalog = ErmrestCatalog('https', host, catalog_id=catalog_id, credentials=get_credential(host))
main(catalog, mode, replace)
"""
|
python
|
from opentelemetry import trace
from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleExportSpanProcessor
trace.set_tracer_provider(TracerProvider())
cloud_trace_exporter = CloudTraceSpanExporter()
trace.get_tracer_provider().add_span_processor(
SimpleExportSpanProcessor(cloud_trace_exporter)
)
tracer = trace.get_tracer(__name__)
with tracer.start_as_current_span("foo"):
print("Hello world!")
|
python
|
# Two ways to read an integer from user input...
def leiaint(msg):
valor = 0
while True:
n = str(input(msg))
if n.isnumeric():
valor = int(n)
break
else:
print('\033[31mERROR! Please enter a valid integer.\033[m')
return valor
a = leiaint('Enter a value: ')
print(f'{a} is an integer!')
def leiainteiro(msg):
while True:
try:
n = int(input(msg))
break
except ValueError:
print('\033[31mERROR! Please enter a valid integer.\033[m')
return n
b = leiainteiro('Enter a number: ')
print(f'{b} is an integer!')
|
python
|
from torch.nn import CrossEntropyLoss
class GPT2Loss(CrossEntropyLoss):
def __init__(self, pad_token_id):
super(GPT2Loss, self).__init__(ignore_index=pad_token_id)
def forward(self, output, labels):
"""
Loss function for gpt2
:param output:
:param labels:
:return:
"""
# Flatten the tensors (shift-align)
# Remove last token from output
output = output[..., :-1, :].contiguous().view(-1, output.size(-1))
# Remove the first token from labels and ignore the question part
labels = (labels[..., 1:].contiguous()).view(-1)
# Compute the actual loss
return super(GPT2Loss, self).forward(output, labels)
class VisualGPT2Loss(GPT2Loss):
def __init__(self, pad_token_id, extract=None):
super(VisualGPT2Loss, self).__init__(pad_token_id=pad_token_id)
# Always define self.extract so forward() does not fail when extract is None
self.extract = None
if extract is not None:
assert isinstance(extract, int), 'Extract value MUST be integer'
self.extract = extract
def forward(self, output, labels):
if self.extract is not None:
output = output[self.extract]
# Compute the actual loss
return super(VisualGPT2Loss, self).forward(output, labels[0])
class BERTLoss(CrossEntropyLoss):
def __init__(self, pad_token_id):
super(BERTLoss, self).__init__(ignore_index=pad_token_id)
def forward(self, output, labels):
"""
Loss function for BERT
:param output:
:param labels:
:return:
"""
# Flatten the tensors (shift-align)
# Remove last token from output
output = output[..., :-1, :].contiguous().view(-1, output.size(-1))
# Remove the first token from labels and ignore the question part
labels = (labels[..., 1:].contiguous()).view(-1)
# Compute the actual loss
return super(BERTLoss, self).forward(output, labels)
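if __name__ == '__main__':
    # Minimal sanity check with random tensors (shapes are illustrative):
    # batch of 2 sequences of length 5 over a vocabulary of 10, pad id 0.
    import torch
    criterion = GPT2Loss(pad_token_id=0)
    logits = torch.randn(2, 5, 10)
    labels = torch.randint(1, 10, (2, 5))
    print(criterion(logits, labels))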
|
python
|
from mccq.cli import cli
|
python
|
import sys
from bidi.algorithm import get_display
# Variable-length print; the string constants are adjusted for bidi display.
# An unrelated issue is that VS Code doesn't support bidi;
# the workaround is to put bidi text into separate string variables.
# Not entirely sure whether we need to swap each element separately...
def printb(*args, sep=' ', end='\n', file=sys.stdout, flush=False):
#lst = reversed( list( map( lambda arg : get_display(arg) if isinstance(arg, str) else str(arg), args) ) )
lst = reversed( list( map( lambda arg : get_display(str(arg)), args) ) )
print( ' '.join( lst ), sep=sep, end=end, file=file, flush=flush)
#def printb(*args, sep=' ', end='\n', file=sys.stdout, flush=False):
# print( get_display( ' '.join(map(str, args))), sep=sep, end=end, file=file, flush=flush)
#
def inputb(prompt=''):
printb( prompt ,end="", flush=True)
return input()
|
python
|
import os
from pathlib import Path
from string import Template
template = Template("""
Package: zram-swap
Version: $version
Depends: python3, libpython3-stdlib
Section: custom
Priority: optional
Architecture: all
Essential: no
Installed-Size: $size
Maintainer: Dmitry Orlov <[email protected]>
Description: Easy way to configure swap over zram
""".strip())
files = (
Path("zram-swap"),
Path("zram-swap.default"),
Path("zram-swap.service"),
)
print(template.substitute(
version=os.environ['VERSION'],
size=sum(file.stat().st_size for file in files)
))
|
python
|
"""Test label"""
from napari.components.text_overlay import TextOverlay
def test_label():
"""Test creating label object"""
label = TextOverlay()
assert label is not None
|
python
|
import sys, clr
import ConfigParser
from os.path import expanduser
# Set system path
home = expanduser("~")
cfgfile = open(home + "\\STVTools.ini", 'r')
config = ConfigParser.ConfigParser()
config.read(home + "\\STVTools.ini")
# Master Path
syspath1 = config.get('SysDir','MasterPackage')
sys.path.append(syspath1)
# Built Path
syspath2 = config.get('SysDir','SecondaryPackage')
sys.path.append(syspath2)
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
from Autodesk.Revit.DB import Document,FilteredElementCollector,FamilySymbol,Transaction,\
BuiltInCategory, ElementId, ViewSchedule, View, ImportInstance
from Autodesk.Revit.UI import TaskDialog
from pyrevit.framework import List
from pyrevit import revit, DB
from pyrevit import forms
__doc__ = 'Replace DWG with Annotation, select from a list.'
def get_selected_elements(doc):
"""API change in Revit 2016 makes old method throw an error"""
try:
# Revit 2016
return [doc.GetElement(id)
for id in __revit__.ActiveUIDocument.Selection.GetElementIds()]
except:
# old method
return list(__revit__.ActiveUIDocument.Selection.Elements)
# selection = get_selected_elements(doc)
importInstance = []
replaceImports = []
ownerView = []
# collect dwg from view
imports = DB.FilteredElementCollector(doc) \
.OfClass(ImportInstance)\
.ToElements()
for i in imports:
if not i.LookupParameter('Name').AsString() in importInstance:
importInstance.append(i.LookupParameter('Name').AsString())
replaceDWG = forms.SelectFromList.show(importInstance,
multiselect=True,
button_name='Select DWG')
# Selected
for i in imports:
if i.LookupParameter('Name').AsString() in replaceDWG and i.ViewSpecific:
replaceImports.append(i)
ownerView.append(i.OwnerViewId)
# Get all Annotation Symbols
symbols = DB.FilteredElementCollector(doc) \
.OfClass(FamilySymbol).OfCategory(BuiltInCategory.OST_GenericAnnotation)\
.ToElements()
# Select Name
sNames = []
replaceAnno = ()
# Get the selected symbol
for i in symbols:
sNames.append(i.Family.Name)
replaceName = forms.SelectFromList.show(sNames,
multiselect=False,
button_name='Select Annotation')
for i in symbols:
if i.Family.Name == replaceName:
replaceAnno = i
t = Transaction(doc, 'Replace dwg')
t.Start()
count = 0
for cad in replaceImports:
if cad.ViewSpecific:
max = cad.BoundingBox[doc.ActiveView].Max
min = cad.BoundingBox[doc.ActiveView].Min
location = (max + min)/2
boxes = doc.Create.NewFamilyInstance(location, replaceAnno, doc.GetElement(ownerView[count]))
doc.Delete(cad.Id)
count += 1
t.Commit()
|
python
|
import json
from urllib.parse import urlencode
from flask import Response, request
def _create_link(path, limit, offset, order_by, order_how, args_dict):
# copy of the dict passed in so we don't modify the original
params = dict(args_dict)
params["limit"] = limit
params["offset"] = offset
params["order_by"] = order_by
params["order_how"] = order_how
return "{}?{}".format(path, urlencode(params))
def _create_first_link(path, limit, offset, count, order_by, order_how, args_dict):
# Example return string:
# "/api/item-service/v1/items?limit=20&offset=0&order_by=captured_date&order_how=desc"
return _create_link(path, limit, 0, order_by, order_how, args_dict)
def _create_previous_link(path, limit, offset, count, order_by, order_how, args_dict):
# if we are at the beginning, fall back to the first link
# Example return string:
# "/api/item-service/v1/items?limit=20&offset=20&order_by=captured_date&order_how=desc"
if offset == 0 or offset - limit < 0:
return _create_first_link(path, limit, offset, count, order_by, order_how, args_dict)
return _create_link(path, limit, offset - limit, order_by, order_how, args_dict)
def _create_next_link(path, limit, offset, count, order_by, order_how, args_dict):
# if we are at the end, fall back to the last link
# Example return string:
# "/api/item-service/v1/items?limit=20&offset=40&order_by=captured_date&order_how=desc"
if limit + offset >= count:
return _create_last_link(path, limit, offset, count, order_by, order_how, args_dict)
return _create_link(path, limit, limit + offset, order_by, order_how, args_dict)
def _create_last_link(path, limit, offset, count, order_by, order_how, args_dict):
# Example return string:
# "/api/item-service/v1/items?limit=20&offset=100&order_by=captured_date&order_how=desc"
final_offset = count - limit if (count - limit) >= 0 else 0
return _create_link(path, limit, final_offset, order_by, order_how, args_dict)
def build_paginated_baseline_list_response(
limit, offset, order_by, order_how, json_list, total_available, count, args_dict={}
):
json_output = {
"meta": {
"count": count,
"limit": limit,
"offset": offset,
"total_available": total_available,
},
"links": {
"first": _create_first_link(
request.path, limit, offset, count, order_by, order_how, args_dict
),
"next": _create_next_link(
request.path, limit, offset, count, order_by, order_how, args_dict
),
"previous": _create_previous_link(
request.path, limit, offset, count, order_by, order_how, args_dict
),
"last": _create_last_link(
request.path, limit, offset, count, order_by, order_how, args_dict
),
},
"data": json_list,
}
return _build_json_response(json_output)
def _build_json_response(json_data, status=200):
return Response(json.dumps(json_data), status=status, mimetype="application/json")
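if __name__ == "__main__":
    # Illustrative check (hypothetical values): _create_link needs no Flask
    # request context, so it can be exercised directly.
    print(_create_link("/api/item-service/v1/items", 20, 40, "captured_date", "desc", {}))
    # -> /api/item-service/v1/items?limit=20&offset=40&order_by=captured_date&order_how=desc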
|
python
|
from distutils.core import setup
setup(
name='http_request',
version='1.1',
packages=['http_request'],
url='https://github.com/dennisfischer/http_request',
license='MIT',
author='Dennis Fischer',
author_email='[email protected]',
description='A small python library to parse and build HTTP requests'
)
|
python
|
# -*- coding: utf-8 -*-
# * Copyright (c) 2009-2018. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
__author__ = "Rubens Ulysse <[email protected]>"
__contributors__ = ["Marée Raphaël <[email protected]>", "Mormont Romain <[email protected]>"]
__copyright__ = "Copyright 2010-2018 University of Liège, Belgium, http://www.cytomine.be/"
from cytomine.cytomine import Cytomine
from cytomine.models.annotation import Annotation
from cytomine.models.collection import Collection, DomainCollection
from cytomine.models.model import Model, DomainModel
class Property(DomainModel):
def __init__(self, object, key=None, value=None, **attributes):
super(Property, self).__init__(object)
self.key = key
self.value = value
self.populate(attributes)
self._by_key = False
@property
def obj(self):
return self._object
@obj.setter
def obj(self, value):
self._object = value
if isinstance(value, Annotation):
self.domainClassName = "annotation"
else:
self.domainClassName = value.class_
self.domainIdent = value.id
def uri(self):
if self._by_key and self.domainClassName and self.domainIdent and self.key:
uri = "domain/{}/{}/key/{}/property.json".format(self.domainClassName, self.domainIdent, self.key)
else:
uri = super(Property, self).uri()
if self.domainClassName == "annotation":
uri = uri.replace("domain/", "")
return uri
def fetch(self, id=None, key=None):
if self.id is None and id is None and self.key is None and key is None:
raise ValueError("Cannot fetch a model with no ID and no key.")
if id is not None:
self.id = id
if key is not None:
self.key = key
self._by_key = True
model = Cytomine.get_instance().get_model(self, self.query_parameters)
self._by_key = False
return model
def __str__(self):
return "[{}] {} : {} ({}) - Key: {} - Value {}".format(self.callback_identifier, self.id, self.domainClassName,
self.domainIdent, self.key, self.value)
class PropertyCollection(DomainCollection):
def __init__(self, object, filters=None, max=0, offset=0, **parameters):
super(PropertyCollection, self).__init__(Property, object, filters, max, offset)
self._allowed_filters = [None]
self.set_parameters(parameters)
def uri(self, without_filters=False):
uri = super(PropertyCollection, self).uri(without_filters)
if self._domainClassName == "annotation":
uri = uri.replace("domain/", "")
return uri
def as_dict(self):
"""Transform the property collection into a python dictionary mapping keys
with their respective Property objects.
"""
return {p.key: p for p in self}
@property
def _obj(self):
return self._object
@_obj.setter
def _obj(self, value):
self._object = value
if isinstance(value, Annotation):
self._domainClassName = "annotation"
else:
self._domainClassName = value.class_
self._domainIdent = value.id
class AttachedFile(DomainModel):
def __init__(self, object, filename=None, **attributes):
super(AttachedFile, self).__init__(object)
self.filename = filename
self.url = None
self.populate(attributes)
def uri(self):
if self.is_new():
return "{}.json".format(self.callback_identifier)
else:
return "{}/{}.json".format(self.callback_identifier, self.id)
def save(self):
return self.upload()
def update(self, id=None, **attributes):
return self.upload()
def upload(self):
return Cytomine.get_instance().upload_file(self, self.filename,
query_parameters={"domainClassName": self.domainClassName,
"domainIdent": self.domainIdent})
def download(self, destination="{filename}", override=False):
if self.is_new():
raise ValueError("Cannot download file if not existing ID.")
pattern = re.compile("{(.*?)}")
destination = re.sub(pattern, lambda m: str(getattr(self, str(m.group(0))[1:-1], "_")), destination)
return Cytomine.get_instance().download_file("{}/{}/download".format(self.callback_identifier, self.id),
destination, override)
class AttachedFileCollection(DomainCollection):
def __init__(self, object, filters=None, max=0, offset=0, **parameters):
super(AttachedFileCollection, self).__init__(AttachedFile, object, filters, max, offset)
self._allowed_filters = [None]
self.set_parameters(parameters)
class Description(DomainModel):
def __init__(self, object, data=None, **attributes):
super(Description, self).__init__(object)
self.data = data
self.populate(attributes)
def uri(self):
return "domain/{}/{}/{}.json".format(self._object.class_, self._object.id, self.callback_identifier)
def fetch(self, id=None):
if id is not None:
self.id = id
return Cytomine.get_instance().get_model(self, self.query_parameters)
class Tag(Model):
def __init__(self, name=None, **attributes):
super(Tag, self).__init__()
self.name = name
self.populate(attributes)
class TagCollection(Collection):
def __init__(self, filters=None, max=0, offset=0, **parameters):
super(TagCollection, self).__init__(Tag, filters, max, offset)
self._allowed_filters = [None]
self.set_parameters(parameters)
class TagDomainAssociation(DomainModel):
def __init__(self, object, tag=None, **attributes):
super(TagDomainAssociation, self).__init__(object)
self.tag = tag
self.populate(attributes)
def uri(self):
if self.id:
uri = "tag_domain_association/{}.json".format(self.id)
elif self.domainClassName and self.domainIdent:
uri = super(TagDomainAssociation, self).uri()
return uri
@property
def callback_identifier(self):
return "tag_domain_association"
class TagDomainAssociationCollection(DomainCollection):
def __init__(self, object, filters=None, max=0, offset=0, **parameters):
super(TagDomainAssociationCollection, self).__init__(TagDomainAssociation, object, filters, max, offset)
self._allowed_filters = [None]
self.set_parameters(parameters)
@property
def callback_identifier(self):
return "tag_domain_association"
|
python
|
import pandas as pd
from age.data.load.countries import base
from age.data.load import transformations
from age.data.load import utils
from age.data.load import ined
import logging
import datetime
from urllib.request import urlopen
_CASES_PATH = 'https://data.rivm.nl/covid-19/COVID-19_casus_landelijk.csv'
_INED_URL = 'https://dc-covid.site.ined.fr/en/data/netherlands/'
_COLUMN_MAP = {
'Date_statistics': 'Date',
'Agegroup': 'Age',
}
ISO = 'NLD'
def _load_raw_cases():
df = pd.read_csv(_CASES_PATH, sep=';')
df = (df.rename(columns=_COLUMN_MAP)
.groupby(['Date', 'Age', 'Sex'])
.Date_file
.count()
.reset_index()
.rename(columns={'Date_file': 'cases_new'}))
df.Date = pd.to_datetime(df.Date)
df.Sex = df.Sex.replace({'Female': 'f', 'Male': 'm'})
df = df[df.Sex != "Unknown"]
df = df[df.Age != "Unknown"]
df = df[df.Age != "<50"]
return df
class Netherlands(base.LoaderBase):
def __init__(self):
self._raw_cases = None
self._raw_deaths = None
def raw_cases(self) -> pd.DataFrame:
if self._raw_cases is None:
self._raw_cases = _load_raw_cases()
return self._raw_cases
def raw_deaths(self) -> pd.DataFrame:
if self._raw_deaths is None:
self._raw_deaths = ined.read_ined_table(_INED_URL, 'RIVM_Data', num_rows=20)
return self._raw_deaths
def cases(self) -> pd.DataFrame:
raw_cases = self.raw_cases()
cases = transformations.ensure_contiguous(raw_cases)
cases = transformations.add_both_sexes(cases)
cases['ISO'] = ISO
return cases
def deaths(self) -> pd.DataFrame:
deaths = self.raw_deaths()
deaths['ISO'] = ISO
return deaths
|
python
|
"""
MIT License
Copyright (c) 2018 Elias Doehne
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from setuptools import setup
try:
# Try to build the cython extension locally
from Cython.Build import cythonize
extension_modules = cythonize("stellarisdashboard/parsing/cython_ext/tokenizer.pyx")
except ImportError:
print(
"Cython is not installed, using pre-built C-extension if available, or (slow) fallback solution."
)
extension_modules = []
except RuntimeError as e:
print(f"Warning: RuntimeError while building Cython extension: {e}")
print("Using pre-built C-extension if available, or (slow) fallback solution.")
extension_modules = []
with open("requirements.txt", "r") as f:
install_requires = f.read().strip().split()
setup(
name="stellarisdashboard",
ext_modules=extension_modules,
install_requires=install_requires,
entry_points={
"console_scripts": [
"stellarisdashboard = stellarisdashboard.__main__:main",
"stellarisdashboardcli = stellarisdashboard.cli:cli",
],
},
)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import os.path
import xml.dom.minidom
import argparse
class Config():
def __add_server(self, parent_node, settings, server_name):
try:
os.environ["TRAVIS_SECURE_ENV_VARS"]
except KeyError:
print "no secure env vars available, please declare it first"
sys.exit()
serversNodes = settings.getElementsByTagName("servers")
if not serversNodes:
serversNode = parent_node.createElement("servers")
settings.appendChild(serversNode)
else:
serversNode = serversNodes[0]
sonatypeServerNode = parent_node.createElement("server")
sonatypeServerId = parent_node.createElement("id")
sonatypeServerUser = parent_node.createElement("username")
sonatypeServerPass = parent_node.createElement("password")
idNode = parent_node.createTextNode(server_name)
userNode = parent_node.createTextNode(os.environ["SONATYPE_USERNAME"])
passNode = parent_node.createTextNode(os.environ["SONATYPE_PASSWORD"])
sonatypeServerId.appendChild(idNode)
sonatypeServerUser.appendChild(userNode)
sonatypeServerPass.appendChild(passNode)
sonatypeServerNode.appendChild(sonatypeServerId)
sonatypeServerNode.appendChild(sonatypeServerUser)
sonatypeServerNode.appendChild(sonatypeServerPass)
serversNode.appendChild(sonatypeServerNode)
def __add_mirror(self, parent_node, settings):
mirrors = parent_node.createElement("mirrors")
settings.appendChild(mirrors)
mirror = parent_node.createElement("mirror")
mirror_id = parent_node.createElement("id")
mirror_id_text = parent_node.createTextNode("nexus")
mirror_mirrorOf = parent_node.createElement("mirrorOf")
mirror_mirrorOf_text = parent_node.createTextNode("*")
mirror_url = parent_node.createElement("url")
mirror_url_value = parent_node.createTextNode("http://130.206.80.85/nexus/content/groups/public")
mirrors.appendChild(mirror)
mirror_id.appendChild(mirror_id_text)
mirror_mirrorOf.appendChild(mirror_mirrorOf_text)
mirror_url.appendChild(mirror_url_value)
mirror.appendChild(mirror_id)
mirror.appendChild(mirror_mirrorOf)
mirror.appendChild(mirror_url)
def configure_server(self, server=True, mirrors=True, home_dir=os.path.expanduser("~")):
m2 = xml.dom.minidom.parse(home_dir + '/.m2/settings.xml')
settings = m2.getElementsByTagName("settings")[0]
if mirrors:
self.__add_mirror(m2, settings)
if server:
self.__add_server(m2, settings, "repo-release")
self.__add_server(m2, settings, "repo-snapshot")
m2Str = m2.toxml()
f = open(home_dir + '/.m2/settings.xml', 'w')
f.write(m2Str)
f.close()
def main(prog_args):
parser = argparse.ArgumentParser()
parser.add_argument("--deploy", help="add servers tag to settings.xml", action="store_true")
parser.add_argument("--mirrors", help="add mirrors tag to settings.xml", action="store_true")
args = parser.parse_args()
config = Config()
config.configure_server(server=args.deploy, mirrors=args.mirrors)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
python
|
#!/usr/bin/python
import gzip
import socket
#UDP_IP = "127.0.0.1"
UDP_IP = "0.0.0.0"
UDP_PORT = 6550
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.bind((UDP_IP, UDP_PORT))
f = gzip.open('flight.dat.gz', 'wb')
print "Aura Logger"
print "listening for", UDP_IP, UDP_PORT
while True:
data, addr = sock.recvfrom(1024) # buffer size is 1024 bytes
print "received message:", len(data)
f.write(data)
|
python
|
"""REST API endpoints"""
from fastapi import APIRouter
from app.api.api_v1.endpoints import items, users
api_router = APIRouter()
api_router.include_router(users.router, prefix="/users", tags=["users"])
api_router.include_router(items.router, prefix="/items", tags=["items"])
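# Illustrative wiring sketch (an assumption, not part of this module): the aggregated
# router is typically mounted on the application with a version prefix, e.g.
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(api_router, prefix="/api/v1")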
|
python
|
import os
import sys
import time
import gc
import copy
import random
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
from torch.autograd import Variable
from image_util import *
from data_util import *
from env import *
from sum_tree import *
import model_creator as mCreator
class ReplayMemory:
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
def push(self, transition):
self.memory.append(transition)
if len(self.memory) > self.capacity:
del self.memory[0]
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
class PrioritizedReplayMemory: # stored as ( s, a, r, s_ ) in SumTree
e = 0.01
a = 0.6
beta = 0.4
beta_increment_per_sampling = 0.001
def __init__(self, capacity):
self.tree = sum_tree(capacity)
self.capacity = capacity
def _get_priority(self, error):
return (error + self.e) ** self.a
def add(self, error, sample):
p = self._get_priority(error)
self.tree.add(p, sample)
def sample(self, n):
batch = []
idxs = []
segment = self.tree.total() / n
priorities = []
self.beta = np.min([1., self.beta + self.beta_increment_per_sampling])
for i in range(n):
a = segment * i
b = segment * (i + 1)
s = random.uniform(a, b)
(idx, p, data) = self.tree.get(s)
priorities.append(p)
batch.append(data)
idxs.append(idx)
        sampling_probabilities = np.array(priorities) / self.tree.total()
is_weight = np.power(self.tree.n_entries * sampling_probabilities, -self.beta)
is_weight /= is_weight.max()
return batch, idxs, is_weight
def update(self, idx, error):
p = self._get_priority(error)
self.tree.update(idx, p)
def __len__(self):
return int(self.tree.n_entries)
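# Hypothetical usage sketch (assumes the SumTree provided by the starred sum_tree import):
# transitions are stored with an initial TD-error as priority, sampled in proportion to
# that priority, and re-prioritized after each learning step.
#   memory = PrioritizedReplayMemory(capacity=10000)
#   memory.add(error=1.0, sample=(state, action, reward, next_state, done))
#   batch, idxs, is_weights = memory.sample(32)
#   memory.update(idxs[0], new_error)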
class DQN_Agent:
def __init__(self, global_features_net, local_features_net, num_epoch, num_actions, num_fc_nodes, lr=0.0001, lr_decay=0.01, gamma=0.9, epsilon=1.0, min_epsilon=0.1, epsilon_decay=0.99, guided_eps=0.5, mem_size=10000, batch_size=32, iter_update_target = 300, use_gpu = True, use_chex_net = False, transforms = None):
#Initiate agent components
self.num_actions = num_actions
self.evaluate_net = mCreator.Bi_DQN(num_actions, global_features_net, local_features_net, num_fc_nodes, True, use_chex_net)
self.target_net = mCreator.Bi_DQN(num_actions, global_features_net, local_features_net, num_fc_nodes, True, use_chex_net)
self.evaluate_net.train(True)
self.target_net.train(True)
if(use_gpu):
#self.evaluate_net = torch.nn.DataParallel(self.evaluate_net.cuda(), device_ids=[0, 1])
#self.target_net = torch.nn.DataParallel(self.target_net.cuda(), device_ids=[0, 1])
self.evaluate_net = self.evaluate_net.cuda()
self.target_net = self.target_net.cuda()
self.optimizer = torch.optim.Adam(self.evaluate_net.parameters(), lr=lr, weight_decay=lr_decay)
self.loss_function = nn.MSELoss()
self.memory = ReplayMemory(capacity=mem_size)
self.transforms = transforms
self.batch_size = batch_size
self.epsilon = epsilon
self.epsilon_decay = epsilon_decay
self.min_epsilon = min_epsilon
self.epsilon_linear_factor = float(self.epsilon-self.min_epsilon)/float(num_epoch)
self.guided_eps = guided_eps
self.gamma = gamma
self.iter_update_target = iter_update_target
self.iter_counter = 0
#@profile
def select_action(self, current_env, bb_img):
sample = np.random.uniform()
if(sample > self.epsilon):
#Exploitation
#print("Exploitation")
#Convert to 3 channel and apply torchvision transforms
#unsqueeze for batch size
global_img = transform_img_for_model(current_env.full_env)
if(self.transforms is not None):
global_img = self.transforms(global_img)
global_img.unsqueeze_(0)
global_img = global_img.to(0)
bb_img = transform_img_for_model(bb_img)
if(self.transforms is not None):
bb_img = self.transforms(bb_img)
bb_img.unsqueeze_(0)
bb_img = bb_img.to(0)
q = self.evaluate_net(global_img, bb_img)
action = torch.max(q, 1)[1].data[0]
return action
else:
#Exploration
sample = np.random.uniform()
if(sample > self.guided_eps):
#print("Random exploration")
#Random exploration
return random.randint(0, self.num_actions-1)
else:
#print("Guided exploration")
#Guided exploration
rewards = []
for i in range(self.num_actions):
rewards.append(current_env.step_foresee(i))
pos_reward_index = []
zero_reward_index = []
for i in range(len(rewards)):
if(rewards[i] > 0):
pos_reward_index.append(i)
if(rewards[i] == 0):
zero_reward_index.append(i)
if(len(pos_reward_index) > 0):
return random.choice(pos_reward_index)
elif(len(zero_reward_index) > 0):
return random.choice(zero_reward_index)
else:
return random.randint(0, self.num_actions-1)
#For inference once agent is trained
def select_action_infer(self, current_env, bb_img):
global_img = transform_img_for_model(current_env.full_env)
bb_img = transform_img_for_model(bb_img)
if(self.transforms is not None):
global_img = self.transforms(global_img)
bb_img = self.transforms(bb_img)
q = self.target_net(global_img, bb_img)
action = torch.max(q, 1)[1].data[0]
return action
def store_transitions(self, state_tup, action, reward, next_state_tup, done):
self.memory.push((state_tup, action, reward, next_state_tup, done))
#@profile
def learn(self):
self.iter_counter += 1
if(len(self.memory) < self.batch_size):
return
#Random transition batch is taken from experience replay memory
transitions = self.memory.sample(self.batch_size)
batch_state_env = []
batch_state = []
batch_action = []
batch_reward = []
batch_state_next_env = []
batch_state_next_state = []
batch_done = []
for t in transitions:
(bse, bs), ba, br, (bsne, bsns), bd = t
bse = transform_img_for_model(bse)
bs = transform_img_for_model(bs)
bsne = transform_img_for_model(bsne)
bsns = transform_img_for_model(bsns)
if(self.transforms is not None):
bse = self.transforms(bse)
bs = self.transforms(bs)
bsne = self.transforms(bsne)
bsns = self.transforms(bsns)
batch_state_env.append(bse)
batch_state.append(bs)
batch_action.append(ba)
batch_reward.append(br)
batch_state_next_env.append(bsne)
batch_state_next_state.append(bsns)
batch_done.append(bd)
        batch_state = Variable(torch.stack(batch_state)).cuda(non_blocking=True)
        batch_state_env = Variable(torch.stack(batch_state_env)).cuda(non_blocking=True)
batch_action = torch.FloatTensor(batch_action).unsqueeze_(0)
batch_action = batch_action.view(batch_action.size(1), -1)
        batch_action = Variable(batch_action).cuda(non_blocking=True)
batch_reward = torch.FloatTensor(batch_reward).unsqueeze_(0)
batch_reward = batch_reward.view(batch_reward.size(1), -1)
        batch_reward = Variable(batch_reward).cuda(non_blocking=True)
        batch_next_state = Variable(torch.stack(batch_state_next_state)).cuda(non_blocking=True)
        batch_state_next_env = Variable(torch.stack(batch_state_next_env)).cuda(non_blocking=True)
# current Q values are estimated by NN for all actions
current_q_values = self.evaluate_net(batch_state_env, batch_state).gather(1, batch_action.long())
# expected Q values are estimated from actions which gives maximum Q value
max_next_q_values = self.target_net(batch_state_next_env, batch_next_state).detach().max(1)[0]
max_next_q_values = max_next_q_values.unsqueeze_(0)
max_next_q_values = max_next_q_values.view(max_next_q_values.size(1), -1)
expected_q_values = batch_reward + (self.gamma * max_next_q_values)
# loss is measured from error between current and newly expected Q values
loss = self.loss_function(current_q_values, expected_q_values)
# backpropagation of loss to NN
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
#free variables
del batch_state, batch_state_env, batch_action, batch_reward, batch_next_state, batch_state_next_env, loss
if(self.iter_counter % self.iter_update_target == 0):
self.target_net.load_state_dict(self.evaluate_net.state_dict())
#Per Episode
def decay_epsilon(self):
new_epsilon = self.epsilon * self.epsilon_decay
if new_epsilon < self.min_epsilon:
self.epsilon = self.min_epsilon
else:
self.epsilon = new_epsilon
#Per Epoch
def decay_epsilon_linear(self):
self.epsilon -= self.epsilon_linear_factor
if(self.epsilon < self.min_epsilon):
self.epsilon = self.min_epsilon
def set_training(self, train_or_not=True):
if(train_or_not):
self.evaluate_net.train(True)
self.target_net.train(True)
else:
self.evaluate_net.train(False)
self.target_net.train(False)
self.evaluate_net.eval()
self.target_net.eval()
class DQN_Agent_Single_Net:
def __init__(self, features_net, num_epoch, num_actions, num_fc_nodes, lr=0.0001, lr_decay=0.01, gamma=0.9, epsilon=1.0, min_epsilon=0.1, epsilon_decay=0.99, guided_eps=0.5, mem_size=10000, batch_size=32, iter_update_target = 300, use_gpu = True, use_chex_net = False, transforms = None, use_pr_replay=True):
#Initiate agent components
self.num_actions = num_actions
self.evaluate_net = mCreator.DQN(num_actions, features_net, num_fc_nodes, True, use_chex_net)
self.target_net = mCreator.DQN(num_actions, features_net, num_fc_nodes, True, use_chex_net)
self.evaluate_net.train(True)
self.target_net.train(True)
if(use_gpu):
#self.evaluate_net = torch.nn.DataParallel(self.evaluate_net.cuda(), device_ids=[0, 1])
#self.target_net = torch.nn.DataParallel(self.target_net.cuda(), device_ids=[0, 1])
self.evaluate_net = self.evaluate_net.cuda()
self.target_net = self.target_net.cuda()
self.optimizer = torch.optim.Adam(self.evaluate_net.parameters(), lr=lr, weight_decay=lr_decay)
#self.loss_function = nn.MSELoss()
self.use_pr_replay = use_pr_replay
if(use_pr_replay):
self.memory = PrioritizedReplayMemory(mem_size)
else:
self.memory = ReplayMemory(capacity=mem_size)
self.loss_function = nn.SmoothL1Loss().cuda()
self.transforms = transforms
self.batch_size = batch_size
self.epsilon = epsilon
self.epsilon_decay = epsilon_decay
self.min_epsilon = min_epsilon
self.epsilon_linear_factor = float(self.epsilon-self.min_epsilon)/float(num_epoch)
self.guided_eps = guided_eps
self.gamma = gamma
self.iter_update_target = iter_update_target
self.use_ddqn = True
self.iter_counter = 0
#@profile
def select_action(self, current_env, bb_img):
sample = np.random.uniform()
if(sample > self.epsilon):
#Exploitation
#print("Exploitation")
#Convert to 3 channel and apply torchvision transforms
#unsqueeze for batch size
bb_img = transform_img_for_model(bb_img.numpy(), self.transforms)
#bb_img = transform_img_for_model(bb_img)
#if(self.transforms is not None):
# bb_img = self.transforms(bb_img)
bb_img.unsqueeze_(0)
bb_img = bb_img.to(0)
q = self.evaluate_net(bb_img)
action = torch.max(q, 1)[1].data[0]
return action
else:
#Exploration
sample = np.random.uniform()
if(sample > self.guided_eps):
#print("Random exploration")
#Random exploration
return random.randint(0, self.num_actions-1)
else:
#print("Guided exploration")
#Guided exploration
rewards = []
for i in range(self.num_actions):
rewards.append(current_env.step_foresee(i))
pos_reward_index = []
zero_reward_index = []
for i in range(len(rewards)):
if(rewards[i] > 0):
pos_reward_index.append(i)
if(rewards[i] == 0):
zero_reward_index.append(i)
if(len(pos_reward_index) > 0):
return random.choice(pos_reward_index)
elif(len(zero_reward_index) > 0):
return random.choice(zero_reward_index)
else:
return random.randint(0, self.num_actions-1)
#For inference once agent is trained
def select_action_infer(self, current_env, bb_img):
bb_img = transform_img_for_model(bb_img.numpy(), self.transforms)
#bb_img = transform_img_for_model(bb_img)
#if(self.transforms is not None):
# bb_img = self.transforms(bb_img)
bb_img.unsqueeze_(0)
bb_img = bb_img.to(0)
q = self.target_net(bb_img)
action = torch.max(q, 1)[1].data[0]
return action
def store_transitions(self, state_tup, action, reward, next_state_tup, done):
if(self.use_pr_replay):
target = self.evaluate_net(transform_img_for_model(state_tup.numpy(), self.transforms).unsqueeze_(0).to(0)).data
old_val = target[0][action]
target_val = self.target_net(transform_img_for_model(next_state_tup.numpy(), self.transforms).unsqueeze_(0).to(0)).data
if done:
target[0][action] = reward
else:
target[0][action] = reward + self.gamma * torch.max(target_val)
error = abs(old_val - target[0][action])
self.memory.add(error, (state_tup, action, reward, next_state_tup, done))
else:
self.memory.push((state_tup, action, reward, next_state_tup, done))
    #@profile
def learn(self):
self.iter_counter += 1
if(len(self.memory) < self.batch_size):
return
#Random transition batch is taken from experience replay memory
if(self.use_pr_replay):
transitions, idxs, is_weights = self.memory.sample(self.batch_size)
else:
transitions = self.memory.sample(self.batch_size)
batch_state = []
batch_action = []
batch_reward = []
batch_state_next_state = []
batch_done = []
for t in transitions:
bs, ba, br, bsns, bd = t
bs = transform_img_for_model(bs.numpy(), self.transforms)
#bs = transform_img_for_model(bs)
#if(self.transforms is not None):
# bs = self.transforms(bs)
batch_state.append(bs)
batch_action.append(ba)
batch_reward.append(br)
bsns = transform_img_for_model(bsns.numpy(), self.transforms)
#bsns = transform_img_for_model(bsns)
#if(self.transforms is not None):
# bsns = self.transforms(bsns)
batch_state_next_state.append(bsns)
batch_done.append(bd)
with torch.no_grad():
            batch_state = Variable(torch.stack(batch_state).cuda(non_blocking=True))
            batch_next_state = Variable(torch.stack(batch_state_next_state).cuda(non_blocking=True))
batch_action = torch.FloatTensor(batch_action).unsqueeze_(0)
batch_action = batch_action.view(batch_action.size(1), -1)
            batch_action = Variable(batch_action.cuda(non_blocking=True))
batch_reward = torch.FloatTensor(batch_reward).unsqueeze_(0)
batch_reward = batch_reward.view(batch_reward.size(1), -1)
            batch_reward = Variable(batch_reward.cuda(non_blocking=True))
batch_dones = torch.FloatTensor(batch_done).unsqueeze_(0)
batch_dones = batch_dones.view(batch_dones.size(1), -1)
            batch_dones = Variable(batch_dones.cuda(non_blocking=True))
# current Q values are estimated by NN for all actions
current_q_values = self.evaluate_net(batch_state).gather(1, batch_action.long())
# expected Q values are estimated from actions which gives maximum Q value
if(self.use_ddqn):
next_actions = torch.max(self.evaluate_net(batch_next_state),1)[1].detach()
next_actions = next_actions.unsqueeze_(0)
next_actions = next_actions.view(next_actions.size(1), -1)
max_next_q_values = self.target_net(batch_next_state).gather(1, next_actions.long()).detach()
else:
max_next_q_values = self.target_net(batch_next_state).max(1)[0].detach()
max_next_q_values = max_next_q_values.unsqueeze_(0)
max_next_q_values = max_next_q_values.view(max_next_q_values.size(1), -1)
expected_q_values = batch_reward + (1 - batch_dones) * (self.gamma * max_next_q_values)
# loss is measured from error between current and newly expected Q values
#torch.cuda.synchronize()
loss = self.loss_function(current_q_values, expected_q_values)
errors = torch.abs(expected_q_values - current_q_values).cpu().data.numpy()
# update priority
for i in range(self.batch_size):
idx = idxs[i]
self.memory.update(idx, errors[i])
#print(loss)
# backpropagation of loss to NN
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
#free variables
del batch_state, batch_action, batch_reward, batch_next_state, transitions, current_q_values, max_next_q_values, expected_q_values
#for obj in gc.get_objects():
# if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
# print(type(obj), obj.size())
#if(self.iter_counter % self.iter_update_target == 0):
# self.target_net.load_state_dict(self.evaluate_net.state_dict())
return loss
#Per Episode
def decay_epsilon(self):
new_epsilon = self.epsilon * self.epsilon_decay
if new_epsilon < self.min_epsilon:
self.epsilon = self.min_epsilon
else:
self.epsilon = new_epsilon
#Per Epoch
def decay_epsilon_linear(self):
self.epsilon -= self.epsilon_linear_factor
if(self.epsilon < self.min_epsilon):
self.epsilon = self.min_epsilon
def set_training(self, train_or_not=True):
if(train_or_not):
self.evaluate_net.train(True)
self.target_net.train(True)
else:
self.evaluate_net.train(False)
self.target_net.train(False)
self.evaluate_net.eval()
self.target_net.eval()
|
python
|
from py_queryable import Model
from py_queryable import Column, PrimaryKey, ForeignKey
class StubModel(Model):
__table_name__ = u'test_table'
test_int_column = Column(int, 'int_column')
class StubModel2(Model):
__table_name__ = u'test_table'
test_int_column = Column(int)
class StubPrimary(Model):
__table_name__ = u"test_table"
test_pk = PrimaryKey(int, 'int_pk')
class StubPrimaryString(Model):
__table_name__ = u"test_table"
test_pk = PrimaryKey(unicode, 'unicode_pk')
class StubIntUnique(Model):
__table_name__ = u"test_table"
test_pk = PrimaryKey(int)
test_unique = Column(int, 'int_column', is_unique=True)
class StubForeignKey(Model):
__table_name__ = u"foreign_key_table"
test_pk = PrimaryKey(int, 'int_pk')
test_fk = ForeignKey(StubPrimary, 'test_fk', is_nullable=False)
class StubUpdateModel(Model):
__table_name__ = u"test_update_table"
key = PrimaryKey(int, 'key_column')
update_col = Column(int, 'update_column')
class Student(Model):
__table_name__ = u"student"
student_id = PrimaryKey(int, "student_id")
first_name = Column(unicode, "first_name")
last_name = Column(unicode, "last_name")
gpa = Column(int, "gpa")
|
python
|
from mmcv.utils import Registry, build_from_cfg
TRANSFORMER = Registry('Transformer')
POSITIONAL_ENCODING = Registry('Position encoding')
def build_transformer(cfg, default_args=None):
    """Builder for Transformer."""
    return build_from_cfg(cfg, TRANSFORMER, default_args)
def build_positional_encoding(cfg, default_args=None):
    """Builder for Position Encoding."""
    return build_from_cfg(cfg, POSITIONAL_ENCODING, default_args)
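# Illustrative usage sketch (the MyTransformer class and cfg values are assumptions):
# a module is registered once, then instantiated from a config dict whose 'type' key
# names the registered class.
#   @TRANSFORMER.register_module()
#   class MyTransformer:
#       def __init__(self, embed_dim=256):
#           self.embed_dim = embed_dim
#   transformer = build_transformer(dict(type='MyTransformer', embed_dim=512))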
|
python
|
from sports.nba.nba_team import NBA_Team
class HoustonRockets(NBA_Team):
"""
    NBA Houston Rockets Static Information
"""
full_name = "Houston Rockets"
name = "Rockets"
team_id = 1610612745
def __init__(self):
"""
"""
super().__init__()
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-02-17 09:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0043_backend_filteredlanguage'),
]
operations = [
migrations.AddField(
model_name='backend',
name='supportedFunctions',
            field=models.TextField(blank=True, default='asc, desc, avg, values, score, text, count, sample, min, max, average, concat, group_concat, langMatches, lang, regex, sum', help_text='Comma separated list of SPARQL functions supported by the backend. Will be used for function highlighting.', verbose_name='Supported functions'),
),
migrations.AddField(
model_name='backend',
name='supportedKeywords',
field=models.TextField(blank=True, default='prefix, select, distinct, where, order, limit, offset, optional, by, as, having, not, textlimit, contains-entity, contains-word, filter, group, union, optional, has-predicate', help_text='Comma separated list of SPARQL keywords supported by the backend. Will be used for keyword highlighting.', verbose_name='Supported keywords'),
),
migrations.AlterField(
model_name='backend',
name='filteredLanguage',
field=models.CharField(blank=True, default='en', help_text='Comma separated language codes used for filter suggestions', max_length=2000, verbose_name='Filter languages'),
),
]
|
python
|
import sys
import secrets
from toolbox._common import PlaybookRun
class OCMAddon:
"""
Commands for managing OCM addons
"""
@staticmethod
def install(ocm_refresh_token, ocm_cluster_id, ocm_url, ocm_addon_id, wait_for_ready_state=False):
"""
Installs an OCM addon
Args:
ocm_refresh_token: For OCM login auth
ocm_cluster_id: Cluster ID from OCM's POV
ocm_url: Used to determine environment
ocm_addon_id: the addon id to install. (such as `managed-odh`, `gpu-operator-certified-addon` etc.)
wait_for_ready_state: Optional. If true will cause the role to wait until addon reports ready state. (Can time out)
"""
opt = {
"ocm_addon_id": ocm_addon_id,
"ocm_cluster_id": ocm_cluster_id,
"ocm_url": ocm_url,
"ocm_refresh_token": ocm_refresh_token,
"wait_for_ready_state": wait_for_ready_state,
}
return PlaybookRun("ocm_install_addon", opt)
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-31 08:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('first_app', '0005_inventory'),
]
operations = [
migrations.CreateModel(
name='InventoryCell',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('inv_content', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='first_app.Loot')),
('inv_coord', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='first_app.Cell')),
],
),
migrations.RemoveField(
model_name='inventory',
name='inv_content',
),
migrations.RemoveField(
model_name='inventory',
name='inv_coord',
),
migrations.DeleteModel(
name='Inventory',
),
]
|
python
|
import random
from shop import db
from shop.models import Menu, Computers
class Computer:
computers = 1
def __init__(self):
self.name = "Model: MacBook {}".format(self.computers)
self.price = random.randint(20000, 30000)
self.currency = '₽'
self.disk_size = "Disk size:{} Gb".format(random.randint(128, 512))
self.weight = "Weight: {}".format(random.randrange(1, 3))
self.memory_size = "RAM memory size: {}".format(4, 32)
self.display_size = "Display size: {}".format(random.randint(10, 20))
self.img_link = 'img/{}.jpeg'.format(self.computers)
self.page_link = '/product/{}'.format(self.computers)
Computer.computers += 1
@classmethod
def generate_n_computers(cls, number_of_computers):
computers = []
for i in range(number_of_computers):
computers.append(cls())
return computers
def create_db():
db.drop_all()
db.create_all()
def fill_db():
for item in {"description", "stats", "modifications", "prices", "reviews", "discussions"}:
menu_item = Menu(name=item)
db.session.add(menu_item)
computers = Computer.generate_n_computers(5)
for cmp in computers:
computer = Computers(name=cmp.name,
price=cmp.price,
currency=cmp.currency,
page_link=cmp.page_link,
img_file=cmp.img_link
)
db.session.add(computer)
db.session.commit()
|
python
|
import math
from collections import OrderedDict
from typing import Optional, Dict, List, Union
import numpy
import shapely.geometry
from OpenGL.GL import GL_RGB8
from pyrr import Vector3
from rasterio import features
from rasterio import transform
from rasterstats import zonal_stats
from shapely.geometry import Polygon, Point, LinearRing
from vistas.core.color import RGBColor
from vistas.core.graphics.mesh import Mesh
from vistas.core.graphics.terrain import TerrainColorGeometry, TerrainColorShaderProgram
from vistas.core.graphics.texture import Texture
from vistas.core.graphics.vector import VectorGeometry, VectorShaderProgram
from vistas.core.histogram import Histogram
from vistas.core.legend import StretchedLegend
from vistas.core.plugins.data import DataPlugin
from vistas.core.plugins.option import Option, OptionGroup
from vistas.core.plugins.visualization import VisualizationPlugin3D
from vistas.core.timeline import Timeline
from vistas.ui.utils import *
class TerrainAndColorPlugin(VisualizationPlugin3D):
id = 'terrain_and_color_plugin'
name = 'Terrain & Color'
description = 'Terrain visualization with color inputs.'
author = 'Conservation Biology Institute'
version = '1.0'
visualization_name = 'Terrain & Color'
zonal_stats = dict(median=numpy.median, stdev=numpy.std, range=lambda array: numpy.max(array) - numpy.min(array))
def __init__(self):
super().__init__()
self.terrain_mesh = None
self.vector_mesh = None
self._scene = None
# data inputs
self.terrain_data = None
self.attribute_data = None
self.boundary_data = None
self.flow_dir_data = None
self.flow_acc_data = None
self.selected_point = (-1, -1)
self.feature_boundary = None
self._needs_terrain = self._needs_color = False
self._needs_boundaries = False
self._needs_flow = False
self._is_filtered = False
self._filter_min = self._filter_max = 0
# Primary plugin options
self._options = OptionGroup()
color_group = OptionGroup("Colors")
self._min_color = Option(self, Option.COLOR, "Min Color Value", RGBColor(0, 0, 1))
self._max_color = Option(self, Option.COLOR, "Max Color Value", RGBColor(1, 0, 0))
self._nodata_color = Option(self, Option.COLOR, "No Data Color", RGBColor(0.5, 0.5, 0.5))
color_group.items = [self._min_color, self._max_color,self._nodata_color]
value_group = OptionGroup("Values")
self._min_value = Option(self, Option.FLOAT, "Minimum Value", 0.0)
self._max_value = Option(self, Option.FLOAT, "Maximum Value", 0.0)
value_group.items = [self._min_value, self._max_value]
data_group = OptionGroup("Data")
self._elevation_attribute = Option(self, Option.CHOICE, "Elevation Attribute", 0)
self._attribute = Option(self, Option.CHOICE, "Data Attribute", 0)
self._elevation_factor = Option(self, Option.SLIDER, "Elevation Factor", 1.0, min_value=0.0, max_value=5.0)
data_group.items = [self._elevation_attribute, self._attribute, self._elevation_factor]
graphics_group = OptionGroup("Graphics Options")
self._hide_no_data = Option(self, Option.CHECKBOX, "Hide No Data Values", False)
self._per_vertex_color = Option(self, Option.CHECKBOX, "Per Vertex Color", True)
self._per_vertex_lighting = Option(self, Option.CHECKBOX, "Per Vertex Lighting", False)
graphics_group.items = [self._hide_no_data, self._per_vertex_color, self._per_vertex_lighting]
self._options.items = [color_group, value_group, data_group, graphics_group]
# Secondary plugin options
self._boundary_group = OptionGroup("Boundary")
self._boundary_color = Option(self, Option.COLOR, "Boundary Color", RGBColor(0, 0, 0))
self._zonal_boundary_color = Option(self, Option.COLOR, "Zonal Boundary Color", RGBColor(1, 1, 0))
self._boundary_width = Option(self, Option.FLOAT, "Boundary Width", 1.0)
self._boundary_group.items = [self._boundary_color, self._zonal_boundary_color, self._boundary_width]
self._flow_group = OptionGroup("Flow Options")
self._show_flow = Option(self, Option.CHECKBOX, "Show Flow Direction", True)
self._hide_no_data_vectors = Option(self, Option.CHECKBOX, "Hide No Data Vectors", True)
self._flow_stride = Option(self, Option.INT, "Stride", 1, 1, 10)
self._flow_color = Option(self, Option.COLOR, "Vector Color", RGBColor(1, 1, 0))
self._flow_scale = Option(self, Option.SLIDER, "Vector Scale", 1.0, 0.01, 20.0)
self._flow_group.items = [self._show_flow, self._hide_no_data_vectors, self._flow_stride, self._flow_color,
self._flow_scale]
self._animation_group = OptionGroup("Animation Options")
self._animate_flow = Option(self, Option.CHECKBOX, "Enable Flow Animation", False)
self._animation_speed = Option(self, Option.INT, "Animation Speed (ms)", 100)
self._vector_speed = Option(self, Option.SLIDER, "Animation Speed Factor", 1.0, 0.01, 5.0)
self._animation_group.items = [self._animate_flow, self._animation_speed, self._vector_speed]
self._accumulation_group = OptionGroup("Flow Accumulation Options")
self._acc_filter = Option(self, Option.CHECKBOX, "Enable Accumulation Filter", False)
self._acc_min = Option(self, Option.INT, "Accumulation Min", 0)
self._acc_max = Option(self, Option.INT, "Accumulation Max", 0)
self._acc_scale = Option(self, Option.CHECKBOX, "Scale Flow by Acc. Value", False)
self._accumulation_group.items = [self._acc_filter, self._acc_min, self._acc_max, self._acc_scale]
def get_options(self):
options = OptionGroup()
options.items = self._options.items.copy()
if self.boundary_data is not None:
options.items.append(self._boundary_group)
if self.flow_dir_data is not None:
options.items.append(self._flow_group)
options.items.append(self._animation_group)
if self.flow_acc_data is not None:
options.items.append(self._accumulation_group)
return options
def update_option(self, option: Option=None):
if option is None or option.plugin is not self:
return
name = option.name
if name in [self._min_color.name, self._max_color.name, self._min_value.name, self._max_value.name]:
post_new_legend()
elif name == self._attribute.name:
self._needs_color = True
stats = self.attribute_data.variable_stats(self._attribute.selected)
self._min_value.value = stats.min_value
self._max_value.value = stats.max_value
post_newoptions_available(self)
post_new_legend()
elif name == self._elevation_attribute.name:
self._needs_terrain = True
        elif name == self._boundary_width.name:
self._needs_boundaries = True
if self.flow_dir_data is not None:
vector_shader = self.vector_mesh.shader
if name == self._animate_flow.name:
vector_shader.animate = self._animate_flow.value
elif name == self._animation_speed.name:
vector_shader.animation_speed = self._animation_speed.value
elif name == self._elevation_factor.name:
self.vector_mesh.geometry.vertex_scalars = Vector3(
[1, 1, self._elevation_factor.value], dtype=numpy.float32
)
elif name == self._flow_color.name:
vector_shader.color = self._flow_color.value
elif name == self._flow_scale.name:
vector_shader.vector_scale = self._flow_scale.value
elif name == self._show_flow.name:
self.vector_mesh.visible = self._show_flow.value
elif name == self._hide_no_data_vectors.name:
vector_shader.hide_no_data = self._hide_no_data_vectors.value
elif name == self._acc_filter.name:
vector_shader.use_mag_filter = self._acc_filter.value and self.flow_acc_data is not None
elif name == self._vector_speed.name:
vector_shader.vector_speed = self._vector_speed.value
elif name in [self._acc_min.name, self._acc_max.name]:
vector_shader.mag_min = self._acc_min.value
vector_shader.mag_max = self._acc_max.value
elif name == self._acc_scale.name:
vector_shader.use_magnitude_scale = self._acc_scale.value and self.flow_acc_data is not None
self.refresh()
def timeline_changed(self):
if self.terrain_data and self.terrain_data.time_info and self.terrain_data.time_info.is_temporal:
self._needs_terrain = True
self._needs_color = True
self.refresh()
@property
def can_visualize(self):
return self.terrain_data is not None
@property
def data_roles(self):
return [
(DataPlugin.RASTER, 'Terrain'),
(DataPlugin.RASTER, 'Attribute'),
(DataPlugin.FEATURE, 'Boundaries'),
(DataPlugin.RASTER, 'Flow Direction'),
(DataPlugin.RASTER, 'Flow Accumulation')
]
def set_data(self, data: DataPlugin, role):
if role == 0:
self.terrain_data = data
self._needs_terrain = True
if data is not None:
self._elevation_attribute.labels = data.variables
else:
self._elevation_attribute.labels = []
post_newoptions_available(self)
elif role == 1:
self.attribute_data = data
self._needs_color = True
if data is not None:
stats = data.variable_stats(data.variables[0])
self._min_value.value = round(stats.min_value, 6) # User can specify higher sig figs
self._max_value.value = round(stats.max_value, 6)
self._attribute.value = 0
self._attribute.labels = data.variables
else:
self._min_value.value, self._max_value.value = 0, 0
self._attribute.labels = []
post_newoptions_available(self)
post_new_legend()
elif role == 2:
self.boundary_data = data
self._needs_boundaries = True
elif role == 3:
self.flow_dir_data = data
self._needs_flow = True
post_newoptions_available(self)
elif role == 4:
self.flow_acc_data = data
self._needs_flow = True
if data is not None:
stats = data.variable_stats(data.variables[0])
self._acc_min.value, self._acc_max.value = stats.min_value, stats.max_value
else:
self._acc_min.value, self._acc_max.value = 0, 0
post_newoptions_available(self)
def get_data(self, role):
if role == 0:
return self.terrain_data
elif role == 1:
return self.attribute_data
elif role == 2:
return self.boundary_data
elif role == 3:
return self.flow_dir_data
elif role == 4:
return self.flow_acc_data
return None
@property
def is_filterable(self):
return True
@property
def is_filtered(self):
return self._is_filtered
@property
def filter_histogram(self):
if self.attribute_data is not None:
variable = self._attribute.selected
stats = self.attribute_data.variable_stats(variable)
return Histogram(
self.attribute_data.get_data(variable, Timeline.app().current),
min_value=stats.min_value,
max_value=stats.max_value,
nodata_value=stats.nodata_value
)
else:
return Histogram()
def set_filter(self, min_value, max_value):
self._is_filtered = True
self._filter_min, self._filter_max = min_value, max_value
self.refresh()
def clear_filter(self):
self._is_filtered = False
self.refresh()
@property
def filter_min(self):
return self._filter_min
@property
def filter_max(self):
return self._filter_max
@property
def scene(self):
return self._scene
@scene.setter
def scene(self, scene):
if self._scene is not None:
if self.terrain_mesh is not None:
self._scene.remove_object(self.terrain_mesh)
if self.vector_mesh is not None:
self._scene.remove_object(self.vector_mesh)
self._scene = scene
if self.terrain_mesh is not None and self._scene is not None:
self._scene.add_object(self.terrain_mesh)
if self.vector_mesh is not None and self._scene is not None:
self._scene.add_object(self.vector_mesh)
def refresh(self):
if self._needs_terrain:
self._create_terrain_mesh()
self._needs_terrain = False
self._needs_color = False
elif self._needs_color:
self._update_terrain_color()
self._needs_color = False
if self._needs_boundaries:
self._update_boundaries()
self._needs_boundaries = False
if self._needs_flow:
self._update_flow()
self._needs_flow = False
if self.terrain_mesh is not None:
shader = self.terrain_mesh.shader
# Update shaders with Option values
shader.hide_no_data = self._hide_no_data.value
shader.per_vertex_color = self._per_vertex_color.value
shader.per_vertex_lighting = self._per_vertex_lighting.value
shader.min_value = self._min_value.value
shader.max_value = self._max_value.value
# If attribute data does not specify a nodata_value, set something that won't affect rendering
if self.attribute_data:
stats = self.attribute_data.variable_stats(self._attribute.selected)
if stats.nodata_value is None:
shader.nodata_value = stats.min_value - 1
shader.min_color = self._min_color.value.hsv.hsva_list
shader.max_color = self._max_color.value.hsv.hsva_list
shader.nodata_color = self._nodata_color.value.hsv.hsva_list
shader.boundary_color = self._boundary_color.value.hsv.hsva_list
shader.zonal_color = self._zonal_boundary_color.value.hsv.hsva_list
shader.height_factor = self._elevation_factor.value if self._elevation_factor.value > 0 else 0.01
shader.is_filtered = self._is_filtered
shader.filter_min = self._filter_min
shader.filter_max = self._filter_max
post_redisplay()
def _create_terrain_mesh(self):
if self.terrain_data is not None:
if self.terrain_mesh is not None: # height grid was set before, needs to be removed
self._scene.remove_object(self.terrain_mesh)
self.terrain_mesh.geometry.dispose()
self.terrain_mesh = None
elevation_attribute = self._elevation_attribute.selected
height_stats = self.terrain_data.variable_stats(elevation_attribute)
nodata_value = height_stats.nodata_value
min_value = height_stats.min_value
max_value = height_stats.max_value
cellsize = self.terrain_data.resolution
height_data = self.terrain_data.get_data(elevation_attribute)
if isinstance(height_data, numpy.ma.MaskedArray):
height_data = height_data.data
factor = 1.0
height, width = height_data.shape
max_height = math.sqrt(width * height * cellsize) / 2
if max_value > max_height:
factor = max_height / max_value
height_data[height_data != nodata_value] *= factor # Apply factor where needed
height_data[height_data == nodata_value] = min_value # Otherwise, set to min value
geometry = TerrainColorGeometry(width, height, cellsize, height_data)
shader = TerrainColorShaderProgram()
mesh = Mesh(geometry, shader, plugin=self)
self.terrain_mesh = mesh
self._scene.add_object(self.terrain_mesh)
self._update_terrain_color()
else:
if self.terrain_mesh is not None:
self._scene.remove_object(self.terrain_mesh)
self.terrain_mesh.geometry.dispose()
self.terrain_mesh = None
def _update_terrain_color(self):
if self.terrain_mesh is not None:
shader = self.terrain_mesh.shader
if self.terrain_data and self.attribute_data:
shader.has_color = True
# Retrieve color layer
attribute = self._attribute.selected
data = self.attribute_data.get_data(attribute, Timeline.app().current)
if type(data) is numpy.ma.MaskedArray:
data = data.data
color_stats = self.attribute_data.variable_stats(attribute)
if color_stats.nodata_value:
shader.nodata_value = color_stats.nodata_value
self.terrain_mesh.geometry.values = data
post_redisplay()
else:
shader.has_color = False
def _update_boundaries(self):
if self.terrain_mesh is None:
return
shader = self.terrain_mesh.shader
if self.terrain_data is not None:
shader.has_boundaries = True
# Create boundary image
texture_w, texture_h = 512, 512
image_data = numpy.ones((texture_h, texture_w, 3), dtype=numpy.uint8) * 255
terrain_extent = self.terrain_data.extent
if self.boundary_data is not None:
# Burn geometry to texture
shapes = self.boundary_data.get_features()
image_data[:, :, 0] = numpy.flipud(features.rasterize(
[shapely.geometry.shape(f['geometry']).exterior.buffer(self._boundary_width.value) for f in shapes
if f['geometry']['type'] == 'Polygon'],
out_shape=(texture_h, texture_w), fill=255, default_value=0,
transform=transform.from_bounds(*terrain_extent.as_list(), texture_w, texture_h)
))
if self.selected_point != (-1, -1):
p = (self.selected_point[0], self.selected_point[1] + 1)
cell_size = self.terrain_data.resolution
grid_width, grid_height = self.terrain_data.shape
xscale = texture_w / terrain_extent.width
yscale = texture_h / terrain_extent.height
box_w, box_h = cell_size * xscale, cell_size * yscale
center = (int(p[0] / grid_height * texture_h), int(512 - p[1] / grid_width * texture_w))
# Draw black rectangle directly into data
min_x = min(max(center[0] - box_w / 2, 0), 510)
max_x = min(max(center[0] + box_w / 2, min_x + 1), 511)
min_y = min(max(center[1] - box_h / 2, 0), 510)
max_y = min(max(center[1] + box_h / 2, min_y + 1), 511)
image_data[round(min_y): round(max_y), round(min_x): round(max_x), 0] = 0
shader.boundary_texture = Texture(
data=image_data.ravel(), width=texture_w, height=texture_h, src_format=GL_RGB8
)
# Update zonal stats texture
image_data = numpy.ones((texture_h, texture_w, 3), dtype=numpy.uint8) * 255
if self.feature_boundary is not None:
shader.has_zonal_boundary = True
t_res = self.terrain_data.resolution
normalized_coords = [(p[0] / t_res, p[1] / t_res) for p in self.feature_boundary.coords]
if isinstance(self.feature_boundary, Point):
feat = Point(*[[transform.xy(self.terrain_data.affine, *p) for p in normalized_coords]])
else:
feat = LinearRing(*[[transform.xy(self.terrain_data.affine, *p) for p in normalized_coords]])
image_data[:, :, 0] = numpy.flipud(features.rasterize(
[feat.buffer(self._boundary_width.value)],
out_shape=(texture_h, texture_w), fill=255, default_value=1, all_touched=True,
transform=transform.from_bounds(*terrain_extent.as_list(), texture_w, texture_h)
))
else:
shader.has_zonal_boundary = False
shader.zonal_texture = Texture(
data=image_data.ravel(), width=texture_w, height=texture_h, src_format=GL_RGB8
)
else:
shader.has_boundaries = False
shader.has_zonal_boundary = False
shader.boundary_texture = Texture()
shader.zonal_texture = Texture()
def _update_flow(self):
if self.terrain_data is not None and self.flow_dir_data is not None:
height_label = self._elevation_attribute.selected
flow_dir_label = self.flow_dir_data.variables[0]
flow_acc_label = self.flow_acc_data.variables[0] if self.flow_acc_data is not None else ""
attribute_label = self._attribute.selected if self.attribute_data is not None else ""
height_data = self.terrain_data.get_data(height_label, Timeline.app().current)
flow_dir = self.flow_dir_data.get_data(flow_dir_label, Timeline.app().current)
height, width = flow_dir.shape
if not flow_dir.shape == height_data.shape:
post_message("Terrain and flow grids don't match. Did you load the correct flow grid for this terrain?",
MessageEvent.ERROR)
return
# Clobber all the data into one big array
vector_data = numpy.zeros((height, width, VectorGeometry.BUFFER_WIDTH), dtype=numpy.float32)
vector_data[:, :, 0:3] = self.terrain_mesh.geometry.vertices.reshape((height, width, 3))
vector_data[:, :, 3] = flow_dir * -45.0 + 45.0 # VELMA flow direction, converted to polar degrees
vector_data[:, :, 4] = 90 - numpy.arcsin(numpy.abs(
self.terrain_mesh.geometry.normals.reshape((height, width, 3))[:, :, 2]
)) * 180 / numpy.pi
vector_data[:, :, 5] = numpy.ones((height, width), dtype=numpy.float32) if self.flow_acc_data is None else \
self.flow_acc_data.get_data(flow_acc_label, Timeline.app().current)
vector_data[:, :, 6] = numpy.zeros((height, width), dtype=numpy.float32) if self.attribute_data is None \
else self.attribute_data.get_data(attribute_label, Timeline.app().current)
# Inform vector_renderable of attribute grid (if set) so shader knows whether to hide nodata values
if self.attribute_data is not None:
nodata_value = self.attribute_data.variable_stats(attribute_label).nodata_value
else:
nodata_value = 1.0
if self.vector_mesh is None:
self.vector_mesh = Mesh(
VectorGeometry(max_instances=width * height, data=vector_data),
VectorShaderProgram()
)
self.scene.add_object(self.vector_mesh)
else:
self.vector_mesh.geometry.vector_data = vector_data
self.vector_mesh.shader.nodata_value = nodata_value
self.vector_mesh.shader.use_mag_filter = self._acc_filter.value and self.flow_acc_data is not None
self.vector_mesh.shader.mag_min = self._acc_min.value
self.vector_mesh.shader.mag_max = self._acc_max.value
self.vector_mesh.shader.use_magnitude_scale = self._acc_scale.value and self.flow_acc_data is not None
self.vector_mesh.visible = self._show_flow.value
elif self.vector_mesh is not None:
self.scene.remove_object(self.vector_mesh)
self.vector_mesh = None
def has_legend(self):
return self.attribute_data is not None
def get_legend(self, width, height):
legend = StretchedLegend(self._min_value.value, self._max_value.value, self._min_color.value,
self._max_color.value)
return legend.render(width, height)
def get_identify_detail(self, point: Vector3) -> Optional[Dict]:
if self.terrain_data is not None:
res = self.terrain_data.resolution
cell_x = int(round((point.y / res)))
cell_y = int(round((point.x / res)))
terrain_attr = self._elevation_attribute.selected
terrain_ref = self.terrain_data.get_data(terrain_attr)
if self.attribute_data is not None:
attribute_ref = self.attribute_data.get_data(
self._attribute.selected, Timeline.app().current
)
attr_height, attr_width = attribute_ref.shape
if 0 <= cell_x < attr_width and 0 <= cell_y < attr_height:
result = OrderedDict()
result['Point'] = "{}, {}".format(cell_x, cell_y)
result['Value'] = attribute_ref[cell_y, cell_x]
result['Height'] = terrain_ref[cell_y, cell_x]
if self.flow_dir_data is not None:
flow_dir_ref = self.flow_dir_data.get_data(self.flow_dir_data.variables[0])
direction = flow_dir_ref[cell_y, cell_x]
result['Flow Direction (input)'] = direction
degrees = 45.0 + 45.0 * direction
result['Flow Direction (degrees)'] = degrees if degrees < 360.0 else degrees - 360.0
if self.flow_acc_data is not None:
result['Flow Accumulation'] = self.flow_acc_data.get_data(
self.flow_acc_data.variables[0]
)[cell_y, cell_x]
self.selected_point = (cell_x, cell_y)
self._needs_boundaries = True
self.refresh()
return result
self.selected_point = (-1, -1)
self._needs_boundaries = True
self.refresh()
return None
def get_zonal_stats_from_point(self, point: Vector3) -> List[Optional[Dict]]:
results = []
if self.boundary_data:
            for plugin in (x for x in (self.terrain_data, self.attribute_data, self.flow_dir_data, self.flow_acc_data)
if x is not None):
if plugin is self.terrain_data:
var = self._elevation_attribute.selected
elif plugin is self.attribute_data:
var = self._attribute.selected
else:
var = ''
raster = plugin.get_data(var)
affine = plugin.affine
res = plugin.resolution
var_stats = plugin.variable_stats(var)
nodata = var_stats.nodata_value
# Transform point coordinates to crs of raster
p = shapely.geometry.Point(*transform.xy(affine, point.x / res, point.y / res))
zones = []
for feat in self.boundary_data.get_features():
if shapely.geometry.shape(feat['geometry']).contains(p):
zones.append(feat)
# Retrieve zonal stats for this raster
result = zonal_stats(zones, raster, affine=affine, nodata=nodata, add_stats=self.zonal_stats)
for j, row in enumerate(result):
row['Name'] = "{} (Zone {})".format(plugin.data_name, zones[j].get('id'))
results.append(row)
return results
def get_zonal_stats_from_feature(self, feature: LinearRing) -> List[Optional[Dict]]:
results = []
if self.terrain_data:
# Normalize feature coordinates to terrain resolution
t_res = self.terrain_data.resolution
normalized_coords = [(p[0] / t_res, p[1] / t_res) for p in feature.coords]
            for plugin in (x for x in (self.terrain_data, self.attribute_data, self.flow_dir_data, self.flow_acc_data)
if x is not None):
if plugin is self.terrain_data:
var = self._elevation_attribute.selected
elif plugin is self.attribute_data:
var = self._attribute.selected
else:
var = ''
raster = plugin.get_data(var, Timeline.app().current)
affine = plugin.affine
var_stats = plugin.variable_stats(var)
nodata = var_stats.nodata_value
feat = Polygon(*[[transform.xy(affine, *p) for p in normalized_coords]])
# Transform normalized raster coordinates to CRS of raster to query and obtain results
result = zonal_stats(feat, raster, affine=affine, nodata=nodata, add_stats=self.zonal_stats)[0]
result['Name'] = plugin.data_name
results.append(result)
return results
def update_zonal_boundary(self, feature: Union[LinearRing, Point]):
self.feature_boundary = feature
self._needs_boundaries = True
self.refresh()
|
python
|
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
from gevent import monkey; monkey.patch_all()
from nose.tools import assert_raises
import mock
from . import scheduler as _
from config import config_value
from job import Job
from digits.utils import subclass, override
class TestScheduler():
def get_scheduler(self):
return _.Scheduler(config_value('gpu_list'))
def test_add_before_start(self):
s = self.get_scheduler()
assert not s.add_job(None), 'add_job should fail'
def test_start_twice(self):
s = self.get_scheduler()
assert s.start(), 'failed to start'
assert s.start(), 'failed to start the second time'
assert s.stop(), 'failed to stop'
def test_stop_before_start(self):
s = self.get_scheduler()
assert s.stop(), 'failed to stop'
@subclass
class JobForTesting(Job):
@override
def job_type(self):
return 'Job For Testing'
class TestSchedulerFlow():
@classmethod
def setUpClass(cls):
cls.s = _.Scheduler(config_value('gpu_list'))
assert cls.s.start(), 'failed to start'
@classmethod
def tearDownClass(cls):
assert cls.s.stop(), 'failed to stop'
def test_add_remove_job(self):
job = JobForTesting('tmp')
assert self.s.add_job(job), 'failed to add job'
assert len(self.s.jobs) == 1, 'scheduler has %d jobs' % len(self.s.jobs)
assert self.s.delete_job(job), 'failed to delete job'
assert len(self.s.jobs) == 0, 'scheduler has %d jobs' % len(self.s.jobs)
|
python
|
from pathlib import Path
from flask_wtf import FlaskForm
from wtforms import SelectMultipleField, SubmitField
from wtforms.validators import DataRequired
path = Path(".")
forecast = sorted(path.glob("data/forecast_*.json"), reverse=True)
choices = [(p, p) for p in forecast]
class YearSelectForm(FlaskForm):
year = SelectMultipleField(
f"Available Data ({len(choices)} years, multiple selectable)",
choices=choices,
validators=[DataRequired()],
)
submit = SubmitField("Make Plot")
|
python
|
"""
1. Clarification
2. Possible solutions
- Brute force
- HashMap
3. Coding
4. Tests
"""
from typing import List
# T=O(n^2), S=O(1)
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
if len(nums) < 2: return [-1, -1]
for i in range(len(nums)):
for j in range(i + 1, len(nums)):
if nums[i] + nums[j] == target:
return [i, j]
# T=O(n), S=O(n)
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
if len(nums) < 2: return [-1, -1]
hash_map = {}
for i, x in enumerate(nums):
if target - x in hash_map:
return [hash_map[target - x], i]
hash_map[x] = i
|
python
|
import enum
from typing import Dict
class PropertyType(enum.Enum):
HOUSE = 0
FLAT = 1
BUNGALOW = 2
class BuiltForm(enum.Enum):
MID_TERRACE = 0
SEMI_DETACHED = 1
DETACHED = 2
END_TERRACE = 3
class OccupantType(enum.Enum):
OWNER_OCCUPIED = 0
RENTED_PRIVATE = 1
RENTED_SOCIAL = 2
class EPCRating(enum.Enum):
G = 0
F = 1
E = 2
D = 3
C = 4
B = 5
A = 6
class HeatingSystem(enum.Enum):
BOILER_GAS = 0
BOILER_OIL = 1
BOILER_ELECTRIC = 2
HEAT_PUMP_AIR_SOURCE = 3
HEAT_PUMP_GROUND_SOURCE = 4
class HeatingFuel(enum.Enum):
GAS = 0
ELECTRICITY = 1
OIL = 2
HEATING_SYSTEM_FUEL: Dict[HeatingSystem, HeatingFuel] = {
HeatingSystem.BOILER_GAS: HeatingFuel.GAS,
HeatingSystem.BOILER_OIL: HeatingFuel.OIL,
HeatingSystem.BOILER_ELECTRIC: HeatingFuel.ELECTRICITY,
HeatingSystem.HEAT_PUMP_AIR_SOURCE: HeatingFuel.ELECTRICITY,
HeatingSystem.HEAT_PUMP_GROUND_SOURCE: HeatingFuel.ELECTRICITY,
}
HEATING_SYSTEM_LIFETIME_YEARS = 15
HAZARD_RATE_HEATING_SYSTEM_ALPHA = 6
HAZARD_RATE_HEATING_SYSTEM_BETA = 15
# If a ban is active and has been announced, irrespective of the `SIGMOID_{K, OFFSET}` values,
# all agents will not consider banned heating systems after this time
MAX_BAN_LEAD_TIME_YEARS = 10
SIGMOID_K = 2
SIGMOID_OFFSET = 0
class ConstructionYearBand(enum.Enum):
# These categories match the England & Wales EPC categories
BUILT_PRE_1900 = 0
BUILT_1900_1929 = 1
BUILT_1930_1949 = 2
BUILT_1950_1966 = 3
BUILT_1967_1975 = 4
BUILT_1976_1982 = 5
BUILT_1983_1990 = 6
BUILT_1991_1995 = 7
BUILT_1996_2002 = 8
BUILT_2003_2006 = 9
BUILT_2007_ONWARDS = 10
# Parameters describing distributions
# A distribution matching 50th/90th percentiles from 2020 Houzz & Home report (11k/100k respectively)
# http://st.hzcdn.com/static/econ/en-GB/2020_HouzzAndHome_UK_Renovation_Trends_Study.pdf
GB_RENOVATION_BUDGET_WEIBULL_ALPHA = 0.55
GB_RENOVATION_BUDGET_WEIBULL_BETA = 21_994
# A distribution aligned to Q2 2021 GB property values
GB_PROPERTY_VALUE_WEIBULL_ALPHA = 1.61
GB_PROPERTY_VALUE_WEIBULL_BETA = 280_000
class Element(enum.Enum):
ROOF = 0
GLAZING = 1
WALLS = 2
class InsulationSegment(enum.Enum):
SMALL_FLAT = 0
LARGE_FLAT = 1
SMALL_MID_TERRACE_HOUSE = 2
LARGE_MID_TERRACE_HOUSE = 3
SMALL_SEMI_END_TERRACE_HOUSE = 4
LARGE_SEMI_END_TERRACE_HOUSE = 5
SMALL_DETACHED_HOUSE = 6
LARGE_DETACHED_HOUSE = 7
BUNGALOW = 8
# parameters chosen to align to a distribution of discount rates obtained from an investigation of 1,217 random
# U.S. homeowners given choice experiments relating to the purchase of a water heater (mean 19%; std. 23%)
# Individual Time Preferences and Energy Efficiency (NBER Working Paper No. 20969)
DISCOUNT_RATE_WEIBULL_ALPHA = 0.8
DISCOUNT_RATE_WEIBULL_BETA = 0.165
class EventTrigger(enum.Enum):
BREAKDOWN = 0
RENOVATION = 1
EPC_C_UPGRADE = 2
# Scale factor is inferred from general relationship between estimated floor area and kW capacity
# https://www.boilerguide.co.uk/articles/size-heat-pump-need (see table)
# https://www.imsheatpumps.co.uk/blog/what-size-heat-pump-do-i-need-for-my-house/
# https://www.homeheatingguide.co.uk/renewables-advice/air-source-heat-pumps-a-sizing-guide
HEAT_PUMP_CAPACITY_SCALE_FACTOR = {
HeatingSystem.HEAT_PUMP_AIR_SOURCE: 0.1,
HeatingSystem.HEAT_PUMP_GROUND_SOURCE: 0.08,
}
MAX_HEAT_PUMP_CAPACITY_KW = {
HeatingSystem.HEAT_PUMP_AIR_SOURCE: 20.0,
HeatingSystem.HEAT_PUMP_GROUND_SOURCE: 25.0,
}
MIN_HEAT_PUMP_CAPACITY_KW = {
HeatingSystem.HEAT_PUMP_AIR_SOURCE: 4.0,
HeatingSystem.HEAT_PUMP_GROUND_SOURCE: 4.0,
}
class PropertySize(enum.Enum):
SMALL = 0
MEDIUM = 1
LARGE = 2
# Source: https://www.ovoenergy.com/guides/energy-guides/how-much-heating-energy-do-you-use
# Assume figure of 133kWh/m2a a reflects an average heating system Coefficient of Performance of 0.92 (gas boiler)
# 133 * 0.92 = 122kWh/m2a
HEATING_KWH_PER_SQM_ANNUAL = 122
FUEL_KWH_TO_HEAT_KWH: Dict[HeatingSystem, float] = {
# The conversion factor between 1kWh of fuel and useful heat. For example:
# Gas Boilers ~ 0.9, since 1kWh of gas produces ~0.9kWh of heat (due to inefficiencies in the boiler)
HeatingSystem.BOILER_GAS: 0.92,
HeatingSystem.BOILER_OIL: 0.92,
HeatingSystem.BOILER_ELECTRIC: 0.995,
HeatingSystem.HEAT_PUMP_AIR_SOURCE: 3,
HeatingSystem.HEAT_PUMP_GROUND_SOURCE: 4,
}
HEAT_PUMPS = {HeatingSystem.HEAT_PUMP_AIR_SOURCE, HeatingSystem.HEAT_PUMP_GROUND_SOURCE}
BOILERS = {
HeatingSystem.BOILER_GAS,
HeatingSystem.BOILER_OIL,
HeatingSystem.BOILER_ELECTRIC,
}
# The likelihoods of houses under renovation choosing to address heating system and/or insulation as part of project
# Derived from the VERD Project, 2012-2013. UK Data Service. SN: 7773, http://doi.org/10.5255/UKDA-SN-7773-1
# Based upon the choices of houses in 'Stage 3' - finalising or actively renovating
RENO_PROBA_HEATING_SYSTEM_UPDATE = 0.18
RENO_PROBA_INSULATION_UPDATE = 0.33
# Likelihood of upgrading 1,2 or 3 insulation elements during a renovation
# Derived from the VERD Project, 2012-2013. UK Data Service. SN: 7773, http://doi.org/10.5255/UKDA-SN-7773-1
# Based upon the choices of houses in 'Stage 3' - finalising or actively renovating
RENO_NUM_INSULATION_ELEMENTS_UPGRADED = {1: 0.76, 2: 0.17, 3: 0.07}
# An amount a house may set aside for work related to home heating and energy efficiency
# Expressed as a proportion of their total renovation budget (20%)
HEATING_PROPORTION_OF_RENO_BUDGET = 0.2
# Upper bound on floor area sqm for to be classed as 'Small', by property type / built form
# As per the segmentation used in Source: BEIS - WHAT DOES IT COST TO RETROFIT HOMES?
RETROFIT_COSTS_SMALL_PROPERTY_SQM_LIMIT = {
"FLAT": 54,
"MID_TERRACE_HOUSE": 76,
"SEMI_OR_END_TERRACE_HOUSE": 80,
"SMALL_DETACHED_HOUSE": 117,
}
# Floor area of homes in England and Wales
# Source: England/Wales Energy Performance Certificates
FLOOR_AREA_SQM_33RD_PERCENTILE = 66
FLOOR_AREA_SQM_66TH_PERCENTILE = 89
class InterventionType(enum.Enum):
RHI = 0
BOILER_UPGRADE_SCHEME = 1
GAS_OIL_BOILER_BAN = 2
# Source: https://www.ons.gov.uk/peoplepopulationandcommunity/birthsdeathsandmarriages/families/datasets/householdsbytypeofhouseholdandfamilyregionsofenglandandukconstituentcountries
ENGLAND_WALES_HOUSEHOLD_COUNT_2020 = 24_600_000
# Source - https://www.heatpumps.org.uk/wp-content/uploads/2020/06/Building-the-Installer-Base-for-Net-Zero-Heating_02.06.pdf
# Uses the CCC Balanced Pathway scenario of 625k HPs/year in 2028, stating it requires 33,700 installers - i.e. an installation takes ~19 days
HEAT_PUMP_INSTALLATION_DURATION_MONTHS = 0.65
# Source: https://ukerc.ac.uk/news/heating-engineers-skills-and-heat-decarbonisation/
# Assuming a 1:1 replacement of gas engineer to heat pump engineers
# In 2019, 130K heating engineers registered with Gas Safe; 27.8mil households = ~215 households to every installer
HOUSEHOLDS_PER_HEAT_PUMP_INSTALLER_FLOOR = 215
# Source: Element Energy for CCC (pg 33 https://www.theccc.org.uk/wp-content/uploads/2020/12/Element-Energy-Trajectories-for-Residential-Heat-Decarbonisation-Executive-Summary.pdf)
# Note: These figures have been adjusted to the England/Wales population using ONS data
ENGLAND_WALES_ANNUAL_NEW_BUILDS = {
2021: 45238,
2022: 62202,
2023: 73511,
2024: 79166,
2025: 282735,
2026: 282735,
2027: 294044,
2028: 254461,
2029: 254461,
2030: 209224,
2031: 197914,
2032: 180950,
2033: 384520,
2034: 384520,
2035: 339282,
2036: 344937,
2037: 390174,
2038: 378865,
2039: 390174,
2040: 384520,
2041: 390174,
2042: 395829,
2043: 356246,
2044: 327973,
2045: 282735,
2046: 271425,
2047: 254461,
2048: 463685,
2049: 446721,
2050: 390174,
}
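# Hedged sketch (added; not part of the original constants module). It assumes
# the constants compose as their comments suggest: annual heat demand scales
# with floor area, fuel demand divides by the system's fuel-to-heat factor,
# and heat pump capacity is floor area times the scale factor, clipped to the
# min/max bounds above.
if __name__ == "__main__":
    floor_area_sqm = 89  # roughly the 66th percentile above
    heat_demand_kwh = floor_area_sqm * HEATING_KWH_PER_SQM_ANNUAL
    for system in (HeatingSystem.BOILER_GAS, HeatingSystem.HEAT_PUMP_AIR_SOURCE):
        fuel_kwh = heat_demand_kwh / FUEL_KWH_TO_HEAT_KWH[system]
        print(system.name, "fuel demand (kWh/year):", round(fuel_kwh))
    ashp = HeatingSystem.HEAT_PUMP_AIR_SOURCE
    capacity_kw = min(
        max(floor_area_sqm * HEAT_PUMP_CAPACITY_SCALE_FACTOR[ashp],
            MIN_HEAT_PUMP_CAPACITY_KW[ashp]),
        MAX_HEAT_PUMP_CAPACITY_KW[ashp],
    )
    print("ASHP capacity (kW):", capacity_kw)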
|
python
|
import pdb
import logging
import numpy as np
import sys
sys.path.append('..')
import MRATools as mt
def determine_radius(k, h):
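    # (Added note; semantics inferred from the code.) Given an ensemble size k
    # and a grid spacing h, pick a cutoff radius so that roughly k points of a
    # regular 2-D grid fall inside the resulting circle; the 1.01 and 0.01*h
    # factors nudge the radius just past the boundary points.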
if k==0:
        raise ValueError("Ensemble size must be strictly positive")
s = np.floor(np.sqrt(k))
if s % 2==0:
sf = s-1
else:
sf = s
if k==sf**2:
return h*1.01*(sf-1)/2*np.sqrt(2)
base = (sf-1)/2.0
intervals = np.array([sf**2])
while intervals[-1]<(sf+2)**2:
if len(intervals)==1 or ((sf+2)**2 - intervals[-1]==4):
intervals = np.append(intervals, intervals[-1] + 4)
else:
intervals = np.append(intervals, intervals[-1] + 8)
ind = intervals.searchsorted(k)
middle = (intervals[ind-1] + intervals[ind])/2.0
if k<=middle:
print('k=%d, app=%d, ind=%d, base=%d' % (k, intervals[ind-1], ind-1, (sf-1)/2.0))
app_ind = ind-1
else:
print('k=%d, app=%d, ind=%d, base=%d' % (k, intervals[ind], ind, (sf-1)/2.0))
app_ind = ind
if app_ind==0:
return h*base*np.sqrt(2) + h*0.01
else:
return h*np.sqrt((base+1)**2 + (app_ind-1)**2) + h*0.01
if __name__=='__main__':
dim_x = 34
dim_y = 34
locs = mt.genLocations2d( Nx=dim_x, Ny=dim_y )
for Nens in [43]:
Sig = mt.KanterCovFun(locs, radius=Nens, circular=False)
mt.dispMat(mt.filterNNZ(Sig[1050,:].reshape(34,34)), cmap="Spectral")
print(len(np.where(Sig[1050,:])[0]))
|
python
|
# -*- coding: utf-8 -*-
"""Code for creating diagrams."""
|
python
|
from .confirmationwindow import ConfirmationWindow
from .menu import Menu
from .popupwindow import PopupWindow
from .scrollselector import ScrollSelector
from .filemenu import FileMenu
__all__ = ["PopupWindow","ScrollSelector",
"ConfirmationWindow","Menu",
"FileMenu"]
|
python
|
from typing import Optional
import torch
from anndata import AnnData
from scvi.model import SCVI
use_gpu = torch.cuda.is_available()
def unsupervised_training_one_epoch(
adata: AnnData,
run_setup_anndata: bool = True,
batch_key: Optional[str] = None,
labels_key: Optional[str] = None,
):
if run_setup_anndata:
SCVI.setup_anndata(adata, batch_key=batch_key, labels_key=labels_key)
m = SCVI(adata)
m.train(1, train_size=0.4, use_gpu=use_gpu)
|
python
|
from jmanager.utils.print_utils import get_progress_text
PROGRESS_TEXT = "test |========================= | 50.0%"
class TestPrintUtils:
def test_get_progress_text(self):
assert get_progress_text(msg="test", iteration=1, total=2) == PROGRESS_TEXT
|
python
|
import numpy as np
def get_pieces_count(state):
count = 0
for s in state:
if s.isalpha():
count += 1
return count
def is_kill_move(state_prev, state_next):
return get_pieces_count(state_prev) - get_pieces_count(state_next)
def create_position_labels():
labels_array = []
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
letters.reverse()
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
for l1 in range(9):
for n1 in range(10):
move = letters[8 - l1] + numbers[n1]
labels_array.append(move)
# labels_array.reverse()
return labels_array
def create_position_labels_reverse():
labels_array = []
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
letters.reverse()
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
for l1 in range(9):
for n1 in range(10):
move = letters[l1] + numbers[n1]
labels_array.append(move)
labels_array.reverse()
return labels_array
class GameBoard(object):
board_pos_name = np.array(create_position_labels()).reshape(9,10).transpose()
Ny = 10
Nx = 9
def __init__(self):
self.state = "RNBAKABNR/9/1C5C1/P1P1P1P1P/9/9/p1p1p1p1p/1c5c1/9/rnbakabnr"#"rnbakabnr/9/1c5c1/p1p1p1p1p/9/9/P1P1P1P1P/1C5C1/9/RNBAKABNR" #
self.round = 1
# self.players = ["w", "b"]
self.current_player = "w"
self.restrict_round = 0
        # lowercase letters denote the black side, uppercase letters denote the red side
# [
# "rheakaehr",
# " ",
# " c c ",
# "p p p p p",
# " ",
# " ",
# "P P P P P",
# " C C ",
# " ",
# "RHEAKAEHR"
# ]
def reload(self):
self.state = "RNBAKABNR/9/1C5C1/P1P1P1P1P/9/9/p1p1p1p1p/1c5c1/9/rnbakabnr"#"rnbakabnr/9/1c5c1/p1p1p1p1p/9/9/P1P1P1P1P/1C5C1/9/RNBAKABNR" #
self.round = 1
self.current_player = "w"
self.restrict_round = 0
@staticmethod
def print_borad(board, action = None):
def string_reverse(string):
# return ''.join(string[len(string) - i] for i in range(1, len(string)+1))
return ''.join(string[i] for i in range(len(string) - 1, -1, -1))
x_trans = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7, 'i': 8}
if(action != None):
src = action[0:2]
src_x = int(x_trans[src[0]])
src_y = int(src[1])
# board = string_reverse(board)
board = board.replace("1", " ")
board = board.replace("2", " ")
board = board.replace("3", " ")
board = board.replace("4", " ")
board = board.replace("5", " ")
board = board.replace("6", " ")
board = board.replace("7", " ")
board = board.replace("8", " ")
board = board.replace("9", " ")
board = board.split('/')
# board = board.replace("/", "\n")
print(" abcdefghi")
for i,line in enumerate(board):
if (action != None):
if(i == src_y):
s = list(line)
s[src_x] = 'x'
line = ''.join(s)
print(i,line)
# print(board)
@staticmethod
def sim_do_action(in_action, in_state):
x_trans = {'a':0, 'b':1, 'c':2, 'd':3, 'e':4, 'f':5, 'g':6, 'h':7, 'i':8}
src = in_action[0:2]
dst = in_action[2:4]
src_x = int(x_trans[src[0]])
src_y = int(src[1])
dst_x = int(x_trans[dst[0]])
dst_y = int(dst[1])
# GameBoard.print_borad(in_state)
# print("sim_do_action : ", in_action)
# print(dst_y, dst_x, src_y, src_x)
board_positions = GameBoard.board_to_pos_name(in_state)
line_lst = []
for line in board_positions:
line_lst.append(list(line))
lines = np.array(line_lst)
# print(lines.shape)
# print(board_positions[src_y])
# print("before board_positions[dst_y] = ",board_positions[dst_y])
lines[dst_y][dst_x] = lines[src_y][src_x]
lines[src_y][src_x] = '1'
board_positions[dst_y] = ''.join(lines[dst_y])
board_positions[src_y] = ''.join(lines[src_y])
# src_str = list(board_positions[src_y])
# dst_str = list(board_positions[dst_y])
# print("src_str[src_x] = ", src_str[src_x])
# print("dst_str[dst_x] = ", dst_str[dst_x])
# c = copy.deepcopy(src_str[src_x])
# dst_str[dst_x] = c
# src_str[src_x] = '1'
# board_positions[dst_y] = ''.join(dst_str)
# board_positions[src_y] = ''.join(src_str)
# print("after board_positions[dst_y] = ", board_positions[dst_y])
# board_positions[dst_y][dst_x] = board_positions[src_y][src_x]
# board_positions[src_y][src_x] = '1'
board = "/".join(board_positions)
board = board.replace("111111111", "9")
board = board.replace("11111111", "8")
board = board.replace("1111111", "7")
board = board.replace("111111", "6")
board = board.replace("11111", "5")
board = board.replace("1111", "4")
board = board.replace("111", "3")
board = board.replace("11", "2")
# GameBoard.print_borad(board)
return board
@staticmethod
def board_to_pos_name(board):
board = board.replace("2", "11")
board = board.replace("3", "111")
board = board.replace("4", "1111")
board = board.replace("5", "11111")
board = board.replace("6", "111111")
board = board.replace("7", "1111111")
board = board.replace("8", "11111111")
board = board.replace("9", "111111111")
return board.split("/")
@staticmethod
def check_bounds(toY, toX):
if toY < 0 or toX < 0:
return False
if toY >= GameBoard.Ny or toX >= GameBoard.Nx:
return False
return True
@staticmethod
def validate_move(c, upper=True):
if (c.isalpha()):
if (upper == True):
if (c.islower()):
return True
else:
return False
else:
if (c.isupper()):
return True
else:
return False
else:
return True
@staticmethod
def get_legal_moves(state, current_player):
moves = []
k_x = None
k_y = None
K_x = None
K_y = None
face_to_face = False
board_positions = np.array(GameBoard.board_to_pos_name(state))
for y in range(board_positions.shape[0]):
for x in range(len(board_positions[y])):
if(board_positions[y][x].isalpha()):
if(board_positions[y][x] == 'r' and current_player == 'b'):
toY = y
for toX in range(x - 1, -1, -1):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].isupper()):
moves.append(m)
break
moves.append(m)
for toX in range(x + 1, GameBoard.Nx):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].isupper()):
moves.append(m)
break
moves.append(m)
toX = x
for toY in range(y - 1, -1, -1):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].isupper()):
moves.append(m)
break
moves.append(m)
for toY in range(y + 1, GameBoard.Ny):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].isupper()):
moves.append(m)
break
moves.append(m)
elif(board_positions[y][x] == 'R' and current_player == 'w'):
toY = y
for toX in range(x - 1, -1, -1):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].islower()):
moves.append(m)
break
moves.append(m)
for toX in range(x + 1, GameBoard.Nx):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].islower()):
moves.append(m)
break
moves.append(m)
toX = x
for toY in range(y - 1, -1, -1):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].islower()):
moves.append(m)
break
moves.append(m)
for toY in range(y + 1, GameBoard.Ny):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].islower()):
moves.append(m)
break
moves.append(m)
elif ((board_positions[y][x] == 'n' or board_positions[y][x] == 'h') and current_player == 'b'):
for i in range(-1, 3, 2):
for j in range(-1, 3, 2):
toY = y + 2 * i
toX = x + 1 * j
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX], upper=False) and board_positions[toY - i][x].isalpha() == False:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
toY = y + 1 * i
toX = x + 2 * j
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX], upper=False) and board_positions[y][toX - j].isalpha() == False:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
elif ((board_positions[y][x] == 'N' or board_positions[y][x] == 'H') and current_player == 'w'):
for i in range(-1, 3, 2):
for j in range(-1, 3, 2):
toY = y + 2 * i
toX = x + 1 * j
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX], upper=True) and board_positions[toY - i][x].isalpha() == False:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
toY = y + 1 * i
toX = x + 2 * j
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX], upper=True) and board_positions[y][toX - j].isalpha() == False:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
elif ((board_positions[y][x] == 'b' or board_positions[y][x] == 'e') and current_player == 'b'):
for i in range(-2, 3, 4):
toY = y + i
toX = x + i
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX],
upper=False) and toY >= 5 and \
board_positions[y + i // 2][x + i // 2].isalpha() == False:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
toY = y + i
toX = x - i
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX],
upper=False) and toY >= 5 and \
board_positions[y + i // 2][x - i // 2].isalpha() == False:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
elif ((board_positions[y][x] == 'B' or board_positions[y][x] == 'E') and current_player == 'w'):
for i in range(-2, 3, 4):
toY = y + i
toX = x + i
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX],
upper=True) and toY <= 4 and \
board_positions[y + i // 2][x + i // 2].isalpha() == False:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
toY = y + i
toX = x - i
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX],
upper=True) and toY <= 4 and \
board_positions[y + i // 2][x - i // 2].isalpha() == False:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
elif (board_positions[y][x] == 'a' and current_player == 'b'):
for i in range(-1, 3, 2):
toY = y + i
toX = x + i
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX],
upper=False) and toY >= 7 and toX >= 3 and toX <= 5:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
toY = y + i
toX = x - i
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX],
upper=False) and toY >= 7 and toX >= 3 and toX <= 5:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
elif (board_positions[y][x] == 'A' and current_player == 'w'):
for i in range(-1, 3, 2):
toY = y + i
toX = x + i
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX],
upper=True) and toY <= 2 and toX >= 3 and toX <= 5:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
toY = y + i
toX = x - i
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX],
upper=True) and toY <= 2 and toX >= 3 and toX <= 5:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
elif (board_positions[y][x] == 'k'):
k_x = x
k_y = y
if(current_player == 'b'):
for i in range(2):
for sign in range(-1, 2, 2):
j = 1 - i
toY = y + i * sign
toX = x + j * sign
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX],
upper=False) and toY >= 7 and toX >= 3 and toX <= 5:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
elif (board_positions[y][x] == 'K'):
K_x = x
K_y = y
if(current_player == 'w'):
for i in range(2):
for sign in range(-1, 2, 2):
j = 1 - i
toY = y + i * sign
toX = x + j * sign
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX],
upper=True) and toY <= 2 and toX >= 3 and toX <= 5:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
elif (board_positions[y][x] == 'c' and current_player == 'b'):
toY = y
hits = False
for toX in range(x - 1, -1, -1):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (hits == False):
if (board_positions[toY][toX].isalpha()):
hits = True
else:
moves.append(m)
else:
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].isupper()):
moves.append(m)
break
hits = False
for toX in range(x + 1, GameBoard.Nx):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (hits == False):
if (board_positions[toY][toX].isalpha()):
hits = True
else:
moves.append(m)
else:
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].isupper()):
moves.append(m)
break
toX = x
hits = False
for toY in range(y - 1, -1, -1):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (hits == False):
if (board_positions[toY][toX].isalpha()):
hits = True
else:
moves.append(m)
else:
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].isupper()):
moves.append(m)
break
hits = False
for toY in range(y + 1, GameBoard.Ny):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (hits == False):
if (board_positions[toY][toX].isalpha()):
hits = True
else:
moves.append(m)
else:
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].isupper()):
moves.append(m)
break
elif (board_positions[y][x] == 'C' and current_player == 'w'):
toY = y
hits = False
for toX in range(x - 1, -1, -1):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (hits == False):
if (board_positions[toY][toX].isalpha()):
hits = True
else:
moves.append(m)
else:
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].islower()):
moves.append(m)
break
hits = False
for toX in range(x + 1, GameBoard.Nx):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (hits == False):
if (board_positions[toY][toX].isalpha()):
hits = True
else:
moves.append(m)
else:
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].islower()):
moves.append(m)
break
toX = x
hits = False
for toY in range(y - 1, -1, -1):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (hits == False):
if (board_positions[toY][toX].isalpha()):
hits = True
else:
moves.append(m)
else:
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].islower()):
moves.append(m)
break
hits = False
for toY in range(y + 1, GameBoard.Ny):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (hits == False):
if (board_positions[toY][toX].isalpha()):
hits = True
else:
moves.append(m)
else:
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].islower()):
moves.append(m)
break
elif (board_positions[y][x] == 'p' and current_player == 'b'):
toY = y - 1
toX = x
if (GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX], upper=False)):
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
if y < 5:
toY = y
toX = x + 1
if (GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX], upper=False)):
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
toX = x - 1
if (GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX], upper=False)):
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
elif (board_positions[y][x] == 'P' and current_player == 'w'):
toY = y + 1
toX = x
if (GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX], upper=True)):
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
if y > 4:
toY = y
toX = x + 1
if (GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX], upper=True)):
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
toX = x - 1
if (GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX], upper=True)):
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
if(K_x != None and k_x != None and K_x == k_x):
face_to_face = True
for i in range(K_y + 1, k_y, 1):
if(board_positions[i][K_x].isalpha()):
face_to_face = False
if(face_to_face == True):
if(current_player == 'b'):
moves.append(GameBoard.board_pos_name[k_y][k_x] + GameBoard.board_pos_name[K_y][K_x])
else:
moves.append(GameBoard.board_pos_name[K_y][K_x] + GameBoard.board_pos_name[k_y][k_x])
return moves
|
python
|
import numpy as np
import theano
import theano.tensor as T
import util
def vanilla(params, gradients, opts):
return [(param, param - opts.learning_rate * gradient)
for param, gradient in zip(params, gradients)]
def momentum(params, gradients, opts):
assert opts.momentum >= 0.0 and opts.momentum <= 1.0
updates = []
for param, gradient in zip(params, gradients):
velocity_t0 = util.zeros_in_the_shape_of(param)
velocity_t1 = opts.momentum * velocity_t0 - opts.learning_rate * gradient
updates.append((velocity_t0, velocity_t1))
updates.append((param, param + velocity_t1))
return updates
def rmsprop(params, gradients, opts):
assert opts.momentum
assert opts.momentum >= 0.0 and opts.momentum <= 1.0
updates = []
for param_t0, gradient in zip(params, gradients):
# rmsprop see slide 29 of http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf
# first the mean_sqr exponential moving average
mean_sqr_t0 = util.zeros_in_the_shape_of(param_t0)
mean_sqr_t1 = (opts.momentum * mean_sqr_t0) + ((1.0-opts.momentum) * gradient**2)
updates.append((mean_sqr_t0, mean_sqr_t1))
        # update param, suppressing the gradient by this running average
param_t1 = param_t0 - opts.learning_rate * (gradient / T.sqrt(mean_sqr_t1 + 1e-10))
updates.append((param_t0, param_t1))
return updates
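# Hedged usage sketch (added; not part of the original module): the update
# lists returned above are meant to be handed to theano.function, e.g.
#
#   gradients = T.grad(loss, params)
#   train = theano.function(inputs, loss, updates=rmsprop(params, gradients, opts))
#
# where `loss`, `inputs`, `params` and the hyper-parameter namespace `opts`
# (carrying learning_rate and momentum) are defined by the caller.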
|
python
|
distancia = float(input('What is the distance of your trip? '))
print('You are about to start a trip of {}km.'.format(distancia))
preço = distancia * 0.50 if distancia <= 200 else distancia * 0.45
print('The price of your ticket will be R${:.2f}'.format(preço))
opcao4 = ''
while opcao4 != 'S' and opcao4 != 'N':
    opcao4 = str(input('Do you want to run it again [S/N]? ')).upper()[0]
    if opcao4 == 'S':
        import JOGO
    if opcao4 == 'N':
        print('Thank you for using our services')
        break
|
python
|
# ### 1.5 function that post process the ccs results
import numpy as np
import pandas as pd
# define a function that could calculate the overall annual emission and lock emission
def bau_ccs_post (df):
coal_annual_existing_emission = df.loc[:,('coal_power_annual_emission_existing')].values
coal_annual_new_emission = df.loc[:,('coal_power_annual_emission_new')].values
gas_annual_existing_emission = df.loc[:,('gas_power_annual_emission_existing')].values
gas_annual_new_emission = df.loc[:,('gas_power_annual_emission_new')].values
oil_annual_existing_emission = df.loc[:,('oil_power_annual_emission_existing')].values
oil_annual_new_emission = df.loc[:,('oil_power_annual_emission_new')].values
coal_lock_existing_emission = df.loc[:,('coal_power_lock_emission_existing')].values
coal_lock_new_emission = df.loc[:,('coal_power_lock_emission_new')].values
gas_lock_existing_emission = df.loc[:,('gas_power_lock_emission_existing')].values
gas_lock_new_emission = df.loc[:,('gas_power_lock_emission_new')].values
oil_lock_existing_emission = df.loc[:,('oil_power_lock_emission_existing')].values
oil_lock_new_emission = df.loc[:,('oil_power_lock_emission_new')].values
coal_overall_lock_emission = np.zeros(shape=(36))
gas_overall_lock_emission = np.zeros(shape=(36))
oil_overall_lock_emission = np.zeros(shape=(36))
coal_overall_annual_emission = np.zeros(shape=(36))
gas_overall_annual_emission = np.zeros(shape=(36))
oil_overall_annual_emission = np.zeros(shape=(36))
for i in range(36):
coal_annual_exisitng = coal_annual_existing_emission[i]
gas_annual_exisitng = gas_annual_existing_emission[i]
oil_annual_exisitng = oil_annual_existing_emission[i]
coal_annual_added = 0
gas_annual_added = 0
oil_annual_added = 0
for j in range(i+1):
coal_annual_new = coal_annual_new_emission[j]
coal_annual_added = coal_annual_added + coal_annual_new
gas_annual_new = gas_annual_new_emission[j]
gas_annual_added = gas_annual_added + gas_annual_new
oil_annual_new = oil_annual_new_emission[j]
oil_annual_added = oil_annual_added + oil_annual_new
coal_overall_annual_emission[i] = coal_annual_exisitng + coal_annual_added
df.loc[:,('coal_annual_emission')] = coal_overall_annual_emission
gas_overall_annual_emission[i] = gas_annual_exisitng + gas_annual_added
df.loc[:,('gas_annual_emission')] = gas_overall_annual_emission
oil_overall_annual_emission[i] = oil_annual_exisitng + oil_annual_added
df.loc[:,('oil_annual_emission')] = oil_overall_annual_emission
for i in range(36):
coal_lock_exisitng = coal_lock_existing_emission[i]
gas_lock_exisitng = gas_lock_existing_emission[i]
oil_lock_exisitng = oil_lock_existing_emission[i]
coal_lock_added = 0
gas_lock_added = 0
oil_lock_added = 0
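        # (Added note.) Locked-in emissions from capacity added in year j are
        # assumed to decay linearly by 2.5% for each year of age (i - j).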
for j in range(i+1):
coal_lock_new = coal_lock_new_emission[j]* (1-0.025*(i-j))
coal_lock_added = coal_lock_added + coal_lock_new
gas_lock_new = gas_lock_new_emission[j]* (1-0.025*(i-j))
gas_lock_added = gas_lock_added + gas_lock_new
oil_lock_new = oil_lock_new_emission[j]* (1-0.025*(i-j))
oil_lock_added = oil_lock_added + oil_lock_new
coal_overall_lock_emission[i] = coal_lock_exisitng + coal_lock_added
df.loc[:,('coal_lock_emission')] = coal_overall_lock_emission
gas_overall_lock_emission[i] = gas_lock_exisitng + gas_lock_added
df.loc[:,('gas_lock_emission')] = gas_overall_lock_emission
oil_overall_lock_emission[i] = oil_lock_exisitng + oil_lock_added
df.loc[:,('oil_lock_emission')] = oil_overall_lock_emission
return df
# define a function that could select the useful columns from the table
def ccs_results (ccs):
ccs_cols = ['year',
'coal_power_capacity_GW','coal_power_capacity_existing','coal_power_capacity_new',
'coal_annual_emission','coal_power_annual_emission_existing','coal_power_annual_emission_new',
'coal_lock_emission','coal_power_lock_emission_existing','coal_power_lock_emission_new',
'gas_power_capacity_GW','gas_power_capacity_existing','gas_power_capacity_new',
'gas_annual_emission','gas_power_annual_emission_existing','gas_power_annual_emission_new',
'gas_lock_emission','gas_power_lock_emission_existing','gas_power_lock_emission_new',
'oil_power_capacity_GW','oil_power_capacity_existing','oil_power_capacity_new',
'oil_annual_emission','oil_power_annual_emission_existing','oil_power_annual_emission_new',
'oil_lock_emission','oil_power_lock_emission_existing','oil_power_lock_emission_new',
'coal_power_capacity_new1','coal_power_annual_emission_new1','coal_power_annual_emission_new1',
'coal_power_capacity_new2','coal_power_annual_emission_new2','coal_power_annual_emission_new2',
'gas_power_capacity_new1','gas_power_annual_emission_new1','gas_power_annual_emission_new1',
'gas_power_capacity_new2','gas_power_annual_emission_new2','gas_power_annual_emission_new2',
'oil_power_capacity_new1','oil_power_annual_emission_new1','oil_power_annual_emission_new1',
'oil_power_capacity_new2','oil_power_annual_emission_new2','oil_power_annual_emission_new2']
ccs = ccs[ccs_cols]
return ccs
|
python
|
from bokeh.models import HoverTool, ColumnDataSource
from bokeh.plotting import figure, output_file, show
from bokeh.models.markers import marker_types
import bokeh.layouts
import datetime
import numpy as np
import DownloadData
age, year_week, data = DownloadData.incidence()
np_data = np.array(data)
np_data[np_data == None] = 0
data = np_data
np.savetxt('age.txt', age, fmt="%s")
np.savetxt('year_week.txt', year_week, fmt="%s")
np.savetxt('data.txt', np_data, fmt="%s")
age = np.genfromtxt('age.txt', dtype='str')
year_week = np.genfromtxt('year_week.txt', dtype='str')
data = np.loadtxt('data.txt')
data, interp = DownloadData.extrapolateLastWeek(year_week, data)
print('Extrapolated last DataPoint: ', interp)
def yw2datetime(yw):
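    # (Added note; behaviour inferred from the code.) Maps a 'YYYY-KWnn'
    # ISO-week string to the Sunday that ends calendar week nn, using the rule
    # that week 1 is the week containing the year's first Thursday; lists and
    # arrays are converted element-wise.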
if isinstance(yw, (list, np.ndarray)):
return [yw2datetime(i) for i in yw]
yw_int = [int(i) for i in yw.split('-KW')]
weekday = datetime.datetime(yw_int[0], 1, 1).weekday()
if weekday <= 3: # Thursday
date_diff = 1 - weekday
else:
date_diff = 8 - weekday
if date_diff > 0:
return datetime.datetime(yw_int[0], 1, date_diff) + datetime.timedelta(weeks=yw_int[1] - 1, days=6)
else:
return datetime.datetime(yw_int[0] - 1, 12, 31 + date_diff) + datetime.timedelta(weeks=yw_int[1] - 1, days=6)
# output to static HTML file
output_file("lines.html")
# create a new plot with a title and axis labels
hover_tool = HoverTool(
tooltips=[
('Altersgruppe', "$name"),
("Datum", "$x"),
("Inzidenz", "$y"),
],
formatters={
'$x': 'datetime',
},
mode='vline'
)
p1 = figure(title="Inzidenz nach Altersgruppen", x_axis_type='datetime', x_axis_label='Datum', y_axis_label='Inzidenz',
tools='pan,wheel_zoom,box_zoom,reset')
p1.sizing_mode = "stretch_both"
label = bokeh.models.Label(x=3, y=3, x_units='screen', y_units='screen',
text='Stand: ' + datetime.datetime.now().strftime("%d.%m.%Y %H:%M") +
'; Quellen: Fallzahlen - Robert Koch-Institut: SurvStat@RKI 2.0, https://survstat.rki.de;' +
' Bevölkerung: https://www-genesis.destatis.de/ 12411-0005 31.12.2019',
text_font_size='8pt')
#
p1.add_layout(label)
p1.xaxis[0].formatter = bokeh.models.DatetimeTickFormatter() # PrintfTickFormatter(format="%d.%m.%Y")
cmap_colors = np.zeros([3, len(age)])
cmap_colors[0] = np.interp(np.linspace(0, 1, len(age)), np.linspace(0, 1, 4),
np.array([17.6, 19.2, 83.1, 83.1]) / 100 * 255)
cmap_colors[1] = np.interp(np.linspace(0, 1, len(age)), np.linspace(0, 1, 4),
np.array([66.7, 30.2, 22, 62.7]) / 100 * 255)
cmap_colors[2] = np.interp(np.linspace(0, 1, len(age)), np.linspace(0, 1, 4),
np.array([17.6, 55.7, 22, 22]) / 100 * 255)
cmap_colors = cmap_colors.astype(np.uint8)
marker_list = [m_type for m_type in marker_types]
marker_selector = [0, 1, 6, 8, 14, 15, 16, 12, 21, 24, 25, 0, 1, 6, 8, 14, 15, 16, 12, 21, 24, 25, 0, 1, 6, 8, 14, 15,
16, 12, 21, 24, 25, 0, 1, 6, 8, 14, 15, 16, 12, 21, 24, 25, 0, 1, 6, 8, 14, 15, 16, 12, 21, 24, 25,
0, 1, 6, 8, 14, 15, 16, 12, 21, 24, 25, 0, 1, 6, 8, 14, 15, 16, 12, 21, 24, 25, 0, 1, 6, 8, 14, 15,
16, 12, 21, 24, 25, 0, 1, 6, 8, 14, 15, 16, 12, 21, 24, 25, 0, 1, 6, 8, 14, 15, 16, 12, 21, 24, 25,
0, 1, 6, 8, 14, 15, 16, 12, 21, 24, 25, 0, 1, 6, 8, 14, 15, 16, 12, 21, 24, 25, 0, 1, 6, 8, 14, 15,
16, 12, 21, 24, 25, 0, 1, 6, 8, 14, 15, 16, 12, 21, 24, 25, 0, 1, 6, 8, 14, 15, 16, 12, 21, 24, 25,
0, 1, 6, 8, 14, 15, 16, 12, 21, 24, 25]
glyph_list = []
for i in range(len(age)):
if age[i] == 'Gesamt':
line_color = (0, 0, 0)
line_width = 2
else:
line_color = tuple(cmap_colors[:, i])
line_width = 1
muted_alpha = .1
source = ColumnDataSource(data=dict(
x_list=list(yw2datetime(year_week)),
y_list=list(data[i][:]),
desc=[age[i] for x in range(len(year_week))],
col=[line_color for x in year_week]
))
if interp:
li = p1.line(source.data['x_list'][:-1], source.data['y_list'][:-1], line_color=line_color,
line_width=line_width,
line_alpha=1, muted_alpha=muted_alpha, legend_label=age[i])
li2 = p1.line(source.data['x_list'][-2:], source.data['y_list'][-2:], line_color=line_color,
line_width=line_width,
line_alpha=1, muted_alpha=muted_alpha, legend_label=age[i], line_dash=[3, 3])
else:
li = p1.line(source.data['x_list'], source.data['y_list'], line_color=line_color, line_width=line_width,
line_alpha=1, muted_alpha=muted_alpha, legend_label=age[i])
sca = p1.scatter(x="x_list", y="y_list", source=source, muted_alpha=muted_alpha, legend_label=age[i])
sca.glyph.marker = marker_list[marker_selector[i]]
sca.glyph.line_color = line_color
sca.glyph.fill_color = None
sca.glyph.size = 8
glyph_list.append(sca)
p1.add_tools(HoverTool(
renderers=glyph_list,
tooltips=[
("Alter", "@desc"),
("Datum", "@x_list{%d.%m.%Y}"),
("Inzidenz", "@y_list{0}"),
],
formatters={'@x_list': 'datetime', },
))
p1.legend.location = "top_left"
p1.legend.click_policy = "mute"
p1.legend.orientation = "horizontal"
column = bokeh.layouts.column([p1]) # , p2])
column.sizing_mode = "stretch_both"
show(column)
|
python
|
#!/usr/bin/env python
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from hpsdnclient.api import Api
from hpsdnclient.auth import XAuthToken
from hpsdnclient.apibase import ApiBase
from hpsdnclient.core import CoreMixin
from hpsdnclient.net import NetMixin
from hpsdnclient.of import OfMixin
class ApiTestCase(unittest.TestCase):
def setUp(self):
self.auth = XAuthToken(server='example.com',
user='sdn',
password='skyline'
)
def test_api_instantiation(self):
api = Api('10.10.10.10', self.auth)
self.assertTrue(isinstance(api, ApiBase))
self.assertTrue(isinstance(api, CoreMixin))
self.assertTrue(isinstance(api, NetMixin))
self.assertTrue(isinstance(api, OfMixin))
self.assertEqual(api.restclient.auth, self.auth)
self.assertEqual(api.controller, '10.10.10.10')
|
python
|
#!/usr/bin/env python3
"""
Author : Ken Youens-Clark <[email protected]>
Date : 2020-11-11
Purpose: Find patterns in files
"""
import argparse
import re
from typing import List, NamedTuple, TextIO
class Args(NamedTuple):
""" Command-line arguments """
pattern_file: TextIO
search_files: List[TextIO]
out_file: TextIO
# --------------------------------------------------
def get_args() -> Args:
""" Get command-line arguments """
parser = argparse.ArgumentParser(
description='Find patterns in files',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-p',
'--pattern',
help='Patterns file',
metavar='FILE',
type=argparse.FileType('rt'),
required=True)
parser.add_argument('-f',
'--file',
help='A readable file',
metavar='FILE',
type=argparse.FileType('rt'),
required=True,
nargs='+')
parser.add_argument('-o',
'--out_file',
help='Output file',
metavar='FILE',
type=argparse.FileType('wt'),
default='out.txt')
args = parser.parse_args()
return Args(pattern_file=args.pattern,
search_files=args.file,
out_file=args.out_file)
# --------------------------------------------------
def main() -> None:
""" Make a jazz noise here """
args = get_args()
patterns = list(map(lambda s: re.compile(s.rstrip()), args.pattern_file))
num_found = 0
for fh in args.search_files:
for line in map(str.rstrip, fh):
for regex in patterns:
if regex.search(line):
num_found += 1
print(line, file=args.out_file)
print(f'Done, wrote {num_found} matches to "{args.out_file.name}."')
# --------------------------------------------------
if __name__ == '__main__':
main()
|
python
|
#!/usr/bin/env python3
from __future__ import division, absolute_import, print_function, unicode_literals
import subprocess
import re
import time
import sys
import argparse
import yaml
from timeparse import timeparse
def call(args):
return '\n'.join(subprocess.check_output(args).decode().splitlines())
def get_all_statuses():
return [tuple(x.split(",")) for x in call(["docker", "ps", "--all", "--format", "{{.ID}},{{.Status}}"]).splitlines()]
def get_statuses_for_ids(ids):
status_list = get_all_statuses()
statuses = {}
for id in ids:
status = None
for s in status_list:
if id.find(s[0]) == 0:
status = s[1]
break
if status is None:
status = "removed"
statuses[id] = status
return statuses
def convert_status(s):
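    # (Added note.) Parses `docker ps` status strings, e.g.
    #   "Up 2 minutes (healthy)"   -> "healthy"
    #   "Up 10 seconds"            -> "up"
    #   "Exited (0) 3 hours ago"   -> "down"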
res = re.search(r"^([^\s]+)[^\(]*(?:\((.*)\).*)?$", s)
if res is None:
raise Exception("Unknown status format %s" % s)
if res.group(1) == "Up":
if res.group(2) == "health: starting":
return "starting"
elif res.group(2) == "healthy":
return "healthy"
elif res.group(2) == "unhealthy":
return "unhealthy"
elif res.group(2) is None:
return "up"
else:
raise Exception("Unknown status format %s" % s)
else:
return "down"
def get_converted_statuses(ids):
return dict([(k, convert_status(v)) for k, v in get_statuses_for_ids(ids).items()])
def get_docker_compose_args(args):
nargs = []
for f in args.file:
nargs += ['-f', f]
if args.project_name:
nargs += ['-p', args.project_name]
return nargs
def get_services_ids(dc_args):
services_names = yaml.safe_load(call(["docker-compose"] + dc_args + ["config"]))["services"].keys()
services = {}
for name in services_names:
id = call(["docker-compose"] + dc_args + ["ps", '-q', name]).strip()
if id == '':
continue
services[name] = id
return services
def get_services_statuses(services_with_ids):
statuses_by_id = get_converted_statuses(services_with_ids.values())
return dict([(k, statuses_by_id[v]) for k, v in services_with_ids.items()])
def main():
parser = argparse.ArgumentParser(
description='Wait until all services in a docker-compose file are healthy. Options are forwarded to docker-compose.',
usage='docker-compose-wait.py [options]'
)
parser.add_argument('-f', '--file', action='append', default=[],
help='Specify an alternate compose file (default: docker-compose.yml)')
parser.add_argument('-p', '--project-name',
help='Specify an alternate project name (default: directory name)')
parser.add_argument('-w', '--wait', action='store_true',
help='Wait for all the processes to stabilize before exit (default behavior is to exit '
+ 'as soon as any of the processes is unhealthy)')
parser.add_argument('-t', '--timeout', default=None,
help='Max amount of time during which this command will run (expressed using the '
+ 'same format than in docker-compose.yml files, example: 5s, 10m,... ). If there is a '
+ 'timeout this command will exit returning 1. (default: wait for an infinite amount of time)')
args = parser.parse_args()
dc_args = get_docker_compose_args(args)
start_time = time.time()
timeout = timeparse(args.timeout) if args.timeout is not None else None
services_ids = get_services_ids(dc_args)
up_statuses = set(['healthy', 'up'])
down_statuses = set(['down', 'unhealthy', 'removed'])
stabilized_statuses = up_statuses | down_statuses
while True:
statuses = get_services_statuses(services_ids)
if args.wait:
if any([v not in stabilized_statuses for k, v in statuses.items()]):
continue
if all([v in up_statuses for k, v in statuses.items()]):
print("All processes up and running")
exit(0)
elif any([v in down_statuses for k, v in statuses.items()]):
print("Some processes failed:")
for k, v in [(k, v) for k, v in statuses.items() if v in down_statuses]:
print("%s is %s" % (k, v))
exit(-1)
if args.timeout is not None and time.time() > start_time + timeout:
print("Timeout")
exit(1)
time.sleep(1)
if __name__ == "__main__":
# execute only if run as a script
main()
|
python
|
from durabledict.base import DurableDict
from durabledict.encoding import NoOpEncoding
class MemoryDict(DurableDict):
'''
Does not actually persist any data to a persistant storage. Instead, keeps
everything in memory. This is really only useful for use in tests
'''
def __init__(self, autosync=True, encoding=NoOpEncoding, *args, **kwargs):
self.__storage = dict()
self.__last_updated = 1
super(MemoryDict, self).__init__(autosync, encoding, *args, **kwargs)
def persist(self, key, val):
self.__storage[key] = self.encoding.encode(val)
self.__last_updated += 1
def depersist(self, key):
del self.__storage[key]
self.__last_updated += 1
def durables(self):
encoded_tuples = self.__storage.items()
tuples = [(k, self.encoding.decode(v)) for k, v in encoded_tuples]
return dict(tuples)
def last_updated(self):
return self.__last_updated
def _setdefault(self, key, default=None):
self.__last_updated += 1
val = self.__storage.setdefault(key, self.encoding.encode(default))
return self.encoding.decode(val)
def _pop(self, key, default=None):
self.__last_updated += 1
if default:
default = self.encoding.encode(default)
val = self.__storage.pop(key, default)
if val is None:
raise KeyError
return self.encoding.decode(val)
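# Hedged usage sketch (added; not part of the original module). It only calls
# the methods defined above, so it should hold regardless of what the
# DurableDict base class layers on top; the value round-trip assumes
# NoOpEncoding decodes back to the same object it encoded.
if __name__ == "__main__":
    md = MemoryDict()
    before = md.last_updated()
    md.persist("flag", True)
    assert md.durables() == {"flag": True}
    assert md.last_updated() == before + 1
    md.depersist("flag")
    assert md.durables() == {}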
|
python
|
# Copyright (c) The InferLO authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 - see LICENSE.
import random
from copy import copy
from functools import reduce
from typing import List, Optional
import numpy as np
from .factor import Factor, product_over_
from .graphical_model import GraphicalModel
class MiniBucketElimination:
"""Mini-Bucket elimination algorithm."""
def __init__(self, model: GraphicalModel = None, **kwargs):
self.base_logZ = 0.0
self.model = model.copy()
self.working_model = None # type: Optional[GraphicalModel]
if "elimination_order" in kwargs:
self.elimination_order = copy(kwargs["elimination_order"])
else:
self.elimination_order = []
if "renormalized_model" not in kwargs:
self._renormalize_model(**kwargs)
else:
self.renormalized_model = kwargs["renormalized_model"].copy()
self.renormalized_elimination_order = kwargs[
"renormalized_elimination_order"]
self.variables_replicated_from_ = kwargs[
"variables_replicated_from_"]
self.base_logZ = kwargs["base_logZ"]
self._initialize_relation()
def _initialize_relation(self):
variable_upper_to_ = {var: None for var in
self.renormalized_model.variables}
variables_lower_to_ = {var: [] for var in
self.renormalized_model.variables}
factor_upper_to_ = {var: Factor.scalar(1.0) for var in
self.renormalized_model.variables}
upper_candidate_for_ = {var: set() for var in
self.renormalized_model.variables}
for fac in self.renormalized_model.factors:
lower_var = fac.first_variable_in_order(self.renormalized_elimination_order)
if lower_var is not None:
factor_upper_to_[lower_var] = fac
for var in fac.variables:
if var in self.renormalized_elimination_order:
upper_candidate_for_[lower_var].add(var)
for var in self.renormalized_elimination_order:
m_vars = sorted(
upper_candidate_for_[var],
key=self.renormalized_elimination_order.index
)
upper_candidate_for_[var] = copy(m_vars[m_vars.index(var) + 1:])
if m_vars.index(var) + 1 < len(m_vars):
upper_var = m_vars[m_vars.index(var) + 1]
variable_upper_to_[var] = upper_var
variables_lower_to_[upper_var].append(var)
upper_candidate_for_[upper_var] = upper_candidate_for_[
upper_var].union(
m_vars[m_vars.index(var) + 1:]
)
self.variable_upper_to_ = variable_upper_to_
self.variables_lower_to_ = variables_lower_to_
self.factors_adj_to_ = {
var: self.renormalized_model.get_adj_factors(var)
for var in self.renormalized_model.variables
}
self.factor_upper_to_ = factor_upper_to_
# self.upper_candidate_for_ = upper_candidate_for_
def _renormalize_model(self, **kwargs):
ibound = kwargs["ibound"]
use_min_fill = True
if "elimination_order_method" in kwargs:
if kwargs["elimination_order_method"] == "random":
elimination_order = copy(self.model.variables)
use_min_fill = False
random.shuffle(elimination_order)
elif kwargs["elimination_order_method"] == "not_random":
elimination_order = copy(self.model.variables)
use_min_fill = False
elif kwargs["elimination_order_method"] == "min_fill":
elimination_order = []
use_min_fill = True
elif "elimination_order" in kwargs:
elimination_order = copy(kwargs["elimination_order"])
use_min_fill = False
else:
elimination_order = copy(self.model.variables)
use_min_fill = False
random.shuffle(elimination_order)
# elimination_order = []
# use_min_fill = True
renormalized_model = self.model.copy()
renormalized_elimination_order = []
variables_replicated_from_ = {var: [] for var in self.model.variables}
factors_adj_to_ = dict()
working_factorss = [[fac] for fac in renormalized_model.factors]
eliminated_variables = []
for t in range(len(elimination_order)):
uneliminated_variables = sorted(
set(self.model.variables) - set(eliminated_variables))
candidate_mini_buckets_for_ = dict()
bucket_for_ = {cand_var: [] for cand_var in uneliminated_variables}
for facs in working_factorss:
for cand_var in self._get_variables_in(
[[fac] for fac in facs],
eliminated=eliminated_variables):
bucket_for_[cand_var].append(facs)
for cand_var in uneliminated_variables:
candidate_mini_buckets_for_[cand_var] = []
for facs in bucket_for_[cand_var]:
mini_bucket = None
for mb in candidate_mini_buckets_for_[cand_var]:
eliminated = eliminated_variables + [cand_var]
if self.get_bucket_size(
mb + [facs], eliminated=eliminated) < ibound:
mini_bucket = mb
break
if mini_bucket:
mini_bucket.append(facs)
else:
candidate_mini_buckets_for_[cand_var].append([facs])
if use_min_fill:
var, mini_buckets = min(
candidate_mini_buckets_for_.items(), key=lambda x: len(
x[1]))
elimination_order.append(var)
else:
var = elimination_order[t]
mini_buckets = candidate_mini_buckets_for_[var]
eliminated_variables.append(var)
mini_buckets.sort(
key=lambda mb: self.get_bucket_size(
mb, eliminated=eliminated_variables))
remove_idx = []
for working_facs_idx, working_facs in enumerate(working_factorss):
if var in self._get_variables_in(
[[fac] for fac in working_facs]):
remove_idx.append(working_facs_idx)
for i in reversed(sorted(remove_idx)):
working_factorss.pop(i)
for (i, mb) in enumerate(mini_buckets):
mb_facs = [fac for facs in mb for fac in facs]
working_factorss.append(mb_facs)
replicated_var = var + "_" + str(i)
variables_replicated_from_[var].append(replicated_var)
factors_adj_to_[replicated_var] = [fac for fac in mb_facs if
var in fac.variables]
for var in elimination_order:
for replicated_var in variables_replicated_from_[var]:
renormalized_model.add_variable(replicated_var)
renormalized_elimination_order.append(replicated_var)
for fac in factors_adj_to_[replicated_var]:
fac.variables[fac.variables.index(var)] = replicated_var
renormalized_model.remove_variable(var)
# For each variable find factors which will be eliminated with it.
factors_upper_to_ = {var: [] for var in renormalized_elimination_order}
for fac in renormalized_model.factors:
lower_var = fac.first_variable_in_order(renormalized_elimination_order)
if lower_var is not None:
factors_upper_to_[lower_var].append(fac)
assert set(factors_upper_to_.keys()) == set(
renormalized_elimination_order)
for var, facs in factors_upper_to_.items():
if facs:
new_fac = product_over_(*facs)
for fac in facs:
renormalized_model.remove_factor(fac)
renormalized_model.add_factor(new_fac)
base_logZ = 0.0
for fac in renormalized_model.factors:
base_logZ += np.max(fac.log_values)
fac.log_values -= np.max(fac.log_values)
self.elimination_order = elimination_order
self.renormalized_model = renormalized_model
self.renormalized_elimination_order = renormalized_elimination_order
self.variables_replicated_from_ = variables_replicated_from_
self.base_logZ = base_logZ
def run(self, get_z=True):
"""Runs the algorithm.
Eliminates all variables in elimination_order, and stores eliminated model in
self.working_model.
"""
self.working_model = self.renormalized_model.copy()
for var in self.elimination_order:
for i, rvar in enumerate(self.variables_replicated_from_[var]):
if i < len(self.variables_replicated_from_[var]) - 1:
self.working_model.contract_variable(rvar, operator="max")
else:
self.working_model.contract_variable(rvar, operator="sum")
def get_log_z(self) -> float:
"""Returns logarithm of partition function for fully eliminated model."""
assert self.working_model is not None, 'Called get_log_z() before run().'
assert len(self.working_model.variables) == 0, "Model is not fully eliminated."
log_z = self.base_logZ
for fac in self.working_model.factors:
log_z += fac.log_values
return log_z
def _get_variables_in(self, bucket, eliminated=None):
if eliminated is None:
eliminated = []
if [fac.variables for facs in bucket for fac in facs]:
variables_in_bucket = reduce(
lambda vars1, vars2: set(vars1).union(set(vars2)),
[fac.variables for facs in bucket for fac in facs],
)
variables_in_bucket = sorted(
set(variables_in_bucket) - set(eliminated))
return list(variables_in_bucket)
else:
return []
def get_bucket_size(self, bucket: List[Factor], eliminated=None):
"""Counts variables referenced by factors in given bucket."""
if eliminated is None:
eliminated = []
variables_in_bucket = self._get_variables_in(bucket, eliminated)
if variables_in_bucket:
return len(variables_in_bucket)
else:
return 0
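# Hedged usage sketch (added; not part of the original module): given an
# inferlo GraphicalModel `model` built elsewhere, the intended call pattern
# appears to be
#
#   mbe = MiniBucketElimination(model, ibound=10)
#   mbe.run()
#   log_z = mbe.get_log_z()
#
# where `ibound` caps the mini-bucket size during renormalization; with the
# max/sum splitting used in run(), the result is an upper bound on the true
# log partition function.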
|
python
|
import itertools
from collections import defaultdict, OrderedDict, Counter, namedtuple, deque
# defaultdict can define default_factory where
# accessing a key which does not exist, creates it with a default value
# and the default value is returned
# commonly used to append to lists in dictionaries
dd1 = defaultdict(list)
print dd1
print dd1['addme']
print dd1
print dict(dd1)
print '---'
# grouping idiom
names = ['van Rossum', 'torvalds', 'stallman', 'thompson', 'ritchie', 'wall', 'gosling']
d = defaultdict(list)
for name in names:
key = len(name)
d[key].append(name)
print d
print '---'
# defaultdict inside defaultdict...
# trees, json
# https://gist.github.com/hrldcpr/2012250
# https://www.youtube.com/watch?v=uWGPxYDo87Q#t=14m35s
dd2 = defaultdict(lambda: defaultdict(dict))
print dd2
print dd2['lev1']['lev2']
print dd2
print '---'
# Counter - unordered collection where elements are dict keys
# and their counts are stored as dict values
c = Counter('these are attack eyebrows.')
print c
print "t occurs", c['t'], "times"
#0 returned for missing items instead of KeyError as with dict
print "z occurs", c['z'], "times"
print "Top two most common:", c.most_common(2)
print "appear more than once:", [k for k, v in c.iteritems() if v > 1]
#elements returns iterator
print "elements:", list(c.elements())
del c['t']
print "after 't' removed:", c
print '-'
# with generator
print Counter(len(name) for name in names)
print '-'
# nested
lst = [[1, 2, 1], [2, 3, 1, 1], [], [4, 5]]
print Counter(itertools.chain(*lst))
#print Counter(val for sub in lst for val in sub)
print '-'
# other ways to init
print Counter({'a': 4, 'b': 3})
print Counter(a=4, b=3)
print '---'
# namedtuple- tuple subclass, uses no more memory than regular tuple
# "you should use named tuples instead of tuples anywhere you think object notation
# will make your code more pythonic and more easily readable"
# http://stackoverflow.com/a/2970722
# fieldnames specified as a sequence of strings
# can also specify fieldnames as single string with fieldnames separated by
# whitespace and/or commas
Point = namedtuple('Point', ['x', 'y'])
p1 = Point(1.0, 2.0)
p2 = Point(x=-1.0, y=-2.0)
print p1
print p2
print p1.x, p1[1]
print p2.x, p2.y
print '-'
# Raymond Hettinger, author of namedtuple on 'clarifying
# multiple return values with named tuples':
# https://www.youtube.com/watch?v=OSGv2VnC0go&t=32m19s
def get_name():
name = namedtuple("name", ["first", "middle", "last"])
return name("John", "Marwood", "Cleese")
name = get_name()
print name.first, name.middle, name.last
print '---'
# deque - pronounced 'deck', thread-safe
d = deque('efg')
d.append('h')
d.appendleft('d')
print d
# extend and extendleft take an iterable
d.extend('ijk')
# left appends in the arg in reverse order to the front
d.extendleft('cba')
print d
print d.pop()
print d.popleft()
print d
d.remove('i')
print d
# when new items are added beyond maxlen an equal number
# are removed from the opposite end
d = deque(maxlen=3)
d.extend('abc')
print d
d.append('d')
print d
d.appendleft('a')
print d
print '---'
# OrderedDict - dictionary that preserves the order keys were added
od1 = OrderedDict()
od1['a'] = 0
od1['b'] = 1
od1['c'] = 2
od1['d'] = 3
print od1
a_dict = {}
a_dict['a'] = 0
a_dict['b'] = 1
a_dict['c'] = 2
a_dict['d'] = 3
print a_dict
print '-'
lst = [1, 2, 3, 1, 1, 4, 5, 4, 1]
print "lst:", lst
# https://stackoverflow.com/a/7961425
print "with duplicates removed, order maintained:", list(OrderedDict.fromkeys(lst))
|
python
|
#!/usr/bin/python3
from evdev import InputDevice, categorize, ecodes
import configparser, os
config = configparser.ConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__))+'/config.ini')
def sysrun(command):
os.system(command+ " &")
dev = InputDevice(config["DEVICE"]["path"])
dev.grab()
for event in dev.read_loop():
if event.type == ecodes.EV_KEY:
key = categorize(event)
if key.keystate == key.key_down:
print(key.keycode)
if key.keycode in config["KEYS"].keys():
print("Executing "+config["KEYS"][key.keycode])
sysrun(config["KEYS"][key.keycode])
|
python
|
from .colored import (
info,
error,
fatal
)
from .get_env_var import get_env_var
from .sync_to_async import sync_to_async
__all__ = (
'info',
'error',
'fatal',
'get_env_var',
'sync_to_async'
)
|
python
|
from .doc import AssemblyDocCommand
from .helpers import instruction_set
from .helpers import support_set
from .completion import completionListener
from .context import ContextManager
__all__ = ["AssemblyDocCommand", "instruction_set","support_set", "completionListener", "ContextManager"]
|
python
|
#!/usr/bin/python
# Filename: ex_for.py
for i in range(0, 5):
print("{0}: hello".format(i))
print('The for loop is over')
|
python
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# ============================================================================
# Erfr - One-time pad encryption tool
# Extractor script
# Copyright (C) 2018 by Ralf Kilian
# Distributed under the MIT License (https://opensource.org/licenses/MIT)
#
# Website: http://www.urbanware.org
# GitHub: https://github.com/urbanware-org/erfr
# ============================================================================
import os
import sys
def main():
from core import clap
from core import common
from datetime import datetime as dt
try:
p = clap.Parser()
except Exception as e:
print "%s: error: %s" % (os.path.basename(sys.argv[0]), e)
sys.exit(1)
p.set_description("Extract a user-defined byte range from an existing " \
"into a new file.")
p.set_epilog("Further information and usage examples can be found " \
"inside the documentation file for this script.")
# Define required arguments
p.add_avalue("-i", "--input-file", "input file path", "input_file", None,
True)
p.add_avalue("-l", "--length", "number of bytes to read", "length", None,
True)
p.add_avalue("-o", "--output-file", "output file path", "output_file",
None, True)
p.add_avalue("-s", "--offset", "position where to start reading",
"offset", None, True)
# Define optional arguments
p.add_avalue("-b", "--buffer-size", "buffer size in bytes", "buffer_size",
4096, False)
p.add_switch("-h", "--help", "print this help message and exit", None,
True, False)
p.add_switch(None, "--overwrite", "overwrite existing file", "overwrite",
True, False)
p.add_switch("-r", "--remove", "remove the extracted data from the " \
"input file", "remove_bytes", True, False)
p.add_switch(None, "--version", "print the version number and exit", None,
True, False)
if len(sys.argv) == 1:
p.error("At least one required argument is missing.")
elif ("-h" in sys.argv) or ("--help" in sys.argv):
p.print_help()
sys.exit(0)
elif "--version" in sys.argv:
print common.get_version()
sys.exit(0)
args = p.parse_args()
remove_bytes = None
force_remove = \
bool(int(common.global_config(["Extractor"], ["force_remove"], "0")))
if force_remove:
remove_bytes = True
else:
remove_bytes = args.remove_bytes
try:
timestamp = dt.now()
common.extract_bytes(args.input_file, args.output_file, args.offset,
args.length, args.buffer_size, args.overwrite,
remove_bytes)
print "Elapsed time: %s" % (dt.now() - timestamp)
except Exception as e:
p.error(e)
if __name__ == "__main__":
main()
# EOF
|
python
|
import logging
g_logger = None
def get_logger():
return g_logger
def configure_logging(name: str, level: int = logging.DEBUG):
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=level)
global g_logger
g_logger = logging.getLogger(name=name)
g_logger.debug("logger with name=\"{}\" configured, min severity={}".format(name, logging.getLevelName(level)))
|
python
|
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from scipy import linalg
import torchvision
from torchvision import datasets, transforms
from Tools import FLAGS
def get_dataset(train, subset):
transf = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),]
)
if FLAGS.dataset.lower() == "svhn":
if train is True:
split = "train"
else:
split = "test"
sets = datasets.SVHN(
"/home/LargeData/Regular/svhn", split=split, download=True, transform=transf
)
elif FLAGS.dataset.lower() == "cifar10":
sets = datasets.CIFAR10(
"/home/LargeData/Regular/cifar",
train=train,
download=True,
transform=transf,
)
elif FLAGS.dataset.lower() == "cifar100":
sets = datasets.CIFAR100(
"/home/LargeData/Regular/cifar",
train=train,
download=True,
transform=transf,
)
return sets
def inf_train_gen(batch_size, train=True, infinity=True, subset=0):
loader = torch.utils.data.DataLoader(
get_dataset(train, subset),
batch_size,
drop_last=True,
shuffle=True,
num_workers=8,
)
if infinity is True:
while True:
for img, labels in loader:
yield img, labels
else:
for img, labels in loader:
yield img, labels
if __name__ == "__main__":
# Utils.config.load_config("./configs/classifier_cifar10_mt_aug.yaml")
FLAGS.zca = True
FLAGS.translate = 2
# wrapper = AugmentWrapper()
dataset = get_dataset(True, 0)
img_list = []
for i in range(100):
img, _ = dataset.__getitem__(i)
img_list.append(img)
img_list = torch.stack(img_list, 0).cuda()
torchvision.utils.save_image((img_list + 1) / 2, "./tmp.png", nrow=10)
# img_list = wrapper(img_list)
# print(torch.max(img_list), torch.min(img_list))
# torchvision.utils.save_image((img_list + 1) / 2, "./tmp1.png", nrow=10)
|
python
|
from collections.abc import Container
class MyContainer(Container):
def __init__(self, value):
self.data = value
def __contains__(self, value):
return value in self.data
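# Quick demonstration (sample values are arbitrary): membership checks are
# delegated to the wrapped data via __contains__.
if __name__ == "__main__":
    box = MyContainer([1, 2, 3])
    print(3 in box)   # True
    print(9 in box)   # False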
|
python
|
# names, grades, attributes, sentences
names = ['Tom', 'Fred', 'Harry', 'Hermione', 'Ron', 'Sarah', 'Ele', 'Mike', 'Peter']
subjects = ['Maths','Science','English', 'Arts', 'Music', 'German', 'French', 'PE']
grade_adjective = {
1: ['terrible', 'horrible'],
2: ['below average', 'mediocre'],
3: ['Average', 'As expected'],
4: ['good','above average'],
5: ['Excellent', 'awesome']
}
silly_excuse = {
1: ['Needs to pay attention to reading'],
2: ['should consider wearing blue wellies'],
3: ['dog ate their homework'],
4: ['is always late'],
5: ['is a genius anyway']
}
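# A possible report-line generator built from the data above. The sentence format and
# the random pairing are assumptions for illustration; the original file only defines the lists.
import random
name = random.choice(names)
subject = random.choice(subjects)
grade = random.randint(1, 5)
print("{} scored {} ({}) in {} - {}.".format(
    name, grade,
    random.choice(grade_adjective[grade]),
    subject,
    random.choice(silly_excuse[grade])))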
|
python
|
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from oauth2_provider.decorators import protected_resource
import json
from datetime import datetime
from django.utils import timezone
from sqlshare_rest.util.db import get_backend
from sqlshare_rest.models import Dataset, User, Query, RecentDatasetView
from sqlshare_rest.views import get_oauth_user, get403, get404, get405
from sqlshare_rest.views.sql import response_for_query
from sqlshare_rest.dao.user import get_user
from sqlshare_rest.dao.dataset import create_dataset_from_query
from sqlshare_rest.dao.dataset import create_dataset_from_snapshot
from sqlshare_rest.dao.dataset import create_preview_for_dataset
from sqlshare_rest.dao.dataset import get_dataset_by_owner_and_name
from sqlshare_rest.dao.dataset import update_dataset_sql, add_public_access
from sqlshare_rest.dao.dataset import remove_public_access
from sqlshare_rest.util.query import get_sample_data_for_query
from sqlshare_rest.logger import getLogger
logger = getLogger(__name__)
@csrf_exempt
@protected_resource()
def download(request, owner, name):
get_oauth_user(request)
if request.META['REQUEST_METHOD'] != "POST":
response = HttpResponse("")
response.status_code = 405
return response
try:
dataset = get_dataset_by_owner_and_name(owner, name)
except Dataset.DoesNotExist:
return get404()
except User.DoesNotExist:
return get404()
except Exception as ex:
raise
user = get_user(request)
if not dataset.user_has_read_access(user):
return get403()
backend = get_backend()
sql = backend.get_download_sql_for_dataset(dataset)
download_name = "%s.csv" % name
return response_for_query(sql, user, download_name)
@csrf_exempt
@protected_resource()
def snapshot(request, owner, name):
get_oauth_user(request)
if request.META['REQUEST_METHOD'] != "POST":
return get405()
try:
dataset = get_dataset_by_owner_and_name(owner, name)
except Dataset.DoesNotExist:
return get404()
except User.DoesNotExist:
return get404()
except Exception as ex:
raise
user = get_user(request)
if not dataset.user_has_read_access(user):
return get403()
values = json.loads(request.body.decode("utf-8"))
new_name = values["name"]
description = values["description"]
is_public = values.get("is_public", True)
logger.info("POST dataset snapshot; owner: %s; name: %s; "
"destination_name: %s; is_public: %s" % (owner,
name,
new_name,
is_public),
request)
new_dataset = create_dataset_from_snapshot(user, new_name, dataset)
if is_public:
new_dataset.is_public = True
else:
new_dataset.is_public = False
new_dataset.save()
response = HttpResponse("")
response["location"] = new_dataset.get_url()
response.status_code = 201
return response
@csrf_exempt
@protected_resource()
def dataset(request, owner, name):
get_oauth_user(request)
if request.META['REQUEST_METHOD'] == "GET":
return _get_dataset(request, owner, name)
if request.META['REQUEST_METHOD'] == "PUT":
return _put_dataset(request, owner, name)
if request.META['REQUEST_METHOD'] == "PATCH":
return _patch_dataset(request, owner, name)
if request.META['REQUEST_METHOD'] == "DELETE":
return _delete_dataset(request, owner, name)
def _get_dataset(request, owner, name):
try:
dataset = get_dataset_by_owner_and_name(owner, name)
except Dataset.DoesNotExist:
return get404()
except User.DoesNotExist:
return get404()
except Exception as ex:
raise
user = get_user(request)
if not dataset.user_has_read_access(user):
return get403()
if dataset.popularity:
dataset.popularity = dataset.popularity + 1
else:
dataset.popularity = 1
dataset.last_viewed = timezone.now()
dataset.save()
get_or_create = RecentDatasetView.objects.get_or_create
recent_view, created = get_or_create(dataset=dataset, user=user)
recent_view.timestamp = timezone.now()
recent_view.save()
data = dataset.json_data()
if dataset.preview_is_finished:
username = user.username
query = Query.objects.get(is_preview_for=dataset)
sample_data, columns = get_sample_data_for_query(query,
username)
data["sample_data"] = sample_data
data["columns"] = columns
logger.info("GET dataset; owner: %s; name: %s" % (owner, name), request)
data["qualified_name"] = get_backend().get_qualified_name(dataset)
return HttpResponse(json.dumps(data))
def _put_dataset(request, owner, name):
user = get_user(request)
username = user.username
if username != owner:
raise Exception("Owner doesn't match user: %s, %s" % (owner, username))
data = json.loads(request.body.decode("utf-8"))
try:
dataset = create_dataset_from_query(username, name, data["sql_code"])
description = data.get("description", "")
is_public = data.get("is_public", False)
dataset.description = description
dataset.is_public = is_public
dataset.save()
if is_public:
add_public_access(dataset)
else:
remove_public_access(dataset)
response = HttpResponse(json.dumps(dataset.json_data()))
response.status_code = 201
logger.info("PUT dataset; owner: %s; name: %s" % (owner, name),
request)
return response
except Exception as ex:
response = HttpResponse("Error saving dataset: %s" % ex)
response.status_code = 400
return response
def _patch_dataset(request, owner, name):
user = get_user(request)
username = user.username
if username != owner:
raise Exception("Owner doesn't match user: %s, %s" % (owner, username))
dataset = get_dataset_by_owner_and_name(owner, name)
data = json.loads(request.body.decode("utf-8"))
updated = False
if "description" in data:
dataset.description = data["description"]
logger.info("PATCH dataset description; owner: %s; name: %s; "
"description: %s" % (owner, name, data["description"]),
request)
updated = True
if "sql_code" in data:
dataset.sql = data["sql_code"]
updated = True
logger.info("PATCH dataset sql_code; owner: %s; name: %s; "
"sql_code: %s" % (owner, name, dataset.sql), request)
try:
update_dataset_sql(owner, dataset, data["sql_code"])
updated = True
except Exception as ex:
r = HttpResponse("Error updating sql: %s" % (str(ex)))
r.status_code = 400
return r
if "is_public" in data:
dataset.is_public = data["is_public"]
logger.info("PATCH dataset is_public; owner: %s; name: %s; "
"is_public: %s" % (owner, name, dataset.is_public),
request)
if dataset.is_public:
get_backend().add_public_access(dataset, user)
else:
get_backend().remove_public_access(dataset, user)
updated = True
dataset.save()
return HttpResponse(json.dumps(dataset.json_data()))
def _delete_dataset(request, owner, name):
user = get_user(request)
username = user.username
if username != owner:
raise Exception("Owner doesn't match user: %s, %s" % (owner, username))
response = HttpResponse("")
try:
dataset = get_dataset_by_owner_and_name(owner, name)
Query.objects.filter(is_preview_for=dataset).delete()
except Dataset.DoesNotExist:
response.status_code = 404
return response
logger.info("DELETE dataset; owner: %s; name: %s" % (owner, name), request)
dataset.delete()
return response
|
python
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.portable.inputs_utils."""
import os
import tensorflow as tf
from tfx import types
from tfx.orchestration import metadata
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import test_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.proto.orchestration import pipeline_pb2
from google.protobuf import text_format
from ml_metadata.proto import metadata_store_pb2
class InputsUtilsTest(test_utils.TfxTest):
def setUp(self):
super().setUp()
self._testdata_dir = os.path.join(os.path.dirname(__file__), 'testdata')
def testResolveParameters(self):
parameters = pipeline_pb2.NodeParameters()
text_format.Parse(
"""
parameters {
key: 'key_one'
value {
field_value {string_value: 'value_one'}
}
}
parameters {
key: 'key_two'
value {
field_value {int_value: 2}
}
}""", parameters)
parameters = inputs_utils.resolve_parameters(parameters)
self.assertEqual(len(parameters), 2)
self.assertEqual(parameters['key_one'], 'value_one')
self.assertEqual(parameters['key_two'], 2)
def testResolveParametersFail(self):
parameters = pipeline_pb2.NodeParameters()
text_format.Parse(
"""
parameters {
key: 'key_one'
value {
runtime_parameter {name: 'rp'}
}
}""", parameters)
with self.assertRaisesRegex(RuntimeError, 'Parameter value not ready'):
inputs_utils.resolve_parameters(parameters)
def testResolverInputsArtifacts(self):
pipeline = pipeline_pb2.Pipeline()
self.load_proto_from_text(
os.path.join(self._testdata_dir,
'pipeline_for_input_resolver_test.pbtxt'), pipeline)
my_example_gen = pipeline.nodes[0].pipeline_node
another_example_gen = pipeline.nodes[1].pipeline_node
my_transform = pipeline.nodes[2].pipeline_node
my_trainer = pipeline.nodes[3].pipeline_node
connection_config = metadata_store_pb2.ConnectionConfig()
connection_config.sqlite.SetInParent()
with metadata.Metadata(connection_config=connection_config) as m:
# Publishes first ExampleGen with two output channels. `output_examples`
# will be consumed by downstream Transform.
output_example = types.Artifact(
my_example_gen.outputs.outputs['output_examples'].artifact_spec.type)
output_example.uri = 'my_examples_uri'
side_examples = types.Artifact(
my_example_gen.outputs.outputs['side_examples'].artifact_spec.type)
side_examples.uri = 'side_examples_uri'
contexts = context_lib.register_contexts_if_not_exists(
m, my_example_gen.contexts)
execution = execution_publish_utils.register_execution(
m, my_example_gen.node_info.type, contexts)
execution_publish_utils.publish_succeeded_execution(
m, execution.id, contexts, {
'output_examples': [output_example],
'another_examples': [side_examples]
})
# Publishes second ExampleGen with one output channel with the same output
# key as the first ExampleGen. However this is not consumed by downstream
# nodes.
another_output_example = types.Artifact(
another_example_gen.outputs.outputs['output_examples'].artifact_spec
.type)
another_output_example.uri = 'another_examples_uri'
contexts = context_lib.register_contexts_if_not_exists(
m, another_example_gen.contexts)
execution = execution_publish_utils.register_execution(
m, another_example_gen.node_info.type, contexts)
execution_publish_utils.publish_succeeded_execution(
m, execution.id, contexts, {
'output_examples': [another_output_example],
})
# Gets inputs for transform. Should get back what the first ExampleGen
# published in the `output_examples` channel.
transform_inputs = inputs_utils.resolve_input_artifacts(
m, my_transform.inputs)
self.assertEqual(len(transform_inputs), 1)
self.assertEqual(len(transform_inputs['examples']), 1)
self.assertProtoPartiallyEquals(
transform_inputs['examples'][0].mlmd_artifact,
output_example.mlmd_artifact,
ignored_fields=[
'create_time_since_epoch', 'last_update_time_since_epoch'
])
# Tries to resolve inputs for trainer. As trainer also requires min_count
# for both input channels (from example_gen and from transform) but we did
# not publish anything from transform, it should return nothing.
self.assertIsNone(
inputs_utils.resolve_input_artifacts(m, my_trainer.inputs))
if __name__ == '__main__':
tf.test.main()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sys import argv
import bottle
from bottle import default_app, request, route, response, get
from pymongo import MongoClient
import json
import api
bottle.debug(True)
def enable_cors(fn):
def _enable_cors(*args, **kwargs):
# set CORS headers
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
if bottle.request.method != 'OPTIONS':
# actual request; reply with the actual response
return fn(*args, **kwargs)
return _enable_cors
@get('/')
def home():
return "welcome to the official api for rateurspkear!"
@get('/geek')
def geek():
return api.__geek()
@route
def yo():
return "yo"
@route('/login', method=["OPTIONS", "POST"])
@enable_cors
def post_login(**user):
return api.__post_login(**user)
@route('/signup', method=["OPTIONS", "POST"])
@enable_cors
def post_signup():
return api.__post_signup()
# --------------------------------------------- #
# speakrs api:
# ready:
# GET /speakrs
# todo:
# POST /me/set-speech-title
# GET /speakr?id=id
# --------------------------------------------- #
@get('/speakrs')
@enable_cors
def get_speakrs():
return api.__get_speakrs()
@get('/speakr')
@enable_cors
def get_speakr():
speakr_id = int(request.query.speakrId)
return api.__get_speakr_by_id(speakr_id)
# --------------------------------------------- #
# talks api:
# ready:
# GET /talks
# GET /get-talk?talkId=talkId
# todo:
# POST /rate?talkId=talkId
# --------------------------------------------- #
@get('/talks')
@enable_cors
def get_talks():
if request.query.speakrId != "":
speakr_id = request.query.speakrId
return api.__get_talks_by_speakrid(int(speakr_id))
else:
return api.__get_talks()
@get('/talk')
@enable_cors
def get_talk():
talkId = int(request.query.talkId)
return api.__get_talk_by_id(talkId)
bottle.run(host='0.0.0.0', port=argv[1])
|
python
|
class Pelicula:
    # Class constructor
def __init__(self, titulo, duracion, lanzamiento):
self.titulo = titulo
self.duracion = duracion
self.lanzamiento = lanzamiento
print("Se ha creado la pelicula", self.titulo)
    # Override the __str__ method
def __str__(self):
return "{} lanzada en {} con una duración de {} minutos".format(self.titulo, self.lanzamiento, self.duracion)
    # Override the __len__ method
def __len__(self):
return self.duracion
class Catalogo:
peliculas = []
def __init__(self, peliculas=[]):
self.peliculas = peliculas
def agregar_pelicula(self, pelicula):
self.peliculas.append(pelicula)
def mostrar_catalogo(self):
print("El Catalogo Contiene las siguientes preguntas:")
for p in self.peliculas:
print(p)
p = Pelicula("El Padrino", 175, 1972)
c = Catalogo([p])
c.agregar_pelicula(Pelicula("Interestelar", 2015, 280))
c.mostrar_catalogo()
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from random import randint
import time,sys
braille = ['⣽','⢿','⣻','⡿','⣾','⣟','⣯','⣷']
z = len(braille)
for x in range(10):
r = (randint(0,z-1))
sys.stdout.write("Working " + braille[r] + " " + str(x*10) + "% done" )
sys.stdout.flush()
time.sleep(1)
sys.stdout.write("\r")
sys.stdout.flush()
sys.stdout.write("Working " + braille[r] + " " + "100 % done" )
print "\nCompleted."
|
python
|
# Generated by Django 3.0.4 on 2020-03-30 20:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('base', '0002_auto_20200330_1310'),
]
operations = [
migrations.RenameField(
model_name='product',
old_name='descrtiption',
new_name='description',
),
]
|
python
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Title   : String utility class
@File : string_utils.py
@Author : vincent
@Time    : 2020/9/15 11:14 AM
@Version : 1.0
'''
import os
def is_null(obj):
    if obj is None:
        return True
    if len(obj) == 0:
        return True
    return False
def list_2_str(str_list):
r_str = ''
if str_list:
for s in str_list:
r_str = r_str + str(s) + "\n"
return r_str
def to_str(obj):
if not obj:
return ""
str1 = replace((str(obj)))
return str1
def replace(str1):
str1 = str1.replace("(", "(")
str1 = str1.replace(")", ")")
return str1
def equals(str1, str2):
str1 = replace(to_str(str1))
str2 = replace(to_str(str2))
if str1 == str2:
return True
else:
return False
def equals_list(list1, list2):
str1 = list_2_str(list1)
str2 = list_2_str(list2)
return equals(str1, str2)
def create_header_line(file_name, max_len=70):
s_len = len(file_name)
if s_len >= max_len:
return file_name
w_size = (max_len - s_len) // 2
t_str = "-" * w_size
return t_str + file_name + t_str
def create_header_link(channel, data_type, file_name):
file_name = os.path.split(file_name)[-1] # file_name is full path with dirs, we only need the last file name
file_name_a = '<a href="./case_ocr.html?data_type=' + data_type + '&file_name=' + file_name \
+ "&channel=" + channel \
+ '" target="_blank">' + file_name + '</a>'
return file_name_a
def create_demo_link(case_type, ip_address, file_name):
img_name = os.path.basename(file_name)
file_name_a = '<a href="./demo_result.html?case_type=' + case_type + '&ip_address=' + ip_address \
+ "&file_name=" + img_name \
+ '" target="_blank">' + img_name + '</a>'
return file_name_a
def rate_format(rate):
"""
    Takes the rate as a decimal and returns it formatted as a percentage string
@param rate:
@return:
"""
filed_rate = rate * 100
rate_str = "{:.2f}".format(filed_rate)
return rate_str
def temp_reg_date_format(date_str):
if date_str and len(date_str) >= 7:
return date_str[0:7]
else:
return date_str
def temp_duration_format(duration: str):
if duration:
d_arr = duration.split("至")
if len(d_arr) == 2:
return temp_reg_date_format(d_arr[0]) + "至" + temp_reg_date_format(d_arr[1])
else:
return duration
else:
return duration
def create_compare_result(gt_value, pred_value):
if gt_value == pred_value:
result_str = "<p>{}<p>".format("正确")
else:
result_str = "<p style=\"color:red\">{}<p>".format("错误")
return result_str
|
python
|
# Generated by Django 2.2.10 on 2020-02-27 14:37
from django.db import migrations, models
def nullify_courserun_expiration_dates(apps, schema_editor):
"""
Unset all of the expiration dates set automatically by the previous code.
We are moving to empty expiration dates by default and only explicitly setting them
if needed on a specific course run.
"""
CourseRun = apps.get_model("courses", "CourseRun")
CourseRun.objects.update(expiration_date=None)
class Migration(migrations.Migration):
dependencies = [("courses", "0025_run_tag")]
operations = [
migrations.AlterField(
model_name="courserun",
name="expiration_date",
field=models.DateTimeField(
blank=True,
db_index=True,
help_text="The date beyond which the learner should not see link to this course run on their dashboard.",
null=True,
),
),
migrations.RunPython(
nullify_courserun_expiration_dates, migrations.RunPython.noop
),
]
|
python
|
# Generated by Django 3.0.4 on 2020-03-11 21:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kegs', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='beer',
name='description',
field=models.TextField(default=1, max_length=250),
preserve_default=False,
),
migrations.AddField(
model_name='keg',
name='number',
field=models.IntegerField(default=1),
preserve_default=False,
),
]
|
python
|
# Generated by Django 3.1.14 on 2022-03-24 11:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('feedback', '0030_auto_20220324_0947'),
]
operations = [
migrations.AddField(
model_name='report',
name='eid',
field=models.CharField(blank=True, default='', max_length=1024, verbose_name='External id'),
),
]
|
python
|
from http import HTTPStatus
from django.urls import resolve, reverse
from mock import patch
from barriers.views.public_barriers import PublicBarrierDetail
from core.tests import MarketAccessTestCase
class PublicBarrierViewTestCase(MarketAccessTestCase):
def test_public_barrier_url_resolves_to_correct_view(self):
match = resolve(f'/barriers/{self.barrier["id"]}/public/')
assert match.func.view_class == PublicBarrierDetail
@patch("utils.api.client.PublicBarriersResource.get")
@patch("utils.api.client.PublicBarriersResource.get_activity")
@patch("utils.api.client.PublicBarriersResource.get_notes")
def test_public_barrier_view_loads_correct_template(
self, _mock_get_notes, _mock_get_activity, mock_get
):
mock_get.return_value = self.barrier
url = reverse(
"barriers:public_barrier_detail", kwargs={"barrier_id": self.barrier["id"]}
)
response = self.client.get(url)
assert HTTPStatus.OK == response.status_code
self.assertTemplateUsed(response, "barriers/public_barriers/detail.html")
@patch("utils.api.client.PublicBarriersResource.get")
@patch("utils.api.client.PublicBarriersResource.get_activity")
@patch("utils.api.client.PublicBarriersResource.get_notes")
def test_public_barrier_view_loads_html(
self, _mock_get_notes, _mock_get_activity, mock_get
):
mock_get.return_value = self.public_barrier
url = reverse(
"barriers:public_barrier_detail", kwargs={"barrier_id": self.barrier["id"]}
)
title = "<title>Market Access - Public barrier</title>"
section_head = '<h2 class="summary-group__heading">Public view:'
notes_head = '<h2 class="section-heading govuk-!-margin-bottom-0">Internal notes and updates</h2>'
add_note_button = (
'<a class="govuk-button button--primary" href="?add-note=1">Add a note</a>'
)
response = self.client.get(url)
html = response.content.decode("utf8")
assert HTTPStatus.OK == response.status_code
assert title in html
assert section_head in html
assert notes_head in html
assert add_note_button in html
@patch("utils.api.resources.APIResource.list")
@patch("utils.api.client.PublicBarriersResource.get")
@patch("utils.api.client.PublicBarriersResource.get_activity")
@patch("utils.api.client.PublicBarriersResource.get_notes")
def test_public_barrier_search_view_loads_html(
self, _mock_get_notes, _mock_get_activity, mock_get, mock_list
):
mock_get.return_value = self.public_barrier
url = reverse("barriers:public_barriers")
title = "<title>Market Access - Public barriers</title>"
section_head = '<h1 class="govuk-heading-l govuk-!-margin-bottom-5">Market access public barriers</h1>'
response = self.client.get(url)
html = response.content.decode("utf8")
assert HTTPStatus.OK == response.status_code
assert title in html
assert section_head in html
@patch("utils.api.resources.APIResource.list")
@patch("utils.api.client.PublicBarriersResource.get")
@patch("utils.api.client.PublicBarriersResource.get_activity")
@patch("utils.api.client.PublicBarriersResource.get_notes")
def test_public_barrier_search_view_filters_correctly(
self, _mock_get_notes, _mock_get_activity, mock_get, mock_list
):
def fetch_html_for_params(params):
url = reverse("barriers:public_barriers")
response = self.client.get(url, data=params)
html = response.content.decode("utf8")
return html, response
html, response = fetch_html_for_params(
{
"country": [
"9f5f66a0-5d95-e211-a939-e4115bead28a",
"83756b9a-5d95-e211-a939-e4115bead28a",
],
"sector": [
"9538cecc-5f95-e211-a939-e4115bead28a",
"aa22c9d2-5f95-e211-a939-e4115bead28a",
],
"region": [
"3e6809d6-89f6-4590-8458-1d0dab73ad1a",
"5616ccf5-ab4a-4c2c-9624-13c69be3c46b",
],
"status": ["0", "10", "30"],
}
)
form = response.context["form"]
assert HTTPStatus.OK == response.status_code
assert form.cleaned_data["country"] == [
"9f5f66a0-5d95-e211-a939-e4115bead28a",
"83756b9a-5d95-e211-a939-e4115bead28a",
]
assert form.cleaned_data["sector"] == [
"9538cecc-5f95-e211-a939-e4115bead28a",
"aa22c9d2-5f95-e211-a939-e4115bead28a",
]
assert form.cleaned_data["region"] == [
"3e6809d6-89f6-4590-8458-1d0dab73ad1a",
"5616ccf5-ab4a-4c2c-9624-13c69be3c46b",
]
assert form.cleaned_data["status"] == ["0", "10", "30"]
mock_list.assert_called_with(
country=",".join(
[
"9f5f66a0-5d95-e211-a939-e4115bead28a",
"83756b9a-5d95-e211-a939-e4115bead28a",
]
),
sector=",".join(
[
"9538cecc-5f95-e211-a939-e4115bead28a",
"aa22c9d2-5f95-e211-a939-e4115bead28a",
]
),
region=",".join(
[
"3e6809d6-89f6-4590-8458-1d0dab73ad1a",
"5616ccf5-ab4a-4c2c-9624-13c69be3c46b",
]
),
status=",".join(["0", "10", "30"]),
)
|
python
|
from __future__ import absolute_import
from __future__ import unicode_literals
import multiprocessing
import os
import sys
import mock
import pytest
import pre_commit.constants as C
from pre_commit.languages import helpers
from pre_commit.prefix import Prefix
from pre_commit.util import CalledProcessError
from testing.auto_namedtuple import auto_namedtuple
def test_basic_get_default_version():
assert helpers.basic_get_default_version() == C.DEFAULT
def test_basic_healthy():
assert helpers.basic_healthy(None, None) is True
def test_failed_setup_command_does_not_unicode_error():
script = (
'import sys\n'
"getattr(sys.stderr, 'buffer', sys.stderr).write(b'\\x81\\xfe')\n"
'exit(1)\n'
)
# an assertion that this does not raise `UnicodeError`
with pytest.raises(CalledProcessError):
helpers.run_setup_cmd(Prefix('.'), (sys.executable, '-c', script))
def test_assert_no_additional_deps():
with pytest.raises(AssertionError) as excinfo:
helpers.assert_no_additional_deps('lang', ['hmmm'])
msg, = excinfo.value.args
assert msg == (
'For now, pre-commit does not support additional_dependencies for lang'
)
SERIAL_FALSE = auto_namedtuple(require_serial=False)
SERIAL_TRUE = auto_namedtuple(require_serial=True)
def test_target_concurrency_normal():
with mock.patch.object(multiprocessing, 'cpu_count', return_value=123):
with mock.patch.dict(os.environ, {}, clear=True):
assert helpers.target_concurrency(SERIAL_FALSE) == 123
def test_target_concurrency_cpu_count_require_serial_true():
with mock.patch.dict(os.environ, {}, clear=True):
assert helpers.target_concurrency(SERIAL_TRUE) == 1
def test_target_concurrency_testing_env_var():
with mock.patch.dict(
os.environ, {'PRE_COMMIT_NO_CONCURRENCY': '1'}, clear=True,
):
assert helpers.target_concurrency(SERIAL_FALSE) == 1
def test_target_concurrency_on_travis():
with mock.patch.dict(os.environ, {'TRAVIS': '1'}, clear=True):
assert helpers.target_concurrency(SERIAL_FALSE) == 2
def test_target_concurrency_cpu_count_not_implemented():
with mock.patch.object(
multiprocessing, 'cpu_count', side_effect=NotImplementedError,
):
with mock.patch.dict(os.environ, {}, clear=True):
assert helpers.target_concurrency(SERIAL_FALSE) == 1
def test_shuffled_is_deterministic():
assert helpers._shuffled(range(10)) == [3, 7, 8, 2, 4, 6, 5, 1, 0, 9]
|
python
|
import requests
import os
import shutil
from constants import chromedriverPath
# Constants
chromedriverUrl = 'https://chromedriver.storage.googleapis.com/89.0.4389.23/chromedriver_win32.zip'
chromedriverFile = 'chromedriver.zip'
def downloadFile(url, name):
    r = requests.get(url, allow_redirects=True)
    with open(name, 'wb') as f:
        f.write(r.content)
def downloadChromedriver():
downloadFile(chromedriverUrl, chromedriverFile)
shutil.unpack_archive(chromedriverFile, chromedriverPath)
if __name__ == '__main__':
downloadChromedriver()
|
python
|
import turtle
###################################################
# range = number of repetitions
# right or left = degrees to the right or left
# forward or backward = length of the line + direction forwards/backwards
# can you draw several at the same time? how?
###################################################
for n in range(1,80):
    turtle.left(80)
    turtle.forward(n*2)
    turtle.right(40)
    turtle.backward(n*2)
for n in range(1,50):
    turtle.right(80)
    turtle.forward(n*1.5)
for n in range(1,32):
    turtle.left(45)
    turtle.forward(n*7)
input()
|
python
|
# __init__.py
from cogs.add import add
from cogs.add import add_all
from cogs.apply import apply
from cogs.clear import clear
from cogs.connect import connect
from cogs.delete import delete
from cogs.diff import diff
from cogs.fetch import fetch
from cogs.ignore import ignore
from cogs.init import init
from cogs.ls import ls
from cogs.mv import mv
from cogs.merge import merge
from cogs.push import push
from cogs.rm import rm
from cogs.share import share
from cogs.status import status
from cogs.helpers import get_sheet_url
# Version of COGS
__version__ = "0.0.1"
|
python
|
class ActivationFrame():
def __init__(self, owner):
self.owner = owner
self.static_link = None
self.dynamic_link = None
self.params = []
self.local_vars = []
self.static_vars = []
self.external_vars = []
self.temp_vars = []
self.previous_PC = 0
def get_deep_copy(self):
copy = ActivationFrame(self.owner)
copy.static_link = self.static_link
copy.dynamic_link = self.dynamic_link
copy.params = self.params.copy()
copy.local_vars = self.local_vars.copy()
copy.static_vars = self.static_vars.copy()
copy.temp_vars = self.temp_vars.copy()
return copy
def __str__(self):
output = ""
output += f"owner_function: {self.owner.name} "
output += f"| static_link: ({str(self.static_link)}) "
output += f"| dynamic_link: ({str(self.dynamic_link)}) "
output += f"| params: ({str(self.params)}) "
output += f"| local_vars: ({str(self.local_vars)}) "
output += f"| static_vars: ({str(self.static_vars)}) "
output += f"| temp_vars: ({str(self.temp_vars)})"
return output
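# Small illustration of how a frame might be used; the owner object below is a stand-in
# with only a .name attribute, since the real owner type lives elsewhere in the project.
if __name__ == "__main__":
    class _DemoOwner:
        name = "main"
    frame = ActivationFrame(_DemoOwner())
    frame.params.append(42)
    frame.local_vars.append("x")
    print(frame)
    print(frame.get_deep_copy())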
|
python
|
# 4. Write a Python program that receives a list of names entered by the user with no fixed size (the list ends when the user types 0) and then receives a name to check whether it is in the list or not. Note: ignore differences between upper and lower case.
nomes = []
while True:
    a = input('Digite um nome pra acrescentar a lista ou 0 para sair: ').lower()
    if a == '0':
        break
    else:
        nomes.append(a)
nome = input('Digite um nome a ser buscado na lista: ')
if nome.lower() in nomes:
    print('O nome está na lista!')
else:
    print('O nome NÃO está na lista!')
|
python
|
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("BSC Indicator"),
"items": [
{
"type": "doctype",
"name": "BSC Initiative Log",
"description":_("BSC Initiative Log"),
"onboard": 1,
"dependencies": ["BSC Initiative"],
},
{
"type": "doctype",
"name": "BSC Initiative",
"description":_("BSC Initiative"),
"onboard": 1,
"dependencies": ["BSC Target"],
},
{
"type": "doctype",
"name": "BSC Target",
"description":_("BSC Target"),
"onboard": 1,
"dependencies": ["BSC Indicator"],
},
{
"type": "doctype",
"name": "BSC Indicator",
"description":_("BSC Indicator"),
"onboard": 1,
"dependencies": ["BSC Objective"],
}
]
},
{
"label": _("BSC Meeting"),
"items": [
{
"type": "doctype",
"name": "BSC Meeting",
"description":_("BSC Meeting"),
},
{
"type": "doctype",
"name": "BSC Meeting Minutes",
"description":_("BSC Meeting Minutes"),
"onboard": 1,
"dependencies": ["BSC Meeting"],
},
{
"type": "doctype",
"name": "BSC Meeting Recommendation",
"description":_("BSC Meeting Recommendation"),
"onboard": 1,
"dependencies": ["BSC Meeting Minutes"],
},
{
"type": "doctype",
"name": "BSC Committee",
"description":_("BSC Committee"),
},
{
"type": "report",
"name": "BSC Meeting Report Summery",
"doctype": "BSC Meeting",
"is_query_report": True
}
]
},
{
"label": _("Reports"),
"icon": "fa fa-list",
"items": [
{
"type": "report",
"name": "Department-wise Indicator Performance",
"doctype": "BSC Indicator",
"is_query_report": True
},
{
"type": "report",
"name": "Department-wise Initiative Performance",
"doctype": "BSC Initiative",
"is_query_report": True
},
{
"type": "report",
"name": "BSC Initiative Analysis",
"doctype": "BSC Initiative Log",
"is_query_report": True
},
{
"type": "report",
"name": "Department-wise Operation Plan and Program Budget",
"doctype": "BSC Initiative",
"is_query_report": True
},
{
"type": "report",
"name": "Department-wise Initiative Achievement Report",
"doctype": "BSC Initiative Log",
"is_query_report": True
},
{
"type": "report",
"name": "BSC Performance Report",
"doctype": "BSC Perspective",
"is_query_report": True
}
]
},
{
"label": _("Setup"),
"items": [
{
"type": "doctype",
"name": "BSC Perspective",
"description":_("BSC Perspective"),
"onboard": 1,
},
{
"type": "doctype",
"name": "BSC Objective",
"description":_("BSC Objective"),
"onboard": 1,
"dependencies": ["BSC Perspective"],
},
{
"type": "doctype",
"name": "BSC Target Group",
"description":_("BSC Target Group"),
"onboard": 1
},
{
"type": "doctype",
"name": "BSC Settings",
"description":_("BSC Settings"),
}
]
},
]
|
python
|
# -*- coding: utf-8 -*-
"""
Blank template
"""
###### Welcome to Script Tasks. First, let's go over a few rules for using them ######
# For the detailed tutorial, see the AI Studio documentation (https://ai.baidu.com/ai-doc/AISTUDIO/Ik3e3g4lt).
# Script task workflow:
# 1. Write code / upload local code files
# 2. Adjust the dataset paths and the output file path
# 3. Fill in the launch command and remarks
# 4. Submit the task and choose a run mode (single machine single GPU / single machine 4 GPUs / two machines 4 GPUs)
# 5. Check task progress and logs on the project details page
# Notes:
# 1. The output size limit is 20GB; exceeding it may cause the output download to fail.
# 2. A single script task may run for at most 72 hours (three days).
# 3. When using single-machine 4-GPU or two-machine 4-GPU mode, GPU ids may be omitted and all visible GPUs are started by default; if you do set GPU ids, use 0,1,2,3 for single-machine 4-GPU and 0,1 for two-machine 4-GPU.
# Logging: the task automatically records environment initialization logs, execution logs, error logs, and everything the script writes to stdout/stderr (e.g. print()); after submitting the task you can follow them via "View logs".
# -------------------------------Paths for datasets and output files---------------------------------
# Dataset path
# datasets_prefix is the root path of the datasets; the full dataset file path is the root path joined with a relative path.
# To get the relative path: while editing the project, click the [Copy] button next to the file under "Datasets" in the left navigation.
# datasets_prefix = '/root/paddlejob/workspace/train_data/datasets/'
# train_datasets = datasets_prefix + '<real dataset file path obtained via the copy button>'
# Output file path
# When the task finishes, the platform compresses everything under output_dir into a tar.gz package, which can be downloaded locally via "Download output".
# output_dir = "/root/paddlejob/workspace/output"
# -------------------------------Notes about the launch command------------------------------------
# Script tasks support two ways of running
# 1. Shell script: write the commands the project needs in run.sh and put something like bash run.sh in the launch command box.
# 2. Python command: write the code in run.py and put something like python run.py <arg1> <arg2> in the launch command box.
# Note: run.sh / run.py can be replaced with your own files, e.g. python train.py or bash train.sh.
# Command examples:
# 1. Python command
# ---------------------------------------single machine, 4 GPUs-------------------------------------------
# Option 1 (no GPU ids): python -m paddle.distributed.launch run.py
# Option 2 (with GPU ids): python -m paddle.distributed.launch --gpus="0,1,2,3" run.py
# ---------------------------------------two machines, 4 GPUs-------------------------------------------
# Option 1 (no GPU ids): python -m paddle.distributed.launch run.py
# Option 2 (with GPU ids): python -m paddle.distributed.launch --gpus="0,1" run.py
# 2. Shell command
# Use run.sh, or create a new shell file and write the commands to run in it (recommended when several commands are needed).
# Taking single-machine 4-GPU without GPU ids as an example, copy the option-1 command above into run.sh and set the launch command to bash run.sh
# Import the ResNet, VGG and LeNet networks from the paddle.vision.models module
import paddle
from paddle.vision.models import resnet50, vgg16, LeNet
from paddle.vision.datasets import Cifar10
from paddle.optimizer import Momentum
from paddle.regularizer import L2Decay
from paddle.nn import CrossEntropyLoss
from paddle.metric import Accuracy
import paddle.vision.transforms as T
# When running on a GPU machine you can set a use_gpu variable to True and adjust flexibly
# print(paddle.device.get_device()) # check the current device
# use_gpu = True
# paddle.set_device('gpu:0') if use_gpu else paddle.set_device('cpu')
# Paths used for saving the model and the visualization logs
import os
# filepath, filename = os.path.split(os.path.realpath(__file__))
# stem, suffix = os.path.splitext(filename) # filename .py
filepath = './output'
stem = 'cifar10_b' # version a uses the low-level API, version b uses the high-level API
SAVE_DIR = '{}/model/{}'.format(filepath, stem)
visualdl = paddle.callbacks.VisualDL(log_dir='{}/visualdl_log/{}'.format(filepath, stem))
# Make sure the image data loaded from paddle.vision.datasets.Cifar10 is of np.ndarray type
paddle.vision.set_image_backend('cv2')
# Instantiate the resnet50 model
model = paddle.Model(resnet50(pretrained=True, num_classes=10))
# Use the Cifar10 dataset
stats = ((0.491401, 0.4821591, 0.44653094), (0.20220289, 0.1993163, 0.20086345))
train_transform = T.Compose([
T.RandomCrop(32, padding=4),
T.RandomHorizontalFlip(),
T.ToTensor(),
T.Normalize(*stats)
])
test_transform = T.Compose([
T.ToTensor(),
T.Normalize(*stats)
])
train_dataset = Cifar10(mode='train', transform=train_transform)
val_dataset = Cifar10(mode='test', transform=test_transform)
# Define the optimizer
optimizer = Momentum(
    learning_rate=0.01, # because this is the high-level API, a better learning-rate schedule is not applied here
momentum=0.9,
weight_decay=L2Decay(1e-4),
parameters=model.parameters())
# Prepare the model for training
model.prepare(optimizer, CrossEntropyLoss(), Accuracy(topk=(1, 5)))
# Start training
model.fit( # if train_dataset is already a DataLoader, batch_size and shuffle are ignored
train_dataset,
val_dataset,
epochs=100,
batch_size=64,
save_dir=SAVE_DIR,
save_freq=10,
num_workers=8,
verbose=1,
shuffle=True,
callbacks=visualdl)
|
python
|
# Generated by Django 2.1.7 on 2019-06-03 05:56
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('blog', '0015_auto_20190601_2202'),
]
operations = [
migrations.AlterField(
model_name='post',
name='created_date',
field=models.DateTimeField(default=datetime.datetime(2019, 6, 3, 5, 56, 44, 241352, tzinfo=utc)),
),
]
|
python
|
from flask_babel import lazy_gettext as _
from .model import (
Session,
CirculationEvent,
ExternalIntegration,
get_one,
create
)
class LocalAnalyticsProvider(object):
NAME = _("Local Analytics")
DESCRIPTION = _("Store analytics events in the 'circulationevents' database table.")
# A given site can only have one analytics provider.
CARDINALITY = 1
# Where to get the 'location' of an analytics event.
LOCATION_SOURCE = "location_source"
# The 'location' of an analytics event is the 'neighborhood' of
# the request's authenticated patron.
LOCATION_SOURCE_NEIGHBORHOOD = "neighborhood"
# Analytics events have no 'location'.
LOCATION_SOURCE_DISABLED = ""
SETTINGS = [
{
"key": LOCATION_SOURCE,
"label": _("Geographic location of events"),
"description": _("Local analytics events may have a geographic location associated with them. How should the location be determined?<p>Note: to use the patron's neighborhood as the event location, you must also tell your patron authentication mechanism how to <i>gather</i> a patron's neighborhood information."),
"default": LOCATION_SOURCE_DISABLED,
"type": "select",
"options": [
{ "key": LOCATION_SOURCE_DISABLED, "label": _("Disable this feature.") },
{ "key": LOCATION_SOURCE_NEIGHBORHOOD, "label": _("Use the patron's neighborhood as the event location.") },
],
},
]
def __init__(self, integration, library=None):
self.integration_id = integration.id
self.location_source = integration.setting(
self.LOCATION_SOURCE
).value or self.LOCATION_SOURCE_DISABLED
if library:
self.library_id = library.id
else:
self.library_id = None
def collect_event(self, library, license_pool, event_type, time,
old_value=None, new_value=None, **kwargs):
if not library and not license_pool:
raise ValueError("Either library or license_pool must be provided.")
if library:
_db = Session.object_session(library)
else:
_db = Session.object_session(license_pool)
if library and self.library_id and library.id != self.library_id:
return
neighborhood = None
if self.location_source == self.LOCATION_SOURCE_NEIGHBORHOOD:
neighborhood = kwargs.pop("neighborhood", None)
return CirculationEvent.log(
_db, license_pool, event_type, old_value, new_value, start=time,
library=library, location=neighborhood
)
@classmethod
def initialize(cls, _db):
"""Find or create a local analytics service.
"""
# If a local analytics service already exists, return it.
local_analytics = get_one(
_db, ExternalIntegration,
protocol=cls.__module__,
goal=ExternalIntegration.ANALYTICS_GOAL
)
# If a local analytics service already exists, don't create a
# default one. Otherwise, create it with default name of
# "Local Analytics".
if not local_analytics:
local_analytics, ignore = create(
_db, ExternalIntegration,
protocol=cls.__module__,
goal=ExternalIntegration.ANALYTICS_GOAL,
name=str(cls.NAME)
)
return local_analytics
Provider = LocalAnalyticsProvider
|
python
|
import shutil, ssl, json
import requests, zipfile, gzip, io
import pandas as pd
from tbsdq import utilities as dqutils
from tbsdq import configuration as dqconfig
from topN import configuration as topnconfig
""" Non-Spatial dataset analysis on the open data registry """
def fetch_and_parse_catalogue(zip_url, expected_file):
format_list = ['CSV', 'JSON', 'JSONL', 'XML', 'XLS', 'XLSX', 'XLSM', 'TXT', 'TAB', 'ZIP']
df = pd.DataFrame()
catalogue_data = []
dqutils.get_and_extract_gzip(zip_url, expected_file)
try:
with open(expected_file, encoding=dqconfig.encoding_type) as f:
for line in f:
data = json.loads(line)
for r in data['resources']:
dict_entry = {
"c_id": r['package_id'],
"source_format": r['format'],
"resource_type": r['resource_type']
}
catalogue_data.append(dict_entry)
except:
print('Error reading expected file: {0}'.format(expected_file))
pass
if len(catalogue_data) > 0:
df = pd.DataFrame(catalogue_data)
df['dataset_format'] = df.source_format.apply(lambda x: x.upper() if x.upper() in format_list else 'OTHER')
df = df[df['resource_type'].isin(['dataset','api'])]
        df['dataset_format'] = pd.Categorical(df['dataset_format'], format_list + ['OTHER'])
df = df[['c_id', 'dataset_format']]
df = df.sort_values(['c_id','dataset_format'])
df = df.groupby('c_id', as_index=False).first()
return df
def build_top_N_dataset():
try:
ssl._create_default_https_context = ssl._create_unverified_context
output_file = topnconfig.topN_file
output_size = topnconfig.topN_size
df_downloads = dqutils.excel_delete_and_merge_tabs(topnconfig.downloads_file, ['Summary by departments', 'Top 100 Datasets'])
df_downloads.columns = ['d_id', 'd_title_en', 'd_title_fr', 'd_downloads']
df_visits = dqutils.excel_delete_and_merge_tabs(topnconfig.visits_file, ['Summary by departments', 'Top 100 Datasets'])
df_visits.columns = ['v_id', 'v_title_en', 'v_title_fr', 'v_visits']
df_analysis = pd.merge(left=df_downloads, right=df_visits, left_on='d_id', right_on='v_id')
df_analysis = df_analysis[['d_id', 'd_title_en', 'd_downloads', 'v_visits']]
df_nonspatial = pd.read_csv(topnconfig.nonspatial_file, encoding=dqconfig.encoding_type)
df_merged = pd.merge(left=df_nonspatial, right=df_analysis, left_on='id_s', right_on='d_id')
df_merged.columns = ['id', 'owner_org_en', 'owner_org_fr', 'title_en', 'title_fr', 'desc_en', 'desc_fr', 'link_en', 'link_fr', 'id2', 'title_en_2', 'downloads', 'visits']
col_list = ['id', 'owner_org_en', 'title_en', 'desc_en', 'link_en', 'downloads', 'visits']
df = df_merged[col_list]
df_openness = dqutils.fetch_zipped_csv(topnconfig.openness_zip_file, topnconfig.openness_file)
if not df_openness.empty:
df_openness.columns = ['department', 'title', 'URL', 'openness_rating']
df_openness[['e_url', 'f_url']] = df_openness['URL'].str.split('|', expand=True)
df_openness['o_id'] = df_openness['e_url'].str.split('/').str[-1].str.strip()
df_openness = df_openness[['o_id', 'openness_rating']]
df = pd.merge(left=df, right=df_openness, left_on='id', right_on='o_id')
col_list.append('openness_rating')
df = df[col_list]
df_catalogue = fetch_and_parse_catalogue(topnconfig.catalogue_zip_file, topnconfig.catalogue_file)
if not df_catalogue.empty:
df = pd.merge(left=df, right=df_catalogue, left_on='id', right_on='c_id')
col_list.append('dataset_format')
df = df[col_list]
df_user_ratings = pd.read_csv(topnconfig.user_ratings_file, encoding=dqconfig.encoding_type)
df_user_ratings.columns = ['j1', 'j2', 'ur_id', 'user_rating_score', 'user_rating_count', 'l1', 'l2']
df_user_ratings = df_user_ratings[['ur_id', 'user_rating_score', 'user_rating_count']]
df = pd.merge(left=df, right=df_user_ratings, left_on='id', right_on='ur_id')
col_list.append('user_rating_score')
col_list.append('user_rating_count')
df = df[col_list]
df.sort_values(by='downloads', ascending=False).head(output_size).to_csv(output_file, index=None, header=True, encoding=dqconfig.encoding_type)
if dqconfig.remove_temp_data:
shutil.rmtree(dqconfig.temp_data_folder)
except:
raise
|
python
|
from django.contrib.auth.models import Permission
class TestPermissionsMixin:
def _clean_permissions(self):
self.user.user_permissions.clear()
self._clean_perm_cache()
def _set_permissions(self, perms):
self.user.user_permissions.add(
*Permission.objects.filter(codename__in=perms)
)
self._clean_perm_cache()
def _clean_perm_cache(self):
for cache in ['_user_perm_cache', '_perm_cache']:
if hasattr(self.user, cache):
delattr(self.user, cache)
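# Illustrative use of the mixin (class, test and permission names are hypothetical,
# not taken from this project's test suite):
#
# class WidgetViewTests(TestPermissionsMixin, TestCase):
#     def test_view_requires_permission(self):
#         self._set_permissions(["view_widget"])
#         ...  # exercise the view as self.user
#         self._clean_permissions()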
|
python
|
# -*- coding: utf-8 -*-
"""
github3.repos.commit
====================
This module contains the RepoCommit class alone
"""
from __future__ import unicode_literals
from . import status
from .. import git, models, users
from .comment import RepoComment
class RepoCommit(models.BaseCommit):
"""The :class:`RepoCommit <RepoCommit>` object. This represents a commit as
viewed by a :class:`Repository`. This is different from a Commit object
returned from the git data section.
Two commit instances can be checked like so::
c1 == c2
c1 != c2
And is equivalent to::
c1.sha == c2.sha
c1.sha != c2.sha
"""
def _update_attributes(self, commit):
super(RepoCommit, self)._update_attributes(commit)
#: :class:`User <github3.users.User>` who authored the commit.
self.author = self._class_attribute(commit, 'author', users.User, self)
#: :class:`User <github3.users.User>` who committed the commit.
self.committer = self._class_attribute(
commit, 'committer', users.User, self
)
#: :class:`Commit <github3.git.Commit>`.
self.commit = self._class_attribute(commit, 'commit', git.Commit, self)
self.sha = self._get_attribute(commit, 'sha')
#: The number of additions made in the commit.
self.additions = 0
#: The number of deletions made in the commit.
self.deletions = 0
#: Total number of changes in the files.
self.total = 0
stats = self._get_attribute(commit, 'stats')
if stats and stats is not self.Empty:
self.additions = commit['stats'].get('additions')
self.deletions = commit['stats'].get('deletions')
self.total = commit['stats'].get('total')
#: The files that were modified by this commit.
self.files = self._get_attribute(commit, 'files', [])
self._uniq = self.sha
#: The commit message
self.message = getattr(self.commit, 'message', self.Empty)
def _repr(self):
return '<Repository Commit [{0}]>'.format(self.sha[:7])
def diff(self):
"""Retrieve the diff for this commit.
:returns: the diff as a bytes object
:rtype: bytes
"""
resp = self._get(self._api,
headers={'Accept': 'application/vnd.github.diff'})
return resp.content if self._boolean(resp, 200, 404) else b''
def patch(self):
"""Retrieve the patch formatted diff for this commit.
:returns: the patch as a bytes object
:rtype: bytes
"""
resp = self._get(self._api,
headers={'Accept': 'application/vnd.github.patch'})
return resp.content if self._boolean(resp, 200, 404) else b''
def statuses(self):
"""Retrieve the statuses for this commit.
:returns: the statuses for this commit
:rtype: :class:`~github3.repos.status.Status`
"""
url = self._build_url('statuses', base_url=self._api)
return self._iter(-1, url, status.Status)
def comments(self, number=-1, etag=None):
"""Iterate over comments for this commit.
:param int number: (optional), number of comments to return. Default:
-1 returns all comments
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of
:class:`RepoComment <github3.repos.comment.RepoComment>`\ s
"""
url = self._build_url('comments', base_url=self._api)
return self._iter(int(number), url, RepoComment, etag=etag)
|
python
|
from pymusicterm.util.time import milliseconds_to_minutes, milliseconds_to_seconds
from py_cui.widgets import Widget
class LoadingBarWidget(Widget):
BAR_COMPLETED_CHAR=u'\u2588'
def __init__(self,id,title,grid,row,column,row_span,column_span,padx,pady,logger) -> None:
""" Initializer for LoadingBar Widget
"""
super().__init__(id,title,grid,row,column,row_span,column_span,padx,pady,logger)
self._draw_border=True
self._num_items=10
self._completed_items=0
self._total_duration=title
self._time_elapsed=title
def increment_items(self,time_elapsed:int):
self._completed_items=time_elapsed
minutes=milliseconds_to_minutes(self._completed_items)
seconds=milliseconds_to_seconds(self._completed_items)
self._time_elapsed='{}:{}'.format(minutes,seconds)
self._title='{}-{}'.format(self._time_elapsed,self._total_duration)
def set_total_duration(self,total_duration:int):
self._num_items=total_duration
self._completed_items=0
minutes=milliseconds_to_minutes(self._num_items)
seconds=milliseconds_to_seconds(self._num_items)
self._time_elapsed="0:00"
self._total_duration='{}:{}'.format(minutes,seconds)
self._title='{}-{}'.format(self._time_elapsed,self._total_duration)
def _draw(self):
""" Override base draw class.
"""
super()._draw()
self._title="{}-{}".format(self._time_elapsed,self._total_duration)
width=self._stop_x -self._start_x
bar_width=width
items_per_bar_block=self._num_items / bar_width
bar_blocks_per_item=bar_width/self._num_items
if items_per_bar_block >=1:
completed_blocks=int(self._completed_items/items_per_bar_block)
else:
completed_blocks=int(bar_blocks_per_item * self._completed_items)
non_completed_blocks= bar_width - completed_blocks
#TODO: STOP INCREMENT
text='{}{}'.format(self.BAR_COMPLETED_CHAR* completed_blocks,'-'*non_completed_blocks)
self._renderer.set_color_mode(self._color)
# if self._draw_border:
# self._renderer.draw_border(self,with_title=True)
target_y=self._start_y+int(self._height/2)
#FIXME: DOESN'T UPDATE IN REALTIME
self._renderer.set_color_mode(self._color)
self._renderer.draw_text(self,self._title,target_y-1,centered=True,selected=True)
self._renderer.draw_text(self,text,target_y,centered=True,bordered=self._draw_border,selected=True)
self._renderer.unset_color_mode(self._color)
self._renderer.reset_cursor(self)
|
python
|
import json
from typing import List, Dict, Union
import bleach
allowed_items = {
"section", "subsection", "link"
}
allowed_sub_items = {
"title",
"url",
"metadata",
"desc",
"commentary"
}
def clean_submission(upload_post: str) -> List[Union[str, Dict[str, str]]]:
output = []
for resource in json.loads(upload_post):
for key, value in resource.items():
if key in allowed_items:
included_item = {}
if key == "link":
for link_key, link_value in value.items():
if link_key in allowed_sub_items:
included_item[link_key] = bleach.clean(link_value)
else:
included_item = bleach.clean(value)
output.append({key: included_item})
return output
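if __name__ == "__main__":
    # Quick check with a made-up payload (the resource below is illustrative only):
    # keys outside the allow-lists are dropped and the remaining values pass through bleach.
    sample = json.dumps([
        {"section": "Reading list"},
        {"link": {"title": "Docs", "url": "https://example.com", "tracking": "<script>x</script>"}},
    ])
    print(clean_submission(sample))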
|
python
|
import psycopg2 as pg
from psycopg2.extras import DictCursor
from getpass import getpass
##############
# Connecting #
##############
cx = pg.connect(
host='localhost', database='abq',
user=input('Username: '),
password=getpass('Password: '),
cursor_factory=DictCursor
)
cur = cx.cursor()
#####################
# Executing Queries #
#####################
cur.execute("""
CREATE TABLE test
(id SERIAL PRIMARY KEY, val TEXT)
""")
cur.execute("""
INSERT INTO test (val)
VALUES ('Banana'), ('Orange'), ('Apple');
""")
###################
# Retrieving Data #
###################
cur.execute("SELECT * FROM test")
num_rows = cur.rowcount
data = cur.fetchall()
print(f'Got {num_rows} rows from database:')
#print(data)
for row in data:
# DictCursor rows can use string indexes
print(row['val'])
#########################
# Parameterized Queries #
#########################
new_item = input('Enter new item: ')
#Never do this:
#cur.execute(f"INSERT INTO test (val) VALUES ('{new_item}')")
cur.execute("INSERT INTO test (val) VALUES (%s)", (new_item,))
# or:
# cur.execute("INSERT INTO test (val) VALUES (%(item)s)", {'item': new_item})
cur.execute('SELECT * FROM test')
print(cur.fetchall())
###############
# Cleaning Up #
###############
# Call this to actually save the data before leaving
# cx.commit()
# This is usually not necessary, but you can do it if you wish.
#cx.close()
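# Optional error-handling sketch (illustrative, reusing the cx/cur objects above):
# a failed statement poisons the current transaction, so roll it back to keep
# the connection usable.
# try:
#     cur.execute("INSERT INTO test (val) VALUES (%s)", ('Cherry',))
#     cx.commit()
# except pg.Error as e:
#     cx.rollback()
#     print('Database error:', e)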
|
python
|
# Imports needed for this snippet to stand alone (PyTorch and PyTorch Geometric utilities).
import torch
from torch.utils.data import DataLoader
from torch_geometric.utils import negative_sampling


def train(model, link_predictor, emb, edge_index, pos_train_edge, batch_size, optimizer):
"""
Runs offline training for model, link_predictor and node embeddings given the message
edges and supervision edges.
1. Updates node embeddings given the edge index (i.e. the message passing edges)
2. Computes predictions on the positive supervision edges
3. Computes predictions on the negative supervision edges (which are sampled)
4. Computes the loss on the positive and negative edges and updates parameters
"""
model.train()
link_predictor.train()
train_losses = []
for edge_id in DataLoader(range(pos_train_edge.shape[0]), batch_size, shuffle=True):
optimizer.zero_grad()
node_emb = model(emb, edge_index) # (N, d)
pos_edge = pos_train_edge[edge_id].T # (2, B)
pos_pred = link_predictor(node_emb[pos_edge[0]], node_emb[pos_edge[1]]) # (B, )
        neg_edge = negative_sampling(edge_index, num_nodes=emb.shape[0],
                                     num_neg_samples=edge_id.shape[0], method='dense')  # (2, Ne)
neg_pred = link_predictor(node_emb[neg_edge[0]], node_emb[neg_edge[1]]) # (Ne,)
loss = -torch.log(pos_pred + 1e-15).mean() - torch.log(1 - neg_pred + 1e-15).mean()
loss.backward()
optimizer.step()
train_losses.append(loss.item())
return sum(train_losses) / len(train_losses)
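# Illustrative wiring (the names below are hypothetical placeholders, not part of this module):
# gnn = GNNStack(input_dim, hidden_dim, num_layers)       # any message-passing encoder
# predictor = LinkPredictor(hidden_dim, hidden_dim, 1)    # scores a pair of node embeddings
# emb = torch.nn.Embedding(num_nodes, input_dim).weight   # learnable node features
# optimizer = torch.optim.Adam(
#     list(gnn.parameters()) + list(predictor.parameters()) + [emb], lr=1e-3)
# for epoch in range(epochs):
#     loss = train(gnn, predictor, emb, edge_index, pos_train_edge,
#                  batch_size=64 * 1024, optimizer=optimizer)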
|
python
|
from .base import TemplateView
class Index(TemplateView):
template_name = 'feats/index.html'
|
python
|
from github import GitHub
username = "github_username"
db_path_account = "C:/GitHub.Accounts.sqlite3"
db_path_api = "C:/GitHub.Apis.sqlite3"
g = GitHub.GitHub(db_path_account, db_path_api, username)
g.repo.create('repository_name', description='this is repository description.', homepage='http://homepage.com')
|
python
|
#!/usr/bin/env python2.7
import argparse
import sys
import rospy
import tf
from gazebo_msgs.srv import GetLinkState, GetLinkStateRequest
parser = argparse.ArgumentParser()
parser.add_argument("object_name", help="Name of the object")
parser.add_argument("link_name", help="Link of the object")
parser.add_argument("world_name", help="Name of the parent link")
args = parser.parse_args(sys.argv[1:4])
gz_name = "::".join([args.object_name, args.link_name])
tf_name = "/".join([args.object_name, args.link_name])
world_name = args.world_name
if __name__ == "__main__":
rospy.init_node("gazebo_object_to_tf")
rate = rospy.Rate(10)
rospy.wait_for_service("/gazebo/get_link_state")
get_link_state = rospy.ServiceProxy("/gazebo/get_link_state", GetLinkState)
request = GetLinkStateRequest(gz_name, "")
tb = tf.TransformBroadcaster()
while not rospy.is_shutdown():
rospy.wait_for_service("/gazebo/get_link_state")
ans = get_link_state(request)
if ans.success:
tb.sendTransform(
(
ans.link_state.pose.position.x,
ans.link_state.pose.position.y,
ans.link_state.pose.position.z,
),
(
ans.link_state.pose.orientation.x,
ans.link_state.pose.orientation.y,
ans.link_state.pose.orientation.z,
ans.link_state.pose.orientation.w,
),
rospy.Time.now(),
tf_name,
world_name,
)
else:
rospy.logerr(ans.status_message)
rate.sleep()
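# Example invocation (the script filename is an assumption; the three positional
# arguments map to object_name, link_name and world_name as parsed above):
#   ./gazebo_object_to_tf.py my_robot base_link world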
|
python
|
import json
from datetime import date
from unittest.mock import MagicMock, patch
from django.contrib.auth import get_user_model
from django.core.management import call_command
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from exo_currency.models import Currency, CurrencyExchangeRate, Provider
CURRENCY_EXCHANGE_RATES_URL = 'currency-exchange-rates'
CALCULATE_AMOUNT_URL = 'calculate-amount'
CALCULATE_TIME_WEIGHTED_RATE_URL = 'calculate-time-weighted-rate'
class ExoCurrencyV1ApiTests(TestCase):
"""Test for the API V1 of exo_currency"""
def setUp(self):
self.client = APIClient()
providers = Provider.objects.all().count()
if not providers:
call_command('load_providers')
currencies = Currency.objects.all().count()
if not currencies:
call_command('load_currencies', 'currencies.csv')
currency_exchange_rates = CurrencyExchangeRate.objects.all().count()
if not currency_exchange_rates:
call_command('load_exchange_rates', 'data.csv')
def test_currency_exchange_rates(self):
"""Test listing currency exchange rates with a valid request"""
res = self.client.get(reverse(CURRENCY_EXCHANGE_RATES_URL))
currency_exchange_rates_count = CurrencyExchangeRate.objects.all().count()
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data['count'], currency_exchange_rates_count)
def test_currency_exchange_rates_with_from(self):
"""Test listing currency exchange rates with a valid request with from filter"""
_from = '2020-01-01'
url = '{}{}'.format(
reverse(CURRENCY_EXCHANGE_RATES_URL),
'?from=' + _from,
)
res = self.client.get(url)
currency_exchange_rates_count = CurrencyExchangeRate.objects.filter(valuation_date__gte=_from).count()
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data['count'], currency_exchange_rates_count)
def test_currency_exchange_rates_with_to(self):
"""Test listing currency exchange rates with a valid request with to filter"""
_to = '2020-06-10'
url = '{}{}'.format(
reverse(CURRENCY_EXCHANGE_RATES_URL),
'?to=' + _to,
)
res = self.client.get(url)
currency_exchange_rates_count = CurrencyExchangeRate.objects.filter(valuation_date__lte=_to).count()
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data['count'], currency_exchange_rates_count)
def test_currency_exchange_rates_with_from_and_to(self):
"""Test listing currency exchange rates with a valid request with from and to filter"""
_from = '2020-06-05'
_to = '2020-06-10'
url = '{}{}{}'.format(
reverse(CURRENCY_EXCHANGE_RATES_URL),
'?from=' + _from,
'&to=' + _to,
)
res = self.client.get(url)
currency_exchange_rates_count = CurrencyExchangeRate.objects\
.filter(valuation_date__gte=_from)\
.filter(valuation_date__lte=_to)\
.count()
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data['count'], currency_exchange_rates_count)
@patch('requests.get')
def test_calculate_amount(self, mock_get):
"""Test creating user with valid payload is successful"""
mock_return_value = MagicMock()
mock_return_value.status_code = 200
mock_return_value.text = json.dumps({
'success': True,
            'timestamp': 1363478399,
'historical': True,
'base': 'EUR',
'date': '2020-06-23',
'rates': {
'USD': 1.307716,
}
})
mock_get.return_value = mock_return_value
url = reverse(
CALCULATE_AMOUNT_URL,
kwargs={
'origin_currency': 'EUR',
'amount': '5',
'target_currency': 'USD',
},
)
res = self.client.get(url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertIsNotNone(res.data.get('amount'))
@patch('requests.get')
def test_calculate_time_weighted_rate(self, mock_get):
"""Test creating user with valid payload is successful"""
mock_return_value = MagicMock()
mock_return_value.status_code = 200
mock_return_value.text = json.dumps({
'success': True,
            'timestamp': 1363478399,
'historical': True,
'base': 'EUR',
'date': '2020-06-23',
'rates': {
'USD': 1.307716,
}
})
mock_get.return_value = mock_return_value
url = reverse(
CALCULATE_TIME_WEIGHTED_RATE_URL,
kwargs={
'origin_currency': 'EUR',
'amount': '5',
'target_currency': 'USD',
'date_invested': '2020-06-20',
},
)
res = self.client.get(url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertIsNotNone(res.data.get('twr'))
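# To run this suite (assuming a standard Django project layout with manage.py at the root):
#   python manage.py test exo_currency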
|
python
|
from flask import Flask, request, Response
import json
import numpy as np
import gpt_gen
import gpt_gen_thread
import sys
import time
import logging
import torch
from Config import config_predict
from datetime import datetime
ConfigPredict = config_predict()
batchGenerating=ConfigPredict.batchGenerating
path_configs = ConfigPredict.model_configs
num0 = ConfigPredict.predict_nums
tags = ConfigPredict.tags
rmHFW = ConfigPredict.rmHFW
maxNext = ConfigPredict.maxNext_JLX
path_next = ConfigPredict.path_JLX_next
path_simi = ConfigPredict.path_JLX_simi
model,tokenizer,config,device,GPUs = [],[],[],[],[]
ModelIndex = []
for ii in range(len(path_configs)):
M0,T0,C0,D0 = [],[],[],[]
gpus = ConfigPredict.gpus[ii].split(',')
idx = path_configs[ii].index('config_')+len('config_')
key = path_configs[ii][idx:-5]
for gpu in gpus:
m0,t0,c0,d0 = gpt_gen.getModel(path_config=path_configs[ii],gpu=gpu)
c0['repetition_penalty'] = ConfigPredict.repetition_penalty[ii]
c0['temperature'] = ConfigPredict.temperature[ii]
c0['length'] = ConfigPredict.length[ii]
M0.append(m0)
T0.append(t0)
C0.append(c0)
D0.append(d0)
model.append(M0)
tokenizer.append(T0)
config.append(C0)
device.append(D0)
ModelIndex.append([kk for kk in range(len(gpus))])
GPUs.append(gpus)
D_simi = json.load(open(path_simi,'r',encoding='utf-8'))
D_next = json.load(open(path_next,'r',encoding='utf-8'))
D_simi = {k:json.loads(D_simi[k]) for k in D_simi}
D_next = {k:json.loads(D_next[k]) for k in D_next}
def read_excel(path_source1,index=0):
import xlrd
    workbook = xlrd.open_workbook(path_source1)  # open the Excel workbook
    sheets = workbook.sheets()
    table = workbook.sheet_by_name(sheets[index].name)  # select the sheet by name
    rows_num = table.nrows  # number of rows
    cols_num = table.ncols  # number of columns
    res = []  # collect every row as a list of cell values
    for rows in range(rows_num):
        r = []
        for cols in range(cols_num):
            r.append(table.cell(rows, cols).value)  # read the cell value
res.append(r)
return res
def getdata(path_data='D:\项目\输入法\神配文数据\生成评测\主动详情准确率3.xlsx',path_targe='test_online/data/test_0416.json'):
data = read_excel(path_data,index=0)
D = []
prefix = ''
r = []
for i in range(1,len(data)):
if data[i][0]!='':
if prefix!='':
d = {'input':prefix,'ouput':r}
D.append(d)
prefix = data[i][0]
r = []
if data[i][4]==0 or data[i][4]==1 or data[i][4]==2:
r.append(data[i][3]+'\t'+str(int(data[i][4])))
if prefix!='':
d = {'input':prefix,'ouput':r}
D.append(d)
with open(path_targe,'w',encoding='utf-8') as f:
json.dump(D,f,ensure_ascii=False,indent=4)
def write_excel(path_target,data,sheetname='Sheet1'):
import xlwt
    # create a workbook with UTF-8 encoding
    workbook = xlwt.Workbook(encoding='utf-8')
    # add a worksheet
    worksheet = workbook.add_sheet(sheetname)
    # write each cell; the arguments are (row, column, value)
    rows, cols = len(data), len(data[0])
    for i in range(rows):
        for j in range(cols):
            worksheet.write(i, j, label=str(data[i][j]))
    # save the workbook
workbook.save(path_target)
def test(Data,sym='-new'):
modelidx = [np.random.randint(0, len(t)) for t in ModelIndex]
# gpu_av = GPUtil.getAvailable(order='load', limit=8, maxLoad=0.9, maxMemory=0.9)
# gpu_av = GPUtil.getAvailable(order='random',maxLoad=0.9, maxMemory=0.9, limit=8)
# gpu_av = GPUtil.getAvailable(order='memory', limit=8)
gpu_av = []
gpu_opt = 0
if len(gpu_av) > 0:
for i in range(len(gpu_av)):
for j in range(len(GPUs)):
if str(gpu_av[i]) in GPUs[j]:
gpu_opt = 1
modelidx[j] = GPUs[j].index(str(gpu_av[i]))
break
quick = False
app = ''
t0 = time.time()
modl = [model[ii][modelidx[ii]] for ii in range(len(modelidx))]
conf = [config[ii][modelidx[ii]] for ii in range(len(modelidx))]
tokn = [tokenizer[ii][modelidx[ii]] for ii in range(len(modelidx))]
devi = [device[ii][modelidx[ii]] for ii in range(len(modelidx))]
ConfigPredict.gpus = [GPUs[ii][modelidx[ii]] for ii in range(len(modelidx))]
if ConfigPredict.useThread:
for D in Data:
result = gpt_gen_thread.generating_thread(app, D['input'], modl, conf, tokn, devi, ConfigPredict, quick, num0,
removeHighFreqWordss=rmHFW, batchGenerating=batchGenerating,
tags=tags,
D_simi=D_simi, D_next=D_next, maxNext=maxNext, maxChoice=10)
D['ouput'].extend([r + sym for r in result])
    else:
        # Non-threaded path: generate for each input with the per-model instances
        # selected above; num0[-1] is assumed to be the sample count for the nnlm model.
        for D in Data:
            result = []
            for ii in range(len(path_configs)):
                gpu = ConfigPredict.gpus[ii]
                torch.cuda.set_device(int(gpu))
                if ii == 1:
                    r0 = gpt_gen.generating_poem(app, D['input'], modl[ii], conf[ii], tokn[ii], devi[ii], quick,
                                                 num0[ii], batchGenerating=batchGenerating, gpu=gpu)
                else:
                    r0 = gpt_gen.generating(app, D['input'], modl[ii], conf[ii], tokn[ii], devi[ii],
                                            ConfigPredict, quick=quick, num=num0[ii], removeHighFreqWords=rmHFW[ii],
                                            batchGenerating=batchGenerating, gpu=gpu)
                r0 = [rr + tags[ii] for rr in r0]
                result.extend(r0)
            result_nnlm = gpt_gen.nnlm_modelpredict(D_simi, D_next, ConfigPredict, inputStr=D['input'],
                                                    maxNext=maxNext, maxChoice=10, num=num0[-1])
            result += [tmp + tags[-1] for tmp in result_nnlm]
            D['ouput'].extend([r + sym for r in result])
t1 = time.time()
modelidx_s = ','.join([str(t) for t in ConfigPredict.gpus])
print('total inputs:{} and use time: {} s'.format(len(Data), '%0.4f' % (t1 - t0)))
return Data
def test_myself(path_data='D:\\项目\\输入法\\数据处理\\GPT2-Chinese\\test_online\\result\\test_text4.json'):
import random
import json
with open(path_data,'r',encoding='utf-8') as f:
Data = json.load(f)
A = []
for ii in range(len(Data)):
r = Data[ii]['output']
n0 = 0
n1 = 0
a = []
for i in range(len(r)):
if '(0)' in r[i]:
r[i] = r[i].replace('(0)','')
tag = '0'
n0 += 1
if n0 < 3:
a.append([Data[ii]['input']] + [r[i]] + [tag])
else:
r[i] = r[i].replace('(1)', '')
tag = '1'
n1 += 1
if n1 < 3:
a.append([Data[ii]['input']] + [r[i]] + [tag])
random.shuffle(a)
A.extend(a)
write_excel(path_data.replace('json','xls'), A)
def test_result(path0,n0,n1):
#path0 = 'D:\\项目\\输入法\\数据处理\\GPT2-Chinese\\test_online\\result\\test_text3.xls'
Data = read_excel(path0)
N0 = 0
N1 = 0
T0 = [0, 0, 0]
T1 = [0, 0, 0]
for i in range(n0,n1):
if Data[i][4] == '0':
N0 += 1
T0[int(Data[i][2]) - 1] += 1
else:
N1 += 1
T1[int(Data[i][2]) - 1] += 1
print(N0, N1)
print(T0, T1)
print((T0[1] + T0[2]) / N0, (T1[1] + T1[2]) / N1)
def main(path_source,sym):
print('test-begin')
with open(path_source,'r') as f:
Data = json.load(f)
t0 = time.time()
Data = test(Data,sym)
t1 = time.time()
print('total samples:{},used time:{} s,QPS:{}'.format(len(Data),'%0.4f'%(t1-t0),'%0.4f'%(len(Data)/(t1-t0))))
with open(path_source.replace('.json','-new.json'),'w',encoding='utf-8') as f:
json.dump(Data,f,ensure_ascii=False,indent=4)
path_target = path_source.replace('.json', '-new.json').replace('.json','.xls')
A = []
for ii in range(len(Data)):
a = [Data[ii]['input']]
r = Data[ii]['ouput']
for i in range(len(r)):
t = r[i].split('\t')
if len(t)==1:
b = [t[0],'']
else:
b = t
if i==0:
a = [Data[ii]['input']]+b
else:
a = ['']+b
A.append(a)
write_excel(path_target,A)
print('test-over')
if __name__=='__main__':
path_source,sym=sys.argv[1:]
main(path_source,sym)
|
python
|
import pickle
class Carro:
def __init__(self, modelo, cor):
self.modelo = modelo
self.cor = cor
def __repr__(self):
return f'Carro(modelo="{self.modelo}", cor="{self.cor}")'
carro_1 = Carro('Celta', 'Prata')
data = {
'a': 'Eduardo',
'b': 'Banans',
'c': [1, 2, 3, 4],
'd': {1, 2, 3, 4},
'car': carro_1
}
dumped_data = pickle.dumps(data)
xpto = pickle.loads(dumped_data)
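# Round-trip check: the unpickled object is an independent copy, and the custom
# Carro instance is reconstructed as a real object (its class must be importable).
print(xpto['car'])   # Carro(modelo="Celta", cor="Prata")
print(xpto is data)  # False: loads() builds a new object graph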
|
python
|
"""
1579. Remove Max Number of Edges to Keep Graph Fully Traversable
Hard
Alice and Bob have an undirected graph of n nodes and 3 types of edges:
Type 1: Can be traversed by Alice only.
Type 2: Can be traversed by Bob only.
Type 3: Can be traversed by both Alice and Bob.
Given an array edges where edges[i] = [typei, ui, vi] represents a bidirectional edge of type typei between nodes ui and vi, find the maximum number of edges you can remove so that after removing the edges, the graph can still be fully traversed by both Alice and Bob. The graph is fully traversed by Alice and Bob if starting from any node, they can reach all other nodes.
Return the maximum number of edges you can remove, or return -1 if it's impossible for the graph to be fully traversed by Alice and Bob.
Example 1:
Input: n = 4, edges = [[3,1,2],[3,2,3],[1,1,3],[1,2,4],[1,1,2],[2,3,4]]
Output: 2
Explanation: If we remove the 2 edges [1,1,2] and [1,1,3]. The graph will still be fully traversable by Alice and Bob. Removing any additional edge will not make it so. So the maximum number of edges we can remove is 2.
Example 2:
Input: n = 4, edges = [[3,1,2],[3,2,3],[1,1,4],[2,1,4]]
Output: 0
Explanation: Notice that removing any edge will not make the graph fully traversable by Alice and Bob.
Example 3:
Input: n = 4, edges = [[3,2,3],[1,1,2],[2,3,4]]
Output: -1
Explanation: In the current graph, Alice cannot reach node 4 from the other nodes. Likewise, Bob cannot reach 1. Therefore it's impossible to make the graph fully traversable.
Constraints:
1 <= n <= 10^5
1 <= edges.length <= min(10^5, 3 * n * (n-1) / 2)
edges[i].length == 3
1 <= edges[i][0] <= 3
1 <= edges[i][1] < edges[i][2] <= n
All tuples (typei, ui, vi) are distinct.
"""
from typing import List


class Solution:
def maxNumEdgesToRemove(self, n: int, edges: List[List[int]]) -> int:
f = {}
def find(x):
f.setdefault(x, x)
if x != f[x]:
f[x] = find(f[x])
return f[x]
def union(x, y):
x = find(x)
y = find(y)
if x == y:
return False
f[x] = y
return True
res, e1, e2 = 0, 0, 0
for t, u, v in edges:
if t == 3:
if union(u, v):
e1 += 1
e2 += 1
else:
res += 1
copy_f = f.copy()
for t, u, v in edges:
if t == 1:
if union(u, v):
e1 += 1
else:
res += 1
f = copy_f
for t, u, v in edges:
if t == 2:
if union(u, v):
e2 += 1
else:
res += 1
return res if e1 == e2 == n - 1 else -1
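# Quick sanity check against the worked examples from the problem statement above:
if __name__ == "__main__":
    s = Solution()
    print(s.maxNumEdgesToRemove(4, [[3, 1, 2], [3, 2, 3], [1, 1, 3], [1, 2, 4], [1, 1, 2], [2, 3, 4]]))  # 2
    print(s.maxNumEdgesToRemove(4, [[3, 1, 2], [3, 2, 3], [1, 1, 4], [2, 1, 4]]))                        # 0
    print(s.maxNumEdgesToRemove(4, [[3, 2, 3], [1, 1, 2], [2, 3, 4]]))                                   # -1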
|
python
|
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst')) as f:
README = f.read()
requires = [
'cornice',
'gevent',
'pyramid_exclog',
'setuptools',
'couchdb',
'couchapp',
'pycrypto',
'openprocurement_client',
'munch',
'tzlocal',
'pyyaml',
'psutil',
'iso8601'
]
test_requires = requires + [
'requests',
'webtest',
'python-coveralls',
'nose',
'mock'
]
entry_points = {
'paste.app_factory': [
'main = openprocurement.edge.main:main'
],
'console_scripts': [
'edge_data_bridge = openprocurement.edge.databridge:main'
]
}
setup(name='openprocurement.edge',
version='1.0.0',
description='openprocurement.edge',
long_description=README,
classifiers=[
"Framework :: Pylons",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application"
],
keywords="web services",
author='Quintagroup, Ltd.',
author_email='[email protected]',
license='Apache License 2.0',
url='https://github.com/openprocurement/openprocurement.edge',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['openprocurement'],
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=test_requires,
extras_require={'test': test_requires},
test_suite="openprocurement.edge.tests.main.suite",
entry_points=entry_points)
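# Typical local workflow for this package (illustrative):
#   pip install -e .          # editable install with the runtime requires above
#   pip install -e .[test]    # additionally pulls in the 'test' extras
#   python setup.py test      # runs the declared test_suite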
|
python
|