#!/usr/bin/python
############################
# Library imports
############################
import os
import uuid
import checkUserInput
############################
# Functions
############################
def confirmation_address(address, what):
confirmation = False
if checkUserInput.question_and_verification("Confirmez-vous l'adresse " + address + " pour " + what + "\n[Y]: Oui\n[n]: Non\n[exit]: Quitter le programme\nReponse: ") == "y":
print("L'adresse " + address + " pour " + what + " a ete confirmee avec succes!")
confirmation = True
else:
print("L'adresse n'a pas ete confirmee! Veuillez la changer!")
return confirmation
def check_array_input(array):
for byte in array:
if len(byte) > 3 or byte == "":
print("L'adresse encodee n'est pas conforme!")
correctInput = False
break
else:
try:
int(byte)
correctInput = True
except:
print("L'adresse encodee n'est pas conforme!")
correctInput = False
break
return correctInput
def check_input(type, what):
while True:
try:
userInput = input("Encodez l'adresse" + type + "pour " + what + ": ")
arrayInput = userInput.split(".")
except:
print("Une erreur s'est produite!")
arrayInput = []
if len(arrayInput) == 4:
correctInput = check_array_input(arrayInput)
else:
print("L'adresse encodee n'est pas conforme!")
correctInput = False
if correctInput:
confirmation = confirmation_address(userInput, what)
if confirmation:
return arrayInput
def confirm_dhcp_address(dhcpAddress, place, goodAddress):
confirmation = False
if goodAddress:
confirmation = confirmation_address('.'.join(dhcpAddress), "la " + place + " adresse du DHCP")
if not goodAddress or not confirmation:
print("Veuillez encoder manuellement la " + place + " adresse du range DHCP!")
dhcpAddress = check_input(" ", place + " client du range DHCP")
return dhcpAddress
def calculate_dhcp(ipAddress, netmask):
networkAddress = []
broadcastAddress = []
index = 0
goodAddress = True
for byte in netmask:
if int(byte) == 255:
networkAddress.append(str(ipAddress[index]))
broadcastAddress.append(str(ipAddress[index]))
elif int(byte) == 0:
networkAddress.append(str(0))
broadcastAddress.append(str(255))
else:
rangeAddress = 255 - int(byte)
blockSize = rangeAddress + 1
nbSubNetwork = 256 // blockSize
trySubNetwork = 0
while trySubNetwork < nbSubNetwork:
networkAddressByte = trySubNetwork * blockSize
broadcastAddressByte = networkAddressByte + rangeAddress
if int(ipAddress[index]) >= networkAddressByte and int(ipAddress[index]) <= broadcastAddressByte:
networkAddress.append(str(networkAddressByte))
broadcastAddress.append(str(broadcastAddressByte))
break
trySubNetwork += 1
# advance to the next octet of the address
index += 1
firstDhcpAddress = networkAddress
lastDhcpAddress = broadcastAddress
try:
firstDhcpAddress[3] = str(int(firstDhcpAddress[3]) + 1)
lastDhcpAddress[3] = str(int(lastDhcpAddress[3]) - 1)
except:
goodAddress = False
firstDhcpAddress = confirm_dhcp_address(firstDhcpAddress, "premiere", goodAddress)
lastDhcpAddress = confirm_dhcp_address(lastDhcpAddress, "derniere", goodAddress)
return firstDhcpAddress, lastDhcpAddress
def configure_dhcp(ipAddress, netmask):
try:
print("Enable dnsmasq")
p = os.system("systemctl enable dnsmasq")
print("Start dnsmasq")
p = os.system("systemctl start dnsmasq")
except:
print("L'activation du service DHCP a echoue!")
firstDhcpAddress, lastDhcpAddress = calculate_dhcp(ipAddress, netmask)
# Prepare DHCP file
line = "dhcp-range=" + '.'.join(firstDhcpAddress) + "," + '.'.join(lastDhcpAddress) + "," + '.'.join(netmask) + ",12h"
try:
path = "/home/dev/Configuration-Folder/51-dhcp-range.conf"
with open(path, "w") as file:
file.write(line)
except:
print("Impossible d'enregistrer le fichier de configuration!")
def configure_ipv6():
if checkUserInput.question_and_verification("Voulez-vous utiliser le prefixe IPv6 par defaut d'EVESA: \"FD05:A40B:6F6::/48\" ?\n[Y]: Oui\n[n]: Non\n[exit]: Quitter le programme\nReponse: ") == "y":
ipv6Prefix = "FD05:A40B:6F6::/48"
else:
while True:
answer = input("Encodez le prefixe IPv6 sous la forme xxxx:xxxx:xxxx:xxxx/yy\nPrefixe: ")
if confirmation_address(answer, "le prefixe IPv6"):
ipv6Prefix = answer
break
try:
commandLine = "sudo netmgr -i iotr network_prefix set " + ipv6Prefix
print("Set IPv6 prefixe")
p = os.system(commandLine)
except:
print("Le prefixe IPv6 n'a pas pu etre encode!")
def search_network_informations(ipAddress, netmask, searchPath, filename):
try:
for root, dir, files in os.walk(searchPath):
if filename in files:
with open(os.path.join(root, filename), "r") as file:
previousLine = ""
ipv6Address = ""
for line in file:
if "iface eth0 inet6 static" in previousLine and "address" in line:
ipv6Address = line
previousLine = line
macAddress = (':'.join(['{:02x}'.format((uuid.getnode() >> element) & 0xff) for element in range(0,8*6,8)][::-1]))
line = "# Wired adapter #1\nauto eth0\niface eth0 inet static\n"
line += "\taddress " + '.'.join(ipAddress) + "\n"
line += "\tnetmask " + '.'.join(netmask) + "\n"
line += "\thwaddress ether " + macAddress + "\n"
if ipv6Address != "" :
line += "\tiface eth0 inet6 static\n"
line += ipv6Address
line += "\tnetmask 64\n\n"
line += "# Local loopback/n"
line += "auto lo\n"
line += "\tiface lo inet loopback"
path = "/home/dev/Configuration-Folder/interfaces_static"
with open(path, "w") as file:
file.write(line)
break
except:
print("Un probleme est survenu lors de la configuration des parametres du reseau!")
############################
# Main program
############################
def main():
ipAddress = check_input(" ip ", "le routeur")
netmask = check_input(" ", "le masque reseau")
if checkUserInput.question_and_verification("Voulez-vous utiliser le service DHCP du routeur?\n[Y]: Oui\n[n]: Non\n[exit]: Quitter le programme\nReponse: ") == "y":
configure_dhcp(ipAddress, netmask)
else:
try:
print("Stop dnsmasq")
p = os.system("systemctl stop dnsmasq")
print("Disable dnsmasq")
p = os.system("systemctl disable dnsmasq")
except:
print("La desactivation du service DHCP a echoue!")
if checkUserInput.question_and_verification("Voulez-vous utilisez les adresses IPv6?\n[Y]: Oui\n[n]: Non\n[exit]: Quitter le programme\nReponse: ") == "y":
configure_ipv6()
search_network_informations(ipAddress, netmask, "/var/snap/ssnmode", "interfaces_static")
|
python
|
# -*- python -*-
import logging
import unittest
import electionfraud.testdata as eftd
import electionfraud.redist as redist
logging.basicConfig(level=logging.DEBUG)
class RedistributorTest(unittest.TestCase):
pass
class TestIdentity(RedistributorTest):
def setUp(self):
self.rd = redist.Identity(eftd.FOOD_STV_20)
def test_identity(self):
redistributed = [x for x in self.rd]
self.assertEqual(redistributed, eftd.FOOD_STV_20)
class TestNthSubset(RedistributorTest):
def setUp(self):
self.n = 7
def test_divisor(self):
self.assertRaises(ValueError, redist.NthSubset, eftd.FOOD_STV_20, 1)
self.assertRaises(ValueError, redist.NthSubset, eftd.FOOD_STV_20, 2)
self.assertRaises(ValueError, redist.NthSubset, eftd.FOOD_STV_20, 4)
def test_depth(self):
rd = redist.NthSubset(range(20), self.n)
redistributed = [x for x in rd]
self.assertEqual(redistributed, [6, 13, 0, 7, 14, 1, 8, 15, 2, 9, 16, 3, 10, 17, 4, 11, 18, 5, 12, 19])
def test_food_distribution(self):
rd = redist.NthSubset(eftd.FOOD_STV_20, self.n)
redistributed = [x for x in rd]
self.assertEqual(len(eftd.FOOD_STV_20), len(redistributed))
self.assertNotEqual(eftd.FOOD_STV_20, redistributed)
class TestCincinnati(RedistributorTest):
def test_cincy_divisor(self):
self.assertRaises(ValueError, redist.Cincinnati, range(11))
def test_cincy_depth(self):
rd = redist.Cincinnati(range(20))
redistributed = [x for x in rd]
self.assertEqual(redistributed, [10, 1, 12, 3, 14, 5, 16, 7, 18, 9, 0, 11, 2, 13, 4, 15, 6, 17, 8, 19])
def test_cincy_food(self):
rd = redist.Cincinnati(eftd.FOOD_STV_20)
redistributed = [x for x in rd]
self.assertEqual(len(eftd.FOOD_STV_20), len(redistributed))
self.assertNotEqual(eftd.FOOD_STV_20, redistributed)
class TestHareRandom(RedistributorTest):
def setUp(self):
pass
def test_hare_depth(self):
tedium = 10
shuffles = list()
for i in range(tedium):
rd = redist.HareRandom(eftd.FOOD_STV_20)
shuffle = [x for x in rd]
self.assertEqual(len(eftd.FOOD_STV_20), len(shuffle))
self.assertNotEqual(eftd.FOOD_STV_20, shuffle)
shuffles.append(shuffle)
for i in range(tedium - 1):
self.assertNotEqual(shuffles[i], shuffles[i + 1])
if __name__ == '__main__':
unittest.main()
|
python
|
#%% cell
"""
# Solving a New Keynesian model with Python
This file is part of a computational appendix that accompanies the paper.
> MATLAB, Python, Julia: What to Choose in Economics?
>
> Coleman, Lyon, Maliar, and Maliar (2017)
In order to run the code in this file you will need to install and
configure a few Python packages. We recommend following the instructions
on
[quantecon.org](https://lectures.quantecon.org/jl/getting_started.html)
for getting a base Python installation set up. Then, to acquire the
additional packages used in this file, uncomment the lines in the
cell below (delete the `#` and space at the beginning of the line) and
run the cell.
For some details regarding the model solved in this file, please see
the [companion notebook that describes the model](http://bookshelf.quantecon.org/submission/59fa1b45145fc3772b0cef82).
"""
#%% cell
# !pip install git+https://github.com/EconForge/interpolation.py.git
# !pip install git+https://github.com/naught101/sobol_seq.git
# !pip install requests
#%% cell
"""
## Python Code
The Python version of our algorithm is implemented as a few methods defined on
a core class named `Model`. This class is itself composed of instances of three
different classes that hold the model parameters, steady state, and grids
needed to describe the numerical model. Before we get to the classes, we need
to bring in some dependencies:
"""
#%% cell
import os
import sys
import math
from math import sqrt
import time as time
from collections import namedtuple
import requests
if "table" not in sys.argv:
import matplotlib.pyplot as plt
# turn on interactive mode so plots display automatically
plt.ion()
import numpy as np
from numpy import exp
from scipy.io import loadmat
from interpolation.complete_poly import (_complete_poly_impl_vec,
_complete_poly_impl,
complete_polynomial)
import sobol_seq
# get path to this folder
DIR = os.path.abspath("")
# set seed on random number generator to make results reproducible
np.random.seed(42)
#%% cell
"""
We will also need the following two functions, which use monomial rules to
compute quadrature nodes and weights:
"""
#%% cell
def qnwmonomial1(vcv):
n = vcv.shape[0]
n_nodes = 2*n
z1 = np.zeros((n_nodes, n))
# In each node, random variable i takes value either 1 or -1, and
# all other variables take value 0. For example, for N = 2,
# z1 = [1 0; -1 0; 0 1; 0 -1]
for i in range(n):
z1[2*i:2*(i+1), i] = [1, -1]
sqrt_vcv = np.linalg.cholesky(vcv)
R = np.sqrt(n)*sqrt_vcv
ϵj = z1 @ R
ωj = np.ones(n_nodes) / n_nodes
return ϵj, ωj
def qnwmonomial2(vcv):
n = vcv.shape[0]
assert n == vcv.shape[1], "Variance covariance matrix must be square"
z0 = np.zeros((1, n))
z1 = np.zeros((2*n, n))
# In each node, random variable i takes value either 1 or -1, and
# all other variables take value 0. For example, for N = 2,
# z1 = [1 0; -1 0; 0 1; 0 -1]
for i in range(n):
z1[2*i:2*(i+1), i] = [1, -1]
z2 = np.zeros((2*n*(n-1), n))
i = 0
# In each node, a pair of random variables (p,q) takes either values
# (1,1) or (1,-1) or (-1,1) or (-1,-1), and all other variables take
# value 0. For example, for N = 2, `z2 = [1 1; 1 -1; -1 1; -1 1]`
for p in range(n-1):
for q in range(p+1, n):
z2[4*i:4*(i+1), p] = [1, -1, 1, -1]
z2[4*i:4*(i+1), q] = [1, 1, -1, -1]
i += 1
sqrt_vcv = np.linalg.cholesky(vcv)
R = np.sqrt(n+2)*sqrt_vcv
S = np.sqrt((n+2)/2)*sqrt_vcv
ϵj = np.row_stack([z0, z1 @ R, z2 @ S])
ωj = np.concatenate([2/(n+2) * np.ones(z0.shape[0]),
(4-n)/(2*(n+2)**2) * np.ones(z1.shape[0]),
1/(n+2)**2 * np.ones(z2.shape[0])])
return ϵj, ωj
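#%% cell
"""
As a quick illustrative check (not part of the original appendix), the weights
returned by either monomial rule sum to one; the 2x2 diagonal
variance-covariance matrix used below is just an example.
"""
#%% cell
vcv_example = np.diag([0.01, 0.04])
eps1, w1 = qnwmonomial1(vcv_example)
eps2, w2 = qnwmonomial2(vcv_example)
assert abs(w1.sum() - 1.0) < 1e-12 and abs(w2.sum() - 1.0) < 1e-12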
#%% cell
"""
## Classes
First we have the `Params` class, which holds all the model parameters as well
as the parameters that drive the algorithm.
"""
#%% cell
SteadyState = namedtuple("SteadyState",
["Yn", "Y", "π", "δ", "L", "C", "F", "S", "R", "w"])
class Params(object):
def __init__(self, zlb=True, γ=1, β=0.99, ϑ=2.09, ϵ=4.45, ϕ_y=0.07,
ϕ_π=2.21, μ=0.82, Θ=0.83, πstar=1, gbar=0.23,
ρηR=0.0, ρηa=0.95, ρηL=0.25, ρηu=0.92, ρηB=0.0, ρηG=0.95,
σηR=0.0028, σηa=0.0045, σηL=0.0500, σηu=0.0054, σηB=0.0010,
σηG=0.0038, degree=2):
self.zlb = zlb # whether or not the zlb should be imposed
self.γ = γ # Utility-function parameter
self.β = β # Discount factor
self.ϑ = ϑ # Utility-function parameter
self.ϵ = ϵ # Parameter in the Dixit-Stiglitz aggregator
self.ϕ_y = ϕ_y # Parameter of the Taylor rule
self.ϕ_π = ϕ_π # Parameter of the Taylor rule
self.μ = μ # Parameter of the Taylor rule
self.Θ = Θ # Share of non-reoptimizing firms (Calvo's pricing)
self.πstar = πstar # Target (gross) inflation rate
self.gbar = gbar # Steady-state share of gov. spending in output
# autocorrelation coefficients
self.ρηR = ρηR # See process (28) in MM (2015)
self.ρηa = ρηa # See process (22) in MM (2015)
self.ρηL = ρηL # See process (16) in MM (2015)
self.ρηu = ρηu # See process (15) in MM (2015)
self.ρηB = ρηB # See process (17) in MM (2015)
self.ρηG = ρηG # See process (26) in MM (2015)
# standard deviations
self.σηR = σηR # See process (28) in MM (2015)
self.σηa = σηa # See process (22) in MM (2015)
self.σηL = σηL # See process (16) in MM (2015)
self.σηu = σηu # See process (15) in MM (2015)
self.σηB = σηB # See process (17) in MM (2015)
self.σηG = σηG # See process (26) in MM (2015)
self.degree = degree
@property
def vcov(self):
return np.diag([self.σηR**2, self.σηa**2, self.σηL**2,
self.σηu**2, self.σηB**2, self.σηG**2])
@property
def steady_state(self):
Yn_ss = exp(self.gbar)**(self.γ/(self.ϑ+self.γ))
Y_ss = Yn_ss
π_ss = 1.0
δ_ss = 1.0
L_ss = Y_ss/δ_ss
C_ss = (1-self.gbar)*Y_ss
F_ss = C_ss**(-self.γ)*Y_ss/(1-self.β*self.Θ*π_ss**(self.ϵ-1))
S_ss = L_ss**self.ϑ*Y_ss/(1-self.β*self.Θ*π_ss**self.ϵ)
R_ss = π_ss/self.β
w_ss = (L_ss**self.ϑ)*(C_ss**self.γ)
return SteadyState(
Yn_ss, Y_ss, π_ss, δ_ss, L_ss, C_ss, F_ss, S_ss, R_ss, w_ss
)
@property
def grid_size(self):
"Grid size pinned down by degree of polynomials"
return {1: 20, 2: 100, 3: 300, 4: 1000, 5: 2000}[self.degree]
#%% cell
"""
Notice that we have a namedtuple to hold the steady state of the model. Using
the namedtuple infrastructure allows us to have convenient "dot-style" access
to the steady state, without defining a full class.
Given an instance of `Params` class, we can construct the grid on which we will
solve the model.
The `Grids` class holds this grid as well as matrices used to compute
expectations.
To match the Julia and Matlab versions of the code, the `__init__` method for
`Grids` below loads pre-generated grids from a `.mat` file for both Sobol and
random grids. This ensures that the exact same code is run in each language. If
you would like to generate the grids in pure Python, you can set the
`grid_source` keyword argument to `"python"`
"""
#%% cell
class Grids(object):
def __init__(self, p, kind="random", grid_source="mat"):
m = p.grid_size
σ = np.array([p.σηR, p.σηa, p.σηL, p.σηu, p.σηB, p.σηG])
ρ = np.array([p.ρηR, p.ρηa, p.ρηL, p.ρηu, p.ρηB, p.ρηG])
if kind == "sobol":
if grid_source == "mat":
_path = os.path.join(DIR, "Sobol_grids.mat")
s = loadmat(_path)["Sobol_grids"][:m, :]
else:
s = sobol_seq.i4_sobol_generate(8, m)
sη = s[:, :6]
η = (-2*σ + 4*(sη.max(0)-sη) / (sη.max(0)-sη.min(0))*σ)/np.sqrt(1-ρ**2)
R = 1+0.05*(np.max(s[:, 6])-s[:, 6])/(np.max(s[:, 6])-np.min(s[:, 6]))
δ = 0.95+0.05*(np.max(s[:, 7])-s[:, 7])/(np.max(s[:, 7])-np.min(s[:, 7]))
else:
# Values of exogenous state variables are distributed uniformly
# in the interval +/- std/sqrt(1-rho_nu**2)
if grid_source == "mat":
_path = os.path.join(DIR, "random_grids.mat")
s = loadmat(_path)["random_grids"][:m, :]
else:
s = np.random.rand(m, 8)
sη = s[:, :6]
η = (-2*σ + 4*σ*sη) / np.sqrt(1-ρ**2)
# Values of endogenous state variables are distributed uniformly
# in the intervals [1 1.05] and [0.95 1], respectively
R = 1 + 0.05 * s[:, 6]
δ = 0.95 + 0.05 * s[:, 7]
ηR = η[:, 0]
ηa = η[:, 1]
ηL = η[:, 2]
ηu = η[:, 3]
ηB = η[:, 4]
ηG = η[:, 5]
self.ηR = ηR
self.ηa = ηa
self.ηL = ηL
self.ηu = ηu
self.ηB = ηB
self.ηG = ηG
self.R = R
self.δ = δ
# shape (8, m)
self.X = np.vstack([np.log(R), np.log(δ), η.T])
# shape (n_complete(8, p.Degree), m)
self.X0_G = {
1: complete_polynomial(self.X, 1),
p.degree: complete_polynomial(self.X, p.degree)
}
# shape (2*n=12, n=6)
self.ϵ_nodes, self.ω_nodes = qnwmonomial1(p.vcov)
# all shape (len(ϵ_nodes), m)
self.ηR1 = p.ρηR * ηR[None, :] + self.ϵ_nodes[:, None, 0]
self.ηa1 = p.ρηa * ηa[None, :] + self.ϵ_nodes[:, None, 1]
self.ηL1 = p.ρηL * ηL[None, :] + self.ϵ_nodes[:, None, 2]
self.ηu1 = p.ρηu * ηu[None, :] + self.ϵ_nodes[:, None, 3]
self.ηB1 = p.ρηB * ηB[None, :] + self.ϵ_nodes[:, None, 4]
self.ηG1 = p.ρηG * ηG[None, :] + self.ϵ_nodes[:, None, 5]
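#%% cell
"""
As an illustrative aside (not part of the original appendix): the namedtuple
gives dot-style access to steady-state values, and passing
`grid_source="python"` builds the grid without the pre-generated `.mat` files.
"""
#%% cell
# R_ss = π_ss/β with π_ss = 1 and the default β = 0.99
assert abs(Params().steady_state.R - 1/0.99) < 1e-12
# purely illustrative grid drawn with numpy instead of the .mat file
example_grid = Grids(Params(), kind="random", grid_source="python")
print("example grid has", example_grid.X.shape[1], "points")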
#%% cell
"""
Finally, we construct the Model class, which has an instance of Params,
SteadyState and Grids as its three attributes.
This block of code will be longer than the others because we also include
routines to solve and simulate the model as methods on the Model class. These
methods will be clearly marked and commented.
"""
#%% cell
class Model(object):
def __init__(self, p=Params(), g=None):
if g is None:
g = Grids(p)
self.p = p
self.g = g
self.s = self.p.steady_state
def init_coefs(self, degree):
"Iniital guess for coefs. We evaluate interpoland as coefs @ basis_mat"
npol = self.g.X0_G[degree].shape[0]
coefs = np.full((3, npol), 1e-5)
coefs[:, 0] = [self.s.S, self.s.F, self.s.C**(-self.p.γ)]
return coefs
def step(self, S, F, C, δ0, R0, ηG, ηa, ηL, ηR):
# simplify notation
Θ, ϵ, gbar, ϑ, γ = self.p.Θ, self.p.ϵ, self.p.gbar, self.p.ϑ, self.p.γ
β, μ, ϕ_π, ϕ_y = self.p.β, self.p.μ, self.p.ϕ_π, self.p.ϕ_y
πstar = self.p.πstar
# Compute pie(t) from condition (35) in MM (2015)
π0 = ((1-(1-Θ)*(S/F)**(1-ϵ))/Θ)**(1/(ϵ-1))
# Compute delta(t) from condition (36) in MM (2015)
δ1 = ((1-Θ)*((1-Θ*π0**(ϵ-1))/(1-Θ))**(ϵ/(ϵ-1))+Θ*π0**ϵ/δ0)**(-1)
# Compute Y(t) from condition (38) in MM (2015)
Y0 = C/(1-gbar/exp(ηG))
# Compute L(t) from condition (37) in MM (2015)
L0 = Y0/exp(ηa)/δ1
# Compute Yn(t) from condition (31) in MM (2015)
Yn0 = (exp(ηa)**(1+ϑ)*(1-gbar/exp(ηG))**(-γ)/exp(ηL))**(1/(ϑ+γ))
# Compute R(t) from conditions (27), (39) in MM (2015) -- Taylor rule
R1 = πstar/β*(R0*β/πstar)**μ*((π0/πstar)**ϕ_π * (Y0/Yn0)**ϕ_y)**(1-μ)*exp(ηR)
return π0, δ1, Y0, L0, Yn0, R1
def solve(self, damp=0.1, tol=1e-7, verbose=False):
# rename self to m to make code below readable
m = self
n = len(m.g.ηR)
n_nodes = len(m.g.ω_nodes)
## allocate memory
# euler equations
e = np.zeros((3, n))
# previous iteration S, F, C
S0_old_G = np.ones(n)
F0_old_G = np.ones(n)
C0_old_G = np.ones(n)
# current iteration S, F, C
S0_new_G = np.ones(n)
F0_new_G = np.ones(n)
C0_new_G = np.ones(n)
# future S, F, C
S1 = np.zeros((n_nodes, n))
F1 = np.zeros((n_nodes, n))
C1 = np.zeros((n_nodes, n))
degs = [self.p.degree] if self.p.degree == 1 else [1, self.p.degree]
for deg in degs:
# housekeeping
err = 1.0
it = 0
X0_G = m.g.X0_G[deg]
start_time = time.time()
if deg <= 2:
coefs = self.init_coefs(deg)
else:
coefs = np.linalg.lstsq(X0_G.T, e.T)[0].T
# old_coefs = coefs.copy()
# coefs = self.init_coefs(deg)
# coefs[:, :old_coefs.shape[1]] = old_coefs
while err > tol:
it += 1
# Current choices (at t)
# ------------------------------
SFC0 = coefs @ X0_G
S0 = SFC0[0, :] # Compute S(t) using coefs
F0 = SFC0[1, :] # Compute F(t) using coefs
C0 = (SFC0[2, :])**(-1/m.p.γ) # Compute C(t) using coefs
π0, δ1, Y0, L0, Yn0, R1 = self.step(
S0, F0, C0, m.g.δ, m.g.R, m.g.ηG, m.g.ηa, m.g.ηL, m.g.ηR
)
if self.p.zlb:
R1 = np.maximum(R1, 1.0)
for u in range(n_nodes):
# Form complete polynomial of degree "Degree" (at t+1 states)
grid1 = [np.log(R1), np.log(δ1), m.g.ηR1[u, :], m.g.ηa1[u, :],
m.g.ηL1[u, :], m.g.ηu1[u, :], m.g.ηB1[u, :], m.g.ηG1[u, :]]
X1 = complete_polynomial(grid1, deg)
S1[u, :] = coefs[0, :] @ X1 # Compute S(t+1)
F1[u, :] = coefs[1, :] @ X1 # Compute F(t+1)
C1[u, :] = (coefs[2, :] @ X1)**(-1/m.p.γ) # Compute C(t+1)
# Compute next-period π using condition
# (35) in MM (2015)
π1 = ((1-(1-m.p.Θ)*(S1/F1)**(1-m.p.ϵ))/m.p.Θ)**(1/(m.p.ϵ-1))
# Evaluate conditional expectations in the Euler equations
#---------------------------------------------------------
e[0, :] = exp(m.g.ηu)*exp(m.g.ηL)*L0**m.p.ϑ*Y0/exp(m.g.ηa) + m.g.ω_nodes @ (m.p.β*m.p.Θ*π1**m.p.ϵ*S1)
e[1, :] = exp(m.g.ηu)*C0**(-m.p.γ)*Y0 + m.g.ω_nodes @ (m.p.β*m.p.Θ*π1**(m.p.ϵ-1)*F1)
e[2, :] = m.p.β*exp(m.g.ηB)/exp(m.g.ηu)*R1 * (m.g.ω_nodes @ ((exp(m.g.ηu1)*C1**(-m.p.γ)/π1)))
# Variables of the current iteration
#-----------------------------------
np.copyto(S0_new_G, S0)
np.copyto(F0_new_G, F0)
np.copyto(C0_new_G, C0)
# Compute and update the coefficients of the decision functions
# -------------------------------------------------------------
coefs_hat = np.linalg.lstsq(X0_G.T, e.T)[0].T
# Update the coefficients using damping
coefs = damp*coefs_hat + (1-damp)*coefs
# Evaluate the percentage (unit-free) difference between the values
# on the grid from the previous and current iterations
# -----------------------------------------------------------------
# The convergence criterion is adjusted to the damping parameters
err = (np.mean(np.abs(1-S0_new_G/S0_old_G)) +
np.mean(np.abs(1-F0_new_G/F0_old_G)) +
np.mean(np.abs(1-C0_new_G/C0_old_G)))
# Store the obtained values for S(t), F(t), C(t) on the grid to
# be used on the subsequent iteration in Section 10.2.6
#-----------------------------------------------------------------------
np.copyto(S0_old_G, S0_new_G)
np.copyto(F0_old_G, F0_new_G)
np.copyto(C0_old_G, C0_new_G)
if it % 20 == 0 and verbose:
print("On iteration {:d} err is {:6.7e}".format(it, err))
elapsed = time.time() - start_time
return coefs, elapsed
def simulate(self, coefs=None, capT=10201):
if coefs is None:
coefs, elapsed = self.solve()
# rename self to m to make code below readable
m = self
# create namedtuple to hold simulation results in an organized container
Simulation = namedtuple(
"Simulation",
["nuR", "nua", "nuL", "nuu", "nuB", "nuG",
"δ", "R", "S", "F", "C", "π", "Y", "L", "Yn", "w"]
)
# 11. Simulating a time-series solution
#---------------------------------------
# Initialize the values of 6 exogenous shocks and draw innovations
#-----------------------------------------------------------------
nuR = np.zeros(capT)
nua = np.zeros(capT)
nuL = np.zeros(capT)
nuu = np.zeros(capT)
nuB = np.zeros(capT)
nuG = np.zeros(capT)
# Generate the series for shocks
#-------------------------------
_path = os.path.join(DIR, "epsi_test_NK.mat")
rands = (loadmat(_path)["epsi_test_NK"])
capT = rands.shape[0]
# rands = np.random.randn(capT-1, 6)
for t in range(capT-1):
nuR[t+1] = self.p.ρηR*nuR[t] + self.p.σηR*rands[t, 0]
nua[t+1] = self.p.ρηa*nua[t] + self.p.σηa*rands[t, 1]
nuL[t+1] = self.p.ρηL*nuL[t] + self.p.σηL*rands[t, 2]
nuu[t+1] = self.p.ρηu*nuu[t] + self.p.σηu*rands[t, 3]
nuB[t+1] = self.p.ρηB*nuB[t] + self.p.σηB*rands[t, 4]
nuG[t+1] = self.p.ρηG*nuG[t] + self.p.σηG*rands[t, 5]
# Allocate memory for time series of ...
δ = np.ones(capT+1) # ... delta(t)
R = np.ones(capT+1) # ... R(t)
S = np.ones(capT) # ... S(t)
F = np.ones(capT) # ... F(t)
C = np.ones(capT) # ... C(t)
π = np.ones(capT) # ... π(t)
Y = np.ones(capT) # ... Y(t)
L = np.ones(capT) # ... L(t)
Yn = np.ones(capT) # ... Yn(t)
w = np.ones(capT) # ... w(t)
pol_bases = np.empty(coefs.shape[1])
states = np.empty(8)
for t in range(capT):
states[0] = math.log(R[t])
states[1] = math.log(δ[t])
states[2] = nuR[t]
states[3] = nua[t]
states[4] = nuL[t]
states[5] = nuu[t]
states[6] = nuB[t]
states[7] = nuG[t]
_complete_poly_impl_vec(states, self.p.degree, pol_bases)
vals = coefs @ pol_bases
S[t] = vals[0]
F[t] = vals[1]
C[t] = (vals[2])**(-1/m.p.γ)
π[t], δ[t+1], Y[t], L[t], Yn[t], R[t+1] = self.step(
S[t], F[t], C[t], δ[t], R[t], nuG[t], nua[t], nuL[t], nuR[t]
)
# Compute real wage
w[t] = exp(nuL[t])*(L[t]**m.p.ϑ)*(C[t]**m.p.γ)
# If ZLB is imposed, set R(t)=1 if ZLB binds
if self.p.zlb:
R[t+1] = max(R[t+1], 1.0)
return Simulation(nuR, nua, nuL, nuu, nuB, nuG, δ, R, S, F, C, π, Y, L, Yn, w)
def residuals(self, coefs, sim, burn=200):
m = self # rename self to m so the rest of this code is more readable
capT = len(sim.w)
resids = np.zeros((capT, 9))
# Integration method for evaluating accuracy
# ------------------------------------------
# Monomial integration rule with 2N**2+1 nodes
ϵ_nodes, ω_nodes = qnwmonomial2(m.p.vcov)
n_nodes = len(ω_nodes)
# Allocate for arrays needed in the loop
basis_mat = np.empty((8, n_nodes))
X1 = np.empty((coefs.shape[1], n_nodes))
nuR1 = np.empty(n_nodes)
nua1 = np.empty(n_nodes)
nuL1 = np.empty(n_nodes)
nuu1 = np.empty(n_nodes)
nuB1 = np.empty(n_nodes)
nuG1 = np.empty(n_nodes)
for t in range(capT): # For each given point,
# Take the corresponding value for shocks at t
#---------------------------------------------
nuR0 = sim.nuR[t] # nuR(t)
nua0 = sim.nua[t] # nua(t)
nuL0 = sim.nuL[t] # nuL(t)
nuu0 = sim.nuu[t] # nuu(t)
nuB0 = sim.nuB[t] # nuB(t)
nuG0 = sim.nuG[t] # nuG(t)
# Extract time t values for all other variables (and t+1 for R, δ)
#------------------------------------------------------------------
R0 = sim.R[t] # R(t-1)
δ0 = sim.δ[t] # δ(t-1)
R1 = sim.R[t+1] # R(t)
δ1 = sim.δ[t+1] # δ(t)
L0 = sim.L[t] # L(t)
Y0 = sim.Y[t] # Y(t)
Yn0 = sim.Yn[t] # Yn(t)
π0 = sim.π[t] # π(t)
S0 = sim.S[t] # S(t)
F0 = sim.F[t] # F(t)
C0 = sim.C[t] # C(t)
# Fill basis matrix with R1, δ1 and shocks
#-----------------------------------------
# Note that we do not premultiply by standard deviations as ϵ_nodes
# already include them. All these variables are vectors of length n_nodes
nuR1[:] = nuR0*m.p.ρηR + ϵ_nodes[:, 0]
nua1[:] = nua0*m.p.ρηa + ϵ_nodes[:, 1]
nuL1[:] = nuL0*m.p.ρηL + ϵ_nodes[:, 2]
nuu1[:] = nuu0*m.p.ρηu + ϵ_nodes[:, 3]
nuB1[:] = nuB0*m.p.ρηB + ϵ_nodes[:, 4]
nuG1[:] = nuG0*m.p.ρηG + ϵ_nodes[:, 5]
basis_mat[0, :] = np.log(R1)
basis_mat[1, :] = np.log(δ1)
basis_mat[2, :] = nuR1
basis_mat[3, :] = nua1
basis_mat[4, :] = nuL1
basis_mat[5, :] = nuu1
basis_mat[6, :] = nuB1
basis_mat[7, :] = nuG1
# Future choices at t+1
#----------------------
# Form a complete polynomial of degree "Degree" (at t+1) on future state
# variables; n_nodes-by-npol
_complete_poly_impl(basis_mat, self.p.degree, X1)
# Compute S(t+1), F(t+1) and C(t+1) in all nodes using coefs
S1 = coefs[0, :] @ X1
F1 = coefs[1, :] @ X1
C1 = (coefs[2, :] @ X1)**(-1/m.p.γ)
# Compute π(t+1) using condition (35) in MM (2015)
π1 = ((1-(1-m.p.Θ)*(S1/F1)**(1-m.p.ϵ))/m.p.Θ)**(1/(m.p.ϵ-1))
# Compute residuals for each of the 9 equilibrium conditions
#-----------------------------------------------------------
resids[t, 0] = 1 - (ω_nodes @
(exp(nuu0)*exp(nuL0)*L0**m.p.ϑ*Y0/exp(nua0) +
m.p.β*m.p.Θ*π1**m.p.ϵ*S1)/S0
)
resids[t, 1] = 1 - (ω_nodes @
(exp(nuu0)*C0**(-m.p.γ)*Y0 + m.p.β*m.p.Θ*π1**(m.p.ϵ-1)*F1)/F0
)
resids[t, 2] = 1 - (ω_nodes @
(m.p.β*exp(nuB0)/exp(nuu0)*R1*exp(nuu1)*C1**(-m.p.γ)/π1)/C0**(-m.p.γ)
)
resids[t, 3] = 1 - ((1-m.p.Θ*π0**(m.p.ϵ-1))/(1-m.p.Θ))**(1/(1-m.p.ϵ))*F0/S0
resids[t, 4] = 1 - ((1-m.p.Θ)*((1-m.p.Θ*π0**(m.p.ϵ-1))/(1-m.p.Θ))**(m.p.ϵ/(m.p.ϵ-1)) + m.p.Θ*π0**m.p.ϵ/δ0)**(-1)/δ1
resids[t, 5] = 1 - exp(nua0)*L0*δ1/Y0
resids[t, 6] = 1 - (1-m.p.gbar/exp(nuG0))*Y0/C0
resids[t, 7] = 1 - (exp(nua0)**(1+m.p.ϑ)*(1-m.p.gbar/exp(nuG0))**(-m.p.γ)/exp(nuL0))**(1/(m.p.ϑ+m.p.γ))/Yn0
resids[t, 8] = 1 - m.s.π/m.p.β*(R0*m.p.β/m.s.π)**m.p.μ*((π0/m.s.π)**m.p.ϕ_π * (Y0/Yn0)**m.p.ϕ_y)**(1-m.p.μ)*exp(nuR0)/R1 # Taylor rule
# If the ZLB is imposed and R>1, the residuals in the Taylor rule (the
# 9th equation) are zero
if m.p.zlb and R1 <= 1:
resids[t, 8] = 0.0
return resids[burn:, :]
#%% cell
"""
## Running the code
Now that we've done all the hard work to define the model, its solution and
simulation, and accuracy checks, let's put things together and run the code!
"""
#%% cell
def ensurefile(url, localpath):
if not os.path.isfile(localpath):
print("Downloading {} to {}".format(url, localpath))
with open(localpath, "wb") as f:
res = requests.get(url)
f.write(res.content)
def main(m=None, file=None, plot=True, verbose=False):
ensurefile("https://github.com/sglyon/CLMMJuliaPythonMatlab/raw/master/NewKeynesian/Sobol_grids.mat", "Sobol_grids.mat")
ensurefile("https://github.com/sglyon/CLMMJuliaPythonMatlab/raw/master/NewKeynesian/epsi_test_NK.mat", "epsi_test_NK.mat")
ensurefile("https://github.com/sglyon/CLMMJuliaPythonMatlab/raw/master/NewKeynesian/random_grids.mat", "random_grids.mat")
if m is None:
m = Model()
if file is None:
mprint = print
else:
def mprint(*x):
print(*x, file=file)
# solve the model
coefs, solve_time = m.solve(verbose=verbose)
# simulate the model
t1 = time.time()
sim = m.simulate(coefs)
sim_time = time.time() - t1
# check accuracy
t1 = time.time()
resids = m.residuals(coefs, sim)
resids_time = time.time() - t1
tot_time = solve_time + sim_time + resids_time
max_err = np.log10(np.abs(resids).max() + 1e-16)
max_err_eqn = np.log10(np.abs(resids).max(0) + 1e-16)
l1 = np.log10(np.abs(resids).mean() + 1e-16)
mprint("Solver time (in seconds): ", solve_time)
mprint("Simulation time (in seconds): ", sim_time)
mprint("Residuals time (in seconds): ", resids_time)
mprint("Total time (in seconds): ", tot_time)
mprint("\nAPPROXIMATION ERRORS (log10):")
mprint("\ta) mean error in the model equations: {:0.3f}".format(l1))
mprint("\tb) max error in the model equations: {:0.3f}".format(max_err))
mprint("\tc) max error by equation: ", max_err_eqn)
mprint("tex row: {:.2f} & {:.2f} & {:.2f}".format(l1, max_err, solve_time))
# plot simulated path of variables
if plot:
fig, ax = plt.subplots(2, 2, figsize=(10, 8))
t = np.arange(1, 101)
ax[0, 0].plot(t, sim.S[t], label="S")
ax[0, 0].plot(t, sim.F[t], label="F")
ax[0, 0].set_title("Figure 1a. S and F")
ax[0, 0].legend()
ax[0, 1].plot(t, sim.Y[t], label="Y")
ax[0, 1].plot(t, sim.Yn[t], label="Yn")
ax[0, 1].set_title("Figure 1b. Output and natural output")
ax[0, 1].legend()
ax[1, 0].plot(t, sim.C[t], label="C")
ax[1, 0].plot(t, sim.L[t], label="L")
ax[1, 0].set_title("Figure 1c. Consumption and labor")
ax[1, 0].legend()
ax[1, 1].plot(t, sim.δ[t], label="δ")
ax[1, 1].plot(t, sim.R[t], label="R")
ax[1, 1].plot(t, sim.π[t], label="π")
ax[1, 1].set_title("Figure 1d. Distortion, interest rate and inflation")
ax[1, 1].legend()
return fig, solve_time, sim_time, resids_time, coefs, sim, resids, l1, max_err
else:
return None, solve_time, sim_time, resids_time, coefs, sim, resids, l1, max_err
#%% cell
def build_paper_table():
msg = "starting πstar={πstar} and degree={degree} zlb={zlb}"
with open(os.path.join(DIR, "output.csv"), "w") as f_csv:
f_csv.write("pi_star,zlb,degree,solve_time,l_1,l_inf\n")
with open(os.path.join(DIR, "output.log"), "w") as f:
for params in (dict(πstar=1.0, zlb=False),
dict(πstar=1, zlb=True),
dict(πstar=1 + 0.0598/4, zlb=False),):
for degree in range(1, 6):
print(msg.format(degree=degree, **params))
p = Params(degree=degree, **params)
g = Grids(p, kind="sobol")
m = Model(p, g)
print(msg.format(degree=degree, **params), file=f)
outputs = main(m, f, plot=False)
_stuff = [params["πstar"], params["zlb"]*1, degree,
outputs[1], outputs[7], outputs[8]]
f_csv.write(",".join(map(str, _stuff)) + "\n")
print("\n"*5, file=f)
# flush io streams so we can see output in real time
f_csv.flush()
f.flush()
#%% cell
if "table" in sys.argv:
build_paper_table()
else:
results = main()
|
python
|
arr = [1, 34, 3, 98, 9, 76, 45, 4]
l = len(arr)
digits = []
# Helper to split a multi-digit number into its individual digits.
def digitconverter(num):
while num != 0:
digits.append(num % 10)
num = num // 10
# Alternative string-based version:
# def digitconverter(num):
#     for ch in str(num):
#         digits.append(int(ch))
for i in range(l):
if arr[i] >=10:
digitconverter(arr[i])
else:
digits.append(arr[i])
# print(digits)
digits.sort()
digits = digits[::-1] #descending order sort
# print(digits) # array of max no. possible
n = "".join(map(str,digits)) # converting every digit to str
print(n) # required max no. possible
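# An equivalent one-liner sketch of the same idea (illustrative, not from the
# original script): flatten every number into digits, sort descending, join.
print("".join(sorted((d for x in arr for d in str(x)), reverse=True)))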
|
python
|
from rest_framework.serializers import ModelSerializer
from rest_framework_gis.serializers import GeoFeatureModelSerializer
from .models import IdentifiedBaseStation, Operator
class IdentifiedBaseStationSerializer(GeoFeatureModelSerializer):
class Meta:
model = IdentifiedBaseStation
geo_field = 'point'
fields = ('id', 'cgi', 'radio', 'average_signal')
class OperatorSerializer(ModelSerializer):
class Meta:
model = Operator
fields = ('id', 'friendly_name', 'name', 'cnpj')
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pathlib
import pickle
import yaml
import numpy as np
from librosa import load
from tools import yaml_loader
__author__ = 'Konstantinos Drossos -- Tampere University'
__docformat__ = 'reStructuredText'
__all__ = [
'dump_pickle_file', 'load_pickle_file', 'read_txt_file',
'load_audio_file', 'dump_numpy_object', 'load_numpy_object',
'load_yaml_file', 'load_settings_file'
]
def dump_numpy_object(np_obj, file_name, ext='.npy', replace_ext=True):
"""Dumps a numpy object to HDD.
:param np_obj: Numpy object.
:type np_obj: numpy.ndarray
:param file_name: File name to be used.
:type file_name: pathlib.Path
:param ext: Extension for the dumped object.
:type ext: str
:param replace_ext: Replace extension?
:type replace_ext: bool
"""
f_name = file_name.with_suffix(ext) if replace_ext else file_name
np.save(str(f_name), np_obj)
def dump_pickle_file(obj, file_name, protocol=2):
"""Dumps an object to pickle file.
:param obj: Object to dump.
:type obj: object | list | dict | numpy.ndarray
:param file_name: Resulting file name.
:type file_name: pathlib.Path
:param protocol: Protocol to be used.
:type protocol: int
"""
with file_name.open('wb') as f:
pickle.dump(obj, f, protocol=protocol)
def load_audio_file(audio_file, sr, mono, offset=0.0, duration=None):
"""Loads the data of an audio file.
:param audio_file: Path of the audio file.
:type audio_file: pathlib.Path
:param sr: Sampling frequency to be used.
:type sr: int
:param mono: Turn to mono?
:type mono: bool
:param offset: Offset to be used (in seconds).
:type offset: float
:param duration: Duration of signal to load (in seconds).
:type duration: float|None
:return: Audio data.
:rtype: numpy.ndarray
"""
return load(path=str(audio_file), sr=sr, mono=mono,
offset=offset, duration=duration)[0]
def load_numpy_object(f_name):
"""Loads and returns a numpy object.
:param f_name: Path of the object.
:type f_name: pathlib.Path
:return: Numpy object.
:rtype: numpy.ndarray
"""
return np.load(str(f_name), allow_pickle=True)
def load_pickle_file(file_name, encoding='latin1'):
"""Loads a pickle file.
:param file_name: File name (extension included).
:type file_name: pathlib.Path
:param encoding: Encoding of the file.
:type encoding: str
:return: Loaded object.
:rtype: object | list | dict | numpy.ndarray
"""
with file_name.open('rb') as f:
return pickle.load(f, encoding=encoding)
def load_settings_file(file_name, settings_dir=pathlib.Path('settings')):
"""Reads and returns the contents of a YAML settings file.
:param file_name: Name of the settings file.
:type file_name: pathlib.Path
:param settings_dir: Directory with the settings files.
:type settings_dir: pathlib.Path
:return: Contents of the YAML settings file.
:rtype: dict
"""
settings_file_path = settings_dir.joinpath(file_name.with_suffix('.yaml'))
return load_yaml_file(settings_file_path)
def load_yaml_file(file_path):
"""Reads and returns the contents of a YAML file.
:param file_path: Path to the YAML file.
:type file_path: pathlib.Path
:return: Contents of the YAML file.
:rtype: dict
"""
with file_path.open('r') as f:
return yaml.load(f, Loader=yaml_loader.YAMLLoader)
def read_txt_file(file_name):
"""Reads a text (.txt) file and returns the contents.
:param file_name: File name of the txt file.
:type file_name: pathlib.Path
:return: Contents of the file.
:rtype: list[str]
"""
with file_name.open() as f:
return f.readlines()
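# A minimal usage sketch (not part of the original module): round-trip a small
# object through dump_pickle_file / load_pickle_file. The file name used here
# is purely illustrative.
if __name__ == '__main__':
    _tmp_path = pathlib.Path('example_object.p')
    dump_pickle_file({'answer': 42}, _tmp_path)
    print(load_pickle_file(_tmp_path))  # -> {'answer': 42}
    _tmp_path.unlink()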
# EOF
|
python
|
# Generated by Django 3.2.7 on 2021-09-17 12:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Course', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='LearnGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=32, verbose_name='Название')),
('is_studies', models.BooleanField(default=False, verbose_name='Идут занятия')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Создана')),
],
options={
'verbose_name': 'Группа',
'verbose_name_plural': 'Группы',
},
),
migrations.AlterModelOptions(
name='student',
options={'verbose_name': 'Ученик', 'verbose_name_plural': 'Ученики'},
),
migrations.AlterField(
model_name='student',
name='contact',
field=models.CharField(max_length=128, verbose_name='Контакты'),
),
migrations.AlterField(
model_name='student',
name='created_at',
field=models.DateTimeField(auto_now_add=True, verbose_name='Зарегистрирован'),
),
migrations.AlterField(
model_name='student',
name='email',
field=models.EmailField(max_length=64, unique=True, verbose_name='Почта'),
),
migrations.AlterField(
model_name='student',
name='is_learned',
field=models.BooleanField(default=False, verbose_name='Учащийся'),
),
migrations.AlterField(
model_name='student',
name='name',
field=models.CharField(max_length=32, verbose_name='Имя'),
),
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=32, verbose_name='Имя')),
('contact', models.CharField(max_length=128, verbose_name='Контакты')),
('email', models.EmailField(max_length=64, unique=True, verbose_name='Почта')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Зарегистрирован')),
('groups', models.ManyToManyField(to='Course.LearnGroup')),
],
options={
'verbose_name': 'Учитель',
'verbose_name_plural': 'Учителя',
},
),
]
|
python
|
import gym
import copy
env = gym.make('LunarLander-v2')
env.seed(111) # we can fix the background for now
env.action_space.np_random.seed(123) #fix random actions for now
env.reset()
for step in range(60):
#input()
env.render()
#save info before action
if step == 55:
save_state = copy.copy(info)
print("save pos", save_state['posx'], save_state['posy'])
input("Let's restart here")
elif step > 55:
print(step, info['posx'], info['posy'])
obs, r, done, info = env.step(0)#env.action_space.sample()) # take a random action
if step == 55:
#save after state
after_55 = copy.copy(info)
obs_after_55 = obs
# print(obs)
# print(r)
# print(done)
# print(obs)
# print(info['posx'], info['posy'])
# print()
#print('SAVED lander', save_state['lander'])
print("recover pos", save_state['leg_posx'], save_state['leg_posy'])
obs, r, done, info = env.reset(game_state = save_state, action = 0)
#print('SAVED lander', save_state['lander'])
print("obs after 55")
print(obs_after_55)
print("obs after reset")
print(obs)
env.render()
print("recovered pos", after_55['leg_posx'])
print("reset state", info['leg_posx'])
input()
for step in range(56,60):
env.render()
print(step, info['posx'], info['posy'])
obs, r, done, info = env.step(0)#env.action_space.sample()) # take a random action
# print(obs)
# print(r)
# print(done)
#print(obs)
# print()
input()
env.close()
#Questions, how do we reset the env with a particular state?
#didn't work. I think I need to rewrite all the leg and body code with and without previous state
#hmm I tried, but still seems to not be working. Why is it different? Need to run debugger and find out.
#okay so I reset the leg linearVelocity and angular velocity
#TODO: doesn't work!! Need to debug. The posx and posy and angle for the legs is wrong for some reason... I thought I had it working...
#Need to run two debuggers and see when it changes
#check None action!
#TODO test with non-zero actions and make sure it works and see if I can get the particles to also work.
#TODO test with dispersion noise
|
python
|
import asyncio
import inspect
import warnings
import functools
from typing import Callable, Generic, TypeVar, Type, Optional
def deprecated(reason, stacklevel=2) -> Callable:
"""
This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
Source: https://stackoverflow.com/questions/2536307/decorators-in-the-python-standard-lib-deprecated-specifically
"""
if isinstance(reason, str):
# The @deprecated is used with a 'reason'.
#
# .. code-block:: python
#
# @deprecated("please, use another function")
# def old_function(x, y):
# pass
def decorator(func):
if inspect.isclass(func):
msg = "Call to deprecated class {name} ({reason})."
else:
msg = "Call to deprecated function {name} ({reason})."
@functools.wraps(func)
def wrapper(*args, **kwargs):
warn_deprecated(msg.format(name=func.__name__, reason=reason), stacklevel=stacklevel)
warnings.simplefilter('default', DeprecationWarning)
return func(*args, **kwargs)
return wrapper
return decorator
if inspect.isclass(reason) or inspect.isfunction(reason):
# The @deprecated is used without any 'reason'.
#
# .. code-block:: python
#
# @deprecated
# def old_function(x, y):
# pass
func1 = reason
if inspect.isclass(func1):
msg1 = "Call to deprecated class {name}."
else:
msg1 = "Call to deprecated function {name}."
@functools.wraps(func1)
def wrapper1(*args, **kwargs):
warn_deprecated(msg1.format(name=func1.__name__), stacklevel=stacklevel)
return func1(*args, **kwargs)
return wrapper1
raise TypeError(repr(type(reason)))
def warn_deprecated(message, warning=DeprecationWarning, stacklevel=2):
warnings.simplefilter('always', warning)
warnings.warn(message, category=warning, stacklevel=stacklevel)
warnings.simplefilter('default', warning)
def renamed_argument(old_name: str, new_name: str, until_version: str, stacklevel: int = 3):
"""
A meta-decorator to mark an argument as deprecated.
.. code-block:: python3
@renamed_argument("chat", "chat_id", "3.0") # stacklevel=3 by default
@renamed_argument("user", "user_id", "3.0", stacklevel=4)
def some_function(user_id, chat_id=None):
print(f"user_id={user_id}, chat_id={chat_id}")
some_function(user=123) # prints 'user_id=123, chat_id=None' with warning
some_function(123) # prints 'user_id=123, chat_id=None' without warning
some_function(user_id=123) # prints 'user_id=123, chat_id=None' without warning
:param old_name:
:param new_name:
:param until_version: the version in which the argument is scheduled to be removed
:param stacklevel: leave it to default if it's the first decorator used.
Increment with any new decorator used.
:return: decorator
"""
def decorator(func):
is_coroutine = asyncio.iscoroutinefunction(func)
def _handling(kwargs):
"""
Returns updated version of kwargs.
"""
routine_type = 'coroutine' if is_coroutine else 'function'
if old_name in kwargs:
warn_deprecated(f"In {routine_type} '{func.__name__}' argument '{old_name}' "
f"is renamed to '{new_name}' "
f"and will be removed in aiogram {until_version}",
stacklevel=stacklevel)
kwargs = kwargs.copy()
kwargs.update({new_name: kwargs.pop(old_name)})
return kwargs
if is_coroutine:
@functools.wraps(func)
async def wrapped(*args, **kwargs):
kwargs = _handling(kwargs)
return await func(*args, **kwargs)
else:
@functools.wraps(func)
def wrapped(*args, **kwargs):
kwargs = _handling(kwargs)
return func(*args, **kwargs)
return wrapped
return decorator
def removed_argument(name: str, until_version: str, stacklevel: int = 3):
"""
A meta-decorator to mark an argument as removed.
.. code-block:: python3
@removed_argument("until_date", "3.0") # stacklevel=3 by default
def some_function(user_id, chat_id=None):
print(f"user_id={user_id}, chat_id={chat_id}")
:param name:
:param until_version: the version in which the argument is scheduled to be removed
:param stacklevel: leave it to default if it's the first decorator used.
Increment with any new decorator used.
:return: decorator
"""
def decorator(func):
is_coroutine = asyncio.iscoroutinefunction(func)
def _handling(kwargs):
"""
Returns updated version of kwargs.
"""
routine_type = 'coroutine' if is_coroutine else 'function'
if name in kwargs:
warn_deprecated(
f"In {routine_type} {func.__name__!r} argument {name!r} "
f"is planned to be removed in aiogram {until_version}",
stacklevel=stacklevel,
)
kwargs = kwargs.copy()
del kwargs[name]
return kwargs
if is_coroutine:
@functools.wraps(func)
async def wrapped(*args, **kwargs):
kwargs = _handling(kwargs)
return await func(*args, **kwargs)
else:
@functools.wraps(func)
def wrapped(*args, **kwargs):
kwargs = _handling(kwargs)
return func(*args, **kwargs)
return wrapped
return decorator
_VT = TypeVar("_VT")
_OwnerCls = TypeVar("_OwnerCls")
class DeprecatedReadOnlyClassVar(Generic[_OwnerCls, _VT]):
"""
DeprecatedReadOnlyClassVar[Owner, ValueType]
:param warning_message: Warning message when getter gets called
:param new_value_getter: Any callable with (owner_class: Type[Owner]) -> ValueType
signature that will be executed
Usage example:
>>> class MyClass:
... some_attribute: DeprecatedReadOnlyClassVar[MyClass, int] = \
... DeprecatedReadOnlyClassVar(
... "Warning message.", lambda owner: 15)
...
>>> MyClass.some_attribute # does warning.warn with `Warning message` and returns 15 in the current case
"""
__slots__ = "_new_value_getter", "_warning_message"
def __init__(self, warning_message: str, new_value_getter: Callable[[_OwnerCls], _VT]):
self._warning_message = warning_message
self._new_value_getter = new_value_getter
def __get__(self, instance: Optional[_OwnerCls], owner: Type[_OwnerCls]):
warn_deprecated(self._warning_message, stacklevel=3)
return self._new_value_getter(owner)
|
python
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack.package import *
class HashTest3(Package):
"""Used to test package hashing
"""
homepage = "http://www.hashtest3.org"
url = "http://www.hashtest1.org/downloads/hashtest3-1.1.tar.bz2"
version('1.2', 'b' * 32)
version('1.3', 'c' * 32)
version('1.5', 'd' * 32)
version('1.6', 'e' * 32)
version('1.7', 'f' * 32)
variant('variantx', default=False, description='Test variant X')
variant('varianty', default=False, description='Test variant Y')
def setup_dependent_build_environment(self, env, dependent_spec):
pass
@when('@:1.4')
def install(self, spec, prefix):
print("install 1")
os.listdir(os.getcwd())
# sanity_check_prefix requires something in the install directory
mkdirp(prefix.bin)
@when('@1.5:')
def install(self, spec, prefix):
os.listdir(os.getcwd())
# sanity_check_prefix requires something in the install directory
mkdirp(prefix.bin)
for _version_constraint in ['@1.5', '@1.6']:
@when(_version_constraint)
def extra_phase(self, spec, prefix):
pass
|
python
|
"""
Yandex Transport Webdriver API. Continuous Monitoring tests.
NOTE: These are designed to run indefinitely and check current YandexTransportAPI status.
Tests are working with Live Data, with several random delays between them.
They take a lot of time as a result.
NOTE: Tests require running YandexTransportProxy server
UPD: These are kinda questionable; they made sense in the era of YandexTransportMonitor,
but now they kinda... don't.
"""
import pytest
import random
import time
import json
from yandex_transport_webdriver_api import YandexTransportProxy
# Working server settings
SERVER_HOST = '172.17.0.1'
SERVER_PORT = 25555
# Station URLs used in tests.
# Template: {"": ""}
mini_set = \
{"Москва/Метро Сокол": "https://yandex.ru/maps/213/moscow/?ll=37.511152%2C55.804204&masstransit%5BstopId%5D=stop__9647423&mode=stop&z=17",
"Москва/Улица Станиславского": "https://yandex.ru/maps/213/moscow/?ll=37.664542%2C55.744704&masstransit%5BstopId%5D=stop__9647379&mode=stop&z=17"
}
# These are working stations, they should return getStopInfo and getLayerRegions.
station_urls = \
{"Москва/Метро Сокол": "https://yandex.ru/maps/213/moscow/?ll=37.511152%2C55.804204&masstransit%5BstopId%5D=stop__9647423&mode=stop&z=17",
"Москва/Улица Станиславского": "https://yandex.ru/maps/213/moscow/?ll=37.664542%2C55.744704&masstransit%5BstopId%5D=stop__9647379&mode=stop&z=17",
"Москва/Платформа Тестовская": "https://yandex.ru/maps/213/moscow/?ll=37.535037%2C55.752682&masstransit%5BstopId%5D=stop__9649559&mode=stop&z=17",
"Москва/Тишинская площадь": "https://yandex.ru/maps/213/moscow/?ll=37.587580%2C55.770117&masstransit%5BstopId%5D=stop__9648355&mode=stop&z=17",
"Москва/Метро Китай-город": "https://yandex.ru/maps/213/moscow/?ll=37.634151%2C55.754175&masstransit%5BstopId%5D=stop__10187976&mode=stop&ol=geo&ouri=ymapsbm1%3A%2F%2Fgeo%3Fll%3D37.633884%252C55.754364%26spn%3D0.001000%252C0.001000%26text%3D%25D0%25A0%25D0%25BE%25D1%2581%25D1%2581%25D0%25B8%25D1%258F%252C%2520%25D0%259C%25D0%25BE%25D1%2581%25D0%25BA%25D0%25B2%25D0%25B0%252C%2520%25D0%25A2%25D0%25B0%25D0%25B3%25D0%25B0%25D0%25BD%25D1%2581%25D0%25BA%25D0%25BE-%25D0%259A%25D1%2580%25D0%25B0%25D1%2581%25D0%25BD%25D0%25BE%25D0%25BF%25D1%2580%25D0%25B5%25D1%2581%25D0%25BD%25D0%25B5%25D0%25BD%25D1%2581%25D0%25BA%25D0%25B0%25D1%258F%2520%25D0%25BB%25D0%25B8%25D0%25BD%25D0%25B8%25D1%258F%252C%2520%25D0%25BC%25D0%25B5%25D1%2582%25D1%2580%25D0%25BE%2520%25D0%259A%25D0%25B8%25D1%2582%25D0%25B0%25D0%25B9-%25D0%25B3%25D0%25BE%25D1%2580%25D0%25BE%25D0%25B4%2520&z=19",
"Петропавловск-Камчатский/Советская улица": "https://yandex.ru/maps/78/petropavlovsk/?ll=158.650965%2C53.015840&masstransit%5BstopId%5D=1543338149&mode=stop&z=17",
"Магадан/Телевышка": "https://yandex.ru/maps/79/magadan/?ll=150.800171%2C59.560040&masstransit%5BstopId%5D=1941449091&mode=stop&z=16",
"Владивосток/Центр": "https://yandex.ru/maps/75/vladivostok/?ll=131.886671%2C43.115497&masstransit%5BstopId%5D=stop__9980150&mode=stop&sll=37.540794%2C55.925019&sspn=0.145741%2C0.050022&z=17",
"Якутск/Крестьянский рынок": "https://yandex.ru/maps/74/yakutsk/?ll=129.728396%2C62.035988&masstransit%5BstopId%5D=2040377980&mode=stop&z=16",
"Иркутск/Железнодорожный вокзал": "https://yandex.ru/maps/63/irkutsk/?ll=104.259650%2C52.282821&masstransit%5BstopId%5D=stop__9795272&mode=stop&sctx=ZAAAAAgBEAAaKAoSCWnCm9o%2BElpAEVnd6jlpJUpAEhIJE7%2Ft%2F5%2Bnwj8RVFOjSVz4qz8iBAABAgQoCjAAOKqiz7joupHNA0DVzQZIAFXNzMw%2BWABqAnJ1cACdAc3MzD2gAQCoAQA%3D&sll=104.259650%2C52.282821&sspn=0.004554%2C0.001708&text=%D0%98%D1%80%D0%BA%D1%83%D1%82%D1%81%D0%BA%20cnfywbz&z=18",
"Красноярск/Железнодорожный вокзал": "https://yandex.ru/maps/62/krasnoyarsk/?ll=92.832626%2C56.006039&masstransit%5BstopId%5D=stop__9901229&mode=stop&ol=geo&ouri=ymapsbm1%3A%2F%2Fgeo%3Fll%3D92.852577%252C56.010567%26spn%3D0.541885%252C0.222061%26text%3D%25D0%25A0%25D0%25BE%25D1%2581%25D1%2581%25D0%25B8%25D1%258F%252C%2520%25D0%259A%25D1%2580%25D0%25B0%25D1%2581%25D0%25BD%25D0%25BE%25D1%258F%25D1%2580%25D1%2581%25D0%25BA%2520&z=17",
"Омск/Железнодорожный вокзал": "https://yandex.ru/maps/66/omsk/?ll=73.386035%2C54.939776&masstransit%5BstopId%5D=stop__9727412&mode=stop&ol=geo&ouri=ymapsbm1%3A%2F%2Fgeo%3Fll%3D73.368217%252C54.989346%26spn%3D0.563622%252C0.594631%26text%3D%25D0%25A0%25D0%25BE%25D1%2581%25D1%2581%25D0%25B8%25D1%258F%252C%2520%25D0%259E%25D0%25BC%25D1%2581%25D0%25BA%2520&z=17",
"Екатеринбург/1-й километр": "https://yandex.ru/maps/54/yekaterinburg/?ll=60.611944%2C56.863058&masstransit%5BstopId%5D=stop__9810370&mode=stop&ol=geo&ouri=ymapsbm1%3A%2F%2Fgeo%3Fll%3D60.597473%252C56.838013%26spn%3D0.679832%252C0.389126%26&z=18",
"Самара/Некрасовская улица": "https://yandex.ru/maps/51/samara/?ll=50.102397%2C53.189701&masstransit%5BstopId%5D=stop__10097748&mode=stop&ol=geo&ouri=ymapsbm1%3A%2F%2Fgeo%3Fll%3D50.101788%252C53.195541%26spn%3D0.659111%252C0.459122%26text%3D%25D0%25A0%25D0%25BE%25D1%2581%25D1%2581%25D0%25B8%25D1%258F%252C%2520%25D0%25A1%25D0%25B0%25D0%25BC%25D0%25B0%25D1%2580%25D0%25B0%2520&z=17",
"Санкт-Петербург/Станция метро Невский проспект": "https://yandex.ru/maps/2/saint-petersburg/?ll=30.326364%2C59.935241&masstransit%5BstopId%5D=stop__10075220&mode=stop&ol=geo&ouri=ymapsbm1%3A%2F%2Fgeo%3Fll%3D30.315639%252C59.938953%26spn%3D1.334415%252C0.611099%26text%3D%25D0%25A0%25D0%25BE%25D1%2581%25D1%2581%25D0%25B8%25D1%258F%252C%2520%25D0%25A1%25D0%25B0%25D0%25BD%25D0%25BA%25D1%2582-%25D0%259F%25D0%25B5%25D1%2582%25D0%25B5%25D1%2580%25D0%25B1%25D1%2583%25D1%2580%25D0%25B3%2520&z=18",
"Калининград/Гостиница Калининград": "https://yandex.ru/maps/22/kaliningrad/?ll=20.509223%2C54.712040&masstransit%5BstopId%5D=3313917805&mode=stop&ol=geo&ouri=ymapsbm1%3A%2F%2Fgeo%3Fll%3D20.507313%252C54.707394%26spn%3D0.359865%252C0.148655%26text%3D%25D0%25A0%25D0%25BE%25D1%2581%25D1%2581%25D0%25B8%25D1%258F%252C%2520%25D0%259A%25D0%25B0%25D0%25BB%25D0%25B8%25D0%25BD%25D0%25B8%25D0%25BD%25D0%25B3%25D1%2580%25D0%25B0%25D0%25B4%2520&z=18",
"Москва/Метро Марьино (южная)": "https://yandex.ru/maps/213/moscow/?ll=37.744035%2C55.649321&masstransit%5BstopId%5D=stop__9647488&mode=stop&ol=geo&ouri=ymapsbm1%3A%2F%2Fgeo%3Fll%3D37.743473%252C55.650028%26spn%3D0.001000%252C0.001000%26text%3D%25D0%25A0%25D0%25BE%25D1%2581%25D1%2581%25D0%25B8%25D1%258F%252C%2520%25D0%259C%25D0%25BE%25D1%2581%25D0%25BA%25D0%25B2%25D0%25B0%252C%2520%25D0%25BC%25D0%25B5%25D1%2582%25D1%2580%25D0%25BE%2520%25D0%259C%25D0%25B0%25D1%2580%25D1%258C%25D0%25B8%25D0%25BD%25D0%25BE%2520&z=17"
}
# This is an empty station, it should return nothing.
# There was a small period when it returned getLayerRegions.
station_empty = {"Якутск/Школа №7": "https://yandex.ru/maps/74/yakutsk/?ll=129.725800%2C62.037399&mode=poi&poi%5Bpoint%5D=129.728085%2C62.036624&poi%5Buri%5D=ymapsbm1%3A%2F%2Forg%3Foid%3D179807288972&sll=37.586616%2C55.802258&sspn=0.036435%2C0.012545&text=%D1%8F%D0%BA%D1%83%D1%82%D1%81%D0%BA&z=16"}
# These are working routes, they should return getRouteInfo, getVehiclesInfo, getVehiclesInfoWithRegion, getLayerRegions
routes_urls = {"Москва/Автобус 105": "https://yandex.ru/maps/213/moscow/?ll=37.517402%2C55.804455&masstransit%5BlineId%5D=213_105_bus_mosgortrans&masstransit%5BstopId%5D=stop__9647423&masstransit%5BthreadId%5D=213A_105_bus_mosgortrans&mode=stop&z=14",
"Москва/Троллейбус 53": "https://yandex.ru/maps/213/moscow/?ll=37.746753%2C55.737977&masstransit%5BlineId%5D=2036926340&masstransit%5BstopId%5D=stop__9647379&masstransit%5BthreadId%5D=213A_53_trolleybus_mosgortrans&mode=stop&z=13",
"Москва/Автобус 12": "https://yandex.ru/maps/213/moscow/?ll=37.546941%2C55.755232&masstransit%5BlineId%5D=213_12_bus_mosgortrans&masstransit%5BstopId%5D=stop__9649559&masstransit%5BthreadId%5D=213A_12_bus_mosgortrans&mode=stop&z=15",
"Москва/Троллейбус 54": "https://yandex.ru/maps/213/moscow/?ll=37.587580%2C55.770117&masstransit%5BlineId%5D=213_54_trolleybus_mosgortrans&masstransit%5BstopId%5D=stop__9648355&masstransit%5BthreadId%5D=2036927249&mode=stop&z=17",
"Москва/Автобус Н1": "https://yandex.ru/maps/213/moscow/?ll=37.634151%2C55.754175&masstransit%5BlineId%5D=N1_bus_default&masstransit%5BstopId%5D=stop__10187976&masstransit%5BthreadId%5D=2036926069&mode=stop&z=19",
"Петропавловск-Камчатский/Автобус 1": "https://yandex.ru/maps/78/petropavlovsk/?ll=158.650258%2C53.016359&masstransit%5BlineId%5D=1704841626&masstransit%5BthreadId%5D=2163257102&mode=stop&z=17",
"Магадан/Автобус 1": "https://yandex.ru/maps/79/magadan/?ll=150.800171%2C59.560040&masstransit%5BlineId%5D=1704917872&masstransit%5BthreadId%5D=1952775971&mode=stop&z=16",
"Владивосток/Маршрутка 24": "https://yandex.ru/maps/75/vladivostok/?ll=131.886671%2C43.115497&masstransit%5BlineId%5D=2468209792&masstransit%5BthreadId%5D=2468209966&mode=stop&z=17",
"Якутск/Автобус 104": "https://yandex.ru/maps/74/yakutsk/?ll=129.723504%2C62.037152&masstransit%5BlineId%5D=1704844454&masstransit%5BthreadId%5D=3442738945&mode=stop&z=16",
"Иркутск/Трамвай 4А": "https://yandex.ru/maps/63/irkutsk/?ll=104.259354%2C52.282396&masstransit%5BlineId%5D=1962955244&masstransit%5BthreadId%5D=1962955369&mode=stop&z=18",
"Красноярск/Маршрутка 130": "https://yandex.ru/maps/62/krasnoyarsk/?ll=92.831247%2C56.005319&masstransit%5BlineId%5D=2611970500&masstransit%5BthreadId%5D=2611970606&mode=stop&z=17",
"Омск/Троллейбус 12": "https://yandex.ru/maps/66/omsk/?ll=73.386035%2C54.939776&masstransit%5BlineId%5D=2012848234&masstransit%5BthreadId%5D=2012848632&mode=stop&z=17",
"Екатеринбург/Трамвай 5": "https://yandex.ru/maps/54/yekaterinburg/?ll=60.614978%2C56.863073&masstransit%5BlineId%5D=2107048890&masstransit%5BthreadId%5D=2107049173&mode=stop&z=16",
"Самара/Трамвай 5": "https://yandex.ru/maps/51/samara/?ll=50.099858%2C53.188705&masstransit%5BlineId%5D=2193179444&masstransit%5BthreadId%5D=2193179903&mode=stop&z=17",
"Санкт-Петербург/Троллейбус 22": "https://yandex.ru/maps/2/saint-petersburg/?ll=30.324825%2C59.935390&masstransit%5BlineId%5D=22_trolleybus_discus&masstransit%5BthreadId%5D=22B_trolleybus_discus&mode=stop&z=18",
"Калининград/Автобус 593": "https://yandex.ru/maps/22/kaliningrad/?ll=20.508255%2C54.712590&masstransit%5BlineId%5D=3181656187&masstransit%5BthreadId%5D=3181656277&mode=stop&z=18",
"Москва/Маршрутка 937к": "https://yandex.ru/maps/213/moscow/?ll=37.465495%2C55.878790&masstransit%5BlineId%5D=937_minibus_default&masstransit%5BthreadId%5D=937A_minibus_default&mode=stop&z=13",
"Москва/Трамвай А": "https://yandex.ru/maps/213/moscow/?ll=37.638675%2C55.764634&masstransit%5BlineId%5D=213_A_tramway_mosgortrans&masstransit%5BthreadId%5D=2036927519&mode=stop&z=18"
}
# Accumulated results. It is a good idea to actually SAVE the accumulated data to a file.
query_results = []
json_data = []
# NOTE: It seems that getting ALL services (get_stop_info, getLayerRegions etc.) may sometimes fail.
# It may be a browser issue, or the problem may be on Yandex's side.
# In tests this tends to appear near the end of a long run of get_all_info queries while checking stops.
# For now we're increasing the random wait period from 15-45 to 40-90 seconds.
def wait_random_time():
value = random.randint(40, 90)
print("Waiting " + str(value) + " seconds.")
time.sleep(value)
# ----- DATA COLLECTION ----- #
do_data_collection = True
do_stations_collection = True
do_routes_collection = True
data_collection_passed = False
def perform_data_collection():
"""
    Data collection test: every single request should return a valid JSON object.
    This test can be switched off, and data can be loaded from files instead during development.
    This takes a huge amount of time to process due to the wait times between queries
    (we don't want Yandex to get angry about frequent queries, so we're playing it safe here).
    Expect about 40-60 minutes of data collection.
"""
global query_results
global do_data_collection
global do_stations_collection
global do_routes_collection
global data_collection_passed
if not do_data_collection:
return
if data_collection_passed:
return
print()
proxy = YandexTransportProxy(SERVER_HOST, SERVER_PORT)
if do_stations_collection:
for station, url in station_urls.items():
print("Collecting station: " + station + "... ", end='')
result=''
try:
result = proxy.get_all_info(url)
for entry in result:
query_results.append({"success": True,
"station": station,
"url": url,
"method": entry['method'],
"data": entry['data']})
print(entry['method'], end=' ')
print("[OK]")
except Exception as e:
query_results.append({"success": False,
"station": station,
"url": url,
"method": "getAllInfo (failed)",
"data": ""
}
)
print("[FAILED]")
print("Exception (station): ",str(e))
f = open('tests/testdata/output/station_' + station.replace('/', '-') + '.json.pretty', 'w', encoding='utf-8')
f.write(json.dumps(result, ensure_ascii=False, indent=4, separators=(',', ': ')))
f.close()
f = open('tests/testdata/output/station_' + station.replace('/', '-') + '.json', 'w', encoding='utf-8')
f.write(json.dumps(result, ensure_ascii=False))
f.close()
wait_random_time()
if do_routes_collection:
for route, url in routes_urls.items():
print("Collecting route: " + route + "... ", end='')
result = ''
try:
result = proxy.get_all_info(url)
for entry in result:
query_results.append({"success": True,
"route": route,
"url": url,
"method": entry['method'],
"data": entry['data']})
print(entry['method'], end=' ')
print("[OK]")
except Exception as e:
query_results.append({"success": False,
"route": route,
"url": url,
"method": "getAllInfo (failed)",
"data": ""
})
print("[FAILED]")
print("Exception (route): ", str(e))
f = open('tests/testdata/output/route_' + route.replace('/', '-') + '.json.pretty', 'w', encoding='utf-8')
f.write(json.dumps(result, ensure_ascii=False, indent=4, separators=(',', ': ')))
f.close()
f = open('tests/testdata/output/route_' + route.replace('/', '-') + '.json', 'w', encoding='utf-8')
f.write(json.dumps(result, ensure_ascii=False))
f.close()
wait_random_time()
# Saving data to files
f = open('test_data.json', 'w', encoding='utf-8')
f.write(json.dumps(query_results, ensure_ascii=False))
f.close()
# Setting "data collection passed" flag.
data_collection_passed = True
# Basically, always succeeds
assert True == True
def load_data_from_file():
global json_data
print()
f = open('test_data.json', 'r', encoding='utf-8')
data = f.readline()
f.close()
json_data = json.loads(data)
for entry in json_data:
if 'station' in entry:
print('Station : ', entry["station"], ",", entry["success"], ",", end=' ')
if 'method' in entry:
print(entry["method"])
else:
print("")
if 'route' in entry:
print('Route : ', entry["route"], ",", entry["success"], ",", end=' ')
if 'method' in entry:
print(entry["method"])
else:
print("")
@pytest.fixture(scope="session", autouse=True)
def prepare_data():
# Collect data from Yandex Maps, save it to a file
perform_data_collection()
# Load data from file for tests.
load_data_from_file()
# ----- TESTS ----- #
@pytest.mark.timeout(3600)
def test_data_load_stage():
"""Needed to call perform_data_collection and load_data_from_file functions"""
print()
assert True == True
@pytest.mark.timeout(120)
def test_initial():
    """Most basic test to ensure pytest DEFINITELY works"""
assert True == True
# ----- CONTINUOUS TESTS ----- #
# With the "getting the JSON" approach, checking whether the JSON contains the required data (stopInfo, routeInfo etc.)
# is too difficult. Instead, continuous tests just check the integrity of the obtained JSONs, and which methods were called.
# Testing of actual data should be performed for particular API functions instead, if implemented:
# e.g. getVehiclesInfo should count vehicles on several routes, and getStopCoordinates should get stop coordinates.
# -------------------------------------------------------------------------------------------------------------------- #
def test_check_if_failure():
    """Test that there were no failures."""
failure_found = False
for entry in json_data:
if not entry["success"]:
failure_found = True
if 'station' in entry:
print("Data for stop ", entry['station'], "collection failed.")
if 'route' in entry:
print("Data for route ", entry['route'], "collection failed.")
assert not failure_found
def test_check_if_json():
    """Test that every record is valid JSON."""
failure_found = False
for entry in json_data:
try:
record = json.dumps(entry['data'])
except Exception as e:
print("Exception:", str(e))
failure_found = True
if 'station' in entry:
print("Data for stop ", entry['station'], "is not JSON.")
if 'route' in entry:
print("Data for route ", entry['route'], "is not JSON.")
assert not failure_found
def test_check_if_no_error():
    """Check that there is no 'error' field in the JSON."""
failure_found = False
for entry in json_data:
if 'error' in entry['data']:
failure_found = True
if 'station' in entry:
print("Data for stop ", entry['station'], "has an 'error'.")
if 'route' in entry:
print("Data for route ", entry['route'], "has an 'error'.")
assert not failure_found
def test_encountered_methods():
"""
Test that each method was encountered at least once.
    A pretty "forgiving" test; a "strict" one would expect every valid stop to return getStopInfo and getLayerRegions,
    and every route to return getLayerRegions, getVehiclesInfo, getVehiclesInfoWithRegion and getRouteInfo.
    This may not always happen due to network conditions.
"""
print()
print("Counting methods:")
result = {'getStopInfo': 0,
'getRouteInfo': 0,
'getLine': 0,
'getVehiclesInfo': 0,
'getVehiclesInfoWithRegion': 0,
'getLayerRegions': 0,
'getAllInfo (failed)': 0}
for entry in json_data:
result[entry['method']] += 1
for key, value in result.items():
print(key, ':', value)
assert result['getStopInfo'] > 0
assert result['getVehiclesInfo'] > 0
assert result['getLayerRegions'] > 0
assert result['getLine'] > 0
assert result['getVehiclesInfoWithRegion'] > 0
assert result['getAllInfo (failed)'] == 0
def test_no_data_returned():
"""
Test if there is a stop/route with no data returned
"""
print()
data_stations = {}
data_routes = {}
failure_found = False
for entry in json_data:
if 'station' in entry:
data_stations[entry['station']] = 1
if 'route' in entry:
data_routes[entry['route']] = 1
for key, value in station_urls.items():
if key not in data_stations:
failure_found = True
print("No data for station", key)
for key, value in routes_urls.items():
if key not in data_routes:
failure_found = True
print("No data for route", key)
assert not failure_found
# -------------------------------------------------------------------------------------------------------------------- #
|
python
|
from __future__ import (
absolute_import,
unicode_literals,
)
import importlib
import os
import pytest
@pytest.fixture(scope='module')
def server_settings(server_class):
"""
Load the server_settings used by this service.
"""
if server_class.use_django:
from django.conf import settings
else:
settings_module = os.environ.get('PYSOA_SETTINGS_MODULE', None)
assert settings_module, 'PYSOA_SETTINGS_MODULE environment variable must be set to run tests.'
try:
settings = importlib.import_module(settings_module)
except ImportError:
raise AssertionError('Could not import PYSOA_SETTINGS_MODULE: {}'.format(settings_module))
try:
soa_settings = settings.SOA_SERVER_SETTINGS
except AttributeError:
try:
soa_settings = settings.settings
except AttributeError:
raise AssertionError('Could not access settings.SOA_SERVER_SETTINGS or settings.settings')
return soa_settings
@pytest.fixture(scope='module')
def service_client_settings(server_class, server_settings):
"""Config passed to the service client on instantiation"""
return {
server_class.service_name: {
'transport': {
'path': 'pysoa.common.transport.local:LocalClientTransport',
'kwargs': {
'server_class': server_class,
'server_settings': server_settings,
},
},
},
}
@pytest.fixture(scope='module')
def service_client_class(server_class):
"""
Override the service client being used to test to automatically inject the service name for
your testing convenience.
"""
from pysoa.client import Client # inline so as not to mess up coverage
class _TestClient(Client):
def call_action(self, action, body=None, service_name=None, **kwargs):
service_name = service_name or server_class.service_name
return super(_TestClient, self).call_action(service_name, action, body=body, **kwargs)
return _TestClient
@pytest.fixture(scope='module')
def service_client(service_client_class, service_client_settings):
"""
    Instantiate the service client class with the requisite config. The service under test should define
the server_class fixture.
"""
return service_client_class(service_client_settings)
@pytest.fixture
def action_stubber():
"""
Equivalent of the pytest `mocker` fixture for stub_action, with similar motivations and behavior.
Allows a test to stub actions without having to manually clean up after the test.
See https://github.com/pytest-dev/pytest-mock for more info
"""
from pysoa.test.stub_service import stub_action # inline so as not to mess up coverage
stubbies = []
def _do_stub(*args, **kwargs):
stubby = stub_action(*args, **kwargs)
stubbies.append(stubby)
return stubby.__enter__()
yield _do_stub
for stub in stubbies[::-1]:
stub.__exit__()
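# A minimal usage sketch of the action_stubber fixture (the action name 'get_thing' and
# the stubbed response body are hypothetical assumptions, not part of this module; the
# fixture passes its arguments straight through to stub_action):
#
#   def test_get_thing(server_class, service_client, action_stubber):
#       action_stubber(server_class.service_name, 'get_thing', body={'thing': {'id': 1}})
#       response = service_client.call_action('get_thing')
#       assert response.body == {'thing': {'id': 1}}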
|
python
|
#! /usr/bin/env python3
from setuptools import find_packages
from setuptools import setup
def parse_requirements(filename):
"""Given a filename, strip empty lines and those beginning with #."""
with open(filename) as rfd:
output = []
for line in rfd:
line = line.strip()
if line != '' and not line.startswith('#'):
output.append(line)
return output
setup(
name='FollowerAnalyzer',
version='0.1',
author='Sravan Bhamidipati',
packages=find_packages(),
install_requires=parse_requirements('requirements.txt'),
description="Application to analyze a Twitter user's followers.",
long_description='\n' + open('README.md').read(),
)
|
python
|
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.params import Params
from allennlp.training.learning_rate_schedulers import (
LearningRateScheduler,
CombinedLearningRateScheduler,
PolynomialDecay,
)
from allennlp.training.optimizers import Optimizer
class TestCombinedLRScheduler(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.model = torch.nn.Sequential(torch.nn.Linear(10, 10))
self.optimizer = Optimizer.from_params(
model_parameters=self.model.named_parameters(),
params=Params({"type": "sgd", "lr": 1.0}),
)
def get_scheduler(self) -> LearningRateScheduler:
return LearningRateScheduler.from_params(
Params(
{
"type": "combined",
"schedulers": [
[
2,
{
"type": "polynomial_decay",
"warmup_steps": 10,
"end_learning_rate": 0.5,
},
],
[
5,
{
"type": "polynomial_decay",
"warmup_steps": 0,
"end_learning_rate": 0.1,
},
],
],
}
),
optimizer=self.optimizer,
num_steps_per_epoch=10,
)
def test_partial_schedule(self):
scheduler = self.get_scheduler()
assert isinstance(scheduler, CombinedLearningRateScheduler)
assert isinstance(scheduler._current_scheduler, PolynomialDecay)
# This should be 0 because the PolynomialDecay scheduler initializes the LR to 0.
assert self.optimizer.param_groups[0]["lr"] == 0.0
epoch_end_lrs = []
for epoch in range(10):
if epoch > 6:
assert scheduler._current_scheduler is None
elif epoch >= 2:
assert scheduler._current_scheduler is not None
assert scheduler._current_scheduler.total_steps == 50
assert scheduler._current_scheduler.base_values[0] == 0.5
else:
assert scheduler._current_scheduler is not None
assert scheduler._current_scheduler.total_steps == 20
assert scheduler._current_scheduler.base_values[0] == 1.0
for step in range(10):
scheduler.step_batch()
scheduler.step()
epoch_end_lrs.append(self.optimizer.param_groups[0]["lr"])
assert epoch_end_lrs[0] == 1.0
assert epoch_end_lrs[1] == 0.5
assert epoch_end_lrs[6] == 0.1
assert epoch_end_lrs[6] == 0.1
def test_load_from_checkpoint(self):
scheduler = self.get_scheduler()
for epoch in range(3):
for step in range(10):
scheduler.step_batch()
scheduler.step()
assert scheduler.last_epoch == 2
assert scheduler._current_scheduler is not None
assert scheduler._current_scheduler.total_steps == 50
assert scheduler._current_scheduler.base_values[0] == 0.5
state_dict = scheduler.state_dict()
new_scheduler = self.get_scheduler()
new_scheduler.load_state_dict(state_dict)
assert new_scheduler.last_epoch == 2
assert new_scheduler._current_scheduler is not None
assert new_scheduler._current_scheduler.total_steps == 50
assert new_scheduler._current_scheduler.base_values[0] == 0.5, state_dict
|
python
|
#!/usr/bin/python3
for i in range(122, 96, -1):
if i % 2 == 0:
print("{:s}".format(chr(i)), end="")
else:
print("{:s}".format(chr(i-32)), end="")
|
python
|
# -*- coding: utf-8 -*-
## @package palette.main
#
# Main function.
# @author tody
# @date 2015/08/20
from palette.datasets.google_image import createDatasets
from palette.results.single_image import signleImageResults
from palette.results.multi_images import multiImagesResults
if __name__ == '__main__':
data_names = ["tulip", "flower"]
num_images = 5
data_ids = range(3)
createDatasets(data_names, num_images, update=False)
signleImageResults(data_names, data_ids)
multiImagesResults(data_names, data_ids)
|
python
|
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class RNN(nn.Module):
def __init__(self, config):
"""
        type_rnn: one of RNN, GRU, LSTM
"""
super(RNN, self).__init__()
# self.xxx = config.xxx
self.input_size = config.input_size
self.hidden_size = config.hidden_size // 2 if config.bidirectional else config.hidden_size
self.num_layers = config.num_layers
self.dropout = config.dropout
self.bidirectional = config.bidirectional
self.last_layer_hn = config.last_layer_hn
self.type_rnn = config.type_rnn
        rnn = getattr(nn, self.type_rnn)
self.rnn = rnn(input_size=self.input_size,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
dropout=self.dropout,
bidirectional=self.bidirectional,
bias=True,
batch_first=True)
def forward(self, x, x_len):
"""
        Args:
            x: torch.Tensor [batch_size, seq_max_length, input_size], [B, L, H_in], usually the output of an embedding layer
            x_len: torch.Tensor [B], sequence lengths, already sorted in descending order
        Returns:
            output: torch.Tensor [B, L, H_out], per-token outputs for sequence labeling
            hn: torch.Tensor [B, N, H_out] / [B, H_out], hidden states for classification; only the last layer is kept when last_layer_hn is set
"""
B, L, _ = x.size()
H, N = self.hidden_size, self.num_layers
x_len = x_len.cpu()
x = pack_padded_sequence(x, x_len, batch_first=True, enforce_sorted=True)
output, hn = self.rnn(x)
output, _ = pad_packed_sequence(output, batch_first=True, total_length=L)
if self.type_rnn == 'LSTM':
hn = hn[0]
if self.bidirectional:
hn = hn.view(N, 2, B, H).transpose(1, 2).contiguous().view(N, B, 2 * H).transpose(0, 1)
else:
hn = hn.transpose(0, 1)
if self.last_layer_hn:
hn = hn[:, -1, :]
return output, hn
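# A minimal usage sketch (hypothetical config values, not part of the original module).
# It builds a bidirectional GRU and runs a random, length-sorted batch through it.
if __name__ == '__main__':
    from types import SimpleNamespace

    demo_config = SimpleNamespace(
        input_size=50,       # embedding dimension
        hidden_size=64,      # total hidden size (halved internally when bidirectional)
        num_layers=2,
        dropout=0.1,
        bidirectional=True,
        last_layer_hn=False,
        type_rnn='GRU',      # one of 'RNN', 'GRU', 'LSTM'
    )
    model = RNN(demo_config)
    x = torch.randn(4, 12, demo_config.input_size)    # [B, L, H_in]
    x_len = torch.tensor([12, 10, 7, 3])               # lengths sorted in descending order
    output, hn = model(x, x_len)
    print(output.shape, hn.shape)                      # expected: [4, 12, 64] and [4, 2, 64]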
|
python
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or '28#oN^^VfhcxV7x8H32yGOGIk2wLY%OFi!!V'
    ### email configs, see https://pythonhosted.org/Flask-Mail/
###
MAIL_SERVER = os.environ.get('MAIL_SERVER')
MAIL_PORT = int(os.environ.get('MAIL_PORT', '25'))
MAIL_USE_SSL = False
    # enable Transport Layer Security (TLS)
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS', 'true').lower() in ['true', 'on', '1']
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
MAIL_SUBJECT_PREFIX_JH = '[HJiahu]'
    MAIL_SENDER_JH = os.environ.get('MAIL_SENDER_JH')  # the sender must match the username of the SMTP server in use
    # Administrator's e-mail address; the original flasky project names this variable FLASKY_ADMIN
MAIL_ADMIN_EMAIL_JH = os.environ.get('MAIL_ADMIN_EMAIL_JH','[email protected]')
SQLALCHEMY_TRACK_MODIFICATIONS = False
FLASKY_POSTS_PER_PAGE = 20
FLASKY_FOLLOWERS_PER_PAGE = 50
FLASKY_COMMENTS_PER_PAGE = 30
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
MAIL_SERVER = 'smtp.163.com'
MAIL_USERNAME = 'jiahuhenan'
MAIL_PASSWORD = 'jiahu123'
MAIL_SENDER_JH = '[email protected]'
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite://'
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
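# A minimal usage sketch (assumes an application factory named create_app, which is not
# defined in this module):
#
#   from flask import Flask
#
#   def create_app(config_name='default'):
#       app = Flask(__name__)
#       app.config.from_object(config[config_name])
#       config[config_name].init_app(app)
#       return app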
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Push notifications tests."""
__author__ = '[email protected] (Ali Afshar)'
import unittest
from apiclient import push
from apiclient import model
from apiclient import http
from test_discovery import assertUrisEqual
class ClientTokenGeneratorTest(unittest.TestCase):
def test_next(self):
t = push.new_token()
self.assertTrue(t)
class ChannelTest(unittest.TestCase):
def test_creation_noargs(self):
c = push.Channel(channel_type='my_channel_type', channel_args={})
self.assertEqual('my_channel_type', c.channel_type)
self.assertEqual({}, c.channel_args)
def test_creation_args(self):
c = push.Channel(channel_type='my_channel_type',
channel_args={'a': 'b'})
self.assertEqual('my_channel_type', c.channel_type)
self.assertEqual({'a':'b'}, c.channel_args)
def test_as_header_value_noargs(self):
c = push.Channel(channel_type='my_channel_type', channel_args={})
self.assertEqual('my_channel_type?', c.as_header_value())
def test_as_header_value_args(self):
c = push.Channel(channel_type='my_channel_type',
channel_args={'a': 'b'})
self.assertEqual('my_channel_type?a=b', c.as_header_value())
def test_as_header_value_args_space(self):
c = push.Channel(channel_type='my_channel_type',
channel_args={'a': 'b c'})
self.assertEqual('my_channel_type?a=b+c', c.as_header_value())
def test_as_header_value_args_escape(self):
c = push.Channel(channel_type='my_channel_type',
channel_args={'a': 'b%c'})
self.assertEqual('my_channel_type?a=b%25c', c.as_header_value())
def test_write_header_noargs(self):
c = push.Channel(channel_type='my_channel_type', channel_args={})
headers = {}
c.write_header(headers)
self.assertEqual('my_channel_type?', headers['X-GOOG-SUBSCRIBE'])
def test_write_header_args(self):
c = push.Channel(channel_type='my_channel_type',
channel_args={'a': 'b'})
headers = {}
c.write_header(headers)
self.assertEqual('my_channel_type?a=b', headers['X-GOOG-SUBSCRIBE'])
def test_write_header_args_space(self):
c = push.Channel(channel_type='my_channel_type',
channel_args={'a': 'b c'})
headers = {}
c.write_header(headers)
self.assertEqual('my_channel_type?a=b+c', headers['X-GOOG-SUBSCRIBE'])
def test_write_header_args_escape(self):
c = push.Channel(channel_type='my_channel_type',
channel_args={'a': 'b%c'})
headers = {}
c.write_header(headers)
self.assertEqual('my_channel_type?a=b%25c', headers['X-GOOG-SUBSCRIBE'])
class WebhookChannelTest(unittest.TestCase):
def test_creation_no_appengine(self):
c = push.WebhookChannel('http://example.org')
assertUrisEqual(self,
'web_hook?url=http%3A%2F%2Fexample.org&app_engine=false',
c.as_header_value())
def test_creation_appengine(self):
c = push.WebhookChannel('http://example.org', app_engine=True)
assertUrisEqual(self,
'web_hook?url=http%3A%2F%2Fexample.org&app_engine=true',
c.as_header_value())
class HeadersTest(unittest.TestCase):
def test_creation(self):
h = push.Headers()
self.assertEqual('', h[push.SUBSCRIBE])
def test_items(self):
h = push.Headers()
h[push.SUBSCRIBE] = 'my_channel_type'
self.assertEqual([(push.SUBSCRIBE, 'my_channel_type')], list(h.items()))
def test_items_non_whitelisted(self):
h = push.Headers()
def set_bad_header(h=h):
h['X-Banana'] = 'my_channel_type'
self.assertRaises(ValueError, set_bad_header)
def test_read(self):
h = push.Headers()
h.read({'x-goog-subscribe': 'my_channel_type'})
self.assertEqual([(push.SUBSCRIBE, 'my_channel_type')], list(h.items()))
def test_read_non_whitelisted(self):
h = push.Headers()
h.read({'X-Banana': 'my_channel_type'})
self.assertEqual([], list(h.items()))
def test_write(self):
h = push.Headers()
h[push.SUBSCRIBE] = 'my_channel_type'
headers = {}
h.write(headers)
self.assertEqual({'x-goog-subscribe': 'my_channel_type'}, headers)
class SubscriptionTest(unittest.TestCase):
def test_create(self):
s = push.Subscription()
self.assertEqual('', s.client_token)
def test_create_for_channnel(self):
c = push.WebhookChannel('http://example.org')
s = push.Subscription.for_channel(c)
self.assertTrue(s.client_token)
assertUrisEqual(self,
'web_hook?url=http%3A%2F%2Fexample.org&app_engine=false',
s.subscribe)
def test_create_for_channel_client_token(self):
c = push.WebhookChannel('http://example.org')
s = push.Subscription.for_channel(c, client_token='my_token')
self.assertEqual('my_token', s.client_token)
assertUrisEqual(self,
'web_hook?url=http%3A%2F%2Fexample.org&app_engine=false',
s.subscribe)
def test_subscribe(self):
s = push.Subscription()
s.headers[push.SUBSCRIBE] = 'my_header'
self.assertEqual('my_header', s.subscribe)
def test_subscription_id(self):
s = push.Subscription()
s.headers[push.SUBSCRIPTION_ID] = 'my_header'
self.assertEqual('my_header', s.subscription_id)
def test_subscription_id_set(self):
c = push.WebhookChannel('http://example.org')
s = push.Subscription.for_channel(c)
self.assertTrue(s.subscription_id)
def test_topic_id(self):
s = push.Subscription()
s.headers[push.TOPIC_ID] = 'my_header'
self.assertEqual('my_header', s.topic_id)
def test_topic_uri(self):
s = push.Subscription()
s.headers[push.TOPIC_URI] = 'my_header'
self.assertEqual('my_header', s.topic_uri)
def test_client_token(self):
s = push.Subscription()
s.headers[push.CLIENT_TOKEN] = 'my_header'
self.assertEqual('my_header', s.client_token)
def test_event_type(self):
s = push.Subscription()
s.headers[push.EVENT_TYPE] = 'my_header'
self.assertEqual('my_header', s.event_type)
def test_unsubscribe(self):
s = push.Subscription()
s.headers[push.UNSUBSCRIBE] = 'my_header'
self.assertEqual('my_header', s.unsubscribe)
def test_do_subscribe(self):
m = model.JsonModel()
request = http.HttpRequest(
None,
m.response,
'https://www.googleapis.com/someapi/v1/collection/?foo=bar',
method='GET',
body='{}',
headers={'content-type': 'application/json'})
h = http.HttpMockSequence([
({'status': 200,
'X-Goog-Subscription-ID': 'my_subscription'},
'{}')])
c = push.Channel('my_channel', {})
s = push.Subscription.for_request(request, c)
request.execute(http=h)
self.assertEqual('my_subscription', s.subscription_id)
def test_subscribe_with_token(self):
m = model.JsonModel()
request = http.HttpRequest(
None,
m.response,
'https://www.googleapis.com/someapi/v1/collection/?foo=bar',
method='GET',
body='{}',
headers={'content-type': 'application/json'})
h = http.HttpMockSequence([
({'status': 200,
'X-Goog-Subscription-ID': 'my_subscription'},
'{}')])
c = push.Channel('my_channel', {})
s = push.Subscription.for_request(request, c, client_token='my_token')
request.execute(http=h)
self.assertEqual('my_subscription', s.subscription_id)
self.assertEqual('my_token', s.client_token)
def test_verify_good_token(self):
s = push.Subscription()
s.headers['X-Goog-Client-Token'] = '123'
notification_headers = {'x-goog-client-token': '123'}
self.assertTrue(s.verify(notification_headers))
def test_verify_bad_token(self):
s = push.Subscription()
s.headers['X-Goog-Client-Token'] = '321'
notification_headers = {'x-goog-client-token': '123'}
self.assertFalse(s.verify(notification_headers))
def test_request_is_post(self):
m = model.JsonModel()
request = http.HttpRequest(
None,
m.response,
'https://www.googleapis.com/someapi/v1/collection/?foo=bar',
method='GET',
body='{}',
headers={'content-type': 'application/json'})
c = push.Channel('my_channel', {})
push.Subscription.for_request(request, c)
self.assertEqual('POST', request.method)
def test_non_get_error(self):
m = model.JsonModel()
request = http.HttpRequest(
None,
m.response,
'https://www.googleapis.com/someapi/v1/collection/?foo=bar',
method='POST',
body='{}',
headers={'content-type': 'application/json'})
c = push.Channel('my_channel', {})
self.assertRaises(push.InvalidSubscriptionRequestError,
push.Subscription.for_request, request, c)
|
python
|
import os
import sys
sys.path.append(
os.path.normpath(
os.path.join(os.path.abspath(__file__), "..", "..", "..", "common")
)
)
from env_indigo import *
indigo = Indigo()
indigo.setOption("ignore-stereochemistry-errors", True)
indigo.setOption("molfile-saving-skip-date", True)
def testSerializeIsotopes():
mol = indigo.loadMolecule(
"[H][12C]1=[8C]([2H])[10C]([3H])=C([2H])[14C]([3H])=[13C]1[1H]"
)
mol2 = indigo.unserialize(mol.serialize())
print(mol2.smiles())
if indigo.exactMatch(mol, mol2) is None:
print("NOT MATCHED!")
def testSerializeIsotopes2():
mol = indigo.loadMolecule("C")
for n in range(1, 300):
mol.getAtom(0).setIsotope(n)
try:
mol2 = indigo.unserialize(mol.serialize())
if indigo.exactMatch(mol, mol2) is None:
                print("NOT MATCHED! " + str(n))
except IndigoException as e:
print("caught " + getIndigoExceptionText(e))
break
testSerializeIsotopes()
testSerializeIsotopes2()
|
python
|
import arrow
import dateutil
import requests
COUNTRY_CODE = 'RO'
def fetch_RO():
url = 'http://www.transelectrica.ro/sen-filter'
data = {}
for item in requests.get(url).json():
        d = list(item.items())[0]
data[d[0]] = d[1]
obj = {
'countryCode': COUNTRY_CODE,
'datetime': arrow.get(data['row1_HARTASEN_DATA'], "YY/M/D H:mm:ss").replace(
tzinfo=dateutil.tz.gettz('Europe/Bucharest')).datetime
}
obj['consumption'] = {
'unknown': float(data['CONS'])
}
# According to http://www.transelectrica.ro/widget/web/tel/sen-harta/-/harta_WAR_SENOperareHartaportlet
    # BALT and UCRS (for Baltic and Ukraine South) are categorized under Bulgaria on the Transelectrica website. We do the same here.
obj['exchange'] = {
'BG': float(data.get('VARN', 0)) + float(data.get('DOBR', 0)) + float(data.get('KOZL1', 0)) + float(data.get('KOZL2', 0)) + float(data.get('BALT', 0)) + float(data.get('UCRS', 0)),
'HU': float(data.get('SAND', 0)) + float(data.get('BEKE1', 0)) + float(data.get('BEKE2', 0)),
'MD': float(data.get('COSE', 0)) + float(data.get('UNGE', 0)) + float(data.get('CIOA', 0)) + float(data.get('GOTE', 0)),
'RS': float(data.get('DJER', 0)) + float(data.get('PAN1', 0)) + float(data.get('PAN2', 0)) + float(data.get('KUSJ', 0)) + float(data.get('SIP_', 0)) + float(data.get('KIKI', 0)),
'UA': float(data.get('VULC', 0)) + float(data.get('MUKA', 0)) + float(data.get('COD1', 0))
}
obj['production'] = {
'biomass': float(data['BMASA']),
'coal': float(data['CARB']),
'gas': float(data['GAZE']),
'hydro': float(data['APE']),
'nuclear': float(data['NUCL']),
'solar': float(data['FOTO']),
'wind': float(data['EOLIAN'])
}
return obj
if __name__ == '__main__':
    print(fetch_RO())
|
python
|
from claf.config.factory.data_reader import DataReaderFactory
from claf.config.factory.data_loader import DataLoaderFactory
from claf.config.factory.model import ModelFactory
from claf.config.factory.optimizer import OptimizerFactory
from claf.config.factory.tokens import TokenMakersFactory
__all__ = [
"DataReaderFactory",
"DataLoaderFactory",
"ModelFactory",
"OptimizerFactory",
"TokenMakersFactory",
]
|
python
|
import os
import shutil
from datetime import datetime
from os.path import dirname, join
import torch
class Logger():
def __init__(self, para):
self.para = para
now = datetime.now() if 'time' not in vars(para) else para.time
now = now.strftime("%Y_%m_%d_%H_%M_%S")
mark = para.model + '_' + para.dataset
file_path = join(para.save_dir, now + '_' + mark, 'log.txt')
self.save_dir = dirname(file_path)
self.check_dir(file_path)
self.logger = open(file_path, 'a+')
# variable register
self.register_dict = {}
# tensorboard
def record_para(self):
self('recording parameters ...')
for key, value in vars(self.para).items():
self('{}: {}'.format(key, value), timestamp=False)
def check_dir(self, file_path):
dir = dirname(file_path)
os.makedirs(dir, exist_ok=True)
def __call__(self, *args, verbose=True, prefix='', timestamp=True):
if timestamp:
now = datetime.now()
now = now.strftime("%Y/%m/%d, %H:%M:%S - ")
else:
now = ''
info = prefix + now
for msg in args:
if not isinstance(msg, str):
msg = str(msg)
info += msg + '\n'
self.logger.write(info)
if verbose:
print(info, end='')
self.logger.flush()
def __del__(self):
self.logger.close()
# register values for each epoch, such as loss, PSNR etc.
def register(self, name, epoch, value):
if name in self.register_dict:
self.register_dict[name][epoch] = value
if value > self.register_dict[name]['max']:
self.register_dict[name]['max'] = value
if value < self.register_dict[name]['min']:
self.register_dict[name]['min'] = value
else:
self.register_dict[name] = {}
self.register_dict[name][epoch] = value
self.register_dict[name]['max'] = value
self.register_dict[name]['min'] = value
def report(self, items, state, epoch):
# items - [['MSE', 'min'], ['PSNR', 'max'] ... ]
msg = '[{}] '.format(state.lower())
state = '_' + state.lower()
for i in range(len(items)):
item, best = items[i]
msg += '{} : {:.4f} (best {:.4f})'.format(
item,
self.register_dict[item + state][epoch],
self.register_dict[item + state][best]
)
if i < len(items) - 1:
msg += ', '
self(msg, timestamp=False)
def is_best(self, epoch):
item = self.register_dict[self.para.loss + '_valid']
return item[epoch] == item['min']
def save(self, state, filename='checkpoint.pth.tar'):
path = join(self.save_dir, filename)
torch.save(state, path)
if self.is_best(state['epoch']):
copy_path = join(self.save_dir, 'model_best.pth.tar')
shutil.copy(path, copy_path)
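# A minimal usage sketch (hypothetical parameter values; the attribute names mirror the
# ones this class reads: model, dataset, save_dir, loss and, optionally, time):
if __name__ == '__main__':
    from types import SimpleNamespace

    demo_para = SimpleNamespace(model='demo_net', dataset='demo_data',
                                save_dir='./logs', loss='MSE')
    logger = Logger(demo_para)
    logger.record_para()
    logger.register('MSE_valid', epoch=0, value=0.25)
    logger('epoch 0 done')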
|
python
|
###############################################################################
# Exceptions
###############################################################################
class UnexpectedCharacter(Exception):
def __init__(self, char, idx, matcher):
super().__init__(
'Expected {} at position {} but got {}'.format(
getattr(matcher, '__name__', matcher), idx, char)
)
###############################################################################
# Constants
###############################################################################
PERIOD = b'.'
NEGATIVE_SIGN = b'-'
# Define the Parser.container_value_context_stack values.
ARRAY_VALUE_CONTEXT = 'ARRAY_VALUE_CONTEXT'
OBJECT_VALUE_CONTEXT = 'OBJECT_VALUE_CONTEXT'
###############################################################################
# Matchers
#
# Matchers are character strings or predicate functions that are used to both
# test whether a character is as expected and serve as an indicator as to which
# class a character belongs.
###############################################################################
class Matchers:
OBJECT_OPEN = b'{'
ARRAY_OPEN = b'['
STRING_START = b'"'
STRING_TERMINATOR = b'"'
NULL_START = b'n'
TRUE_START = b't'
FALSE_START = b'f'
IS_NUMBER_START = lambda c: c == NEGATIVE_SIGN or c.isdigit()
OBJECT_CLOSE = b'}'
ARRAY_CLOSE = b']'
KV_SEP = b':'
ITEM_SEP = b','
EOF = b''
# Set derived matchers.
# Create separate scalar / object / array matchers that use the same logic but
# exist as uniquely identifiable values.
def IS_OBJECT_KEY_START(c):
return c == Matchers.STRING_START
Matchers.IS_OBJECT_KEY_START = IS_OBJECT_KEY_START
def IS_VALUE_START(c):
return (
c == Matchers.OBJECT_OPEN
or c == Matchers.ARRAY_OPEN
or c == Matchers.STRING_START
or Matchers.IS_NUMBER_START(c)
or c == Matchers.NULL_START
or c == Matchers.TRUE_START
or c == Matchers.FALSE_START
)
Matchers.IS_VALUE_START = IS_VALUE_START
def IS_ARRAY_VALUE_START(c):
return IS_VALUE_START(c)
Matchers.IS_ARRAY_VALUE_START = IS_ARRAY_VALUE_START
def IS_OBJECT_VALUE_START(c):
return IS_VALUE_START(c)
Matchers.IS_OBJECT_VALUE_START = IS_OBJECT_VALUE_START
def IS_ARRAY_ITEM_SEP(c):
return c == Matchers.ITEM_SEP
Matchers.IS_ARRAY_ITEM_SEP = IS_ARRAY_ITEM_SEP
def IS_OBJECT_ITEM_SEP(c):
return c == Matchers.ITEM_SEP
Matchers.IS_OBJECT_ITEM_SEP = IS_OBJECT_ITEM_SEP
###############################################################################
# Events
#
# Events represent things that we expect to encounter, and want to act in
# response to, while parsing a JSON string.
###############################################################################
class Events:
ARRAY_CLOSE = 'ARRAY_CLOSE'
ARRAY_ITEM_SEP = 'ARRAY_ITEM_SEP'
ARRAY_OPEN = 'ARRAY_OPEN'
ARRAY_VALUE_FALSE = 'ARRAY_VALUE_FALSE'
ARRAY_VALUE_NULL = 'ARRAY_VALUE_NULL'
ARRAY_VALUE_NUMBER = 'ARRAY_VALUE_NUMBER'
ARRAY_VALUE_STRING = 'ARRAY_VALUE_STRING'
ARRAY_VALUE_TRUE = 'ARRAY_VALUE_TRUE'
EOF = 'END_OF_FILE'
FALSE = 'FALSE'
KV_SEP = 'KV_SEP'
NULL = 'NULL'
NUMBER = 'NUMBER'
OBJECT_CLOSE = 'OBJECT_CLOSE'
OBJECT_ITEM_SEP = 'OBJECT_ITEM_SEP'
OBJECT_KEY = 'OBJECT_KEY'
OBJECT_OPEN = 'OBJECT_OPEN'
OBJECT_VALUE_FALSE = 'OBJECT_VALUE_FALSE'
OBJECT_VALUE_NULL = 'OBJECT_VALUE_NULL'
OBJECT_VALUE_NUMBER = 'OBJECT_VALUE_NUMBER'
OBJECT_VALUE_STRING = 'OBJECT_VALUE_STRING'
OBJECT_VALUE_TRUE = 'OBJECT_VALUE_TRUE'
STRING = 'STRING'
TRUE = 'TRUE'
###############################################################################
# Helpers
###############################################################################
is_digit = lambda c: c.isdigit()
###############################################################################
# Parser
###############################################################################
class Parser:
def __init__(self, stream, encoding='utf-8'):
self.stream = stream
self.encoding = encoding
# Store the current stream char number for reporting the position of
# unexpected characters.
self.char_num = 0
# Store a place to stuff a character that we read from the stream but
# need to put back for the next read. next_char() will pop this value
# before reading again from the stream, thus providing a sort of 1-byte
# lookahead mechanism.
self.stuffed_char = None
# Define a stack to store the Matcher that we expect to match the next
# character from next_nonspace_char(). A single matcher element is
        # considered to be mandatory and parsing will fail if the matcher
        # fails. A 2-element tuple can be provided with the first element as an
        # optional matcher and the second as a mandatory one:
        # i.e. ( <optional-matcher>, <mandatory-matcher> )
self.expect_stack = [ Matchers.EOF, Matchers.IS_VALUE_START ]
# Define a stack for storing the context of the current container-type
# (i.e. object value or array value) that we're currently parsing. This
# is used in order to yield the appropriate event on array/object
# close.
self.container_value_context_stack = []
def next_char(self):
# If there's a stuffed nonspace char, return that and do not increment
# char_num.
if self.stuffed_char is not None:
c = self.stuffed_char
self.stuffed_char = None
return c
# Return the next byte from the stream and increment char_num.
c = self.stream.read(1)
self.char_num += 1
return c
def next_nonspace_char(self):
# Advance the stream past the next non-whitespace character and return
# the character, or Matchers.EOF if the stream has been exhausted.
while True:
c = self.next_char()
if c == Matchers.EOF:
return Matchers.EOF
if not c.isspace():
return c
def stuff_char(self, c):
# Assert that stuffed_char is empty and write the character to it.
if self.stuffed_char is not None:
raise AssertionError
self.stuffed_char = c
def expect(self, matcher):
        # Assert that the next non-whitespace character is as expected and
# return both the character and the matcher that matched it.
c = self.next_nonspace_char()
# The expect_stack contains elements that are either a single matcher
        # or a tuple of matchers in the format:
# ( <optional-matcher>, <mandatory-matcher> )
# Iterate through all tuple-type matchers.
while isinstance(matcher, tuple):
optional, matcher = matcher
# If the matcher is callable, call it, otherwise test against the
# byte literal.
if (callable(optional) and optional(c)) or c == optional:
# An optional matcher matched, so push the mandatory one back
# onto the expect_stack.
self.expect_stack.append(matcher)
# Return the character and matched optional matcher.
return c, optional
# Either no optional matches were specified or none matched, so attempt
# to match against the mandatory matcher.
if (callable(matcher) and matcher(c)) or c == matcher:
# Return the character and matched mandatory matcher.
return c, matcher
# The mandatory matcher failed, so raise UnexpectedCharacter.
raise UnexpectedCharacter(c, self.char_num, matcher)
def yield_while(self, pred):
# Yield characters from the stream until testing them against the
# specified predicate function returns False.
while True:
# Read the next character from the stream.
c = self.next_char()
# Check whether the character satisfies the predicate.
if not pred(c):
# The predicate has not been satisfied so stuff the last-read
# character back and return.
self.stuff_char(c)
return
# Yield the character.
yield c
def parse_string(self):
# Yield characters from the stream up until the next string terminator
# (i.e. '"') character.
while True:
c = self.next_char()
if c == Matchers.STRING_TERMINATOR:
return
# Disallow control characters.
if c[0] <= 0x1f:
raise UnexpectedCharacter(c, self.char_num, 'NOT_CONTROL_CHAR')
yield c
def parse_number(self):
# Yield characters from the stream up until the next non-number char.
# Expect the first character to be a negative sign or digit.
yield self.expect(lambda c: c == NEGATIVE_SIGN or c.isdigit())[0]
# Expect one or more digits.
yield from self.yield_while(is_digit)
# Check to see if the next char is a decimal point.
c = self.next_char()
if c != PERIOD:
# Not a decimal point so stuff it back and return.
self.stuff_char(c)
return
# It is a decimal point.
yield c
# Expect the next character to be a digit.
yield self.expect(is_digit)[0]
# Yield any remaining digits.
yield from self.yield_while(is_digit)
def parse(self):
# Start parsing self.stream.
while True:
# Get the next event.
event, value_gen, expect = self.next_event()
# If event is EOF, we've reached the end of the stream.
if event is Events.EOF:
return
# Yield the event and any value generator.
yield event, value_gen
# If a value generator hasn't been fully consumed, drain it.
if value_gen is not None:
for _ in value_gen:
pass
# If next_event() returned something to expect next, push it.
if expect is not None:
self.expect_stack.append(expect)
def next_event(self):
"""Attempt to match the next stream character to what's on the top of
the expect stack and return a tuple in the format:
( <event>, <value-generator-or-None>, <expected-next-or-None> )
"""
# Call expect() with the next item from the expect_stack.
c, matcher = self.expect(self.expect_stack.pop())
if matcher == Matchers.EOF:
# Char is an empty string which indicates that the input stream has
# been exhausted.
return Events.EOF, None, None
if c == Matchers.ARRAY_OPEN:
# Char is an array initiator (i.e. '[').
# If the context is array or object, push the appropriate value
# onto the container_value_context_stack.
if matcher == Matchers.IS_ARRAY_VALUE_START:
self.container_value_context_stack.append(ARRAY_VALUE_CONTEXT)
elif matcher == Matchers.IS_OBJECT_VALUE_START:
self.container_value_context_stack.append(OBJECT_VALUE_CONTEXT)
# Expect an array terminator (which is already on the stack) to
# follow.
return (
Events.ARRAY_OPEN,
None,
(Matchers.IS_ARRAY_VALUE_START, Matchers.ARRAY_CLOSE)
)
if c == Matchers.OBJECT_OPEN:
# Char is an object initiator (i.e. '{')
# If the context is array or object, push the appropriate value
# onto the container_value_context_stack.
if matcher == Matchers.IS_ARRAY_VALUE_START:
self.container_value_context_stack.append(ARRAY_VALUE_CONTEXT)
elif matcher == Matchers.IS_OBJECT_VALUE_START:
self.container_value_context_stack.append(OBJECT_VALUE_CONTEXT)
# Expect an object key, item separator, or object terminator (which
# is already on the stack) to follow.
return (
Events.OBJECT_OPEN,
None,
(Matchers.IS_OBJECT_KEY_START, Matchers.OBJECT_CLOSE)
)
if matcher == Matchers.ARRAY_CLOSE:
# Char is an array terminator (i.e. ']')
# If container_value_context_stack is non-empty, pop the last
# context and expect whatever's appropriate to follow.
expect = None
if self.container_value_context_stack:
context = self.container_value_context_stack.pop()
item_sep_matcher = (
Matchers.IS_ARRAY_ITEM_SEP
if context == ARRAY_VALUE_CONTEXT
else Matchers.IS_OBJECT_ITEM_SEP
)
expect = (item_sep_matcher, self.expect_stack.pop())
return Events.ARRAY_CLOSE, None, expect
if matcher == Matchers.OBJECT_CLOSE:
# Char is an object terminator (i.e. '}').
# If container_value_context_stack is non-empty, pop the last
# context and expect whatever's appropriate to follow.
expect = None
if self.container_value_context_stack:
context = self.container_value_context_stack.pop()
item_sep_matcher = (
Matchers.IS_ARRAY_ITEM_SEP
if context == ARRAY_VALUE_CONTEXT
else Matchers.IS_OBJECT_ITEM_SEP
)
expect = (item_sep_matcher, self.expect_stack.pop())
return Events.OBJECT_CLOSE, None, expect
if matcher == Matchers.IS_OBJECT_KEY_START:
            # Char is the expected object key's opening double-quote.
            # Expect an object key/value separator (i.e. ':') to follow.
return Events.OBJECT_KEY, self.parse_string(), Matchers.KV_SEP
if matcher == Matchers.KV_SEP:
# Char is an object key / value separator (i.e. ':')
# Expect an object value (e.g. string, number, null) to follow.
return Events.KV_SEP, None, Matchers.IS_OBJECT_VALUE_START
if matcher == Matchers.IS_OBJECT_ITEM_SEP:
# Char is an item separator (i.e. ',') in a post-object-value
# context. Expect an object key or object terminator (which is
# already on the stack) to follow.
return (
Events.OBJECT_ITEM_SEP,
None,
(Matchers.IS_OBJECT_KEY_START, self.expect_stack.pop())
)
if matcher == Matchers.IS_ARRAY_ITEM_SEP:
# Char is an item separator (i.e. ',') in a post-array-value
            # context. Expect an array value, item separator (thus accommodating
# unlimited trailing commas), or array terminator to follow.
return (
Events.ARRAY_ITEM_SEP,
None,
(Matchers.IS_ARRAY_VALUE_START, self.expect_stack.pop())
)
if c == Matchers.STRING_START:
# Char is a string initiator (i.e. '"')
# Return the event along with a string value parser/generator.
if matcher == Matchers.IS_OBJECT_VALUE_START:
event = Events.OBJECT_VALUE_STRING
# Maybe expect an object item separator next.
expect = Matchers.IS_OBJECT_ITEM_SEP, self.expect_stack.pop()
elif matcher == Matchers.IS_ARRAY_VALUE_START:
event = Events.ARRAY_VALUE_STRING
# Maybe expect an array item separator next.
expect = Matchers.IS_ARRAY_ITEM_SEP, self.expect_stack.pop()
else:
event = Events.STRING
expect = None
return event, self.parse_string(), expect
if Matchers.IS_NUMBER_START(c):
# Char is a number initiator (i.e. '-' or a digit)
# Return the event along with a number value parser/generator.
if matcher == Matchers.IS_OBJECT_VALUE_START:
event = Events.OBJECT_VALUE_NUMBER
# Maybe expect an object item separator next.
expect = Matchers.IS_OBJECT_ITEM_SEP, self.expect_stack.pop()
elif matcher == Matchers.IS_ARRAY_VALUE_START:
event = Events.ARRAY_VALUE_NUMBER
# Maybe expect an array item separator next.
expect = Matchers.IS_ARRAY_ITEM_SEP, self.expect_stack.pop()
else:
event = Events.NUMBER
expect = None
# parse_number() is going to need this first character, so stuff it
# back in.
self.stuff_char(c)
return event, self.parse_number(), expect
if c == Matchers.NULL_START:
# Char is a null initiator (i.e. 'n'), expect the remaining chars.
self.expect(b'u')
self.expect(b'l')
self.expect(b'l')
if matcher == Matchers.IS_OBJECT_VALUE_START:
event = Events.OBJECT_VALUE_NULL
# Maybe expect an object item separator next.
expect = Matchers.IS_OBJECT_ITEM_SEP, self.expect_stack.pop()
elif matcher == Matchers.IS_ARRAY_VALUE_START:
event = Events.ARRAY_VALUE_NULL
# Maybe expect an array item separator next.
expect = Matchers.IS_ARRAY_ITEM_SEP, self.expect_stack.pop()
else:
event = Events.NULL
expect = None
return event, None, expect
if c == Matchers.TRUE_START:
# Char is a true initiator (i.e. 't'), expect the remaining chars.
self.expect(b'r')
self.expect(b'u')
self.expect(b'e')
if matcher == Matchers.IS_OBJECT_VALUE_START:
event = Events.OBJECT_VALUE_TRUE
# Maybe expect an object item separator next.
expect = Matchers.IS_OBJECT_ITEM_SEP, self.expect_stack.pop()
elif matcher == Matchers.IS_ARRAY_VALUE_START:
event = Events.ARRAY_VALUE_TRUE
# Maybe expect an array item separator next.
expect = Matchers.IS_ARRAY_ITEM_SEP, self.expect_stack.pop()
else:
event = Events.TRUE
expect = None
return event, None, expect
if c == Matchers.FALSE_START:
# Char is a false initiator (i.e. 'f'), expect the remaining chars.
self.expect(b'a')
self.expect(b'l')
self.expect(b's')
self.expect(b'e')
if matcher == Matchers.IS_OBJECT_VALUE_START:
event = Events.OBJECT_VALUE_FALSE
# Maybe expect an object item separator next.
expect = Matchers.IS_OBJECT_ITEM_SEP, self.expect_stack.pop()
elif matcher == Matchers.IS_ARRAY_VALUE_START:
event = Events.ARRAY_VALUE_FALSE
# Maybe expect an array item separator next.
expect = Matchers.IS_ARRAY_ITEM_SEP, self.expect_stack.pop()
else:
event = Events.FALSE
expect = None
return event, None, expect
# Something went wrong :shrug:
raise AssertionError(c, matcher)
def convert(self, event, value):
# Convert a parsed value to a Python type.
if (event == Events.ARRAY_VALUE_NULL
or event == Events.OBJECT_VALUE_NULL
or event == Events.NULL):
return None
if (event == Events.ARRAY_VALUE_TRUE
or event == Events.OBJECT_VALUE_TRUE
or event == Events.TRUE):
return True
if (event == Events.ARRAY_VALUE_FALSE
or event == Events.OBJECT_VALUE_FALSE
or event == Events.FALSE):
return False
if (event == Events.ARRAY_VALUE_STRING
or event == Events.OBJECT_VALUE_STRING
or event == Events.STRING
or event == Events.OBJECT_KEY):
return b''.join(value).decode(self.encoding)
if (event == Events.ARRAY_VALUE_NUMBER
or event == Events.OBJECT_VALUE_NUMBER
or event == Events.NUMBER):
s = b''.join(value)
# Cast to either float or int based on presence of a decimal place.
return float(s) if PERIOD in s else int(s)
raise NotImplementedError(event, value)
def yield_paths(self, paths):
# Yield ( <path>, <value-generator> ) tuples for all specified paths
# that exist in the data.
#
# paths must be an iterable of lists of byte strings and integers in
# the format:
# [ '<object-key>', <array-index>, ... ]
# Example:
# [ 'people', 0, 'first_name' ]
#
# Track the indexes of the paths in paths to be yielded so that we can
# abort as soon as all requested paths have been yielded.
unyielded_path_idxs = set(range(len(paths)))
# Define the current path stack.
path = []
parse_gen = self.parse()
for event, value in parse_gen:
if event == Events.OBJECT_OPEN:
# An object has opened.
# If the current path node is an array index, increment it.
if path and isinstance(path[-1], int):
path[-1] += 1
# For each unyielded path, attempt to match it against the
# current path. If it matches, yield the event and remove the
# path index from the unyielded set.
yielded = False
for i in unyielded_path_idxs:
if path == paths[i]:
                        # Hand the generator to load() so it consumes and builds
                        # this entire container, then yield the result.
yield path, self.load(parse_gen)
unyielded_path_idxs.remove(i)
yielded = True
break
if not yielded:
# If this container was not already load()ed and yielded,
# Append an empty object indicator to the current path, to
# be overwritten by the next parsed key.
path.append(PERIOD)
continue
elif event == Events.OBJECT_CLOSE:
# The object has closed.
# Pop it from the current path.
path.pop()
continue
elif event == Events.ARRAY_OPEN:
# An array has opened.
# If the current path node is an array index, increment it.
if path and isinstance(path[-1], int):
path[-1] += 1
# For each unyielded path, attempt to match it against the
# current path. If it matches, yield the event and remove the
# path index from the unyielded set.
yielded = False
for i in unyielded_path_idxs:
if path == paths[i]:
                        # Hand the generator to load() so it consumes and builds
                        # this entire container, then yield the result.
yield path, self.load(parse_gen)
unyielded_path_idxs.remove(i)
yielded = True
break
if not yielded:
# If this container was not already load()ed and yielded,
                    # append an array index of -1 to the current path, to be
                    # incremented on the next parsed array value.
path.append(-1)
continue
elif event == Events.ARRAY_CLOSE:
# The array has closed.
# Pop it from the current path.
path.pop()
continue
elif event == Events.OBJECT_KEY:
# We parsed an object key.
# Overwrite the current path node with the key value.
path[-1] = self.convert(Events.OBJECT_KEY, value)
elif (event == Events.ARRAY_VALUE_STRING
or event == Events.ARRAY_VALUE_NUMBER
or event == Events.ARRAY_VALUE_NULL
or event == Events.ARRAY_VALUE_TRUE
or event == Events.ARRAY_VALUE_FALSE):
# We parsed an array value.
# Increment the current path node array index.
path[-1] += 1
# For each unyielded path, attempt to match it against the
# current path. If it matches, yield the event and remove the
# path index from the unyielded set.
for i in unyielded_path_idxs:
if path == paths[i]:
yield path, self.convert(event, value)
unyielded_path_idxs.remove(i)
break
elif (event == Events.OBJECT_VALUE_STRING
or event == Events.OBJECT_VALUE_NUMBER
or event == Events.OBJECT_VALUE_NULL
or event == Events.OBJECT_VALUE_TRUE
or event == Events.OBJECT_VALUE_FALSE):
# We parsed an object value.
# For each unyielded path, attempt to match it against the
# current path. If it matches, yield the event and remove the
# path index from the unyielded set.
for i in unyielded_path_idxs:
if path == paths[i]:
yield path, self.convert(event, value)
unyielded_path_idxs.remove(i)
break
# Abort if all of the requested paths have been yielded.
if len(unyielded_path_idxs) == 0:
return
def load(self, parse_gen=None):
# If parse_gen is specified, parse the single next value in the stream,
# otherwise parse the entire stream, and return a single Python object,
# similar to the built-in json.load() / json.loads() behavior.
if parse_gen is None:
parse_gen = self.parse()
# Initialize the value based on the first read.
event, value = next(parse_gen)
# If it's a single scalar value, convert and return it.
if (event == Events.STRING
or event == Events.NUMBER
or event == Events.NULL
or event == Events.TRUE
or event == Events.FALSE):
return self.convert(event, value)
# Create an initial, root object to represent the initial container.
if (event == Events.OBJECT_OPEN or event == Events.OBJECT_KEY):
root = {}
elif (event == Events.ARRAY_OPEN or event.startswith('ARRAY_VALUE_')):
root = []
else:
raise NotImplementedError(event)
# Create a stack to store the hierarchy of open container objects.
container_stack = []
# Define the current container object. Building the final object will
# entail in-place mutation of whatever object 'container' points to.
container = root
# Define a place to store the last-parsed object key.
key = None
def open_container(_container):
nonlocal container
# Attach the new container to the one that's currently open.
if type(container) is list:
container.append(_container)
else:
container[key] = _container
# Push the currently-open container onto the stack.
container_stack.append(container)
# Set the new container as the current.
container = _container
def close_container():
# Close the current container object and reopen the last one.
nonlocal container
container = container_stack.pop()
# If we're already in the context of an array or object item, use
# it to init the container state.
if event.startswith('ARRAY_VALUE_'):
container.append(self.convert(event, value))
elif event == Events.OBJECT_KEY:
key = self.convert(event, value)
# Start parsing.
for event, value in parse_gen:
if event == Events.ARRAY_OPEN:
# An array just opened so open a new list container.
open_container([])
elif event == Events.OBJECT_OPEN:
                # An object just opened so open a new dict container.
open_container({})
elif event == Events.ARRAY_CLOSE or event == Events.OBJECT_CLOSE:
# The current array or object container just closed.
# If there are no open containers, stop parsing.
if len(container_stack) == 0:
break
# Close the current container and reopen the last one.
close_container()
elif (event == Events.ARRAY_VALUE_STRING
or event == Events.ARRAY_VALUE_NUMBER
or event == Events.ARRAY_VALUE_NULL
or event == Events.ARRAY_VALUE_TRUE
or event == Events.ARRAY_VALUE_FALSE):
# We just parsed an array value.
# Append it to the current list container.
container.append(self.convert(event, value))
elif event == Events.OBJECT_KEY:
# We just parsed an object key. Record it.
key = self.convert(event, value)
elif (event == Events.OBJECT_VALUE_STRING
or event == Events.OBJECT_VALUE_NUMBER
or event == Events.OBJECT_VALUE_NULL
or event == Events.OBJECT_VALUE_TRUE
or event == Events.OBJECT_VALUE_FALSE):
# We just parsed an object value.
# Use the last-parsed object key to create an item in the
# current object container.
container[key] = self.convert(event, value)
# Return the mutated root object.
return root
###############################################################################
# CLI
###############################################################################
def convert_dot_path_to_yield_path(path):
# Convert the dot-delimited --path argument to a path list required by
# Parser.yield_paths().
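    # For example (hypothetical paths, using the double-dot escape described in the
    # --path help below):
    #   'people.0.first_name' -> ['people', 0, 'first_name']
    #   'a..b.c'              -> ['a.b', 'c']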
final_path = []
i = 0
splits = [int(seg) if seg.isdigit() else seg for seg in path.split('.')]
splits_len = len(splits)
while i < splits_len:
seg = splits[i]
if seg != '':
final_path.append(seg)
else:
            # An empty seg indicates the presence of a double-dot, which is
            # used to escape a literal dot inside a segment value.
if final_path:
final_path[-1] += '.' + splits[i + 1]
else:
final_path.append('.' + splits[i + 1])
i += 1
i += 1
return final_path
def convert_yielded_key_to_dot_path(key):
return '.'.join(str(seg) if isinstance(seg, int) else seg for seg in key)
if __name__ == '__main__':
import argparse
from io import BytesIO
from json import dumps
arg_parser = argparse.ArgumentParser()
g = arg_parser.add_mutually_exclusive_group()
g.add_argument('--file', type=argparse.FileType('rb'))
g.add_argument('--string', type=str)
arg_parser.add_argument('--action', choices=('load', 'parse'),
default="load")
arg_parser.add_argument('--path', type=str, action='append',
help='Dot-delimited path specifier with dots in '\
'keys escaped as a double-dot')
args = arg_parser.parse_args()
if args.string:
args.file = BytesIO(args.string.encode('utf-8'))
if args.path and args.action != 'load':
arg_parser.error('Please specify --action=load when using --path')
parser = Parser(args.file)
if args.action == 'load':
if not args.path:
# Load it all and pretty-print the result.
print(dumps(parser.load(), indent=2))
else:
# Load only the specified paths.
result = {}
            # Reject paths where one is a prefix of another, since that would
            # request both a container and one of its sub-objects; the
            # container itself is read/consumed before the sub-object ever
            # has a chance.
num_paths = len(args.path)
for a in args.path:
for b in args.path:
if a == b:
continue
if b.startswith(a) and b[len(a)] == '.':
arg_parser.error(
'Specifying container sub-elements ({}) and the '\
'container itself ({}) is not supported.'
.format(b, a)
)
# Convert the dot-delimited paths to path segments lists as
# required by Parser.yield_paths().
paths = list(map(convert_dot_path_to_yield_path, args.path))
for key, value in parser.yield_paths(paths):
# Convert the yielded key back to a dot path.
key = convert_yielded_key_to_dot_path(key)
result[key] = value
# Print the result as JSON.
print(dumps(result, indent=2))
elif args.action == 'parse':
for event, value in parser.parse():
if value is not None:
value = parser.convert(event, value)
print(event, value)
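# Example invocations (illustrative; assumes this module is saved as parser.py):
#   python parser.py --string '{"a": {"b": [1, 2]}}' --action parse
#   python parser.py --string '{"a": {"b": [1, 2]}}' --path a.b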
|
python
|
from ex112_1.utilidadescev import moeda, dados
preco = dados.leiadinheiro('Digite o preço: ')
moeda.resumo(preco, 90, 35)
|
python
|
#!/usr/bin/env python3
"""
Filters for masking stationary or near stationary data based on vessel speed
"""
import numpy as np
def mask_stationary(Sv, speed, threshold):
"""
Mask stationary or near stationary data based on vessel speed
Args:
Sv (float): 2D numpy array with Sv data to be masked (dB)
speed (float): 1D numpy array with vessel speed data (knots)
threshold (int): speed below which Sv data will be masked (knots)
Returns:
bool: 2D numpy array mask (stationary = True)
float: 2D numpy array with Sv data masked with NAN values
"""
    # TODO: need to implement distance and speed retrieval in PyEchoLab
    # which seems not to be working yet?
    # Minimal sketch of the masking itself, following the docstring above;
    # assumes the ping dimension (matching the speed vector) is the first
    # axis of Sv.
    mask = np.broadcast_to((speed < threshold)[:, np.newaxis], Sv.shape)
    Sv_masked = np.where(mask, np.nan, Sv)
    return mask, Sv_masked
|
python
|
# TemperatureConverter Tests
import unittest
from TemperatureConverter import TemperatureConverter
from TemperatureErrors import *
class KnownValues(unittest.TestCase):
knownValues = (
(0, 32),
(100, 212)
)
temp_converter = TemperatureConverter()
# test value conversions
def testCtoF(self):
"""cToF should return the known Fahrenheit value for the
provided Celsius value."""
for c_val, f_val in self.knownValues:
result = self.temp_converter.cToF(c_val)
self.assertEqual(result, f_val)
def testCtoK(self):
"""cToK should return 273.15 Kelvin for 0 Celsius"""
result = self.temp_converter.cToK(0)
self.assertEqual(result, 273.15)
def testFtoC(self):
"""fToC should return the known Celsius value for the
provided Fahrenheit value."""
for c_val, f_val in self.knownValues:
result = self.temp_converter.fToC(f_val)
self.assertEqual(result, c_val)
def testFtoK(self):
"""fToK should return 273.15 Kelvin for 32 Fahrenheit"""
result = self.temp_converter.fToK(32)
self.assertEqual(result, 273.15)
def testKtoC(self):
"""kToC should return 0 Celsius for 273.15 Kelvin"""
result = self.temp_converter.kToC(273.15)
self.assertEqual(result, 0)
def testKtoF(self):
"""kToF should return 32 Fahrenheit for 273.15 Kelvin"""
result = self.temp_converter.kToF(273.15)
self.assertEqual(result, 32)
# sanity checks
def testCToKtoC(self):
"""Celsius to Kelvin to Celsius yields initial Celsius value"""
original_celsius_value = 20
kelvin_value = self.temp_converter.cToK(original_celsius_value)
new_celsius_value = self.temp_converter.kToC(kelvin_value)
self.assertEqual(original_celsius_value, new_celsius_value)
def testCToFToC(self):
"""Celsius to Fahrenheit to Celsius yields initial Celsius value"""
original_celsius_value = 20
fahrenheit_value = self.temp_converter.cToF(original_celsius_value)
new_celsius_value = self.temp_converter.fToC(fahrenheit_value)
self.assertEqual(original_celsius_value, new_celsius_value)
def testKToFToK(self):
"""Kelvin to Fahrenheit to Kelvin yields initial Kelvin value"""
original_kelvin_value = 20
fahrenheit_value = self.temp_converter.kToF(original_kelvin_value)
new_kelvin_value = self.temp_converter.fToK(fahrenheit_value)
self.assertEqual(original_kelvin_value, new_kelvin_value)
# test range exceptions
def testCtoFRange(self):
"""cToF should raise CelsiusRangeError if Celsius is less than -273.15"""
self.assertRaises(CelsiusRangeError, self.temp_converter.cToF, -274)
def testCtoKRange(self):
"""cToK should raise CelsiusRangeError if Celsius is less than -273.15"""
self.assertRaises(CelsiusRangeError, self.temp_converter.cToK, -274)
def testFtoCRange(self):
"""fToC should raise FahrenheitRangeError if Fahrenheit is less than -459.67"""
self.assertRaises(FahrenheitRangeError, self.temp_converter.fToC, -460)
def testFtoKRange(self):
"""fToK should raise FahrenheitRangeError if Fahrenheit is less than -459.67"""
self.assertRaises(FahrenheitRangeError, self.temp_converter.fToK, -460)
def testKtoCRange(self):
"""kToC should raise KelvinRangeError if Kelvin is less than 0"""
self.assertRaises(KelvinRangeError, self.temp_converter.kToC, -1)
def testKtoFRange(self):
"""kToF should raise KelvinRangeError if Kelvin is less than 0"""
self.assertRaises(KelvinRangeError, self.temp_converter.kToF, -1)
if __name__ == '__main__':
unittest.main()
|
python
|
# Reads float and prints BRL money conversion to JPY
# In 2020/11/01 JP¥ 1.00 = R$ 0.060 :)
brl = float(input('Por favor, digite quanto dinheiro você tem na carteira: R$ '))
jpy = brl / 0.060
print('Com R$ {:.2f} você pode comprar ¥ {:.2f}.'.format(brl, jpy))
|
python
|
import os
def print_error(message):
    print("An error occurred: " + message)
def remove_white_chars(line):
line = line.replace('\t', '')
line = line.replace(' ', '')
line = line.replace('\n', '')
line = line.replace('\r', '')
return line
def remove_comments_and_empty_lines(text):
new_text = ''
first_line = True
for line in text.split("\n"):
line = remove_white_chars(line)
if len(line) > 0:
index = line.find('#')
if index >= 0:
line = line[:index]
if first_line:
new_text = line
first_line = False
else:
new_text += "\n" + line
return new_text
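# Illustrative example: remove_comments_and_empty_lines("a = 1  # note\n\nb = 2")
# returns "a=1\nb=2" (whitespace stripped, comment and blank line dropped).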
def get_path_of_main_file():
return os.path.dirname(os.path.realpath(__file__))
|
python
|
import os
import logging
from dp4py_config.section import Section
from dp4py_config.utils import bool_env
from dp4py_sanic.config import CONFIG as SANIC_CONFIG
from dp_conceptual_search.config.utils import read_git_sha
def get_log_level(variable: str, default: str="INFO"):
"""
Returns the configured log level, and logs error if invalid
:param variable:
:param default:
:return:
"""
from dp4py_sanic.logging.log_config import get_level_name
level = os.environ.get(variable, default)
if isinstance(level, str):
level = level.upper()
try:
return get_level_name(level)
except NotImplementedError as e:
logging.error("Caught exception parsing log level", exc_info=e)
raise SystemExit()
# APP
APP_CONFIG = Section("APP config")
APP_CONFIG.sanic = SANIC_CONFIG
APP_CONFIG.app_version = read_git_sha()
APP_CONFIG.title = 'dp-conceptual-search'
APP_CONFIG.description = 'Dedicated search API for digital publishing.'
# API
API_CONFIG = Section("API config")
API_CONFIG.enabled_prometheus_metrics = bool_env('ENABLE_PROMETHEUS_METRICS', False)
API_CONFIG.testing = bool_env("TESTING", False)
API_CONFIG.conceptual_search_enabled = bool_env("CONCEPTUAL_SEARCH_ENABLED", False)
API_CONFIG.redirect_conceptual_search = bool_env("REDIRECT_CONCEPTUAL_SEARCH", False)
API_CONFIG.recommended_search_enabled = bool_env("RECOMMENDED_SEARCH_ENABLED", False)
# ML
ML_CONFIG = Section("Machine Learning config")
ML_CONFIG.unsupervised_model_filename = os.environ.get("UNSUPERVISED_MODEL_FILENAME",
"./dp_conceptual_search/ml/data/word2vec/ons_supervised.vec")
FASTTEXT_CONFIG = Section("FastText config")
FASTTEXT_CONFIG.fasttext_host = os.environ.get("DP_FASTTEXT_HOST", "localhost")
FASTTEXT_CONFIG.fasttext_port = int(os.environ.get("DP_FASTTEXT_PORT", 5100))
FASTTEXT_CONFIG.num_labels = int(os.environ.get("FASTTEXT_NUM_LABELS", 5))
FASTTEXT_CONFIG.threshold = float(os.environ.get("FASTTEXT_THRESHOLD", 0.0))
# Elasticsearch
ELASTIC_SEARCH_CONFIG = Section("Elasticsearch config")
ELASTIC_SEARCH_CONFIG.server = os.environ.get("ELASTIC_SEARCH_SERVER", "http://localhost:9200")
ELASTIC_SEARCH_CONFIG.async_enabled = bool_env("ELASTIC_SEARCH_ASYNC_ENABLED", True)
ELASTIC_SEARCH_CONFIG.timeout = int(os.environ.get("ELASTIC_SEARCH_TIMEOUT", 1000))
ELASTIC_SEARCH_CONFIG.elasticsearch_log_level = get_log_level("ELASTICSEARCH_LOG_LEVEL", default="INFO")
# Search
SEARCH_CONFIG = Section("Search API config")
SEARCH_CONFIG.default_search_index = "ons"
SEARCH_CONFIG.search_index = os.environ.get("SEARCH_INDEX", SEARCH_CONFIG.default_search_index)
SEARCH_CONFIG.departments_search_index = "departments"
SEARCH_CONFIG.results_per_page = int(os.getenv("RESULTS_PER_PAGE", 10))
SEARCH_CONFIG.max_visible_paginator_link = int(os.getenv("MAX_VISIBLE_PAGINATOR_LINK", 5))
SEARCH_CONFIG.max_request_size = int(os.getenv("SEARCH_MAX_REQUEST_SIZE", 200))
|
python
|
from federatedscope.tabular.model.quadratic import QuadraticModel
__all__ = ['QuadraticModel']
|
python
|
string = """0: "HOW ResidentSleeper LONG ResidentSleeper CAN ResidentSleeper THIS ResidentSleeper GO ResidentSleeper ON ResidentSleeper"
1: "REPPIN LONDON ONTARIO 519 GANG"
2: "my eyes"
3: "CLULG"
4: "ResidentSleeper"
5: "mvp ward"
6: "4Head EU WATCHING NA 4Head STILL AWAKE 4Head NO JOB 4Head NO RESPONSIBILITIES 4Head TYPICAL EU 4Head"
7: "***"
8: "ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper"
9: "JUST GO IN ResidentSleeper"
10: "PogU"
11: "ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper"
12: "EU WON A DREAMHACK TOURAMENT WITH NO KOREANS LUL"
13: "zonySleeper"
14: "santorin has to make a play"
15: "CLG 10th place"
16: "FIGHTTTTT"
17: "ResidentSleeper ResidentSleeper"
18: "IS THIS GROUNDHOGS GAME?? ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper"
19: "6k lead and doing nothing LUL"
20: "HOW ResidentSleeper LONG ResidentSleeper CAN ResidentSleeper THIS ResidentSleeper GO ResidentSleeper ON ResidentSleeper"
21: "HEY CASTERS! how many stacks does turtle have??"
22: "NARAM"
23: "LOOOOOOOOOL ResidentSleeper"
24: "ResidentSleeper ResidentSleeper ResidentSleeper"
25: "SKIP ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper"
26: "ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper ResidentSleeper"
27: "ResidentSleeper exciting game ResidentSleeper exciting game ResidentSleeper exciting game ResidentSleeper exciting game"
28: "WHY DOES VIPER HAVE 700G BOUNTY WTF"
29: "bioftost sweepers are awful LUL"""
clean = string.replace("\n", "").split('"')
arr = []
for i in range(len(clean)):
if i % 2 != 0:
arr.append(clean[i])
print(arr)
|
python
|
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from trove.tests.scenario import groups
from trove.tests.scenario.groups.test_group import TestGroup
from trove.tests.scenario.runners import test_runners
GROUP = "scenario.instance_delete_group"
class InstanceDeleteRunnerFactory(test_runners.RunnerFactory):
_runner_ns = 'instance_delete_runners'
_runner_cls = 'InstanceDeleteRunner'
@test(depends_on_groups=[groups.INST_CREATE_WAIT],
groups=[GROUP, groups.INST_DELETE],
runs_after_groups=[groups.INST_INIT_DELETE,
groups.INST_ACTIONS,
groups.INST_UPGRADE,
groups.INST_ACTIONS_RESIZE_WAIT,
groups.BACKUP_INST_DELETE,
groups.BACKUP_INC_INST_DELETE,
groups.CFGGRP_INST_DELETE,
groups.DB_ACTION_DELETE,
groups.DB_ACTION_INST_DELETE,
groups.MODULE_INST_DELETE,
groups.REPL_INST_DELETE_WAIT,
groups.ROOT_ACTION_INST_DELETE,
groups.USER_ACTION_DELETE,
groups.USER_ACTION_INST_DELETE])
class InstanceDeleteGroup(TestGroup):
"""Test Instance Delete functionality."""
def __init__(self):
super(InstanceDeleteGroup, self).__init__(
InstanceDeleteRunnerFactory.instance())
@test
def instance_delete(self):
"""Delete an existing instance."""
self.test_runner.run_instance_delete()
@test(depends_on_groups=[groups.INST_DELETE],
groups=[GROUP, groups.INST_DELETE_WAIT],
runs_after_groups=[groups.BACKUP_INST_DELETE_WAIT,
groups.BACKUP_INC_INST_DELETE_WAIT,
groups.CFGGRP_INST_DELETE_WAIT,
groups.DB_ACTION_INST_DELETE_WAIT,
groups.MODULE_INST_DELETE_WAIT,
groups.REPL_INST_DELETE_WAIT,
groups.ROOT_ACTION_INST_DELETE_WAIT,
groups.USER_ACTION_INST_DELETE_WAIT])
class InstanceDeleteWaitGroup(TestGroup):
"""Test that Instance Delete Completes."""
def __init__(self):
super(InstanceDeleteWaitGroup, self).__init__(
InstanceDeleteRunnerFactory.instance())
@test
def instance_delete_wait(self):
"""Wait for existing instance to be gone."""
self.test_runner.run_instance_delete_wait()
|
python
|
from reading import Maze, MazeReader
class Node(object):
"""Nodes for evolving in graph."""
def __init__(self, maze, x, y, parent=None):
"""Initialize node. Keep maze, parent, and position."""
self.maze = maze
self.x = x
self.y = y
self.parent = parent
self.up = None
self.left = None
self.down = None
self.right = None
def lookup_around(self):
"""Look if it's possible to go up, left, right and down. It it is,
expand properly everything, and put it into self.up, self.left,
self.right and self.down."""
x = self.x
y = self.y
maze = self.maze.maze
def around(x, y, maze):
"""Look around to see if cases are occupied."""
fields = [False, False, False, False]
# Particular cases.
            # Left edge (x == 0).
if x == 0:
fields[0] = None
fields[2] = None
if y == len(maze):
fields[3] = None
if maze[y - 1][x] == 0:
fields[1] = True
elif y == 0:
fields[1] = None
if maze[y][x] == 0:
fields[3] = True
else:
if maze[y][x] == 0:
fields[3] = True
if maze[y - 1][x] == 0:
fields[1] = True
            # Top edge (y == 0).
elif y == 0:
fields[0] = None
fields[1] = None
if x == len(maze[0]):
fields[3] = None
if maze[y][x - 1] == 0:
fields[2] = True
else:
if maze[y][x] == 0:
fields[3] = True
if maze[y][x - 1] == 0:
fields[2] = True
            # Bottom edge (y == len(maze)).
elif y == len(maze):
fields[2] = None
fields[3] = None
if x == len(maze[0]):
fields[1] = None
if maze[y - 1][x - 1] == 0:
fields[0] = True
else:
if maze[y - 1][x - 1] == 0:
fields[0] = True
if maze[y - 1][x] == 0:
fields[1] = True
            # Right edge (x == len(maze[0])).
elif x == len(maze[0]):
fields[1] = None
fields[3] = None
if y == len(maze):
fields[2] = None
if maze[y - 1][x - 1] == 0:
fields[0] = True
else:
if maze[y - 1][x - 1] == 0:
fields[0] = True
if maze[y][x - 1] == 0:
fields[2] = True
# General cases.
else:
if maze[y - 1][x - 1] == 0:
fields[0] = True
if maze[y - 1][x] == 0:
fields[1] = True
if maze[y][x - 1] == 0:
fields[2] = True
if maze[y][x] == 0:
fields[3] = True
return fields
def obstacle(cases):
"""Look if there's an obstacle near the point."""
for case in cases:
if case is None or case is False:
return True
return False
fields = around(x, y, maze)
if fields[0] is True and fields[1] is True:
if obstacle(around(x, y - 1, maze)) is False:
self.up = Node(self.maze, x, y - 1, self)
if fields[0] is True and fields[2] is True:
if obstacle(around(x - 1, y, maze)) is False:
self.left = Node(self.maze, x - 1, y, self)
if fields[2] is True and fields[3] is True:
if obstacle(around(x, y + 1, maze)) is False:
self.down = Node(self.maze, x, y + 1, self)
if fields[3] is True and fields[1] is True:
if obstacle(around(x + 1, y, maze)) is False:
self.right = Node(self.maze, x + 1, y, self)
def spaces(self, number):
"""Affichage du noeud."""
if self.up:
up = "Haut !"
else:
up = 'None'
if self.left:
left = "Gauche !"
else:
left = 'None'
if self.down:
down = "Bas !"
else:
down = 'None'
if self.right:
right = "Droite !"
else:
right = 'None'
rstring = number * ' ' + 'Noeud x: ' + str(self.x) + ' et y: ' + str(self.y) + '\n'
rstring += number * ' ' + up + '\n' + number * ' ' + left + '\n'
rstring += number * ' ' + down + '\n' + number * ' ' + right
return rstring
def __str__(self):
return self.spaces(0)
|
python
|
import pyps
from sys import argv
if len(argv) == 1:
# this source include a main() with a call to foo() ; but foo() isn't defined !
w=pyps.workspace("broker01.c")
# We give a method to resolve missing module (here foo())
w.props.preprocessor_missing_file_handling="external_resolver"
w.props.preprocessor_missing_file_generator="python broker01.py"
# We display with cumulated effects because we want callees to be computed
w.fun.main.display(pyps.module.print_code_cumulated_effects)
else:
# tricky, use the test file as a simple broker too :p
print "foo.c"
|
python
|
from typing import Any, Dict, Tuple
from dateutil.parser import parse
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.execution_engine.execution_engine import (
MetricDomainTypes,
MetricPartialFunctionTypes,
)
from great_expectations.expectations.metrics.map_metric import MapMetricProvider
from great_expectations.expectations.metrics.metric_provider import metric_partial
from great_expectations.expectations.metrics.util import filter_pair_metric_nulls
class ColumnPairValuesAGreaterThanB(MapMetricProvider):
condition_metric_name = "column_pair_values.a_greater_than_b"
condition_value_keys = (
"ignore_row_if",
"or_equal",
"parse_strings_as_datetimes",
"allow_cross_type_comparisons",
)
domain_keys = ("batch_id", "table", "column_A", "column_B")
@metric_partial(
engine=PandasExecutionEngine,
partial_fn_type=MetricPartialFunctionTypes.MAP_CONDITION_SERIES,
domain_type=MetricDomainTypes.COLUMN_PAIR,
)
def _pandas(
cls,
execution_engine: "PandasExecutionEngine",
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
ignore_row_if = metric_value_kwargs.get("ignore_row_if")
if not ignore_row_if:
ignore_row_if = "both_values_are_missing"
or_equal = metric_value_kwargs.get("or_equal")
parse_strings_as_datetimes = metric_value_kwargs.get(
"parse_strings_as_datetimes"
)
allow_cross_type_comparisons = metric_value_kwargs.get(
"allow_cross_type_comparisons"
)
df, compute_domain, accessor_domain = execution_engine.get_compute_domain(
metric_domain_kwargs, MetricDomainTypes.COLUMN_PAIR
)
column_A, column_B = filter_pair_metric_nulls(
df[metric_domain_kwargs["column_A"]],
df[metric_domain_kwargs["column_B"]],
ignore_row_if=ignore_row_if,
)
if allow_cross_type_comparisons:
raise NotImplementedError
if parse_strings_as_datetimes:
temp_column_A = column_A.map(parse)
temp_column_B = column_B.map(parse)
else:
temp_column_A = column_A
temp_column_B = column_B
if or_equal:
return temp_column_A >= temp_column_B, compute_domain, accessor_domain
else:
return temp_column_A > temp_column_B, compute_domain, accessor_domain
|
python
|
from authlib.common.urls import quote, unquote
def escape(s):
return quote(s, safe=b'~')
def unescape(s):
return unquote(s)
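# Illustrative example: escape('a b/c') returns 'a%20b%2Fc' (the '/' is not in
# the safe set here), and unescape('a%20b%2Fc') returns 'a b/c'.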
|
python
|
import json
from typing import Optional
import pyrebase
from .database import Database
class FirebaseDatabase(Database):
def __init__(self, serialised_config: str):
super().__init__()
self.config = json.loads(serialised_config)
def add_document(self, doc_id: str, doc: dict) -> None:
self._upload_analysis(doc_id, doc)
def get_document_as_str(self, doc_id: str) -> Optional[str]:
return self._fetch_analysis_data(doc_id)
def _open_db_connection(self):
return pyrebase.initialize_app(self.config).database()
def _upload_analysis(self, replay_id: str, replay_analysis: dict) -> None:
db = self._open_db_connection()
db.child("zerg_macro_analyses").child(replay_id).set(
json.dumps(replay_analysis))
def _fetch_analysis_data(self, replay_id: str) -> str:
db = self._open_db_connection()
analysis_data = db.child("zerg_macro_analyses").child(
replay_id).get().val()
return analysis_data if analysis_data else ""
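# Illustrative usage sketch (the config values below are placeholders, not real
# credentials):
#   db = FirebaseDatabase('{"apiKey": "...", "authDomain": "...", '
#                         '"databaseURL": "https://example.firebaseio.com", '
#                         '"storageBucket": "..."}')
#   db.add_document("replay-123", {"larvae_per_minute": 3})
#   print(db.get_document_as_str("replay-123"))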
|
python
|
from .s3 import *
|
python
|
#!/usr/bin/env python3
"""
Get jobs from aCT.
Returns:
1: No proxy found.
2: One of the elements in job list is not a range.
3: One of the elements in job list is not a valid ID.
    5: tmp directory not configured.
    10: No jobs specified.
"""
import argparse
import sys
import shutil
import os
import logging
import act.client.jobmgr as jobmgr
import act.client.clicommon as clicommon
from act.client.errors import TargetDirExistsError
from act.client.errors import InvalidJobRangeError
from act.client.errors import InvalidJobIDError
from act.client.errors import TmpConfigurationError
from act.client.errors import NoJobDirectoryError
def getLocalDir(jobdir, dirname=''):
"""
Assemble destination directory for job results.
Raises:
TargetDirExistsError: Destination for job results already exists.
"""
if dirname:
dstDir = os.path.join(dirname, jobdir)
else:
dstDir = os.path.join(os.getcwd(), jobdir)
if not os.path.exists(dstDir):
return dstDir
else:
raise TargetDirExistsError(dstDir)
def main():
# parse arguments
parser = argparse.ArgumentParser(description='Get jobs from aCT')
parser.add_argument('-a', '--all', action='store_true',
help='all jobs that match other criteria')
parser.add_argument('-j', '--jobs', default='',
help='comma separated list of job IDs or ranges')
parser.add_argument('-f', '--find', default='',
help='get only jobs with matching (sub)string in their name')
parser.add_argument('-s', '--state', default='',
help='get only jobs with certain state')
parser.add_argument('-v', '--verbose', action='store_true',
help='show more information')
parser.add_argument('-p', '--proxy', default=None,
help='custom path to proxy certificate')
parser.add_argument('-n', '--no-clean', action='store_true',
help='do not clean jobs')
clicommon.showHelpOnCommandOnly(parser)
args = parser.parse_args()
# logging
logFormat = "[%(asctime)s] [%(filename)s:%(lineno)d] [%(levelname)s] - %(message)s"
if args.verbose:
logging.basicConfig(format=logFormat, level=logging.DEBUG, stream=sys.stdout)
else:
logging.basicConfig(format=logFormat, level=logging.DEBUG, filename=os.devnull)
# create a list of jobs to work on
if args.all:
jobs = [] # empty means all jobs
elif args.jobs:
try:
jobs = jobmgr.getIDsFromList(args.jobs)
except InvalidJobRangeError as e:
print("error: range '{}' is not a valid range".format(e.jobRange))
sys.exit(2)
except InvalidJobIDError as e:
print("error: ID '{}' is not a valid ID".format(e.jobid))
sys.exit(3)
else:
print("error: no jobs specified (use -a or -j)")
sys.exit(10)
# get proxy ID given proxy
proxyid = clicommon.getProxyIdFromProxy(args.proxy)
# get job info
manager = jobmgr.JobManager()
try:
results = manager.getJobs(proxyid, jobs, args.state, args.find)
except TmpConfigurationError:
print('error: tmp directory not configured')
sys.exit(5)
if not results.jobdicts:
print('no jobs to get')
sys.exit(0)
# copy job results
dontRemove = []
for result in results.jobdicts:
try:
if result['dir']: # if there are job results in tmp
dst_dirname = os.path.basename(os.path.normpath(result['name']))
dstdir = getLocalDir(dst_dirname)
shutil.copytree(result['dir'], dstdir)
print('Results stored at: {}'.format(dstdir))
else:
raise NoJobDirectoryError(result['dir'])
except NoJobDirectoryError as e:
print('error: tmp results directory {} does not exist'.format(e.jobdir))
except TargetDirExistsError as e:
print('error: job destination {} already exists'.format(e.dstdir))
            # don't clean jobs whose results could not be retrieved
dontRemove.append(result['id'])
    # drop jobs that should not be cleaned from the results structure
for jobid in dontRemove:
for result in results.jobdicts:
if result['id'] == jobid:
jobix = results.clientIDs.index(result['id'])
del results.clientIDs[jobix]
del results.arcIDs[jobix]
del results.jobdicts[jobix]
# clean jobs
if not args.no_clean:
manager.forceCleanJobs(results)
if __name__ == '__main__':
main()
|
python
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="Turkish Topic Model",
version="0.0.1",
author="Ali ÇİMEN, Sevinç GÜLSEÇEN",
author_email="cimenwd@gmailcom, [email protected]",
description="Türkçe metin ön işleme ve konu analizi konusunda hazırlanmış fonksiyonlar kümesi",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
install_requires=['pymysql >= 1.0.2',
'pandas >= 1.3.5',
'jpype1 >=1.3.0',
'requests >= 2.26.0',
'nltk >= 3.6.7',
'tomotopy >= 0.12.2']
)
|
python
|
import pandas
from sklearn.model_selection import train_test_split
def add_series(X, y, name):
"""Converts list (y) to pd.Series with name (name)
then adds it to a dataframe (X)"""
X = X.copy()
series = pandas.Series(data=y, name=name)
X[name] = series
return X
def data_split(X):
"""Splits dataframe into train, validate, and test sets"""
train, test = train_test_split(X, random_state=20)
train, val = train_test_split(train, random_state=20)
return train, val, test
if __name__ == "__main__":
df = pandas.DataFrame({'odd': [1, 3, 5]})
test_list = [2, 4, 6]
df = add_series(df, test_list, 'even')
    print(df)
|
python
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
import re
class ResCompany(models.Model):
_inherit = 'res.company'
org_number = fields.Char(compute='_compute_org_number')
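    # Illustrative example: a Swedish company with vat 'SE556677889901' gets the
    # computed org_number '556677-8899' (digits only, last two dropped, dash added).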
@api.depends('vat')
def _compute_org_number(self):
for company in self:
if company.country_id.code == "SE" and company.vat:
org_number = re.sub(r'\D', '', company.vat)[:-2]
org_number = org_number[:6] + '-' + org_number[6:]
company.org_number = org_number
else:
company.org_number = ''
|
python
|
import numpy as np
from ..functions import B_nu, dB_nu_dT
from ..integrate import integrate_loglog
from ..constants import sigma, k, c
def test_b_nu():
nu = np.logspace(-20, 20., 10000)
for T in [10, 100, 1000, 10000]:
# Compute planck function
b = B_nu(nu, T)
        # Check that the integral is correct
total = integrate_loglog(nu, b)
np.testing.assert_allclose(total, sigma * T ** 4 / np.pi, rtol=1e-4)
        # Check that we reach the Rayleigh-Jeans limit at low frequencies
rj = 2. * nu ** 2 * k * T / c**2
np.testing.assert_allclose(b[nu < 1e-10], rj[nu < 1e-10], rtol=1.e-8)
def test_db_nu_dt():
nu = np.logspace(-20, 20., 10000)
for T in [10, 100, 1000, 10000]:
# Compute exact planck function derivative
db = dB_nu_dT(nu, T)
# Compute numerical planck function derivative
dT = T / 1e6
b1 = B_nu(nu, T - dT)
b2 = B_nu(nu, T + dT)
db_num = 0.5 * (b2 - b1) / dT
# Check that the two are the same
np.testing.assert_allclose(db, db_num, rtol=1.e-2)
|
python
|
"""Myia Pytorch frontend."""
from .pytorch import *
|
python
|
"""
Dictionary Embedder Class
"""
import spacy
from .base import BaseEmbedder
class DictionaryEmbedder(BaseEmbedder):
"""Base Embedder class extended for implementing text embedders"""
def __init__(self, spacy_pkg='en_vectors_web_lg', embedding_length=None):
super(DictionaryEmbedder, self).__init__()
self.nlp = spacy.load(spacy_pkg)
self.embedding_length = embedding_length
def train(self, input_data):
"""Training is not required because spacy stores the vocab
"""
pass
def process(self, input_data):
"""Split the input text into an array of tokens, and replace each
token with the unique identifier for that token in the spacy vocab
"""
spacy_doc = self.nlp(unicode(input_data))
embeddings = []
for token in spacy_doc:
embeddings.append(token.rank)
if self.embedding_length \
and len(embeddings) == self.embedding_length:
break
        if self.embedding_length and len(embeddings) < self.embedding_length:
            # Pad with zeros up to the requested fixed length.
            embeddings.extend([0] * (self.embedding_length - len(embeddings)))
return embeddings
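# Illustrative usage (requires the 'en_vectors_web_lg' spaCy model to be
# installed; the sample sentence is arbitrary):
#   embedder = DictionaryEmbedder(embedding_length=8)
#   token_ids = embedder.process(u"the quick brown fox")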
|
python
|
# -*- coding: utf-8 -*-
"""Tests for Terminal methods that account for sequences in strings"""
# std imports
import os
import sys
import struct
import platform
import itertools
# 3rd party
import six
import pytest
# local
from .accessories import TestTerminal, as_subprocess
from .conftest import IS_WINDOWS
if platform.system() != 'Windows':
import fcntl
import termios
def test_length_cjk():
"""Test length of East Asian characters"""
@as_subprocess
def child():
term = TestTerminal()
# given,
given = term.bold_red(u'コンニチハ, セカイ!')
expected = sum((2, 2, 2, 2, 2, 1, 1, 2, 2, 2, 1,))
# exercise,
assert term.length(given) == expected
child()
def test_length_ansiart():
"""Test length of ANSI art"""
@as_subprocess
def child(kind):
import codecs
term = TestTerminal(kind=kind)
# this 'ansi' art contributed by xzip!impure for another project,
# unlike most CP-437 DOS ansi art, this is actually utf-8 encoded.
fname = os.path.join(os.path.dirname(__file__), 'wall.ans')
with codecs.open(fname, 'r', 'utf-8') as ansiart:
lines = ansiart.readlines()
assert term.length(lines[0]) == 67 # ^[[64C^[[34m▄▓▄
assert term.length(lines[1]) == 75
assert term.length(lines[2]) == 78
assert term.length(lines[3]) == 78
assert term.length(lines[4]) == 78
assert term.length(lines[5]) == 78
assert term.length(lines[6]) == 77
kind = 'vtwin10' if IS_WINDOWS else 'xterm-256color'
child(kind)
def test_sequence_length(all_terms):
"""Ensure T.length(string containing sequence) is correcterm."""
# pylint: disable=too-complex,too-many-statements
@as_subprocess
def child(kind):
term = TestTerminal(kind=kind, force_styling=True)
# Make sure to test with 24-bit color on at least one terminal
if kind == 'xterm':
term.number_of_colors = 1 << 24
# Create a list of ascii characters, to be separated
# by word, to be zipped up with a cycling list of
# terminal sequences. Then, compare the length of
        # each, the basic plain_text.__len__ vs. the Terminal
# method length. They should be equal.
plain_text = (u'The softest things of the world '
u'Override the hardest things of the world '
u'That which has no substance '
u'Enters into that which has no openings')
if term.bold:
assert (term.length(term.bold) == 0)
assert (term.length(term.bold(u'x')) == 1)
assert (term.length(term.bold_red) == 0)
assert (term.length(term.bold_red(u'x')) == 1)
assert (term.length(term.bold_on_red) == 0)
assert (term.length(term.bold_on_red(u'x')) == 1)
assert (term.length(term.bold_olivedrab4) == 0)
assert (term.length(term.bold_olivedrab4(u'x')) == 1)
assert (term.length(term.bold_on_olivedrab4) == 0)
assert (term.length(term.bold_on_olivedrab4(u'x')) == 1)
assert (term.strip(term.bold) == u'')
assert (term.rstrip(term.bold) == u'')
assert (term.lstrip(term.bold) == u'')
assert (term.strip(term.bold(u' x ')) == u'x')
assert (term.strip(term.bold(u'z x q'), 'zq') == u' x ')
assert (term.rstrip(term.bold(u' x ')) == u' x')
assert (term.lstrip(term.bold(u' x ')) == u'x ')
assert (term.strip(term.bold_red) == u'')
assert (term.rstrip(term.bold_red) == u'')
assert (term.lstrip(term.bold_red) == u'')
assert (term.strip(term.bold_on_red) == u'')
assert (term.rstrip(term.bold_on_red) == u'')
assert (term.lstrip(term.bold_on_red) == u'')
assert (term.strip(term.bold_olivedrab4) == u'')
assert (term.rstrip(term.bold_olivedrab4) == u'')
assert (term.lstrip(term.bold_olivedrab4) == u'')
assert (term.strip(term.bold_on_olivedrab4) == u'')
assert (term.rstrip(term.bold_on_olivedrab4) == u'')
assert (term.lstrip(term.bold_on_olivedrab4) == u'')
assert (term.strip(term.bold_red(u' x ')) == u'x')
assert (term.rstrip(term.bold_red(u' x ')) == u' x')
assert (term.lstrip(term.bold_red(u' x ')) == u'x ')
assert (term.strip(term.bold_on_red(u' x ')) == u'x')
assert (term.rstrip(term.bold_on_red(u' x ')) == u' x')
assert (term.lstrip(term.bold_on_red(u' x ')) == u'x ')
assert (term.strip(term.bold_olivedrab4(u' x ')) == u'x')
assert (term.rstrip(term.bold_olivedrab4(u' x ')) == u' x')
assert (term.lstrip(term.bold_olivedrab4(u' x ')) == u'x ')
assert (term.strip(term.bold_on_olivedrab4(u' x ')) == u'x')
assert (term.rstrip(term.bold_on_olivedrab4(u' x ')) == u' x')
assert (term.lstrip(term.bold_on_olivedrab4(u' x ')) == u'x ')
assert (term.strip_seqs(term.bold) == u'')
assert (term.strip_seqs(term.bold(u' x ')) == u' x ')
assert (term.strip_seqs(term.bold_red) == u'')
assert (term.strip_seqs(term.bold_red(u' x ')) == u' x ')
assert (term.strip_seqs(term.bold_on_red) == u'')
assert (term.strip_seqs(term.bold_on_red(u' x ')) == u' x ')
assert (term.strip_seqs(term.bold_olivedrab4) == u'')
assert (term.strip_seqs(term.bold_olivedrab4(u' x ')) == u' x ')
assert (term.strip_seqs(term.bold_on_olivedrab4) == u'')
assert (term.strip_seqs(term.bold_on_olivedrab4(u' x ')) == u' x ')
if term.underline:
assert (term.length(term.underline) == 0)
assert (term.length(term.underline(u'x')) == 1)
assert (term.length(term.underline_red) == 0)
assert (term.length(term.underline_red(u'x')) == 1)
assert (term.length(term.underline_on_red) == 0)
assert (term.length(term.underline_on_red(u'x')) == 1)
assert (term.length(term.underline_olivedrab4) == 0)
assert (term.length(term.underline_olivedrab4(u'x')) == 1)
assert (term.length(term.underline_on_olivedrab4) == 0)
assert (term.length(term.underline_on_olivedrab4(u'x')) == 1)
assert (term.strip(term.underline) == u'')
assert (term.strip(term.underline(u' x ')) == u'x')
assert (term.strip(term.underline_red) == u'')
assert (term.strip(term.underline_red(u' x ')) == u'x')
assert (term.rstrip(term.underline_red(u' x ')) == u' x')
assert (term.lstrip(term.underline_red(u' x ')) == u'x ')
assert (term.strip(term.underline_on_red) == u'')
assert (term.strip(term.underline_on_red(u' x ')) == u'x')
assert (term.rstrip(term.underline_on_red(u' x ')) == u' x')
assert (term.lstrip(term.underline_on_red(u' x ')) == u'x ')
assert (term.strip(term.underline_olivedrab4) == u'')
assert (term.strip(term.underline_olivedrab4(u' x ')) == u'x')
assert (term.rstrip(term.underline_olivedrab4(u' x ')) == u' x')
assert (term.lstrip(term.underline_olivedrab4(u' x ')) == u'x ')
assert (term.strip(term.underline_on_olivedrab4) == u'')
assert (term.strip(term.underline_on_olivedrab4(u' x ')) == u'x')
assert (term.rstrip(term.underline_on_olivedrab4(u' x ')) == u' x')
assert (term.lstrip(term.underline_on_olivedrab4(u' x ')) == u'x ')
assert (term.strip_seqs(term.underline) == u'')
assert (term.strip_seqs(term.underline(u' x ')) == u' x ')
assert (term.strip_seqs(term.underline_red) == u'')
assert (term.strip_seqs(term.underline_red(u' x ')) == u' x ')
assert (term.strip_seqs(term.underline_on_red) == u'')
assert (term.strip_seqs(term.underline_on_red(u' x ')) == u' x ')
assert (term.strip_seqs(term.underline_olivedrab4) == u'')
assert (term.strip_seqs(term.underline_olivedrab4(u' x ')) == u' x ')
assert (term.strip_seqs(term.underline_on_olivedrab4) == u'')
assert (term.strip_seqs(term.underline_on_olivedrab4(u' x ')) == u' x ')
if term.reverse:
assert (term.length(term.reverse) == 0)
assert (term.length(term.reverse(u'x')) == 1)
assert (term.length(term.reverse_red) == 0)
assert (term.length(term.reverse_red(u'x')) == 1)
assert (term.length(term.reverse_on_red) == 0)
assert (term.length(term.reverse_on_red(u'x')) == 1)
assert (term.length(term.reverse_olivedrab4) == 0)
assert (term.length(term.reverse_olivedrab4(u'x')) == 1)
assert (term.length(term.reverse_on_olivedrab4) == 0)
assert (term.length(term.reverse_on_olivedrab4(u'x')) == 1)
assert (term.strip(term.reverse) == u'')
assert (term.strip(term.reverse(u' x ')) == u'x')
assert (term.strip(term.reverse_red) == u'')
assert (term.strip(term.reverse_red(u' x ')) == u'x')
assert (term.rstrip(term.reverse_red(u' x ')) == u' x')
assert (term.lstrip(term.reverse_red(u' x ')) == u'x ')
assert (term.strip(term.reverse_on_red) == u'')
assert (term.strip(term.reverse_on_red(u' x ')) == u'x')
assert (term.rstrip(term.reverse_on_red(u' x ')) == u' x')
assert (term.lstrip(term.reverse_on_red(u' x ')) == u'x ')
assert (term.strip(term.reverse_olivedrab4) == u'')
assert (term.strip(term.reverse_olivedrab4(u' x ')) == u'x')
assert (term.rstrip(term.reverse_olivedrab4(u' x ')) == u' x')
assert (term.lstrip(term.reverse_olivedrab4(u' x ')) == u'x ')
assert (term.strip(term.reverse_on_olivedrab4) == u'')
assert (term.strip(term.reverse_on_olivedrab4(u' x ')) == u'x')
assert (term.rstrip(term.reverse_on_olivedrab4(u' x ')) == u' x')
assert (term.lstrip(term.reverse_on_olivedrab4(u' x ')) == u'x ')
assert (term.strip_seqs(term.reverse) == u'')
assert (term.strip_seqs(term.reverse(u' x ')) == u' x ')
assert (term.strip_seqs(term.reverse_red) == u'')
assert (term.strip_seqs(term.reverse_red(u' x ')) == u' x ')
assert (term.strip_seqs(term.reverse_on_red) == u'')
assert (term.strip_seqs(term.reverse_on_red(u' x ')) == u' x ')
assert (term.strip_seqs(term.reverse_olivedrab4) == u'')
assert (term.strip_seqs(term.reverse_olivedrab4(u' x ')) == u' x ')
assert (term.strip_seqs(term.reverse_on_olivedrab4) == u'')
assert (term.strip_seqs(term.reverse_on_olivedrab4(u' x ')) == u' x ')
if term.blink:
assert (term.length(term.blink) == 0)
assert (term.length(term.blink(u'x')) == 1)
assert (term.length(term.blink_red) == 0)
assert (term.length(term.blink_red(u'x')) == 1)
assert (term.length(term.blink_on_red) == 0)
assert (term.length(term.blink_on_red(u'x')) == 1)
assert (term.length(term.blink_olivedrab4) == 0)
assert (term.length(term.blink_olivedrab4(u'x')) == 1)
assert (term.length(term.blink_on_olivedrab4) == 0)
assert (term.length(term.blink_on_olivedrab4(u'x')) == 1)
assert (term.strip(term.blink) == u'')
assert (term.strip(term.blink(u' x ')) == u'x')
assert (term.strip(term.blink(u'z x q'), u'zq') == u' x ')
assert (term.strip(term.blink_red) == u'')
assert (term.strip(term.blink_red(u' x ')) == u'x')
assert (term.strip(term.blink_on_red) == u'')
assert (term.strip(term.blink_on_red(u' x ')) == u'x')
assert (term.strip(term.blink_olivedrab4) == u'')
assert (term.strip(term.blink_olivedrab4(u' x ')) == u'x')
assert (term.strip(term.blink_on_olivedrab4) == u'')
assert (term.strip(term.blink_on_olivedrab4(u' x ')) == u'x')
assert (term.strip_seqs(term.blink) == u'')
assert (term.strip_seqs(term.blink(u' x ')) == u' x ')
assert (term.strip_seqs(term.blink_red) == u'')
assert (term.strip_seqs(term.blink_red(u' x ')) == u' x ')
assert (term.strip_seqs(term.blink_on_red) == u'')
assert (term.strip_seqs(term.blink_on_red(u' x ')) == u' x ')
assert (term.strip_seqs(term.blink_olivedrab4) == u'')
assert (term.strip_seqs(term.blink_olivedrab4(u' x ')) == u' x ')
assert (term.strip_seqs(term.blink_on_olivedrab4) == u'')
assert (term.strip_seqs(term.blink_on_olivedrab4(u' x ')) == u' x ')
if term.home:
assert (term.length(term.home) == 0)
assert (term.strip(term.home) == u'')
if term.clear_eol:
assert (term.length(term.clear_eol) == 0)
assert (term.strip(term.clear_eol) == u'')
if term.enter_fullscreen:
assert (term.length(term.enter_fullscreen) == 0)
assert (term.strip(term.enter_fullscreen) == u'')
if term.exit_fullscreen:
assert (term.length(term.exit_fullscreen) == 0)
assert (term.strip(term.exit_fullscreen) == u'')
        # in terms of horizontal length, move_down and move_up count as 0,
assert (term.length(term.move_down) == 0)
assert (term.length(term.move_down(2)) == 0)
assert (term.length(term.move_up) == 0)
assert (term.length(term.move_up(2)) == 0)
# other things aren't so simple, somewhat edge cases,
# moving backwards and forwards horizontally must be
# accounted for as a "length", as <x><move right 10><y>
# will result in a printed column length of 12 (even
        # though columns 2-11 are non-destructive space)
assert (term.length(u'x\b') == 0)
assert (term.strip(u'x\b') == u'')
# XXX why are some terminals width of 9 here ??
assert (term.length(u'\t') in (8, 9))
assert (term.strip(u'\t') == u'')
assert (term.length(u'_' + term.move_left) == 0)
assert (term.length(term.move_right) == 1)
if term.cub:
assert (term.length((u'_' * 10) + term.cub(10)) == 0)
if term.cuf:
assert (term.length(term.cuf(10)) == 10)
# vertical spacing is unaccounted as a 'length'
assert (term.length(term.move_up) == 0)
assert (term.length(term.cuu(10)) == 0)
assert (term.length(term.move_down) == 0)
assert (term.length(term.cud(10)) == 0)
# this is how manpages perform underlining, this is done
        # with the 'overstrike' capability of teletypes, and apparently
# less(1), '123' -> '1\b_2\b_3\b_'
text_wseqs = u''.join(itertools.chain(
*zip(plain_text, itertools.cycle(['\b_']))))
assert (term.length(text_wseqs) == len(plain_text))
child(all_terms)
@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires cwcwidth which requires python3.6 or higher")
def test_length_zerowidth():
"""Test length of East Asian characters"""
@as_subprocess
def child():
term = TestTerminal()
# given,
given = term.bold_red(u'0123')
expected = sum((1, 1, 1, 1, 0, 0, 0, 0,))
# exercise,
assert term.length(given) == expected
child()
def test_env_winsize():
"""Test height and width is appropriately queried in a pty."""
@as_subprocess
def child():
# set the pty's virtual window size
os.environ['COLUMNS'] = '99'
os.environ['LINES'] = '11'
term = TestTerminal(stream=six.StringIO())
save_init = term._init_descriptor
save_stdout = sys.__stdout__
try:
term._init_descriptor = None
sys.__stdout__ = None
winsize = term._height_and_width()
width = term.width
height = term.height
finally:
term._init_descriptor = save_init
sys.__stdout__ = save_stdout
assert winsize.ws_col == width == 99
assert winsize.ws_row == height == 11
child()
@pytest.mark.skipif(IS_WINDOWS, reason="requires fcntl")
def test_winsize(many_lines, many_columns):
"""Test height and width is appropriately queried in a pty."""
pixel_width, pixel_height = 1024, 768
@as_subprocess
def child(lines=25, cols=80):
# set the pty's virtual window size
val = struct.pack('HHHH', lines, cols, pixel_width, pixel_height)
fcntl.ioctl(sys.__stdout__.fileno(), termios.TIOCSWINSZ, val)
term = TestTerminal()
winsize = term._height_and_width()
assert term.width == cols
assert term.height == lines
assert winsize.ws_col == cols
assert winsize.ws_row == lines
assert term.pixel_width == pixel_width
assert term.pixel_height == pixel_height
child(lines=many_lines, cols=many_columns)
def test_Sequence_alignment_fixed_width(all_terms):
"""Test alignment methods with width provided"""
@as_subprocess
def child(kind):
term = TestTerminal(kind=kind)
pony_msg = 'pony express, all aboard, choo, choo!'
pony_len = len(pony_msg)
pony_colored = u''.join('%s%s' % (term.color(n % 7), ch,)
for n, ch in enumerate(pony_msg))
pony_colored += term.normal
ladjusted = term.ljust(pony_colored, 88)
radjusted = term.rjust(pony_colored, 88)
centered = term.center(pony_colored, 88)
assert (term.length(pony_colored) == pony_len)
assert (term.length(centered.strip()) == pony_len)
assert (term.length(centered) == len(pony_msg.center(88)))
assert (term.length(ladjusted.strip()) == pony_len)
assert (term.length(ladjusted) == len(pony_msg.ljust(88)))
assert (term.length(radjusted.strip()) == pony_len)
assert (term.length(radjusted) == len(pony_msg.rjust(88)))
child(kind=all_terms)
@pytest.mark.skipif(IS_WINDOWS, reason="requires fcntl")
def test_Sequence_alignment(all_terms):
"""Tests methods related to Sequence class, namely ljust, rjust, center."""
@as_subprocess
def child(kind, lines=25, cols=80):
# set the pty's virtual window size
val = struct.pack('HHHH', lines, cols, 0, 0)
fcntl.ioctl(sys.__stdout__.fileno(), termios.TIOCSWINSZ, val)
term = TestTerminal(kind=kind)
pony_msg = 'pony express, all aboard, choo, choo!'
pony_len = len(pony_msg)
pony_colored = u''.join('%s%s' % (term.color(n % 7), ch,)
for n, ch in enumerate(pony_msg))
pony_colored += term.normal
ladjusted = term.ljust(pony_colored)
radjusted = term.rjust(pony_colored)
centered = term.center(pony_colored)
assert (term.length(pony_colored) == pony_len)
assert (term.length(centered.strip()) == pony_len)
assert (term.length(centered) == len(pony_msg.center(term.width)))
assert (term.length(ladjusted.strip()) == pony_len)
assert (term.length(ladjusted) == len(pony_msg.ljust(term.width)))
assert (term.length(radjusted.strip()) == pony_len)
assert (term.length(radjusted) == len(pony_msg.rjust(term.width)))
child(kind=all_terms)
def test_hyperlink_nostyling():
"""Test length our of hyperlink URL's."""
@as_subprocess
def child():
# given,
term = TestTerminal(force_styling=None)
given_basic_url = term.link(
'https://blessed.readthedocs.org', 'blessed')
assert given_basic_url == 'blessed'
child()
def test_basic_hyperlinks():
"""Test length our of hyperlink URL's."""
@as_subprocess
def child():
# given,
term = TestTerminal()
given_basic_url = term.link(
'https://blessed.readthedocs.org', 'blessed')
# exercise,
split_parts = term.split_seqs(given_basic_url)
# verify
if term.does_styling:
assert split_parts[0] == '\x1b]8;;https://blessed.readthedocs.org\x1b\\'
assert term.length(split_parts[0]) == 0
assert ''.join(split_parts[1:8]) == 'blessed'
assert split_parts[8] == '\x1b]8;;\x1b\\'
assert len(split_parts) == 9
else:
assert ''.join(split_parts) == 'blessed'
child()
def test_hyperlink_with_id():
"""Test length our of hyperlink URL's with ID."""
@as_subprocess
def child():
# given,
term = TestTerminal()
given_advanced_urltext = term.link(
'https://blessed.readthedocs.org', 'blessed', '123')
# exercise,
split_parts = term.split_seqs(given_advanced_urltext)
# verify,
if term.does_styling:
assert split_parts[0] == '\x1b]8;id=123;https://blessed.readthedocs.org\x1b\\'
assert term.length(split_parts[0]) == 0
assert ''.join(split_parts[1:8]) == 'blessed'
assert split_parts[8] == '\x1b]8;;\x1b\\'
assert len(split_parts) == 9
else:
assert ''.join(split_parts) == 'blessed'
child()
def test_sequence_is_movement_false(all_terms):
"""Test parser about sequences that do not move the cursor."""
@as_subprocess
def child(kind):
from blessed.sequences import measure_length
term = TestTerminal(kind=kind)
assert measure_length(u'', term) == 0
# not even a mbs
assert measure_length(u'xyzzy', term) == 0
# negative numbers, though printable as %d, do not result
# in movement; just garbage. Also not a valid sequence.
assert measure_length(term.cuf(-333), term) == 0
assert (len(term.clear_eol) == measure_length(term.clear_eol, term))
# various erases don't *move*
assert (len(term.clear_bol) == measure_length(term.clear_bol, term))
assert (len(term.clear_eos) == measure_length(term.clear_eos, term))
assert (len(term.bold) == measure_length(term.bold, term))
# various paints don't move
assert (len(term.red) == measure_length(term.red, term))
assert (len(term.civis) == measure_length(term.civis, term))
if term.cvvis:
assert (len(term.cvvis) == measure_length(term.cvvis, term))
assert (len(term.underline) == measure_length(term.underline, term))
assert (len(term.reverse) == measure_length(term.reverse, term))
for _num in (0, term.number_of_colors):
expected = len(term.color(_num))
given = measure_length(term.color(_num), term)
assert (expected == given)
assert (len(term.normal_cursor) == measure_length(term.normal_cursor, term))
assert (len(term.hide_cursor) == measure_length(term.hide_cursor, term))
assert (len(term.save) == measure_length(term.save, term))
assert (len(term.italic) == measure_length(term.italic, term))
assert (len(term.standout) == measure_length(term.standout, term)
), (term.standout, term._wont_move)
child(all_terms)
def test_termcap_will_move_false(all_terms): # pylint: disable=too-complex,too-many-branches
"""Test parser about sequences that do not move the cursor."""
@as_subprocess
def child(kind): # pylint: disable=too-many-branches
from blessed.sequences import iter_parse
term = TestTerminal(kind=kind)
if term.clear_eol:
assert not next(iter_parse(term, term.clear_eol))[1].will_move
if term.clear_bol:
assert not next(iter_parse(term, term.clear_bol))[1].will_move
if term.clear_eos:
assert not next(iter_parse(term, term.clear_eos))[1].will_move
if term.bold:
assert not next(iter_parse(term, term.bold))[1].will_move
if term.red:
assert not next(iter_parse(term, term.red))[1].will_move
if term.civis:
assert not next(iter_parse(term, term.civis))[1].will_move
if term.cvvis:
assert not next(iter_parse(term, term.cvvis))[1].will_move
if term.underline:
assert not next(iter_parse(term, term.underline))[1].will_move
if term.reverse:
assert not next(iter_parse(term, term.reverse))[1].will_move
if term.color(0):
assert not next(iter_parse(term, term.color(0)))[1].will_move
if term.normal_cursor:
assert not next(iter_parse(term, term.normal_cursor))[1].will_move
if term.save:
assert not next(iter_parse(term, term.save))[1].will_move
if term.italic:
assert not next(iter_parse(term, term.italic))[1].will_move
if term.standout:
assert not next(iter_parse(term, term.standout))[1].will_move
child(all_terms)
def test_sequence_is_movement_true(all_terms):
"""Test parsers about sequences that move the cursor."""
@as_subprocess
def child(kind):
from blessed.sequences import measure_length
term = TestTerminal(kind=kind)
# movements
assert (len(term.move(98, 76)) ==
measure_length(term.move(98, 76), term))
assert (len(term.move(54)) ==
measure_length(term.move(54), term))
assert (len(term.move_xy(1, 2)) ==
measure_length(term.move(1, 2), term))
assert (len(term.move_yx(3, 4)) ==
measure_length(term.move(3, 4), term))
assert not term.cud1 or (len(term.cud1) ==
measure_length(term.cud1, term))
assert not term.cub1 or (len(term.cub1) ==
measure_length(term.cub1, term))
assert not term.cuf1 or (len(term.cuf1) ==
measure_length(term.cuf1, term))
assert not term.cuu1 or (len(term.cuu1) ==
measure_length(term.cuu1, term))
assert not term.cub or (len(term.cub(333)) ==
measure_length(term.cub(333), term))
assert not term.cuf or (len(term.cuf(333)) ==
measure_length(term.cuf(333), term))
assert not term.home or (len(term.home) ==
measure_length(term.home, term))
assert not term.restore or (len(term.restore) ==
measure_length(term.restore, term))
assert not term.clear or (len(term.clear) ==
measure_length(term.clear, term))
child(all_terms)
def test_termcap_will_move_true(all_terms):
"""Test parser about sequences that move the cursor."""
@as_subprocess
def child(kind):
from blessed.sequences import iter_parse
term = TestTerminal(kind=kind, force_styling=True)
assert next(iter_parse(term, term.move(98, 76)))[1].will_move
assert next(iter_parse(term, term.move_yx(8, 76)))[1].will_move
assert next(iter_parse(term, term.move_xy(98, 7)))[1].will_move
assert next(iter_parse(term, term.move(54)))[1].will_move
assert next(iter_parse(term, term.cud1))[1].will_move
assert next(iter_parse(term, term.cub1))[1].will_move
assert next(iter_parse(term, term.cuf1))[1].will_move
assert next(iter_parse(term, term.cuu1))[1].will_move
if term.cub(333):
assert next(iter_parse(term, term.cub(333)))[1].will_move
if term.cuf(333):
assert next(iter_parse(term, term.cuf(333)))[1].will_move
assert next(iter_parse(term, term.home))[1].will_move
assert next(iter_parse(term, term.restore))[1].will_move
assert next(iter_parse(term, term.clear))[1].will_move
child(all_terms)
def test_foreign_sequences():
"""Test parsers about sequences received from foreign sources."""
@as_subprocess
def child(kind):
from blessed.sequences import measure_length
term = TestTerminal(kind=kind)
assert measure_length(u'\x1b[m', term) == len('\x1b[m')
child(kind='ansi')
|
python
|
def get_token(fi='dont.mess.with.me'):
with open(fi, 'r') as f:
return f.readline().strip()
t = get_token()
|
python
|
import re
from address_extractor import (
unit_type,
zipcode,
street_direction,
street_type,
cities,
)
class InvalidAddressError(Exception):
pass
class Address(object):
def __init__(self, tokens):
self.tokens = tuple(self._clean_tokens(tokens[:11]))
self.street_number_index = None
self.street_direction_index = None
self.street_name_range = None
self.street_type_index = None
self.unit_range = None
# self.unit_type_index = None
# self.unit_number_index = None
self.city_range = None
self.state_index = None
self.zipcode_index = None
self.error = None
self._remaining_indices = []
self._parse()
def _clean_tokens(self, original_tokens):
tokens = []
for token in original_tokens:
cleaned = token.replace(".", "").replace(",", "")
if cleaned.startswith("#"):
cleaned = cleaned.replace("#", "")
tokens.append("#")
tokens.append(cleaned)
return tokens
@property
def is_valid(self):
return self.error is None
def _ordered_parts(self):
return [
self.street_number,
self.street_direction,
self.street_name,
self.street_type,
self.unit,
self.city,
self.state,
self.zipcode,
]
def _render_parts(self):
parts = self._ordered_parts()
return " ".join([p for p in parts if p is not None])
def __str__(self):
if not self.is_valid:
return ""
return self._render_parts()
def __repr__(self):
if self.error is None:
msg = "<address_extractor.Address address: {addr}>"
return msg.format(addr=str(self))
else:
msg = "<address_extractor.Address error: {err}, address: {addr}>"
return msg.format(err=self.error, addr=self._render_parts())
def _parse(self):
"""
Programmatically and sequentially locate the most predictable parts
of an address.
"""
try:
self._remaining_indices = list(range(len(self.tokens)))
self._extract_street_number()
self._extract_state()
self._extract_zipcode()
self._extract_city()
self._remove_indices_after_zipcode()
self._extract_street_type()
self._extract_street_name()
self._extract_unit()
self._check_remaining_indices()
except InvalidAddressError:
pass
except IndexError:
self.error = "Invalid Address Format - Too short"
pass
@property
def street_number(self):
return self._get_by_index("street_number_index")
@property
def street_name(self):
return self._get_by_range("street_name_range")
@property
def city(self):
return self._get_by_range("city_range")
@property
def street_type(self):
return self._get_by_index("street_type_index")
@property
def state(self):
return self._get_by_index("state_index")
@property
def zipcode(self):
return self._get_by_index("zipcode_index")
@property
def unit(self):
return self._get_by_range("unit_range")
@property
def unit_number(self):
return self._get_by_index("unit_number_index")
@property
def street_direction(self):
return self._get_by_index("street_direction_index")
def _get_by_index(self, name):
index = getattr(self, name)
if index is not None:
return self.tokens[index]
def _get_by_range(self, name):
ranged = getattr(self, name)
if isinstance(ranged, tuple):
low = ranged[0]
high = ranged[1]
if high == low:
return self.tokens[low]
else:
return " ".join(self.tokens[low:high])
def _extract_state(self):
for index in range(len(self.tokens)):
if zipcode.is_state(self.tokens[index]):
self._remaining_indices.remove(index)
self.state_index = index
return
self.error = "State Not Found"
raise InvalidAddressError
def _extract_street_number(self):
if self.tokens[0].isnumeric():
self.street_number_index = 0
self._remaining_indices.remove(0)
return
self.error = "Invalid Street Number"
raise InvalidAddressError
def _extract_zipcode(self):
"""
depends_on:
- state_index
"""
index = self.state_index + 1
token = self.tokens[index]
if zipcode.is_zipcode_5(token) or zipcode.is_zipcode_dashed(token):
self.zipcode_index = index
self._remaining_indices.remove(index)
return
self.error = "Zipcode Not Found"
raise InvalidAddressError
def _extract_city(self):
maybe_city = []
for index in reversed(self._remaining_indices):
if not index < self.state_index:
# not interested in things found after the state
continue
maybe_city = [self.tokens[index]] + maybe_city
#print("maybe_city", maybe_city, "with index", index)
# the 'st' of `st louis` is not in the zipcode info
# so we expand the abbreviation 'st' into 'saint'
city_parts = [cities.expand_abbreviation(p) for p in maybe_city]
city = " ".join(city_parts)
#print("city is", city)
is_city = zipcode.is_valid_place(city, self.state, self.zipcode)
if is_city:
self.city_range = (index, self.state_index)
inner_range = range(self.state_index - index)
for inner in [x+index for x in inner_range]:
self._remaining_indices.remove(inner)
return
if not self.city_range:
self.error = "Invalid City/State/Zipcode Combo"
raise InvalidAddressError
def _extract_unit(self):
"""
No error from this method because it is optional
depends_on:
- city_range
- street_type_index
"""
start = self.street_type_index
stop = min(self.city_range)
unit_indices = []
has_a_unit_type = False
for index in reversed(self._remaining_indices):
if index > start and index < stop:
token = self.tokens[index]
if unit_type.is_unit_type(token):
has_a_unit_type = True
unit_indices.append(index)
if has_a_unit_type or (len(unit_indices) == 1 and self.tokens[unit_indices[0]].isnumeric()):
for index in unit_indices:
self._remaining_indices.remove(index)
self.unit_range = (min(unit_indices), stop)
# def _extract_street_direction(self):
# """
# No error from this method because it is optional
# depends on:
# - street_number_index
# - street_type_index
# """
# street_direction_index = self.street_number_index + 1
# if (street_direction_index + 1) == self.street_type_index:
# # if the street_type_index is the next index
# # then the thing at street_direction_index is the
# # street_name and not a direction no matter
# # what it looks like
# return
# maybe_direction = self.tokens[street_direction_index]
# if street_direction.is_direction(maybe_direction):
# self.street_direction_index = street_direction_index
# self._remaining_indices.remove(street_direction_index)
def _extract_street_type(self):
kept = []
# find all candidate street types
for index in self._remaining_indices:
if street_type.is_valid(self.tokens[index]):
kept.append(index)
# we want the first index that matches closest to the start of the city
# so we reverse the indices and then filter for only those
# indices that are before the city
city_starts = min(self.city_range)
for index in reversed(kept):
if index >= city_starts:
# the street type must come before the city starts
continue
self.street_type_index = index
self._remaining_indices.remove(index)
return
self.error = "No Street Type"
raise InvalidAddressError
def _extract_street_name(self):
"""
depends_on:
- street_number_index
- street_type_index
"""
limit = self.street_type_index
parts = [i for i in self._remaining_indices if i < limit]
if len(parts) > 4:
            self.error = "Street name too long"
raise InvalidAddressError
if len(parts) == 0:
self.error = "No Street Name"
raise InvalidAddressError
for i in parts:
self._remaining_indices.remove(i)
if len(parts) > 1:
direction_index = parts[0]
direction_token = self.tokens[direction_index]
is_direction = street_direction.is_direction(direction_token)
if is_direction:
self.street_direction_index = direction_index
parts.remove(direction_index)
self.street_name_range = (min(parts), self.street_type_index)
def _check_remaining_indices(self):
if len(self._remaining_indices) > 0:
self.error = "Address has unidentified parts"
raise InvalidAddressError
def _remove_indices_after_zipcode(self):
remaining = self._remaining_indices[:]
for index in remaining:
if index > self.zipcode_index:
self._remaining_indices.remove(index)
def tokenize_text(text):
    return [t for t in re.split(r"\s+", text) if len(t) > 0]
def extract_all(text):
addresses = []
tokens = tokenize_text(text)
skip_to = 0
# print("tokens", tokens)
for (index, token) in enumerate(tokens):
# print("scanning token", index, token)
if index < skip_to:
# print("skipping", index, token)
continue
if token.isnumeric():
# print("found numeric", token)
address = Address(tokens[index:])
if address.is_valid:
skip_to = index + address.zipcode_index + 1
# print("updated skip_to", skip_to, "by", address)
else:
# print("invalid address", address.error, address.tokens)
pass
addresses.append(address)
return addresses
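
# Illustrative usage sketch (not part of the original module): the sample text
# and the printed attributes are assumptions about how a caller might use
# extract_all(); only addresses that parsed successfully are printed.
if __name__ == "__main__":
    sample_text = "Contact us at 123 Main St Springfield IL 62701 or stop by."
    for candidate in extract_all(sample_text):
        if candidate.is_valid:
            print(candidate.street_name, candidate.street_type,
                  candidate.city, candidate.state, candidate.zipcode)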
|
python
|
import os.path
from newsplease.pipeline.pipelines.elements.extracted_information_storage import ExtractedInformationStorage
class HtmlFileStorage(ExtractedInformationStorage):
"""
Handles storage of the file on the local system
"""
# Save the html and filename to the local storage folder
def process_item(self, item, spider):
# Add a log entry confirming the save
self.log.info("Saving HTML to %s", item['abs_local_path'])
# Ensure path exists
dir_ = os.path.dirname(item['abs_local_path'])
os.makedirs(dir_, exist_ok=True)
# Write raw html to local file system
with open(item['abs_local_path'], 'wb') as file_:
file_.write(item['spider_response'].body)
return item
|
python
|
import json
from decimal import Decimal
import jwt
from django.core import mail
from mock import patch
from nose.tools import eq_, ok_
from amo import CONTRIB_PENDING, CONTRIB_PURCHASE
from amo.tests import TestCase
from amo.urlresolvers import reverse
from constants.payments import PROVIDER_BANGO
from market.models import Price, PriceCurrency
from users.models import UserProfile, GroupUser
from mkt.api.base import get_url, list_url
from mkt.api.tests.test_oauth import BaseOAuth
from mkt.constants import regions
from mkt.purchase.tests.utils import PurchaseTest
from mkt.site.fixtures import fixture
from mkt.webpay.models import ProductIcon
from stats.models import Contribution
class TestPrepare(PurchaseTest, BaseOAuth):
fixtures = fixture('webapp_337141', 'user_2519', 'prices')
def setUp(self):
BaseOAuth.setUp(self, api_name='webpay')
self.create_switch('marketplace')
self.list_url = list_url('prepare')
self.user = UserProfile.objects.get(pk=2519)
def test_allowed(self):
self._allowed_verbs(self.list_url, ['post'])
def test_anon(self):
eq_(self.anon.post(self.list_url, data={}).status_code, 401)
def test_good(self):
self.setup_base()
self.setup_package()
res = self.client.post(self.list_url, data=json.dumps({'app': 337141}))
contribution = Contribution.objects.get()
eq_(res.status_code, 201)
eq_(res.json['contribStatusURL'], reverse('api_dispatch_detail',
kwargs={'api_name': 'webpay', 'resource_name': 'status',
'uuid': contribution.uuid}))
ok_(res.json['webpayJWT'])
@patch('mkt.webapps.models.Webapp.has_purchased')
def test_already_purchased(self, has_purchased):
has_purchased.return_value = True
self.setup_base()
self.setup_package()
res = self.client.post(self.list_url, data=json.dumps({'app': 337141}))
eq_(res.status_code, 409)
eq_(res.content, '{"reason": "Already purchased app."}')
def _post(self):
return self.client.post(self.list_url,
data=json.dumps({'app': 337141}))
def test_waffle_fallback(self):
self.setup_base()
self.setup_package()
flag = self.create_flag('override-app-purchase', everyone=None)
flag.users.add(self.user.user)
with self.settings(PURCHASE_LIMITED=True):
eq_(self._post().status_code, 201)
class TestStatus(BaseOAuth):
fixtures = fixture('webapp_337141', 'user_2519')
def setUp(self):
super(TestStatus, self).setUp(api_name='webpay')
self.contribution = Contribution.objects.create(
addon_id=337141, user_id=2519, type=CONTRIB_PURCHASE,
uuid='some:uid')
self.get_url = ('api_dispatch_detail', {
'api_name': 'webpay', 'resource_name': 'status',
'uuid': self.contribution.uuid})
def test_allowed(self):
self._allowed_verbs(self.get_url, ['get'])
def test_get(self):
res = self.client.get(self.get_url)
eq_(res.status_code, 200)
eq_(res.json['status'], 'complete')
def test_no_contribution(self):
self.contribution.delete()
res = self.client.get(self.get_url)
eq_(res.status_code, 200, res.content)
eq_(res.json['status'], 'incomplete', res.content)
def test_incomplete(self):
self.contribution.update(type=CONTRIB_PENDING)
res = self.client.get(self.get_url)
eq_(res.status_code, 200, res.content)
eq_(res.json['status'], 'incomplete', res.content)
def test_no_purchase(self):
self.contribution.addon.addonpurchase_set.get().delete()
res = self.client.get(self.get_url)
eq_(res.status_code, 200, res.content)
eq_(res.json['status'], 'incomplete', res.content)
class TestPrices(BaseOAuth):
def make_currency(self, amount, tier, currency, region):
return PriceCurrency.objects.create(price=Decimal(amount), tier=tier,
currency=currency, provider=PROVIDER_BANGO, region=region.id)
def setUp(self):
super(TestPrices, self).setUp(api_name='webpay')
self.price = Price.objects.create(name='1', price=Decimal(1))
self.currency = self.make_currency(3, self.price, 'DE', regions.DE)
self.us_currency = self.make_currency(3, self.price, 'USD', regions.US)
self.list_url = list_url('prices')
self.get_url = get_url('prices', self.price.pk)
# If regions change, this will blow up.
assert regions.BR.default_currency == 'BRL'
def get_currencies(self, data):
return [p['currency'] for p in data['prices']]
def test_list_allowed(self):
self._allowed_verbs(self.list_url, ['get'])
self._allowed_verbs(self.get_url, ['get'])
def test_single(self):
res = self.client.get(self.get_url)
eq_(res.status_code, 200)
eq_(res.json['pricePoint'], '1')
eq_(res.json['name'], 'Tier 1')
# Ensure that price is in the JSON since solitude depends upon it.
eq_(res.json['price'], '1.00')
def test_price_point(self):
res = self.client.get(self.list_url + ({'pricePoint': '1'},))
eq_(res.status_code, 200)
data = json.loads(res.content)
eq_(data['meta']['total_count'], 1)
eq_(data['objects'][0]['pricePoint'], '1')
def test_list(self):
res = self.client.get(self.list_url)
eq_(res.json['meta']['total_count'], 1)
self.assertSetEqual(self.get_currencies(res.json['objects'][0]),
['USD', 'DE'])
def test_list_filtered(self):
self.currency.update(provider=0)
res = self.client.get(self.list_url + ({'provider': 'bango'},))
eq_(self.get_currencies(res.json['objects'][0]), ['USD'])
def test_prices(self):
res = self.client.get(self.get_url)
eq_(res.status_code, 200)
self.assertSetEqual(self.get_currencies(res.json), ['USD', 'DE'])
def test_prices_filtered(self):
self.currency.update(provider=0)
res = self.client.get(self.get_url + ({'provider': 'bango'},))
eq_(res.status_code, 200)
self.assertSetEqual(self.get_currencies(res.json), ['USD'])
def test_has_cors(self):
self.assertCORS(self.client.get(self.get_url), 'get')
@patch('mkt.webpay.resources.PriceResource.dehydrate_prices')
def test_other_cors(self, prices):
prices.side_effect = ValueError
res = self.client.get(self.get_url)
eq_(res.status_code, 500)
self.assertCORS(res, 'get')
def test_locale(self):
self.make_currency(5, self.price, 'BRL', regions.BR)
res = self.client.get(self.get_url, HTTP_ACCEPT_LANGUAGE='pt-BR')
eq_(res.status_code, 200)
eq_(res.json['localized']['locale'], 'R$5,00')
def test_locale_list(self):
# Check that for each price tier a different localisation is
# returned.
self.make_currency(2, self.price, 'BRL', regions.BR)
price_two = Price.objects.create(name='2', price=Decimal(1))
self.make_currency(12, price_two, 'BRL', regions.BR)
res = self.client.get(self.list_url, HTTP_ACCEPT_LANGUAGE='pt-BR')
eq_(res.status_code, 200)
eq_(res.json['objects'][0]['localized']['locale'], 'R$2,00')
eq_(res.json['objects'][1]['localized']['locale'], 'R$12,00')
def test_no_locale(self):
# This results in a region of BR and a currency of BRL. But there
# isn't a price tier for that currency. So we don't know what to show.
res = self.client.get(self.get_url, HTTP_ACCEPT_LANGUAGE='pt-BR')
eq_(res.status_code, 200)
eq_(res.json['localized'], {})
class TestNotification(BaseOAuth):
fixtures = fixture('webapp_337141', 'user_2519')
def setUp(self):
super(TestNotification, self).setUp(api_name='webpay')
self.grant_permission(self.profile, 'Transaction:NotifyFailure')
self.contribution = Contribution.objects.create(addon_id=337141,
uuid='sample:uuid')
self.list_url = ('api_dispatch_list', {'resource_name': 'failure'})
self.get_url = ['api_dispatch_detail',
{'resource_name': 'failure',
'pk': self.contribution.pk}]
def test_list_allowed(self):
self._allowed_verbs(self.get_url, ['patch'])
def test_notify(self):
url = 'https://someserver.com'
res = self.client.patch(self.get_url,
data=json.dumps({'url': url, 'attempts': 5}))
eq_(res.status_code, 202)
eq_(len(mail.outbox), 1)
msg = mail.outbox[0]
assert url in msg.body
eq_(msg.recipients(), [u'[email protected]'])
def test_no_permission(self):
GroupUser.objects.filter(user=self.profile).delete()
res = self.client.patch(self.get_url, data=json.dumps({}))
eq_(res.status_code, 401)
def test_missing(self):
res = self.client.patch(self.get_url, data=json.dumps({}))
eq_(res.status_code, 400)
def test_not_there(self):
self.get_url[1]['pk'] += 1
res = self.client.patch(self.get_url, data=json.dumps({}))
eq_(res.status_code, 404)
def test_no_uuid(self):
self.contribution.update(uuid=None)
res = self.client.patch(self.get_url, data=json.dumps({}))
eq_(res.status_code, 404)
class TestProductIconResource(BaseOAuth):
fixtures = fixture('webapp_337141', 'user_2519')
def setUp(self):
super(TestProductIconResource, self).setUp(api_name='webpay')
self.list_url = list_url('product/icon')
p = patch('mkt.webpay.resources.tasks.fetch_product_icon')
self.fetch_product_icon = p.start()
self.addCleanup(p.stop)
self.data = {
'ext_size': 128,
'ext_url': 'http://someappnoreally.com/icons/icon_128.png',
'size': 64
}
def post(self, data, with_perms=True):
if with_perms:
self.grant_permission(self.profile, 'ProductIcon:Create')
return self.client.post(self.list_url, data=json.dumps(data))
def test_list_allowed(self):
self._allowed_verbs(self.list_url, ['get', 'post'])
def test_missing_fields(self):
res = self.post({'ext_size': 1})
eq_(res.status_code, 400)
def test_post(self):
res = self.post(self.data)
eq_(res.status_code, 202)
self.fetch_product_icon.delay.assert_called_with(self.data['ext_url'],
self.data['ext_size'],
self.data['size'])
def test_post_without_perms(self):
res = self.post(self.data, with_perms=False)
eq_(res.status_code, 401)
def test_anon_get(self):
data = {
'ext_size': 128,
'ext_url': 'http://someappnoreally.com/icons/icon_128.png',
'size': 64,
'format': 'png'
}
icon = ProductIcon.objects.create(**data)
# We don't need to filter by these:
data.pop('size')
data.pop('format')
res = self.anon.get(self.list_url, data=data)
eq_(res.status_code, 200)
ob = json.loads(res.content)['objects'][0]
eq_(ob['url'], icon.url())
class TestSigCheck(TestCase):
def test(self):
key = 'marketplace'
aud = 'webpay'
secret = 'third door on the right'
with self.settings(APP_PURCHASE_SECRET=secret,
APP_PURCHASE_KEY=key,
APP_PURCHASE_AUD=aud):
res = self.client.post(reverse('webpay.sig_check'))
eq_(res.status_code, 201, res)
data = json.loads(res.content)
req = jwt.decode(data['sig_check_jwt'].encode('ascii'), secret)
eq_(req['iss'], key)
eq_(req['aud'], aud)
eq_(req['typ'], 'mozilla/payments/sigcheck/v1')
|
python
|
from matplotlib import pyplot as plt
from matplotlib import cm
from matplotlib import animation
from optimisation import FireflyOptimizer
import numpy as np
from optimisation import Ackley
f_alg = FireflyOptimizer(population_size=10, problem_dim=2, generations=100)
func = Ackley(2)
N = 100
x = np.linspace(-5, 5, N)
y = np.linspace(-5, 5, N)
X, Y = np.meshgrid(x, y)
z = func.get_y_2d(X, Y)
# dt = 1. / 30
fig = plt.figure()
fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
ax = fig.add_subplot(111, aspect='equal', xlim=(-5, 5), ylim=(-5, 5)) # autoscale_on=False)
cs = ax.contourf(X, Y, z, cmap=cm.PuBu_r)
cbar = fig.colorbar(cs)
particles, = ax.plot([], [], 'bo', ms=6)
rect = plt.Rectangle([-5, -5], 10, 10, ec='none', lw=2, fc='none')  # frame anchored at the bottom-left corner of the plot area
ax.add_patch(rect)
def init():
global f_alg, rect
particles.set_data([], [])
rect.set_edgecolor('none')
return particles, rect
def animate(i):
global f_alg, rect, ax, fig
ms = int(fig.dpi * 2 * 0.04 * fig.get_figwidth()
/ np.diff(ax.get_xbound())[0])
rect.set_edgecolor('k')
x = []
y = []
for ind in f_alg.population:
x.append(ind.position[0])
y.append(ind.position[1])
f_alg.step()
particles.set_data(x, y)
particles.set_markersize(ms)
return particles, rect
ani = animation.FuncAnimation(fig, animate, frames=200, interval=10,
blit=True, init_func=init)
ani.save('videos/ackley_firefly.mp4', fps=5, extra_args=['-vcodec', 'libx264'])
plt.show()
#animate_firefly_convergence()
|
python
|
import socket
import pickle
import pandas as pd
import matplotlib.pyplot as plt
def graph_setting():
plt.ion()
fig, ax = plt.subplots()
return ax
def data_get(df, conn, i):
data = conn.recv(1024)
data = float(pickle.loads(data))
    df_temp = pd.DataFrame([[data]], columns=df.columns)  # reuse the caller's column name (data1..data4)
df = pd.concat([df, df_temp], axis=0)
df = df.reset_index(drop = True)
return df
def run_server(host='127.0.0.1', port=7788):
ax = graph_setting()
with socket.socket() as sock:
sock.bind((host, port))
sock.listen()
conn, addr = sock.accept()
df1 = pd.DataFrame(columns=['data1'])
df2 = pd.DataFrame(columns=['data2'])
df3 = pd.DataFrame(columns=['data3'])
df4 = pd.DataFrame(columns=['data4'])
i = 0
ax.plot(df1, 'deepskyblue', label='IntellivueABP',dash_capstyle= 'round',dash_joinstyle = 'round')
ax.plot(df2, 'skyblue', label='IntellivuePLETH',dash_capstyle= 'round',dash_joinstyle = 'round')
ax.plot(df3, 'navy', label='PrimusAWP',dash_capstyle= 'round',dash_joinstyle = 'round')
ax.plot(df4, 'dodgerblue', label='PrimusCO2',dash_capstyle= 'round',dash_joinstyle = 'round')
ax.set_facecolor('blanchedalmond')
while True:
df1 = data_get(df1, conn, i)
df2 = data_get(df2, conn, i)
df3 = data_get(df3, conn, i)
df4 = data_get(df4, conn, i)
ax.plot(df1, 'deepskyblue',dash_capstyle= 'round',dash_joinstyle = 'round')
ax.plot(df2, 'skyblue',dash_capstyle= 'round',dash_joinstyle = 'round')
ax.plot(df3, 'navy',dash_capstyle= 'round',dash_joinstyle = 'round')
ax.plot(df4, 'dodgerblue',dash_capstyle= 'round',dash_joinstyle = 'round')
ax.set_xlabel('EPOCH')
ax.set_ylabel('TXT Y Value')
plt.legend(loc='upper left')
if i >= 100 :
plt.xlim([i-99, i])
plt.show()
plt.pause(0.01)
i += 1
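
# Illustrative client sketch (not part of the original script): run_server()
# above expects four pickled floats per plotting cycle (one per series),
# received one recv() at a time, so a matching sender pauses briefly between
# values. The host, port, and random test data are assumptions.
def example_client(host='127.0.0.1', port=7788):
    import random
    import time
    with socket.socket() as sock:
        sock.connect((host, port))
        while True:
            for _ in range(4):  # one value per DataFrame (df1..df4)
                sock.send(pickle.dumps(random.random()))
                time.sleep(0.05)  # reduce the chance of messages coalescing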
if __name__ == '__main__':
run_server()
|
python
|
import re
from csv import DictReader, DictWriter
from .utils import get_headers
class Dataset(object):
def __init__(self, key=None, headers=None, data=None):
self.headers = headers
self.data = data
self.key = key
def write(self, fd, delim='\t'):
writer = DictWriter(fd, self.headers, delimiter=delim)
writer.writeheader()
for row in self.data:
writer.writerow(row)
@classmethod
def from_file(cls, fd, delim='\t'):
reader = DictReader(fd, delimiter=delim)
data = [row for row in reader]
headers = data[0].keys() if len(data) > 0 else None
return cls(headers=headers, data=data)
def merge(self, other_dataset):
return Dataset(headers=set(self.headers + other_dataset.headers),
data=(self.data + other_dataset.data))
def map(self, transform):
def dict_update(row, extra):
row.update(extra)
return row
data = [dict_update(row, transform(row)) for row in self.data]
return Dataset(headers=get_headers(data), data=data)
def filter(self, predicate):
data = [row for row in self.data if predicate(row)]
return Dataset(headers=get_headers(data), data=data)
def select(self, columns):
col_spec = self._select_columns(columns)
headers = [header for i, header in enumerate(self.headers)
if i in col_spec or header in col_spec]
data = [{header:row[header] for header in headers} for row in self.data]
return Dataset(headers=headers, data=data)
def group_by(self, columns):
# return GroupedDataset(groups)
pass #returns GroupedDataset
def agg(self, agg_func):
return agg_func(self.data)
def _select_columns(self, columns):
"""Given a list of string representations of columns, returns a set of integers
representing each column.
>>> d = Dataset(headers=['this', 'that'])
>>> d._select_columns('1-3')
set(1, 2, 3)
>>> d._select_columns('1-3,8-12')
set(1, 2, 3, 8, 9, 10, 11, 12)
>>> d._select_columns('that')
set(1)
>>> d._select_columns('this,that')
set(0, 1)
"""
if isinstance(columns, basestring):
columns = [columns]
col_spec = set()
for col in columns:
if col.startswith('-') or col[0] in '0123456789':
m = re.match(r'(?P<first>[0-9]*)-(?P<last>[0-9]*)', col)
first = int(m.group('first')) if m.group('first') else 0
last = int(m.group('last')) if m.group('last') else len(self.headers)
col_spec |= set(range(first, last))
else:
col_spec.add(col)
return col_spec
class GroupedDataset(object):
def __init__(self, groups):
self.groups = groups
def agg(self, agg_func):
pass
#return [group.agg(agg_func) for group in self.groups]
# def test_parse():
# test_cases = [
# '1-3',
# '1-magic',
# 'a-c',
# '1-3,5-7'
# ]
# for test in test_cases:
# print test, parse(test)
#number = d+
#range = number|(number?-number?)
#name = [a-zA-Z0-9_-]+
#term = (name|range)
#statement = term(,term)*
#from pyparsing import *
#dash = Literal('-')
#lodash = Literal('_')
#comma = Literal(',')
#number = Word(nums)
#range_ = number | Combine(Optional(number) + dash + Optional(number))
#name = Word(alphas + nums + '_')
#term = name | range_
#statement = term | Combine(term + Optional(Combine(comma + ZeroOrMore(term))))
#return statement.parseString(string)
#for x in tmp:
#y = x.split('-')
#if len(y) == 0: continue
#if len(y) == 1: cols.add(int(y[0]))
#if len(y) == 2: cols.update(range(int(y[0]), int(y[1])+1))
#if len(y) > 2: raise ValueError("Misformatted columnspec.")
#return sorted(list(cols))
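
# Illustrative usage sketch (not part of the original module): builds a small
# Dataset in memory, filters and projects it, then writes TSV to stdout. The
# column names and rows are made up.
if __name__ == "__main__":
    import sys
    people = Dataset(headers=["name", "age"],
                     data=[{"name": "ada", "age": "36"},
                           {"name": "tim", "age": "12"}])
    adults = people.filter(lambda row: int(row["age"]) >= 18)
    adults.select(["name"]).write(sys.stdout)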
|
python
|
from telnetlib import Telnet
from uuid import uuid4
from time import sleep
from hashlib import md5
from os import chmod
from re import compile as compile_regex
from sys import version_info
from .abstractremoteshell import AbstractRemoteShell
from .shellresult import ShellResult
from .streamreader import PrefixedStreamReader
from .queue import Queue
class TelnetShell(AbstractRemoteShell):
def __init__(self, hostname, username, password=None, port=23, *args, **kwargs):
super(TelnetShell, self).__init__(hostname, *args, **kwargs)
self._prompt = self._id
self._hostname = hostname
self._username = username
self._password = password
self._port = port
self._telnet = Telnet()
self._is_connected = False
self._buffer = ""
self.connect()
def do_connect(self):
self._telnet.open(self._hostname, self._port)
self._read_until("login: ")
self._write(self._username + "\n")
if self._password:
self._read_until("Password: ")
self._write(self._password + "\n")
sleep(.1)
self._write("export PS1='%s'\n" % self._prompt)
self._read_until(self._prompt)
self._read_until(self._prompt)
self._write("export COLUMNS=1024\n")
self._read_until(self._prompt)
self._write("stty columns 1027\n")
self._read_until(self._prompt)
def do_disconnect(self):
self._telnet.close()
def _write(self, text):
self.log_spy_write(text)
self._telnet.write(text.encode('utf-8'))
def _read_until(self, marker):
out = self._telnet.read_until(marker.encode('utf-8'))
self.log_spy_read(out)
return out
def readline(self):
choices = [ "\n", self._prompt ]
if version_info[0] > 2: choices = [ bytes(x, 'utf-8') for x in choices ]
(index, _, line) = self._telnet.expect(choices)
self.log_spy_read(line.decode('utf-8').rstrip("\n\r"))
if index == 0:
return line
return None
def execute_command(self, command, env={}, wait=True, check_err=False, cwd=None):
wrapped_command = PrefixedStreamReader.wrap_command(command, env, cwd)
self._write(wrapped_command + "\n")
self.readline()
sleep(.2)
queue = Queue()
PrefixedStreamReader(self, queue)
return ShellResult(self, command, queue, wait, check_err)
def do_reboot(self):
self._write("reboot\n")
sleep(.3)
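
# Illustrative usage sketch (not part of the original module): the host,
# credentials, and command are made up, and a reachable telnet service is
# assumed; the ShellResult is printed as-is.
if __name__ == "__main__":
    shell = TelnetShell("192.0.2.10", username="root", password="secret")
    result = shell.execute_command("uname -a")
    print(result)
    shell.do_disconnect()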
|
python
|
"""
二分探索
<最悪実行時間に関する漸化式>
T(n) = | Θ(1) if n = 1
| T(n/2) + c if n > 1
ループの度に検査範囲が半減するので、Θ(lgn)となる。
"""
def binary_search(A, v):
left = 0
right = len(A) - 1
while left <= right:
i = (left + right) // 2
if v < A[i]:
right = i - 1
elif A[i] < v:
left = i + 1
else:
return i
else:
return None
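
# Illustrative usage sketch (not part of the original module): binary search
# requires the input sequence to be sorted in ascending order.
if __name__ == "__main__":
    data = [1, 3, 5, 7, 9, 11]
    print(binary_search(data, 7))  # index 3
    print(binary_search(data, 4))  # None (not present)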
|
python
|
#
# RawIO
# Copyright (c) 2021 Yusuf Olokoba.
#
from cv2 import findTransformECC, MOTION_TRANSLATION, TERM_CRITERIA_COUNT, TERM_CRITERIA_EPS
from numpy import asarray, eye, float32
from PIL import Image
from sklearn.feature_extraction.image import extract_patches_2d
from typing import Callable
def markov_similarity (min_probability: float=0.8, trials: int=100, patch_size: float=0.1) -> Callable[[str, str], bool]:
"""
Create a similarity function which estimates a binomial distribution on a Markov random field defined over the image.
In simple terms, it checks for patch correspondences :/
We use Evangelidis & Psarakis, 2008 with Monte Carlo simulation to estimate the binomial distribution.
Parameters:
min_probability (float): Minimum probability for images to be considered similar, in range [0., 1.].
trials (int): Number of Monte Carlo trials for estimating the binomial distribution.
patch_size (float): Relative patch size for ECC trials, in range [0., 1.].
Returns:
callable: Pairwise image similarity function returning a boolean.
"""
def similarity_fn (path_a: str, path_b: str) -> bool:
# Load images
image_a = Image.open(path_a)
image_b = Image.open(path_b)
# Check sizes
if image_a.size != image_b.size:
return False
# Load images
image_a.draft("L", (2560, 1440))
image_b.draft("L", (2560, 1440))
image_a = asarray(image_a)
image_b = asarray(image_b)
# Extract patches
SEED = 1
size = int(min(image_a.shape) * patch_size)
patches_a = extract_patches_2d(image_a, (size, size), max_patches=trials, random_state=SEED)
patches_b = extract_patches_2d(image_b, (size, size), max_patches=trials, random_state=SEED)
# Run Monte Carlo estimation
IDENTITY = eye(2, 3, dtype=float32)
CRITERIA = (TERM_CRITERIA_EPS | TERM_CRITERIA_COUNT, 50, 1e-4)
passes = 0
for patch_a, patch_b in zip(patches_a, patches_b):
try:
findTransformECC(patch_a, patch_b, IDENTITY.copy(), MOTION_TRANSLATION, CRITERIA, None, 5)
passes += 1
except:
pass
# Check
estimator = passes / patches_a.shape[0]
return estimator >= min_probability
return similarity_fn
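
# Illustrative usage sketch (not part of the original module): the two image
# paths are placeholders; the returned callable decides whether the images are
# similar enough under the requested minimum probability.
if __name__ == "__main__":
    similar = markov_similarity(min_probability=0.8, trials=50)
    print(similar("photos/IMG_0001.JPG", "photos/IMG_0002.JPG"))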
|
python
|
import numpy as np
from glob import glob
import os
from sklearn.model_selection import train_test_split
base_path = "/media/ml/data_ml/EEG/deepsleepnet/data_npy"
files = glob(os.path.join(base_path, "*.npz"))
train_val, test = train_test_split(files, test_size=0.15, random_state=1337)
train, val = train_test_split(train_val, test_size=0.1, random_state=1337)
train_dict = {k: np.load(k) for k in train}
test_dict = {k: np.load(k) for k in test}
val_dict = {k: np.load(k) for k in val}
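
# Illustrative sketch (not part of the original script): peek at the array
# names stored in the first training file; nothing here assumes specific keys.
for path, npz in list(train_dict.items())[:1]:
    print(path, npz.files)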
|
python
|
from ..game import Actor
def test_id():
actor1 = Actor()
actor2 = Actor()
assert actor1.id
assert actor2.id
assert actor1.id != actor2.id
def test_add_food():
actor = Actor()
assert actor.food == 0
actor.add_food(10)
assert actor.food == 10
actor.add_food(5)
assert actor.food == 15
def test_take_food():
actor = Actor()
assert actor.food == 0
actor.add_food(10)
assert actor.food == 10
assert actor.take_food() == 10
assert actor.food == 0
def test_set_position():
actor = Actor()
actor.set_position((0, 0))
assert actor.position == (0, 0)
actor.set_position((5, 5))
assert actor.position == (5, 5)
def test_set_owner():
actor = Actor()
actor.set_owner("foo")
assert actor.owner_id == "foo"
actor.set_owner("bar")
assert actor.owner_id == "bar"
def test_move():
actor = Actor().set_position((0, 0))
assert actor.position == (0, 0)
actor.move((1, 0))
actor.move((1, 0))
assert actor.position == (1, 0)
actor.move((1, 1))
actor.move((1, 1))
assert actor.position == (1, 1)
def test_health():
actor = Actor()
assert actor.health > 0
def test_missing_health():
actor = Actor()
assert actor.missing_health == 0
actor.deal_damage(1)
assert actor.missing_health == 1
def test_missing_heal():
actor = Actor()
assert actor.missing_health == 0
actor.deal_damage(10)
assert actor.missing_health == 10
actor.heal(5)
assert actor.missing_health == 5
actor.heal(5)
assert actor.missing_health == 0
def test_alive():
actor = Actor()
assert actor.alive
actor.deal_damage(actor.health)
assert not actor.alive
def test_dead():
actor = Actor()
assert not actor.dead
actor.deal_damage(actor.health)
assert actor.dead
|
python
|
import deepmerge
import functools
import importlib
import logging
import yaml
from inspect import getmembers, isfunction
from urllib import parse, request
from wcmatch import fnmatch
from . import macros, middleware
log = logging.getLogger("netbox_rbac")
# Collect all public functions from macros.
functions = [
(name, fn)
for name, fn in getmembers(macros, isfunction)
if not name.startswith("_")
]
# In order to guard against external dependencies, such as Gitlab, being
# unavailable, store the last configuration.
config = None
def load(paths):
global config
errors = []
for path in paths:
try:
path = parse.urlparse(path, scheme="file")
data = request.urlopen(path.geturl())
config = Rule(yaml.safe_load(data))
return config
except Exception as err:
errors.append("%s: %s" % (path.geturl(), err))
    log.warning("load: no valid rules found: %s", errors)
# Unable to load a new config, so return the current one.
return config
class Rule:
def __init__(self, config):
self.roles = {}
# Although YAML provides a mechanism for referencing one object from
# another, it doesn't support deep merging, so we handle that manually.
for name, role in config.items():
for base in role.get("base", []):
deepmerge.always_merger.merge(role, config[base])
# Ignore template roles.
if "groups" in role:
self.roles[name] = Role(name, **role)
# Given the user's roles, the requested permission, and the object, returns
# whether or not the operation is allowed.
def has_perm(self, roles, perm, obj):
for role in roles:
role = self.roles.get(role)
# Permission is granted when:
# * The role is valid (defined in the configuration).
# * The requested permission is granted by the role.
# * The rule evaluates to True on the object.
if role and role.has_perm(perm) and role.eval(obj):
return True
return False
class Role:
def __init__(self, name, **kwargs):
self.name = name
self.context = kwargs.get("context", {})
self.groups = kwargs.get("groups", [])
self.imports = kwargs.get("imports", [])
self.perms = kwargs.get("perms", [])
self.rule = kwargs.get("rule")
if self.rule:
self.code = compile(self.rule, "<string>", "eval")
else:
self.code = None
# Returns the result of evaluating the rule on the object, if both are
# defined. Returns True otherwise.
def eval(self, obj):
if self.code and obj:
context = {**self.context, "obj": obj}
for name in self.imports:
context[name] = importlib.import_module(name)
for name, fn in functions:
context[name] = functools.partial(fn, obj)
context.update(
{"fnmatch": fnmatch.fnmatch, "request": middleware.request(),}
)
return eval(self.code, context)
return True
# Returns whether or not this role provides the requested permission.
def has_perm(self, perm):
return fnmatch.fnmatch(perm, self.perms)
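
# Illustrative usage sketch (not part of the original module): builds a Rule
# from an in-memory role definition instead of going through load(). The role
# name, group, permission pattern, and rule expression are made up, and
# evaluating a rule pulls the current request from the middleware, so this is
# only meaningful inside a configured NetBox/Django process.
if __name__ == "__main__":
    rules = Rule({
        "operators": {
            "groups": ["net-ops"],
            "perms": ["dcim.*_device"],
            "rule": "obj is not None",
        }
    })
    print(rules.has_perm(["operators"], "dcim.change_device", object()))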
|
python
|
from django.core.paginator import Paginator
from django.shortcuts import redirect, render, get_object_or_404
from comps.models.comp import Comp
from comps.models.heat import Heat
from comps.models.heatlist_error import Heatlist_Error
def delete_heatlist_error(request, error_id):
heatlist_error = get_object_or_404(Heatlist_Error, pk=error_id)
comp = heatlist_error.comp
heatlist_error.delete()
return redirect("comps:show_heatlist_errors", comp.id)
def check_heatlist_error(request, error_id):
heatlist_error = get_object_or_404(Heatlist_Error, pk=error_id)
heat = heatlist_error.heat
if heatlist_error.error == Heatlist_Error.UNKNOWN_LEVEL:
if heat.base_value > 0:
heatlist_error.delete()
elif heatlist_error.error == Heatlist_Error.UNKNOWN_STYLE:
if heat.style != Heat.UNKNOWN:
heatlist_error.delete()
return redirect('comps:heat', heat.id)
def show_heatlist_errors(request, comp_id):
comp = get_object_or_404(Comp, pk=comp_id)
heatlist_errors = Heatlist_Error.objects.filter(comp=comp).order_by('error', 'dancer', 'heat__heat_number')
paginator = Paginator(heatlist_errors, 16)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
return render(request, "comps/show_heatlist_errors.html", {'comp': comp, 'page_obj': page_obj, })
|
python
|
# Generated by Django 3.0.6 on 2020-06-12 16:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('My_Account', '0017_auto_20200612_1522'),
]
operations = [
migrations.RenameField(
model_name='sharing_image',
old_name='Likes',
new_name='Amazing_Num',
),
migrations.RenameField(
model_name='sharing_image',
old_name='Loves',
new_name='Good_Num',
),
migrations.RenameField(
model_name='sharing_post',
old_name='Likes',
new_name='Amazing_Num',
),
migrations.RenameField(
model_name='sharing_post',
old_name='Loves',
new_name='Good_Num',
),
]
|
python
|
import pygame
from pygame.locals import *
from GUI.Colours import Colours
from GUI.Measures import Measures
from GUI.Point import Point
from GUI.BarPoint import BarPoint
from GUI.BearoffPoint import BearoffPoint
from GUI.Checker import Checker
class GUIBoard:
def __init__(self):
pass
def generate_board_surface(self):
boardSurf = pygame.Surface((Measures.BOARDWIDTH.value, Measures.BOARDHEIGHT.value))
boardSurf.fill(Colours.SOFT_TEAL.value)
# quadrants
leftHalf = Rect(Measures.BORDERSIZE.value, Measures.BORDERSIZE.value, Measures.QUADRANTWIDTH.value,
Measures.QUADRANTHEIGHT.value)
rightHalf = Rect(Measures.BORDERSIZE.value + Measures.QUADRANTWIDTH.value + Measures.BARSIZE.value,
Measures.BORDERSIZE.value, Measures.QUADRANTWIDTH.value, Measures.QUADRANTHEIGHT.value)
pygame.draw.rect(boardSurf, Colours.TEAL.value, leftHalf)
pygame.draw.rect(boardSurf, Colours.TEAL.value, rightHalf)
self.draw_board_triangles(boardSurf)
self.create_points()
return boardSurf
def draw_board_triangles(self, surface):
width = ['left', 'right']
height = ['top', 'bottom']
for i in range(2):
for j in range(2):
self.draw_triangles(surface, width[i], height[j])
@staticmethod
def draw_triangles(surface, width, height):
if width == 'left':
x = Measures.BORDERSIZE.value
else:
x = Measures.BORDERSIZE.value + Measures.QUADRANTWIDTH.value + Measures.BARSIZE.value
if height == 'top':
y = Measures.BORDERSIZE.value
tip = y + Measures.TRIANGLEHEIGHT.value
else:
y = Measures.BORDERSIZE.value + Measures.QUADRANTHEIGHT.value
tip = y - Measures.TRIANGLEHEIGHT.value
left_point = (x, y)
right_point = (x + Measures.TRIANGLEWIDTH.value, y)
tip_point = (x + (Measures.TRIANGLEWIDTH.value / 2), tip)
for i in range(6):
points = [left_point, right_point, tip_point]
if i % 2 == 0 and height == 'top' or i % 2 != 0 and height == 'bottom':
pygame.draw.polygon(surface, Colours.ORANGE.value, points)
else:
pygame.draw.polygon(surface, Colours.ORANGE.value, points, 2)
left_point = right_point
right_point = (left_point[0] + Measures.TRIANGLEWIDTH.value, y)
tip_point = (tip_point[0] + Measures.TRIANGLEWIDTH.value, tip)
@staticmethod
def create_points():
points = []
point = BearoffPoint(((Measures.QUADRANTWIDTH.value * 2) + Measures.BARSIZE.value,
Measures.BOTTOMHEIGHT.value - Measures.BORDERSIZE.value),
0, (Measures.BORDERSIZE.value, Measures.BOARDHEIGHT.value + Measures.BORDERSIZE.value))
points.append(point)
x = Measures.BOARDWIDTH.value - Measures.BORDERSIZE.value - Measures.TRIANGLEWIDTH.value
y = Measures.BOARDHEIGHT.value - Measures.BORDERSIZE.value - Measures.TRIANGLEHEIGHT.value
for i in range(1, 7):
point = Point((50, 200), i, (x, y))
points.append(point)
x = x - Measures.TRIANGLEWIDTH.value
x = Measures.BOARDWIDTH.value - Measures.BORDERSIZE.value - Measures.QUADRANTWIDTH.value \
- Measures.BARSIZE.value - Measures.TRIANGLEWIDTH.value
y = Measures.BOARDHEIGHT.value - Measures.BORDERSIZE.value - Measures.TRIANGLEHEIGHT.value
for i in range(7, 13):
point = Point((50, 200), i, (x, y))
points.append(point)
x = x - Measures.TRIANGLEWIDTH.value
x = Measures.BORDERSIZE.value
y = Measures.BORDERSIZE.value
for i in range(13, 19):
point = Point((50, 200), i, (x, y))
points.append(point)
x = x + Measures.TRIANGLEWIDTH.value
x = Measures.BORDERSIZE.value + Measures.QUADRANTWIDTH.value + Measures.BARSIZE.value
y = Measures.BORDERSIZE.value
for i in range(19, 25):
point = Point((50, 200), i, (x, y))
points.append(point)
x = x + Measures.TRIANGLEWIDTH.value
point = BarPoint((Measures.BARSIZE.value, Measures.QUADRANTHEIGHT.value), 25,
(Measures.BORDERSIZE.value + Measures.QUADRANTWIDTH.value, Measures.BORDERSIZE.value))
points.append(point)
return points
@staticmethod
def create_checkers(points):
board = [[0, 0, 0, 0, 0, 5, 0, 3, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0],
[0, 0, 0, 0, 0, 5, 0, 3, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0]]
for i in range(24):
if board[0][i] > 0:
for c in range(board[0][i]):
checker = Checker(Colours.BLACK.value)
points[i+1].checkers.append(checker)
for i in range(24):
if board[1][i] > 0:
for c in range(board[1][i]):
checker = Checker(Colours.WHITE.value)
points[24-i].checkers.append(checker)
@staticmethod
def draw_point_numbers(surface):
font = pygame.font.Font(None, 18)
x, y = Measures.BORDERSIZE.value + 20, 15
for i in range(12, 18):
number = font.render(str(i + 1), True, Colours.BLACK.value)
surface.blit(number, (x, y))
x = x + Measures.TRIANGLEWIDTH.value
x, y = Measures.BORDERSIZE.value + Measures.QUADRANTWIDTH.value + Measures.BARSIZE.value + 20, 15
for i in range(18, 24):
number = font.render(str(i + 1), True, Colours.BLACK.value)
surface.blit(number, (x, y))
x = x + Measures.TRIANGLEWIDTH.value
x, y = Measures.BOARDWIDTH.value - Measures.BORDERSIZE.value - 28, Measures.BOARDHEIGHT.value - 25
for i in range(0, 6):
number = font.render(str(i + 1), True, Colours.BLACK.value)
surface.blit(number, (x, y))
x = x - Measures.TRIANGLEWIDTH.value
x = Measures.BOARDWIDTH.value - Measures.BORDERSIZE.value - Measures.QUADRANTWIDTH.value - \
Measures.BARSIZE.value - 30
y = Measures.BOARDHEIGHT.value - 25
for i in range(6, 12):
number = font.render(str(i + 1), True, Colours.BLACK.value)
surface.blit(number, (x, y))
x = x - Measures.TRIANGLEWIDTH.value
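
# Illustrative usage sketch (not part of the original module): opens a bare
# pygame window and blits the generated board surface; the minimal event loop
# and window size (taken from Measures) are assumptions about how the board is
# normally displayed.
if __name__ == "__main__":
    pygame.init()
    screen = pygame.display.set_mode((Measures.BOARDWIDTH.value, Measures.BOARDHEIGHT.value))
    board_surface = GUIBoard().generate_board_surface()
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == QUIT:
                running = False
        screen.blit(board_surface, (0, 0))
        pygame.display.flip()
    pygame.quit()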
|
python
|
from cloud_scanner_azure.config.azure_credential_config import (
AzureCredentialConfig)
class AzureResourceServiceConfig:
"""Configuration required for usage of AzureResourceService."""
def __init__(self, subscription_id, creds: AzureCredentialConfig):
self.credentials = creds
self.subscription_id = subscription_id
|
python
|
# -*- coding: utf-8 -*-
# Imports
import sys,re,os
import glob
import numpy as n
# Script information
__author__ = "Sergi Rodà Llordés"
__version__ ="1.0"
__maintainer__="Sergi Rodà Llordés"
__email__="[email protected]"
class pKa:
def __init__(self,PDB,Ser_residue):
self.__PDB = PDB
self.__Ser_residue = Ser_residue
self.__Results = {}
self.__pI_folded = 0
self.__pI_unfolded = 0
self.__pI_active_site = 0
@property
def PDB(self):
return self.__PDB
@property
def Ser_residue(self):
return self.__Ser_residue
@property
def pI(self):
return [self.__pI_folded,self.__pI_unfolded,self.__pI_active_site]
def propka(self):
"""
        Take the PDB file and calculate the pKa of titratable residues using propka
PARAMETERS
----------
PDB : string
PDB file that wants to be added to the analysis
OUTPUT
------
        Results : dict of titratable residues with the calculated pKa
pI_folded: The isoelectric point of the protein in the folded state
pI_unfolded: The isoelectric point of the protein in the unfolded state
"""
index_pKa1,index_pKa2 = 0,0
try:
os.system("propka31 %s -q"%self.__PDB)
print("Computing pI values...")
except:
print("propka is not installed. To install it git clone the following repository: https://github.com/jensengroup/propka-3.1")
print("Then: python setup.py install --user")
exit()
else:
os.system("rm *.propka_input")
pKa_file = open("%s.pka" %self.__PDB[self.__PDB.rindex("/")+1:-4])
for line in pKa_file:
if "SUMMARY OF THIS PREDICTION" in line:
index_pKa1=1
continue
if index_pKa1!=0:
index_pKa2=index_pKa1
index_pKa1=0
continue
if index_pKa2!=0:
self.__Results[line[3:6]+"_"+line[7:10]] = [int(line[7:10]),float(line[16:21])]
if "N+" in line and index_pKa2!=0:
self.__Results[line[3:6]+"_"+line[7:10]] = [int(line[7:10]), float(line[16:21])]
index_pKa2=0
if "The pI is " in line:
self.__pI_folded, self.__pI_unfolded = float(line[10:15]), float(line[29:34])
os.system("rm *.pka")
def Neighbouratoms(self):
"""
Take the atoms near the active site to compute the pI around this area
PARAMETERS
----------
PDB : string
PDB file that wants to be added to the analysis
Ser_residue : int
Index or number referring to the catalytic Ser residue
Results : dict
            dict of titratable residues with the calculated pKa
OUTPUT
------
pI_active_site : pI of the active site and surroundings (10 Å)
"""
Aux_results,values = {},[]
        # Get the coordinates of the Ser residue to look for the neighbouring titratable residues
PDB_file = open(self.__PDB, "rt")
for line in PDB_file:
if line[17:20]=="SER" and int(self.__Ser_residue)==int(line[23:26]) and "OG" in line:
x,y,z = float(line[30:38]),float(line[38:46]),float(line[46:54])
# Get the neighbour residues and store them with the pKa value
PDB_file = open(self.__PDB, "rt")
for line in PDB_file:
if "TER" in line:
pass
elif "ATOM" in line:
x_aux, y_aux, z_aux = float(line[30:38].strip()), float(line[38:46].strip()), float(line[46:54].strip())
if n.sqrt((x-x_aux)**2+(y-y_aux)**2+(z-z_aux)**2)<=float(10):
if line[17:20]+"_"+line[23:26] in self.__Results:
Aux_results[line[17:20]+"_"+line[23:26]] = self.__Results[line[17:20]+"_"+line[23:26]]
else:
pass
self.__Results = Aux_results
for value in list(Aux_results.values()):
values.append(value[1])
self.__pI_active_site = n.mean(values)
def computepI(self):
"""
It executes the methods of the class sequentially,
returning the 3 computed values of pI
"""
self.propka()
self.Neighbouratoms()
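
# Illustrative usage sketch (not part of the original module): the PDB path and
# catalytic serine number are placeholders, and propka 3.1 must be installed
# for propka() to work.
if __name__ == "__main__":
    analysis = pKa("structures/my_lipase.pdb", 209)
    analysis.computepI()
    folded, unfolded, active_site = analysis.pI
    print("pI folded:", folded, "unfolded:", unfolded, "active site:", active_site)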
|
python
|
from rest_framework import serializers
from products.models import Product, Category
class CategorySerializer(serializers.ModelSerializer):
id = serializers.IntegerField()
class Meta:
model = Category
fields = ("id", "name")
class ProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = (
"id",
"name",
"price",
"quantity",
"featured",
"description",
"picture",
"slug",
)
lookup_field = "slug"
|
python
|
import datetime
import json
import discord
from discord.ext import commands
import requests
'''
Splatoon 2 related commands
'''
class Splatoon2(commands.Cog, name='スプラトゥーン2'):
def __init__(self, bot):
self.bot = bot
@commands.command(name='バイトシフト')
async def say_salmon_schedule(self, ctx):
        '''Show the Salmon Run schedule'''
url = 'https://spla2.yuu26.com/coop/schedule'
ua = 'ShiroBot/1.0 (@[email protected])'
headers = {'User-Agent': ua}
template = '''
直近:
・ステージ: {0}
・時間: {1}~{2}
・ブキ: {3}
次:
・ステージ: {4}
・時間: {5}~{6}
・ブキ: {7}
'''
ret = requests.get(url, headers=headers)
if (ret.status_code == 200):
# OK
data = ret.json()['result']
await ctx.send(template.format(
data[0]['stage']['name'],
data[0]['start'],
data[0]['end'],
','.join([data[0]['weapons'][0]['name'], data[0]['weapons'][1]['name'], data[0]['weapons'][2]['name'], data[0]['weapons'][3]['name']]),
data[1]['stage']['name'],
data[1]['start'],
data[1]['end'],
','.join([data[1]['weapons'][0]['name'], data[1]['weapons'][1]['name'], data[1]['weapons'][2]['name'], data[1]['weapons'][3]['name']])
))
else:
# NG
await ctx.send('バイトデータの取得に失敗しました')
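
# Illustrative setup sketch (not part of the original module): discord.py loads
# cogs from an extension through a module-level setup() hook; whether this
# project actually registers the cog this way is an assumption.
def setup(bot):
    bot.add_cog(Splatoon2(bot))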
|
python
|
from django.db import models
# Create your models here.
from django_countries.fields import CountryField
from django.contrib.gis.db import models
from django.db.models import Manager as GeoManager
class Site(models.Model):
id = models.AutoField(primary_key=True)
site = models.CharField('Site name', max_length=255, blank=False, null=True)
country = CountryField('Country', blank=True, null=True)
data_source = models.CharField('Data Source', max_length=50, blank=True, null=True)
#latitude = models.FloatField('Latitude', blank=True, null=True)
#longitude = models.FloatField('Longitude', blank=True, null=True)
altitude = models.FloatField('Altitude', blank=True, null=True)
site_types = (('Shelter', 'Shelter'), ('Cave', 'Cave'), ('Open-air', 'Open-air'), ('Unknown', 'Unknown'))
site_type = models.CharField('Site type', max_length=20, blank=True, null=True, choices=site_types)
display = models.BooleanField('Flagged', blank=True, null=True)
map_location = models.PointField(dim=2, blank=True, null=True)
objects = GeoManager()
notes = models.TextField(blank=True, null=True)
def longitude(self):
if self.map_location:
return self.map_location.x
else:
return None
def latitude(self):
if self.map_location:
return self.map_location.y
else:
return None
def min_date(self):
dates = Date.objects.filter(site=self) # queryset of date objects
date_list = [d.date for d in dates] # list of dates from date objects
date_list = list(filter(None, date_list)) # remove pesky None values
if date_list: # if the date list contains anything
result = min(date_list) # then get the min
else:
result = None # otherwise return None
return result
def max_date(self):
dates = Date.objects.filter(site=self) # queryset of date objects
date_list = [d.date for d in dates] # list of dates from date objects
date_list = list(filter(None, date_list)) # remove pesky None values
if date_list: # if the date list contains anything
result = max(date_list) # then get the max
else:
result = None # otherwise return None
return result
class Meta:
managed = True
#db_table = 'sites'
def __unicode__(self):
return u'%s, %s' % (self.site, self.country)
def __str__(self):
return f'[{self.id}] {self.site}, {self.country}'
class Date(models.Model):
site = models.ForeignKey(Site, on_delete=models.CASCADE)
layer = models.CharField('Layer', max_length=300, blank=True, null=True)
industry = models.CharField('Industry', max_length=100, blank=True, null=True)
industry_2 = models.CharField('Industry', max_length=100, blank=True, null=True)
industry_3 = models.CharField('Industry', max_length=100, blank=True, null=True)
cat_no = models.CharField('Catalog Number', max_length=100, blank=True, null=True)
date = models.FloatField('Age', blank=True, null=True)
sd_plus = models.FloatField('SD Plus', blank=True, null=True)
sd_minus = models.FloatField('SD Minus', blank=True, null=True)
sample = models.CharField('Sample', max_length=100, blank=True, null=True)
technique = models.CharField('Method', max_length=100, blank=True, null=True)
corrected_date_BP = models.FloatField('Cal. Age BP', blank=True, null=True)
plus = models.FloatField('Cal. Plus', blank=True, null=True)
minus = models.FloatField('Cal. Minus', blank=True, null=True)
hominid_remains = models.TextField('Hominins', blank=True, null=True)
bibliography = models.TextField('Bibliography', blank=True, null=True)
period = models.CharField('Period', max_length=100, blank=True, null=True)
notes = models.TextField('Notes', blank=True, null=True)
intcal09_max = models.FloatField('IntCal09 Max. Age', blank=True, null=True)
intcal09_min = models.FloatField('IntCal09 Min. Age', blank=True, null=True)
class Meta:
managed = True
#db_table = 'dates'
def __unicode__(self):
return u'%s %s %s' % (self.site,self.layer,self.industry)
class Site_plus_dates(Site):
class Meta:
proxy = True
managed = True
verbose_name = "Sites and dates"
verbose_name_plural = "Sites and dates"
|
python
|
#!/usr/bin/env python
# Pull out yearly precipitation
# Daryl Herzmann 26 Jul 2004
import pg, dbflib, mx.DateTime, shutil, shapelib
from pyIEM import wellknowntext
mydb = pg.connect('wepp','iemdb')
sts = mx.DateTime.DateTime(2005,3,1)
ets = mx.DateTime.DateTime(2005,11,1)
interval = mx.DateTime.RelativeDateTime(days=+7)
now = sts
twp = {}
rs = mydb.query("SELECT astext(transform(the_geom,4326)) as t, model_twp from iatwp ORDER by model_twp ASC").dictresult()
for i in range(len(rs)):
twp[ rs[i]["model_twp"] ] = rs[i]["t"]
while (now < ets):
print "Hello Heather, I am here ", now
shp = shapelib.create("weeklysm/%ssm" % (now.strftime("%Y%m%d"), ), shapelib.SHPT_POLYGON)
dbf = dbflib.create("weeklysm/%ssm" % (now.strftime("%Y%m%d"), ) )
dbf.add_field("S0-10CM", dbflib.FTDouble, 8, 2)
dbf.add_field("S10-20CM", dbflib.FTDouble, 8, 2)
dbf.add_field("VSM", dbflib.FTDouble, 8, 2)
rs = mydb.query("select model_twp, avg(vsm) as v, \
avg(s10cm) as s10, avg(s20cm) as s20 from \
waterbalance_by_twp WHERE valid >= '%s' and valid < '%s' \
GROUP by model_twp ORDER by model_twp ASC" % ( \
now.strftime("%Y-%m-%d"), (now+interval).strftime("%Y-%m-%d")\
) ).dictresult()
for i in range(len(rs)):
m = rs[i]['model_twp']
f = wellknowntext.convert_well_known_text( twp[m] )
obj = shapelib.SHPObject(shapelib.SHPT_POLYGON, 1, f )
shp.write_object(-1, obj)
dbf.write_record(i, (rs[i]['s10'],rs[i]['s20'],rs[i]['v']) )
del dbf
del shp
shutil.copy("static/hrap_point_4326.prj", "weeklysm/%ssm.prj" % (now.strftime("%Y%m%d"), ) )
now += interval
|
python
|
#!/usr/bin/env python
#
# Simple websocket server to perform signaling.
#
import asyncio
import binascii
import os
import websockets
clients = {}
async def echo(websocket, path):
client_id = binascii.hexlify(os.urandom(8))
clients[client_id] = websocket
try:
async for message in websocket:
for c in clients.values():
if c != websocket:
await c.send(message)
finally:
clients.pop(client_id)
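
# Illustrative client sketch (not part of the original script): a peer connects,
# sends one signaling message, and waits for whatever another peer relays back.
# It is only defined here, never called, so the server below starts unchanged.
async def example_peer(uri='ws://127.0.0.1:8765'):
    async with websockets.connect(uri) as ws:
        await ws.send('hello from a peer')
        print(await ws.recv())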
asyncio.get_event_loop().run_until_complete(
websockets.serve(echo, '0.0.0.0', 8765))
asyncio.get_event_loop().run_forever()
|
python
|
import sys
import traceback
import logging
from glass import http
from . import highlight
ERROR_TEMPLATE = '''
<title>{code} {title}</title>
<h1>{title}</h1>
<p>{description}</p>
'''
logger = logging.getLogger('glass.app')
class HTTPError(Exception):
code = 500
description = "Internal Server Error"
def __init__(self, description='', code=None):
self.description = description or self.description
self.code = code or self.code
super().__init__(self.description)
def get_response(self, debug=False):
if debug and None not in sys.exc_info():
return self._format_tb(traceback.format_exc())
return self._format_response()
def _format_response(self):
title = http.HTTP_STATUS_CODES.get(self.code, "Error")
response = ERROR_TEMPLATE.format(code=self.code,
title=title,
description=self.description)
return response
def headers(self):
header = [('Content-Type', 'text/html; charset=utf-8')]
return header
def _format_tb(self, tb):
html = ['<html><body> <h1> Server Error</h1>']
try:
html.append(highlight.highlight(tb, 'python'))
except Exception as e:
logger.info('Failed to highlight traceback [%s]' % e)
html.append(tb)
html.append('''
<h3>Note: You are seeing this traceback because
<b>Debug</b> is set to True.</h3>''')
html.append('</body></html>')
return ''.join(html)
class HTTP404(HTTPError):
code = 404
    description = 'The requested URL was not found on this server'
class MethodNotAllow(HTTPError):
code = 405
    description = 'The method is not allowed for the requested path'
class InternalServerError(HTTPError):
code = 500
description = """
Internal Server Error. An error occurs
while processing request
"""
class BadRequest(HTTPError):
    code = 400
description = '''Bad Request'''
class RequestTooLarge(HTTPError):
code = 413
description = 'Payload Too Large'
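
# Illustrative usage sketch (not part of the original module): raising one of
# the errors and rendering its response body directly; how glass itself turns
# the exception into a WSGI response is not shown here.
if __name__ == "__main__":
    try:
        raise HTTP404("The requested article does not exist")
    except HTTPError as exc:
        print(exc.code)
        print(exc.headers())
        print(exc.get_response(debug=False))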
|
python
|
"""
An evolving population of genotypes(Ideally for optimizing network
hyperparameters).
See genotype constraint docs in spikey/meta/series.
Examples
--------
.. code-block:: python
metagame = EvolveNetwork(GenericLoop(network, game, **params), **metagame_config,)
population = Population(metagame, **pop_config)
while not population.terminated:
fitness = population.evaluate()
population.update(fitness)
print(f"{population.epoch} - Max fitness: {max(fitness)}")
"""
import os
from copy import copy, deepcopy
import numpy as np
from spikey.module import Module, Key
from spikey.meta.backends.default import MultiprocessBackend
from spikey.logging import log, MultiLogger
class GenotypeMapping(Module):
"""
Cache genotype-fitness matchings.
Parameters
----------
n_storing: int
Number of genotypes to store
Examples
--------
.. code-block:: python
cache = GenotypeCache(256)
cache.update({'a': 1}, 24)
fitness = cache[{'a': 1}]
print(fitness) # -> 24
"""
def __init__(self, n_storing: int):
self.n_storing = n_storing
self.genotypes = []
self.fitnesses = []
def __getitem__(self, genotype: dict) -> float:
"""
Pull value for specific genotype from cache.
Parameters
----------
genotype: dict
Genotype to pull cached value of.
Returns
-------
float or None The cached fitness of the genotype or None.
Examples
--------
.. code-block:: python
cache = GenotypeCache(256)
cache.update({'a': 1}, 24)
fitness = cache[{'a': 1}]
print(fitness) # -> 24
"""
genotype_no_age = copy(genotype)
if "_age" in genotype_no_age:
del genotype_no_age["_age"]
if genotype_no_age not in self.genotypes:
return None
idx = self.genotypes.index(genotype_no_age)
fitness = self.fitnesses[idx]
self.update(genotype, fitness)
return fitness
def update(self, genotype: dict, fitness: float):
"""
Update cache with result.
Parameters
----------
genotype: dict
Genotype to use as cache key.
fitness: float
Fitness of genotype given.
Examples
--------
.. code-block:: python
cache = GenotypeCache(256)
cache.update({'a': 1}, 24)
fitness = cache[{'a': 1}]
print(fitness) # -> 24
"""
if not self.n_storing:
return
# shallow copy ok -- only robust to del age in copy
# mutate, crossover use deepcopy so ok here
genotype_no_age = copy(genotype) # deepcopy(genotype)
if "_age" in genotype_no_age:
del genotype_no_age["_age"]
self.genotypes.append(genotype_no_age)
self.fitnesses.append(fitness)
assert len(self.genotypes) == len(self.fitnesses), "Cache broken!"
if len(self.genotypes) >= self.n_storing:
self.genotypes = self.genotypes[-self.n_storing :]
self.fitnesses = self.fitnesses[-self.n_storing :]
def run(
fitness_func: callable,
cache: GenotypeMapping,
genotype: dict,
log_fn: callable,
filename: str,
) -> (float, bool):
"""
Parameters
----------
fitness_func: callable
Function to determine fitness of genotype.
cache: GenotypeMapping
Genotype-fitness cache.
genotype: dict
Current genotype to test.
Returns
-------
fitness: float, terminate: bool
"""
fitness = cache[genotype]
if fitness is not None:
terminate = False
else:
fitness, terminate = fitness_func(genotype)
if filename:
results = {
"fitness": fitness,
"filename": filename,
}
log_fn(
None,
None,
results=results,
info=genotype,
filename=filename,
)
cache.update(genotype, fitness)
return fitness, terminate
def checkpoint_population(population: object, folder: str = "."):
"""
Checkpoint current epoch of population in file.
Parameters
----------
population: Population
Population to checkpoint.
folder: str
Folder to store checkpoint file.
"""
from pickle import dump as pickledump
if folder:
try:
os.makedirs(folder)
print(f"Created directory {folder}!")
except FileExistsError:
pass
if hasattr(population, "multilogger"):
file_header = population.multilogger.prefix
else:
file_header = ""
filename = f"{file_header}~EPOCH-({population.epoch:03d}).obj"
with open(os.path.join(folder, filename), "wb") as file:
pickledump(population, file)
def read_population(folder: str = ".") -> list:
"""
Read genotypes & fitnesses from last epoch and use it.
Parameters
----------
folder: path
Folder to find most recent checkpoint from.
Returns
-------
    Population
        Saved population object.
"""
from pickle import load as pickleload
relevant_filenames = []
for filename in os.listdir(folder):
if "EPOCH" in filename:
relevant_filenames.append(filename)
if not relevant_filenames:
raise ValueError(f"Could not find an previous EPOCH data in {folder}!")
relevant_filenames.sort()
with open(os.path.join(folder, relevant_filenames[-1]), "rb") as file:
population = pickleload(file)
return population
class Population(Module):
"""
An evolving population.
See genotype constraint docs in spikey/meta/series.
Parameters
----------
game: MetaRL
MetaRL game to evolve agents for.
backend: MetaBackend, default=MultiprocessBackend(max_process)
Backend to execute experiments with.
max_process: int, default=16
Number of separate processes to run experiments for
default backend.
kwargs: dict, default=None
Any configuration, required keys listed in NECESSARY_KEYS.
Examples
--------
.. code-block:: python
metagame = EvolveNetwork(GenericLoop(network, game, **params), **metagame_config,)
population = Population(metagame, **pop_config)
while not population.terminated:
fitness = population.evaluate()
population.update(fitness)
print(f"{population.epoch} - Max fitness: {max(fitness)}")
"""
NECESSARY_KEYS = [
Key("n_storing", "Number of genotypes to store in cache.", int),
Key(
"n_agents",
"Number of agents in population per epoch.",
(int, list, tuple, np.ndarray),
),
Key(
"n_epoch",
"Number of epochs -- unused if n_agents is iterable.",
int,
default=9999,
),
Key(
"mutate_eligable_pct",
"(0, 1] Pct of prev agents eligable to be mutated.",
float,
),
Key(
"max_age",
"Max age agent can reach before being removed from mutation/crossover/survivor pools.",
int,
),
Key(
"random_rate",
"(0, 1) Percent agents in population to generate randomly.",
float,
),
Key(
"survivor_rate",
"(0, 1) Percent(new generation) previous generation preserved/turn.",
float,
),
Key(
"mutation_rate",
"(0, 1) Percent(new generation) previous generation mutated/turn.",
float,
),
Key(
"crossover_rate",
"(0, 1) Percent(new generation) previous generation crossed over/turn.",
float,
),
Key("logging", "Whether to log or not.", bool, default=True),
Key("log_fn", "f(n, g, r, i, filename) Logging function.", default=log),
Key("folder", "Folder to save logs to.", str, default="log"),
]
def __init__(
self,
game: object,
backend: object = None,
max_process: int = 16,
**config,
):
super().__init__(**config)
self.genotype_constraints = game.GENOTYPE_CONSTRAINTS
self.get_fitness = game.get_fitness
self.backend = backend or MultiprocessBackend(max_process)
if isinstance(self._n_agents, (list, tuple, np.ndarray)):
self.n_agents = list(self._n_agents)
else:
self.n_agents = [self._n_agents for _ in range(self._n_epoch)]
self.epoch = 0 # For summaries
self.terminated = False
self.cache = GenotypeMapping(self._n_storing)
self.population = [self._random() for _ in range(self.n_agents[self.epoch])]
if self._mutate_eligable_pct == 0:
raise ValueError("mutate_eligable pct cannot be 0!")
self._normalize_rates()
if self._logging:
self._setup_logging(config, game.params)
def _normalize_rates(self):
"""
Normalize pertinent algorithm rates to 1.
"""
total = (
self._random_rate
+ self._survivor_rate
+ self._mutation_rate
+ self._crossover_rate
)
if not total:
raise ValueError(
"Need nonzero value for the survivor, mutation or crossover rate."
)
self._random_rate /= total
self._survivor_rate /= total
self._mutation_rate /= total
self._crossover_rate /= total
def _setup_logging(self, pop_params, game_params):
self.multilogger = MultiLogger(folder=self._folder)
info = {"population_config": pop_params}
info.update({"metagame_info": game_params})
self.multilogger.summarize(results=None, info=info)
def __len__(self) -> int:
return len(self.population)
def _genotype_dist(self, genotype1: dict, genotype2: dict) -> float:
"""
Compute the Euclidean distance between two genotypes.
Parameters
----------
genotype1: genotype
Genotypes to find the distance between.
genotype2: genotype
Genotypes to find the distance between.
Returns
-------
Euclidean distance between the two genotypes.
"""
total = 0
for key in self.genotype_constraints.keys():
if isinstance(genotype1[key], (list, tuple)):
for i in range(len(genotype1[key])):
total += (genotype1[key][i] - genotype2[key][i]) ** 2
continue
total += (genotype1[key] - genotype2[key]) ** 2
return total ** 0.5
def _random(self) -> dict:
"""
Randomly generate a genotype given constraints.
"""
eval_constraint = (
lambda cons: np.random.uniform(*cons)
if isinstance(cons, tuple)
else cons[np.random.choice(len(cons))]
)
genotype = {
key: eval_constraint(constraint)
for key, constraint in self.genotype_constraints.items()
}
genotype["_age"] = 0
return genotype
def _mutate(self, genotypes: list) -> list:
"""
Mutate a random key of each genotype given.
"""
if not isinstance(genotypes, (list, np.ndarray)):
genotypes = [genotypes]
new_genotypes = []
for genotype in genotypes:
new_genotype = deepcopy(genotype) ## prevent edit of original!
key = np.random.choice(list(self.genotype_constraints.keys()))
cons = self.genotype_constraints[key]
if isinstance(cons, tuple):
new_genotype[key] = np.random.uniform(*cons)
else:
new_genotype[key] = cons[np.random.choice(len(cons))]
new_genotype["_age"] = 0
new_genotypes.append(new_genotype)
return new_genotypes
def _crossover(self, genotype1: dict, genotype2: dict) -> [dict, dict]:
"""
Crossover two different genotypes.
Parameters
----------
genotype1, genotype2: dict
Parent genotypes to cross over.
Returns
-------
2 new genotypes.
"""
offspring1, offspring2 = {}, {}
switch = False
switch_key = np.random.choice(list(self.genotype_constraints.keys()))
keys = list(self.genotype_constraints.keys())
np.random.shuffle(keys) # Prevent bias
for key in keys:
if key == switch_key:
switch = True
offspring1[key] = genotype1[key] if switch else genotype2[key]
offspring2[key] = genotype2[key] if switch else genotype1[key]
offspring1["_age"] = 0
offspring2["_age"] = 0
return [offspring1, offspring2]
def update(self, f: list):
"""
Update the population based on each agent's fitness.
Parameters
----------
f: list of float
Fitness values for each agent.
"""
self.epoch += 1
try:
n_agents = self.n_agents[self.epoch]
except (StopIteration, IndexError):
self.terminated = True
return
prev_gen = [(self.population[i], f[i]) for i in range(len(f))]
prev_gen = sorted(prev_gen, key=lambda x: x[1])
prev_gen = [value[0] for value in prev_gen if value[0]["_age"] < self._max_age]
self.population = []
self.population += [
self._random() for _ in range(int(n_agents * self._random_rate))
]
if int(n_agents * self._survivor_rate): # -0 returns whole list!!
survivors = [
deepcopy(genotype)
for genotype in prev_gen[-int(n_agents * self._survivor_rate) :]
]
for genotype in survivors:
genotype["_age"] += 1
self.population += survivors
mutate_candidates = prev_gen[-int(self._mutate_eligable_pct * len(prev_gen)) :]
self.population += self._mutate(
[
deepcopy(genotype)
for genotype in np.random.choice(
mutate_candidates, size=int(n_agents * self._mutation_rate)
)
]
)
for _ in range(int(n_agents * self._crossover_rate) // 2):
genotype1 = np.random.choice(prev_gen)
genotype2 = np.random.choice(prev_gen)
self.population += self._crossover(deepcopy(genotype1), deepcopy(genotype2))
if len(self) < n_agents:
diff = n_agents - len(self)
self.population += self._mutate(np.random.choice(prev_gen, size=diff))
def evaluate(self) -> list:
"""
Evaluate each agent on the fitness function.
Returns
-------
Fitness values for each agent.
"""
params = [
(
self.get_fitness,
self.cache,
genotype,
self._log_fn,
next(self.multilogger.filename_generator) if self._logging else None,
)
for genotype in self.population
]
results = self.backend.distribute(run, params)
fitnesses = [result[0] for result in results]
terminated = [result[1] for result in results]
if any(terminated):
self.terminated = True
return fitnesses
|
python
|
#!/usr/bin/env python
from setuptools import setup, find_packages
packages = ['eda.' + p for p in find_packages('eda', exclude=['test', 'test*', '*.t'])]
packages.append('eda')
#packages=['eda', 'eda.components', 'eda.components.ST', 'eda.circuits'],
setup(
name='EDA',
version='1.0.1',
author='Paweł Wodnicki',
author_email='[email protected]',
url='https://github.com/32bitmicro/EDA/',
license='BSD 3-clause',
description='EDA for generative design.',
test_suite='eda.tests.gen_test',
packages=packages
)
|
python
|
import gym
import retro
import os
import numpy as np
from PIL import Image
from gym import spaces
from collections import deque
import cv2
SCRIPT_DIR = os.getcwd() #os.path.dirname(os.path.abspath(__file__))
# Taken from: https://gitlab.cs.duke.edu/mark-nemecek/vel/-/blob/cfa17ddd8c328331076b3992449665ccd2471bd3/vel/openai/baselines/common/atari_wrappers.py
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = 84
self.height = 84
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 1), dtype=np.uint8)
def observation(self, frame):
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
# frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
# return frame[:, :, None]
frame = Image.fromarray(frame).convert('L').resize((self.width, self.height))
# self._display_last_frame(frame)
# frame = np.array(frame).astype(np.float32).reshape(1, self.width, self.height) / 255
frame = np.array(frame).astype(np.float32).reshape(1, self.width, self.height) / 255
return frame
def _display_last_frame(self, img):
img.show()
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
# self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=np.uint8)
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=0)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
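# Illustrative note (hypothetical shapes, not from the original): with k stacked frames of
# shape (1, 84, 84), np.array(LazyFrames(frames)) forces the concatenation along axis 0 and
# yields an array of shape (k, 84, 84); until then the individual frames are shared, not copied.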
# Taken from: https://github.com/openai/retro/blob/master/retro/examples/discretizer.py
class Discretizer(gym.ActionWrapper):
"""
Wrap a gym environment and make it use discrete actions.
Args:
combos: ordered list of lists of valid button combinations
"""
def __init__(self, env, combos):
super().__init__(env)
assert isinstance(env.action_space, gym.spaces.MultiBinary)
buttons = env.unwrapped.buttons
self._decode_discrete_action = []
for combo in combos:
arr = [False] * env.action_space.n # np.array([False] * env.action_space.n)
for button in combo:
arr[buttons.index(button)] = True
self._decode_discrete_action.append(arr)
self.action_space = gym.spaces.Discrete(len(self._decode_discrete_action))
def action(self, act):
if type(act) is list:
out = np.zeros((self.unwrapped.action_space.n,), dtype=bool) # [0] * self.unwrapped.action_space.n
for a in act:
dec_act = self._decode_discrete_action[a].copy()
out += dec_act
else:
out = self._decode_discrete_action[act].copy()
return out
# Define classes per game per buttons combo
class MarioDiscretizerSimple(Discretizer):
"""
Use Mario Bros specific discrete actions
based on https://github.com/openai/retro-baselines/blob/master/agents/sonic_util.py
Buttons: ['B', None, 'SELECT', 'START', 'UP', 'DOWN', 'LEFT', 'RIGHT', 'A']
"""
def __init__(self, env):
combo_list = [[None], ['B'], ['A'], ['LEFT'], ['RIGHT']]
super().__init__(env=env, combos=combo_list)
class MarioDiscretizerComplex(Discretizer):
"""
Use Mario Bros specific discrete actions
based on https://github.com/openai/retro-baselines/blob/master/agents/sonic_util.py
Buttons: ['B', None, 'SELECT', 'START', 'UP', 'DOWN', 'LEFT', 'RIGHT', 'A']
"""
def __init__(self, env):
# combo_list = [[None],['RIGHT'],['RIGHT', 'A'],['RIGHT', 'B'],['RIGHT', 'A', 'B'],['A'], ['LEFT'],['LEFT', 'A'],['LEFT', 'B'],['LEFT', 'A', 'B'],['DOWN'],['UP']]
combo_list = [[None],['RIGHT'],['RIGHT', 'A'],['RIGHT', 'B'],['RIGHT', 'A', 'B'],['A']]
super().__init__(env=env, combos=combo_list)
def setup_env(env_id, level_id):
retro.data.Integrations.add_custom_path(os.path.join(SCRIPT_DIR, "retro_integration"))
print(retro.data.list_games(inttype=retro.data.Integrations.CUSTOM_ONLY))
print(env_id in retro.data.list_games(inttype=retro.data.Integrations.CUSTOM_ONLY))
obs_type = retro.Observations.IMAGE # or retro.Observations.RAM
env = retro.make(env_id, level_id, record=False, inttype=retro.data.Integrations.CUSTOM_ONLY, obs_type=obs_type)
env = WarpFrame(env)
# env = FrameStack(env, 4)
return env
# x=setup_env("SMB-JU", "Level1-1")
|
python
|
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
fig, axes = plt.subplots(2, 1)
axes[0].set_title("Hammer projection")
map = Basemap(projection='hammer', lon_0 = 10, lat_0 = 50, ax=axes[0])
map.drawmapboundary(fill_color='aqua')
map.fillcontinents(color='coral',lake_color='aqua')
map.drawcoastlines()
axes[1].set_title("Robinson projection")
map = Basemap(projection='robin', lon_0 = 10, lat_0 = 50, ax=axes[1])
map.drawmapboundary(fill_color='aqua')
map.fillcontinents(color='coral',lake_color='aqua')
map.drawcoastlines()
plt.show()
|
python
|
"""
Forward-ports of types from Python 2 for use with Python 3:
- ``basestring``: equivalent to ``(str, bytes)`` in ``isinstance`` checks
- ``dict``: with list-producing .keys() etc. methods
- ``str``: bytes-like, but iterating over them doesn't produce integers
- ``long``: alias of Py3 int with ``L`` suffix in the ``repr``
- ``unicode``: alias of Py3 str with ``u`` prefix in the ``repr``
"""
from past import utils
if utils.PY2:
import __builtin__
basestring = __builtin__.basestring
dict = __builtin__.dict
str = __builtin__.str
long = __builtin__.long
unicode = __builtin__.unicode
__all__ = []
else:
from .basestring import basestring
from .olddict import olddict
from .oldstr import oldstr
long = int
unicode = str
# from .unicode import unicode
__all__ = ['basestring', 'olddict', 'oldstr', 'long', 'unicode']
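# Illustrative note (not part of the original file): per the module docstring, code written
# for Python 2 can keep using checks such as isinstance(value, basestring) on Python 3,
# where the forward-ported basestring accepts both str and bytes.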
|
python
|
from pycocotools.coco import COCO
import matplotlib.pyplot as plt
import cv2
import os
import numpy as np
import random
import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader,Dataset
from skimage import io,transform
import PIL.Image as Image
class CocoValPerson(Dataset):  # inherits from torch.utils.data.Dataset
def __init__(self, cocoRoot="/disk2/mycode/common_data/coco", dataType="val2017", num_use=None):  # __init__ sets up the basic parameters of this dataset class
self.cocoRoot = cocoRoot
self.dataType = dataType
annFile = os.path.join(self.cocoRoot, f'annotations/instances_{self.dataType}.json')
print(f'Annotation file: {annFile}')
self.coco=COCO(annFile)
# Use getCatIds to get the ID corresponding to a category;
# the function supports more complex queries, see the official documentation.
person_id = self.coco.getCatIds('person')[0]
print(f'ID corresponding to "person": {person_id}')
# Use loadCats to get the text category corresponding to an ID;
# the function supports more complex queries, see the official documentation.
cats = self.coco.loadCats(1)
print(f'Category name corresponding to "1": {cats}')
self.imgIds = self.coco.getImgIds(catIds=[1])
print(f'Number of images containing a person: {len(self.imgIds)}')
# crowds filter
new_imgIds = []
for i in range(len(self.imgIds)):
imgId = self.imgIds[i]
annIds = self.coco.getAnnIds(imgIds=imgId, catIds=[1], iscrowd=True)
if len(annIds) == 0:
new_imgIds.append(imgId)
self.imgIds = new_imgIds
print(f'After filtering out images with crowd masks, {len(self.imgIds)} remain')
if num_use != None:
self.imgIds = self.imgIds[:num_use]
print(f'Only use {num_use} images')
def __len__(self):
return len(self.imgIds)
def __getitem__(self, index):
imgId = self.imgIds[index]
imgInfo = self.coco.loadImgs(imgId)[0]
imPath = os.path.join(self.cocoRoot, self.dataType, imgInfo['file_name'])
img = Image.open(imPath).convert('RGB')
img = transforms.Resize((500, 500))(img)
img = transforms.ToTensor()(img)
annIds = self.coco.getAnnIds(imgIds=imgId, catIds=[1])
anns = self.coco.loadAnns(annIds)
masks_tensor = torch.Tensor(14,500,500).fill_(-1)
box_tesnor = torch.Tensor(14,4).fill_(-1)
h_w_r_tensor = torch.Tensor(14).fill_(-1)
one_layer = torch.ones(1,500,500)
zero_layer = torch.zeros(1,500,500)
if len(annIds) >= 14:
print(imgInfo['file_name'])
# print(len(annIds))
for i in range(len(annIds)):
if anns[i]['iscrowd'] == 1:
print(imgInfo['file_name'])
print(len(annIds))
continue
mask = self.coco.annToMask(anns[i])
mask = torch.from_numpy(mask).float()
mask = transforms.ToPILImage()(mask)
mask = transforms.Resize((500, 500))(mask)
mask = transforms.ToTensor()(mask)
mask = torch.where(mask>0.5, one_layer, zero_layer)
masks_tensor[i] = mask
box = anns[i]['bbox']
h_w_r = box[3]/box[2]
box_trans = box.copy()
box_trans[0] = box[0]/imgInfo['width'] * 500
box_trans[1] = box[1]/imgInfo['height'] * 500
box_trans[2] = box[2]/imgInfo['width'] * 500
box_trans[3] = box[3]/imgInfo['height'] * 500
box_tesnor[i] = torch.Tensor(box_trans)
h_w_r_tensor[i] = h_w_r
# masks_area_sort_index = torch.sort(masks_area_tensor, descending=True)[1]
# masks_tensor_sort = masks_tensor[masks_area_sort_index]
# vali = torch.sum(torch.sum(masks_tensor_sort, dim=-1), dim=-1)
# masks_tensor_sort_top = masks_tensor_sort[:14]
# masks_tensor_sort_top_len = masks_tensor_sort_top.shape[0]
# masks_tensor_return = torch.Tensor(14,1,500,500).fill_(-1)
# masks_tensor_return[:masks_tensor_sort_top_len] = masks_tensor_sort[:masks_tensor_sort_top_len]
# if len(annIds) >= 14:
# mask = masks_tensor_return[0]
# mask = transforms.ToPILImage()(mask)
# mask.show()
return img, masks_tensor, box_tesnor, h_w_r_tensor
if __name__=='__main__':
data = CocoValPerson(dataType="val2017", num_use=10)
dataloader = DataLoader(data, batch_size=1, shuffle=False)  # load the data with a DataLoader
max_len = 0
for epoch in range(10):
for i_batch,batch_data in enumerate(dataloader):
if i_batch % 50 ==0:
img, masks, bboxes, h_w_r = batch_data
# masks_pil = transforms.ToPILImage()(masks[0,0])
# masks_pil.show()
bbox = bboxes[0,0]
cccc = masks[0,0].clone()
cccc[int(bbox[1]):int(bbox[1]+bbox[3]),int(bbox[0]):int(bbox[0]+bbox[2])] = 1
cccc_p = cccc+masks[0,0]
cccc_p = cccc_p/torch.max(cccc_p)
cccc_p_pil = transforms.ToPILImage()(cccc_p)
cccc_p_pil.show()
print(i_batch)
|
python
|
import LAMMPyS as lp
steps = lp.Steps('test.dump')
step = steps[-1]
atoms = step.atoms
|
python
|
import tensorflow as tf  # assumed missing import: the snippet uses the TF 1.x tf.logging / tf.estimator APIs
def do_training(train, train_labels, test, test_labels, num_classes):
#set TensorFlow logging level to INFO
tf.logging.set_verbosity(tf.logging.INFO)
# Build 2 hidden layer DNN with 10, 10 units respectively.
classifier = tf.estimator.DNNClassifier(
# Compute feature_columns from dataframe keys using list comprehension
feature_columns =
[tf.feature_column.numeric_column(key=key) for key in train.keys()],
hidden_units=[10, 10],
n_classes=num_classes)
# Train the Model
classifier.train(
input_fn=lambda:train_input_fn(train, train_labels,100),
steps=1000
)
# Evaluate the model
eval_result = classifier.evaluate(
input_fn=lambda:eval_input_fn(test, test_labels,100)
)
return (classifier, eval_result)
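# Hypothetical sketch of the input functions referenced above (they are not defined in this
# snippet); assumes `features` is a pandas DataFrame and `labels` a Series, following the
# usual tf.estimator input-pipeline pattern. The shuffle buffer size is an arbitrary choice.
def train_input_fn(features, labels, batch_size):
    """Shuffled, repeating tf.data pipeline for training."""
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    return dataset.shuffle(1000).repeat().batch(batch_size)

def eval_input_fn(features, labels, batch_size):
    """Single-pass, unshuffled pipeline for evaluation."""
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    return dataset.batch(batch_size)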
|
python
|
from __future__ import absolute_import
import abc
class Writer(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def text(self, text):
pass
@abc.abstractmethod
def start(self, name, attributes=None):
pass
@abc.abstractmethod
def end(self, name):
pass
@abc.abstractmethod
def self_closing(self, name, attributes=None):
pass
@abc.abstractmethod
def append(self, html):
pass
@abc.abstractmethod
def as_string(self):
pass
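# A minimal concrete implementation sketch (illustrative only, not part of the original
# module): it renders the writer calls as an HTML/XML string.
class StringWriter(Writer):
    def __init__(self):
        self._parts = []

    def text(self, text):
        self._parts.append(text)

    def start(self, name, attributes=None):
        self._parts.append("<{0}{1}>".format(name, _format_attributes(attributes)))

    def end(self, name):
        self._parts.append("</{0}>".format(name))

    def self_closing(self, name, attributes=None):
        self._parts.append("<{0}{1} />".format(name, _format_attributes(attributes)))

    def append(self, html):
        self._parts.append(html)

    def as_string(self):
        return "".join(self._parts)

def _format_attributes(attributes):
    # Render a dict of attributes as ' key="value"' pairs (empty string when None).
    return "".join(' {0}="{1}"'.format(key, value) for key, value in (attributes or {}).items())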
|
python
|
import utime
import machine
from machine import Timer, Pin, RTC
import time
import ntptime
import micropython
import config
import control
import mqtt_reporting
def set_ntp_time(timer):
ntptime.host = "tempus1.gum.gov.pl"
for i in range(1,10):
try:
t = ntptime.time()
tm = utime.localtime(t)
tm = tm[0:3] + (0,) + tm[3:6] + (0,)
RTC().datetime(tm)
break
except OSError:
continue
# def init_modules():
# config.init()
micropython.alloc_emergency_exception_buf(500)
machine.freq(160000000)
timer = Timer(-1)
timer.init(period=300*1000, mode=Timer.PERIODIC, callback=set_ntp_time)
set_ntp_time(None)
control.start(timer)
mqtt_reporting.setup()
print("\nGROWBOX INIT COMPLETE!")
while True:
mqtt_reporting.run()
time.sleep_ms(60*1000)
|
python
|
import pytest
from pathlib import Path
from yalul.lex.scanners.grouping import GroupingScanner
from yalul.lex.token_type import TokenType
@pytest.fixture(scope='function')
def open_file(request):
return open(str(Path.cwd()) + "/tests/lex_examples/" + request.param)
class TestShouldLex:
def test_when_is_left_paren(self):
assert GroupingScanner.should_lex('(')
def test_when_is_right_paren(self):
assert GroupingScanner.should_lex(')')
def test_when_isnt_paren(self):
assert not GroupingScanner.should_lex('a')
class TestCreateToken:
@pytest.mark.parametrize('open_file', ['grouping_example.yalul'], indirect=['open_file'])
def test_create_token(self, open_file):
char = open_file.read(1)
scanner = GroupingScanner(char, open_file)
assert scanner.create_token().type == TokenType.LEFT_PAREN
assert scanner.create_token().type == TokenType.RIGHT_PAREN
|
python
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from typing import List, Literal, Optional, TypedDict
from typing_extensions import NotRequired, Required
from .activity import PartialPresenceUpdate
from .voice import GuildVoiceState
from .integration import BaseIntegration, IntegrationApplication
from .role import Role
from .channel import ChannelType, StageInstance
from .interactions import Interaction
from .invite import InviteTargetType
from .emoji import Emoji, PartialEmoji
from .member import MemberWithUser
from .snowflake import Snowflake
from .message import Message
from .sticker import GuildSticker
from .appinfo import GatewayAppInfo, PartialAppInfo
from .guild import Guild, UnavailableGuild
from .user import User
from .threads import Thread, ThreadMember
from .scheduled_event import GuildScheduledEvent
class SessionStartLimit(TypedDict):
total: int
remaining: int
reset_after: int
max_concurrency: int
class Gateway(TypedDict):
url: str
class GatewayBot(Gateway):
shards: int
session_start_limit: SessionStartLimit
class ReadyEvent(TypedDict):
v: int
user: User
guilds: List[UnavailableGuild]
session_id: str
shard: List[int] # shard_id, num_shards
application: GatewayAppInfo
ResumedEvent = Literal[None]
MessageCreateEvent = Message
class MessageDeleteEvent(TypedDict):
id: Snowflake
channel_id: Snowflake
guild_id: NotRequired[Snowflake]
class MessageDeleteBulkEvent(TypedDict):
ids: List[Snowflake]
channel_id: Snowflake
guild_id: NotRequired[Snowflake]
class MessageUpdateEvent(Message):
channel_id: Snowflake
class MessageReactionAddEvent(TypedDict):
user_id: Snowflake
channel_id: Snowflake
message_id: Snowflake
emoji: PartialEmoji
member: NotRequired[MemberWithUser]
guild_id: NotRequired[Snowflake]
class MessageReactionRemoveEvent(TypedDict):
user_id: Snowflake
channel_id: Snowflake
message_id: Snowflake
emoji: PartialEmoji
guild_id: NotRequired[Snowflake]
class MessageReactionRemoveAllEvent(TypedDict):
message_id: Snowflake
channel_id: Snowflake
guild_id: NotRequired[Snowflake]
class MessageReactionRemoveEmojiEvent(TypedDict):
emoji: PartialEmoji
message_id: Snowflake
channel_id: Snowflake
guild_id: NotRequired[Snowflake]
InteractionCreateEvent = Interaction
PresenceUpdateEvent = PartialPresenceUpdate
UserUpdateEvent = User
class InviteCreateEvent(TypedDict):
channel_id: Snowflake
code: str
created_at: str
max_age: int
max_uses: int
temporary: bool
uses: Literal[0]
guild_id: NotRequired[Snowflake]
inviter: NotRequired[User]
target_type: NotRequired[InviteTargetType]
target_user: NotRequired[User]
target_application: NotRequired[PartialAppInfo]
class InviteDeleteEvent(TypedDict):
channel_id: Snowflake
code: str
guild_id: NotRequired[Snowflake]
class _ChannelEvent(TypedDict):
id: Snowflake
type: ChannelType
ChannelCreateEvent = ChannelUpdateEvent = ChannelDeleteEvent = _ChannelEvent
class ChannelPinsUpdateEvent(TypedDict):
channel_id: Snowflake
guild_id: NotRequired[Snowflake]
last_pin_timestamp: NotRequired[Optional[str]]
class ThreadCreateEvent(Thread, total=False):
newly_created: bool
members: List[ThreadMember]
ThreadUpdateEvent = Thread
class ThreadDeleteEvent(TypedDict):
id: Snowflake
guild_id: Snowflake
parent_id: Snowflake
type: ChannelType
class ThreadListSyncEvent(TypedDict):
guild_id: Snowflake
threads: List[Thread]
members: List[ThreadMember]
channel_ids: NotRequired[List[Snowflake]]
class ThreadMemberUpdate(ThreadMember):
guild_id: Snowflake
class ThreadMembersUpdate(TypedDict):
id: Snowflake
guild_id: Snowflake
member_count: int
added_members: NotRequired[List[ThreadMember]]
removed_member_ids: NotRequired[List[Snowflake]]
class GuildMemberAddEvent(MemberWithUser):
guild_id: Snowflake
class GuildMemberRemoveEvent(TypedDict):
guild_id: Snowflake
user: User
class GuildMemberUpdateEvent(TypedDict):
guild_id: Snowflake
roles: List[Snowflake]
user: User
avatar: Optional[str]
joined_at: Optional[str]
nick: NotRequired[str]
premium_since: NotRequired[Optional[str]]
deaf: NotRequired[bool]
mute: NotRequired[bool]
pending: NotRequired[bool]
communication_disabled_until: NotRequired[str]
class GuildEmojisUpdateEvent(TypedDict):
guild_id: Snowflake
emojis: List[Emoji]
class GuildStickersUpdateEvent(TypedDict):
guild_id: Snowflake
stickers: List[GuildSticker]
GuildCreateEvent = GuildUpdateEvent = Guild
GuildDeleteEvent = UnavailableGuild
class _GuildBanEvent(TypedDict):
guild_id: Snowflake
user: User
GuildBanAddEvent = GuildBanRemoveEvent = _GuildBanEvent
class _GuildRoleEvent(TypedDict):
guild_id: Snowflake
role: Role
class GuildRoleDeleteEvent(TypedDict):
guild_id: Snowflake
role_id: Snowflake
GuildRoleCreateEvent = GuildRoleUpdateEvent = _GuildRoleEvent
class GuildMembersChunkEvent(TypedDict):
guild_id: Snowflake
members: List[MemberWithUser]
chunk_index: int
chunk_count: int
not_found: NotRequired[List[Snowflake]]
presences: NotRequired[List[PresenceUpdateEvent]]
nonce: NotRequired[str]
class GuildIntegrationsUpdateEvent(TypedDict):
guild_id: Snowflake
class _IntegrationEvent(BaseIntegration, total=False):
guild_id: Required[Snowflake]
role_id: Optional[Snowflake]
enable_emoticons: bool
subscriber_count: int
revoked: bool
application: IntegrationApplication
IntegrationCreateEvent = IntegrationUpdateEvent = _IntegrationEvent
class IntegrationDeleteEvent(TypedDict):
id: Snowflake
guild_id: Snowflake
application_id: NotRequired[Snowflake]
class WebhooksUpdateEvent(TypedDict):
guild_id: Snowflake
channel_id: Snowflake
StageInstanceCreateEvent = StageInstanceUpdateEvent = StageInstanceDeleteEvent = StageInstance
GuildScheduledEventCreateEvent = GuildScheduledEventUpdateEvent = GuildScheduledEventDeleteEvent = GuildScheduledEvent
class _GuildScheduledEventUsersEvent(TypedDict):
guild_scheduled_event_id: Snowflake
user_id: Snowflake
guild_id: Snowflake
GuildScheduledEventUserAdd = GuildScheduledEventUserRemove = _GuildScheduledEventUsersEvent
VoiceStateUpdateEvent = GuildVoiceState
class VoiceServerUpdateEvent(TypedDict):
token: str
guild_id: Snowflake
endpoint: Optional[str]
class TypingStartEvent(TypedDict):
channel_id: Snowflake
user_id: Snowflake
timestamp: int
guild_id: NotRequired[Snowflake]
member: NotRequired[MemberWithUser]
|
python
|
from typing import List  # assumed missing import for the List[str] annotation below
class Solution:
def minDeletionSize(self, A: List[str]) -> int:
return sum(list(column) != sorted(column) for column in zip(*A))
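    # Illustrative example (not from the original): for A = ["cba", "daf", "ghi"],
    # zip(*A) gives the columns ('c','d','g'), ('b','a','h'), ('a','f','i');
    # only the middle column is out of sorted order, so one deletion is needed and the answer is 1.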
|
python
|
import sys
import os
import os.path
import re
import shutil
from setuptools import setup
from setuptools.command.install_lib import install_lib
from setuptools.command.install import install
import setuptools.command.bdist_egg
import distutils.spawn
import subprocess
import sys
import glob
exclude_directories = lambda files: [ file for file in files if not os.path.isdir(file) ]
share_checklist_files=glob.glob("checklists/*")
pt_steps_files=glob.glob("pt_steps/*")
conf_files=glob.glob("conf/*")
doc_files=exclude_directories(glob.glob("doc/*"))
doc_pte_files=exclude_directories(glob.glob("doc/processtrak_example/*"))
doc_pte_creston_files=exclude_directories(glob.glob("doc/processtrak_example/creston_jan2016/*"))
xslt_files=glob.glob("xslt/*")
root_files=["README.txt","INSTALL.txt"]
#limatix_widgets_glade_catalogs_package_files=["*.xml"]
limatix_widgets_package_files=["*.glade","glade_catalogs/*"]
limatix_checklist_steps_package_files=["*.glade"]
limatix_package_files=["pt_steps/*.py","*.glade","limatix_checklists/*","limatix_conf/*", "limatix_plans/*"]
console_scripts=["datacollect2",
"dc_checklist",
"pt_checkprovenance",
"dc_chx2chf",
"dc_glade",
"dc_gui",
"dc_paramdb2",
"thermal2limatix",
"processtrak",
"dc_ricohphoto",
"dc_xlg2dpd",
"pt_cleanup",
"limatix-git"]
gui_scripts = [] # Could move graphical scripts into here to eliminate stdio window on Windows (where would error messages go?)
console_scripts_entrypoints = [ "%s = limatix.bin.%s:main" % (script,script.replace("-","_")) for script in console_scripts ]
gui_scripts_entrypoints = [ "%s = limatix.bin.%s:main" % (script,script.replace("-","_")) for script in gui_scripts ]
canonicalize_path_config_files=["limatix/canonicalize_path/canonical_paths.conf.example","limatix/canonicalize_path/tag_index_paths.conf.example"]
canonicalize_path_package_files=["canonical_paths.conf","tag_index_paths.conf"]
limatix_checklist_step_paths=glob.glob("limatix/steps/*.py")
limatix_checklist_step_names=[ os.path.splitext(os.path.split(path)[1])[0] for path in limatix_checklist_step_paths if not path.endswith("__init__.py")]
limatix_checklist_step_entrypoints = [ '%s = limatix.steps.%s' % (stepname,stepname) for stepname in limatix_checklist_step_names]
limatix_widget_paths=glob.glob("limatix/widgets/*.py")
limatix_widget_names=[ os.path.splitext(os.path.split(path)[1])[0] for path in limatix_widget_paths if not path.endswith("__init__.py")]
limatix_widget_entrypoints = [ '%s = limatix.widgets.%s' % (widgetname,widgetname) for widgetname in limatix_widget_names]
#package_files=["canonical_paths.conf","tag_index_paths.conf"]
# NOTE ***: share files will be installed to prefix/share/limatix
# By default, prefix is /usr so share_files to be found in
# /usr/share/limatix
# Apply hotfix to setuptools issue #130, from
# https://bitbucket.org/pypa/setuptools/issues/130/install_data-doesnt-respect-prefix
# hotfix applies at least to all setuptools versions prior to 20.2
def setuptools_command_bdist_egg_call_command_hotfix(self, cmdname, **kw):
"""Invoke reinitialized command `cmdname` with keyword args"""
if cmdname != 'install_data':
for dirname in INSTALL_DIRECTORY_ATTRS:
kw.setdefault(dirname, self.bdist_dir)
kw.setdefault('skip_build', self.skip_build)
kw.setdefault('dry_run', self.dry_run)
cmd = self.reinitialize_command(cmdname, **kw)
self.run_command(cmdname)
return cmd
setuptools_version=tuple([int(versionpart) for versionpart in setuptools.__version__.split(".")[:3]])
# Apply hotfix to all versions prior to 20.2
if setuptools_version < (20,2):
setuptools.command.bdist_egg.call_command=setuptools_command_bdist_egg_call_command_hotfix
pass
class install_lib_save_prefix_and_version(install_lib):
"""Save a file install_prefix.txt with the install prefix"""
def run(self):
install_lib.run(self)
#sys.stderr.write("\nprefix:" + str((self.distribution.command_obj["install"].prefix))+"\n\n\n")
#sys.stderr.write("\ninstall_dir:" + self.install_dir+"\n\n\n")
#sys.stderr.write("\npackages:" + str(self.distribution.command_obj["build_py"].packages)+"\n\n\n")
for package in self.distribution.command_obj["build_py"].packages:
install_dir=os.path.join(*([self.install_dir] + package.split('.')))
fh=open(os.path.join(install_dir,"install_prefix.txt"),"w")
#fh.write(self.distribution.command_obj["install"].prefix)
# Fix for Ubuntu: install_data seems to be the prefix
# for where stuff is installed (?)
fh.write(self.distribution.command_obj["install"].install_data)
fh.close()
fh=open(os.path.join(install_dir,"version.txt"),"w")
fh.write("%s\n" % (version)) # version global, as created below
fh.close()
pass
pass
pass
# Extract GIT version
if os.path.exists(".git") and distutils.spawn.find_executable("git") is not None:
# Check if tree has been modified
modified = subprocess.call(["git","diff-index","--quiet","HEAD","--"]) != 0
gitrev = subprocess.check_output(["git","rev-parse","HEAD"]).strip().decode('utf-8')
version = "git-%s" % (gitrev)
# See if we can get a more meaningful description from "git describe"
try:
versionraw=subprocess.check_output(["git","describe","--tags","--match=v*"],stderr=subprocess.STDOUT).decode('utf-8').strip()
# versionraw is like v0.1.0-50-g434343
# for compatibility with PEP 440, change it to
# something like 0.1.0+50.g434343
matchobj=re.match(r"""v([^.]+[.][^.]+[.][^-.]+)(-.*)?""",versionraw)
version=matchobj.group(1)
if matchobj.group(2) is not None:
#version += '+'+matchobj.group(2)[1:].replace("-",".")
version += '.'+matchobj.group(2)[1:].replace("-",".")
pass
pass
except subprocess.CalledProcessError:
# Ignore error, falling back to above version string
pass
if modified and version.find('+') >= 0:
version += ".modified"
pass
elif modified:
#version += "+modified"
version += ".modified"
pass
pass
else:
version = "UNKNOWN"
pass
print("version = %s" % (version))
setup(name="limatix",
description="Automated data collection",
author="Stephen D. Holland",
version=version,
# url="http://limatix.org/dataguzzler",
zip_safe=False,
packages=["limatix",
"limatix.steps",
"limatix.bin",
"limatix.widgets",
"limatix.canonicalize_path",
"limatix.dc_lxml_treesync"],
package_dir={"limatix.canonicalize_path": "limatix/canonicalize_path/canonicalize_path"},
cmdclass={"install_lib": install_lib_save_prefix_and_version},
data_files=[ ("share/limatix/checklists",share_checklist_files),
("share/limatix/pt_steps",pt_steps_files),
("share/limatix/conf",conf_files),
("share/limatix/doc",doc_files),
("share/limatix/doc/processtrak_example",doc_pte_files),
("share/limatix/doc/processtrak_example/creston_jan2016",doc_pte_creston_files),
("share/limatix/xslt",xslt_files),
("share/limatix",root_files),
("etc/canonicalize_path",canonicalize_path_config_files)],
package_data={"limatix.canonicalize_path": canonicalize_path_package_files,
"limatix.widgets": limatix_widgets_package_files,
"limatix.steps": limatix_checklist_steps_package_files,
"limatix": limatix_package_files},
entry_points={
"limatix.checklist_search_path": [ "limatix.checklist_search_path_entry=limatix:getchecklisturlpath"],
"limatix.checklist.step": limatix_checklist_step_entrypoints,
"limatix.widget": limatix_widget_entrypoints,
"limatix.datacollect2.config_url_search_path": [ "limatix.share.conf = limatix:getconfigurlpath" ],
"limatix.processtrak.step_url_search_path": [ "limatix.share.pt_steps = limatix:getptstepurlpath" ],
"console_scripts": console_scripts_entrypoints,
"gui_scripts": gui_scripts_entrypoints })
# scripts=["bin/datacollect2",
# "bin/dc_checklist",
# "bin/pt_checkprovenance",
# "bin/dc_chx2chf",
# "bin/dc_glade",
# "bin/dc_gui",
# "bin/dc_paramdb2",
# "bin/thermal2limatix",
# "bin/processtrak",
# "bin/dc_ricohphoto",
# "bin/dc_xlg2dpd",
# "bin/pt_cleanup"],
|
python
|
from dataclasses import dataclass
from bindings.csw.graph_style_type import GraphStyleType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class GraphStyle1(GraphStyleType):
"""The style descriptor for a graph consisting of a number of features.
Describes graph-specific style attributes.
"""
class Meta:
name = "GraphStyle"
namespace = "http://www.opengis.net/gml"
|
python
|
#####################################################
## librealsense streams test ##
#####################################################
# This assumes .so file is found on the same directory
import pyrealsense2 as rs
# Prettier prints for reverse-engineering
from pprint import pprint
# Get realsense pipeline handle
pipe = rs.pipeline()
# Print all connected devices and find the T265
devices = rs.context().devices
for i in range(len(devices)):
print('---------------------------')
# Other fields of camera_info: https://intelrealsense.github.io/librealsense/python_docs/_generated/pyrealsense2.camera_info.html
print('Found connected device #', i + 1, ':', devices[i].get_info(rs.camera_info.name), ', serial no: ', devices[i].get_info(rs.camera_info.serial_number))
print('Available streams for this device:')
pprint(dir(rs.stream))
|
python
|
# ██████ ██▓ ▄▄▄ ██▒ █▓ ██▓ ▄████▄ ██▓███ ██▓▒██ ██▒▓█████ ██▓
# ▒██ ▒ ▓██▒ ▒████▄ ▓██░ █▒▓██▒▒██▀ ▀█ ▓██░ ██▒▓██▒▒▒ █ █ ▒░▓█ ▀ ▓██▒
# ░ ▓██▄ ▒██░ ▒██ ▀█▄▓██ █▒░▒██▒▒▓█ ▄ ▓██░ ██▓▒▒██▒░░ █ ░▒███ ▒██░
# ▒ ██▒▒██░ ░██▄▄▄▄██▒██ █░░░██░▒▓▓▄ ▄██▒ ▒██▄█▓▒ ▒░██░ ░ █ █ ▒ ▒▓█ ▄ ▒██░
# ▒██████▒▒░██████▒▓█ ▓██▒▒▀█░ ░██░▒ ▓███▀ ░ ▒██▒ ░ ░░██░▒██▒ ▒██▒░▒████▒░██████▒
# ▒ ▒▓▒ ▒ ░░ ▒░▓ ░▒▒ ▓▒█░░ ▐░ ░▓ ░ ░▒ ▒ ░ ▒▓▒░ ░ ░░▓ ▒▒ ░ ░▓ ░░░ ▒░ ░░ ▒░▓ ░
# ░ ░▒ ░ ░░ ░ ▒ ░ ▒ ▒▒ ░░ ░░ ▒ ░ ░ ▒ ░▒ ░ ▒ ░░░ ░▒ ░ ░ ░ ░░ ░ ▒ ░
# ░ ░ ░ ░ ░ ░ ▒ ░░ ▒ ░░ ░░ ▒ ░ ░ ░ ░ ░ ░
# ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
# ░ ░
import os
import re
import socket
import subprocess
from libqtile import qtile
from libqtile.config import Click, Drag, Group, KeyChord, Key, Match, Screen
from libqtile.command import lazy
from libqtile import layout, bar, widget, hook
from libqtile.lazy import lazy
from typing import List # noqa: F401
from libqtile.widget import Spacer
from libqtile.utils import guess_terminal
from nic import get_nic_name
from datetime import datetime
mod = "mod4"
mod1 = "alt"
mod2 = "control"
home = os.path.expanduser('~')
terminal = guess_terminal()
myTerm="alacritty"
interface_name = get_nic_name() # set get_nic_name(wired) if using a wired connection
current_year = datetime.now().year
@lazy.function
def window_to_prev_group(qtile):
if qtile.currentWindow is not None:
i = qtile.groups.index(qtile.currentGroup)
qtile.currentWindow.togroup(qtile.groups[i - 1].name)
@lazy.function
def window_to_next_group(qtile):
if qtile.currentWindow is not None:
i = qtile.groups.index(qtile.currentGroup)
qtile.currentWindow.togroup(qtile.groups[i + 1].name)
keys = [
# FUNCTION KEYS
Key([], "F12", lazy.spawn('xfce4-terminal --drop-down')),
# SUPER + FUNCTION KEYS
Key([mod], "f", lazy.window.toggle_fullscreen()),
Key([mod], "q", lazy.window.kill()),
Key([mod], "x", lazy.spawn('arcolinux-logout')),
Key([mod], "Escape", lazy.spawn('xkill')),
Key([mod], "Return", lazy.spawn(myTerm)),
# SUPER + SHIFT KEYS
Key([mod, "shift"], "Return", lazy.spawn('thunar')),
#Key([mod, "shift"], "d", lazy.spawn("dmenu_run -i -nb '#191919' -nf '#fea63c' -sb '#fea63c' -sf '#191919' -fn 'NotoMonoRegular:bold:pixelsize=14'")),
Key([mod, "shift"], "d", lazy.spawn("dmenu_run -i -h 38 -fn 'UbuntuMono:bold:pixelsize=22'")),
Key([mod, "shift"], "q", lazy.window.kill()),
Key([mod, "shift"], "r", lazy.restart()),
Key([mod, "control"], "r", lazy.restart()),
# Key([mod, "shift"], "x", lazy.shutdown()),
# CONTROL + ALT KEYS
# Key(["mod1", "control"], "e", lazy.spawn('arcolinux-tweak-tool')),
Key(["mod1", "control"], "e", lazy.spawn("emacsclient -c -a 'emacs'")),
Key(["mod1", "control"], "f", lazy.spawn('firefox')),
Key(["mod1", "control"], "c", lazy.spawn('code')),
Key(["mod1", "control"], "i", lazy.spawn('nitrogen')),
Key(["mod1", "control"], "p", lazy.spawn('pavucontrol')),
Key(["mod1", "control"], "v", lazy.spawn('virt-manager')),
Key(["mod1", "control"], "b", lazy.spawn('brave')),
Key(["mod1", "control"], "s", lazy.spawn('steam')),
Key(["mod1", "control"], "t", lazy.spawn('thunderbird')),
Key(["mod1", "control"], "q", lazy.spawn(myTerm + ' -e nvim /home/pixel/.config/qtile/config.py')),
# CONTROL + SHIFT KEYS
Key([mod2, "shift"], "Escape", lazy.spawn('xfce4-taskmanager')),
# SCREENSHOTS
#Key([], "Print", lazy.spawn("scrot 'ArcoLinux-%Y-%m-%d-%s_screenshot_$wx$h.jpg' -e 'mv $f $$(xdg-user-dir PICTURES)'")),
Key([], "Print", lazy.spawn('xfce4-screenshooter')),
Key([mod], "Print", lazy.spawn('thunar /home/pixel/Pictures/Screenshots')),
Key([mod2, "shift"], "Print", lazy.spawn('gnome-screenshot -i')),
# MULTIMEDIA KEYS
Key([], "XF86Calculator", lazy.spawn("qalculate-gtk")),
# INCREASE/DECREASE/MUTE VOLUME
Key([], "XF86AudioMute", lazy.spawn("amixer -q set Master toggle")),
Key([], "XF86AudioLowerVolume", lazy.spawn("amixer -q set Master 5%-")),
Key([], "XF86AudioRaiseVolume", lazy.spawn("amixer -q set Master 5%+")),
# QTILE LAYOUT KEYS
Key([mod], "n", lazy.layout.normalize()),
Key([mod], "space", lazy.next_layout()),
# CHANGE FOCUS
Key([mod], "Up", lazy.layout.up()),
Key([mod], "Down", lazy.layout.down()),
Key([mod], "Left", lazy.layout.left()),
Key([mod], "Right", lazy.layout.right()),
Key([mod], "k", lazy.layout.up()),
Key([mod], "j", lazy.layout.down()),
Key([mod], "h", lazy.layout.left()),
Key([mod], "l", lazy.layout.right()),
# RESIZE UP, DOWN, LEFT, RIGHT
Key([mod, "control"], "l",
lazy.layout.grow_right(),
lazy.layout.grow(),
lazy.layout.increase_ratio(),
lazy.layout.delete(),
),
Key([mod, "control"], "Right",
lazy.layout.grow_right(),
lazy.layout.grow(),
lazy.layout.increase_ratio(),
lazy.layout.delete(),
),
Key([mod, "control"], "h",
lazy.layout.grow_left(),
lazy.layout.shrink(),
lazy.layout.decrease_ratio(),
lazy.layout.add(),
),
Key([mod, "control"], "Left",
lazy.layout.grow_left(),
lazy.layout.shrink(),
lazy.layout.decrease_ratio(),
lazy.layout.add(),
),
Key([mod, "control"], "k",
lazy.layout.grow_up(),
lazy.layout.grow(),
lazy.layout.decrease_nmaster(),
),
Key([mod, "control"], "Up",
lazy.layout.grow_up(),
lazy.layout.grow(),
lazy.layout.decrease_nmaster(),
),
Key([mod, "control"], "j",
lazy.layout.grow_down(),
lazy.layout.shrink(),
lazy.layout.increase_nmaster(),
),
Key([mod, "control"], "Down",
lazy.layout.grow_down(),
lazy.layout.shrink(),
lazy.layout.increase_nmaster(),
),
# FLIP LAYOUT FOR MONADTALL/MONADWIDE
Key([mod, "shift"], "f", lazy.layout.flip()),
# FLIP LAYOUT FOR BSP
Key([mod, "mod1"], "k", lazy.layout.flip_up()),
Key([mod, "mod1"], "j", lazy.layout.flip_down()),
Key([mod, "mod1"], "l", lazy.layout.flip_right()),
Key([mod, "mod1"], "h", lazy.layout.flip_left()),
# MOVE WINDOWS UP OR DOWN BSP LAYOUT
Key([mod, "shift"], "k", lazy.layout.shuffle_up()),
Key([mod, "shift"], "j", lazy.layout.shuffle_down()),
Key([mod, "shift"], "h", lazy.layout.shuffle_left()),
Key([mod, "shift"], "l", lazy.layout.shuffle_right()),
# MOVE WINDOWS UP OR DOWN MONADTALL/MONADWIDE LAYOUT
Key([mod, "shift"], "Up", lazy.layout.shuffle_up()),
Key([mod, "shift"], "Down", lazy.layout.shuffle_down()),
Key([mod, "shift"], "Left", lazy.layout.swap_left()),
Key([mod, "shift"], "Right", lazy.layout.swap_right()),
# TOGGLE FLOATING LAYOUT
Key([mod, "shift"], "space", lazy.window.toggle_floating()),]
group_names = [("1 ", {'layout': 'monadtall'}),
("2 ", {'layout': 'monadtall'}),
("3 ", {'layout': 'monadtall'}),
("4 ", {'layout': 'monadtall'}),
("5 ", {'layout': 'monadtall'}),
("6 ", {'layout': 'monadtall'}),
("7 ", {'layout': 'monadtall'}),
("8 λ", {'layout': 'monadtall'}),
("9 ", {'layout': 'floating'})]
groups = [Group(name, **kwargs) for name, kwargs in group_names]
for i, (name, kwargs) in enumerate(group_names, 1):
keys.append(Key([mod], str(i), lazy.group[name].toscreen())) # Switch to another group
keys.append(Key([mod, "shift"], str(i), lazy.window.togroup(name))) # Send current window to another group
layout_theme = {"border_width": 2,
"margin": 8,
"border_focus": "e1acff",
"border_normal": "1D2330",
"single_border_width": 0
}
layouts = [
#layout.MonadWide(**layout_theme),
#layout.Bsp(**layout_theme),
#layout.Stack(stacks=2, **layout_theme),
#layout.Columns(**layout_theme),
#layout.RatioTile(**layout_theme),
#layout.VerticalTile(**layout_theme),
#layout.Matrix(**layout_theme),
#layout.Zoomy(**layout_theme),
layout.MonadTall(**layout_theme),
layout.Max(**layout_theme),
layout.Tile(shift_windows=True, **layout_theme),
layout.Stack(num_stacks=2),
layout.TreeTab(
font = "Ubuntu",
fontsize = 10,
sections = ["FIRST", "SECOND"],
section_fontsize = 11,
bg_color = "141414",
active_bg = "90C435",
active_fg = "000000",
inactive_bg = "384323",
inactive_fg = "a0a0a0",
padding_y = 5,
section_top = 10,
panel_width = 320
),
layout.Floating(**layout_theme)
]
colors = [["#282c34", "#282c34"], # panel background
["#3d3f4b", "#434758"], # background for current screen tab
["#ffffff", "#ffffff"], # font color for group names
["#ff5555", "#ff5555"], # border line color for current tab
["#74438f", "#74438f"], # border line color for 'other tabs' and color for 'odd widgets'
["#4f76c7", "#4f76c7"], # color for the 'even widgets'
["#e1acff", "#e1acff"], # window name
["#808080", "#808080"]] # vertical line color
##### DEFAULT WIDGET SETTINGS #####
widget_defaults = dict(
font="FiraCode Nerd Font",
fontsize = 21,
padding = 2,
background=colors[2]
)
extension_defaults = widget_defaults.copy()
def init_widgets_list():
prompt = "{0}@{1}: ".format(os.environ["USER"], socket.gethostname())
widgets_list = [
widget.Sep(
linewidth = 0,
padding = 6,
foreground = colors[2],
background = colors[0]
),
widget.Image(
filename = "~/.config/qtile/icons/tux.png",
scale = "False",
mouse_callbacks = {'Button1': lambda: qtile.cmd_spawn(myTerm)},
background = colors[0]
),
widget.Sep(
linewidth = 0,
padding = 6,
foreground = colors[2],
background = colors[0]
),
widget.TextBox(
text = '|',
background = colors[0],
foreground = colors[7],
fontsize = 20
),
widget.Sep(
linewidth = 0,
padding = 6,
foreground = colors[2],
background = colors[0]
),
widget.GroupBox(
font = "FiraCode Nerd Font",
fontsize = 18,
margin_y = 3,
margin_x = 0,
padding_y = 5,
padding_x = 3,
borderwidth = 3,
active = "#ff71ce",
inactive = colors[2],
rounded = False,
highlight_color = colors[0],
highlight_method = "line",
this_current_screen_border = colors[6],
this_screen_border = colors [4],
other_current_screen_border = colors[6],
other_screen_border = colors[4],
foreground = colors[2],
background = colors[0]
),
widget.Prompt(
prompt = prompt,
font = "Ubuntu Mono",
padding = 10,
foreground = colors[3],
background = colors[1],
fontsize = 16
),
widget.Sep(
linewidth = 0,
padding = 40,
foreground = colors[2],
background = colors[0]
),
widget.WindowName(
foreground = colors[6],
background = colors[0],
padding = 0
),
widget.Sep(
linewidth = 0,
padding = 6,
foreground = colors[0],
background = colors[0]
),
widget.TextBox(
text = '|',
background = colors[0],
foreground = colors[7],
fontsize = 20
),
widget.CheckUpdates(
update_interval = 1800,
distro = "Arch_checkupdates",
display_format = "⟳{updates} Updates",
foreground = colors[6],
mouse_callbacks = {'Button1': lambda: qtile.cmd_spawn(myTerm + ' -e sudo pacman -Syu')},
background = colors[0]
),
widget.TextBox(
text = '|',
background = colors[0],
foreground = colors[7],
fontsize = 20
),
widget.CPU(
format = '\uf108 cpu: {load_percent}% {freq_current}GHz',
foreground = '#ecbe7b',
background = colors[0]
),
widget.TextBox(
text = '|',
background = colors[0],
foreground = colors[7],
fontsize = 20
),
widget.TextBox(
text = " 🌡",
padding = 2,
foreground = '#01cdfe',
background = colors[0],
fontsize = 16
),
widget.ThermalSensor(
foreground = '#01cdfe',
background = colors[0],
threshold = 90,
padding = 5,
tag_sensor = "Package id 0"
),
widget.TextBox(
text = '|',
background = colors[0],
foreground = colors[7],
fontsize = 20
),
widget.TextBox(
text = " 🌡",
padding = 2,
foreground = '#05ffa1',
background = colors[0],
fontsize = 16
),
widget.NvidiaSensors(
foreground = '#05ffa1',
background = colors[0],
format = 'gpu: {temp}°C'
),
widget.TextBox(
text = '|',
background = colors[0],
foreground = colors[7],
fontsize = 20
),
widget.Memory(
foreground = '#ff6c6b',
background = colors[0],
format = '\uf233 {MemUsed: .0f}M/{MemTotal: .0f}M',
mouse_callbacks = {'Button1': lambda: qtile.cmd_spawn(myTerm + ' -e htop')},
padding = 5
),
widget.TextBox(
text = '|',
background = colors[0],
foreground = colors[7],
fontsize = 20
),
widget.Net(
interface = interface_name,
format = '\uf0ab {down} \uf0aa {up}',
foreground = '#fffb96',
background = colors[0],
padding = 5
),
widget.TextBox(
text = '|',
background = colors[0],
foreground = colors[7],
fontsize = 20
),
widget.TextBox(
text=" ",
foreground='#ff71ce',
background=colors[0],
font="Font Awesome 5 Free Solid",
# fontsize=38,
),
widget.Volume(
#foreground = '#828CF6',
foreground='#ff71ce',
background = colors[0],
padding = 5
),
widget.TextBox(
text = '|',
background = colors[0],
foreground = colors[7],
fontsize = 20
),
widget.CurrentLayoutIcon(
custom_icon_paths = [os.path.expanduser("~/.config/qtile/icons")],
foreground = '#c678dd',
background = colors[0],
padding = 0,
scale = 0.7
),
widget.CurrentLayout(
foreground = '#c678dd',
background = colors[0],
padding = 5
),
widget.TextBox(
text = '|',
background = colors[0],
foreground = colors[7],
fontsize = 20
),
widget.TextBox(
text=" ",
foreground='#46d9ff',
background=colors[0],
font="Font Awesome 5 Free Solid",
# fontsize=38,
),
widget.Clock(
foreground = '#46d9ff',
background = colors[0],
format = "%A, %B %d - %H:%M:%S",
mouse_callbacks = {'Button1': lambda: qtile.cmd_spawn(myTerm + f" --hold -e cal {current_year}")}
),
widget.TextBox(
text = '|',
background = colors[0],
foreground = colors[7],
fontsize = 20
),
widget.Systray(
background = colors[0],
icon_size=21,
padding = 4
),
widget.Sep(
linewidth = 0,
padding = 10,
foreground = colors[0],
background = colors[0]
),
]
return widgets_list
widgets_list = init_widgets_list()
def init_widgets_screen1():
widgets_screen1 = init_widgets_list()
return widgets_screen1
def init_widgets_screen2():
widgets_screen2 = init_widgets_list()
del widgets_screen2[34:37]
return widgets_screen2
widgets_screen1 = init_widgets_screen1()
widgets_screen2 = init_widgets_screen2()
def init_screens():
return [Screen(top=bar.Bar(widgets=init_widgets_screen1(), size=38)),
Screen(top=bar.Bar(widgets=init_widgets_screen2(), size=26))]
screens = init_screens()
# Drag floating layouts.
mouse = [
Drag([mod], "Button1", lazy.window.set_position_floating(),
start=lazy.window.get_position()),
Drag([mod], "Button3", lazy.window.set_size_floating(),
start=lazy.window.get_size()),
Click([mod], "Button2", lazy.window.bring_to_front())
]
dgroups_key_binder = None
dgroups_app_rules = [] # type: List
main = None # WARNING: this is deprecated and will be removed soon
follow_mouse_focus = True
bring_front_click = False
cursor_warp = False
floating_layout = layout.Floating(float_rules=[
*layout.Floating.default_float_rules,
Match(wm_class='Arcolinux-welcome-app.py'),
Match(wm_class='Arcolinux-tweak-tool.py'),
Match(wm_class='confirm'),
Match(wm_class='dialog'),
Match(wm_class='download'),
Match(wm_class='error'),
Match(wm_class='file_progress'),
Match(wm_class='notification'),
Match(wm_class='splash'),
Match(wm_class='toolbar'),
Match(wm_class='confirmreset'),
Match(wm_class='makebranch'),
Match(wm_class='maketag'),
Match(wm_class='Arandr'),
Match(wm_class='feh'),
Match(wm_class='Galculator'),
Match(wm_class='arcolinux-logout'),
Match(wm_class='xfce4-terminal'),
Match(wm_class='ssh-askpass'),
Match(wm_class='mullvad vpn'),
Match(title='branchdialog'),
Match(title='Open File'),
Match(title='pinentry'),
Match(title='Qalculate!'),
Match(title='Connman System Tray'),
Match(title='Steam'),
Match(title='Steam Login'),
], fullscreen_border_width = 0, border_width = 0)
auto_fullscreen = True
focus_on_window_activation = "smart"
@hook.subscribe.startup_once
def start_once():
home = os.path.expanduser('~')
subprocess.call([home + '/.config/qtile/scripts/autostart.sh'])
@hook.subscribe.startup
def start_always():
# Set the cursor to something sane in X
subprocess.Popen(['xsetroot', '-cursor_name', 'left_ptr'])
floating_types = ["notification", "toolbar", "splash", "dialog"]
@hook.subscribe.client_new
def set_floating(window):
    if (window.window.get_wm_transient_for()
            or window.window.get_wm_type() in floating_types):
        window.floating = True
# XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this
# string besides java UI toolkits; you can see several discussions on the
# mailing lists, GitHub issues, and other WM documentation that suggest setting
# this string if your java app doesn't work correctly. We may as well just lie
# and say that we're a working one by default.
#
# We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in
# java that happens to be on java's whitelist.
wmname = "LG3D"
|
python
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/protobuf/worker_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.protobuf import worker_pb2 as tensorflow_dot_core_dot_protobuf_dot_worker__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/protobuf/worker_service.proto',
package='tensorflow.grpc',
syntax='proto3',
serialized_pb=_b('\n-tensorflow/core/protobuf/worker_service.proto\x12\x0ftensorflow.grpc\x1a%tensorflow/core/protobuf/worker.proto2\x99\x07\n\rWorkerService\x12H\n\tGetStatus\x12\x1c.tensorflow.GetStatusRequest\x1a\x1d.tensorflow.GetStatusResponse\x12\x66\n\x13\x43reateWorkerSession\x12&.tensorflow.CreateWorkerSessionRequest\x1a\'.tensorflow.CreateWorkerSessionResponse\x12\x66\n\x13\x44\x65leteWorkerSession\x12&.tensorflow.DeleteWorkerSessionRequest\x1a\'.tensorflow.DeleteWorkerSessionResponse\x12T\n\rRegisterGraph\x12 .tensorflow.RegisterGraphRequest\x1a!.tensorflow.RegisterGraphResponse\x12Z\n\x0f\x44\x65registerGraph\x12\".tensorflow.DeregisterGraphRequest\x1a#.tensorflow.DeregisterGraphResponse\x12\x45\n\x08RunGraph\x12\x1b.tensorflow.RunGraphRequest\x1a\x1c.tensorflow.RunGraphResponse\x12Q\n\x0c\x43leanupGraph\x12\x1f.tensorflow.CleanupGraphRequest\x1a .tensorflow.CleanupGraphResponse\x12K\n\nCleanupAll\x12\x1d.tensorflow.CleanupAllRequest\x1a\x1e.tensorflow.CleanupAllResponse\x12M\n\nRecvTensor\x12\x1d.tensorflow.RecvTensorRequest\x1a\x1e.tensorflow.RecvTensorResponse\"\x00\x12\x42\n\x07Logging\x12\x1a.tensorflow.LoggingRequest\x1a\x1b.tensorflow.LoggingResponse\x12\x42\n\x07Tracing\x12\x1a.tensorflow.TracingRequest\x1a\x1b.tensorflow.TracingResponseB3\n\x1aorg.tensorflow.distruntimeB\x13WorkerServiceProtosP\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\032org.tensorflow.distruntimeB\023WorkerServiceProtosP\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class WorkerServiceStub(object):
"""//////////////////////////////////////////////////////////////////////////////
WorkerService defines a TensorFlow service that executes dataflow
graphs on a set of local devices, on behalf of a MasterService.
A worker service keeps track of multiple "registered graphs". Each
registered graph is a subgraph of a client's graph, corresponding to
only the nodes that should execute on this worker (and any
additional nodes necessary for inter-process communication using
the `RecvTensor` method).
//////////////////////////////////////////////////////////////////////////////
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetStatus = channel.unary_unary(
'/tensorflow.grpc.WorkerService/GetStatus',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.GetStatusRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.GetStatusResponse.FromString,
)
self.CreateWorkerSession = channel.unary_unary(
'/tensorflow.grpc.WorkerService/CreateWorkerSession',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CreateWorkerSessionRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CreateWorkerSessionResponse.FromString,
)
self.DeleteWorkerSession = channel.unary_unary(
'/tensorflow.grpc.WorkerService/DeleteWorkerSession',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeleteWorkerSessionRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeleteWorkerSessionResponse.FromString,
)
self.RegisterGraph = channel.unary_unary(
'/tensorflow.grpc.WorkerService/RegisterGraph',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RegisterGraphRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RegisterGraphResponse.FromString,
)
self.DeregisterGraph = channel.unary_unary(
'/tensorflow.grpc.WorkerService/DeregisterGraph',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeregisterGraphRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeregisterGraphResponse.FromString,
)
self.RunGraph = channel.unary_unary(
'/tensorflow.grpc.WorkerService/RunGraph',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RunGraphRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RunGraphResponse.FromString,
)
self.CleanupGraph = channel.unary_unary(
'/tensorflow.grpc.WorkerService/CleanupGraph',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupGraphRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupGraphResponse.FromString,
)
self.CleanupAll = channel.unary_unary(
'/tensorflow.grpc.WorkerService/CleanupAll',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupAllRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupAllResponse.FromString,
)
self.RecvTensor = channel.unary_unary(
'/tensorflow.grpc.WorkerService/RecvTensor',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RecvTensorRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RecvTensorResponse.FromString,
)
self.Logging = channel.unary_unary(
'/tensorflow.grpc.WorkerService/Logging',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.LoggingRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.LoggingResponse.FromString,
)
self.Tracing = channel.unary_unary(
'/tensorflow.grpc.WorkerService/Tracing',
request_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.TracingRequest.SerializeToString,
response_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.TracingResponse.FromString,
)
class WorkerServiceServicer(object):
"""//////////////////////////////////////////////////////////////////////////////
WorkerService defines a TensorFlow service that executes dataflow
graphs on a set of local devices, on behalf of a MasterService.
A worker service keeps track of multiple "registered graphs". Each
registered graph is a subgraph of a client's graph, corresponding to
only the nodes that should execute on this worker (and any
additional nodes necessary for inter-process communication using
the `RecvTensor` method).
//////////////////////////////////////////////////////////////////////////////
"""
def GetStatus(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateWorkerSession(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteWorkerSession(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RegisterGraph(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeregisterGraph(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RunGraph(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CleanupGraph(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CleanupAll(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RecvTensor(self, request, context):
"""See worker.proto for details.
RecvTensor Method
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Logging(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Tracing(self, request, context):
"""See worker.proto for details.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_WorkerServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetStatus': grpc.unary_unary_rpc_method_handler(
servicer.GetStatus,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.GetStatusRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.GetStatusResponse.SerializeToString,
),
'CreateWorkerSession': grpc.unary_unary_rpc_method_handler(
servicer.CreateWorkerSession,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CreateWorkerSessionRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CreateWorkerSessionResponse.SerializeToString,
),
'DeleteWorkerSession': grpc.unary_unary_rpc_method_handler(
servicer.DeleteWorkerSession,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeleteWorkerSessionRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeleteWorkerSessionResponse.SerializeToString,
),
'RegisterGraph': grpc.unary_unary_rpc_method_handler(
servicer.RegisterGraph,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RegisterGraphRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RegisterGraphResponse.SerializeToString,
),
'DeregisterGraph': grpc.unary_unary_rpc_method_handler(
servicer.DeregisterGraph,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeregisterGraphRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeregisterGraphResponse.SerializeToString,
),
'RunGraph': grpc.unary_unary_rpc_method_handler(
servicer.RunGraph,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RunGraphRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RunGraphResponse.SerializeToString,
),
'CleanupGraph': grpc.unary_unary_rpc_method_handler(
servicer.CleanupGraph,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupGraphRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupGraphResponse.SerializeToString,
),
'CleanupAll': grpc.unary_unary_rpc_method_handler(
servicer.CleanupAll,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupAllRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupAllResponse.SerializeToString,
),
'RecvTensor': grpc.unary_unary_rpc_method_handler(
servicer.RecvTensor,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RecvTensorRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RecvTensorResponse.SerializeToString,
),
'Logging': grpc.unary_unary_rpc_method_handler(
servicer.Logging,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.LoggingRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.LoggingResponse.SerializeToString,
),
'Tracing': grpc.unary_unary_rpc_method_handler(
servicer.Tracing,
request_deserializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.TracingRequest.FromString,
response_serializer=tensorflow_dot_core_dot_protobuf_dot_worker__pb2.TracingResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'tensorflow.grpc.WorkerService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaWorkerServiceServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""//////////////////////////////////////////////////////////////////////////////
WorkerService defines a TensorFlow service that executes dataflow
graphs on a set of local devices, on behalf of a MasterService.
A worker service keeps track of multiple "registered graphs". Each
registered graph is a subgraph of a client's graph, corresponding to
only the nodes that should execute on this worker (and any
additional nodes necessary for inter-process communication using
the `RecvTensor` method).
//////////////////////////////////////////////////////////////////////////////
"""
def GetStatus(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def CreateWorkerSession(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def DeleteWorkerSession(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def RegisterGraph(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def DeregisterGraph(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def RunGraph(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def CleanupGraph(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def CleanupAll(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def RecvTensor(self, request, context):
"""See worker.proto for details.
RecvTensor Method
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def Logging(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def Tracing(self, request, context):
"""See worker.proto for details.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaWorkerServiceStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""//////////////////////////////////////////////////////////////////////////////
WorkerService defines a TensorFlow service that executes dataflow
graphs on a set of local devices, on behalf of a MasterService.
A worker service keeps track of multiple "registered graphs". Each
registered graph is a subgraph of a client's graph, corresponding to
only the nodes that should execute on this worker (and any
additional nodes necessary for inter-process communication using
the `RecvTensor` method).
//////////////////////////////////////////////////////////////////////////////
"""
def GetStatus(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""See worker.proto for details.
"""
raise NotImplementedError()
GetStatus.future = None
def CreateWorkerSession(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""See worker.proto for details.
"""
raise NotImplementedError()
CreateWorkerSession.future = None
def DeleteWorkerSession(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""See worker.proto for details.
"""
raise NotImplementedError()
DeleteWorkerSession.future = None
def RegisterGraph(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""See worker.proto for details.
"""
raise NotImplementedError()
RegisterGraph.future = None
def DeregisterGraph(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""See worker.proto for details.
"""
raise NotImplementedError()
DeregisterGraph.future = None
def RunGraph(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""See worker.proto for details.
"""
raise NotImplementedError()
RunGraph.future = None
def CleanupGraph(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""See worker.proto for details.
"""
raise NotImplementedError()
CleanupGraph.future = None
def CleanupAll(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""See worker.proto for details.
"""
raise NotImplementedError()
CleanupAll.future = None
def RecvTensor(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""See worker.proto for details.
RecvTensor Method
"""
raise NotImplementedError()
RecvTensor.future = None
def Logging(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""See worker.proto for details.
"""
raise NotImplementedError()
Logging.future = None
def Tracing(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""See worker.proto for details.
"""
raise NotImplementedError()
Tracing.future = None
def beta_create_WorkerService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('tensorflow.grpc.WorkerService', 'CleanupAll'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupAllRequest.FromString,
('tensorflow.grpc.WorkerService', 'CleanupGraph'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupGraphRequest.FromString,
('tensorflow.grpc.WorkerService', 'CreateWorkerSession'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CreateWorkerSessionRequest.FromString,
('tensorflow.grpc.WorkerService', 'DeleteWorkerSession'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeleteWorkerSessionRequest.FromString,
('tensorflow.grpc.WorkerService', 'DeregisterGraph'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeregisterGraphRequest.FromString,
('tensorflow.grpc.WorkerService', 'GetStatus'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.GetStatusRequest.FromString,
('tensorflow.grpc.WorkerService', 'Logging'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.LoggingRequest.FromString,
('tensorflow.grpc.WorkerService', 'RecvTensor'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RecvTensorRequest.FromString,
('tensorflow.grpc.WorkerService', 'RegisterGraph'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RegisterGraphRequest.FromString,
('tensorflow.grpc.WorkerService', 'RunGraph'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RunGraphRequest.FromString,
('tensorflow.grpc.WorkerService', 'Tracing'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.TracingRequest.FromString,
}
response_serializers = {
('tensorflow.grpc.WorkerService', 'CleanupAll'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupAllResponse.SerializeToString,
('tensorflow.grpc.WorkerService', 'CleanupGraph'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupGraphResponse.SerializeToString,
('tensorflow.grpc.WorkerService', 'CreateWorkerSession'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CreateWorkerSessionResponse.SerializeToString,
('tensorflow.grpc.WorkerService', 'DeleteWorkerSession'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeleteWorkerSessionResponse.SerializeToString,
('tensorflow.grpc.WorkerService', 'DeregisterGraph'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeregisterGraphResponse.SerializeToString,
('tensorflow.grpc.WorkerService', 'GetStatus'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.GetStatusResponse.SerializeToString,
('tensorflow.grpc.WorkerService', 'Logging'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.LoggingResponse.SerializeToString,
('tensorflow.grpc.WorkerService', 'RecvTensor'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RecvTensorResponse.SerializeToString,
('tensorflow.grpc.WorkerService', 'RegisterGraph'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RegisterGraphResponse.SerializeToString,
('tensorflow.grpc.WorkerService', 'RunGraph'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RunGraphResponse.SerializeToString,
('tensorflow.grpc.WorkerService', 'Tracing'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.TracingResponse.SerializeToString,
}
method_implementations = {
('tensorflow.grpc.WorkerService', 'CleanupAll'): face_utilities.unary_unary_inline(servicer.CleanupAll),
('tensorflow.grpc.WorkerService', 'CleanupGraph'): face_utilities.unary_unary_inline(servicer.CleanupGraph),
('tensorflow.grpc.WorkerService', 'CreateWorkerSession'): face_utilities.unary_unary_inline(servicer.CreateWorkerSession),
('tensorflow.grpc.WorkerService', 'DeleteWorkerSession'): face_utilities.unary_unary_inline(servicer.DeleteWorkerSession),
('tensorflow.grpc.WorkerService', 'DeregisterGraph'): face_utilities.unary_unary_inline(servicer.DeregisterGraph),
('tensorflow.grpc.WorkerService', 'GetStatus'): face_utilities.unary_unary_inline(servicer.GetStatus),
('tensorflow.grpc.WorkerService', 'Logging'): face_utilities.unary_unary_inline(servicer.Logging),
('tensorflow.grpc.WorkerService', 'RecvTensor'): face_utilities.unary_unary_inline(servicer.RecvTensor),
('tensorflow.grpc.WorkerService', 'RegisterGraph'): face_utilities.unary_unary_inline(servicer.RegisterGraph),
('tensorflow.grpc.WorkerService', 'RunGraph'): face_utilities.unary_unary_inline(servicer.RunGraph),
('tensorflow.grpc.WorkerService', 'Tracing'): face_utilities.unary_unary_inline(servicer.Tracing),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_WorkerService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('tensorflow.grpc.WorkerService', 'CleanupAll'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupAllRequest.SerializeToString,
('tensorflow.grpc.WorkerService', 'CleanupGraph'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupGraphRequest.SerializeToString,
('tensorflow.grpc.WorkerService', 'CreateWorkerSession'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CreateWorkerSessionRequest.SerializeToString,
('tensorflow.grpc.WorkerService', 'DeleteWorkerSession'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeleteWorkerSessionRequest.SerializeToString,
('tensorflow.grpc.WorkerService', 'DeregisterGraph'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeregisterGraphRequest.SerializeToString,
('tensorflow.grpc.WorkerService', 'GetStatus'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.GetStatusRequest.SerializeToString,
('tensorflow.grpc.WorkerService', 'Logging'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.LoggingRequest.SerializeToString,
('tensorflow.grpc.WorkerService', 'RecvTensor'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RecvTensorRequest.SerializeToString,
('tensorflow.grpc.WorkerService', 'RegisterGraph'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RegisterGraphRequest.SerializeToString,
('tensorflow.grpc.WorkerService', 'RunGraph'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RunGraphRequest.SerializeToString,
('tensorflow.grpc.WorkerService', 'Tracing'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.TracingRequest.SerializeToString,
}
response_deserializers = {
('tensorflow.grpc.WorkerService', 'CleanupAll'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupAllResponse.FromString,
('tensorflow.grpc.WorkerService', 'CleanupGraph'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CleanupGraphResponse.FromString,
('tensorflow.grpc.WorkerService', 'CreateWorkerSession'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.CreateWorkerSessionResponse.FromString,
('tensorflow.grpc.WorkerService', 'DeleteWorkerSession'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeleteWorkerSessionResponse.FromString,
('tensorflow.grpc.WorkerService', 'DeregisterGraph'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.DeregisterGraphResponse.FromString,
('tensorflow.grpc.WorkerService', 'GetStatus'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.GetStatusResponse.FromString,
('tensorflow.grpc.WorkerService', 'Logging'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.LoggingResponse.FromString,
('tensorflow.grpc.WorkerService', 'RecvTensor'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RecvTensorResponse.FromString,
('tensorflow.grpc.WorkerService', 'RegisterGraph'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RegisterGraphResponse.FromString,
('tensorflow.grpc.WorkerService', 'RunGraph'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.RunGraphResponse.FromString,
('tensorflow.grpc.WorkerService', 'Tracing'): tensorflow_dot_core_dot_protobuf_dot_worker__pb2.TracingResponse.FromString,
}
cardinalities = {
'CleanupAll': cardinality.Cardinality.UNARY_UNARY,
'CleanupGraph': cardinality.Cardinality.UNARY_UNARY,
'CreateWorkerSession': cardinality.Cardinality.UNARY_UNARY,
'DeleteWorkerSession': cardinality.Cardinality.UNARY_UNARY,
'DeregisterGraph': cardinality.Cardinality.UNARY_UNARY,
'GetStatus': cardinality.Cardinality.UNARY_UNARY,
'Logging': cardinality.Cardinality.UNARY_UNARY,
'RecvTensor': cardinality.Cardinality.UNARY_UNARY,
'RegisterGraph': cardinality.Cardinality.UNARY_UNARY,
'RunGraph': cardinality.Cardinality.UNARY_UNARY,
'Tracing': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'tensorflow.grpc.WorkerService', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
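# --- Hedged usage sketch (not part of the generated protobuf output) ---------
# A minimal client-side example of the GA stub defined above; the worker
# address is an assumption, and a TensorFlow worker must actually be listening
# there (with grpcio installed) for the call to succeed.
if __name__ == '__main__':
    try:
        import grpc as _grpc
        _channel = _grpc.insecure_channel('localhost:2222')
        _stub = WorkerServiceStub(_channel)
        _request = tensorflow_dot_core_dot_protobuf_dot_worker__pb2.GetStatusRequest()
        print(_stub.GetStatus(_request, timeout=5))
    except Exception as exc:  # grpc missing or no worker running
        print('WorkerService example skipped:', exc)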
|
python
|
def validate_cell(cell: tuple) -> tuple:
x, y = cell
if x == 6:
x = 0
elif x == -1:
x = 5
if y == 6:
y = 0
elif y == -1:
y = 5
return x, y
def get_cell(cell: tuple, field: list) -> str:
row, col = cell
return field[row][col]
def set_cell(cell: tuple, field: list, value: str) -> None:
    row, col = cell
    field[row][col] = value
move = {
'up': lambda x: (x[0]-1, x[1]),
'down': lambda x: (x[0]+1, x[1]),
'left': lambda x: (x[0], x[1]-1),
'right': lambda x: (x[0], x[1]+1)
}
deposits = {
'W': ['Water', 0],
'M': ['Metal', 0],
'C': ['Concrete', 0]
}
matrix = [[x for x in input().split(' ')] for _ in range(6)]
current_position = next((x, y) for x in range(6) for y in range(6) if matrix[x][y] == 'E')
commands = input().split(', ')
for command in commands:
next_position = move[command](current_position)
next_position = validate_cell(next_position)
row, col = next_position
item_at_cell = get_cell(next_position, matrix)
if item_at_cell in ('W', 'M', 'C'):
deposits[item_at_cell][1] += 1
print(f"{deposits[item_at_cell][0]} deposit found at ({row}, {col})")
set_cell(current_position, matrix, '-')
set_cell(next_position, matrix, 'E')
elif item_at_cell == 'R':
print(f"Rover got broken at ({row}, {col})")
break
current_position = next_position
if deposits['W'][1] > 0 and deposits['C'][1] > 0 and deposits['M'][1] > 0:
print("Area suitable to start the colony.")
else:
print("Area not suitable to start the colony.")
|
python
|
# nukedatastore tests
import pytest
import datetime
from nukedatastore import NukeDataStore, NukeDataStoreError
def test_datastore_crud(datastore):
datastore['project_data'] = {'id': 1234, 'name': 'project name'}
assert len(datastore.list()) == 1
assert datastore.list()[0] == 'project_data'
assert datastore['project_data'] == {'id': 1234, 'name': 'project name'}
def test_datastore_crud_invalid_key(datastore):
with pytest.raises(KeyError):
datastore['invalid_key']
def test_datastore_crud_invalid_data(datastore):
with pytest.raises(NukeDataStoreError):
datastore['data'] = datetime.datetime.now()
def test_datastore_crud_frozen(datastore):
datastore.freeze()
with pytest.raises(NukeDataStoreError):
datastore['project_data'] = {}
datastore.unfreeze()
def test_deleted_node(datastore, nuke):
nuke.delete(nuke.toNode('data_store'))
with pytest.raises(NukeDataStoreError):
datastore.store
def test_existing_node_init(nuke):
NukeDataStore('data_store')
x = NukeDataStore('data_store')
assert x
|
python
|
# Copyright (c) 2020 Graphcore Ltd.
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file has been modified by Graphcore Ltd.
# It has been modified to run the application on IPU hardware.
"""Validate a network.
Usage:
python validate.py tfrecord_dir/ tfrecord_dir2/
"""
import os
from absl import app, flags
from tensorflow import gfile
import dual_net
import preprocessing
import utils
flags.DEFINE_integer('examples_to_validate', 50 * 2048,
'Number of examples to run validation on.')
flags.DEFINE_string('validate_name', 'selfplay',
'Name of validation set (i.e. selfplay or human).')
flags.DEFINE_bool('expand_validation_dirs', True,
'Whether to expand the input paths by globbing. If false, '
'directly read and validate on the given files.')
# From dual_net.py
flags.declare_key_flag('work_dir')
flags.declare_key_flag('use_tpu')
flags.declare_key_flag('num_tpu_cores')
FLAGS = flags.FLAGS
def validate(*tf_records):
"""Validate a model's performance on a set of holdout data."""
if FLAGS.use_tpu:
def _input_fn(params):
return preprocessing.get_tpu_input_tensors(
params['batch_size'], tf_records, filter_amount=1.0)
else:
def _input_fn():
return preprocessing.get_ipu_input_tensors(
FLAGS.train_batch_size, tf_records, filter_amount=1.0,
shuffle_examples=False)
steps = FLAGS.examples_to_validate // FLAGS.train_batch_size
if FLAGS.use_tpu:
steps //= FLAGS.num_tpu_cores
estimator = dual_net._get_ipu_estimator(num_replicas=1, iterations_per_loop=steps)
with utils.logged_timer("Validating"):
estimator.evaluate(_input_fn, steps=steps, name=FLAGS.validate_name)
def main(argv):
"""Validate a model's performance on a set of holdout data."""
_, *validation_paths = argv
if FLAGS.expand_validation_dirs:
tf_records = []
with utils.logged_timer("Building lists of holdout files"):
for record_dir in validation_paths:
tf_records.extend(gfile.Glob(os.path.join(record_dir, '*.zz')))
else:
tf_records = validation_paths
if not tf_records:
raise RuntimeError("Did not find any holdout files for validating!")
validate(*tf_records)
if __name__ == "__main__":
app.run(main)
|
python
|
import torch
import warprnnt_pytorch as warp_rnnt
from torch.autograd import Function
from torch.nn import Module
from .warp_rnnt import *
__all__ = ['rnnt_loss', 'RNNTLoss']
class _RNNT(Function):
@staticmethod
def forward(ctx, acts, labels, act_lens, label_lens, blank, reduction):
"""
acts: Tensor of (batch x seqLength x labelLength x outputDim) containing output from network
labels: 2 dimensional Tensor containing all the targets of the batch with zero padded
act_lens: Tensor of size (batch) containing size of each output sequence from the network
label_lens: Tensor of (batch) containing label length of each example
"""
is_cuda = acts.is_cuda
certify_inputs(acts, labels, act_lens, label_lens)
loss_func = warp_rnnt.gpu_rnnt if is_cuda else warp_rnnt.cpu_rnnt
grads = torch.zeros_like(acts) if acts.requires_grad else torch.zeros(0).to(acts)
minibatch_size = acts.size(0)
costs = torch.zeros(minibatch_size, dtype=acts.dtype)
loss_func(acts,
labels,
act_lens,
label_lens,
costs,
grads,
blank,
0)
if reduction in ['sum', 'mean']:
costs = costs.sum().unsqueeze_(-1)
if reduction == 'mean':
costs /= minibatch_size
grads /= minibatch_size
costs = costs.to(acts.device)
ctx.grads = grads
return costs
@staticmethod
def backward(ctx, grad_output):
grad_output = grad_output.view(-1, 1, 1, 1).to(ctx.grads)
return ctx.grads.mul_(grad_output), None, None, None, None, None
def rnnt_loss(acts, labels, act_lens, label_lens, blank=0, reduction='mean'):
""" RNN Transducer Loss
Args:
acts: Tensor of (batch x seqLength x labelLength x outputDim) containing output from network
labels: 2 dimensional Tensor containing all the targets of the batch with zero padded
act_lens: Tensor of size (batch) containing size of each output sequence from the network
label_lens: Tensor of (batch) containing label length of each example
blank (int, optional): blank label. Default: 0.
reduction (string, optional): Specifies the reduction to apply to the output:
'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
'mean': the output losses will be divided by the target lengths and
then the mean over the batch is taken. Default: 'mean'
"""
if not acts.is_cuda:
acts = torch.nn.functional.log_softmax(acts, -1)
return _RNNT.apply(acts, labels, act_lens, label_lens, blank, reduction)
class RNNTLoss(Module):
"""
Parameters:
blank (int, optional): blank label. Default: 0.
reduction (string, optional): Specifies the reduction to apply to the output:
'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
'mean': the output losses will be divided by the target lengths and
then the mean over the batch is taken. Default: 'mean'
"""
def __init__(self, blank=0, reduction='mean'):
super(RNNTLoss, self).__init__()
self.blank = blank
self.reduction = reduction
self.loss = _RNNT.apply
def forward(self, acts, labels, act_lens, label_lens):
"""
acts: Tensor of (batch x seqLength x labelLength x outputDim) containing output from network
labels: 2 dimensional Tensor containing all the targets of the batch with zero padded
act_lens: Tensor of size (batch) containing size of each output sequence from the network
label_lens: Tensor of (batch) containing label length of each example
"""
if not acts.is_cuda:
# NOTE manually done log_softmax for CPU version,
# log_softmax is computed within GPU version.
acts = torch.nn.functional.log_softmax(acts, -1)
return self.loss(acts, labels, act_lens, label_lens, self.blank, self.reduction)
def check_type(var, t, name):
if var.dtype is not t:
raise TypeError("{} must be {}".format(name, t))
def check_contiguous(var, name):
if not var.is_contiguous():
raise ValueError("{} must be contiguous".format(name))
def check_dim(var, dim, name):
if len(var.shape) != dim:
raise ValueError("{} must be {}D".format(name, dim))
def certify_inputs(log_probs, labels, lengths, label_lengths):
# check_type(log_probs, torch.float32, "log_probs")
check_type(labels, torch.int32, "labels")
check_type(label_lengths, torch.int32, "label_lengths")
check_type(lengths, torch.int32, "lengths")
check_contiguous(log_probs, "log_probs")
check_contiguous(labels, "labels")
check_contiguous(label_lengths, "label_lengths")
check_contiguous(lengths, "lengths")
if lengths.shape[0] != log_probs.shape[0]:
raise ValueError("must have a length per example.")
if label_lengths.shape[0] != log_probs.shape[0]:
raise ValueError("must have a label length per example.")
check_dim(log_probs, 4, "log_probs")
check_dim(labels, 2, "labels")
check_dim(lengths, 1, "lenghts")
check_dim(label_lengths, 1, "label_lenghts")
max_T = torch.max(lengths)
max_U = torch.max(label_lengths)
T, U = log_probs.shape[1:3]
if T != max_T:
raise ValueError("Input length mismatch")
if U != max_U + 1:
raise ValueError("Output length mismatch")
|
python
|
from utils import color
from browser import help
import random
def get(cmds, typ, add_attr=None):
    '''
    USE:
        error.get(cmds, typ, [optional: add_attr]) where add_attr is a list of at most 3 extra values
    Description:
        Returns a correctly colored message according to the declared "typ"
    '''
#---------------------------------------------------------------
if not add_attr:
add_attr = [None,None,None]
elif len(add_attr)<3:
for i in range(3-len(add_attr)):
add_attr.append(None)
if len(cmds) < 2:
cmds.append(None)
operator = help.spfc_opr(cmds[0],True)
names=[None,None,None]
if operator == 'q':
names = ['Ken Rotaris', 'Tunahan Erbay', 'Leonardo Salsi']
random.shuffle(names) #names in random order
names[0] = color.bold(color.red(names[0]))
names[1] = color.bold(color.greenDark(names[1]))
names[2] = color.bold(color.yellow(names[2]))
random.shuffle(names)
dictionary = {
        #command | messages  #TODO: blank messages are not being used yet / have not been set yet.
'cd' : {'success': color.greenDark(''),
'warning': color.yellow('wrong argument format'),
'error' : color.red('The directory does not exist')
},
'open': {'success': color.greenDark(''),
'warning': color.yellow('wrong argument format'),
'error': color.red('unable to open file')
},
'ls' : {'success': color.greenDark(''),
'warning': color.yellow('wrong argument format'),
'error' : color.red('given directory doesn\'t exist'),
'unknown': color.red('Unknown option \'{}\''.format(cmds[1]))
},
'cat' : {'success': color.greenDark(''),
'warning': color.yellow('wrong argument format'),
'error' : color.red('file doesn\'t exist at \'{1}\''.format(cmds[0], add_attr)),
'nt_supp': color.red('file type currently not supported by \'{}\' command'.format(cmds[0])),
'hint' : color.grey('tip: use \'{}\' followed by an integer to display a range.'.format(cmds[0]))
},
'mk' : {'success': color.greenDark('folder {0} created'.format(add_attr[0])), #add_attr = [name, path]
'warning': color.yellow('wrong argument format'),
'file_error' : color.red('name cannot contain a dot'), #add_attr = [name, typ, path]
'format_error' : color.red('please use command as follows: mk <dir_name>'),
'path_error': color.red('the path the folder is to be created in does not exist'.format(add_attr))
},
'add' : {'success': color.greenDark('File added to the filesystem.'),
# add_attr = [name, path]
'warning': color.yellow('wrong argument format'),
'error': color.red('{0} "{1}" already exists at {2}'.format(add_attr[1], add_attr[0], add_attr[2])),
# add_attr = [name, typ, path]
'path_error': color.red('The source does not exist'.format(add_attr)),
'format_error': color.red('\'{}\' either outside of the filesystem or not an existing directory'.format(add_attr[2])),
'nodstdir': color.red('Destination folder does not exist.'),
'fs_error': color.red('Cannot add files from within the filesystem.')
},
'rm' : {'success': color.greenDark('deleted {0} from {1}'.format(add_attr[0], add_attr[1])), #add_attr = [name, path]
'warning': color.yellow('wrong argument format'),
'error' : color.red('{0} "{1}" does not exists at {2}'.format(add_attr[1], add_attr[0], add_attr[2])), #add_attr = [name, typ, path]
'path_error' : color.red('\'{}\' doesn\'t exist'.format(add_attr))
},
'mount' : {'success': color.greenDark('Filesystem mounted successfully.'),
                'warning': color.yellow('Mount a filesystem of another user with mnt <user> <filesystem_name> [<path>]'),
'error' : color.red('Unable to mount filesystem.'),
'nodst': color.red('Destination path does not exist.')
},
'umt' : {'success': color.greenDark(''),
'warning': color.yellow('wrong argument format'),
'error' : color.red('')
},
'exp' : {'success': color.greenDark('Filesystem has been successfully exported!'),
'warning': color.yellow('wrong argument format'),
'error' : color.red('No root_mockup folder found at current location or its super folders \'{}\'.'.format(add_attr[0]))
},
'mkp' : {'success': color.greenDark(''),
'warning': color.yellow('wrong argument format'),
'error' : color.red('folder \'{0}\' already exists at \'{1}\''.format(add_attr[0], add_attr[1]))
},
'pwd' : {'success': color.greenDark(''),
'warning': color.yellow('wrong argument format'),
'error' : color.red('')
},
        'img' : {'success': color.greenDark('successfully created image \'{0}\' at \'{1}\''.format(add_attr[0], add_attr[1])),
'warning': color.yellow('wrong argument format'),
'error' : color.red('')
},
        'txt' : {'success': color.greenDark('successfully created text \'{0}\' at \'{1}\''.format(add_attr[0], add_attr[1])),
'warning': color.yellow('wrong argument format'),
'error' : color.red('')
},
        'mv' : {'success': color.greenDark('successfully moved file \'{0}\' to \'{1}\''.format(add_attr[0], add_attr[1])),
                'warning': color.yellow('wrong argument format'),
                'error' : color.red('the {0} path \'{1}\' doesn\'t exist'.format(add_attr[0], add_attr[1])),
                'sameDir': color.grey('Information: you are moving a file/folder within the same directory.'),
'nodstdir': color.red('The destination directory does not exist.'),
'nosrcdir': color.red('The source file or directory does not exist.')
},
        'cp' : {'success': color.greenDark('successfully copied file \'{0}\' to \'{1}\''.format(add_attr[0], add_attr[1])),
                'warning': color.yellow('wrong argument format'),
                'error' : color.red('the {0} path \'{1}\' doesn\'t exist'.format(add_attr[0], add_attr[1]))
},
        'rn' : {'success' : color.greenDark('successfully renamed file \'{0}\' to \'{1}\''.format(add_attr[0], add_attr[1])),
                'warning' : color.yellow('wrong argument format'),
                'error' : color.red('the given path \'{0}\' doesn\'t exist'.format(add_attr[0])),
'nosrcdir': color.red('The source file or directory does not exist.')
},
'f' : {'success': color.greenDark('\'{0}\' found at {1}'.format(add_attr[0], add_attr[1])),
'warning': color.yellow('wrong argument format'),
'error' : color.red('\'{0}\' not found in \'{1}\''.format(add_attr[0], add_attr[1]))
},
'--help' : {'success': color.greenDark(''),
'warning': color.yellow('wrong argument format'),
'error' : color.red('')
},
'quit' : {'success': '\n Thanks for using our Application!\n Made with ' + color.bold(
color.redLight('<3')) + ' by: {0}, {1}, {2}\n'.format(names[0], names[1], names[2]),
'warning': color.yellow('If you want to terminate program, enter q without further arguments.'),
'error' : color.red('If you want to terminate the program, enter q without further arguments.')
},
'clear' : {'success': color.greenDark(''),
'warning': color.yellow('wrong argument format'),
'error' : color.red('')
}
}
return dictionary[operator][typ]
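# --- Hedged usage note (as comments; the real call sites live elsewhere in the
# browser package, and the command-to-operator mapping in help.spfc_opr is an
# assumption here) -------------------------------------------------------------
#
#     from browser import error
#     print(error.get(['mk', 'docs'], 'success', add_attr=['docs', '/root']))
#
# i.e. pass the tokenized command, the message type ('success', 'warning',
# 'error', ...) and up to three extra values used to format the message.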
|
python
|
from brownie import accounts, PassiveStrategy
from brownie.network.gas.strategies import ExponentialScalingStrategy
import os
STRATEGIES = [
# "0x40C36799490042b31Efc4D3A7F8BDe5D3cB03526", # V0 ETH/USDT
# "0xA6803E6164EE978d8C511AfB23BA49AE0ae0C1C3", # old V1 ETH/USDC
# "0x5503bB32a0E37A1F0B8F8FE2006abC33C779a6FD", # old V1 ETH/USDT
"0x1cEA471aab8c57118d187315f3d6Ae1834cCD836", # V1 ETH/USDC
"0x4e03028626aa5e5d5e4CFeF2970231b0D6c5d5Ed", # V1 ETH/USDT
"0x8209df5A847C321d26eCb155CA76f95224c5DCd9", # V1 WBTC/USDC
]
def getAccount(account, pw):
from web3.auto import w3
with open(account, "r") as f:
return accounts.add(w3.eth.account.decrypt(f.read(), pw))
def main():
keeper = getAccount(os.environ["KEEPER_ACCOUNT"], os.environ["KEEPER_PW"])
# keeper = accounts.load(input("Brownie account: "))
balance = keeper.balance()
gas_strategy = ExponentialScalingStrategy("50 gwei", "1000 gwei")
for address in STRATEGIES:
print(f"Running for strategy: {address}")
strategy = PassiveStrategy.at(address)
try:
strategy.rebalance({"from": keeper, "gas_price": gas_strategy})
print("Rebalanced!")
except ValueError as e:
print(e)
print()
print(f"Gas used: {(balance - keeper.balance()) / 1e18:.4f} ETH")
print(f"New balance: {keeper.balance() / 1e18:.4f} ETH")
|
python
|
from typing import Any
import pandas as pd
from anubis.models import Submission, Assignment
from anubis.utils.cache import cache
def get_submissions(course_id: str) -> pd.DataFrame:
"""
Get all submissions from visible assignments, and put them in a dataframe
:return:
"""
# Get the submission sqlalchemy objects
raw_submissions = (
Submission.query.join(Assignment)
.filter(
Assignment.hidden == False,
Assignment.course_id == course_id,
)
.all()
)
# Specify which columns we want
columns = ["id", "owner_id", "assignment_id", "processed", "created"]
# Build a dataframe of from the columns we pull out of each submission object
submissions = pd.DataFrame(
data=list(
map(
lambda x: ({column: getattr(x, column) for column in columns}),
raw_submissions,
)
),
columns=columns,
)
# Round the submission timestamps to the nearest hour
submissions["created"] = submissions["created"].apply(lambda date: pd.to_datetime(date).round("H"))
return submissions
@cache.memoize(timeout=360)
def get_raw_submissions(course_id: str) -> list[dict[str, Any]]:
    submissions_df = get_submissions(course_id)
data = (
submissions_df.groupby(["assignment_id", "created"])["id"]
.count()
.reset_index()
.rename(columns={"id": "count"})
.to_dict()
)
data["created"] = {k: str(v) for k, v in data["created"].items()}
assignment_ids = list(set(data["assignment_id"].values()))
response = {}
for assignment_id in assignment_ids:
assignment = Assignment.query.filter(Assignment.id == assignment_id).first()
response[assignment_id] = {
"data": [],
"name": assignment.name,
"release_date": str(assignment.release_date),
"due_date": str(assignment.due_date),
}
for index, assignment_id in data["assignment_id"].items():
response[assignment_id]["data"].append(
{
"x": data["created"][index],
"y": data["count"][index],
"label": f"{data['created'][index]} {data['count'][index]}",
}
)
return list(response.values())
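# Hedged note on the shape returned by get_raw_submissions (inferred from the
# code above): one entry per visible assignment, e.g.
#   [{'name': 'hw1', 'release_date': '...', 'due_date': '...',
#     'data': [{'x': '2021-09-01 12:00:00', 'y': 3, 'label': '...'}, ...]}, ...]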
|
python
|
# jsb/socklib/partyline.py
#
#
""" provide partyline functionality .. manage dcc sockets. """
__copyright__ = 'this file is in the public domain'
__author__ = 'Aim'
## jsb imports
from jsb.lib.fleet import getfleet
from jsb.utils.exception import handle_exception
from jsb.lib.threads import start_new_thread
from jsb.imports import getjson
json = getjson()
## basic imports
import thread
import pickle
import socket
import logging
## classes
class PartyLine(object):
""" partyline can be used to talk through dcc chat connections. """
def __init__(self):
self.socks = [] # partyline sockets list
self.jids = []
self.lock = thread.allocate_lock()
def size(self):
return len(self.socks)
def resume(self, sessionfile):
""" resume bot from session file. """
try:
session = json.load(open(sessionfile, 'r'))
self._resume(session)
except: handle_exception()
def _resume(self, data, reto=None):
""" resume a party line connection after reboot. """
fleet = getfleet()
for i in data['partyline']:
logging.warn("partyline - resuming %s" % i)
bot = fleet.byname(i['botname'])
if not bot: logging.error("partyline - can't find bot") ; continue
sock = socket.fromfd(i['fileno'], socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(1)
nick = i['nick']
userhost = i['userhost']
channel = i['channel']
if not bot:
logging.error("partyline - can't find %s bot in fleet" % i['botname'])
continue
self.socks.append({'bot': bot, 'sock': sock, 'nick': nick, 'userhost': userhost, 'channel': channel, 'silent': i['silent']})
bot._dccresume(sock, nick, userhost, channel)
if reto: self.say_nick(nick, 'rebooting done')
def _resumedata(self):
""" return data used for resume. """
result = []
for i in self.socks: result.append({'botname': i['bot'].cfg.name, 'fileno': i['sock'].fileno(), 'nick': i['nick'], 'userhost': i['userhost'], 'channel': i['channel'], 'silent': i['silent']})
return result
def stop(self, bot):
""" stop all users on bot. """
for i in self.socks:
if i['bot'] == bot:
try:
i['sock'].shutdown(2)
i['sock'].close()
except: pass
def stop_all(self):
""" stop every user on partyline. """
for i in self.socks:
try:
i['sock'].shutdown(2)
i['sock'].close()
except:
pass
def loud(self, nick):
""" enable broadcasting of txt for nick. """
for i in self.socks:
if i['nick'] == nick: i['silent'] = False
def silent(self, nick):
""" disable broadcasting txt from/to nick. """
for i in self.socks:
if i['nick'] == nick: i['silent'] = True
def add_party(self, bot, sock, nick, userhost, channel):
''' add a socket with nick to the list. '''
for i in self.socks:
if i['sock'] == sock: return
self.socks.append({'bot': bot, 'sock': sock, 'nick': nick, 'userhost': userhost, 'channel': channel, 'silent': False})
logging.warn("partyline - added user %s" % nick)
def del_party(self, nick):
''' remove a socket with nick from the list. '''
nick = nick.lower()
self.lock.acquire()
try:
for socknr in range(len(self.socks)-1, -1, -1):
if self.socks[socknr]['nick'].lower() == nick: del self.socks[socknr]
logging.debug('partyline - removed user %s' % nick)
finally: self.lock.release()
def list_nicks(self):
''' list all connected nicks. '''
result = []
for item in self.socks: result.append(item['nick'])
return result
def say_broadcast(self, txt):
''' broadcast a message to all ppl on partyline. '''
for item in self.socks:
if not item['silent']: item['sock'].send("%s\n" % txt)
def say_broadcast_notself(self, nick, txt):
''' broadcast a message to all ppl on partyline, except the sender. '''
nick = nick.lower()
for item in self.socks:
if item['nick'] == nick: continue
if not item['silent']: item['sock'].send("%s\n" % txt)
def say_nick(self, nickto, msg):
''' say a message on the partyline to an user. '''
nickto = nickto.lower()
for item in self.socks:
if item['nick'].lower() == nickto:
if not '\n' in msg: msg += "\n"
item['sock'].send("%s" % msg)
return
def is_on(self, nick):
        ''' checks if a user is on the partyline. '''
nick = nick.lower()
for item in self.socks:
if item['nick'].lower() == nick: return True
return False
## global partyline object
partyline = PartyLine()
def size():
return partyline.size()
|
python
|
from __future__ import unicode_literals
from builtins import str
import six
@six.python_2_unicode_compatible
class TokenSet:
"""
A token set is used to store the unique list of all tokens
within an index. Token sets are also used to represent an
incoming query to the index, this query token set and index
token set are then intersected to find which tokens to look
up in the inverted index.
A token set can hold multiple tokens, as in the case of the
index token set, or it can hold a single token as in the
case of a simple query token set.
Additionally token sets are used to perform wildcard matching.
Leading, contained and trailing wildcards are supported, and
from this edit distance matching can also be provided.
Token sets are implemented as a minimal finite state automata,
where both common prefixes and suffixes are shared between tokens.
This helps to reduce the space used for storing the token set.
TODO: consider https://github.com/glyph/automat
"""
_next_id = 1
def __init__(self):
self.final = False
self.edges = {}
self.id = self._next_id
self.__class__._next_id += 1
def __str__(self):
try:
return self._string
except AttributeError:
pass
string = "1" if self.final else "0"
for label in sorted(list(self.edges.keys())):
node = self.edges[label]
try:
node_id = str(node.id)
except AttributeError:
# TODO: JS seems to rely on undefined for the id attribute?
node_id = ""
string = string + label + node_id
return string
def __repr__(self):
return '<TokenSet "{}">'.format(str(self))
@classmethod
    def from_string(cls, string):
"""Creates a TokenSet from a string.
The string may contain one or more wildcard characters (*) that will
allow wildcard matching when intersecting with another TokenSet
"""
node = TokenSet()
root = node
        # Iterates through all characters in the passed string, appending
# a node for each character.
# When a wildcard character is found then a self referencing edge
# is introduced to continually match any number of characters
for i, char in enumerate(string):
final = i == len(string) - 1
if char == "*":
node.edges[char] = node
node.final = final
else:
next_ = TokenSet()
next_.final = final
node.edges[char] = next_
node = next_
return root
@classmethod
def from_fuzzy_string(cls, string, edit_distance):
"""Creates a token set representing a single string with a specified
edit distance.
Insertions, deletions, substitutions and transpositions are each
treated as an edit distance of 1.
Increasing the allowed edit distance will have a dramatic impact
on the performance of both creating and intersecting these TokenSets.
It is advised to keep the edit distance less than 3.
"""
root = TokenSet()
stack = [{"node": root, "edits_remaining": edit_distance, "string": string}]
while stack:
frame = stack.pop()
# no edit
if len(frame["string"]) > 0:
char = frame["string"][0]
no_edit_node = None
if char in frame["node"].edges:
no_edit_node = frame["node"].edges[char]
else:
no_edit_node = TokenSet()
frame["node"].edges[char] = no_edit_node
if len(frame["string"]) == 1:
no_edit_node.final = True
stack.append(
{
"node": no_edit_node,
"edits_remaining": frame["edits_remaining"],
"string": frame["string"][1:],
}
)
if frame["edits_remaining"] == 0:
continue
# insertion, can only do insertion if there are edits remaining
if "*" in frame["node"].edges:
insertion_node = frame["node"].edges["*"]
else:
insertion_node = TokenSet()
frame["node"].edges["*"] = insertion_node
if len(frame["string"]) == 0:
insertion_node.final = True
stack.append(
{
"node": insertion_node,
"edits_remaining": frame["edits_remaining"] - 1,
"string": frame["string"],
}
)
# deletion, can only do a deletion if we have enough edits
# remaining and if there are characters left to delete in the string
if len(frame["string"]) > 1:
stack.append(
{
"node": frame["node"],
"edits_remaining": frame["edits_remaining"] - 1,
"string": frame["string"][1:],
}
)
# deletion, just removing the last character of the string
if len(frame["string"]) == 1:
frame["node"].final = True
# substitution, can only do a substitution if we have enough edits
# remaining and there are characters left to substitute
if len(frame["string"]) >= 1:
if "*" in frame["node"].edges:
substitution_node = frame["node"].edges["*"]
else:
substitution_node = TokenSet()
frame["node"].edges["*"] = substitution_node
if len(frame["string"]) == 1:
substitution_node.final = True
stack.append(
{
"node": substitution_node,
"edits_remaining": frame["edits_remaining"] - 1,
"string": frame["string"][1:],
}
)
# transposition, can only do a transposition if there are edits
# remaining and there are enough characters to transpose
if frame["edits_remaining"] and len(frame["string"]) > 1:
char_a = frame["string"][0]
char_b = frame["string"][1]
transpose_node = None
if char_b in frame["node"].edges:
transpose_node = frame["node"].edges[char_b]
else:
transpose_node = TokenSet()
frame["node"].edges[char_b] = transpose_node
if len(frame["string"]) == 1:
transpose_node.final = True
stack.append(
{
"node": transpose_node,
"edits_remaining": frame["edits_remaining"] - 1,
"string": char_a + frame["string"][2:],
}
)
return root
@classmethod
def from_list(cls, list_of_words):
from lunr.token_set_builder import TokenSetBuilder
builder = TokenSetBuilder()
for word in list_of_words:
builder.insert(word)
builder.finish()
return builder.root
@classmethod
def from_clause(cls, clause):
if clause.edit_distance:
return cls.from_fuzzy_string(clause.term, clause.edit_distance)
else:
return cls.from_string(clause.term)
def to_list(self):
words = []
stack = [{"prefix": "", "node": self}]
while stack:
frame = stack.pop()
if frame["node"].final:
words.append(frame["prefix"])
for edge in frame["node"].edges.keys():
stack.append(
{
"prefix": frame["prefix"] + str(edge),
"node": frame["node"].edges[edge],
}
)
return words
def intersect(self, other):
"""Returns a new TokenSet that is the intersection of this TokenSet
and the passed TokenSet.
This intersection will take into account any wildcards contained within
the TokenSet.
"""
output = TokenSet()
stack = [{"node": self, "q_node": other, "output": output}]
while stack:
frame = stack.pop()
for q_edge in frame["q_node"].edges.keys():
for n_edge in frame["node"].edges.keys():
if n_edge == q_edge or q_edge == "*":
node = frame["node"].edges[n_edge]
q_node = frame["q_node"].edges[q_edge]
final = node.final and q_node.final
next_ = None
if n_edge in frame["output"].edges:
next_ = frame["output"].edges[n_edge]
next_.final = next_.final or final
else:
next_ = TokenSet()
next_.final = final
frame["output"].edges[n_edge] = next_
stack.append({"node": node, "q_node": q_node, "output": next_})
return output
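# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It only
# exercises methods defined above (from_string, from_fuzzy_string, intersect,
# to_list); the example words are made up. Note that, as implemented above,
# wildcard handling in intersect() inspects the edges of the argument set,
# so the wildcard/fuzzy set is passed in as `other`.
if __name__ == "__main__":
    exact = TokenSet.from_string("cat")            # accepts exactly "cat"
    prefix = TokenSet.from_string("ca*")           # accepts anything starting with "ca"
    print(exact.intersect(prefix).to_list())       # ['cat']

    fuzzy = TokenSet.from_fuzzy_string("cart", 1)  # strings within edit distance 1 of "cart"
    print(exact.intersect(fuzzy).to_list())        # ['cat'] ("cart" with one deletion)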
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
import pandas as pd
class OffsetScaler(object):
@staticmethod
def create_normalizing_scaler(dataframe):
"""
Creates a scaling object that normalizes the data between 0 and 1
Args:
dataframe: pandas DataFrame
The dataframe containing the data (usually the training data)
that will be used to compute the scaling factor and offset
"""
expected_columns = list(dataframe.columns)
offset = dataframe.min()
factor = dataframe.max() - dataframe.min()
return OffsetScaler(expected_columns, offset, factor)
@staticmethod
def create_from_mean_std(dataframe):
"""
Creates a scaling object using the mean as the offset and the
standard deviation as the factor
Args:
dataframe: pandas DataFrame
The dataframe containing the data (usually the training data) that will
be used to compute the mean and standard deviation for the scaler
"""
expected_columns = list(dataframe.columns)
offset = dataframe.mean()
factor = dataframe.std()
return OffsetScaler(expected_columns, offset, factor)
def __init__(self, expected_columns, offset_series, factor_series):
"""
This scaling object shifts by the offset and then scales the result
by the factor. Typically, one would create this with one of the static
methods create_normalizing_scaler or create_from_mean_std.
scaled_data = (data-offset)/factor
Args:
expected_columns: list of str
list of strings indicating the names of the columns in offset_series,
factor_series, and the dataframe passed to scale and unscale.
offset_series: pandas Series
Series with columns (or labels) the same as expected_columns and
values that represent the offset to be used when shifting the data
factor_series: pandas Series
Series with columns (or labels) the same as expected_columns and
values that represent the factor to be used to scale the shifted data
"""
self._expected_columns = expected_columns
self._offset = offset_series
self._factor = factor_series
if list(offset_series.index) != expected_columns:
raise ValueError(
"OffsetScaler was passed an offset series with an index that"
" does not match expected_columns. Please make sure these labels match."
)
if list(factor_series.index) != expected_columns:
raise ValueError(
"OffsetScaler was passed a factor series with an index that"
" does not match expected_columns. Please make sure these labels match."
)
def _verify_columns_match(self, dataframe):
if self._expected_columns != list(dataframe.columns):
raise ValueError(
"OffsetScaler was passed a dataframe that did not contain"
" the same column labels as those used to create the scaler."
" Please make sure the column labels match."
)
def scale(self, dataframe):
"""
Return a new dataframe where the values are scaled according to the
offset and factor
Args:
dataframe: pandas DataFrame
The dataframe to be scaled
Returns: pandas DataFrame
"""
self._verify_columns_match(dataframe)
df = dataframe - self._offset
df = df.divide(self._factor)
return df
def unscale(self, dataframe):
"""
Return a new dataframe where the values are unscaled according to the
offset and factor
Args:
dataframe: pandas DataFrame
The dataframe to be unscaled
Returns: pandas DataFrame
"""
self._verify_columns_match(dataframe)
df = dataframe.multiply(self._factor)
df = df + self._offset
return df
def expected_columns(self):
"""
Return the expected column names for the scaler series objects
"""
return self._expected_columns
def offset_series(self):
"""
Return the offset for the scaler as a pandas Series object
"""
return self._offset
def factor_series(self):
"""
Return the factors for the scaler as a pandas Series object
"""
return self._factor
def to_dict(self):
"""
Returns a dictionary representation of this scaler
"""
d = dict()
d["expected_columns"] = list(self._expected_columns)
d["offset"] = self._offset.to_dict()
d["factor"] = self._factor.to_dict()
return d
@staticmethod
def from_dict(d):
"""
Create an instance of this scaler from a dictionary
(that was created with to_dict)
Args:
d : dict
The dict created with to_dict
Returns: new OffsetScaler
"""
expected_columns = d["expected_columns"]
offset = pd.Series(d["offset"])
factor = pd.Series(d["factor"])
return OffsetScaler(expected_columns, offset, factor)
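# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The column
# names and values are made up; only the OffsetScaler API defined above is
# exercised (create_normalizing_scaler, scale, unscale, to_dict, from_dict).
if __name__ == "__main__":
    data = pd.DataFrame(
        {"temperature": [300.0, 350.0, 400.0], "pressure": [1.0, 2.0, 4.0]}
    )

    scaler = OffsetScaler.create_normalizing_scaler(data)
    scaled = scaler.scale(data)        # every column now lies in [0, 1]
    restored = scaler.unscale(scaled)  # recovers the original values (up to rounding)

    # The scaler can be serialized to a plain dict and rebuilt later.
    rebuilt = OffsetScaler.from_dict(scaler.to_dict())
    print(rebuilt.scale(data).equals(scaled))  # True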