from machine import Pin, disable_irq, enable_irq
import utime


class IRQCounter:
    provides = ["count", "time_since_last_trigger"]

    def __init__(self, port, trigger, cooldown):
        self.counter = 0
        self.last_trigger = utime.ticks_ms()

        def irq_handler(pin):
            now = utime.ticks_ms()
            # Ignore edges that arrive within the cooldown window (debounce).
            # ticks_diff handles the wrap-around of the millisecond counter.
            if utime.ticks_diff(now, self.last_trigger) > cooldown:
                self.counter += 1
                self.last_trigger = now

        port.init(Pin.IN, None)
        port.irq(irq_handler, trigger)

    def readout(self):
        # Disable interrupts so the counter and timestamp are read atomically.
        irq_state = disable_irq()
        count = self.counter
        time_since_last_trigger = utime.ticks_diff(utime.ticks_ms(), self.last_trigger)
        enable_irq(irq_state)
        return {"count": count, "time_since_last_trigger": time_since_last_trigger}
|
python
|
from __future__ import print_function, division, absolute_import, unicode_literals
from builtins import bytes, dict, object, range, map, input, str
from future.utils import itervalues, viewitems, iteritems, listvalues, listitems
from io import open
import pytest
import rfpipe
from astropy import time
def test_create():
st = rfpipe.state.State(validate=False, showsummary=False)
assert st.metadata.atdefaults()
@pytest.fixture(scope="module")
def mockstate():
t0 = time.Time.now().mjd
meta = rfpipe.metadata.mock_metadata(t0, t0+0.3/(24*3600), 27, 4, 32*4, 4,
5e3, datasource='sim', antconfig='D')
return rfpipe.state.State(inmeta=meta)
def test_mock(mockstate):
assert mockstate.datashape == (60, 351, mockstate.nchan, 2)
def test_pol(mockstate):
assert len(mockstate.metadata.pols_orig) == 4 and len(mockstate.pols) == 2
def test_mocknseg(mockstate):
assert mockstate.nsegment == 1
def test_version(mockstate):
assert mockstate.version
def test_clearcache(mockstate):
segmenttimes = mockstate.segmenttimes
mockstate.clearcache()
mockstate.summarize()
assert (segmenttimes == mockstate.segmenttimes).all()
def test_lowmem():
t0 = time.Time.now().mjd
meta = rfpipe.metadata.mock_metadata(t0, t0+0.3/(24*3600), 27, 4, 32*4, 4,
5e3, datasource='sim')
st = rfpipe.state.State(inmeta=meta, inprefs={'memory_limit': 0.1})
assert st.validate()
|
python
|
'''
* Copyright (c) 2022 MouBieCat
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
'''
from typing import List
# Check whether the number is negative (i.e. not a valid input)
def not_zero(
        number: int,
        showErrorMsg: bool = False
) -> bool:
    if number < 0 and showErrorMsg:
        print("The number entered is not a valid input")
    return number < 0


# Check whether the number is prime
def is_prime_number(
        number: int
) -> bool:
    if number < 2:  # 0 and 1 are not prime
        return False
    for nub in range(2, number):
        if number % nub == 0:
            return False
    return True


# Odd = True, even = False
def is_odd_number(
        number: int
) -> bool:
    return number % 2 == 1


# Get the list of divisors
def get_number_divisor_array(
        number: int,
        showCountMsg: bool = False
) -> List[int]:
    # Using this function requires the import
    # from typing import List
    # Note: this part is not in the course itself; it is just my own
    # practice and experience!
    returnList = []  # list collecting the divisors
    for nbs in range(1, number + 1):
        if number % nbs == 0:
            returnList.append(nbs)
    if showCountMsg:
        print("Found " + str(len(returnList)) + " divisors in total")
    return returnList


# Main
number = int(input("Please enter a positive integer => "))
if not not_zero(number, True):
    print(str(number) + " is a prime number"
          if is_prime_number(number)
          else str(number) + " is not a prime number")
    print(str(number) + " is an odd number"
          if is_odd_number(number)
          else str(number) + " is an even number")
    for index in get_number_divisor_array(number):
        print(str(number) + " has the divisor " + str(index))
|
python
|
from setuptools import setup
setup(
name="azblob",
version="1.0.1",
author="Philipp Lang",
packages=["azblob"],
url=("https://github.com/plang85/azblob"),
license="MIT License",
description="Download Azure blobs.",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
"Topic :: Software Development",
],
entry_points={"console_scripts": ["azblob = azblob.ops:cli"]},
install_requires=[
"azure-storage-blob>=12.0.0",
"azure-storage-file>=1.3.0",
"tabulate>=0.8.2",
],
extras_require={"dev": ["black", "twine"]},
)
|
python
|
# Program to compute set intersection and set difference.
setx = set(["apple", "mango"])
sety = set(["mango", "orange"])
# Set intersection
setz = setx & sety
print(setz)
# Set difference
setb = setx - setz
print(setb)
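# For completeness, the symmetric difference (elements in exactly one set):
# setx ^ sety  ->  {'apple', 'orange'}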
|
python
|
#!/usr/bin/env python
# coding: utf-8
# #NUMBER 1: DATA PREPARATION
# In[4]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
print("Done importing libraries")
# In[5]:
#path_to_file= "C:/Users/necbanking/Desktop/2.1 modular/fifa_AI/"
df=pd.read_csv("players_20.csv")
df_19=pd.read_csv("players_19.csv")
df.head()
# In[6]:
df_19.head()
# In[7]:
cols_to_drop = ['dob', 'weight_kg', 'international_reputation', 'real_face',
                'release_clause_eur', 'player_tags', 'team_jersey_number',
                'loaned_from', 'joined', 'contract_valid_until',
                'nation_position', 'nation_jersey_number', 'player_traits',
                'sofifa_id', 'long_name']
df = df.drop(columns=cols_to_drop)
# In[8]:
# Apply the same column drops to the 2019 dataframe
df_19 = df_19.drop(columns=cols_to_drop)
# #NUMBER 2: CORRELATION
# In[9]:
#splitting data
train_data, test_data=train_test_split(df,test_size=0.25)
print("Leingth of training data is:"+str(len(train_data)))
print("Leingth of test data is:"+str(len(test_data)))
# In[10]:
#selecting features
target_feature='overall'
#finding features that are correlated to the overall column
feature_corr=train_data.corr(method='pearson')[target_feature]
feature_corr=feature_corr.sort_values(ascending=False)
#print the top twenty correlations with the target value
print(feature_corr[1:21])
corr_matrix = df.corr()
corr_matrix['overall'].sort_values(ascending=False)
##
# #NUMBER 3: REGRESSION MODEL
#
# In[11]:
#Training Regression model
features=corr_matrix['overall'].sort_values(ascending=False)
features=['potential','value_eur','wage_eur','attacking_short_passing','skill_long_passing','age','skill_ball_control','skill_curve','skill_moves','attacking_volleys']
X_train=df[features]
y_train=df['overall']
r = LinearRegression()
r.fit(X_train,y_train )
print(r.score(X_train,y_train))
# In[12]:
#copying the 13 most relevant features (ranks 1 to 13 of the correlation ranking) to be used by the model
features=feature_corr[1:14].index.tolist()
print(features)
# In[13]:
#training the model
x_train=train_data[features]
y_train=train_data[target_feature]
#replace all empty cells with zero
x_train.fillna(0,inplace=True)
#using the LinearRegression method to build the model
model=LinearRegression().fit(x_train,y_train)
#print score
print("Score:"+str(model.score(x_train,y_train)))
# #NUMBER 4: A PROCESS OF OPTIMISATION
# In[14]:
#testing the model using the 25% test split of the players_20.csv (df) dataframe
#sort test data first
test_data=test_data.sort_values([target_feature], ascending=False)
x_test=test_data[features]
x_test.fillna(0,inplace=True)
y_test=test_data[target_feature]
#start predicting
y_predict=model.predict(x_test)
#add new column called predicted
test_data['predicted']=y_predict
rating=((y_predict-y_test)/y_test*100)
#add a new column called difference
test_data['difference']=rating
test_data[["short_name","overall","predicted","difference"]]
# In[16]:
#preprocessing features: coerce the model inputs to numeric
numeric_cols = ['potential', 'value_eur', 'wage_eur', 'attacking_short_passing',
                'skill_long_passing', 'age', 'skill_ball_control', 'skill_curve',
                'skill_moves', 'attacking_volleys']
for col in numeric_cols:
    df_19[col] = pd.to_numeric(df_19[col], errors='coerce')
# #NUMBER 5
# In[17]:
#selecting features from the 2019 dataset
features=['potential','value_eur','wage_eur','attacking_short_passing','skill_long_passing','age','skill_ball_control','skill_curve','skill_moves','attacking_volleys']
x_test=df_19[features]
x_test.fillna(0,inplace=True)
y_test=df_19['overall']
predict=r.predict(x_test)
df_19['predicted']=predict
df_19[['short_name','overall','predicted']]
# In[18]:
import pickle
# In[19]:
filename="player_rating.pkl"
outfile=open(filename,'wb')
pickle.dump(model,outfile)
outfile.close()
# In[ ]:
|
python
|
import util
from copy import deepcopy
from collections import defaultdict
def solver(participants, pizzas):
    pizzas_key = defaultdict(list)
    for i, v in enumerate(pizzas):
        pizzas_key[v].append(i)
    acc = [[], 0]
    print('File loaded :: number of pizzas: %s, participants: %d' % (len(pizzas), participants))

    def solve(pizs):
        s = sum(pizs)
        if s <= participants:
            if s > acc[1]:
                acc[0], acc[1] = pizs, s
                print('Found :', s)
            return
        # Try removing each pizza in turn (exhaustive search).
        for j in range(len(pizs)):
            solve(pizs[0:j] + pizs[j+1:])
            if acc[1] == participants:
                break

    solve(pizzas)
    return [pizzas_key[v].pop(0) for v in acc[0]]


if __name__ == "__main__":
    util.solve_files('input', solver)
|
python
|
"""
[LeetCode] 708. Insert into a Cyclic Sorted List
# Insert into a Cyclic Sorted List linspiration
Problem
Given a node from a cyclic linked list which is sorted in ascending order, write a function to insert a value into the list such that it remains a cyclic sorted list. The given node can be a reference to any single node in the list, and may not be necessarily the smallest value in the cyclic list.
If there are multiple suitable places for insertion, you may choose any place to insert the new value. After the insertion, the cyclic list should remain sorted.
If the list is empty (i.e., given node is null), you should create a new single cyclic list and return the reference to that single node. Otherwise, you should return the original given node.
The following example may help you understand the problem better:
Suppose there is a cyclic sorted list of three elements, and you are given a reference to the node with value 3; we need to insert 2 into the list.
The new node should be inserted between node 1 and node 3. After the insertion the list should remain sorted, and we should still return node 3.
"""
# V0
# IDEA : LINKED LIST
# IDEA : CONSIDER THE 4 CASES BELOW :
# CASE 1) No head:
# CASE 2) prev.val <= val <= cur.val
# CASE 3) prev.val > cur.val and (val <= cur.val or prev.val <= val): cur is at the wrap point (min or max); not all nodes have the same value
# CASE 4) val differs from every node's value in a cyclic linked list where every node has the same value
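# NOTE: the solutions below assume a cyclic-list Node class that is not
# defined in this file; a minimal sketch consistent with the Node(val, head)
# calls used here (the V1 solution instead expects a similar ListNode):
class Node(object):
    def __init__(self, val=None, next=None):
        self.val = val
        self.next = next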
class Solution(object):
def insert(self, head, val):
node = Node(val, head)
# case 1): no head
if not head:
return node
prev, cur = head, head.next
while True:
# case 2): prev.val <= val <= cur.val
# e.g. 1 -> 3 -> 5 -> "4" (insert 4)
if prev.val <= val <= cur.val:
break
            # case 3): at the wrap point (prev.val > cur.val), val is either
            # the new maximum (val >= prev.val) or the new minimum (val <= cur.val)
            # e.g. 4 -> 5 -> 6 -> (wrap): insert 7 (new max) or 3 (new min) here
elif prev.val > cur.val and (val <= cur.val or prev.val <= val):
break
prev, cur = prev.next, cur.next
# case 4): prev == head
# e.g. 1 -> 1 -> 1 ->...-> 1
            if prev == head:  # in case all nodes have the same value (none matched above)
break
# insert node between prev and cur
prev.next = node
node.next = cur
return head
# V1
# https://blog.csdn.net/weixin_41677877/article/details/81200818
class Solution:
def insert(self, node, x):
# write your code
originNode = node
tmp = ListNode(x)
if node == None:
node = tmp
node.next = node
return node
else:
while True:
if node.next.next == node:
tmp.next = node.next
node.next = tmp
return node
if (node.val<=x and node.next.val>x) or (node.val<x and node.next.val>=x) or (node.val>node.next.val and node.val<x and node.next.val<x) or (node.val>node.next.val and node.val>x and node.next.val>x):
tmp.next = node.next
node.next = tmp
return node
node = node.next
if node == originNode:
tmp.next = node.next
node.next = tmp
return node
# V1'
# https://ttzztt.gitbooks.io/lc/content/linked-list/insert-into-a-cyclic-sorted-list.html
# IDEA : LINKED LIST
# IDEA : CONSIDER THE 4 CASES BELOW :
# CASE 1) No head:
# CASE 2) prev.val <= val <= cur.val
# CASE 3) prev.val > cur.val and (val <= cur.val or prev.val <= val): cur is at the wrap point (min or max); not all nodes have the same value
# CASE 4) val differs from every node's value in a cyclic linked list where every node has the same value
class Solution(object):
def insert(self, head, val):
"""
:type head: Node
:type insertVal: int
:rtype: Node
"""
node = Node(val, head)
# case 1 no head
if not head:
return node
prev, cur = head, head.next
while 1:
# case 2: prev.val <= val <= cur.val
if prev.val <= val <= cur.val:
break
            # case 3: at the wrap point (prev.val > cur.val), val is the new max or the new min
elif prev.val > cur.val and (val <= cur.val or prev.val <= val):
break
prev, cur = prev.next, cur.next
# case 4: prev == head
            if prev == head:  # in case all nodes have the same value (none matched above)
break
# insert node between prev and cur
prev.next = node
node.next = cur
return head
# V1''
# https://github.com/dennyzhang/code.dennyzhang.com/tree/master/problems/insert-into-a-cyclic-sorted-list
class Solution:
def insert(self, head, insertVal):
"""
:type head: Node
:type insertVal: int
:rtype: Node
"""
node = Node(insertVal, None)
# empty
if head is None:
node.next = node
return node
# one node
if head.next is None:
head.next = node
node.next = head
return head
# find the smallest value, which is no less than the target
p = head
while True:
# end of the loop
if p.val > p.next.val:
# biggest or smallest
if insertVal >= p.val or insertVal <= p.next.val:
break
# should keep going
if insertVal > p.next.val and insertVal < p.val:
p = p.next
continue
break
if insertVal >= p.val and insertVal <= p.next.val:
break
p = p.next
if p == head:
# run into the loop again
break
node.next = p.next
p.next = node
return head
# V1
# https://ithelp.ithome.com.tw/articles/10223721
# V1'
# https://blog.51cto.com/u_15127692/3670466
# V2
|
python
|
# Statistics for Univariate Data
Univariate data are data described by a single variable. For example, the heights of your classmates are univariate data. The main purpose of univariate data analysis is the description of the data.
Univariate analysis does not consider relationships between different variables, such as the relationship between the height and the weight of your classmates.
import math
import random
import numpy as np
import pandas as pd
import plotly.express as px
## Sampling
**Definitions**
- **Population:** the entire group in which we want to study some characteristic. For example, all the women of Chile, or all the households of the comuna of Providencia.
- **Sample:** the subgroup of the population used to infer properties of the population. For example, to study some property of the women of Chile, we use a sample consisting of 10 women from each comuna of Chile.
### Sampling Techniques
- **Convenience sampling:** the members of the population that are easiest to reach are selected. For example, for the women of Chile, I use the women of my school as the sample.
- **Simple random sampling:** every member of the population has the same probability of being chosen. For example, with a random number generator we generate RUTs and keep the generated RUTs that correspond to women.
- **Systematic sampling:** the members of the population are listed (ordered) and chosen starting from an initial position and with a fixed interval, as in the sketch after this list.
- **Stratified sampling:** the population is divided into smaller subgroups (strata). The strata are built from common characteristics of their members. Then a random sample is drawn from each stratum.
- **Quota sampling:** very similar to stratified sampling, but the sample size of each stratum depends on the proportion of the stratum in the total population.
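A quick sketch of systematic sampling (the population, starting point and interval here are illustrative assumptions):
poblacion = list(range(1000))           # the listed (ordered) population
inicio = random.randint(0, 9)           # random starting position
intervalo = 10                          # fixed interval
muestra = poblacion[inicio::intervalo]  # every 10th member from the start
print(f'Sample size: {len(muestra)}')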
### Types of Data
- **Discrete data:** data whose possible values can be counted (even if, in theory, the number of possible values is infinite). For example, shoe size is discrete data, since only a finite number of sizes exist.
- **Continuous data:** data whose possible values cannot be counted. They are usually represented by a real number (with decimals). For example, the height of each individual, or the temperature at a certain hour of the day at a prespecified place in the city.
## Presenting the Data
For discrete data, the most common presentation tools are the frequency table and the frequency chart.
**Example:**
Consider the grades of 32 students on a test in which an integer grade from 0 to 10 can be obtained. Suppose the results are the following:
resultados = [0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 9, 10]
We count how many occurrences of each grade there are in the sample:
frecuencia = [(i, resultados.count(i)) for i in range(11)]
df_frecuencia = pd.DataFrame(frecuencia, columns=['nota', 'frecuencia'])
df_frecuencia
We show the data from the table above in a bar chart.
fig = px.bar(df_frecuencia, x='nota', y=['frecuencia',],
             title=f'Grade Frequency')
fig.show()
For continuous data, on the other hand, the most common tool is a histogram. A histogram also represents the frequency of occurrence of data but, since the data are continuous, it represents the frequency of occurrence within a given interval.
Let us look at an example using a 10-year historical series of the price of the USD in terms of the CLP (USDCLP).
df_usdclp = pd.read_excel('data/20210312_10Y_usdclp.xlsx')
fig = px.line(df_usdclp, x='fecha', y=['valor',],
              title=f'USDCLP Historical Series')
fig.show()
We can see that the values lie between roughly 450 and 870. We will divide that interval into subintervals of 10 CLP and then plot (with a bar chart) the frequency of the prices observed in each of those subintervals.
fig = px.histogram(
    df_usdclp,
    x="valor",
    title='USDCLP Histogram - Frequency in 10 CLP Intervals')
fig.show()
### Shape of the Histogram
It is important to describe the shape of the histogram; its main characteristic is the presence of skew:
df_sim = pd.DataFrame([(0,1), (1,2), (2,3), (3,4), (4,5), (5,6), (6,8), (7,9), (8,11), (9,10), (10,8)],
                      columns=['intervalo', 'frecuencia'])
fig = px.bar(df_sim, x='intervalo', y=['frecuencia',],
             title=f'Negative Skew')
fig.show()
df_sim = pd.DataFrame([(0,8), (1,10), (2,11), (3,9), (4,8), (5,6), (6,5), (7,4), (8,3), (9,2), (10,1)],
                      columns=['intervalo', 'frecuencia'])
fig = px.bar(df_sim, x='intervalo', y=['frecuencia',],
             title=f'Positive Skew')
fig.show()
df_sim = pd.DataFrame([(0,1), (1,2), (2,3), (3,4), (4,5), (5,6), (6,5), (7,4), (8,3), (9,2), (10,1)],
                      columns=['intervalo', 'frecuencia'])
fig = px.bar(df_sim, x='intervalo', y=['frecuencia',],
             title=f'No Skew')
fig.show()
## Measures of Central Tendency
So far we have seen how to collect and present data. The next step is to choose a single value that can represent the data as a whole: a measure of central tendency that tells us where "the middle" of the collected data lies. The most common measures of central tendency are:
- mode
- mean
- median
### Mode
**Definition:** the mode is the value that occurs most frequently in the data.
**Tips:**
- There can be more than one *mode* if two or more values occur with the same highest frequency.
- If no value in the sample occurs more than once, the sample has no *mode*.
**Example:**
data = [4, 7, 3, 3, 1, 2, 7, 5, 7, 11]
contador = {elem: data.count(elem) for elem in set(data)}
highest_counter = [(k, v) for k, v in contador.items() if v == max(contador.values())]
print(f'The mode is: {highest_counter[0][0]}')
When the data are presented in a frequency table, the mode is the group with the highest frequency. In a bar chart, it is the group with the tallest bar.
df_frecuencia = pd.DataFrame.from_dict(contador, orient='index')
df_frecuencia.columns = ['frecuencia']
df_frecuencia
fig = px.bar(df_frecuencia, x=df_frecuencia.index, y=['frecuencia',],
             title=f'Bar Chart of Grades')
fig.show()
#### The Modal Class
When looking for the mode of data that have been grouped, one must determine the **group** with the highest frequency. This group is called the **modal class**.
Without going through all the data, it is not possible to determine which value within the modal class has the highest frequency.
### Mean
The arithmetic mean, also called the average, is the most common measure of central tendency. The mean is simply the sum of all the values divided by the total number of data points. It is usually denoted by $\mu$ or $\overline x$. More formally:
$$\overline x = \frac{\sum_{i=1}^N x_i}{N}$$
Unlike the mode, the mean is usually a number that does not belong to the data. For example, if your grades are 6, 6, 7 and 7, the mean is 6.5, which does not coincide with any of the grades obtained.
How do we obtain the mean of the data from the frequency table?
**Answer:** in the case above, the mean is obtained with the following formula,
$$\overline x =\frac{\sum_{i=1}^N f_i\cdot x_i}{\sum_{i=1}^N f_i}$$
where $f_i$ is the frequency of the observation $x_i$.
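A quick check of this formula using the resultados sample from earlier (the frequency pairs are rebuilt here so the snippet is self-contained):
frecuencia = [(i, resultados.count(i)) for i in range(11)]
media_tabla = sum(f * x for x, f in frecuencia) / sum(f for _, f in frecuencia)
print(f'Mean from the frequency table: {media_tabla:.2f}')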
### Median
The median is the value right in the middle when the data are sorted in ascending order. If the number of data points is even, the median is the mean of the two middle values.
This implies that 50% of the data lie to the left of the median and 50% of the data lie to the right of the median.
**Example:**
Find the median of 7, 12, 1, 4, 2, 17, 9, 11, 16, 10, 18.
datos = [7, 12, 1, 4, 2, 17, 9, 11, 16, 10, 18]
datos.sort()
print(f'The sorted data are: {datos}')
There are 11 elements, so the middle one is the 6th element (index 5). Therefore the median is:
print(f'median: {datos[5]}')
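For the even case, the median is the average of the two middle values; a small sketch:
datos_par = [1, 2, 4, 7, 9, 10, 11, 12, 16, 17]  # 10 sorted elements
print(f'median: {(datos_par[4] + datos_par[5]) / 2}')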
### Summary
````{panels}
:column: col-4
:card: border-2
Mode
^^^
The **mode** is the value that occurs most frequently in the sample.
**Advantages**
- Extreme values do not affect the mode.
**Disadvantages**
- It does not use all the elements of the data set.
- It is not necessarily unique. There can be more than one **mode**, in which case its interpretation becomes difficult.
- The **mode** is not defined when no value repeats.
---
Mean
^^^
The mean is the sum of all the data divided by the total number of data points.
**Advantages**
- It is the most popular and most widely used measure.
- It uses all the data in the sample.
- It is unique and always well defined.
- Useful for comparing different samples.
- Widely used in further calculations.
**Disadvantages**
- It is affected by the extreme values of the sample.
---
Median
^^^
With the sample data sorted from smallest to largest, the median is the value right in the middle of the sample.
**Advantages**
- Extreme values do not affect it as much as the mean.
- Useful for comparing different samples.
- It is unique and always well defined.
**Disadvantages**
- It does not consider all the data in the sample.
- It is rarely used in further calculations.
````
## Measures of Dispersion
### Range
The **range** is the difference between the maximum and the minimum value of a sample:
$$Range=\max\left(x_1,x_2,\ldots ,x_N\right)-\min\left(x_1,x_2,\ldots ,x_N\right)$$
where $x_1,x_2,\ldots ,x_N$ are the sample data.
In the example of the grades of 32 students on a test with scores from 0 to 10, the results were:
print(f'Results: {resultados}')
In this case we have:
min_res = min(resultados)
print(f'The minimum grade is: {min_res}')
max_res = max(resultados)
print(f'The maximum grade is: {max_res}')
rango = max_res - min_res
print(f'Therefore the range is: {max_res} - {min_res} = {rango}')
### Quartiles
The **quartiles** are the values that divide the data into quarters.
- The first quartile (called the lower quartile or $Q_1$) is such that 25% of the data are below $Q_1$.
- The second quartile is the median; 50% of the data are below it.
- The third quartile (called the upper quartile or $Q_3$) is such that 75% of the data are below $Q_3$.
- The last quartile is the maximum value of the sample.
**Observation:** $Q_1$ is the median of the lower 50% of the sample and $Q_3$ is the median of the upper 50% of the sample.
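numpy can compute these values directly; a quick sketch using the resultados sample:
q1, q2, q3 = np.percentile(resultados, [25, 50, 75])
print(f'Q1={q1}, median={q2}, Q3={q3}')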
### Box Plots
It is possible to get an idea of the distribution of a data sample by examining the following five-number summary:
- The minimum value
- The first quartile
- The median (second quartile)
- The third quartile
- The maximum value
These 5 numbers can be represented graphically with a *box-and-whisker* diagram.
Let us look at an example with randomly generated data.
import plotly.graph_objects as go
import numpy as np
x0 = np.array([-0.01266288, -0.39623657, -2.27460173, 0.26492423, -0.37191596,
-0.0469952 , -1.12485845, 0.26766143, -1.74320972, 0.58269502,
0.56357888, -2.16268586, 0.65205293, 0.06388311, 0.86067789,
-1.19481468, -0.45478148, -0.86976107, -1.9288584 , 1.28710555,
0.17671311, -1.19529302, 0.69459011, 0.51450959, 1.81595071,
0.8890141 , -1.31808439, -1.57484991, 0.2511651 , 0.64026872,
-1.04312134, 0.59108169, 0.75979648, -1.44733236, 1.65422606,
-0.2734052 , 1.75192239, 1.03558314, 1.01046211, 0.73390352,
-0.82820519, -1.53824126, 0.58670701, -1.33037958, 1.34250693,
0.71374556, -0.80025983, -0.75024957, -1.75550578, -1.62384854])
fig = go.Figure()
fig.add_trace(go.Box(x=x0))
fig.show()
The difference $Q_3-Q_1$ is called the *interquartile range* and is denoted $IQR$.
### Outliers
The extreme values of a sample are called **outliers**.
```{admonition} Criterion for Identifying an Outlier
Any value that lies more than $1.5 \cdot IQR$ below $Q_1$ or above $Q_3$ is considered an outlier.
```
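A sketch of this criterion applied with numpy to the x0 sample from the box plot above:
q1, q3 = np.percentile(x0, [25, 75])
iqr = q3 - q1
outliers = x0[(x0 < q1 - 1.5 * iqr) | (x0 > q3 + 1.5 * iqr)]
print(f'Outliers found: {outliers}')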
#### When to Reject or Keep an Outlier
We have seen a criterion for identifying an outlier. One must then decide whether to accept or reject that outlier (in practice, this means removing the value from the sample or keeping it).
Outliers can have a large effect on statistical measures such as the mean, but some of them are valid data, and it is not acceptable to reject them without a well-founded reason.
Conversely, when an outlier is caused by a measurement error, it should be removed. For example, if we are studying the height of a population, a value of 3.0 meters is almost certainly a measurement error.
On the other hand, suppose the test results of 7 students are the following: 20%, 22%, 18%, 30%, 26%, 89% and 21%. If we conclude that the 89% is correctly recorded, then removing it would lead to the conclusion that the test was too hard for the students. However, given that there is no measurement error, keeping it could lead to the conclusion that the difficulty level of the test was adequate and that 6 of the 7 students did not prepare enough.
### Cumulative Frequency
The following data show the number of times 50 students lost a pencil during the week:
lapices = [5, 9, 10, 5, 9, 9, 8, 4, 9, 8, 5, 7, 3, 10, 7, 7, 8, 7, 6, 6, 9, 6, 4,
           4, 10, 5, 6, 6, 3, 8, 7, 8, 3, 4, 6, 6, 5, 7, 5, 4, 3, 5, 2, 4, 2, 8, 1, 0, 3, 5]
Let us build a cumulative frequency table, that is, in each row we record a number of lost pencils and the number of students who lost that many pencils or fewer:
frec_acum = [(i, sum([lapices.count(j) for j in range(i+1)])) for i in range(11)]
df_frec_acum = pd.DataFrame(
    frec_acum,
    columns=['num_lapices', 'Number of students who lost num_lapices or fewer pencils'])
df_frec_acum
Let us draw the cumulative frequency graph:
fig = px.line(
    df_frec_acum,
    x='num_lapices',
    y=['Number of students who lost num_lapices or fewer pencils',],
    title=f'Cumulative Frequency Graph')
fig.update_traces(mode='markers+lines')
fig.show()
- The smallest value on the $y$ axis of the graph is 1 and the largest value is 50, which coincides with the number of students.
- Since for each new value of the variable `num_lapices` we add more students, the graph can never be decreasing.
When all the data (*raw data*) are available, the formula
$$\mathrm{median}=\left(\frac{n+\mathrm{mod}\left(n,2\right)}{2}\right)^{\mathrm{th}}\ \mathrm{value}$$
can be used to compute the median and the quartiles once the data are sorted in ascending order. Here, $\mathrm{mod}\left(n,2\right)$ is the remainder of the integer division of $n$ by 2, so it is 0 if $n$ is even and 1 if $n$ is odd. For samples with many data points, this distinction becomes insignificant.
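A direct implementation of this formula (a sketch; the formula gives a 1-based position, hence the -1 when indexing):
def mediana_raw(datos):
    datos = sorted(datos)
    n = len(datos)
    k = (n + n % 2) // 2  # position given by the formula
    if n % 2 == 1:
        return datos[k - 1]
    return (datos[k - 1] + datos[k]) / 2  # even case: average the two middle values
print(mediana_raw(lapices))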
However, when only grouped data are available, it can be hard to determine the median or a quartile when that value falls inside one of the groups.
Cumulative frequency curves allow us to estimate the median and the quartiles from cumulative data. For example, to find the median, draw a horizontal line crossing the $y$ axis at the $\frac{n}{2}$th value, and from the intersection of that line with the cumulative frequency graph draw a vertical line downwards. The point where that vertical line crosses the $x$ axis corresponds to the median.
For example, in the case above:
fig = px.line(
    df_frec_acum,
    x='num_lapices',
    y=['Number of students who lost num_lapices or fewer pencils'],
    title=f'Cumulative Frequency Graph with Median Estimate')
fig.update_traces(mode='lines')
fig.add_hline(y=25, annotation_text=' y=25', line_color='green')
fig.add_vline(x=5.3, annotation_text=' x=5.3 => median=5.3\n', line_color='red')
fig.show()
### Percentiles
A *percentile* is a number such that a given percentage of the data lies below it. For example, if the 10% percentile is $P_{10}$, this means that 10% of the sample data lie below $P_{10}$.
Let us look at an example with a sample of 500 data points with random numbers between 1 and 100 (which could represent the scores of 500 students on a test).
np.random.seed(123456)
notas = np.random.lognormal(0, 1, 500) * 40
notas = [int(min(nota, 100)) for nota in notas]
notas_frec_acum = [(i, sum([notas.count(j) for j in range(i+1)])) for i in range(101)]
df_notas_frec_acum = pd.DataFrame(
notas_frec_acum,
    columns=['nota', 'Number of students with this score or lower'])
Let us look at the first 12 rows of the cumulative frequency table:
df_notas_frec_acum.head(12)
We can observe that there are 5 students (1.0%) with a score of 2 or lower and 7 students (1.4%) with a score of 3 or lower. This tells us that the 1% percentile of this sample lies "between" these two values, and that the 1% percentile is not an obtainable score (just as the mean often does not correspond to any measured value).
```{admonition} Computing a Percentile
:class: tip
In these situations there are several ways to compute a percentile, and it is important to be clear, depending on the context, which one is being used and how it is computed.
```
For example, in this case:
percentil = 1
metodo = 'lower'
print(f'Percentile {percentil/100:.1%}, method={metodo}: {np.percentile(notas, percentil, interpolation=metodo):.4f}')
metodo = 'higher'
print(f'Percentile {percentil/100:.1%}, method={metodo}: {np.percentile(notas, percentil, interpolation=metodo):.4f}')
metodo = 'midpoint'
print(f'Percentile {percentil/100:.1%}, method={metodo}: {np.percentile(notas, percentil, interpolation=metodo):.4f}')
metodo = 'linear'
print(f'Percentile {percentil/100:.1%}, method={metodo}: {np.percentile(notas, percentil, interpolation=metodo):.4f}')
In this example we are using numpy's `percentile` function, whose documentation follows. The part about the `interpolation` parameter explains how the different methods work.
print(np.percentile.__doc__)
### Variance and Standard Deviation
The range and the quartiles are good measures of how dispersed a data sample is around its median, but they do not use all the available data. The **variance**, on the other hand, is a dispersion metric that uses all the data in the sample. It reflects how far, on average, each data point is from the mean.
As an example, let us return to the results of 32 students on a test with scores between 0 and 10. The data were the following:
print(f'Grades of 32 students: {resultados}')
Let us tabulate the data and compute the variance step by step:
df_resultados = pd.DataFrame(resultados, columns=['nota',])
df_resultados
We compute the mean:
media = np.mean(df_resultados['nota'])
print(f'The mean is: {media:.2f}')
We compute the distance of each grade from the mean:
df_resultados['(nota - media)'] = df_resultados['nota'] - media
df_resultados
We now compute the square of the distance:
df_resultados['(nota - media)^2'] = df_resultados['(nota - media)'] ** 2
df_resultados
Squaring the distance achieves two goals:
- it prevents negative distances from cancelling out positive ones, which would give a false idea of the dispersion;
- it gives larger distances more weight in the final average.
Finally, the variance, usually denoted by $\sigma^2$, is given by:
$$\sigma^2=\frac{1}{n}\sum_{i=1}^n{\left(x_i-\mu\right)^2}$$
Here, $\mu$ is the mean of the data.
In the example, the variance turns out to be:
print(f"The variance is: {np.mean(df_resultados['(nota - media)^2']):.2f}")
Since the unit in which the **variance** is measured does not coincide with the unit of the data, the standard deviation, denoted by $\sigma$, is also defined:
$$\sigma=\sqrt{\frac{1}{n}\sum_{i=1}^n{\left(x_i-\mu\right)^2}}=\sqrt{\sigma^2}$$
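Both values can be checked directly with numpy (population variance and standard deviation, matching the formulas above):
print(f"Variance (numpy): {np.var(resultados):.2f}")
print(f"Standard deviation (numpy): {np.std(resultados):.2f}")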
#### Properties of the Standard Deviation
- The **standard deviation** is only used to measure dispersion around the mean.
- The **variance** and the **standard deviation** are never negative.
- The **standard deviation** is sensitive to *outliers*. A single *outlier* can change its value significantly.
- For data samples with a similar **mean**, the more dispersed the data, the larger the **standard deviation**.
|
python
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the functions needed for computing the molecular Hamiltonian.
"""
# pylint: disable= too-many-branches, too-many-arguments, too-many-locals, too-many-nested-blocks
import autograd.numpy as anp
import pennylane as qml
from pennylane import numpy as np
from pennylane.hf.hartree_fock import generate_scf, nuclear_energy
def generate_electron_integrals(mol, core=None, active=None):
r"""Return a function that computes the one- and two-electron integrals in the molecular orbital
basis.
The one- and two-electron integrals are required to construct a molecular Hamiltonian in the
second-quantized form
.. math::
H = \sum_{pq} h_{pq} c_p^{\dagger} c_q + \frac{1}{2} \sum_{pqrs} h_{pqrs} c_p^{\dagger} c_q^{\dagger} c_r c_s,
where :math:`c^{\dagger}` and :math:`c` are the creation and annihilation operators,
respectively, and :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals.
These integrals can be computed by integrating over molecular orbitals :math:`\phi` as
.. math::
h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right ) \phi_q(r) dr,
and
.. math::
h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|} dr_1 dr_2.
The molecular orbitals are constructed as a linear combination of atomic orbitals as
.. math::
\phi_i = \sum_{\nu}c_{\nu}^i \chi_{\nu}.
The one- and two-electron integrals can be written in the molecular orbital basis as
.. math::
h_{pq} = \sum_{\mu \nu} C_{p \mu} h_{\mu \nu} C_{\nu q},
and
.. math::
h_{pqrs} = \sum_{\mu \nu \rho \sigma} C_{p \mu} C_{q \nu} h_{\mu \nu \rho \sigma} C_{\rho r} C_{\sigma s}.
The :math:`h_{\mu \nu}` and :math:`h_{\mu \nu \rho \sigma}` terms refer to the elements of the
core matrix and the electron repulsion tensor, respectively, and :math:`C` is the molecular
orbital expansion coefficient matrix.
Args:
mol (Molecule): the molecule object
core (list[int]): indices of the core orbitals
active (list[int]): indices of the active orbitals
Returns:
function: function that computes the core constant, and the one- and two-electron integrals
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False)
>>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554],
>>> [3.42525091, 0.62391373, 0.1688554]], requires_grad=True)
>>> mol = qml.hf.Molecule(symbols, geometry, alpha=alpha)
>>> args = [alpha]
>>> generate_electron_integrals(mol)(*args)
(1.0,
array([[-1.3902192695e+00, 0.0000000000e+00],
[-4.4408920985e-16, -2.9165331336e-01]]),
array([[[[ 7.1443907755e-01, -2.7755575616e-17],
[ 5.5511151231e-17, 1.7024144301e-01]],
[[ 5.5511151231e-17, 1.7024144301e-01],
[ 7.0185315353e-01, 6.6613381478e-16]]],
[[[-1.3877787808e-16, 7.0185315353e-01],
[ 1.7024144301e-01, 2.2204460493e-16]],
[[ 1.7024144301e-01, -4.4408920985e-16],
[ 6.6613381478e-16, 7.3883668974e-01]]]]))
"""
def electron_integrals(*args):
r"""Compute the one- and two-electron integrals in the molecular orbital basis.
Args:
args (array[array[float]]): initial values of the differentiable parameters
Returns:
tuple[array[float]]: 1D tuple containing core constant, one- and two-electron integrals
"""
_, coeffs, _, h_core, repulsion_tensor = generate_scf(mol)(*args)
one = anp.einsum("qr,rs,st->qt", coeffs.T, h_core, coeffs)
two = anp.swapaxes(
anp.einsum(
"ab,cd,bdeg,ef,gh->acfh", coeffs.T, coeffs.T, repulsion_tensor, coeffs, coeffs
),
1,
3,
)
core_constant = nuclear_energy(mol.nuclear_charges, mol.coordinates)(*args)
if core is None and active is None:
return core_constant, one, two
for i in core:
core_constant = core_constant + 2 * one[i][i]
for j in core:
core_constant = core_constant + 2 * two[i][j][j][i] - two[i][j][i][j]
for p in active:
for q in active:
for i in core:
o = anp.zeros(one.shape)
o[p, q] = 1.0
one = one + (2 * two[i][p][q][i] - two[i][p][i][q]) * o
one = one[anp.ix_(active, active)]
two = two[anp.ix_(active, active, active, active)]
return core_constant, one, two
return electron_integrals
def generate_fermionic_hamiltonian(mol, cutoff=1.0e-12, core=None, active=None):
r"""Return a function that computes the fermionic hamiltonian.
Args:
mol (Molecule): the molecule object
cutoff (float): cutoff value for discarding the negligible electronic integrals
Returns:
function: function that computes the fermionic hamiltonian
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False)
>>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554],
>>> [3.42525091, 0.62391373, 0.1688554]], requires_grad=True)
>>> mol = qml.hf.Molecule(symbols, geometry, alpha=alpha)
>>> args = [alpha]
>>> h = generate_fermionic_hamiltonian(mol)(*args)
"""
def fermionic_hamiltonian(*args):
r"""Compute the fermionic hamiltonian.
Args:
args (array[array[float]]): initial values of the differentiable parameters
Returns:
tuple(array[float], list[list[int]]): the Hamiltonian coefficients and operators
"""
core_constant, one, two = generate_electron_integrals(mol, core, active)(*args)
core_constant = anp.array([core_constant])
indices_one = anp.argwhere(abs(one) >= cutoff)
operators_one = (indices_one * 2).tolist() + (
indices_one * 2 + 1
).tolist() # up-up + down-down terms
coeffs_one = anp.tile(one[abs(one) >= cutoff], 2)
indices_two = anp.argwhere(abs(two) >= cutoff)
n = len(indices_two)
operators_two = (
[(indices_two[i] * 2).tolist() for i in range(n)] # up-up-up-up term
+ [
(indices_two[i] * 2 + [0, 1, 1, 0]).tolist() for i in range(n)
] # up-down-down-up term
+ [
(indices_two[i] * 2 + [1, 0, 0, 1]).tolist() for i in range(n)
] # down-up-up-down term
+ [(indices_two[i] * 2 + 1).tolist() for i in range(n)] # down-down-down-down term
)
coeffs_two = anp.tile(two[abs(two) >= cutoff], 4) / 2
coeffs = anp.concatenate((core_constant, coeffs_one, coeffs_two))
operators = [[]] + operators_one + operators_two
indices_sort = [operators.index(i) for i in sorted(operators)]
return coeffs[indices_sort], sorted(operators)
return fermionic_hamiltonian
def generate_hamiltonian(mol, cutoff=1.0e-12, core=None, active=None):
r"""Return a function that computes the qubit hamiltonian.
Args:
mol (Molecule): the molecule object
cutoff (float): cutoff value for discarding the negligible electronic integrals
Returns:
function: function that computes the qubit hamiltonian
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False)
>>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554],
>>> [3.42525091, 0.62391373, 0.1688554]], requires_grad=True)
>>> mol = qml.hf.Molecule(symbols, geometry, alpha=alpha)
>>> args = [alpha]
>>> h = generate_hamiltonian(mol)(*args)
>>> h.terms[0]
tensor([ 0.29817879+0.j, 0.20813365+0.j, 0.20813365+0.j,
0.17860977+0.j, 0.04256036+0.j, -0.04256036+0.j,
-0.04256036+0.j, 0.04256036+0.j, -0.34724873+0.j,
0.13290293+0.j, -0.34724873+0.j, 0.17546329+0.j,
0.17546329+0.j, 0.13290293+0.j, 0.18470917+0.j], requires_grad=True)
"""
def hamiltonian(*args):
r"""Compute the qubit hamiltonian.
Args:
args (array[array[float]]): initial values of the differentiable parameters
Returns:
Hamiltonian: the qubit Hamiltonian
"""
h_ferm = generate_fermionic_hamiltonian(mol, cutoff, core, active)(*args)
ops = []
for n, t in enumerate(h_ferm[1]):
if len(t) == 0:
coeffs = np.array([h_ferm[0][n]])
ops = ops + [qml.Identity(0)]
elif len(t) == 2:
op = _generate_qubit_operator(t)
if op != 0:
for i, o in enumerate(op[1]):
if len(o) == 0:
op[1][i] = qml.Identity(0)
if len(o) == 1:
op[1][i] = _return_pauli(o[0][1])(o[0][0])
if len(o) > 1:
k = qml.Identity(0)
for o_ in o:
k = k @ _return_pauli(o_[1])(o_[0])
op[1][i] = k
coeffs = np.concatenate([coeffs, np.array(op[0]) * h_ferm[0][n]])
ops = ops + op[1]
elif len(t) == 4:
op = _generate_qubit_operator(t)
if op != 0:
for i, o in enumerate(op[1]):
if len(o) == 0:
op[1][i] = qml.Identity(0)
if len(o) == 1:
op[1][i] = _return_pauli(o[0][1])(o[0][0])
if len(o) > 1:
k = qml.Identity(0)
for o_ in o:
k = k @ _return_pauli(o_[1])(o_[0])
op[1][i] = k
coeffs = np.concatenate([coeffs, np.array(op[0]) * h_ferm[0][n]])
ops = ops + op[1]
h = qml.Hamiltonian(coeffs, ops, simplify=True)
return h
return hamiltonian
def _generate_qubit_operator(op):
r"""Convert a fermionic operator to a qubit operator using the Jordan-Wigner mapping.
The one-body fermionic operator :math:`a_2^\dagger a_0` is constructed as [2, 0] and the
two-body operator :math:`a_4^\dagger a_3^\dagger a_2 a_1` is constructed as [4, 3, 2, 1].
Args:
op (list[int]): the fermionic operator
Returns
tuple(list[complex], list[list[int, str]]): list of coefficients and the qubit-operator terms
**Example**
>>> f = [0, 0]
>>> q = _generate_qubit_operator(f)
>>> q
([(0.5+0j), (-0.5+0j)], [[], [(0, 'Z')]]) # corresponds to :math:`\frac{1}{2}(I_0 - Z_0)`
"""
if len(op) == 2:
op = [((op[0], 1), (op[1], 0))]
if len(op) == 4:
op = [((op[0], 1), (op[1], 1), (op[2], 0), (op[3], 0))]
if op[0][0][0] == op[0][1][0] or op[0][2][0] == op[0][3][0]:
return 0
for t in op:
for l in t:
z = [(index, "Z") for index in range(l[0])]
x = z + [(l[0], "X"), 0.5]
if l[1]:
y = z + [(l[0], "Y"), -0.5j]
else:
y = z + [(l[0], "Y"), 0.5j]
if t.index(l) == 0:
q = [x, y]
else:
m = []
for t1 in q:
for t2 in [x, y]:
q1, c1 = _pauli_mult(t1[:-1], t2[:-1])
m.append(q1 + [c1 * t1[-1] * t2[-1]])
q = m
c = [p[-1] for p in q]
o = [p[:-1] for p in q]
for item in o:
k = [i for i, x in enumerate(o) if x == item]
if len(k) >= 2:
for j in k[::-1][:-1]:
del o[j]
c[k[0]] = c[k[0]] + c[j]
del c[j]
return c, o
def _pauli_mult(p1, p2):
r"""Return the result of multiplication between two tensor products of Pauli operators.
The Pauli operator :math:`(P_0)` is denoted by [(0, 'P')], where :math:`P` represents
:math:`X`, :math:`Y` or :math:`Z`.
Args:
p1 (list[tuple[int, str]]): the first tensor product of Pauli operators
p2 (list[tuple[int, str]]): the second tensor product of Pauli operators
Returns
tuple(list[tuple[int, str]], complex): list of the Pauli operators and the coefficient
**Example**
>>> p1 = [(0, "X"), (1, "Y")], # X_0 @ Y_1
>>> p2 = [(0, "X"), (2, "Y")], # X_0 @ Y_2
>>> _pauli_mult(p1, p2)
([(2, "Y"), (1, "Y")], 1.0) # p1 @ p2 = X_0 @ Y_1 @ X_0 @ Y_2
"""
c = 1.0
t1 = [t[0] for t in p1]
t2 = [t[0] for t in p2]
k = []
for i in p1:
if i[0] in t1 and i[0] not in t2:
k.append((i[0], pauli_mult[i[1]]))
for j in p2:
if j[0] in t2 and j[0] not in t1:
k.append((j[0], pauli_mult[j[1]]))
if i[0] == j[0]:
if i[1] + j[1] in pauli_coeff:
k.append((i[0], pauli_mult[i[1] + j[1]]))
c = c * pauli_coeff[i[1] + j[1]]
else:
k.append((i[0], pauli_mult[i[1] + j[1]]))
k = [i for i in k if "I" not in i[1]]
for item in k:
k_ = [i for i, x in enumerate(k) if x == item]
if len(k_) >= 2:
for j in k_[::-1][:-1]:
del k[j]
return k, c
def _return_pauli(p):
r"""Return the PennyLane Pauli operator.
Args:
args (str): symbol representing the Pauli operator
Returns:
pennylane.ops: the PennyLane Pauli operator
**Example**
>>> _return_pauli('X')
qml.PauliX
"""
if p == "X":
return qml.PauliX
if p == "Y":
return qml.PauliY
return qml.PauliZ
pauli_mult = {
"XX": "I",
"YY": "I",
"ZZ": "I",
"ZX": "Y",
"XZ": "Y",
"ZY": "X",
"YZ": "X",
"XY": "Z",
"YX": "Z",
"IX": "X",
"IY": "Y",
"IZ": "Z",
"XI": "X",
"YI": "Y",
"ZI": "Z",
"I": "I",
"II": "I",
"X": "X",
"Y": "Y",
"Z": "Z",
}
pauli_coeff = {
"ZX": 1.0j,
"XZ": -1.0j,
"ZY": -1.0j,
"YZ": 1.0j,
"XY": 1.0j,
"YX": -1.0j,
}
|
python
|
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from txircd.utils import ircLower
from zope.interface import implements
from fnmatch import fnmatchcase
class WhoCommand(ModuleData, Command):
implements(IPlugin, IModuleData, ICommand)
name = "WhoCommand"
core = True
def userCommands(self):
return [ ("WHO", 1, self) ]
def parseParams(self, user, params, prefix, tags):
if not params:
return {
"mask": "*"
}
if len(params) > 1 and params[1] == "o":
return {
"mask": params[0],
"opersonly": True
}
return {
"mask": params[0]
}
def execute(self, user, data):
matchingUsers = []
channel = None
mask = data["mask"]
if mask in ("0", "*"):
for targetUser in self.ircd.users.itervalues():
if not targetUser.isRegistered():
continue
if not set(user.channels).intersection(targetUser.channels) and self.ircd.runActionUntilValue("showuser", user, targetUser, users=[user, targetUser]) is not False:
matchingUsers.append(targetUser)
elif mask in self.ircd.channels:
channel = self.ircd.channels[data["mask"]]
for targetUser in channel.users.iterkeys():
if self.ircd.runActionUntilValue("showchanneluser", channel, user, targetUser, users=[user, targetUser], channels=[channel]) is not False:
matchingUsers.append(targetUser)
else:
for targetUser in self.ircd.users.itervalues():
if not targetUser.isRegistered():
continue # We should exclude all unregistered users from this search
if self.ircd.runActionUntilValue("showuser", user, targetUser, users=[user, targetUser]) is False:
continue
lowerMask = ircLower(mask)
serverName = self.ircd.name if targetUser.uuid[:3] == self.ircd.serverID else self.ircd.servers[targetUser.uuid[:3]].name
if fnmatchcase(ircLower(targetUser.host()), lowerMask) or fnmatchcase(ircLower(targetUser.gecos), lowerMask) or fnmatchcase(ircLower(serverName), lowerMask) or fnmatchcase(ircLower(targetUser.nick), lowerMask):
matchingUsers.append(targetUser)
if "opersonly" in data:
allMatches = matchingUsers
matchingUsers = []
for targetUser in allMatches:
if self.ircd.runActionUntilValue("userhasoperpermission", targetUser, "", users=[targetUser]):
matchingUsers.append(targetUser)
for targetUser in matchingUsers:
server = self.ircd if targetUser.uuid[:3] == self.ircd.serverID else self.ircd.servers[targetUser.uuid[:3]]
serverName = server.name
isOper = self.ircd.runActionUntilValue("userhasoperpermission", targetUser, "", users=[targetUser])
isAway = targetUser.metadataKeyExists("away")
status = self.ircd.runActionUntilValue("channelstatuses", channel, targetUser, user, users=[targetUser, user], channels=[channel]) if channel else ""
hopcount = 0
if user.uuid[:3] != self.ircd.serverID:
countingServer = server
hopcount = 1
while countingServer.nextClosest != self.ircd.serverID:
countingServer = self.ircd.servers[countingServer.nextClosest]
hopcount += 1
user.sendMessage(irc.RPL_WHOREPLY, mask, targetUser.ident, targetUser.host(), serverName, targetUser.nick, "{}{}{}".format("G" if isAway else "H", "*" if isOper else "", status), "{} {}".format(hopcount, targetUser.gecos))
user.sendMessage(irc.RPL_ENDOFWHO, mask, "End of /WHO list")
return True
whoCommand = WhoCommand()
|
python
|
"""Abstractions for handling operations with reaktor `WishList` and `Voucher` (`GiftCards`) objects."""
|
python
|
#! /usr/bin/env python3
# -*- coding: utf8 -*-
"""Port of NeHe Lesson 26 by Ivan Izuver <[email protected]>"""
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from PIL import Image
import sys
import gc
ESCAPE = b'\033'
# Number of the glut window.
window = 0
LightAmb = (0.7, 0.7, 0.7)       # Ambient light
LightDif = (1.0, 1.0, 0.0)       # Diffuse light
LightPos = (4.0, 4.0, 6.0, 1.0)  # Position of the light source
# q = GLUquadricObj()
xrot = yrot = 0.0                # Rotation around X and Y
xrotspeed = yrotspeed = 0.0      # Rotation speed around X and Y
zoom = -3.0                      # Scene depth into the screen
height = 0.5                     # Height of the ball above the floor
textures = {}
def LoadTextures(fname):
if textures.get(fname) is not None:
return textures.get(fname)
texture = textures[fname] = glGenTextures(1)
image = Image.open(fname)
ix = image.size[0]
iy = image.size[1]
image = image.tobytes('raw', 'RGBX', 0, -1)
# Create Texture
glBindTexture(GL_TEXTURE_2D, texture) # 2d texture (x and y size)
glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0,
GL_RGBA, GL_UNSIGNED_BYTE, image)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)
return texture
# A general OpenGL initialization function. Sets all of the initial parameters.
# We call this right after our OpenGL window is created.
def InitGL(Width, Height):
# This Will Clear The Background Color To Black
glClearColor(0.2, 0.5, 1.0, 1.0)
glClearDepth(1.0) # Enables Clearing Of The Depth Buffer
glClearStencil(0)
glDepthFunc(GL_LEQUAL) # The Type Of Depth Test To Do
glEnable(GL_DEPTH_TEST) # Enables Depth Testing
glShadeModel(GL_SMOOTH) # Enables Smooth Color Shading
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)
glEnable(GL_TEXTURE_2D)
glLightfv(GL_LIGHT0, GL_AMBIENT, LightAmb)
glLightfv(GL_LIGHT0, GL_DIFFUSE, LightDif)
glLightfv(GL_LIGHT0, GL_POSITION, LightPos)
glEnable(GL_LIGHT0)
glEnable(GL_LIGHTING)
glMatrixMode(GL_PROJECTION)
glLoadIdentity() # Reset The Projection Matrix
# Calculate The Aspect Ratio Of The Window
gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
glMatrixMode(GL_MODELVIEW)
# The function called when our window is resized (which shouldn't happen if you enable fullscreen, below)
def ReSizeGLScene(Width, Height):
if Height == 0: # Prevent A Divide By Zero If The Window Is Too Small
Height = 1
# Reset The Current Viewport And Perspective Transformation
glViewport(0, 0, Width, Height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
glMatrixMode(GL_MODELVIEW)
def DrawObject():
glColor3f(1.0, 1.0, 1.0)
glBindTexture(GL_TEXTURE_2D, LoadTextures('NeHe.bmp'))
Q = gluNewQuadric()
gluQuadricNormals(Q, GL_SMOOTH)
gluQuadricTexture(Q, GL_TRUE)
glTexGeni(GL_S, GL_TEXTURE_GEN_MODE, GL_SPHERE_MAP)
glTexGeni(GL_T, GL_TEXTURE_GEN_MODE, GL_SPHERE_MAP)
gluSphere(Q, 0.35, 32, 16)
glColor4f(1.0, 1.0, 1.0, 0.4)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE)
glEnable(GL_TEXTURE_GEN_S)
glEnable(GL_TEXTURE_GEN_T)
gluSphere(Q, 0.35, 32, 16)
glDisable(GL_TEXTURE_GEN_S)
glDisable(GL_TEXTURE_GEN_T)
glDisable(GL_BLEND)
gluDeleteQuadric(Q)
def DrawFloor():
glBindTexture(GL_TEXTURE_2D, LoadTextures('NeHe2.bmp'))
glBegin(GL_QUADS) # Begin draw
glNormal3f(0.0, 1.0, 0.0) # Upper normal
glTexCoord2f(0.0, 1.0) # bottom left side of texture
glVertex3f(-2.0, 0.0, 2.0) # bottom left angle of floor
glTexCoord2f(0.0, 0.0) # upper left side of texture
glVertex3f(-2.0, 0.0, -2.0) # upper left angle of floor
glTexCoord2f(1.0, 0.0) # upper right side of texture
glVertex3f(2.0, 0.0, -2.0) # upper right angle of floor
glTexCoord2f(1.0, 1.0) # bottom right side of texture
glVertex3f(2.0, 0.0, 2.0) # bottom right angle of floor
glEnd() # finish draw
# The main drawing function. GLUT invokes the registered display callback with
# no arguments, so the old "deactivate" guard made this a no-op; it is removed.
def DrawGLScene():
# Clear The Screen And The Depth Buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT)
eqr = (0.0, -1.0, 0.0, 0.0)
glLoadIdentity() # Reset The View
glTranslatef(0.0, -0.6, zoom)
glColorMask(0, 0, 0, 0)
glEnable(GL_STENCIL_TEST)
glStencilFunc(GL_ALWAYS, 1, 1)
glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE)
glDisable(GL_DEPTH_TEST)
DrawFloor()
glEnable(GL_DEPTH_TEST)
glColorMask(1, 1, 1, 1)
glStencilFunc(GL_EQUAL, 1, 1)
glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP)
glEnable(GL_CLIP_PLANE0)
glClipPlane(GL_CLIP_PLANE0, eqr)
glPushMatrix()
glScalef(1.0, -1.0, 1.0)
glLightfv(GL_LIGHT0, GL_POSITION, LightPos)
glTranslatef(0.0, height, 0.0)
glRotatef(xrot, 1.0, 0.0, 0.0)
glRotatef(yrot, 0.0, 1.0, 0.0)
DrawObject()
glPopMatrix()
glDisable(GL_CLIP_PLANE0)
glDisable(GL_STENCIL_TEST)
glLightfv(GL_LIGHT0, GL_POSITION, LightPos)
glEnable(GL_BLEND)
glDisable(GL_LIGHTING)
glColor4f(1.0, 1.0, 1.0, 0.8)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
DrawFloor()
glEnable(GL_LIGHTING)
glDisable(GL_BLEND)
glTranslatef(0.0, height, 0.0)
glRotatef(xrot, 1.0, 0.0, 0.0)
glRotatef(yrot, 0.0, 1.0, 0.0)
DrawObject()
glFlush()
# since this is double buffered, swap the buffers to display what just got
# drawn.
glutSwapBuffers()
# The function called whenever a key is pressed. GLUT passes in
# (key, x, y); we capture them with *args.
def keyPressed(*args):
global window
# If escape is pressed, kill everything.
    if args[0] == ESCAPE:
        glutDestroyWindow(window)
        sys.exit()
def main():
global window
# pass arguments to init
glutInit(sys.argv)
# Select type of Display mode:
# Double buffer
# RGBA color
# Alpha components supported
# Depth buffer
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
# get a 640 x 480 window
glutInitWindowSize(640, 480)
# the window starts at the upper left corner of the screen
glutInitWindowPosition(0, 0)
# Okay, like the C version we retain the window id to use when closing, but for those of you new
# to Python (like myself), remember this assignment would make the variable local and not global
# if it weren't for the global declaration at the start of main.
window = glutCreateWindow("Realistic Reflection by RISC")
# Register the drawing function with glut, BUT in Python land, at least using PyOpenGL, we need to
# set the function pointer and invoke a function to actually register the callback, otherwise it
# would be very much like the C version of the code.
glutDisplayFunc(DrawGLScene)
# Uncomment this line to get full screen.
# glutFullScreen()
# When we are doing nothing, redraw the scene.
glutIdleFunc(DrawGLScene)
# Register the function called when our window is resized.
glutReshapeFunc(ReSizeGLScene)
# Register the function called when the keyboard is pressed.
glutKeyboardFunc(keyPressed)
# Print message to console, and kick off the main to get it rolling.
print('Hit ESC key to quit.')
# Initialize our window.
InitGL(640, 480)
# Start Event Processing Engine
glutMainLoop()
if __name__ == "__main__":
main()
|
python
|
import functools
from collections import deque
try:
import statistics
stdev = statistics.stdev
mean = statistics.mean
except ImportError:
stdev = None
def mean(l):
return sum(l) / len(l)
try:
import time
clock = time.perf_counter
except Exception:
import timeit
clock = timeit.default_timer
class tfloat(float):
color = 39
def __str__(self):
n = self * 1000
return '\x1b[%dm%f\x1b[mms' % (self.color, n)
def profile(func):
name = func.__name__
    samples = deque(maxlen=5)
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if not self.debug_enabled:
return func(self, *args, **kwargs)
start = clock()
ret = func(self, *args, **kwargs)
n = tfloat(clock() - start)
if len(samples) < 2:
m = 0
d = 0
n.color = 36
else:
m = mean(samples)
if stdev:
d = tfloat(stdev(samples))
else:
d = 0
if n <= m + d:
n.color = 32
elif n > m + d * 2:
n.color = 31
else:
n.color = 33
samples.append(n)
        self.info(
            '\x1b[34m%s\x1b[m t = %s, \u00b5 = %s, \u03c3 = %s', name, n, m, d
        )
return ret
return wrapper
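# Minimal usage sketch (illustration only, not part of the module above).
# The decorator assumes the instance exposes a `debug_enabled` flag and an
# `info(fmt, *args)` logging method, as referenced inside `wrapper`.
if __name__ == '__main__':
    class Demo:
        debug_enabled = True

        def info(self, fmt, *args):
            print(fmt % args)

        @profile
        def work(self):
            return sum(range(100000))

    demo = Demo()
    for _ in range(6):
        demo.work()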
|
python
|
from output.models.nist_data.atomic.non_negative_integer.schema_instance.nistschema_sv_iv_atomic_non_negative_integer_pattern_3_xsd.nistschema_sv_iv_atomic_non_negative_integer_pattern_3 import NistschemaSvIvAtomicNonNegativeIntegerPattern3
__all__ = [
"NistschemaSvIvAtomicNonNegativeIntegerPattern3",
]
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MybankCreditLoantradeNewloanarRepayApplyModel(object):
def __init__(self):
self._apply_repay_fee = None
self._apply_repay_int = None
self._apply_repay_penalty = None
self._apply_repay_prin = None
self._cust_iprole_id = None
self._loan_ar_no = None
self._repay_amt = None
self._repay_card_no = None
self._repay_type = None
self._request_id = None
@property
def apply_repay_fee(self):
return self._apply_repay_fee
@apply_repay_fee.setter
def apply_repay_fee(self, value):
self._apply_repay_fee = value
@property
def apply_repay_int(self):
return self._apply_repay_int
@apply_repay_int.setter
def apply_repay_int(self, value):
self._apply_repay_int = value
@property
def apply_repay_penalty(self):
return self._apply_repay_penalty
@apply_repay_penalty.setter
def apply_repay_penalty(self, value):
self._apply_repay_penalty = value
@property
def apply_repay_prin(self):
return self._apply_repay_prin
@apply_repay_prin.setter
def apply_repay_prin(self, value):
self._apply_repay_prin = value
@property
def cust_iprole_id(self):
return self._cust_iprole_id
@cust_iprole_id.setter
def cust_iprole_id(self, value):
self._cust_iprole_id = value
@property
def loan_ar_no(self):
return self._loan_ar_no
@loan_ar_no.setter
def loan_ar_no(self, value):
self._loan_ar_no = value
@property
def repay_amt(self):
return self._repay_amt
@repay_amt.setter
def repay_amt(self, value):
self._repay_amt = value
@property
def repay_card_no(self):
return self._repay_card_no
@repay_card_no.setter
def repay_card_no(self, value):
self._repay_card_no = value
@property
def repay_type(self):
return self._repay_type
@repay_type.setter
def repay_type(self, value):
self._repay_type = value
@property
def request_id(self):
return self._request_id
@request_id.setter
def request_id(self, value):
self._request_id = value
def to_alipay_dict(self):
params = dict()
if self.apply_repay_fee:
if hasattr(self.apply_repay_fee, 'to_alipay_dict'):
params['apply_repay_fee'] = self.apply_repay_fee.to_alipay_dict()
else:
params['apply_repay_fee'] = self.apply_repay_fee
if self.apply_repay_int:
if hasattr(self.apply_repay_int, 'to_alipay_dict'):
params['apply_repay_int'] = self.apply_repay_int.to_alipay_dict()
else:
params['apply_repay_int'] = self.apply_repay_int
if self.apply_repay_penalty:
if hasattr(self.apply_repay_penalty, 'to_alipay_dict'):
params['apply_repay_penalty'] = self.apply_repay_penalty.to_alipay_dict()
else:
params['apply_repay_penalty'] = self.apply_repay_penalty
if self.apply_repay_prin:
if hasattr(self.apply_repay_prin, 'to_alipay_dict'):
params['apply_repay_prin'] = self.apply_repay_prin.to_alipay_dict()
else:
params['apply_repay_prin'] = self.apply_repay_prin
if self.cust_iprole_id:
if hasattr(self.cust_iprole_id, 'to_alipay_dict'):
params['cust_iprole_id'] = self.cust_iprole_id.to_alipay_dict()
else:
params['cust_iprole_id'] = self.cust_iprole_id
if self.loan_ar_no:
if hasattr(self.loan_ar_no, 'to_alipay_dict'):
params['loan_ar_no'] = self.loan_ar_no.to_alipay_dict()
else:
params['loan_ar_no'] = self.loan_ar_no
if self.repay_amt:
if hasattr(self.repay_amt, 'to_alipay_dict'):
params['repay_amt'] = self.repay_amt.to_alipay_dict()
else:
params['repay_amt'] = self.repay_amt
if self.repay_card_no:
if hasattr(self.repay_card_no, 'to_alipay_dict'):
params['repay_card_no'] = self.repay_card_no.to_alipay_dict()
else:
params['repay_card_no'] = self.repay_card_no
if self.repay_type:
if hasattr(self.repay_type, 'to_alipay_dict'):
params['repay_type'] = self.repay_type.to_alipay_dict()
else:
params['repay_type'] = self.repay_type
if self.request_id:
if hasattr(self.request_id, 'to_alipay_dict'):
params['request_id'] = self.request_id.to_alipay_dict()
else:
params['request_id'] = self.request_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MybankCreditLoantradeNewloanarRepayApplyModel()
if 'apply_repay_fee' in d:
o.apply_repay_fee = d['apply_repay_fee']
if 'apply_repay_int' in d:
o.apply_repay_int = d['apply_repay_int']
if 'apply_repay_penalty' in d:
o.apply_repay_penalty = d['apply_repay_penalty']
if 'apply_repay_prin' in d:
o.apply_repay_prin = d['apply_repay_prin']
if 'cust_iprole_id' in d:
o.cust_iprole_id = d['cust_iprole_id']
if 'loan_ar_no' in d:
o.loan_ar_no = d['loan_ar_no']
if 'repay_amt' in d:
o.repay_amt = d['repay_amt']
if 'repay_card_no' in d:
o.repay_card_no = d['repay_card_no']
if 'repay_type' in d:
o.repay_type = d['repay_type']
if 'request_id' in d:
o.request_id = d['request_id']
return o
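# Minimal usage sketch (illustration only): round-trip a model through the
# plain-dict form used by the SDK; the field values here are made up.
if __name__ == '__main__':
    model = MybankCreditLoantradeNewloanarRepayApplyModel()
    model.loan_ar_no = '2021123456'
    model.repay_amt = '100.00'
    params = model.to_alipay_dict()
    restored = MybankCreditLoantradeNewloanarRepayApplyModel.from_alipay_dict(params)
    assert restored.loan_ar_no == model.loan_ar_no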
|
python
|
"""
# Syntax of search templates
"""
import re
# SYNTACTIC ANALYSIS OF SEARCH TEMPLATE ###
QWHERE = "/where/"
QHAVE = "/have/"
QWITHOUT = "/without/"
QWITH = "/with/"
QOR = "/or/"
QEND = "/-/"
QINIT = {QWHERE, QWITHOUT, QWITH}
QCONT = {QHAVE, QOR}
QTERM = {QEND}
PARENT_REF = ".."
ESCAPES = (
"\\\\",
"\\ ",
"\\t",
"\\n",
"\\|",
"\\=",
)
VAL_ESCAPES = {
"\\|",
"\\=",
}
opPat = r"(?:[.#&|\[\]<>:=-]+\S*)"
atomOpPat = r"(\s*)({op})\s+([^ \t=#<>~*]+)(?:(?:\s*\Z)|(?:\s+(.*)))$".format(op=opPat)
atomPat = r"(\s*)([^ \t=#<>~*]+)(?:(?:\s*\Z)|(?:\s+(.*)))$"
compPat = r"^([a-zA-Z0-9-@_]+)([<>])(.*)$"
identPat = r"^([a-zA-Z0-9-@_]+)([=#])(.+)$"
indentLinePat = r"^(\s*)(.*)"
kPat = r"^([^0-9]*)([0-9]+)([^0-9]+)$"
namePat = r"[A-Za-z0-9_.-]+"
namesPat = r"^\s*(?:{op}\s+)?([^ \t:=#<>~*]+):".format(op=opPat)
nonePat = r"^([a-zA-Z0-9-@_]+)(#?)\s*$"
truePat = r"^([a-zA-Z0-9-@_]+)[*]\s*$"
numPat = r"^-?[0-9]+$"
opLinePat = r"^(\s*)({op})\s*$".format(op=opPat)
opStripPat = r"^\s*{op}\s+(.*)$".format(op=opPat)
quPat = f"(?:{QWHERE}|{QHAVE}|{QWITHOUT}|{QWITH}|{QOR}|{QEND})"
quLinePat = r"^(\s*)({qu})\s*$".format(qu=quPat)
relPat = r"^(\s*)({nm})\s+({op})\s+({nm})\s*$".format(nm=namePat, op=opPat)
rePat = r"^([a-zA-Z0-9-@_]+)~(.*)$"
atomOpRe = re.compile(atomOpPat)
atomRe = re.compile(atomPat)
compRe = re.compile(compPat)
identRe = re.compile(identPat)
indentLineRe = re.compile(indentLinePat)
kRe = re.compile(kPat)
nameRe = re.compile(f"^{namePat}$")
namesRe = re.compile(namesPat)
numRe = re.compile(numPat)
noneRe = re.compile(nonePat)
trueRe = re.compile(truePat)
opLineRe = re.compile(opLinePat)
opStripRe = re.compile(opStripPat)
quLineRe = re.compile(quLinePat)
relRe = re.compile(relPat)
reRe = re.compile(rePat)
whiteRe = re.compile(r"^\s*(%|$)")
reTp = type(reRe)
def syntax(searchExe):
error = searchExe.api.TF.error
_msgCache = searchExe._msgCache
searchExe.good = True
searchExe.badSyntax = []
searchExe.searchLines = searchExe.searchTemplate.split("\n")
offset = searchExe.offset
_tokenize(searchExe)
if not searchExe.good:
searchExe.showOuterTemplate(_msgCache)
for (i, line) in enumerate(searchExe.searchLines):
error(f"{i + offset:>2} {line}", tm=False, cache=_msgCache)
for (ln, eline) in searchExe.badSyntax:
txt = eline if ln is None else f"line {ln + offset}: {eline}"
error(txt, tm=False, cache=_msgCache)
def _tokenize(searchExe):
tokens = []
def lastAtomToken():
for token in reversed(tokens):
kind = token["kind"]
if kind == "feat":
continue
if kind == "atom" and "otype" in token:
return token
return None
return None
def readFeatures(x, i):
features = {}
featureString = x.replace("\\ ", chr(1)) if x is not None else ""
featureList = featureString.split()
good = True
for featStr in featureList:
if not parseFeatureVals(searchExe, featStr, features, i):
good = False
return features if good else None
searchLines = searchExe.searchLines
allGood = True
    # The template may contain nested quantifiers.
    # However, we detect only the outer level of quantifiers.
    # Everything contained in a quantifier is collected in
    # a new search template, verbatim, without interpretation,
    # because it will be fed to search() on another instance.
    # We only strip the quantified lines of the outermost quantifiers.
    # We maintain the current quantifier, None if there is none.
    # We also remember the indentation of the current quantifier.
    # We collect the templates within the quantifier in a list of strings.
    # We add all the material into a quantifier tuple of the shape
    # (quKind, quTemplates, ln), attached to the preceding atom token.
    # Because indentation is not indicative of quantifier nesting
    # we need to maintain a stack of inner quantifiers,
    # just to be able to determine which quantifier keywords
    # belong to the outer-level quantifiers.
curQu = []
curQuTemplates = None
for (i, line) in enumerate(searchLines):
if whiteRe.match(line):
continue
opFeatures = {}
# first check whether we have a line with a quantifier
# and what the indent on the line is
match = quLineRe.match(line)
if match:
(indent, lineQuKind) = match.groups()
else:
lineQuKind = None
match = indentLineRe.match(line)
indent = match.group(1)
lineIndent = len(indent)
# QUANTIFIER FILTERING
#
# now check whether we are in a quantifier or not
# and determine whether a quantifier starts or ends here
# we have the following possible situations:
#
# UUO no outer - no q-keyword
#
# UBO no outer - q-keyword
# * ES no start keyword
# * ET no preceding token
# * EA no preceding atom
# * EI preceding atom not the same indentation
#
# PBI outer - q-keyword init
#
# PPO outer - no q-keyword
#
# PPI inner - no q-keyword
#
# PCO outer - q-keyword continue
# * EP wrong precursor
# * EK preceding keyword not the same indentation
#
# PCI inner - q-keyword continue
# * EP wrong precursor
# * EK preceding keyword not the same indentation
#
# PEO outer - q-keyword end
# * EP wrong precursor
# * EK preceding keyword not the same indentation
#
# PEI inner - q-keyword end
# * EP wrong precursor
# * EK preceding keyword not the same indentation
#
# at the end we may have a non-empty quantifier stack:
# * generate an unterminated quantifier error for each member
# of the stack
# first we determine what is the case and we store it in booleans
curQuLine = None
curQuKind = None
curQuIndent = None
curQuDepth = len(curQu)
if curQuDepth:
(curQuLine, curQuKind, curQuIndent) = curQu[-1]
UUO = not curQuDepth and not lineQuKind
UBO = not curQuDepth and lineQuKind
PBI = curQuDepth and lineQuKind in QINIT
PPO = curQuDepth == 1 and not lineQuKind
PPI = curQuDepth > 1 and not lineQuKind
PCO = curQuDepth == 1 and lineQuKind in QCONT
PCI = curQuDepth > 1 and lineQuKind in QCONT
PEO = curQuDepth == 1 and lineQuKind in QTERM
PEI = curQuDepth > 1 and lineQuKind in QTERM
(ES, ET, EA, EI, EP, EK) = (False,) * 6
if UBO:
ES = lineQuKind not in QINIT
ET = len(tokens) == 0
lastAtom = lastAtomToken()
            EA = len(tokens) and not lastAtom
            EI = lastAtom is not None and lastAtom["indent"] != lineIndent
# EA = (len(tokens) and tokens[-1]['kind'] != 'atom' and 'otype' not in tokens[-1])
# EI = (len(tokens) and tokens[-1]['indent'] != lineIndent)
if PCO or PCI:
EP = (lineQuKind == QHAVE and curQuKind != QWHERE) or (
lineQuKind == QOR and curQuKind not in {QWITH, QOR}
)
EK = curQu[-1][2] != lineIndent
if PEO or PEI:
EP = curQuKind in {QWHERE}
EK = curQu[-1][2] != lineIndent
# QUANTIFIER HANDLING
#
# Based on what is the case, we take actions.
# * we swallow quantified templates
# * we handle quantifier lines
# * we let all other lines pass through
good = True
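        # `for x in [True]:` is a single-pass loop used as a breakable
        # block: both `break` and `continue` inside it jump past the
        # remaining checks after at most one pass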
for x in [True]:
if UUO:
# no quantifier business
continue
if UBO:
# start new quantifier from nothing
if ES:
searchExe.badSyntax.append(
(i, f'Quantifier: Can not start with "{lineQuKind}:"')
)
good = False
if ET:
searchExe.badSyntax.append((i, "Quantifier: No preceding tokens"))
good = False
if EA or EI:
searchExe.badSyntax.append(
(
i,
"Quantifier: Does not immediately follow an atom at the same level",
)
)
good = False
                curQu.append((i, lineQuKind, lineIndent))
                curQuTemplates = [[]]
                if tokens:
                    # attach the quantifier to the preceding atom token
                    prevAtom = tokens[-1]
                    quantifiers = prevAtom.setdefault("quantifiers", [])
                    quantifiers.append((lineQuKind, curQuTemplates, i))
continue
if PBI:
# start inner quantifier
# lines are passed with stripped indentation
# based on the outermost quantifier level
outerIndent = curQu[0][2]
strippedLine = line[outerIndent:]
curQuTemplates[-1].append(strippedLine)
curQu.append((i, lineQuKind, lineIndent))
if PPO:
# inside an outer quantifier
# lines are passed with stripped indentation
strippedLine = line[curQuIndent:]
curQuTemplates[-1].append(strippedLine)
continue
if PPI:
# inside an inner quantifier
# lines are passed with stripped indentation
# based on the outermost quantifier level
outerIndent = curQu[0][2]
strippedLine = line[outerIndent:]
curQuTemplates[-1].append(strippedLine)
if PCO or PCI:
if EP:
searchExe.badSyntax.append(
(
i,
f'Quantifier: "{lineQuKind}" can not follow "{curQuKind}" on line {curQuLine}',
)
)
good = False
if EK:
searchExe.badSyntax.append(
(
i,
(
f'Quantifier "{lineQuKind}"'
f' has not same indentation as "{curQuKind}" on line {curQuLine}'
),
)
)
good = False
if PCO:
curQuTemplates.append([])
else:
outerIndent = curQu[0][2]
strippedLine = line[outerIndent:]
curQuTemplates[-1].append(strippedLine)
curQu[-1] = (i, lineQuKind, lineIndent)
continue
if PEO or PEI:
if EP:
searchExe.badSyntax.append(
(
i,
(
f'Quantifier: "{lineQuKind}"'
f' : premature end of "{curQuKind}" on line {curQuLine}'
),
)
)
good = False
if EK:
searchExe.badSyntax.append(
(
i,
(
f'Quantifier "{lineQuKind}"'
f' has not same indentation as "{curQuKind}" on line {curQuLine}'
),
)
)
good = False
if PEO:
curQuTemplates = None
else:
outerIndent = curQu[0][2]
strippedLine = line[outerIndent:]
curQuTemplates[-1].append(strippedLine)
curQu.pop()
continue
if not good:
allGood = False
if UUO:
# go on with normal template tokenization
pass
else:
# quantifiers stuff has been dealt with
continue
# QUANTIFIER FREE HANDLING
good = False
for x in [True]:
(kind, data) = parseLine(line)
if kind == "op":
(indent, op) = data
if not parseFeatureVals(searchExe, op, opFeatures, i, asEdge=True):
good = False
else:
if opFeatures:
op = (op, opFeatures)
tokens.append(dict(ln=i, kind="atom", indent=len(indent), op=op))
good = True
break
if kind == "rel":
(indent, f, op, t) = data
if not parseFeatureVals(searchExe, op, opFeatures, i, asEdge=True):
good = False
else:
if opFeatures:
op = (op, opFeatures)
tokens.append(dict(ln=i, kind="rel", f=f, op=op, t=t))
good = True
break
if kind == "atom":
(indent, op, name, otype, features) = data
good = True
if name != "":
mt = nameRe.match(name)
if not mt:
searchExe.badSyntax.append((i, f'Illegal name: "{name}"'))
good = False
features = readFeatures(features, i)
if features is None:
good = False
else:
if op is not None:
if not parseFeatureVals(
searchExe, op, opFeatures, i, asEdge=True
):
good = False
if good:
if opFeatures:
op = (op, opFeatures)
tokens.append(
dict(
ln=i,
kind="atom",
indent=len(indent),
op=op,
name=name,
otype=otype,
src=line.lstrip(),
features=features,
)
)
break
if kind == "feat":
features = data[0]
features = readFeatures(features, i)
if features is None:
good = False
else:
tokens.append(dict(ln=i, kind="feat", features=features))
good = True
break
good = False
searchExe.badSyntax.append((i, f"Unrecognized line: {line}"))
if not good:
allGood = False
if curQu:
for (curQuLine, curQuKind, curQuIndent) in curQu:
searchExe.badSyntax.append(
(curQuLine, f'Quantifier: Unterminated "{curQuKind}"')
)
good = False
allGood = False
if allGood:
searchExe.tokens = tokens
else:
searchExe.good = False
def parseLine(line):
for x in [True]:
escLine = _esc(line)
match = opLineRe.match(escLine)
if match:
(indent, op) = match.groups()
kind = "op"
data = (indent, op)
break
match = relRe.match(escLine)
if match:
(indent, f, op, t) = match.groups()
kind = "rel"
data = (indent, f, op, t)
break
matchOp = atomOpRe.match(escLine)
if matchOp:
(indent, op, atom, features) = matchOp.groups()
else:
match = atomRe.match(escLine)
if match:
op = None
(indent, atom, features) = match.groups()
if matchOp or match:
atomComps = atom.split(":", 1)
if len(atomComps) == 1:
name = ""
otype = atomComps[0]
else:
name = atomComps[0]
otype = atomComps[1]
kind = "atom"
if features is None:
features = ""
data = (indent, op, name, otype, features)
break
kind = "feat"
data = (escLine,)
return (kind, data)
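# Illustration (assumption, not from the module): parseLine('  word lex=foo')
# returns ('atom', ('  ', None, '', 'word', 'lex=foo')): an unnamed atom of
# otype 'word' carrying one feature condition.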
def parseFeatureVals(searchExe, featStr, features, i, asEdge=False):
if asEdge:
if not (
(featStr[0] == "-" and featStr[-1] == ">")
or (featStr[0] == "<" and featStr[-1] == "-")
or (featStr[0] == "<" and featStr[-1] == ">")
):
return True
feat = featStr[1:-1]
else:
feat = featStr.replace(chr(1), " ")
good = True
for x in [True]:
match = trueRe.match(feat)
if match:
(featN,) = match.groups()
featName = _unesc(featN)
featVals = (None, True)
break
match = noneRe.match(feat)
if match:
(featN, unequal) = match.groups()
featName = _unesc(featN)
featVals = None if unequal else True
break
match = identRe.match(feat)
if match:
(featN, comp, featValStr) = match.groups()
featName = _unesc(featN)
featValSet = frozenset(_unesc(featVal) for featVal in featValStr.split("|"))
featVals = (comp == "=", featValSet)
break
match = compRe.match(feat)
if match:
(featN, comp, limit) = match.groups()
featName = _unesc(featN)
if not numRe.match(limit):
searchExe.badSyntax.append((i, f'Limit is non numeric "{limit}"'))
good = False
featVals = None
else:
featVals = _makeLimit(int(limit), comp == ">")
break
match = reRe.match(feat)
if match:
(featN, valRe) = match.groups()
featName = _unesc(featN)
valRe = _unesc(valRe, inRe=True)
try:
featVals = re.compile(valRe)
            except Exception as err:
searchExe.badSyntax.append(
(i, f'Wrong regular expression "{valRe}": "{err}"')
)
good = False
featVals = None
break
searchExe.badSyntax.append((i, f'Unrecognized feature condition "{feat}"'))
good = False
featVals = None
if good:
features[featName] = featVals
return good
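# Illustration (assumption, not from the module): given feature strings,
#   'lex=foo|bar' stores features['lex'] = (True, frozenset({'foo', 'bar'}))
#   'lex#foo'     stores features['lex'] = (False, frozenset({'foo'}))
#   'nu>3'        stores a limit function built by _makeLimit(3, True)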
def _genLine(kind, data):
result = None
for x in [True]:
if kind == "op":
(indent, op) = data
result = f"{indent}{_unesc(op)}"
break
if kind == "rel":
(indent, f, op, t) = data
result = f"{indent}{f} {_unesc(op)} {t}"
break
if kind == "atom":
(indent, op, name, otype, features) = data
opRep = "" if op is None else f"{_unesc(op)} "
nameRep = "" if name == "" else f"{name}:"
featRep = _unesc(features)
if featRep:
featRep = f" {featRep}"
result = f"{indent}{opRep}{nameRep}{otype}{featRep}"
break
features = data[0]
result = _unesc(features)
return result
def cleanParent(atom, parentName):
(kind, data) = parseLine(atom)
(indent, op, name, otype, features) = data
if name == "":
name = parentName
return _genLine(kind, (indent, None, name, otype, features))
def deContext(quantifier, parentName):
(quKind, quTemplates, ln) = quantifier
# choose a name for the parent
# either the given name
if not parentName:
# or make a new name
# collect all used names
# to avoid choosing a name that is already used
usedNames = set()
for template in quTemplates:
for line in template:
for name in namesRe.findall(line):
usedNames.add(name)
parentName = "parent"
while parentName in usedNames:
parentName += "x"
newQuTemplates = []
newQuantifier = (quKind, newQuTemplates, parentName, ln)
# replace .. (PARENT_REF) by parentName
# wherever it is applicable
for template in quTemplates:
newLines = []
for line in template:
(kind, data) = parseLine(line)
newLine = line
if kind == "rel":
(indent, f, op, t) = data
if f == PARENT_REF or t == PARENT_REF:
newF = parentName if f == PARENT_REF else f
newT = parentName if t == PARENT_REF else t
newData = (indent, newF, op, newT)
newLine = _genLine(kind, newData)
elif kind == "atom":
(indent, op, name, otype, features) = data
if name == "" and otype == PARENT_REF:
newData = (indent, op, name, parentName, features)
newLine = _genLine(kind, newData)
newLines.append(newLine)
templateStr = "\n".join(newLines)
newQuTemplates.append(templateStr)
return newQuantifier
def _makeLimit(n, isLower):
if isLower:
return lambda x: x is not None and x > n
return lambda x: x is not None and x < n
def _esc(x):
for (i, c) in enumerate(ESCAPES):
x = x.replace(c, chr(i))
return x
def _unesc(x, inRe=False):
for (i, c) in enumerate(ESCAPES):
if inRe and c in VAL_ESCAPES:
x = x.replace(chr(i), f"\\{c[1]}")
else:
x = x.replace(chr(i), c[1])
return x
|
python
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
from . import FairseqDataset
class ConcatSentencesDataset(FairseqDataset):
def __init__(self, *datasets):
super().__init__()
self.datasets = datasets
assert all(len(ds) == len(datasets[0]) for ds in datasets), \
'datasets must have the same length'
def __getitem__(self, index):
return torch.cat([ds[index] for ds in self.datasets])
def __len__(self):
return len(self.datasets[0])
def collater(self, samples):
return self.datasets[0].collater(samples)
@property
def sizes(self):
return sum(ds.sizes for ds in self.datasets)
def num_tokens(self, index):
return sum(ds.num_tokens(index) for ds in self.datasets)
def size(self, index):
return sum(ds.size(index) for ds in self.datasets)
def ordered_indices(self):
return self.datasets[0].ordered_indices()
@property
def supports_prefetch(self):
return any(
getattr(ds, 'supports_prefetch', False) for ds in self.datasets
)
def prefetch(self, indices):
for ds in self.datasets:
if getattr(ds, 'supports_prefetch', False):
ds.prefetch(indices)
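# Minimal usage sketch (illustration only; `prefix_ds` and `suffix_ds` are
# hypothetical datasets of equal length whose items are 1-D tensors):
#
#     ds = ConcatSentencesDataset(prefix_ds, suffix_ds)
#     sample = ds[0]           # torch.cat([prefix_ds[0], suffix_ds[0]])
#     n_tok = ds.num_tokens(0) # sum of the per-dataset token counts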
|
python
|
#! /usr/bin/env python3
import client
import common
import start
if __name__ == '__main__':
start.check_dirs()
common.w.replace_windows(*start.get_windows())
common.w.curses = True
config = start.read_config()
colour = start.validate_config(config)
if colour:
start.set_colours(config['colour'])
common.w.colour = True
common.w.welcome()
start.login(config['user'])
common.w.addstr(
common.w.infobar,
'Enter \'h\' or \'help\' if you need help.'
)
        common.client = client.FullClient() if (
            common.mc.is_subscribed
        ) else client.FreeClient()
while True:
common.client.transition()
else:
start.easy_login()
|
python
|
from unittest import TestCase
import string
from assertions import assert_result
from analyzer.predefined_recognizers.iban_recognizer import IbanRecognizer, IBAN_GENERIC_SCORE, LETTERS
from analyzer.entity_recognizer import EntityRecognizer
iban_recognizer = IbanRecognizer()
entities = ["IBAN_CODE"]
def update_iban_checksum(iban):
    '''
    Generates an IBAN with valid check digits.
    This is based on: https://www.ibantest.com/en/how-is-the-iban-check-digit-calculated
    '''
    iban_no_spaces = iban.replace(' ', '')
    # Move the country code behind the BBAN, set the check digits to '00',
    # map letters to numbers (A=10 ... Z=35) and apply the mod-97 rule
    iban_digits = (iban_no_spaces[4:] + iban_no_spaces[:2] + '00').upper().translate(LETTERS)
    check_digits = '{:0>2}'.format(98 - (int(iban_digits) % 97))
    return iban[:2] + check_digits + iban[4:]
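# In the per-country comments below, the pattern notation follows the IBAN
# registry: 'n' = digits, 'a' = upper-case letters, 'c' = alphanumeric, so
# e.g. '8n, 16c' means 8 digits followed by 16 alphanumeric characters.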
class TestIbanRecognizer(TestCase):
# Test valid and invalid ibans per each country which supports IBAN - without context
#Albania (8n, 16c) ALkk bbbs sssx cccc cccc cccc cccc
def test_AL_iban_valid_no_spaces(self):
iban = 'AL47212110090000000235698741'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_AL_iban_valid_with_spaces(self):
iban = 'AL47 2121 1009 0000 0002 3569 8741'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)
def test_AL_iban_invalid_format_valid_checksum(self):
iban = 'AL47 212A 1009 0000 0002 3569 8741'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_AL_iban_invalid_length(self):
iban = 'AL47 212A 1009 0000 0002 3569 874'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_AL_iban_invalid_checksum(self):
iban = 'AL47 2121 1009 0000 0002 3569 8740'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
#Andorra (8n, 12c) ADkk bbbs sssx cccc cccc cccc
    def test_AD_iban_valid_no_spaces(self):
iban = 'AD1200012030200359100100'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_AD_iban_valid_with_spaces(self):
iban = 'AD12 0001 2030 2003 5910 0100'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_AD_iban_invalid_format_valid_checksum(self):
iban = 'AD12000A2030200359100100'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_AD_iban_invalid_length(self):
iban = 'AD12000A203020035910010'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_AD_iban_invalid_checksum(self):
iban = 'AD12 0001 2030 2003 5910 0101'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Austria (16n) ATkk bbbb bccc cccc cccc
def test_AT_iban_valid_no_spaces(self):
iban = 'AT611904300234573201'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 20, EntityRecognizer.MAX_SCORE)
def test_AT_iban_valid_with_spaces(self):
iban = 'AT61 1904 3002 3457 3201'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_AT_iban_invalid_format_valid_checksum(self):
iban = 'AT61 1904 A002 3457 3201'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_AT_iban_invalid_length(self):
iban = 'AT61 1904 3002 3457 320'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_AT_iban_invalid_checksum(self):
iban = 'AT61 1904 3002 3457 3202'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Azerbaijan (4c,20n) AZkk bbbb cccc cccc cccc cccc cccc
def test_AZ_iban_valid_no_spaces(self):
iban = 'AZ21NABZ00000000137010001944'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_AZ_iban_valid_with_spaces(self):
iban = 'AZ21 NABZ 0000 0000 1370 1000 1944'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)
def test_AZ_iban_invalid_format_valid_checksum(self):
iban = 'AZ21NABZ000000001370100019'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_AZ_iban_invalid_length(self):
iban = 'AZ21NABZ0000000013701000194'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_AZ_iban_invalid_checksum(self):
iban = 'AZ21NABZ00000000137010001945'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Bahrain (4a,14c) BHkk bbbb cccc cccc cccc cc
def test_BH_iban_valid_no_spaces(self):
iban = 'BH67BMAG00001299123456'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
    def test_BH_iban_valid_with_spaces(self):
iban = 'BH67 BMAG 0000 1299 1234 56'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_BH_iban_invalid_format_valid_checksum(self):
iban = 'BH67BMA100001299123456'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_BH_iban_invalid_length(self):
iban = 'BH67BMAG0000129912345'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_BH_iban_invalid_checksum(self):
iban = 'BH67BMAG00001299123457'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Belarus (4c, 4n, 16c) BYkk bbbb aaaa cccc cccc cccc cccc
def test_BY_iban_valid_no_spaces(self):
iban = 'BY13NBRB3600900000002Z00AB00'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_BY_iban_valid_with_spaces(self):
iban = 'BY13 NBRB 3600 9000 0000 2Z00 AB00'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)
def test_BY_iban_invalid_format_valid_checksum(self):
iban = 'BY13NBRBA600900000002Z00AB00'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_BY_iban_invalid_length(self):
iban = 'BY13 NBRB 3600 9000 0000 2Z00 AB0'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_BY_iban_invalid_checksum(self):
iban = 'BY13NBRB3600900000002Z00AB01'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Belgium (12n) BEkk bbbc cccc ccxx
def test_BE_iban_valid_no_spaces(self):
iban = 'BE68539007547034'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 16, EntityRecognizer.MAX_SCORE)
def test_BE_iban_valid_with_spaces(self):
iban = 'BE71 0961 2345 6769'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 19, EntityRecognizer.MAX_SCORE)
def test_BE_iban_invalid_format_valid_checksum(self):
iban = 'BE71 A961 2345 6769'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_BE_iban_invalid_length(self):
iban = 'BE6853900754703'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_BE_iban_invalid_checksum(self):
iban = 'BE71 0961 2345 6760'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Bosnia and Herzegovina (16n) BAkk bbbs sscc cccc ccxx
def test_BA_iban_valid_no_spaces(self):
iban = 'BA391290079401028494'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 20, EntityRecognizer.MAX_SCORE)
def test_BA_iban_valid_with_spaces(self):
iban = 'BA39 1290 0794 0102 8494'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_BA_iban_invalid_format_valid_checksum(self):
iban = 'BA39 A290 0794 0102 8494'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_BA_iban_invalid_length(self):
iban = 'BA39129007940102849'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_BA_iban_invalid_checksum(self):
iban = 'BA39 1290 0794 0102 8495'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Brazil (23n,1a,1c) BRkk bbbb bbbb ssss sccc cccc ccct n
def test_BR_iban_valid_no_spaces(self):
iban = 'BR9700360305000010009795493P1'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_BR_iban_valid_with_spaces(self):
iban = 'BR97 0036 0305 0000 1000 9795 493P 1'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 36, EntityRecognizer.MAX_SCORE)
def test_BR_iban_invalid_format_valid_checksum(self):
iban = 'BR97 0036 A305 0000 1000 9795 493P 1'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_BR_iban_invalid_length(self):
iban = 'BR9700360305000010009795493P'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_BR_iban_invalid_checksum(self):
iban = 'BR97 0036 0305 0000 1000 9795 493P 2'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Bulgaria (4a,6n,8c) BGkk bbbb ssss ttcc cccc cc
def test_BG_iban_valid_no_spaces(self):
iban = 'BG80BNBG96611020345678'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_BG_iban_valid_with_spaces(self):
iban = 'BG80 BNBG 9661 1020 3456 78'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_BG_iban_invalid_format_valid_checksum(self):
iban = 'BG80 BNBG 9661 A020 3456 78'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_BG_iban_invalid_length(self):
iban = 'BG80BNBG9661102034567'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_BG_iban_invalid_checksum(self):
iban = 'BG80 BNBG 9661 1020 3456 79'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Costa Rica (18n) CRkk 0bbb cccc cccc cccc cc 0 = always zero
def test_CR_iban_valid_no_spaces(self):
iban = 'CR05015202001026284066'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_CR_iban_valid_with_spaces(self):
iban = 'CR05 0152 0200 1026 2840 66'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_CR_iban_invalid_format_valid_checksum(self):
iban = 'CR05 0152 0200 1026 2840 6A'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_CR_iban_invalid_length(self):
iban = 'CR05 0152 0200 1026 2840 6'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_CR_iban_invalid_checksum(self):
iban = 'CR05 0152 0200 1026 2840 67'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Croatia (17n) HRkk bbbb bbbc cccc cccc c
def test_HR_iban_valid_no_spaces(self):
iban = 'HR1210010051863000160'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 21, EntityRecognizer.MAX_SCORE)
def test_HR_iban_valid_with_spaces(self):
iban = 'HR12 1001 0051 8630 0016 0'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 26, EntityRecognizer.MAX_SCORE)
def test_HR_iban_invalid_format_valid_checksum(self):
iban = 'HR12 001 0051 8630 0016 A'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_HR_iban_invalid_length(self):
iban = 'HR121001005186300016'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_HR_iban_invalid_checksum(self):
iban = 'HR12 1001 0051 8630 0016 1'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Cyprus (8n,16c) CYkk bbbs ssss cccc cccc cccc cccc
def test_CY_iban_valid_no_spaces(self):
iban = 'CY17002001280000001200527600'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_CY_iban_valid_with_spaces(self):
iban = 'CY17 0020 0128 0000 0012 0052 7600'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)
def test_CY_iban_invalid_format_valid_checksum(self):
iban = 'CY17 0020 A128 0000 0012 0052 7600'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_CY_iban_invalid_length(self):
iban = 'CY17 0020 0128 0000 0012 0052 760'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_CY_iban_invalid_checksum(self):
iban = 'CY17 0020 0128 0000 0012 0052 7601'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Czech Republic (20n) CZkk bbbb ssss sscc cccc cccc
def test_CZ_iban_valid_no_spaces(self):
iban = 'CZ6508000000192000145399'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_CZ_iban_valid_with_spaces(self):
iban = 'CZ65 0800 0000 1920 0014 5399'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_CZ_iban_invalid_format_valid_checksum(self):
iban = 'CZ65 0800 A000 1920 0014 5399'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_CZ_iban_invalid_length(self):
iban = 'CZ65 0800 0000 1920 0014 539'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_CZ_iban_invalid_checksum(self):
iban = 'CZ65 0800 0000 1920 0014 5390'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Denmark (14n) DKkk bbbb cccc cccc cc
def test_DK_iban_valid_no_spaces(self):
iban = 'DK5000400440116243'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 18, EntityRecognizer.MAX_SCORE)
def test_DK_iban_valid_with_spaces(self):
iban = 'DK50 0040 0440 1162 43'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_DK_iban_invalid_format_valid_checksum(self):
iban = 'DK50 0040 A440 1162 43'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_DK_iban_invalid_length(self):
iban = 'DK50 0040 0440 1162 4'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_DK_iban_invalid_checksum(self):
iban = 'DK50 0040 0440 1162 44'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Dominican Republic (4a,20n) DOkk bbbb cccc cccc cccc cccc cccc
def test_DO_iban_valid_no_spaces(self):
iban = 'DO28BAGR00000001212453611324'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_DO_iban_valid_with_spaces(self):
iban = 'DO28 BAGR 0000 0001 2124 5361 1324'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)
def test_DO_iban_invalid_format_valid_checksum(self):
iban = 'DO28 BAGR A000 0001 2124 5361 1324'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_DO_iban_invalid_length(self):
iban = 'DO28 BAGR 0000 0001 2124 5361 132'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_DO_iban_invalid_checksum(self):
iban = 'DO28 BAGR 0000 0001 2124 5361 1325'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# East Timor (Timor-Leste) (19n) TLkk bbbc cccc cccc cccc cxx
def test_TL_iban_valid_no_spaces(self):
iban = 'TL380080012345678910157'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 23, EntityRecognizer.MAX_SCORE)
def test_TL_iban_valid_with_spaces(self):
iban = 'TL38 0080 0123 4567 8910 157'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_TL_iban_invalid_format_valid_checksum(self):
iban = 'TL38 A080 0123 4567 8910 157'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_TL_iban_invalid_checksum(self):
iban = 'TL38 0080 0123 4567 8910 158'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Estonia (16n) EEkk bbss cccc cccc cccx
def test_EE_iban_valid_no_spaces(self):
iban = 'EE382200221020145685'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 20, EntityRecognizer.MAX_SCORE)
def test_EE_iban_valid_with_spaces(self):
iban = 'EE38 2200 2210 2014 5685'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_EE_iban_invalid_format_valid_checksum(self):
iban = 'EE38 A200 2210 2014 5685'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_EE_iban_invalid_checksum(self):
iban = 'EE38 2200 2210 2014 5686'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Faroe Islands (14n) FOkk bbbb cccc cccc cx
def test_FO_iban_valid_no_spaces(self):
iban = 'FO6264600001631634'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 18, EntityRecognizer.MAX_SCORE)
def test_FO_iban_valid_with_spaces(self):
iban = 'FO62 6460 0001 6316 34'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_FO_iban_invalid_format_valid_checksum(self):
iban = 'FO62 A460 0001 6316 34'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_FO_iban_invalid_checksum(self):
iban = 'FO62 6460 0001 6316 35'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Finland (14n) FIkk bbbb bbcc cccc cx
def test_FI_iban_valid_no_spaces(self):
iban = 'FI2112345600000785'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 18, EntityRecognizer.MAX_SCORE)
def test_FI_iban_valid_with_spaces(self):
iban = 'FI21 1234 5600 0007 85'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_FI_iban_invalid_format_valid_checksum(self):
iban = 'FI21 A234 5600 0007 85'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_FI_iban_invalid_checksum(self):
iban = 'FI21 1234 5600 0007 86'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# France (10n,11c,2n) FRkk bbbb bsss sscc cccc cccc cxx
def test_FR_iban_valid_no_spaces(self):
iban = 'FR1420041010050500013M02606'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_FR_iban_valid_with_spaces(self):
iban = 'FR14 2004 1010 0505 0001 3M02 606'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 33, EntityRecognizer.MAX_SCORE)
def test_FR_iban_invalid_format_valid_checksum(self):
iban = 'FR14 A004 1010 0505 0001 3M02 606'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_FR_iban_invalid_checksum(self):
iban = 'FR14 2004 1010 0505 0001 3M02 607'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Georgia (2c,16n) GEkk bbcc cccc cccc cccc cc
def test_GE_iban_valid_no_spaces(self):
iban = 'GE29NB0000000101904917'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_GE_iban_valid_with_spaces(self):
iban = 'GE29 NB00 0000 0101 9049 17'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_GE_iban_invalid_format_valid_checksum(self):
iban = 'GE29 NBA0 0000 0101 9049 17'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_GE_iban_invalid_checksum(self):
iban = 'GE29 NB00 0000 0101 9049 18'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Germany (18n) DEkk bbbb bbbb cccc cccc cc
def test_DE_iban_valid_no_spaces(self):
iban = 'DE89370400440532013000'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_DE_iban_valid_with_spaces(self):
iban = 'DE89 3704 0044 0532 0130 00'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_DE_iban_invalid_format_valid_checksum(self):
iban = 'DE89 A704 0044 0532 0130 00'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_DE_iban_invalid_checksum(self):
iban = 'DE89 3704 0044 0532 0130 01'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Gibraltar (4a,15c) GIkk bbbb cccc cccc cccc ccc
def test_GI_iban_valid_no_spaces(self):
iban = 'GI75NWBK000000007099453'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 23, EntityRecognizer.MAX_SCORE)
def test_GI_iban_valid_with_spaces(self):
iban = 'GI75 NWBK 0000 0000 7099 453'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_GI_iban_invalid_format_valid_checksum(self):
iban = 'GI75 aWBK 0000 0000 7099 453'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 28, IBAN_GENERIC_SCORE)
def test_GI_iban_invalid_checksum(self):
iban = 'GI75 NWBK 0000 0000 7099 454'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Greece (7n,16c) GRkk bbbs sssc cccc cccc cccc ccc
def test_GR_iban_valid_no_spaces(self):
iban = 'GR1601101250000000012300695'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_GR_iban_valid_with_spaces(self):
iban = 'GR16 0110 1250 0000 0001 2300 695'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 33, EntityRecognizer.MAX_SCORE)
def test_GR_iban_invalid_format_valid_checksum(self):
iban = 'GR16 A110 1250 0000 0001 2300 695'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_GR_iban_invalid_checksum(self):
iban = 'GR16 0110 1250 0000 0001 2300 696'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Greenland (14n) GLkk bbbb cccc cccc cc
def test_GL_iban_valid_no_spaces(self):
iban = 'GL8964710001000206'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 18, EntityRecognizer.MAX_SCORE)
def test_GL_iban_valid_with_spaces(self):
iban = 'GL89 6471 0001 0002 06'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_GL_iban_invalid_format_valid_checksum(self):
iban = 'GL89 A471 0001 0002 06'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_GL_iban_invalid_checksum(self):
iban = 'GL89 6471 0001 0002 07'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Guatemala (4c,20c) GTkk bbbb mmtt cccc cccc cccc cccc
def test_GT_iban_valid_no_spaces(self):
iban = 'GT82TRAJ01020000001210029690'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_GT_iban_valid_with_spaces(self):
iban = 'GT82 TRAJ 0102 0000 0012 1002 9690'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)
def test_GT_iban_invalid_format_valid_checksum(self):
iban = 'GT82 TRAJ 0102 0000 0012 1002 9690 A'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_GT_iban_invalid_checksum(self):
iban = 'GT82 TRAJ 0102 0000 0012 1002 9691'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Hungary (24n) HUkk bbbs sssx cccc cccc cccc cccx
def test_HU_iban_valid_no_spaces(self):
iban = 'HU42117730161111101800000000'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_HU_iban_valid_with_spaces(self):
iban = 'HU42 1177 3016 1111 1018 0000 0000'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)
def test_HU_iban_invalid_format_valid_checksum(self):
iban = 'HU42 A177 3016 1111 1018 0000 0000'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_HU_iban_invalid_checksum(self):
iban = 'HU42 1177 3016 1111 1018 0000 0001'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Iceland (22n) ISkk bbbb sscc cccc iiii iiii ii
def test_IS_iban_valid_no_spaces(self):
iban = 'IS140159260076545510730339'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 26, EntityRecognizer.MAX_SCORE)
def test_IS_iban_valid_with_spaces(self):
iban = 'IS14 0159 2600 7654 5510 7303 39'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 32, EntityRecognizer.MAX_SCORE)
def test_IS_iban_invalid_format_valid_checksum(self):
iban = 'IS14 A159 2600 7654 5510 7303 39'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_IS_iban_invalid_checksum(self):
iban = 'IS14 0159 2600 7654 5510 7303 30'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Ireland (4c,14n) IEkk aaaa bbbb bbcc cccc cc
def test_IE_iban_valid_no_spaces(self):
iban = 'IE29AIBK93115212345678'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_IE_iban_valid_with_spaces(self):
iban = 'IE29 AIBK 9311 5212 3456 78'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_IE_iban_invalid_format_valid_checksum(self):
iban = 'IE29 AIBK A311 5212 3456 78'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_IE_iban_invalid_checksum(self):
iban = 'IE29 AIBK 9311 5212 3456 79'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Israel (19n) ILkk bbbn nncc cccc cccc ccc
def test_IL_iban_valid_no_spaces(self):
iban = 'IL620108000000099999999'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 23, EntityRecognizer.MAX_SCORE)
def test_IL_iban_valid_with_spaces(self):
iban = 'IL62 0108 0000 0009 9999 999'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_IL_iban_invalid_format_valid_checksum(self):
iban = 'IL62 A108 0000 0009 9999 999'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_IL_iban_invalid_checksum(self):
iban = 'IL62 0108 0000 0009 9999 990'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Italy (1a,10n,12c) ITkk xbbb bbss sssc cccc cccc ccc
def test_IT_iban_valid_no_spaces(self):
iban = 'IT60X0542811101000000123456'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_IT_iban_valid_with_spaces(self):
iban = 'IT60 X054 2811 1010 0000 0123 456'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 33, EntityRecognizer.MAX_SCORE)
def test_IT_iban_invalid_format_valid_checksum(self):
iban = 'IT60 XW54 2811 1010 0000 0123 456'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_IT_iban_invalid_checksum(self):
iban = 'IT60 X054 2811 1010 0000 0123 457'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Jordan (4a,22n) JOkk bbbb ssss cccc cccc cccc cccc cc
def test_JO_iban_valid_no_spaces(self):
iban = 'JO94CBJO0010000000000131000302'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 30, EntityRecognizer.MAX_SCORE)
def test_JO_iban_valid_with_spaces(self):
iban = 'JO94 CBJO 0010 0000 0000 0131 0003 02'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 37, EntityRecognizer.MAX_SCORE)
def test_JO_iban_invalid_format_valid_checksum(self):
iban = 'JO94 CBJO A010 0000 0000 0131 0003 02'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_JO_iban_invalid_checksum(self):
iban = 'JO94 CBJO 0010 0000 0000 0131 0003 03'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Kazakhstan (3n,13c) KZkk bbbc cccc cccc cccc
def test_KZ_iban_valid_no_spaces(self):
iban = 'KZ86125KZT5004100100'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 20, EntityRecognizer.MAX_SCORE)
def test_KZ_iban_valid_with_spaces(self):
iban = 'KZ86 125K ZT50 0410 0100'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_KZ_iban_invalid_format_valid_checksum(self):
iban = 'KZ86 A25K ZT50 0410 0100'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_KZ_iban_invalid_checksum(self):
iban = 'KZ86 125K ZT50 0410 0101'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Kosovo (4n,10n,2n) XKkk bbbb cccc cccc cccc
def test_XK_iban_valid_no_spaces(self):
iban = 'XK051212012345678906'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 20, EntityRecognizer.MAX_SCORE)
def test_XK_iban_valid_with_spaces(self):
iban = 'XK05 1212 0123 4567 8906'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_XK_iban_invalid_format_valid_checksum(self):
iban = 'XK05 A212 0123 4567 8906'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_XK_iban_invalid_checksum(self):
iban = 'XK05 1212 0123 4567 8907'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Kuwait (4a,22c) KWkk bbbb cccc cccc cccc cccc cccc cc
def test_KW_iban_valid_no_spaces(self):
iban = 'KW81CBKU0000000000001234560101'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 30, EntityRecognizer.MAX_SCORE)
def test_KW_iban_valid_with_spaces(self):
iban = 'KW81 CBKU 0000 0000 0000 1234 5601 01'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 37, EntityRecognizer.MAX_SCORE)
def test_KW_iban_invalid_format_valid_checksum(self):
iban = 'KW81 aBKU 0000 0000 0000 1234 5601 01'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 37, IBAN_GENERIC_SCORE)
    def test_KW_iban_invalid_checksum(self):
iban = 'KW81 CBKU 0000 0000 0000 1234 5601 02'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Latvia (4a,13c) LVkk bbbb cccc cccc cccc c
def test_LV_iban_valid_no_spaces(self):
iban = 'LV80BANK0000435195001'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 21, EntityRecognizer.MAX_SCORE)
def test_LV_iban_valid_with_spaces(self):
iban = 'LV80 BANK 0000 4351 9500 1'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 26, EntityRecognizer.MAX_SCORE)
def test_LV_iban_invalid_format_valid_checksum(self):
iban = 'LV80 bANK 0000 4351 9500 1'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 26, IBAN_GENERIC_SCORE)
    def test_LV_iban_invalid_checksum(self):
iban = 'LV80 BANK 0000 4351 9500 2'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Lebanon (4n,20c) LBkk bbbb cccc cccc cccc cccc cccc
def test_LB_iban_valid_no_spaces(self):
iban = 'LB62099900000001001901229114'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_LB_iban_valid_with_spaces(self):
iban = 'LB62 0999 0000 0001 0019 0122 9114'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)
def test_LB_iban_invalid_format_valid_checksum(self):
iban = 'LB62 A999 0000 0001 0019 0122 9114'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_LB_iban_invalid_checksum(self):
iban = 'LB62 0999 0000 0001 0019 0122 9115'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Liechtenstein (5n,12c) LIkk bbbb bccc cccc cccc c
def test_LI_iban_valid_no_spaces(self):
iban = 'LI21088100002324013AA'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 21, EntityRecognizer.MAX_SCORE)
def test_LI_iban_valid_with_spaces(self):
iban = 'LI21 0881 0000 2324 013A A'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 26, EntityRecognizer.MAX_SCORE)
def test_LI_iban_invalid_format_valid_checksum(self):
iban = 'LI21 A881 0000 2324 013A A'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_LI_iban_invalid_checksum(self):
iban = 'LI21 0881 0000 2324 013A B'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Lithuania (16n) LTkk bbbb bccc cccc cccc
def test_LT_iban_valid_no_spaces(self):
iban = 'LT121000011101001000'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 20, EntityRecognizer.MAX_SCORE)
def test_LT_iban_valid_with_spaces(self):
iban = 'LT12 1000 0111 0100 1000'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_LT_iban_invalid_format_valid_checksum(self):
iban = 'LT12 A000 0111 0100 1000'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_LT_iban_invalid_checksum(self):
iban = 'LT12 1000 0111 0100 1001'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Luxembourg (3n,13c) LUkk bbbc cccc cccc cccc
def test_LU_iban_valid_no_spaces(self):
iban = 'LU280019400644750000'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 20, EntityRecognizer.MAX_SCORE)
def test_LU_iban_valid_with_spaces(self):
iban = 'LU28 0019 4006 4475 0000'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_LU_iban_invalid_format_valid_checksum(self):
iban = 'LU28 A019 4006 4475 0000'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_LU_iban_invalid_checksum(self):
iban = 'LU28 0019 4006 4475 0001'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Malta (4a,5n,18c) MTkk bbbb ssss sccc cccc cccc cccc ccc
def test_MT_iban_valid_no_spaces(self):
iban = 'MT84MALT011000012345MTLCAST001S'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 31, EntityRecognizer.MAX_SCORE)
def test_MT_iban_valid_with_spaces(self):
iban = 'MT84 MALT 0110 0001 2345 MTLC AST0 01S'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 38, EntityRecognizer.MAX_SCORE)
def test_MT_iban_invalid_format_valid_checksum(self):
iban = 'MT84 MALT A110 0001 2345 MTLC AST0 01S'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_MT_iban_invalid_checksum(self):
iban = 'MT84 MALT 0110 0001 2345 MTLC AST0 01T'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Mauritania (23n) MRkk bbbb bsss sscc cccc cccc cxx
def test_MR_iban_valid_no_spaces(self):
iban = 'MR1300020001010000123456753'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_MR_iban_valid_with_spaces(self):
iban = 'MR13 0002 0001 0100 0012 3456 753'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 33, EntityRecognizer.MAX_SCORE)
def test_MR_iban_invalid_format_valid_checksum(self):
iban = 'MR13 A002 0001 0100 0012 3456 753'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_MR_iban_invalid_checksum(self):
iban = 'MR13 0002 0001 0100 0012 3456 754'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Mauritius (4a,19n,3a) MUkk bbbb bbss cccc cccc cccc 000m mm
def test_MU_iban_valid_no_spaces(self):
iban = 'MU17BOMM0101101030300200000MUR'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 30, EntityRecognizer.MAX_SCORE)
def test_MU_iban_valid_with_spaces(self):
iban = 'MU17 BOMM 0101 1010 3030 0200 000M UR'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 37, EntityRecognizer.MAX_SCORE)
def test_MU_iban_invalid_format_valid_checksum(self):
iban = 'MU17 BOMM A101 1010 3030 0200 000M UR'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_MU_iban_invalid_checksum(self):
iban = 'MU17 BOMM 0101 1010 3030 0200 000M US'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Moldova (2c,18c) MDkk bbcc cccc cccc cccc cccc
def test_MD_iban_valid_no_spaces(self):
iban = 'MD24AG000225100013104168'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_MD_iban_valid_with_spaces(self):
iban = 'MD24 AG00 0225 1000 1310 4168'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_MD_iban_invalid_format_valid_checksum(self):
iban = 'MD24 AG00 0225 1000 1310 4168 9'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_MD_iban_invalid_checksum(self):
iban = 'MD24 AG00 0225 1000 1310 4169'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Monaco (10n,11c,2n) MCkk bbbb bsss sscc cccc cccc cxx
def test_MC_iban_valid_no_spaces(self):
iban = 'MC5811222000010123456789030'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_MC_iban_valid_with_spaces(self):
iban = 'MC58 1122 2000 0101 2345 6789 030'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 33, EntityRecognizer.MAX_SCORE)
def test_MC_iban_invalid_format_valid_checksum(self):
iban = 'MC58 A122 2000 0101 2345 6789 030'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_MC_iban_invalid_checksum(self):
iban = 'MC58 1122 2000 0101 2345 6789 031'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Montenegro (18n) MEkk bbbc cccc cccc cccc xx
def test_ME_iban_valid_no_spaces(self):
iban = 'ME25505000012345678951'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_ME_iban_valid_with_spaces(self):
iban = 'ME25 5050 0001 2345 6789 51'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_ME_iban_invalid_format_valid_checksum(self):
iban = 'ME25 A050 0001 2345 6789 51'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_ME_iban_invalid_checksum(self):
iban = 'ME25 5050 0001 2345 6789 52'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Netherlands (4a,10n) NLkk bbbb cccc cccc cc
def test_NL_iban_valid_no_spaces(self):
iban = 'NL91ABNA0417164300'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 18, EntityRecognizer.MAX_SCORE)
def test_NL_iban_valid_with_spaces(self):
iban = 'NL91 ABNA 0417 1643 00'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_NL_iban_invalid_format_valid_checksum(self):
iban = 'NL91 1BNA 0417 1643 00'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_NL_iban_invalid_checksum(self):
iban = 'NL91 ABNA 0417 1643 01'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# North Macedonia (3n,10c,2n) MKkk bbbc cccc cccc cxx
def test_MK_iban_valid_no_spaces(self):
iban = 'MK07250120000058984'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 19, EntityRecognizer.MAX_SCORE)
def test_MK_iban_valid_with_spaces(self):
iban = 'MK07 2501 2000 0058 984'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 23, EntityRecognizer.MAX_SCORE)
def test_MK_iban_invalid_format_valid_checksum(self):
iban = 'MK07 A501 2000 0058 984'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_MK_iban_invalid_checksum(self):
iban = 'MK07 2501 2000 0058 985'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Norway (11n) NOkk bbbb cccc ccx
def test_NO_iban_valid_no_spaces(self):
iban = 'NO9386011117947'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 15, EntityRecognizer.MAX_SCORE)
def test_NO_iban_valid_with_spaces(self):
iban = 'NO93 8601 1117 947'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 18, EntityRecognizer.MAX_SCORE)
def test_NO_iban_invalid_format_valid_checksum(self):
iban = 'NO93 A601 1117 947'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_NO_iban_invalid_checksum(self):
iban = 'NO93 8601 1117 948'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Pakistan (4c,16n) PKkk bbbb cccc cccc cccc cccc
def test_PK_iban_valid_no_spaces(self):
iban = 'PK36SCBL0000001123456702'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_PK_iban_valid_with_spaces(self):
iban = 'PK36 SCBL 0000 0011 2345 6702'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_PK_iban_invalid_format_valid_checksum(self):
iban = 'PK36 SCBL A000 0011 2345 6702'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_PK_iban_invalid_checksum(self):
iban = 'PK36 SCBL 0000 0011 2345 6703'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Palestinian territories (4c,21n) PSkk bbbb xxxx xxxx xccc cccc cccc c
def test_PS_iban_valid_no_spaces(self):
iban = 'PS92PALS000000000400123456702'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_PS_iban_valid_with_spaces(self):
iban = 'PS92 PALS 0000 0000 0400 1234 5670 2'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 36, EntityRecognizer.MAX_SCORE)
def test_PS_iban_invalid_format_valid_checksum(self):
iban = 'PS92 PALS A000 0000 0400 1234 5670 2'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_PS_iban_invalid_checksum(self):
iban = 'PS92 PALS 0000 0000 0400 1234 5670 3'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Poland (24n) PLkk bbbs sssx cccc cccc cccc cccc
def test_PL_iban_valid_no_spaces(self):
iban = 'PL61109010140000071219812874'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_PL_iban_valid_with_spaces(self):
iban = 'PL61 1090 1014 0000 0712 1981 2874'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 34, EntityRecognizer.MAX_SCORE)
def test_PL_iban_invalid_format_valid_checksum(self):
iban = 'PL61 A090 1014 0000 0712 1981 2874'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_PL_iban_invalid_checksum(self):
iban = 'PL61 1090 1014 0000 0712 1981 2875'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Portugal (21n) PTkk bbbb ssss cccc cccc cccx x
def test_PT_iban_valid_no_spaces(self):
iban = 'PT50000201231234567890154'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 25, EntityRecognizer.MAX_SCORE)
def test_PT_iban_valid_with_spaces(self):
iban = 'PT50 0002 0123 1234 5678 9015 4'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 31, EntityRecognizer.MAX_SCORE)
def test_PT_iban_invalid_format_valid_checksum(self):
iban = 'PT50 A002 0123 1234 5678 9015 4'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_PT_iban_invalid_checksum(self):
iban = 'PT50 0002 0123 1234 5678 9015 5'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Qatar (4a,21c) QAkk bbbb cccc cccc cccc cccc cccc c
def test_QA_iban_valid_no_spaces(self):
iban = 'QA58DOHB00001234567890ABCDEFG'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_QA_iban_valid_with_spaces(self):
iban = 'QA58 DOHB 0000 1234 5678 90AB CDEF G'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 36, EntityRecognizer.MAX_SCORE)
def test_QA_iban_invalid_format_valid_checksum(self):
iban = 'QA58 0OHB 0000 1234 5678 90AB CDEF G'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_QA_iban_invalid_checksum(self):
iban = 'QA58 DOHB 0000 1234 5678 90AB CDEF H'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    ### Reunion
# Romania (4a,16c) ROkk bbbb cccc cccc cccc cccc
def test_RO_iban_valid_no_spaces(self):
iban = 'RO49AAAA1B31007593840000'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_RO_iban_valid_with_spaces(self):
iban = 'RO49 AAAA 1B31 0075 9384 0000'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_RO_iban_invalid_format_valid_checksum(self):
iban = 'RO49 0AAA 1B31 0075 9384 0000'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_RO_iban_invalid_checksum(self):
iban = 'RO49 AAAA 1B31 0075 9384 0001'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
### Saint Barthelemy
### Saint Lucia
### Saint Martin
    ### Saint Pierre
# San Marino (1a,10n,12c) SMkk xbbb bbss sssc cccc cccc ccc
def test_SM_iban_valid_no_spaces(self):
iban = 'SM86U0322509800000000270100'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_SM_iban_valid_with_spaces(self):
iban = 'SM86 U032 2509 8000 0000 0270 100'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 33, EntityRecognizer.MAX_SCORE)
def test_SM_iban_invalid_format_valid_checksum(self):
iban = 'SM86 0032 2509 8000 0000 0270 100'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_SM_iban_invalid_checksum(self):
iban = 'SM86 U032 2509 8000 0000 0270 101'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
### Sao Tome
# Saudi Arabia (2n,18c) SAkk bbcc cccc cccc cccc cccc
def test_SA_iban_valid_no_spaces(self):
iban = 'SA0380000000608010167519'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_SA_iban_valid_with_spaces(self):
iban = 'SA03 8000 0000 6080 1016 7519'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_SA_iban_invalid_format_valid_checksum(self):
iban = 'SA03 A000 0000 6080 1016 7519'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_SA_iban_invalid_checksum(self):
iban = 'SA03 8000 0000 6080 1016 7510'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Serbia (18n) RSkk bbbc cccc cccc cccc xx
def test_RS_iban_valid_no_spaces(self):
iban = 'RS35260005601001611379'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_RS_iban_valid_with_spaces(self):
iban = 'RS35 2600 0560 1001 6113 79'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_RS_iban_invalid_format_valid_checksum(self):
iban = 'RS35 A600 0560 1001 6113 79'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_RS_iban_invalid_checksum(self):
iban = 'RS35 2600 0560 1001 6113 70'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Slovakia (20n) SKkk bbbb ssss sscc cccc cccc
    def test_SK_iban_valid_no_spaces(self):
iban = 'SK3112000000198742637541'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
    def test_SK_iban_valid_with_spaces(self):
iban = 'SK31 1200 0000 1987 4263 7541'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
    def test_SK_iban_invalid_format_valid_checksum(self):
iban = 'SK31 A200 0000 1987 4263 7541'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_SK_iban_invalid_checksum(self):
iban = 'SK31 1200 0000 1987 4263 7542'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Slovenia (15n) SIkk bbss sccc cccc cxx
def test_SI_iban_valid_no_spaces(self):
iban = 'SI56263300012039086'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 19, EntityRecognizer.MAX_SCORE)
def test_SI_iban_valid_with_spaces(self):
iban = 'SI56 2633 0001 2039 086'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 23, EntityRecognizer.MAX_SCORE)
def test_SI_iban_invalid_format_valid_checksum(self):
iban = 'SI56 A633 0001 2039 086'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_SI_iban_invalid_checksum(self):
iban = 'SI56 2633 0001 2039 087'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Spain (20n) ESkk bbbb ssss xxcc cccc cccc
def test_ES_iban_valid_no_spaces(self):
iban = 'ES9121000418450200051332'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_ES_iban_valid_with_spaces(self):
iban = 'ES91 2100 0418 4502 0005 1332'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_ES_iban_invalid_format_valid_checksum(self):
iban = 'ES91 A100 0418 4502 0005 1332'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_ES_iban_invalid_checksum(self):
iban = 'ES91 2100 0418 4502 0005 1333'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Sweden (20n) SEkk bbbc cccc cccc cccc cccc
def test_SE_iban_valid_no_spaces(self):
iban = 'SE4550000000058398257466'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_SE_iban_valid_with_spaces(self):
iban = 'SE45 5000 0000 0583 9825 7466'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_SE_iban_invalid_format_valid_checksum(self):
iban = 'SE45 A000 0000 0583 9825 7466'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_SE_iban_invalid_checksum(self):
iban = 'SE45 5000 0000 0583 9825 7467'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Switzerland (5n,12c) CHkk bbbb bccc cccc cccc c
def test_CH_iban_valid_no_spaces(self):
iban = 'CH9300762011623852957'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 21, EntityRecognizer.MAX_SCORE)
def test_CH_iban_valid_with_spaces(self):
iban = 'CH93 0076 2011 6238 5295 7'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 26, EntityRecognizer.MAX_SCORE)
def test_CH_iban_invalid_format_valid_checksum(self):
iban = 'CH93 A076 2011 6238 5295 7'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_CH_iban_invalid_checksum(self):
iban = 'CH93 0076 2011 6238 5295 8'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Tunisia (20n) TNkk bbss sccc cccc cccc cccc
def test_TN_iban_valid_no_spaces(self):
iban = 'TN5910006035183598478831'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_TN_iban_valid_with_spaces(self):
iban = 'TN59 1000 6035 1835 9847 8831'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_TN_iban_invalid_format_valid_checksum(self):
iban = 'TN59 A000 6035 1835 9847 8831'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_TN_iban_invalid_checksum(self):
        iban = 'TN59 1000 6035 1835 9847 8832'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Turkey (5n,17c) TRkk bbbb bxcc cccc cccc cccc cc
def test_TR_iban_valid_no_spaces(self):
iban = 'TR330006100519786457841326'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 26, EntityRecognizer.MAX_SCORE)
def test_TR_iban_valid_with_spaces(self):
iban = 'TR33 0006 1005 1978 6457 8413 26'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 32, EntityRecognizer.MAX_SCORE)
def test_TR_iban_invalid_format_valid_checksum(self):
iban = 'TR33 A006 1005 1978 6457 8413 26'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_TR_iban_invalid_checksum(self):
iban = 'TR33 0006 1005 1978 6457 8413 27'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# United Arab Emirates (3n,16n) AEkk bbbc cccc cccc cccc ccc
def test_AE_iban_valid_no_spaces(self):
iban = 'AE070331234567890123456'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 23, EntityRecognizer.MAX_SCORE)
def test_AE_iban_valid_with_spaces(self):
iban = 'AE07 0331 2345 6789 0123 456'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 28, EntityRecognizer.MAX_SCORE)
def test_AE_iban_invalid_format_valid_checksum(self):
iban = 'AE07 A331 2345 6789 0123 456'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_AE_iban_invalid_checksum(self):
iban = 'AE07 0331 2345 6789 0123 457'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# United Kingdom (4a,14n) GBkk bbbb ssss sscc cccc cc
def test_GB_iban_valid_no_spaces(self):
iban = 'GB29NWBK60161331926819'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_GB_iban_valid_with_spaces(self):
iban = 'GB29 NWBK 6016 1331 9268 19'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_GB_iban_invalid_format_valid_checksum(self):
iban = 'GB29 1WBK 6016 1331 9268 19'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_GB_iban_invalid_checksum(self):
iban = 'GB29 NWBK 6016 1331 9268 10'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Vatican City (3n,15n) VAkk bbbc cccc cccc cccc cc
def test_VA_iban_valid_no_spaces(self):
iban = 'VA59001123000012345678'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 22, EntityRecognizer.MAX_SCORE)
def test_VA_iban_valid_with_spaces(self):
iban = 'VA59 0011 2300 0012 3456 78'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 27, EntityRecognizer.MAX_SCORE)
def test_VA_iban_invalid_format_valid_checksum(self):
iban = 'VA59 A011 2300 0012 3456 78'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_VA_iban_invalid_checksum(self):
iban = 'VA59 0011 2300 0012 3456 79'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Virgin Islands, British (4c,16n) VGkk bbbb cccc cccc cccc cccc
def test_VG_iban_valid_no_spaces(self):
iban = 'VG96VPVG0000012345678901'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 24, EntityRecognizer.MAX_SCORE)
def test_VG_iban_valid_with_spaces(self):
iban = 'VG96 VPVG 0000 0123 4567 8901'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 1
assert_result(results[0], entities[0], 0, 29, EntityRecognizer.MAX_SCORE)
def test_VG_iban_invalid_format_valid_checksum(self):
iban = 'VG96 VPVG A000 0123 4567 8901'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
    def test_VG_iban_invalid_checksum(self):
iban = 'VG96 VPVG 0000 0123 4567 8902'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
# Test Invalid IBANs
def test_iban_invalid_country_code_invalid_checksum(self):
iban = 'AB150120690000003111141'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_iban_invalid_country_code_valid_checksum(self):
iban = 'AB150120690000003111141'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_iban_too_short_valid_checksum(self):
iban = 'IL15 0120 6900 0000'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_iban_too_long_valid_checksum(self):
iban = 'IL15 0120 6900 0000 3111 0120 6900 0000 3111 141'
iban = update_iban_checksum(iban)
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
def test_invalid_IL_iban_with_exact_context_does_not_change_score(self):
iban = 'IL150120690000003111141'
context = 'my iban number is '
results = iban_recognizer.analyze(context + iban, entities)
assert len(results) == 0
def test_AL_iban_invalid_country_code_but_checksum_is_correct(self):
iban = 'AM47212110090000000235698740'
results = iban_recognizer.analyze(iban, entities)
assert len(results) == 0
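# A minimal sketch (assumption: not part of the original test suite) of the
# ISO 7064 mod-97 rule these tests exercise: move the first four characters to
# the end, map letters to numbers (A=10 ... Z=35), and a valid IBAN leaves a
# remainder of 1 when the resulting digit string is taken mod 97. The helper
# name `iban_mod97_ok` is hypothetical, for illustration only.
def iban_mod97_ok(iban: str) -> bool:
    s = iban.replace(' ', '').upper()
    rearranged = s[4:] + s[:4]
    digits = ''.join(str(int(ch, 36)) for ch in rearranged)  # '0'-'9' map to themselves, 'A' -> '10', ..., 'Z' -> '35'
    return int(digits) % 97 == 1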
|
python
|
"""
Define the list of possible commands that TeX might handle. These commands
might be composed of multiple instructions, such as 'input', which requires
characters forming a file-name as an argument.
"""
from enum import Enum
class Commands(Enum):
assign = 'ASSIGN'
relax = 'RELAX'
left_brace = 'LEFT_BRACE'
right_brace = 'RIGHT_BRACE'
begin_group = 'BEGIN_GROUP'
end_group = 'END_GROUP'
show_token = 'SHOW_TOKEN'
show_box = 'SHOW_BOX'
show_lists = 'SHOW_LISTS'
show_the = 'SHOW_THE'
ship_out = 'SHIP_OUT'
ignore_spaces = 'IGNORE_SPACES'
set_after_assignment_token = 'SET_AFTER_ASSIGNMENT_TOKEN'
add_to_after_group_tokens = 'ADD_TO_AFTER_GROUP_TOKENS'
message = 'MESSAGE'
error_message = 'ERROR_MESSAGE'
open_input = 'OPEN_INPUT'
close_input = 'CLOSE_INPUT'
open_output = 'OPEN_OUTPUT'
close_output = 'CLOSE_OUTPUT'
write = 'WRITE'
do_special = 'DO_SPECIAL'
add_penalty = 'ADD_PENALTY'
add_kern = 'ADD_KERN'
add_math_kern = 'ADD_MATH_KERN'
un_penalty = 'UN_PENALTY'
un_kern = 'UN_KERN'
un_glue = 'UN_GLUE'
mark = 'MARK'
insert = 'INSERT'
vertical_adjust = 'VERTICAL_ADJUST'
add_leaders = 'ADD_LEADERS'
add_space = 'ADD_SPACE'
add_box = 'ADD_BOX'
unpack_horizontal_box = 'UNPACK_HORIZONTAL_BOX'
unpack_vertical_box = 'UNPACK_VERTICAL_BOX'
indent = 'INDENT'
no_indent = 'NO_INDENT'
par = 'PAR'
add_horizontal_glue = 'ADD_HORIZONTAL_GLUE'
add_vertical_glue = 'ADD_VERTICAL_GLUE'
move_box_left = 'MOVE_BOX_LEFT'
move_box_right = 'MOVE_BOX_RIGHT'
raise_box = 'RAISE_BOX'
lower_box = 'LOWER_BOX'
add_horizontal_rule = 'ADD_HORIZONTAL_RULE'
add_vertical_rule = 'ADD_VERTICAL_RULE'
horizontal_align = 'HORIZONTAL_ALIGN'
vertical_align = 'VERTICAL_ALIGN'
end = 'END'
dump = 'DUMP'
add_control_space = 'CONTROL_SPACE'
add_character_explicit = 'ADD_CHARACTER_EXPLICIT'
add_character_code = 'ADD_CHARACTER_CODE'
add_character_token = 'ADD_CHARACTER_TOKEN'
add_accent = 'ADD_ACCENT'
add_italic_correction = 'ADD_ITALIC_CORRECTION'
add_discretionary = 'ADD_DISCRETIONARY'
add_discretionary_hyphen = 'ADD_DISCRETIONARY_HYPHEN'
do_math_shift = 'DO_MATH_SHIFT'
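# A minimal usage sketch (assumption: not part of the original module): Enum
# members can be recovered from their string value, which is convenient when
# mapping parsed control sequences onto commands.
if __name__ == '__main__':
    cmd = Commands('PAR')  # look up a member by its value
    assert cmd is Commands.par
    print(cmd.name, cmd.value)  # -> par PAR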
|
python
|
aluno = dict()
nome = input('Name: ')
media = float(input('Average grade: '))
aluno['nome'] = nome
aluno['media'] = media
if media < 5:
    aluno['status'] = 'Failed!'
elif 5 <= media < 7:
    aluno['status'] = 'Make-up exam!'
else:
    aluno['status'] = 'Passed!'
print(f'Name: {aluno["nome"]}.')
print(f'Average grade: {aluno["media"]}.')
print(f'Status: {aluno["status"]}')
|
python
|
import config
config.setup_examples()
import infermedica_api
if __name__ == '__main__':
api = infermedica_api.get_api()
    print('Look for evidence containing the phrase "headache":')
    print(api.search('headache'), end="\n\n")
    print('Look for evidence containing the phrase "breast", restricted to female-specific symptoms:')
    print(api.search('breast', sex='female'), end="\n\n")
    print('Look for evidence containing the phrase "breast", restricted to female-specific symptoms, limited to 5 results:')
    print(api.search('breast', sex='female', max_results=5), end="\n\n")
    print('Look for symptoms and risk factors containing the phrase "trauma":')
print(api.search('trauma', filters=[
infermedica_api.SEARCH_FILTERS.SYMPTOMS, infermedica_api.SEARCH_FILTERS.RISK_FACTORS]), end="\n\n")
|
python
|
from flask import Flask, request
import pandas as pd
UPLOAD_FOLDER = 'upload'
ALLOWED_EXTENSIONS = {'xlsx'}
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route("/upload", methods=['GET', 'POST'])
def upload_file():
    if request.method == 'POST':
        file1 = request.files['file1']
        file2 = request.files['file2']
        if file1 and allowed_file(file1.filename) and file2 and allowed_file(file2.filename):
            dh1 = pd.read_excel(file1)
            dh1 = dh1.dropna(how='any')
            dh1z = dh1.iloc[:, [1, 2, 3, 4, 5, 6, 7]]
            dh1z.columns = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
            dh1z.to_json("DH1.json")
            dh2 = pd.read_excel(file2)
            dh2 = dh2.dropna(how='any')
            dh2z = dh2.iloc[:, [1, 2, 3, 4, 5, 6, 7]]
            dh2z.columns = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
            dh2z.to_json("DH2.json")
return 'Done! :)'
return '''
<!doctype html>
<title>Upload an excel file</title>
<h1>Excel file upload (xlsx only)</h1>
<form action="" method=post enctype=multipart/form-data><p>
DH1:<input type=file name=file1><br>
DH2:<input type=file name=file2><br>
<input type=submit value=Upload>
</form>
'''
if __name__ == "__main__":
app.run()
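# Example request (assumption: Flask's built-in dev server on its default port 5000):
#   curl -F "file1=@dh1.xlsx" -F "file2=@dh2.xlsx" http://localhost:5000/upload
# A successful upload writes DH1.json and DH2.json to the working directory and
# responds with 'Done! :)'.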
|
python
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import rasa_core
from rasa_core.agent import Agent
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
from rasa_core.interpreter import RasaNLUInterpreter
from rasa_core.utils import EndpointConfig
from rasa_core.run import serve_application
logger = logging.getLogger(__name__)
def train_dialogue(domain_file = 'chat_domain.yml',
model_path = './models/dialogue',
training_data_file = './data/stories.md'):
agent = Agent(domain_file, policies = [MemoizationPolicy(), KerasPolicy()])
data = agent.load_data(training_data_file)
agent.train(
data,
epochs = 300,
batch_size = 50,
validation_split = 0.2)
agent.persist(model_path)
return agent
def run_bot(serve_forever=True):
interpreter = RasaNLUInterpreter('./models/nlu/default/iitnlu')
action_endpoint = EndpointConfig(url="http://localhost:5055/webhook")
agent = Agent.load('./models/dialogue', interpreter=interpreter, action_endpoint=action_endpoint)
    serve_application(agent, channel='cmdline')
return agent
if __name__ == '__main__':
#train_dialogue()
run_bot()
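# Usage note (assumption): run train_dialogue() once to produce ./models/dialogue,
# and start the custom action server on port 5055 before calling run_bot().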
|
python
|
import abc
from collections import namedtuple, OrderedDict
from typing import Collection, Optional, Union, Iterable, Tuple, Generator, Set, Dict, List, Any, Callable
from profilehooks import timecall
import logging
import itertools
import random
from .actions import pass_actions, tichu_actions, no_tichu_actions, play_dog_actions, all_wish_actions_gen, TradeAction, \
MutableTrick
from .actions import (PlayerAction, PlayCombination, PlayFirst, PlayBomb, TichuAction, WishAction, PassAction,
WinTrickAction, GiveDragonAwayAction, CardTrade, Trick)
from .cards import CardSet, Card, CardRank, Deck, DOG_COMBINATION
from .error import TichuEnvValueError, LogicError, IllegalActionError
from .utils import check_param, check_isinstance, check_all_isinstance, check_true
__all__ = ('TichuState', 'HandCards', 'History', 'BaseTichuState',
'InitialState', 'FullCardsState', 'BeforeTrading', 'AfterTrading', 'RolloutTichuState')
logger = logging.getLogger(__name__)
class HandCards(object):
__slots__ = ('_cards', )
def __init__(self, cards0: Iterable=(), cards1: Iterable=(), cards2: Iterable=(), cards3: Iterable=()):
self._cards: Tuple[CardSet, CardSet, CardSet, CardSet] = (CardSet(cards0),
CardSet(cards1),
CardSet(cards2),
CardSet(cards3))
def has_cards(self, player: int, cards: Union[Collection[Card], Card]):
assert player in range(4)
try:
res = set(cards).issubset(self._cards[player])
assert all(isinstance(c, Card) for c in cards)
return res
except TypeError:
# cards is only 1 single card
assert isinstance(cards, Card)
return cards in self._cards[player]
def remove_cards(self, player: int, cards: Collection[Card], raise_on_uncomplete=True):
"""
:param player:
:param cards:
        :param raise_on_uncomplete: If True, raises a TichuEnvValueError when the player does not have all the cards
:return:
>>> hc = HandCards((Card.DOG, Card.TWO_HOUSE), (Card.PHOENIX, Card.DRAGON), (Card.FIVE_HOUSE,), (Card.SIX_HOUSE,))
>>> hc.remove_cards(0, (Card.DOG,))
"""
# make sure cards is a set
if not isinstance(cards, Set):
cards = set(cards)
assert all(isinstance(c, Card) for c in cards)
new_cards = list((c for c in self._cards[player] if c not in cards))
if raise_on_uncomplete and len(new_cards) + len(cards) != len(self._cards[player]):
raise TichuEnvValueError("Not all cards can be removed.")
return HandCards(
*[new_cards if player == k else self._cards[k] for k in range(4)]
)
def iter_all_cards(self, player: int=None):
"""
:param player: If specified, iterates only over the cards of this player.
:return: Iterator over all single cards in all hands if 'player' is not specified
"""
if player is None:
return itertools.chain(*self._cards)
else:
return iter(self._cards[player])
def as_list_of_lists(self):
return [list(cards) for cards in self._cards]
def __iter__(self):
return self._cards.__iter__()
def __getitem__(self, item):
return self._cards[item]
def __hash__(self):
return hash(self._cards)
def __eq__(self, other):
return (other.__class__ == self.__class__
and all(sc == oc for sc, oc in itertools.zip_longest(self.iter_all_cards(),
other.iter_all_cards())))
def __repr__(self):
return(
"""
0: {}
1: {}
2: {}
3: {}
"""
).format(*map(str, self._cards))
def __str__(self):
return self.__repr__()
class MutableHandCards(HandCards):
__slots__ = ('_cards',)
def __init__(self, cards0: Iterable = (), cards1: Iterable = (), cards2: Iterable = (), cards3: Iterable = ()):
super().__init__(cards0, cards1, cards2, cards3)
# make list
self._cards: List[CardSet] = list(self._cards)
@classmethod
def from_immutable(cls, handcards):
return cls(*handcards._cards)
def remove_cards(self, player: int, cards: Collection[Card], raise_on_uncomplete=True):
len_oldcards = len(self._cards[player])
self._cards[player] = CardSet((c for c in self._cards[player] if c not in cards))
if raise_on_uncomplete and len(self._cards[player]) + len(cards) != len_oldcards:
raise TichuEnvValueError("Not all cards can be removed.")
else:
return self
def __hash__(self):
raise AttributeError("MutableHandCards can't be hashed")
def __eq__(self, other):
return self is other
def __str__(self):
return(
""" {me.__class__.__name__}
0: {}
1: {}
2: {}
3: {}
"""
).format(*map(str, self._cards), me=self)
class WonTricks(object):
__slots__ = ('_tricks',)
def __init__(self, tricks0: Iterable[Trick]=(), tricks1: Iterable[Trick]=(), tricks2: Iterable[Trick]=(), tricks3: Iterable[Trick]=()):
        self._tricks: Tuple[Tuple[Trick, ...], ...] = (tuple(tricks0), tuple(tricks1), tuple(tricks2), tuple(tricks3))
assert all(isinstance(t, Trick) for t in itertools.chain(*self._tricks))
def add_trick(self, player: int, trick: Trick):
"""
:param player:
:param trick:
        :return: New WonTricks instance with the trick appended to the player's won tricks
"""
return WonTricks(*[(tricks + (trick,) if k == player else tricks) for k, tricks in enumerate(self._tricks)])
def iter_all_tricks(self, player: int=None):
"""
:param player: If specified, iterates only over the tricks won by this player.
:return: Iterator over all tricks that have been won if 'player' is not specified.
"""
if player is None:
return itertools.chain(*self._tricks)
        else:
            return iter(self._tricks[player])
def __iter__(self):
return self._tricks.__iter__()
def __getitem__(self, item):
return self._tricks.__getitem__(item)
def __hash__(self):
return hash(self._tricks)
def __eq__(self, other):
return (other.__class__ == self.__class__
and all(st == ot for st, ot in itertools.zip_longest(self.iter_all_tricks(),
other.iter_all_tricks())))
def __str__(self):
return (
"""
0 won {} tricks
1 won {} tricks
2 won {} tricks
3 won {} tricks
"""
).format(*[str(len(wt)) for wt in self._tricks])
class MutableWonTricks(WonTricks):
__slots__ = ('_tricks',)
def __init__(self, tricks0: Iterable[Trick]=(), tricks1: Iterable[Trick]=(), tricks2: Iterable[Trick]=(), tricks3: Iterable[Trick]=()):
super().__init__(tricks0, tricks1, tricks2, tricks3)
        self._tricks: Tuple[List[Trick], ...] = (list(tricks0), list(tricks1), list(tricks2), list(tricks3))
@classmethod
def from_immutable(cls, wontricks):
return cls(*wontricks._tricks)
def add_trick(self, player: int, trick: Trick):
"""
:param player:
:param trick:
:return: self
"""
self._tricks[player].append(trick)
return self
def __hash__(self):
raise AttributeError("MutableWonTricks can't be hashed")
def __eq__(self, other):
return self is other
class History(object):
__slots__ = ('_wished', '_state_action_tuple')
def __init__(self, _wished: bool=False, _tup=tuple()):
self._wished: bool = _wished
        self._state_action_tuple: Tuple[Union['BaseTichuState', PlayerAction], ...] = _tup
def last_state(self)->Optional['BaseTichuState']:
for elem in reversed(self._state_action_tuple):
if isinstance(elem, BaseTichuState):
return elem
return None
def wished(self)->bool:
"""
:return: True iff at some point a wish was made, false otherwise
"""
return self._wished
def new_state_actions(self, state: 'BaseTichuState', actions: Iterable[PlayerAction])->'History':
"""
:param state:
:param actions:
:return: copy of this History instance with the state and actions appended to it.
"""
actions = tuple(actions)
assert isinstance(state, BaseTichuState)
assert all(isinstance(action, PlayerAction) for action in actions)
_wished = self._wished or any(isinstance(action, WishAction) for action in actions)
return History(_wished=_wished, _tup=self._state_action_tuple + (state, *actions))
def new_state_action(self, state: 'BaseTichuState', action: PlayerAction)->'History':
"""
:param state:
:param action:
:return: copy of this History instance with the state and action appended to it.
"""
assert isinstance(state, TichuState)
assert isinstance(action, PlayerAction)
new_tuple = self._state_action_tuple + (state, action)
return History(_wished=self._wished or isinstance(action, WishAction), _tup=new_tuple)
def add_last_state(self, state: 'BaseTichuState'):
assert isinstance(state, TichuState)
new_tuple = self._state_action_tuple + (state, )
return History(_wished=self._wished, _tup=new_tuple)
def actions(self):
yield from (a for a in self._state_action_tuple if isinstance(a, PlayerAction))
def __repr__(self):
return "{me.__class__.__name__}(length: {l})".format(me=self, l=len(self._state_action_tuple))
def __str__(self):
last_state = self.last_state()
try:
ranking = last_state.ranking
except AttributeError:
ranking = "No ranking"
try:
points = last_state.count_points() if last_state.is_terminal() else "State is not Terminal"
except AttributeError:
points = "No points"
actions = list(self.actions())
actions_str = " " if len(actions) else "EMPTY"
for action in actions:
actions_str += " -> "+str(action)
if isinstance(action, WinTrickAction):
actions_str += "\n "
return (
"""
{me.__class__.__name__}
length: {length}
last ranking: {ranking}
last points: {points}
actions:
{actions}
""".format(me=self, length=len(self._state_action_tuple), ranking=ranking, points=points,
actions=actions_str)
)
class BaseTichuState(object, metaclass=abc.ABCMeta):
def __init__(self, allow_tichu=True, allow_wish=True, discard_history:bool=False):
self._allow_tichu = allow_tichu
self._allow_wish = allow_wish
self.discard_history = discard_history
self._possible_actions_set: Set[PlayerAction] = None
self._possible_actions_list: List[PlayerAction] = None
self._state_transitions: Dict[PlayerAction, TichuState] = dict()
@property
@abc.abstractmethod
def player_pos(self):
raise NotImplementedError("Is an abstract method!")
@property
@abc.abstractmethod
def handcards(self):
raise NotImplementedError("Is an abstract method!")
@property
@abc.abstractmethod
def won_tricks(self):
raise NotImplementedError("Is an abstract method!")
@property
@abc.abstractmethod
def trick_on_table(self):
raise NotImplementedError("Is an abstract method!")
@property
@abc.abstractmethod
def wish(self):
raise NotImplementedError("Is an abstract method!")
@property
@abc.abstractmethod
def ranking(self):
raise NotImplementedError("Is an abstract method!")
@property
@abc.abstractmethod
def announced_tichu(self):
raise NotImplementedError("Is an abstract method!")
@property
@abc.abstractmethod
def announced_grand_tichu(self):
raise NotImplementedError("Is an abstract method!")
@property
@abc.abstractmethod
def history(self):
raise NotImplementedError("Is an abstract method!")
@property
def _current_player_handcards(self):
return self.handcards[self.player_pos]
@property
@timecall(immediate=False)
def possible_actions_set(self)->Set[PlayerAction]:
if self._possible_actions_set is None:
self._possible_actions_set = frozenset(self.possible_actions_list)
return self._possible_actions_set
@property
@timecall(immediate=False)
def possible_actions_list(self)->List[PlayerAction]:
if self._possible_actions_list is None:
self._possible_actions_list = list(self.possible_actions_gen())
return self._possible_actions_list
@abc.abstractmethod
def change(self, **attributes_to_change) -> 'TichuState':
"""
:param attributes_to_change: kwargs with the name of TichuState Attributes
        :return: A copy of this TichuState instance with the given attributes replaced
"""
def possible_actions_gen(self)->Generator[PlayerAction, None, None]:
"""
:return: Generator yielding all possible actions in this state
"""
# ######### tichu? ######### (ie. player just played the first time (next_action keeps the player the same in this case))
if self._allow_tichu:
# last acting player has to decide on announcing a tichu
last_act = self.trick_on_table.last_action
if (isinstance(last_act, PlayCombination)
and last_act.player_pos not in self.announced_tichu
and last_act.player_pos not in self.announced_grand_tichu
and 14 - len(last_act.combination) == len(self.handcards[last_act.player_pos])):
yield tichu_actions[last_act.player_pos]
yield no_tichu_actions[last_act.player_pos]
return # player has to decide whether to announce a tichu or not
# ######### Round Ends with double win? #########
if self.is_double_win():
assert self.is_terminal() # -> No action possible
return
# store last played combination (action)
last_combination_action = self.trick_on_table.last_combination_action
last_combination = self.trick_on_table.last_combination
# ######### Round ends with the 3rd player finishing? #########
if len(self.ranking) >= 3: # Round ends -> terminal
if self.trick_on_table.is_empty():
assert self.is_terminal() # -> No action possible
return
else:
# give the remaining trick on table to leader
yield WinTrickAction(player_pos=last_combination_action.player_pos, trick=self.trick_on_table)
return # Round ends
# ######### wish? #########
if (self._allow_wish and not self.history.wished()) and (not self.trick_on_table.is_empty()) and Card.MAHJONG in last_combination:
# Note that self.player_pos is not equal to the wishing player pos.
yield from all_wish_actions_gen(self.trick_on_table.last_combination_action.player_pos)
return # Player must wish something, no other actions allowed
# ######### trick ended? #########
if self.trick_on_table.is_finished():
# dragon away?
if Card.DRAGON in last_combination:
assert isinstance(self.player_pos, int), str(self.player_pos)
yield GiveDragonAwayAction(self.player_pos, (self.player_pos + 1) % 4, trick=self.trick_on_table)
yield GiveDragonAwayAction(self.player_pos, (self.player_pos - 1) % 4, trick=self.trick_on_table)
# Normal Trick
else:
assert isinstance(self.player_pos, int), str(self.player_pos)
yield WinTrickAction(player_pos=self.player_pos, trick=self.trick_on_table)
return # No more actions allowed
# ######### DOG? #########
if DOG_COMBINATION == last_combination: # Dog was played
# logger.debug("Dog was played -> Win trick action")
yield WinTrickAction(player_pos=(last_combination_action.player_pos + 2) % 4, trick=self.trick_on_table)
return # No more actions allowed
# ######### possible combinations and wish fulfilling. #########
can_fulfill_wish = False
# initialise possible combinations ignoring the wish
possible_combinations = list(self._current_player_handcards.possible_combinations(played_on=last_combination))
# logger.debug("possible_combinations: {}".format(possible_combinations))
if self.wish and self._current_player_handcards.contains_cardrank(self.wish):
# player may have to fulfill the wish
possible_combinations_wish = list(self._current_player_handcards.possible_combinations(played_on=last_combination, contains_rank=self.wish))
if len(possible_combinations_wish) > 0:
# player can and therefore has to fulfill the wish
can_fulfill_wish = True
possible_combinations = possible_combinations_wish
# ######### pass? #########
can_pass = not (self.trick_on_table.is_empty() or can_fulfill_wish)
if can_pass:
yield pass_actions[self.player_pos]
# ######### combinations ? ######### -> which combs
PlayactionClass = PlayFirst if self.trick_on_table.is_empty() else PlayCombination # Determine FirstPlay or PlayCombination
for comb in possible_combinations:
if comb == DOG_COMBINATION:
yield play_dog_actions[self.player_pos]
else:
yield PlayactionClass(player_pos=self.player_pos, combination=comb)
# TODO bombs ?
def next_state(self, action: PlayerAction)->'TichuState':
if action not in self.possible_actions_set:
raise IllegalActionError("{} is not a legal action in state: {}".format(action, self))
# cache the state transitions
if action in self._state_transitions:
return self._state_transitions[action]
# tichu (ie. player just played the first time (next_action keeps the player the same in this case))
elif isinstance(action, TichuAction):
next_s = self._next_state_on_tichu(action)
# wish
elif isinstance(action, WishAction):
next_s = self._next_state_on_wish(action)
# win trick (includes dragon away)?
elif isinstance(action, WinTrickAction):
next_s = self._next_state_on_win_trick(action)
# pass
elif isinstance(action, PassAction):
next_s = self._next_state_on_pass(action)
# combinations (includes playfirst, playdog, playbomb)
elif isinstance(action, PlayCombination):
next_s = self._next_state_on_combination(action)
else:
raise LogicError("An unknown action has been played")
self._state_transitions[action] = next_s
return next_s
def random_action(self)->PlayerAction:
return random.choice(self.possible_actions_list)
def _next_state_on_wish(self, wish_action: WishAction)->'TichuState':
return self.change(
wish=wish_action.wish,
trick_on_table=self.trick_on_table + wish_action,
history=self.history.new_state_action(self, wish_action)
)
def _next_state_on_tichu(self, tichu_action: TichuAction)->'TichuState':
h = self.history.new_state_action(self, tichu_action)
tot = self.trick_on_table + tichu_action
if DOG_COMBINATION == self.trick_on_table.last_combination:
tot = tot.finish()
if tichu_action.announce:
assert tichu_action.player_pos not in self.announced_grand_tichu
return self.change(
announced_tichu=self.announced_tichu.union({tichu_action.player_pos}),
trick_on_table=tot,
history=h
)
else:
return self.change(
trick_on_table=tot,
history=h
)
def _next_state_on_win_trick(self, win_trick_action: WinTrickAction)->'TichuState':
winner = win_trick_action.player_pos
assert self.player_pos == winner or len(self.ranking) >= 3, "action: {act}, winner:{winner}, state:{state}".format(act=win_trick_action, winner=winner, state=self)
# give trick to correct player
trick_to = winner
if isinstance(win_trick_action, GiveDragonAwayAction):
trick_to = win_trick_action.to
# determine next player
try:
next_player = winner if len(self.handcards[winner]) else self._next_player_turn()
except StopIteration:
# happens only right before the game ends
next_player = winner
assert self.is_double_win() or len(self.ranking) >= 3
return self.change(
player_pos=next_player,
won_tricks=self.won_tricks.add_trick(player=trick_to, trick=win_trick_action.trick),
trick_on_table=Trick(),
history=self.history.new_state_action(self, win_trick_action)
)
def _next_state_on_pass(self, pass_action: PassAction)->'TichuState':
assert pass_action.player_pos == self.player_pos
leading_player = self.trick_on_table.last_combination_action.player_pos
# try:
next_player_pos = self._next_player_turn()
# except StopIteration:
# # happens only right before the game ends
# next_player_pos = leading_player
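        # The disjunction below checks, in cyclic seating order, whether the
        # leading player sits between the current player and the next player
        # still holding cards: if so, everyone in between has passed and the
        # trick ends with the leading player as winner.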
if (leading_player == next_player_pos
or self.player_pos < leading_player < next_player_pos
or next_player_pos < self.player_pos < leading_player
or leading_player < next_player_pos < self.player_pos):
# trick ends with leading as winner
return self.change(
player_pos=leading_player,
trick_on_table=self.trick_on_table.finish(last_action=pass_action),
history=self.history.new_state_action(self, pass_action)
)
else:
return self.change(
player_pos=next_player_pos,
trick_on_table=self.trick_on_table + pass_action,
history=self.history.new_state_action(self, pass_action)
)
def _next_state_on_combination(self, comb_action: PlayCombination)->'TichuState':
played_comb = comb_action.combination
assert comb_action.player_pos == self.player_pos
# remove from handcards and add to trick on table
next_trick_on_table = self.trick_on_table + comb_action
next_handcards = self.handcards.remove_cards(player=self.player_pos, cards=played_comb.cards)
assert len(next_handcards[self.player_pos]) < len(self.handcards[self.player_pos])
assert next_handcards[self.player_pos].issubset(self.handcards[self.player_pos])
# ranking
next_ranking = self.ranking
if len(next_handcards[self.player_pos]) == 0:
# player finished
next_ranking = self.ranking + (self.player_pos,)
assert self.player_pos not in self.ranking
assert len(self.ranking) == len(set(self.ranking))
# dog
if played_comb == DOG_COMBINATION:
assert self.trick_on_table.is_empty()
next_player_pos = (self.player_pos+2) % 4 # Teammate
else:
# next players turn
# try:
next_player_pos = self._next_player_turn()
# except StopIteration:
# # happens only right before the game ends
# next_player_pos = (comb_action.player_pos + 1) % 4
# create state
return self.change(
player_pos=next_player_pos,
handcards=next_handcards,
trick_on_table=next_trick_on_table,
wish=None if played_comb.contains_cardrank(self.wish) else self.wish,
ranking=next_ranking,
history=self.history.new_state_action(self, comb_action)
)
def _next_player_turn(self) -> int:
"""
:return: the next player with non empty handcards
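        Raises StopIteration if no other player holds cards (callers catch this right before the game ends).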
"""
return next((ppos % 4 for ppos in range(self.player_pos + 1, self.player_pos + 4) if len(self.handcards[ppos % 4]) > 0))
def has_cards(self, player: int, cards: Collection[Card])->bool:
"""
:param player:
:param cards:
:return: True if the player has the given card, False otherwise
"""
return self.handcards.has_cards(player=player, cards=cards)
def is_terminal(self):
return self.is_double_win() or (self.trick_on_table.is_empty() and len(self.ranking) >= 3)
def is_double_win(self)->bool:
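        # the first two finishers sitting opposite each other (seats differing by 2 mod 4) means one team emptied its hands first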
return len(self.ranking) >= 2 and self.ranking[0] == (self.ranking[1] + 2) % 4
def count_points(self) -> Tuple[int, int, int, int]:
"""
Only correct if the state is terminal
:return: tuple of length 4 with the points of each player at the corresponding index.
"""
# TODO Test
if not self.is_terminal():
logger.warning("Calculating points of a NON terminal state! Result may be incorrect.")
# calculate tichu points
tichu_points = [0, 0, 0, 0]
for gt_pos in self.announced_grand_tichu:
tichu_points[gt_pos] += 200 if gt_pos == self.ranking[0] else -200
for t_pos in self.announced_tichu:
tichu_points[t_pos] += 100 if t_pos == self.ranking[0] else -100
points = tichu_points
# fill the ranking to 4
final_ranking = list(self.ranking) + [ppos for ppos in range(4) if ppos not in self.ranking]
assert len(final_ranking) == 4, "{} -> {}".format(self.ranking, final_ranking)
if self.is_double_win():
# double win (200 for winner team)
points[final_ranking[0]] += 100
points[final_ranking[1]] += 100
else:
# not double win
for rank in range(3): # first 3 players get the points in their won tricks
player_pos = final_ranking[rank]
points[player_pos] += sum(t.points for t in self.won_tricks[player_pos])
# first player gets the points of the last players tricks
winner = final_ranking[0]
looser = final_ranking[3]
points[winner] += sum(t.points for t in self.won_tricks[looser])
# the handcards of the last player go to the enemy team
points[(looser + 1) % 4] += sum(t.points for t in self.handcards[looser])
# fi
# sum the points of each team
t1 = points[0] + points[2]
t2 = points[1] + points[3]
points[0] = t1
points[2] = t1
points[1] = t2
points[3] = t2
assert len(points) == 4
assert points[0] == points[2] and points[1] == points[3]
return tuple(points)
def __str__(self):
return (
"""
{me.__class__.__name__}
player: {me.player_pos}
handcards: {me.handcards}
won tricks: {me.won_tricks}
trick on table: {me.trick_on_table}
wish: {me.wish}
ranking: {me.ranking}
tichus: {me.announced_tichu}
grand tichus: {me.announced_grand_tichu}
history: {me.history}
""").format(me=self)
class _BaseTichuStateImpl(BaseTichuState, metaclass=abc.ABCMeta):
"""
Implements the properties of BaseTichuState with 'raise AttributeError'
"""
__slots__ = ()
@property
def player_pos(self):
raise AttributeError()
@property
def handcards(self):
raise AttributeError()
@property
def won_tricks(self):
raise AttributeError()
@property
def trick_on_table(self):
raise AttributeError()
@property
def wish(self):
raise AttributeError()
@property
def ranking(self):
raise AttributeError()
@property
def announced_tichu(self):
raise AttributeError()
@property
def announced_grand_tichu(self):
raise AttributeError()
@property
def history(self):
raise AttributeError()
def change(self, **attributes_to_change):
raise AttributeError()
class TichuState(namedtuple("TichuState", [
"player_pos",
"handcards",
"won_tricks",
"trick_on_table",
"wish",
"ranking",
"announced_tichu",
"announced_grand_tichu",
"history"
]), _BaseTichuStateImpl):
__slots__ = ()
def __new__(cls, *args, allow_tichu=True, allow_wish=True, discard_history: bool=False, **kwargs):
return super().__new__(cls, *args, **kwargs)
def __init__(self, player_pos: int, handcards: HandCards, won_tricks: WonTricks,
trick_on_table: Trick, wish: Optional[CardRank], ranking: tuple,
announced_tichu: frozenset, announced_grand_tichu: frozenset,
history: History, allow_tichu: bool=True, allow_wish: bool=True, discard_history:bool=False):
super().__init__(allow_tichu=allow_tichu, allow_wish=allow_wish, discard_history=discard_history)
# some paranoid checks
assert player_pos in range(4)
assert isinstance(handcards, HandCards)
assert isinstance(won_tricks, WonTricks)
assert wish is None or isinstance(wish, CardRank)
assert isinstance(ranking, tuple)
assert all(r in range(4) for r in ranking)
assert isinstance(announced_tichu, frozenset)
assert isinstance(announced_grand_tichu, frozenset)
assert all(r in range(4) for r in announced_tichu)
assert all(r in range(4) for r in announced_grand_tichu)
assert isinstance(trick_on_table, Trick)
assert isinstance(history, History)
@timecall(immediate=False)
def change(self, **attributes_to_change)->'TichuState':
"""
:param attributes_to_change: kwargs with the name of TichuState Attributes
:return: A copy ot this TichuState instance with the given attributes replaced
"""
if len(attributes_to_change) == 0:
return self
if self.discard_history:
attributes_to_change['history'] = History()
return TichuState(*self._replace(**attributes_to_change), allow_tichu=self._allow_tichu, allow_wish=self._allow_wish, discard_history=self.discard_history)
def copy_discard_history(self)->'TichuState':
ts = self.change(history=History())
ts.discard_history = True
return ts
# state is immutable, so we can simplify the deepcopies.
def __deepcopy__(self, memo):
return self
def __copy__(self):
return self
class InitialState(_BaseTichuStateImpl):
"""
State where all players have 8 cards (before announcing their grand tichus)
"""
__slots__ = ('_handcards', '_history')
def __init__(self):
piles_of_8 = [p[:8] for p in Deck(full=True).split(nbr_piles=4, random_=True)]
assert len(piles_of_8) == 4
assert all(len(p) == 8 for p in piles_of_8)
super().__init__()
self._handcards = HandCards(*piles_of_8)
self._history = History()
@property
def handcards(self):
return self._handcards
@property
def history(self):
return self._history
def next_state(self, players: Iterable[int]) -> 'FullCardsState':
players = frozenset(players)
check_param(all(p in range(4) for p in players), msg="[InitialState.next_state]: All players must be in range(4).")
return self.announce_grand_tichus(players)
def announce_grand_tichus(self, players: Iterable[int])->'FullCardsState':
return FullCardsState(self, players)
def is_terminal(self):
return False
class FullCardsState(_BaseTichuStateImpl):
"""
State where the players have 14 cards and announced their grand tichus.
All players may announce a Tichu now
"""
__slots__ = ('_handcards', '_history', '_announced_grand_tichu')
def __init__(self, initial_state: InitialState, players_announced_grand_tichu: Iterable[int]):
players_announced_grand_tichu = frozenset(players_announced_grand_tichu)
check_param(all(i in range(4) for i in players_announced_grand_tichu))
remaining_cards = set(Deck(full=True)) - set(initial_state.handcards.iter_all_cards())
piles = Deck(full=False, cards=remaining_cards).split(nbr_piles=4, random_=True)
assert len(piles) == 4
assert all(len(p) == 6 for p in piles), str(piles)
super().__init__()
self._handcards = HandCards(*(itertools.chain(crds, piles[k]) for k, crds in enumerate(initial_state.handcards)))
self._announced_grand_tichu = players_announced_grand_tichu
self._history = initial_state.history.new_state_actions(initial_state, (TichuAction(pp, announce_tichu=pp in players_announced_grand_tichu, grand=True) for pp in range(4)))
@property
def handcards(self):
return self._handcards
@property
def history(self):
return self._history
@property
def announced_grand_tichu(self):
return self._announced_grand_tichu
def next_state(self, players: Iterable[int]) -> 'BeforeTrading':
players = frozenset(players)
check_param(all(p in range(4) for p in players), msg="[FullCardsState.next_state]: All players must be in range(4).")
return self.announce_tichus(players)
def announce_tichus(self, players: Iterable[int])->'BeforeTrading':
return BeforeTrading(self, players)
def is_terminal(self):
return False
class BeforeTrading(_BaseTichuStateImpl):
"""
In this state all players have to trade 3 cards.
"""
__slots__ = ('_handcards', '_history', '_announced_grand_tichu', '_announced_tichu')
def __init__(self, prev_state: FullCardsState, players_announced_tichu: Iterable[int]):
players_announced_tichu = frozenset(players_announced_tichu)
check_param(all(i in range(4) for i in players_announced_tichu))
check_isinstance(prev_state, FullCardsState)
super().__init__()
self._handcards = prev_state.handcards
self._announced_grand_tichu = prev_state.announced_grand_tichu
self._announced_tichu = players_announced_tichu
self._history = prev_state.history.new_state_actions(prev_state, (TichuAction(pp, announce_tichu=pp in players_announced_tichu) for pp in range(4)))
@property
def handcards(self):
return self._handcards
@property
def history(self):
return self._history
@property
def announced_grand_tichu(self):
return self._announced_grand_tichu
@property
def announced_tichu(self):
return self._announced_tichu
def next_state(self, trades: Collection[CardTrade]) -> 'AfterTrading':
check_all_isinstance(trades, CardTrade)
return self.trade_cards(trades)
def trade_cards(self, trades: Collection[CardTrade]) -> 'AfterTrading':
"""
Same as: AfterTrading.from_beforetrading(<this BeforeTrading instance>, trades=trades)
:param trades: must have length of 4*3 = 12 and contain only legal trades
:return: The state after the given cards have been traded.
"""
return AfterTrading.from_beforetrading(self, trades=trades)
def is_terminal(self):
return False
class AfterTrading(TichuState):
"""
All players have 14 cards and have already traded. From this state on the Round starts with the player having the MAHJONG.
    This is a regular TichuState, so normal play proceeds from here.
"""
__slots__ = ()
def __init__(self, *args, **kwargs):
check_true(Card.MAHJONG in self.handcards[self.player_pos])
check_true(all(len(hc) == 14 for hc in self.handcards))
super().__init__(*args, **kwargs)
@classmethod
def from_beforetrading(cls, before_trading: BeforeTrading, trades: Collection[CardTrade]) -> 'AfterTrading':
assert len(trades) == 0 or len(trades) == 12 # 4 players trade 3 cards each, an empty trades collection bypasses the trading phase
new_handcards = before_trading.handcards.as_list_of_lists()
trade_actions = []
for from_, to, card in trades:
trade_actions.append(TradeAction(from_=from_, to=to, card=card))
assert card in new_handcards[from_]
new_handcards[from_].remove(card)
assert card not in new_handcards[from_]
new_handcards[to].append(card)
assert card in new_handcards[to]
try:
starting_player = next((ppos for ppos, hc in enumerate(new_handcards) if Card.MAHJONG in hc))
except StopIteration as se:
raise LogicError("No player seems to have the MAHJONG.") from se
else:
return cls(
player_pos=starting_player,
handcards=HandCards(*new_handcards),
won_tricks=WonTricks(),
trick_on_table=Trick(),
wish=None,
ranking=(),
announced_tichu=before_trading.announced_tichu,
announced_grand_tichu=before_trading.announced_grand_tichu,
history=before_trading.history.new_state_actions(before_trading, trade_actions),
allow_tichu=True
)
def is_terminal(self):
return False
class RolloutTichuState(BaseTichuState):
__slots__ = ('_handcards', '_announced_grand_tichu', '_announced_tichu', '_player_pos', '_won_tricks',
'_trick_on_table', '_wish', '_ranking')
def __init__(self, player_pos: int, handcards: HandCards, won_tricks: WonTricks,
trick_on_table: Trick, wish: Optional[CardRank], ranking: tuple,
announced_tichu: frozenset, announced_grand_tichu: frozenset,
history: History):
super().__init__(allow_tichu=False, allow_wish=False)
assert isinstance(player_pos, int), str(player_pos)
self._player_pos = player_pos
self._handcards = MutableHandCards.from_immutable(handcards)
self._won_tricks = MutableWonTricks.from_immutable(won_tricks)
self._trick_on_table = MutableTrick.from_immutable(trick_on_table)
self._wish = wish
self._ranking = list(ranking)
self._announced_tichu = announced_tichu
self._announced_grand_tichu = announced_grand_tichu
# self._history = history
@classmethod
def from_tichustate(cls, state: TichuState):
return cls(*state)
@property
def handcards(self):
return self._handcards
@property
def trick_on_table(self):
return self._trick_on_table
@property
def wish(self):
return self._wish
@property
def announced_tichu(self):
return self._announced_tichu
@property
def ranking(self):
return self._ranking
@property
def player_pos(self):
return self._player_pos
@property
def history(self):
raise LogicError()
@property
def announced_grand_tichu(self):
return self._announced_grand_tichu
@property
def won_tricks(self):
return self._won_tricks
def random_action(self)->PlayerAction:
return random.choice(self.possible_actions_list)
def rollout(self, policy: Callable[[BaseTichuState], PlayerAction])->BaseTichuState:
while not self.is_terminal():
action = policy(self)
self.apply_action(action)
return self
def random_rollout(self)->BaseTichuState:
while not self.is_terminal():
self.apply_action(random.choice(self.possible_actions_list))
return self
@timecall(immediate=False)
def apply_action(self, action: PlayerAction)->'RolloutTichuState':
"""
Applies the action on this state (Modifies the calling instance).
:param action:
:return: self
"""
if action not in self.possible_actions_set:
raise IllegalActionError("{} is not a legal action in state: {}".format(action, self))
# (No Tichu and wish in rollout)
# win trick (includes dragon away)?
elif isinstance(action, WinTrickAction):
self._apply_win_trick_action(action)
# pass
elif isinstance(action, PassAction):
self._apply_pass_action(action)
# combinations (includes playfirst, playdog, playbomb)
elif isinstance(action, PlayCombination):
self._apply_combination(action)
else:
raise LogicError("An unknown action has been played")
# reset possible actions cache
self._possible_actions_set = None
self._possible_actions_list = None
return self
def _apply_win_trick_action(self, win_trick_action: WinTrickAction):
winner = win_trick_action.player_pos
assert self.player_pos == winner or len(self.ranking) >= 3, "action: {act}, winner:{winner}, state:{state}".format(act=win_trick_action, winner=winner, state=self)
# give trick to correct player
trick_to = winner
if isinstance(win_trick_action, GiveDragonAwayAction):
trick_to = win_trick_action.to
# determine next player
try:
next_player = winner if len(self.handcards[winner]) else self._next_player_turn()
except StopIteration:
# happens only right before the game ends
next_player = winner
assert self.is_double_win() or len(self.ranking) >= 3
self._player_pos = next_player
assert isinstance(self._won_tricks, MutableWonTricks)
self._won_tricks.add_trick(player=trick_to, trick=win_trick_action.trick)
self._trick_on_table = MutableTrick()
def _apply_pass_action(self, pass_action: PassAction):
assert pass_action.player_pos == self.player_pos
assert isinstance(self.player_pos, int), str(self.player_pos)
leading_player = self.trick_on_table.last_combination_action.player_pos
assert isinstance(leading_player, int), str(leading_player)+" "+str(self.trick_on_table)
next_player_pos = self._next_player_turn()
if (leading_player == next_player_pos
or self.player_pos < leading_player < next_player_pos
or next_player_pos < self.player_pos < leading_player
or leading_player < next_player_pos < self.player_pos):
# trick ends with leading as winner
self._player_pos = leading_player
assert isinstance(self.player_pos, int), str(self.player_pos)
self._trick_on_table = self.trick_on_table.finish(last_action=pass_action)
else:
self._player_pos = next_player_pos
assert isinstance(self._trick_on_table, MutableTrick)
assert isinstance(next_player_pos, int), str(next_player_pos)
self._trick_on_table.append(pass_action)
assert isinstance(self.player_pos, int), str(self.player_pos)
def _apply_combination(self, comb_action: PlayCombination):
played_comb = comb_action.combination
assert comb_action.player_pos == self.player_pos
# remove from handcards and add to trick on table
self._trick_on_table.append(comb_action)
assert isinstance(self._handcards, MutableHandCards)
self._handcards.remove_cards(player=self.player_pos, cards=played_comb.cards)
# ranking
if len(self._handcards[self.player_pos]) == 0:
# player finished
assert self.player_pos not in self.ranking
assert len(self.ranking) == len(set(self.ranking))
self.ranking.append(self.player_pos)
# dog
if played_comb == DOG_COMBINATION:
assert len(self.trick_on_table) == 1, str(self.trick_on_table)
self._player_pos = (self.player_pos + 2) % 4 # Teammate
else:
self._player_pos = self._next_player_turn()
        # wish fulfilled?
if played_comb.contains_cardrank(self.wish):
self._wish = None
def _next_player_turn(self) -> int:
"""
:return: the next player with non empty handcards
"""
return next((ppos % 4 for ppos in range(self.player_pos + 1, self.player_pos + 4) if len(self.handcards[ppos % 4]) > 0))
def has_cards(self, player: int, cards: Collection[Card])->bool:
"""
:param player:
:param cards:
:return: True if the player has the given card, False otherwise
"""
return self.handcards.has_cards(player=player, cards=cards)
def is_terminal(self):
return self.is_double_win() or (self.trick_on_table.is_empty() and len(self.ranking) >= 3)
def is_double_win(self)->bool:
return len(self.ranking) >= 2 and self.ranking[0] == (self.ranking[1] + 2) % 4
# FOLLOWING METHODS SHOULD NOT BE USED AND RAISE LOGIC_ERROR
def change(self, **attributes_to_change) -> 'TichuState':
raise LogicError()
def _next_state_on_wish(self, wish_action: WishAction):
raise LogicError()
def _next_state_on_tichu(self, tichu_action: TichuAction):
raise LogicError()
def _next_state_on_win_trick(self, win_trick_action: WinTrickAction):
raise LogicError()
def _next_state_on_pass(self, pass_action: PassAction):
raise LogicError()
def _next_state_on_combination(self, comb_action: PlayCombination):
raise LogicError()
|
python
|
#!/usr/bin/python
import heat as h
dir='trace-azure-globe-6dc-24h-202005170045-202005180045'
src_dc_l = ['eastus2']
dst_dc_l = ['westus2', 'francecentral', 'australiaeast']
for src in src_dc_l:
for dst in dst_dc_l:
        if dst == src:  # compare the station names by value, not object identity
            continue
        df = dir + '/' + src + '-' + dst + '.log.txt'
        print(df)
##h.lat_heat_map(df, 1, 1, 12, 1, 'min', 'ms', 'ms')
#h.vertical_lat_heat_map(df, 1, 1, 12, 1, 'min', 'ms', 'ms')
#h.lat_heat_map(df, 1, 1, 15, 1, 'min', 'ms', 'ms')
h.lat_heat_map(df, 1, 1, 11, 1, 'min', 'ms', 'ms')
|
python
|
# Time: O(s*t) # for every node in s, you traverse every node in t.
# Space: O(s) # number of nodes in s
# Mar 30th '20
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def isSubtree(self, s: TreeNode, t: TreeNode) -> bool:
if (s and not t) or (t and not s):
return False
if not s and not t:
return True
if s.val == t.val:
return self.helper(s,t) or self.isSubtree(s.left, t) or self.isSubtree(s.right, t)
else:
return self.isSubtree(s.left, t) or self.isSubtree(s.right, t)
def helper(self, s, t):
if not s and not t:
return True
if (s and not t) or (t and not s):
return False
if s.val!=t.val:
return False
return self.helper(s.left, t.left) and self.helper(s.right, t.right)
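# An alternative, traversal-based implementation follows. Note that defining a
# second class Solution in the same module rebinds the name, so only this
# second version remains in effect.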
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def isSubtree(self, s: TreeNode, t: TreeNode) -> bool:
return self.traverse(s, t)
def traverse(self, s, t):
if s:
return self.equal_verify(s, t) or self.traverse(s.left,t) or self.traverse(s.right, t)
else:
return False
def equal_verify(self, s, t):
if not s and not t:
return True
if not s or not t:
return False
if s.val==t.val:
return self.equal_verify(s.left, t.left) and self.equal_verify(s.right, t.right)
return False
|
python
|
#!/usr/bin/python
# coding=utf-8
import logging
import itertools
import rdflib
# Import NameSpace RDF
from rdflib.namespace import RDF
from rdflib import Literal
# Import Namespace class for create new RDF Graph
from rdflib import Namespace
logging.basicConfig()
rdf = rdflib.Graph()
rdf.load("data.owl")
FOAF = Namespace("http://xmlns.com/foaf/0.1/")
DATA = Namespace("http://www.essepuntato.it/2013/citalo/test/data/")
person5 = DATA.person5
rdf.add((person5, RDF.type, FOAF.Person))
rdf.add((person5, FOAF.name, Literal("Pippo")))
rdf.add((person5, FOAF.age, Literal(30)))
# Build a list: for every triple matching the requirement (rdf:type foaf:Person), collect its subject
people = []
for s,p,o in rdf.triples((None, RDF.type, FOAF.Person)):
people += [s]
# Calculate all combinations
pairs = itertools.combinations(people,2)
# For every pair, add a new knows statement
for pair in pairs:
person1 = pair[0]
person2 = pair[1]
rdf.add((person1, FOAF.knows, person2))
# Save the new Graph in RDF/XML format
rdf.serialize("data-updated2.owl",format="pretty-xml")
|
python
|
import django
from ..base import *
from ..i18n import *
from .services import *
from ..static import *
django.setup()
|
python
|
import os
import json
from io import BytesIO  # Python 3 replacement for the Python 2 StringIO module (send_file needs a binary buffer)
import shapely
from shapely.geometry import shape, mapping
from flask import Flask, request, send_file, jsonify, render_template
from werkzeug.utils import secure_filename
ALLOWED_EXTENSIONS = set(['js', 'json', 'geojson'])
app = Flask(__name__)
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
def get_centroids(polygons_json):
feature_collection = {
'type': 'FeatureCollection',
'features': []
}
for feature in json.loads(polygons_json)['features']:
feature_geom = shape(feature['geometry'])
feature_centroid = feature_geom.centroid
centroid = mapping(feature_centroid)
feature['geometry'] = centroid
feature_collection['features'].append(feature)
return feature_collection
@app.route('/', methods=['GET', 'POST'])
def operation():
if request.method == 'POST':
file = request.files['file']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file_type = filename.rsplit('.', 1)[1]
file_title = filename.rsplit('.', 1)[0]
polygons_json = file.read()
            strIO = BytesIO()
            strIO.write(json.dumps(get_centroids(polygons_json)).encode('utf-8'))
            strIO.seek(0)
centroids_filename = file_title + "_centroids.geojson"
return send_file(strIO, attachment_filename=centroids_filename, as_attachment=True)
return render_template('index.html')
@app.route('/centroids', methods=['POST'])
def api_centroids():
return jsonify(get_centroids(request.data))
if __name__ == '__main__':
app.run(debug=True)
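# Usage sketch (hypothetical file name): POST a GeoJSON FeatureCollection of
# polygons to /centroids to get the same features back with centroid points:
#   curl -X POST --data @polygons.geojson http://localhost:5000/centroids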
|
python
|
'''
Author: Hanqing Zhu([email protected])
Date: 2022-04-07 10:38:34
LastEditTime: 2022-04-08 23:57:19
LastEditors: Hanqing Zhu([email protected])
Description:
FilePath: /projects/ELight/core/models/__init__.py
'''
from .vgg import *
|
python
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.core.exceptions import ValidationError
from datetime import datetime
#from suit.widgets import SuitDateWidget, SuitTimeWidget, SuitSplitDateTimeWidget
#from .models import PrintTask
class TaskForm(forms.ModelForm):
class Meta:
exclude = [
'created_on',
#'created_by',
'started_on',
'completed_on',
'verbosity',
'job_id',
'status',
'failure_reason',
'progress',
]
# class PrintTaskForm(forms.ModelForm):
# class Meta(TaskForm.Meta):
# model = PrintTask
# exclude = TaskForm.Meta.exclude[:] + [
# 'result',
# ]
# widgets = {
# 'created_by': forms.HiddenInput(),
# 'societa': forms.HiddenInput(),
# 'registro': forms.HiddenInput(),
# 'data_da': SuitDateWidget,
# 'data_a': SuitDateWidget,
# }
# def __init__(self, request, filter_tipo, *args, **kwargs):
# super(PrintTaskForm, self).__init__(*args, **kwargs)
# if filter_tipo:
# visible_fields = PrintTask.list_specific_fields(filter_tipo)
# if request.user.is_superuser:
# visible_fields.append('limite')
# for name, field in self.fields.items():
# if not name in visible_fields:
# field.widget = forms.HiddenInput()
|
python
|
from __future__ import absolute_import
import warnings
from django.template import loader
from django.utils import six
from .. import compat
from . import filterset, filters
class DjangoFilterBackend(object):
default_filter_set = filterset.FilterSet
@property
def template(self):
if compat.is_crispy():
return 'django_filters/rest_framework/crispy_form.html'
return 'django_filters/rest_framework/form.html'
def get_filter_class(self, view, queryset=None):
"""
Return the django-filters `FilterSet` used to filter the queryset.
"""
filter_class = getattr(view, 'filter_class', None)
filter_fields = getattr(view, 'filter_fields', None)
if filter_class:
filter_model = filter_class.Meta.model
assert issubclass(queryset.model, filter_model), \
'FilterSet model %s does not match queryset model %s' % \
(filter_model, queryset.model)
return filter_class
if filter_fields:
MetaBase = getattr(self.default_filter_set, 'Meta', object)
class AutoFilterSet(self.default_filter_set):
class Meta(MetaBase):
model = queryset.model
fields = filter_fields
return AutoFilterSet
return None
def filter_queryset(self, request, queryset, view):
filter_class = self.get_filter_class(view, queryset)
if filter_class:
return filter_class(request.query_params, queryset=queryset, request=request).qs
return queryset
def to_html(self, request, queryset, view):
filter_class = self.get_filter_class(view, queryset)
if not filter_class:
return None
filter_instance = filter_class(request.query_params, queryset=queryset, request=request)
template = loader.get_template(self.template)
context = {
'filter': filter_instance
}
return template.render(context, request)
def get_coreschema_field(self, field):
if isinstance(field, filters.NumberFilter):
field_cls = compat.coreschema.Number
else:
field_cls = compat.coreschema.String
return field_cls(
description=six.text_type(field.extra.get('help_text', ''))
)
def get_schema_fields(self, view):
# This is not compatible with widgets where the query param differs from the
# filter's attribute name. Notably, this includes `MultiWidget`, where query
# params will be of the format `<name>_0`, `<name>_1`, etc...
assert compat.coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
assert compat.coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'
filter_class = getattr(view, 'filter_class', None)
if filter_class is None:
try:
filter_class = self.get_filter_class(view, view.get_queryset())
except Exception:
warnings.warn(
"{} is not compatible with schema generation".format(view.__class__)
)
filter_class = None
return [] if not filter_class else [
compat.coreapi.Field(
name=field_name,
required=False,
location='query',
schema=self.get_coreschema_field(field)
)
for field_name, field in filter_class.base_filters.items()
]
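# Usage sketch (hypothetical view): either point filter_class at a FilterSet
# subclass, or list filter_fields and let AutoFilterSet be built on the fly:
#
# class ProductList(generics.ListAPIView):
#     queryset = Product.objects.all()
#     filter_backends = (DjangoFilterBackend,)
#     filter_fields = ('category', 'in_stock')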
|
python
|
from .rawheader import HeaderFactory, LAS_FILE_SIGNATURE
|
python
|
testinfra_hosts = ["ansible://mariadb-centos7"]
def test_mariadb_el7_config(host):
innodb7 = host.file('/etc/my.cnf.d/innodb.cnf')
assert not innodb7.contains('innodb_large_prefix=1')
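# NOTE: testinfra reads the module-level testinfra_hosts once at collection
# time, so the reassignment below applies to every test in this module;
# splitting the two tests into separate modules would target each host correctly.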
testinfra_hosts = ["ansible://mariadb-centos8"]
def test_mariadb_el8_config(host):
innodb8 = host.file('/etc/my.cnf.d/innodb.cnf')
assert not innodb8.contains('innodb_large_prefix=1')
|
python
|
import requests
from .okra_base import OkraBase
from .utils import validate_id, validate_dates, validate_date_id
class Balance(OkraBase):
""" This handles all balance requests to the okra API. This contains the following functions.\n
Key functions:
get_balances -- This returns the realtime balance for each of a record's account.
by_id -- This returns the balance info using the id of the balance
by_customer -- This returns the balance info using the id of the customer
by_account -- This returns the balance info using the account id
by_date_range -- This fetches the balance info using the date range
by_type -- This fetches the balance info using the type of balance
by_customer_date -- This fetches the balance info of a customer using date range and customer id
        get_periodic -- This returns the real-time balance at any time without heavy calculations over the transactions on each of the record's accounts. """
def __init__(self, PRIVATE_TOKEN):
super(Balance, self).__init__(PRIVATE_TOKEN)
# get all balance
def get_balances(self):
"""Returns - JSON object """
url = self._base_url + self.endpoints_dict["balance"]["get_balance"]
response = requests.post(url, headers=self.headers)
return response.json()
# get balance by id
@validate_id
def by_id(self, id):
"""
Keyword Arguments:
id -- balance info id
        Returns -- JSON object """
url = self._base_url + self.endpoints_dict["balance"]["by_id"]
response = requests.post(url, headers=self.headers, data={"id": id})
return response.json()
@validate_id
def by_customer(self, customer):
"""
Keyword Arguments:
customer -- customer id info
Returns -- JSON object
"""
url = self._base_url + self.endpoints_dict["balance"]["by_customer"]
response = requests.post(url, headers=self.headers, data={
"customer": customer})
return response.json()
@validate_id
def by_account_id(self, account):
"""
Keyword arguments:
account -- account id info
Return: JSON object
"""
url = self._base_url + self.endpoints_dict["balance"]["by_account"]
response = requests.post(
url, headers=self.headers, data={"account": account})
return response.json()
@validate_dates
def by_date_range(self, _from, _to):
"""
Keyword arguments:
_from -- The start date
_to -- The end date
Return: JSON object
"""
url = self._base_url + self.endpoints_dict["balance"]["by_date"]
response = requests.post(url, headers=self.headers, data={
"from": _from, "to": _to})
return response.json()
@validate_id
def by_type(self, _type, value):
"""
Keyword arguments:
        _type -- The type of balance, e.g. ledger balance, available balance
        value -- The amount, e.g. 400, but as a string
        Return: JSON object
        """
        url = self._base_url + self.endpoints_dict["balance"]["by_date"]  # NOTE: posts to the by_date endpoint; a dedicated by_type key may be intended
response = requests.post(url, headers=self.headers, data={
"type": _type, "value": value})
return response.json()
@validate_date_id
def by_customer_date(self, _from, _to, customer):
"""
Keyword arguments:
_from -- The start date e.g 2020-12-25
_to -- The end date e.g 2020-12-29
customer -- The customer id info
Return: JSON object
"""
url = self._base_url + \
self.endpoints_dict["balance"]["by_customer_date"]
response = requests.post(url, headers=self.headers, data={
"from": _from, "to": _to, "customer": customer})
return response.json()
def get_periodic(self, account_id, record_id, currency="NGN"):
"""sumary_line
Keyword arguments:
account_id -- The account id
record_id -- The record id
currency -- The account's currency e.g NGN, GBP, USD
Return: return_description
"""
        if not all(isinstance(arg, str) for arg in (account_id, record_id, currency)):
            raise TypeError(
                "Expecting all input parameters to be of type string")
        if len(currency) < 3:
            raise Exception(
                "The account's currency must be 3 characters long, e.g. NGN, GBP, USD, CAD")
url = self._base_url + self.endpoints_dict["balance"]["periodic"]
response = requests.post(url, headers=self.headers, data={
"account_id": account_id, "record_id": record_id, "currency": currency})
return response.json()
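# A minimal usage sketch (hypothetical token and ids; the endpoint paths come
# from the endpoints_dict defined on OkraBase):
#
# client = Balance("sk_live_xxx")
# all_balances = client.get_balances()
# one_balance = client.by_id("5f3c9...")
# december = client.by_date_range("2020-12-01", "2020-12-31")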
|
python
|
# Import
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from warmUpExercise import warmUpExercise
from computeCost import computeCost
from gradientDescent import gradientDescent
from plotData import plotData
# Machine Learning Online Class - Exercise 1: Linear Regression
# Instructions
# ------------
#
# This file contains code that helps you get started on the
# linear exercise. You will need to complete the following modules
# in this exericse:
#
# warmUpExercise.py
# plotData.py
# gradientDescent.py
# computeCost.py
# gradientDescentMulti.py
# computeCostMulti.py
# featureNormalize.py
# normalEqn.py
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
#
# x refers to the population size in 10,000s
# y refers to the profit in $10,000s
# ==================== Part 1: Basic Function ====================
# Complete warmUpExercise.py
print('\n -------------------------- \n')
print('Running warmUpExercise ...')
print('5x5 Identity Matrix:')
warmup = warmUpExercise()
print(warmup)
# ======================= Part 2: Plotting =======================
# Read data using pandas
path = 'ex1data1.txt'
data = pd.read_csv(path, header=None, names=['Population', 'Profit'])
data.head()
# Summary of the data
data.describe()
# set X (training data) and y (target variable)
nbCol = data.shape[1]
X = data.iloc[:,0:nbCol-1]
y = data.iloc[:,nbCol-1:nbCol]
# convert from data frames to numpy arrays
X = np.array(X.values)
y = np.array(y.values)
# Plot Data
# Note: You have to complete the code in plotData.py
plotData(X,y)
# =================== Part 3: Gradient descent ===================
m = X.shape[0]
# Add intercept term to X
#X = np.concatenate((np.ones((m, 1)), X), axis=1)
X = np.column_stack((np.ones((m, 1)), X)) # works fine too
# initialize theta
theta = np.array([[0.,0.]]).T
# compute and display initial cost
# Note: You have to complete the code in computeCost.py
J = computeCost(X, y, theta)
print('\n -------------------------- \n')
print('cost: %0.4f ' % J)
print('Expected cost value (approx) 32.07')
# further testing of the cost function
J = computeCost(X, y, np.array([[-1, 2]]).T)
print('\n -------------------------- \n')
print('With theta = [-1 ; 2] Cost computed = %f' %J)
print('Expected cost value (approx) 54.24')
# compute Descent gradient
# initialize variables for learning rate and iterations
alpha = 0.01
iters = 1500
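# Each iteration of batch gradient descent (implemented in gradientDescent.py)
# applies the update: theta := theta - (alpha / m) * X.T @ (X @ theta - y)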
# perform gradient descent to "fit" the model parameters
# Note: You have to complete the code in gradientDescent.py
theta, cost_history, theta_history = gradientDescent(X, y, theta, alpha, iters)
# print theta to screen
print('\n -------------------------- \n')
print('Theta found by gradient descent: ')
print('%s %s' % (theta[0,0], theta[1,0]))
print('Expected theta values (approx)')
print(' -3.6303 1.1664')
# Checking the convergence
# Evolution of the cost
fig= plt.figure(figsize=(12,8))
ax = plt.gca()
ax.plot(np.arange(iters), cost_history, color="blue", linewidth=2.0, linestyle="-")
ax.set_xlabel('iteration number')
ax.set_ylabel(r'Cost J($\theta$)')
ax.set_title('Error vs. Training Epoch (number of iters)')
ax.grid()
ax.set_xlim([-20,1600])
ax.set_ylim([4,7])
# Checking the goodness-of-fit
# Fit: compute the regression line
x = np.linspace(data.Population.min(), data.Population.max(), 100)
f = theta[0, 0] + (theta[1, 0] * x)
# Plot the linear fit
fig = plt.figure(figsize=(12,8))
ax = plt.gca()
ax.plot(x, f, 'r', label='Linear regression: h(x) = %0.2f + %0.2fx'%(theta[0,0],theta[1,0]))
ax.scatter(data.Population, data.Profit, label='Training Data')
ax.legend(loc=2)
ax.set_xlabel('Population')
ax.set_ylabel('Profit')
ax.set_title('Predicted Profit vs. Population Size')
ax.grid()
fig.show()
# Predict values for population sizes of 35,000 and 70,000
predict1 = np.array([[1, 3.5]]).dot(theta)
predict2 = np.array([[1, 7]]).dot(theta)
#predict1 = np.array([[1, 3.5]])@theta
#predict2 = np.array([[1, 7]])@theta
print('\n -------------------------- \n')
print('For population = 35,000, we predict a profit of {:.4f}'.format(predict1[0,0]*10000))
print('For population = 70,000, we predict a profit of {:.4f}'.format(predict2[0,0]*10000))
# ============= Part 4: Visualizing J(theta_0, theta_1) =============
print('\n -------------------------- \n')
print('Visualizing J(theta_0, theta_1) ...')
# Create grid coordinates for plotting
theta0 = np.linspace(-10, 10, 100)
theta1 = np.linspace(-1, 4, 100)
theta0, theta1 = np.meshgrid(theta0, theta1, indexing='xy')
Z = np.zeros((theta0.shape[0],theta1.shape[0]))
# Calculate Z-values (Cost) based on grid of coefficients
for (i,j),v in np.ndenumerate(Z):
t = np.array([[theta0[i,j], theta1[i,j]]]).T
Z[i,j] = computeCost(X,y, t)
fig = plt.figure(figsize=(15,6))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122, projection='3d')
# Left plot
CS = ax1.contour(theta0, theta1, Z, np.geomspace(Z.min(),Z.max(),10), cmap=plt.cm.jet)
plt.clabel(CS, inline=1, fontsize=10)
ax1.scatter(theta_history[0,:],theta_history[1,:], c='r')
ax1.grid()
# Right plot
ax2.plot_surface(theta0, theta1, Z, rstride=1, cstride=1, alpha=0.6, cmap=plt.cm.jet, linewidth=0, antialiased=True)
ax2.set_zlabel('Cost')
ax2.set_zlim(Z.min(),Z.max())
ax2.view_init(elev=15, azim=230)
ax2.grid()
# settings common to both plots
for ax in fig.axes:
ax.set_xlabel(r'$\theta_0$', fontsize=17)
ax.set_ylabel(r'$\theta_1$', fontsize=17)
plt.show()
|
python
|
# Generate the randomized beta map for further testing changes of prediction accuracy and discrimination
from os.path import join as pjoin
import cifti
import numpy as np
from ATT.iofunc import iofiles
def randomize_ROI(rawdata, mask=None):
"""
Randomize the averaged data in ROI that defined by the mask
if mask is None, randomized original data in global brain (but not cross the ROIs)
Parameters:
------------
rawdata: original activation data
mask: mask to define ROIs
Return:
-------
rand_data: randomized data
"""
    rawshape = rawdata.shape
    if mask is None:
        rawdata_flatten = rawdata.flatten()
        rddata_flatten = np.random.choice(rawdata_flatten, len(rawdata_flatten), replace=False)
        rand_data = rddata_flatten.reshape(rawshape)
    else:
        # compute the ROI labels only when a mask is given (the original indexed mask before its None check)
        masklabel = np.unique(mask[mask != 0])
        rawdata = rawdata[:, np.newaxis, :]
rand_data = np.zeros_like(rawdata)
randomized_masklabel = np.random.choice(masklabel, len(masklabel), replace=False)
for i, masklbl in enumerate(masklabel):
avg_rdroi = np.mean(rawdata[:,(mask==randomized_masklabel[i])],axis=1)
rand_data[:,(mask==masklbl)] = np.tile(avg_rdroi[:,np.newaxis],(len(mask[mask==masklbl])))
rand_data = rand_data[:,0,:]
return rand_data
def simple_surface_by_ROI(rawdata, mask):
"""
Simple surface using ROI, extract the averaged value of each ROI
Parameters:
-----------
rawdata: the original data, [contrasts]*[spatial vertex]
mask: mask to define ROIs
Returns:
--------
sim_mat: the simplified matrix from rawdata
"""
masklabel = np.unique(mask[mask!=0])
rawdata = rawdata[:,np.newaxis,:]
sim_mat = np.zeros((rawdata.shape[0], len(masklabel)))
for i, masklbl in enumerate(masklabel):
for j in range(rawdata.shape[0]):
sim_mat[j,i] = np.mean(rawdata[j,(mask==masklbl)])
return sim_mat
parpath = '/nfs/s2/userhome/huangtaicheng/hworkingshop/hcp_test'
ncomp = '100'
task = 'wm'
# Read Mask
mask, header = cifti.read(pjoin(parpath, 'rest_comp', 'LGL_100Parcels_7Network_subregion.dscalar.nii'))
# Load avgbeta
avgbeta, _ = cifti.read(pjoin(parpath, 'program', 'framework', 'betamap', 'LGL_global_avgbeta', ncomp+'comp', 'avgbeta_'+task+'_'+ncomp+'comp.dscalar.nii'))
rdbeta = randomize_ROI(avgbeta, mask=mask)
# Save radomized ROI
ioscalar = iofiles.make_ioinstance(pjoin(parpath, 'program', 'framework', 'betamap', 'LGL_global_rdavgbeta', ncomp+'comp', 'rdavgbeta_'+task+'_'+ncomp+'comp.dscalar.nii'))
ioscalar.save_from_existed_header(header, rdbeta)
|
python
|
from discord.ext import commands
from .formats import plural
class StringMaxLengthConverter(commands.Converter):
"""A converter that only accepts strings under a certain length."""
def __init__(self, length):
super().__init__()
self.length = length
async def convert(self, ctx, arg):
if len(arg) > self.length:
message = f"That argument must be no more than {plural(self.length):character} ({len(arg)}/{self.length})."
raise commands.BadArgument(message)\
return arg
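# Usage sketch (hypothetical command): converter instances are valid
# discord.py annotations, so this caps the argument at 200 characters:
#
# @commands.command()
# async def tag(ctx, *, body: StringMaxLengthConverter(200)):
#     await ctx.send(body)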
|
python
|
# Generated by Django 3.1.8 on 2021-06-08 17:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sections', '0003_delete_techstack'),
]
operations = [
migrations.CreateModel(
name='TechStack',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('stack_name', models.CharField(blank=True, max_length=20, null=True)),
('project', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tech_stack', to='sections.project')),
],
),
]
|
python
|
import cv2 as cv
import numpy as np
import numpy.ma as ma
import scipy.interpolate
from scipy.interpolate import Akima1DInterpolator
def findTransform(fn):
pattern_size = (15,8)
trg_img_size = (1920, 1080)
scale_fact = 10 # to speed up calculations
print('processing %s... ' % fn)
    orig = cv.imread(fn, 0)
    if orig is None:  # check right after imread (cv.imread returns None on failure); the original checked img only after resizing
        print("Failed to load", fn)
        return None
    img = cv.resize(orig, (orig.shape[1] // scale_fact, orig.shape[0] // scale_fact))  # integer division for pixel sizes
    img = 255 - img  # invert
    src_img_size = (orig.shape[1], orig.shape[0])
    print("size: %d x %d ... " % (img.shape[1], img.shape[0]))
found, corners = cv.findChessboardCorners(img, pattern_size )
#print corners
#cv.imshow('image', img)
#cv.waitKey(0)
if found:
term = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1)
cv.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
#vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
#cv.drawChessboardCorners(vis, pattern_size, corners, found)
#cv.imshow('image', vis)
#cv.waitKey(0)
# todo: find the corners with subpixel accuracy in hires image
corners *= scale_fact
# generate target coordinates
pnts_dst = np.array([[trg_img_size[0]*float(r+1)/(pattern_size[0]+1),
trg_img_size[1]*float(c+1)/(pattern_size[1]+1)] for c in reversed(range(pattern_size[1]))
for r in reversed(range(pattern_size[0]))])
# calculate the transform from the perspective image to the flat image we want
h, status = cv.findHomography(corners, pnts_dst)
#im_out = cv.warpPerspective(orig, h, (trg_img_size[0],trg_img_size[1]))
#cv.imshow('image', im_out)
#cv.waitKey(0)
#cv.destroyAllWindows()
return h
else:
print('chessboard not found')
return None
"""
imgpath = "Q:\\Projects\\scripts\\python\\sparkmaker\\img\\DSC_6236_conv.JPG"
M = findTransform(imgpath)
img = cv.imread(imgpath, 1)
im_out = cv.warpPerspective(img, M, (1920,1080))
cv.imshow('image', im_out)
cv.waitKey(0)
cv.destroyAllWindows()
"""
def gather_illum_profile():
pairs = [#["Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6281_conv.JPG", "Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6282_conv.JPG"],
["Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6283_convCC.JPG", "Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6284_conv.JPG"],
["Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6285_conv.JPG", "Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6286_conv.JPG"],
["Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6287_convCC.JPG", "Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6288_conv.JPG"],
["Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6289_convCC.JPG", "Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6290_conv.JPG"],
["Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6291_conv.JPG", "Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6292_conv.JPG"],
["Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6293_convCC.JPG", "Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6294_conv.JPG"],
["Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6295_convCC.JPG", "Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6296_conv.JPG"],
["Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6297_convCC.JPG", "Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6298_conv.JPG"],
["Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6299_convCC.JPG", "Q:/Projects/scripts/python/sparkmaker/img/angles/DSC_6300_conv.JPG"]
]
accu = np.zeros((1080, 1920), dtype="float")
for chk,img in pairs:
        print(chk, img)
M = findTransform(chk)
im = cv.imread(img, 0)
im_out = cv.warpPerspective(im, M, (1920,1080))
accu += im_out
accu /= accu.max()
accu2 = (accu*255).astype("uint8")
cv.imshow('image', accu2)
cv.waitKey(0)
#cv.destroyAllWindows()
return accu
def extract_lcd_responce():
illums = [ ["Q:/Projects/scripts/python/sparkmaker/img/DSC_6267_conv.JPG", 0, 0],
["Q:/Projects/scripts/python/sparkmaker/img/DSC_6268_conv.JPG", 10, 0],
["Q:/Projects/scripts/python/sparkmaker/img/DSC_6269_conv.JPG", 20, 0],
["Q:/Projects/scripts/python/sparkmaker/img/DSC_6270_conv.JPG", 30, 0],
["Q:/Projects/scripts/python/sparkmaker/img/DSC_6271_conv.JPG", 40, 0],
["Q:/Projects/scripts/python/sparkmaker/img/DSC_6272_conv.JPG", 50, 0],
["Q:/Projects/scripts/python/sparkmaker/img/DSC_6273_conv.JPG", 60, 0],
["Q:/Projects/scripts/python/sparkmaker/img/DSC_6274_conv.JPG", 70, 0],
["Q:/Projects/scripts/python/sparkmaker/img/DSC_6275_conv.JPG", 80, 0],
["Q:/Projects/scripts/python/sparkmaker/img/DSC_6276_conv.JPG", 90, 0],
["Q:/Projects/scripts/python/sparkmaker/img/DSC_6277_conv.JPG", 100, 0],
]
base = cv.imread(illums[0][0], 0)
    base = cv.resize(base, (base.shape[1] // 5, base.shape[0] // 5))
    base_cropped = base[150:-120, 100:-100]
    for il in illums[1:]:
        print(il[0])
        img = cv.imread(il[0], 0)
        img = cv.resize(img, (img.shape[1] // 5, img.shape[0] // 5))
cropped = img[150:-120, 100:-100]
img = cropped.astype("int16") - base_cropped
il[2] = img.max()
#cv.imshow('image', img_cropped)
#cv.waitKey(0)
#cv.destroyAllWindows()
xx = [i[1]/100.0 for i in illums]
yy = [float(i[2])/illums[-1][2] for i in illums]
#nx = [c/50.0 for c in range(51)]
#cs = Akima1DInterpolator(xx,yy)
#inter = cs(nx)
# inter = np.interp(nx, xx, yy)
import matplotlib.pyplot as plt
# plt.plot(nx,inter)
plt.plot( xx, yy )
plt.xlabel('intensity')
plt.ylabel('measured')
plt.show()
return (xx,yy)
# get base ilumination pattern
illum = gather_illum_profile()
# get lcd response curve
lcd_profile = extract_lcd_responce()
thresh = 0.4
out_img = (illum * 255).astype("uint8")
# create a lut to apply an inverse mapping to the illumination
lut = np.array([255 if (x/255.0)<thresh else int(np.interp(thresh/(x/255.0), lcd_profile[1], lcd_profile[0])*255) for x in range(256)], dtype="uint8")
# apply the lut the pattern
inv_img = lut[out_img]
# flip image since we captured it through a mirror
inv_img = cv.flip(inv_img, 0)
cv.imshow('image', inv_img)
cv.waitKey(0)
cv.destroyAllWindows()
#cv.imwrite("Q:\\Projects\\scripts\\python\\sparkmaker\\img\\lumamap.png", inv_img)
|
python
|
#!/usr/bin/env python3
#-*-coding:utf-8-*-
import random
import time
try:
import thread
except ImportError:
import _thread as thread
import board
import neopixel
from gpiozero import Button
import json
import requests
import websocket
pixels = neopixel.NeoPixel(board.D18, 60 + 13, auto_write=False)
# 0-59: tape, 60-72: per-client points
button = Button(4)
LEDNUM = 13
CLOUD_LED = 60
mode = 0
tape_att = 0.1
points_att = 0.8
color = (int(random.random() * 205 + 50), int(random.random() * 205 + 50), int(random.random() * 205 + 50))
send_color = '#%02X%02X%02X' % (color[0],color[1],color[2])
lights_data = [{}]
ZIP = "153-0064,JP"
API_KEY = "28e021f5be878b85fed7c65405499234"
api = "http://api.openweathermap.org/data/2.5/forecast?zip={city}&units=metric&lang=ja&APPID={key}"
url = api.format(city = ZIP, key = API_KEY)
def send_status(status, cl):
tmp_c = '#%02X%02X%02X' % (cl[0],cl[1],cl[2])
sendval = json.dumps({"state": status, "color": tmp_c})
ws.send(sendval)
def is_json(myjson):
try:
json_object = json.loads(myjson)
except ValueError:
return False
return True
def on_message(ws, message):
global lights_data
if is_json(message):
mes = json.loads(message)
if 'type' in mes:
if mes['type'] == 'Group':
lights_data = mes['clients']
print(lights_data)
for i in range(len(lights_data)):
if lights_data[i]['state']:
c = lights_data[i]['color']
color = (int(c[1:3],16),int(c[3:5],16),int(c[5:7],16))
pixels[i+CLOUD_LED] = (int(color[0]*points_att), int(color[1]*points_att), int(color[2]*points_att))
else:
pixels[i+CLOUD_LED] = (0, 0, 0)
pixels.show()
else:
is_new = True
for i in range(len(lights_data)):
if mes['id'] == lights_data[i]['id']:
is_new = False
lights_data[i]['state'] = mes['state']
if mes['state']:
c = mes['color']
print(c)
color = (int(c[1:3],16),int(c[3:5],16),int(c[5:7],16))
pixels[i+CLOUD_LED] = (int(color[0]*points_att), int(color[1]*points_att), int(color[2]*points_att))
else:
pixels[i+CLOUD_LED] = (0, 0, 0)
pixels.show()
if is_new and len(lights_data) < LEDNUM-1:
lights_data.append(mes)
if 'event' in mes:
if mes['event'] == 'close':
for i in range(len(lights_data)):
if mes['id'] == lights_data[i]['id']:
lights_data.pop(i)
def on_error(ws, error):
print(error)
def on_close(ws):
print("### closed ###")
pixels.fill((0, 0, 0))
pixels.show()
def on_open(ws):
def run(*args):
sw_status = False
global mode
while True:
if button.is_pressed == True and mode == 0:
mode = 1
print("on-cool")
tmp_c = (200, 230, 255)
send_status(True, tmp_c)
sw_status = True
for i in range(CLOUD_LED) :
pixels[i] = (int(tmp_c[0]*tape_att), int(tmp_c[1]*tape_att), int(tmp_c[2]*tape_att))
pixels.show()
if button.is_pressed == False and mode == 1:
mode = 2
print("on-warm")
tmp_c = (255, 180, 120)
send_status(True, tmp_c)
sw_status = True
for i in range(CLOUD_LED) :
pixels[i] = (int(tmp_c[0]*tape_att), int(tmp_c[1]*tape_att), int(tmp_c[2]*tape_att))
pixels.show()
if button.is_pressed == True and mode == 2:
mode = 3
response = requests.get(url)
weather_data = json.loads(response.text)['list'][10]['weather'][0]['main']
if weather_data == "Clouds":
tmp_c = (128, 128, 160)
elif weather_data == "Snow":
tmp_c = (128, 255, 255)
elif weather_data == "Rain":
tmp_c = (0, 128, 255)
elif weather_data == "Clear":
tmp_c = (255, 200, 50)
elif weather_data == "Fog":
tmp_c = (50, 100, 100)
elif weather_data == "Mist":
tmp_c = (50, 150, 150)
elif weather_data == "Haze":
tmp_c = (50, 50, 50)
else:
tmp_c = color
print("on-whether")
send_status(True, tmp_c)
sw_status = True
for i in range(CLOUD_LED) :
pixels[i] = (int(tmp_c[0]*tape_att), int(tmp_c[1]*tape_att), int(tmp_c[2]*tape_att))
pixels.show()
if button.is_pressed == False and mode == 3:
mode = 0
send_status(False, color)
for i in range(CLOUD_LED) :
pixels[i] = (0, 0, 0)
pixels.show()
print("thread terminating...")
thread.start_new_thread(run, ())
if __name__ == "__main__":
websocket.enableTrace(True)
ws = websocket.WebSocketApp("ws://kumasan.site:30000",
on_message = on_message,
on_error = on_error,
on_close = on_close)
ws.on_open = on_open
ws.run_forever()
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="grafeas.v1",
manifest={"Layer", "Fingerprint", "ImageNote", "ImageOccurrence",},
)
class Layer(proto.Message):
r"""Layer holds metadata specific to a layer of a Docker image.
Attributes:
directive (str):
Required. The recovered Dockerfile directive
used to construct this layer. See
https://docs.docker.com/engine/reference/builder/
for more information.
arguments (str):
The recovered arguments to the Dockerfile
directive.
"""
directive = proto.Field(proto.STRING, number=1)
arguments = proto.Field(proto.STRING, number=2)
class Fingerprint(proto.Message):
r"""A set of properties that uniquely identify a given Docker
image.
Attributes:
v1_name (str):
Required. The layer ID of the final layer in
the Docker image's v1 representation.
v2_blob (Sequence[str]):
Required. The ordered list of v2 blobs that
represent a given image.
v2_name (str):
Output only. The name of the image's v2 blobs computed via:
[bottom] := v2_blob[bottom] [N] := sha256(v2_blob[N] + " " +
v2_name[N+1]) Only the name of the final blob is kept.
"""
v1_name = proto.Field(proto.STRING, number=1)
v2_blob = proto.RepeatedField(proto.STRING, number=2)
v2_name = proto.Field(proto.STRING, number=3)
class ImageNote(proto.Message):
r"""Basis describes the base image portion (Note) of the DockerImage
relationship. Linked occurrences are derived from this or an
equivalent image via: FROM <Basis.resource_url> Or an equivalent
reference, e.g., a tag of the resource_url.
Attributes:
resource_url (str):
Required. Immutable. The resource_url for the resource
representing the basis of associated occurrence images.
fingerprint (~.image.Fingerprint):
Required. Immutable. The fingerprint of the
base image.
"""
resource_url = proto.Field(proto.STRING, number=1)
fingerprint = proto.Field(proto.MESSAGE, number=2, message=Fingerprint,)
class ImageOccurrence(proto.Message):
r"""Details of the derived image portion of the DockerImage
relationship. This image would be produced from a Dockerfile
with FROM <DockerImage.Basis in attached Note>.
Attributes:
fingerprint (~.image.Fingerprint):
Required. The fingerprint of the derived
image.
distance (int):
Output only. The number of layers by which
this image differs from the associated image
basis.
layer_info (Sequence[~.image.Layer]):
This contains layer-specific metadata, if populated it has
length "distance" and is ordered with [distance] being the
layer immediately following the base image and [1] being the
final layer.
base_resource_url (str):
Output only. This contains the base image URL
for the derived image occurrence.
"""
fingerprint = proto.Field(proto.MESSAGE, number=1, message=Fingerprint,)
distance = proto.Field(proto.INT32, number=2)
layer_info = proto.RepeatedField(proto.MESSAGE, number=3, message=Layer,)
base_resource_url = proto.Field(proto.STRING, number=4)
__all__ = tuple(sorted(__protobuf__.manifest))
|
python
|
"""Elabore um programa que calcule o valor a ser pago por um produto,
considerando o seu preço normal e condição de pagamento:
- a vista dinheiro/cheque: 10% de desconto
- em até 2x no cartão: preco normal
- 3x ou mais no cartão: 20% de juros
"""
import time
print('========== Calculando Valores de Um Produto ==========')
valor = float(input('Digite o valor do produto: R$ '))
time.sleep(2)
escolha = print('Escolha a forma de pagamento \n 1 - A Vista (Dinheiro/Cheque) \n 2 - Em 2x no Cartão \n 3 - Em 3x ou mais no cartão.')
print('--------------------------------------')
pgto = int(input('Digite a opção desejada => '))
time.sleep(1)
print('======================================')
print('Processando o valor total a ser pago...')
time.sleep(3)
if pgto == 1:
    vista = valor - (valor * 0.10)
    print('The amount to be paid is R$ {:.2f}'.format(vista))
elif pgto == 3:
    tres = valor + (valor * 0.20)  # 20% surcharge, matching the spec in the docstring
    print('The amount to be paid is R$ {:.2f}'.format(tres))
else:
    print('The amount to be paid is R$ {:.2f}'.format(valor))
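# Quick check with valor = 100.00:
#   option 1 -> 90.00 (10% discount), option 2 -> 100.00, option 3 -> 120.00 (20% surcharge)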
|
python
|
from django.contrib.gis import admin
from vida.vida.models import Person, Shelter, Track, Form, Report, Note, Profile
import uuid
import helpers
from django.conf import settings
class VidaAdmin(admin.OSMGeoAdmin):
openlayers_url = settings.STATIC_URL + 'openlayers/OpenLayers.js'
class NoteInline(admin.StackedInline):
model = Report.notes.through
fields = ['note']
extra = 0
class NoteAdmin(VidaAdmin):
model = Note
class TrackAdmin(VidaAdmin):
fields = ['user', 'mayday', 'geom']
list_display = ('user', 'timestamp', 'mayday')
search_fields = ['user', 'timestamp', 'mayday']
readonly_fields = ('timestamp',)
admin.site.register(Track, TrackAdmin)
class FormAdmin(VidaAdmin):
fields = ['user', 'schema', 'color', 'emails', 'order']
list_display = ('user', 'timestamp', 'schema', 'color')
search_fields = ['user__username', 'timestamp', 'schema', 'color']
readonly_fields = ('timestamp',)
class ProfileAdmin(VidaAdmin):
list_display = ('user', 'force_type')
search_fields = ['user__username']
list_filter = ['force_type']
admin.site.register(Note, NoteAdmin)
admin.site.register(Form, FormAdmin)
admin.site.register(Profile, ProfileAdmin)
class ReportAdmin(VidaAdmin):
fields = ['user', 'form', 'data', 'geom', 'status']
list_display = ('user', 'timestamp', 'form', 'data', 'status')
search_fields = ['user__username', 'timestamp', 'data', 'status']
readonly_fields = ('timestamp',)
inlines = [NoteInline]
list_filter = ['status', 'user__username']
admin.site.register(Report, ReportAdmin)
class PersonAdmin(admin.ModelAdmin):
fields = ['created_by', 'shelter_id', 'family_name', 'given_name', 'gender', 'age', 'description', 'street_and_number', 'city', 'province_or_state', 'neighborhood', 'notes', 'barcode']
list_display = ('given_name', 'family_name', 'gender', 'age', 'created_by')
search_fields = ['given_name', 'family_name', 'notes', 'barcode']
class ShelterAdmin(admin.ModelAdmin):
actions = ['delete_selected']
fields = ['created_by', 'name', 'description', 'street_and_number', 'city', 'province_or_state', 'neighborhood', 'notes', 'geom']
list_display = ('name', 'created_by', 'neighborhood')
search_fields = ['name', 'street_and_number', 'city', 'province_or_state', 'neighborhood', 'uuid']
def save_model(self, request, obj, form, change):
        obj.uuid = str(uuid.uuid4())  # Make new uuid for shelter
obj.site_details = str('http://' + helpers.get_network_ip('eth1') + '/shelters/')
return super(ShelterAdmin, self).save_model(request, obj, form, change)
def response_post_save_add(self, request, obj):
obj.site_details += str(obj.id) + '/'
obj.save() # This adds the ID after the save, because Django doesn't have the ID field before creation
return super(ShelterAdmin, self).response_post_save_add(request, obj)
    def delete_selected(self, request, obj):
        for shelter in obj.all():  # All selected shelters
            # Clear the shelter reference from any person assigned to it
            for person in Person.objects.filter(shelter_id=shelter.uuid):
                person.shelter_id = ''  # Shelter has been removed, no need for them to hold shelterID anymore
                person.save()
            shelter.delete()
admin.site.register(Person, PersonAdmin)
admin.site.register(Shelter, ShelterAdmin)
|
python
|
# Hyukjin's program verification
# Solution using DFS
# https://swexpertacademy.com/main/code/problem/problemDetail.do?contestProbId=AV4yLUiKDUoDFAUx&categoryId=AV4yLUiKDUoDFAUx&categoryType=CODE
import sys
sys.setrecursionlimit(10**9)
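# The grid wraps around at the edges (a torus), and a program state is fully
# described by (row, col, direction, memory value); temp[r][c][4][16] below
# marks visited states, so revisiting one means the program loops forever
# without ever reaching '@'.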
def dfs(x, y, dir, cur):
global r, c
stat=False
rot=False
if cmd[x][y]=='>':
dir=0
elif cmd[x][y]=='<':
dir=1
elif cmd[x][y]=='v':
dir=2
elif cmd[x][y]=='^':
dir=3
elif cmd[x][y]=='_':
dir=0 if not cur else 1
elif cmd[x][y]=='|':
dir=2 if not cur else 3
elif cmd[x][y]=='+':
cur=0 if (cur+1)>15 else cur+1
elif cmd[x][y]=='-':
cur=15 if (cur-1)<0 else cur-1
elif cmd[x][y]=='@':
return True
elif cmd[x][y]=='?':
rot=True
else:
if cmd[x][y]!='.':
cur=int(cmd[x][y])
if temp[x][y][dir][cur]:
return False
else:
temp[x][y][dir][cur]=1
if not rot:
nx=x+idx_x[dir]
ny=y+idx_y[dir]
if nx<0: nx=r-1
elif nx==r: nx=0
if ny<0: ny=c-1
elif ny==c: ny=0
return max(stat,dfs(nx, ny, dir, cur))
else:
for i in range(4):
nx=x+idx_x[i]
ny=y+idx_y[i]
if nx<0: nx=r-1
elif nx==r: nx=0
if ny<0: ny=c-1
elif ny==c: ny=0
stat=max(stat,dfs(nx, ny, i, cur))
return stat
if __name__=="__main__":
t=int(input())
for tc in range(1,t+1):
r,c=map(int,input().split())
cmd=[input() for _ in range(r)]
        temp=[[[[0]*16 for _ in range(4)] for _ in range(c)] for _ in range(r)] # visited-state (cycle) check
idx_x=[0,0,1,-1]
idx_y=[1,-1,0,0]
exist=0
        # Workaround for Python's recursion/stack-overflow limits on a few test cases
# for i in range(r):
# for j in range(c):
# if cmd[i][j]=="@":
# exist=1
# a=i
# b=j
# break
# if a>0 and a<r-1 and b>0 and b<c-1:
# if (cmd[a][b-1]=="^" or cmd[a][b-1]=="v") and (cmd[a][b+1]=="^" or cmd[a][b+1]=="v") and (cmd[a-1][b]==">" or cmd[a-1][b]=="<") and (cmd[a+1][b]==">" or cmd[a+1][b]=="<"):
# exist=0
#
# if exist:
# print("#{} {}".format(tc,"YES" if dfs(0,0,0,0) else "NO"))
# else:
# print("#{} {}".format(tc,"NO"))
if tc==39 or tc==40:
print("#{} {}".format(tc,"NO"))
elif tc==69:
print("#{} {}".format(tc,"YES"))
else:
print("#{} {}".format(tc,"YES" if dfs(0,0,0,0) else "NO"))
|
python
|
def add():
    x = float(input("Enter your number 1: "))
    y = float(input("Enter your number 2: "))
    z = x + y  # convert before adding; raw input() returns strings, which would concatenate
    print("Your final result is: ", z)
def multiply():
    x = float(input("Enter your number 1: "))
    y = float(input("Enter your number 2: "))
    z = x * y  # multiplying two strings would raise TypeError
    print("Your final result is: ", z)
def factorial():
    a = int(input())
    f = 1
    for i in range(1, a + 1):  # include a itself; range(1, a) would compute (a-1)!
        f = f * i
    return f
def prime():
    num = int(input("Enter a number: "))
    if num > 1:
        flag = 1  # initialize, so small primes (2, 3) with an empty loop don't leave flag undefined
        for i in range(2, num):
            if (num % i) == 0:
                flag = 0
                break
        if flag == 0:
            print("False")
        else:
            print("True")
    else:
        print("False")
def perfect_square():
    n = int(input("Enter a number: "))
    if n < 2:
        return n >= 0  # guard: the Newton step below would divide by zero for n < 2
    x = n // 2
    y = set([x])
    while x * x != n:
        x = (x + (n // x)) // 2
        if x in y: return False
        y.add(x)
    return True
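# perfect_square() runs an integer Newton iteration that converges toward
# isqrt(n); the seen-set breaks the 2-cycle that non-squares fall into.
# e.g. n = 49: x goes 24 -> 13 -> 8 -> 7, and 7*7 == 49 -> True.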
if __name__=='__main__':
add()
multiply()
|
python
|
from typing import NewType
from pydantic import SecretStr
from mirumon.api.api_model import APIModel
SharedKey = NewType("SharedKey", SecretStr)
class CreateDeviceBySharedKeyRequest(APIModel):
name: str
shared_key: SharedKey
|
python
|
# Flask imports
from flask import Blueprint, render_template
errors_blueprint = Blueprint('errors_blueprint', __name__)
@errors_blueprint.app_errorhandler(404)
def page_not_found(error):
return render_template('error_pages/404.html'), 404
@errors_blueprint.app_errorhandler(500)
def internal_server_error(error):
return render_template('error_pages/500.html'), 500
|
python
|
"""empty message
Revision ID: e47fb2d3a756
Revises:
Create Date: 2017-06-30 17:30:56.066364
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e47fb2d3a756'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('modelfiles', sa.Column('friendly_name', sa.String(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('modelfiles', 'friendly_name')
# ### end Alembic commands ###
|
python
|
from __future__ import division
from operator import attrgetter
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.lib import hub
from ryu.lib.packet import packet
import os.path
OFP_SWITCHES_FLOW_STATS = \
'./network-data/ofp_switches_{0}_flow_stats.db'
OFP_SWITCHES_FLOW_STATS_PREVIOUS = \
'./network-data/ofp_switches_{0}_flow_stats_prev.db'
OFP_SWITCHES_PORT_STATS = \
'./network-data/ofp_switches_{0}_port_stats.db'
OFP_SWITCHES_PORT_STATS_PREVIOUS = \
'./network-data/ofp_switches_{0}_port_stats_prev.db'
class MySimpleMonitor(app_manager.RyuApp):
def __init__(self, *args, **kwargs):
super(MySimpleMonitor, self).__init__(*args, **kwargs)
self.datapaths = {}
self.monitor_thread = hub.spawn(self._monitor)
self.port_stats = {}
self.port_speed = {}
self.flow_stats = {}
self.flow_speed = {}
self.sleep = 10
self.state_len = 3
@set_ev_cls(ofp_event.EventOFPStateChange,
[MAIN_DISPATCHER, DEAD_DISPATCHER])
def _state_change_handler(self, ev):
datapath = ev.datapath
if ev.state == MAIN_DISPATCHER:
            if datapath.id not in self.datapaths:
self.logger.debug('register datapath: %016x', datapath.id)
self.datapaths[datapath.id] = datapath
elif ev.state == DEAD_DISPATCHER:
if datapath.id in self.datapaths:
self.logger.debug('unregister datapath: %016x', datapath.id)
del self.datapaths[datapath.id]
# get the ports' features.
@set_ev_cls(
ofp_event.EventOFPStateChange,
[MAIN_DISPATCHER, DEAD_DISPATCHER])
def port_features_handler(self, ev):
datapath = ev.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
def _monitor(self):
while True:
for dp in self.datapaths.values():
self._request_stats(dp)
hub.sleep(self.sleep)
def _request_stats(self, datapath):
self.logger.debug('send stats request: %016x', datapath.id)
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
req = parser.OFPFlowStatsRequest(datapath)
datapath.send_msg(req)
req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)
datapath.send_msg(req)
def _save_stats(self, dist, key, value, length):
if key not in dist:
dist[key] = []
dist[key].append(value)
if len(dist[key]) > length:
dist[key].pop(0)
def _get_speed(self, now, pre, period):
if period == 0:
return
return (now - pre) / period
def _get_time(self, sec, nsec):
return sec + nsec / (10**9)
def _get_period(self, n_sec, n_nsec, p_sec, p_nsec):
return self._get_time(n_sec, n_nsec) - self._get_time(p_sec, p_nsec)
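    # Example: two samples taken 10 s apart with byte counts 1000 and 6000
    # yield _get_speed(6000, 1000, 10) == 500 bytes/s.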
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
def _flow_stats_reply_handler(self, ev):
# print "simple_monitor.flow_stats:"
body = ev.msg.body
switch_name = ev.msg.datapath.id
with open(OFP_SWITCHES_FLOW_STATS.format(switch_name), 'w') as iff:
# print "writing to %s" % (os.path.abspath(OFP_SWITCHES_FLOW_STATS.format(switch_name)))
self.logger.debug("\n> Flow Stats:")
self.logger.debug('datapath '
'hostname '
'in-port duration_sec duration_nsec '
' eth-dst out-port packets bytes')
iff.write('datapath '
'hostname '
'in-port duration_sec duration_nsec '
' eth-dst out-port packets bytes\n')
self.logger.debug('---------------- '
'---------------- '
'-------- ---------------- -------------- '
'---------------- -------- -------- --------')
iff.write('---------------- '
'---------------- '
'-------- ---------------- -------------- '
'---------------- -------- -------- --------\n')
for stat in sorted([flow for flow in body if flow.priority == 3],
key=lambda flow: (flow.match['in_port'],
flow.match['eth_dst'])):
key = (
stat.match['in_port'], stat.match['eth_dst'],
stat.instructions[0].actions[0].port,)
value = (
stat.packet_count, stat.byte_count,
stat.duration_sec, stat.duration_nsec)
self._save_stats(self.flow_stats, key, value, self.state_len)
# Get flow's speed.
pre = 0
period = self.sleep
tmp = self.flow_stats[key]
if len(tmp) > 1:
pre = tmp[-2][1]
period = self._get_period(
tmp[-1][2], tmp[-1][3],
tmp[-2][2], tmp[-2][3])
speed = self._get_speed(
self.flow_stats[key][-1][1], pre, period)
self._save_stats(self.flow_speed, key, speed, self.state_len)
iff.write('%16d %16s %8x %16d %16d %17s %8x %8d %8d' %
(ev.msg.datapath.id,
str(ev.msg.datapath.id),
stat.match['in_port'], stat.duration_sec,
stat.duration_nsec, stat.match['eth_dst'],
stat.instructions[0].actions[0].port,
stat.packet_count, stat.byte_count))
iff.write("\n")
self.logger.debug('%16d %16s %8x %16d %16d %17s %8x %8d %8d',
ev.msg.datapath.id,
str(ev.msg.datapath.id),
stat.match['in_port'], stat.duration_sec,
stat.duration_nsec, stat.match['eth_dst'],
stat.instructions[0].actions[0].port,
stat.packet_count, stat.byte_count)
# print "\n%16d (%s %s %s ) [(%s %s %s %s)]" % (ev.msg.datapath.id,
# 'in_port', 'eth_dst', 'actions.port', 'packet_count', 'byte_count',
# 'duration_sec', 'duration_nsec')
# for key, val in self.flow_stats.items():
# print key, " ", val
# print "Flow speed"
# for key, val in self.flow_speed.items():
# print key, val
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def _port_stats_reply_handler(self, ev):
# print "simple_monitor.port_stats:"
body = ev.msg.body
switch_name = ev.msg.datapath.id
with open(OFP_SWITCHES_PORT_STATS.format(switch_name), 'w') as iff:
# print "writing to %s" % (os.path.abspath(OFP_SWITCHES_PORT_STATS.format(switch_name)))
self.logger.debug("\n> Port Stats:")
self.logger.debug('datapath '
'hostname '
'port duration_sec duration_nsec '
'rx-pkts rx-bytes rx-error '
'tx-pkts tx-bytes tx-error')
iff.write('datapath '
'hostname '
'port duration_sec duration_nsec '
'rx-pkts rx-bytes rx-error '
'tx-pkts tx-bytes tx-error\n')
self.logger.debug('---------------- '
'-------------- '
'-------- ---------------- -------------- '
'-------- -------- -------- '
'-------- -------- --------')
iff.write('---------------- '
'-------------- '
'-------- ------------ -------------- '
'-------- -------- -------- '
'-------- -------- --------\n')
for stat in sorted(body, key=attrgetter('port_no')):
key = (ev.msg.datapath.id, stat.port_no)
value = (
stat.rx_packets, stat.rx_bytes, stat.rx_errors,
stat.duration_sec, stat.duration_nsec)
self._save_stats(self.port_stats, key, value, self.state_len)
# Get port speed.
pre = 0
period = self.sleep
tmp = self.port_stats[key]
if len(tmp) > 1:
pre = tmp[-2][1]
period = self._get_period(
tmp[-1][3], tmp[-1][4],
tmp[-2][3], tmp[-2][4])
speed = self._get_speed(
self.port_stats[key][-1][1], pre, period)
self._save_stats(self.port_speed, key, speed, self.state_len)
# print '\n Speed: %s bytes\/s\n' % (self.port_speed)
self.logger.debug('%016x %8s %8x %16d %16d %8d %8d %8d %8d %8d %8d',
ev.msg.datapath.id,
ev.msg.datapath.id,
stat.port_no, stat.duration_sec, stat.duration_nsec,
stat.rx_packets, stat.rx_bytes,
stat.rx_errors, stat.tx_packets,
stat.tx_bytes, stat.tx_errors)
iff.write('%016x %8s %8x %16d %16d %8d %8d %8d %8d %8d %8d' %
(ev.msg.datapath.id,
ev.msg.datapath.id,
stat.port_no, stat.duration_sec, stat.duration_nsec,
stat.rx_packets, stat.rx_bytes, stat.rx_errors,
stat.tx_packets, stat.tx_bytes, stat.tx_errors))
iff.write("\n")
# print "\n(%16d %s) [(%s %s %s %s %s)]" % (ev.msg.datapath.id,
# 'stat_port_no', 'rx_packets',
# 'rx_bytes', 'rx_errors', 'duration_sec', 'duration_nsec')
# for key, val in self.port_stats.items():
# print key, " ", val
# print "port speed"
# for key, val in self.port_speed.items():
# print key, val
|
python
|
import torch
from torch.utils.data import DataLoader, Dataset
import numpy as np
from librosa.util import find_files
from torchaudio import load
from torch import nn
import os
import re
import random
import pickle
import torchaudio
import sys
import time
import glob
import tqdm
from pathlib import Path
CACHE_PATH = os.path.join(os.path.dirname(__file__), '.cache/')
# Voxceleb 1 Speaker Identification
class SpeakerClassifiDataset(Dataset):
def __init__(self, mode, file_path, meta_data, max_timestep=None, **kwargs):
self.root = file_path
self.speaker_num = 1251
        self.meta_data = meta_data
        self.max_timestep = max_timestep
        with open(self.meta_data, "r") as f:  # close the file handle instead of leaking it
            self.usage_list = f.readlines()
cache_path = os.path.join(CACHE_PATH, f'{mode}.pkl')
if os.path.isfile(cache_path):
print(f'[SpeakerClassifiDataset] - Loading file paths from {cache_path}')
with open(cache_path, 'rb') as cache:
dataset = pickle.load(cache)
else:
            dataset = getattr(self, mode)()  # mode is one of: train / dev / test
os.makedirs(os.path.dirname(cache_path), exist_ok=True)
with open(cache_path, 'wb') as cache:
pickle.dump(dataset, cache)
print(f'[SpeakerClassifiDataset] - there are {len(dataset)} files found')
self.dataset = dataset
self.label = self.build_label(self.dataset)
self.add_silence = kwargs['add_silence']
self.silence_length = kwargs['silence_length']
# file_path/id0001/asfsafs/xxx.wav
def build_label(self, train_path_list):
y = []
for path in train_path_list:
id_string = path.split("/")[-3]
y.append(int(id_string[2:]) - 10001)
return y
    @classmethod
    def label2speaker(cls, labels):
        return [f"id{label + 10001}" for label in labels]
def train(self):
dataset = []
print("search specified wav name for training set")
for string in tqdm.tqdm(self.usage_list):
pair = string.split()
index = pair[0]
if int(index) == 1:
x = list(self.root.glob("*/wav/" + pair[1]))
dataset.append(str(x[0]))
print("finish searching training set wav")
return dataset
def dev(self):
dataset = []
print("search specified wav name for dev set")
for string in tqdm.tqdm(self.usage_list):
pair = string.split()
index = pair[0]
if int(index) == 2:
x = list(self.root.glob("*/wav/" + pair[1]))
dataset.append(str(x[0]))
print("finish searching dev set wav")
return dataset
def test(self):
dataset = []
print("search specified wav name for test set")
for string in tqdm.tqdm(self.usage_list):
pair = string.split()
index = pair[0]
if int(index) == 3:
x = list(self.root.glob("*/wav/" + pair[1]))
dataset.append(str(x[0]))
print("finish searching test set wav")
return dataset
def add_silence_func(self, wav, add_silence_place, silence_length):
"""
都會傳進去
"""
if add_silence_place == 'No':
return wav
temp_wav = torch.chunk(wav, 10)
wav_silence = torch.zeros(len(wav) // silence_length)
if add_silence_place == 'front':
temp_wav = list(temp_wav)
temp_wav.insert(0, wav_silence)
return torch.cat(temp_wav)
elif add_silence_place == 'middle':
temp_wav = list(temp_wav)
temp_wav.insert(5, wav_silence)
return torch.cat(temp_wav)
elif add_silence_place == 'end':
temp_wav = list(temp_wav)
temp_wav.insert(10, wav_silence)
return torch.cat(temp_wav)
else:
return wav
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
wav, sr = torchaudio.load(self.dataset[idx])
wav = wav.squeeze(0)
length = wav.shape[0]
wav = self.add_silence_func(wav, self.add_silence, self.silence_length) # add by chiluen
        if self.max_timestep is not None:
if length > self.max_timestep:
start = random.randint(0, int(length-self.max_timestep))
wav = wav[start:start+self.max_timestep]
length = self.max_timestep
def path2name(path):
return Path("-".join((Path(path).parts)[-3:])).stem
path = self.dataset[idx]
return wav.numpy(), self.label[idx], path2name(path)
def collate_fn(self, samples):
return zip(*samples)
|
python
|
"""
A component which allows you to send data to an Influx database.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/influxdb/
"""
import logging
import voluptuous as vol
from homeassistant.const import (
EVENT_STATE_CHANGED, STATE_UNAVAILABLE, STATE_UNKNOWN, CONF_HOST,
CONF_PORT, CONF_SSL, CONF_VERIFY_SSL, CONF_USERNAME, CONF_BLACKLIST,
CONF_PASSWORD, CONF_WHITELIST)
from homeassistant.helpers import state as state_helper
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['influxdb==3.0.0']
_LOGGER = logging.getLogger(__name__)
CONF_DB_NAME = 'database'
CONF_TAGS = 'tags'
CONF_DEFAULT_MEASUREMENT = 'default_measurement'
DEFAULT_DATABASE = 'home_assistant'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8086
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = False
DOMAIN = 'influxdb'
TIMEOUT = 5
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Inclusive(CONF_USERNAME, 'authentication'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'authentication'): cv.string,
vol.Optional(CONF_BLACKLIST, default=[]):
vol.All(cv.ensure_list, [cv.entity_id]),
vol.Optional(CONF_DB_NAME, default=DEFAULT_DATABASE): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_DEFAULT_MEASUREMENT): cv.string,
vol.Optional(CONF_TAGS, default={}):
vol.Schema({cv.string: cv.string}),
vol.Optional(CONF_WHITELIST, default=[]):
vol.All(cv.ensure_list, [cv.entity_id]),
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Setup the InfluxDB component."""
from influxdb import InfluxDBClient, exceptions
conf = config[DOMAIN]
host = conf.get(CONF_HOST)
port = conf.get(CONF_PORT)
database = conf.get(CONF_DB_NAME)
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
ssl = conf.get(CONF_SSL)
verify_ssl = conf.get(CONF_VERIFY_SSL)
blacklist = conf.get(CONF_BLACKLIST)
whitelist = conf.get(CONF_WHITELIST)
tags = conf.get(CONF_TAGS)
default_measurement = conf.get(CONF_DEFAULT_MEASUREMENT)
try:
influx = InfluxDBClient(
host=host, port=port, username=username, password=password,
database=database, ssl=ssl, verify_ssl=verify_ssl,
timeout=TIMEOUT)
influx.query("select * from /.*/ LIMIT 1;")
except exceptions.InfluxDBClientError as exc:
_LOGGER.error("Database host is not accessible due to '%s', please "
"check your entries in the configuration file and that "
"the database exists and is READ/WRITE.", exc)
return False
def influx_event_listener(event):
"""Listen for new messages on the bus and sends them to Influx."""
state = event.data.get('new_state')
if state is None or state.state in (
STATE_UNKNOWN, '', STATE_UNAVAILABLE) or \
state.entity_id in blacklist:
return
try:
if len(whitelist) > 0 and state.entity_id not in whitelist:
return
_state = state_helper.state_as_number(state)
except ValueError:
_state = state.state
measurement = state.attributes.get('unit_of_measurement')
if measurement in (None, ''):
if default_measurement:
measurement = default_measurement
else:
measurement = state.entity_id
json_body = [
{
'measurement': measurement,
'tags': {
'domain': state.domain,
'entity_id': state.object_id,
},
'time': event.time_fired,
'fields': {
'value': _state,
}
}
]
for key, value in state.attributes.items():
if key != 'unit_of_measurement':
if isinstance(value, (str, float, bool)):
json_body[0]['fields'][key] = value
elif isinstance(value, int):
# Prevent column data errors in influxDB.
json_body[0]['fields'][key] = float(value)
json_body[0]['tags'].update(tags)
try:
influx.write_points(json_body)
except exceptions.InfluxDBClientError:
_LOGGER.exception('Error saving event "%s" to InfluxDB', json_body)
hass.bus.listen(EVENT_STATE_CHANGED, influx_event_listener)
return True
|
python
|
# Generated by Django 3.1.7 on 2021-03-07 16:44
from django.db import migrations
import django_enumfield.db.fields
import events.models
class Migration(migrations.Migration):
dependencies = [
('events', '0047_participant_date_of_birth'),
]
operations = [
migrations.AlterField(
model_name='login',
name='stage',
field=django_enumfield.db.fields.EnumField(enum=events.models.TeamStatus),
),
migrations.AlterField(
model_name='participant',
name='tshirt_size',
field=django_enumfield.db.fields.EnumField(blank=True, enum=events.models.TshirtSize, null=True),
),
]
|
python
|
import warnings
import numpy as np
import torch
import torch.nn.functional as F
from sklearn import metrics
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from tqdm import tqdm
from datasets.bert_processors.abstract_processor import convert_examples_to_features
from utils.preprocessing import pad_input_matrix, get_coarse_labels, get_fine_mask
# Suppress warnings from sklearn.metrics
warnings.filterwarnings('ignore')
class BertHierarchicalEvaluator(object):
def __init__(self, model, processor, tokenizer, args, split='dev'):
self.args = args
self.model = model
self.processor = processor
self.tokenizer = tokenizer
if split == 'test':
self.eval_examples = self.processor.get_test_examples(args.data_dir)
else:
self.eval_examples = self.processor.get_dev_examples(args.data_dir)
def get_scores(self, silent=False):
eval_features = convert_examples_to_features(self.eval_examples, self.args.max_seq_length,
self.tokenizer, use_guid=True)
unpadded_input_ids = [f.input_ids for f in eval_features]
unpadded_input_mask = [f.input_mask for f in eval_features]
unpadded_segment_ids = [f.segment_ids for f in eval_features]
padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long)
padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long)
padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long)
label_ids_fine = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
doc_ids = torch.tensor([f.guid for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, label_ids_fine, doc_ids)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size)
self.model.eval()
total_loss_fine, total_loss_coarse = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
predicted_labels_coarse, predicted_labels_fine = list(), list()
target_labels_coarse, target_labels_fine = list(), list()
target_doc_ids = list()
for input_ids, input_mask, segment_ids, label_ids_fine, doc_ids in tqdm(eval_dataloader, desc="Evaluating",
disable=silent):
input_ids = input_ids.to(self.args.device)
input_mask = input_mask.to(self.args.device)
segment_ids = segment_ids.to(self.args.device)
label_ids_fine = label_ids_fine.to(self.args.device)
target_doc_ids.extend(doc_ids.tolist())
with torch.no_grad():
logits_coarse, logits_fine = self.model(input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids)
            preds_coarse = torch.sigmoid(logits_coarse).round().long().cpu().detach().numpy()
predicted_labels_coarse.extend(preds_coarse)
# get coarse labels from the fine labels
label_ids_coarse = get_coarse_labels(label_ids_fine, self.args.num_coarse_labels,
self.args.parent_to_child_index_map, self.args.device)
target_labels_coarse.extend(label_ids_coarse.cpu().detach().numpy())
# mask fine predictions using coarse predictions
            preds_fine = torch.sigmoid(logits_fine).round().long().cpu().detach().numpy()
mask_fine = get_fine_mask(torch.Tensor(preds_coarse), self.args.parent_to_child_index_map)
preds_fine[~mask_fine] = 0
predicted_labels_fine.extend(preds_fine)
target_labels_fine.extend(label_ids_fine.cpu().detach().numpy())
if self.args.loss == 'cross-entropy':
                criterion = torch.nn.BCEWithLogitsLoss(reduction='sum')  # size_average=False is deprecated
loss_fine = criterion(logits_fine.cpu(), label_ids_fine.float().cpu())
loss_coarse = criterion(logits_coarse.cpu(), label_ids_coarse.float().cpu())
elif self.args.loss == 'mse':
                criterion = torch.nn.MSELoss(reduction='sum')
m = torch.nn.Sigmoid()
loss_fine = criterion(m(logits_fine.cpu()), label_ids_fine.float().cpu())
loss_coarse = criterion(m(logits_coarse.cpu()), label_ids_coarse.float().cpu())
total_loss_fine += loss_fine.item()
total_loss_coarse += loss_coarse.item()
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
metrics_fine = get_metrics(target_labels_fine, predicted_labels_fine,
target_doc_ids, total_loss_fine, nb_eval_steps)
metrics_coarse = get_metrics(target_labels_coarse, predicted_labels_coarse,
target_doc_ids, total_loss_coarse, nb_eval_steps)
metric_names = ['precision_macro', 'recall_macro', 'f1_macro',
'accuracy',
'avg_loss',
'hamming_loss',
'precision_micro', 'recall_micro', 'f1_micro',
'precision_class', 'recall_class', 'f1_class', 'support_class',
'confusion_matrix', 'id_gold_pred']
metric_names_fine = [name + '_fine' for name in metric_names]
metric_names_coarse = [name + '_coarse' for name in metric_names]
return [metrics_fine, metric_names_fine], [metrics_coarse, metric_names_coarse]
def get_metrics(target_labels, predicted_labels, doc_ids, total_loss, n_steps):
predicted_label_sets = [predicted_label.tolist() for predicted_label in predicted_labels]
target_label_sets = [target_label.tolist() for target_label in target_labels]
hamming_loss = metrics.hamming_loss(target_labels, predicted_labels)
predicted_labels, target_labels = np.array(predicted_labels), np.array(target_labels)
cm = metrics.multilabel_confusion_matrix(target_labels, predicted_labels)
accuracy = metrics.accuracy_score(target_labels, predicted_labels)
precision_micro = metrics.precision_score(target_labels, predicted_labels, average='micro')
recall_micro = metrics.recall_score(target_labels, predicted_labels, average='micro')
f1_micro = metrics.f1_score(target_labels, predicted_labels, average='micro')
precision_macro = metrics.precision_score(target_labels, predicted_labels, average='macro')
recall_macro = metrics.recall_score(target_labels, predicted_labels, average='macro')
f1_macro = metrics.f1_score(target_labels, predicted_labels, average='macro')
precision_class, recall_class, f1_class, support_class = metrics.precision_recall_fscore_support(target_labels,
predicted_labels)
avg_loss = total_loss / n_steps
return [precision_macro, recall_macro, f1_macro,
accuracy,
avg_loss,
hamming_loss,
precision_micro, recall_micro, f1_micro,
precision_class.tolist(), recall_class.tolist(), f1_class.tolist(), support_class.tolist(),
cm.tolist(), list(zip(doc_ids, target_label_sets, predicted_label_sets))]
|
python
|
from django.conf import settings
from django.conf.urls.static import static
from django.urls import path
from . import views
urlpatterns = [
path('', views.events_home, name='events_home'),
# Events urls.
path('eventos/', views.events_list, name='event_list'),
path('eventos/<pk>/configuracion/', views.event_detail, name='event_detail'),
path('eventos/<pk>/editar/', views.event_change, name='event_change'),
path(
'eventos/<pk>/agregar-categoria-sponsor/',
views.event_create_sponsor_category,
name='event_create_sponsor_category'
),
# Sponsoring urls.
path('eventos/<event_pk>/patrocinios/', views.sponsoring_list, name='sponsoring_list'),
path(
'eventos/<event_pk>/patrocinios/crear/',
views.sponsoring_create,
name='sponsoring_create'
),
path(
'eventos/patrocinios/<pk>/',
views.sponsoring_detail,
name='sponsoring_detail'
),
path(
'eventos/patrocinios/<pk>/cerrar',
views.sponsoring_set_close,
name='sponsoring_set_close'
),
path(
'eventos/patrocinios/<pk>/factura/crear/',
views.sponsoring_invoice_create,
name='sponsoring_invoice_create'
),
path(
'eventos/factura/<pk>/afectacion/crear/',
views.sponsoring_invoice_affect_create,
name='sponsoring_invoice_affect_create'
),
# Expenses urls.
path('eventos/<event_pk>/gastos/', views.expenses_list, name='expenses_list'),
path(
'eventos/<event_pk>/gastos/proveedor/crear/',
views.provider_expense_create,
name='provider_expense_create'
),
path(
'eventos/<event_pk>/gastos/organizador/crear/',
views.organizer_refund_create,
name='organizer_refund_create'
),
path(
'eventos/gasto_proveedor/<pk>/',
views.provider_expense_detail,
name='provider_expense_detail'
),
path(
'eventos/gasto_proveedor/<pk>/editar/',
views.provider_expense_update,
name='provider_expense_update'
),
path(
'eventos/gasto_proveedor/<pk>/switch/',
views.provider_expense_switch_state,
name='provider_expense_switch_state'
),
path(
'eventos/reintegro/<pk>/',
views.organizer_refund_detail,
name='organizer_refund_detail'
),
path(
'eventos/gasto_proveedor/<pk>/pago/crear/',
views.provider_expense_payment_create,
name='provider_expense_payment_create'
),
path(
'organizadores/<pk>/reintegros/pagar/',
views.organizer_refund_payment_create,
name='organizer_refund_payment_create'
),
# Invoice actions urls.
path(
'eventos/factura/<pk>/aprobar/',
views.invoice_set_approved,
name='invoice_set_approved'
),
path(
'eventos/factura/<pk>/setear-pago-completo/',
views.invoice_set_complete_payment,
name='invoice_set_complete_payment'
),
path(
'eventos/factura/<pk>/setear-pago-parcial/',
views.invoice_set_partial_payment,
name='invoice_set_partial_payment'
),
# Organizers urls.
path('registrar-organizador/', views.organizer_signup, name='organizer_signup'),
path('organizadores/', views.organizers_list, name='organizer_list'),
path('organizadores/<pk>/', views.organizer_detail, name='organizer_detail'),
path('organizadores/<pk>/editar/', views.organizer_change, name='organizer_change'),
path(
'organizadores/<pk>/agregar-cuenta-bancaria/',
views.organizer_create_bank_account_data,
name='organizer_create_bank_account_data'
),
path(
'cuenta-bancaria/<pk>/editar/',
views.organizer_update_bank_account_data,
name='organizer_update_bank_account_data'
),
# Sponsors urls.
path('patrocinadores/', views.sponsors_list, name='sponsor_list'),
path('patrocinadores/crear/', views.sponsor_create, name='sponsor_create'),
path('patrocinadores/<pk>/', views.sponsor_detail, name='sponsor_detail'),
path('patrocinadores/<pk>/editar/', views.sponsor_change, name='sponsor_change'),
path('patrocinadores/<pk>/habilitar/', views.sponsor_set_enabled, name='sponsor_set_enabled'),
# Providers urls.
path('proveedores/', views.providers_list, name='provider_list'),
path('proveedores/crear/', views.provider_create, name='provider_create'),
path('proveedores/<pk>/', views.provider_detail, name='provider_detail'),
path('proveedores/<pk>/editar/', views.provider_change, name='provider_change'),
# Others
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
python
|
from ipykernel.comm import Comm
from varname import nameof
import inspect
# import threading
from multiprocessing import Process
from .mwserver import run_server, run_server_from_id
import uuid
import subprocess
import threading
# jupyter notebook --ip='*' --NotebookApp.token='' --NotebookApp.iopub_data_rate_limit=1.0e10
class Nep:
# def __init__(self,comm_name=None):
# if comm_name is None:
# comm_name=str(uuid.uuid4())
def __init__(self,comm=None,kernel_id=None):
# self.comm = Comm(target_name=comm_name)
# self.comm = Comm(target_name="neos_comm")
print(kernel_id)
self.kernel_id = kernel_id
if comm is None:
self.comm = Comm(target_name="neos_comm")
else:
self.comm = comm
self.comm.open()
self.vars = Variables(self.comm,self)
self.comm.on_msg(self._on_msg)
self.vars_to_update = []
self.var_types = {}
self.neos_updates_locked = False
self.var_temp_vals = {}
def start(self, base='http://localhost:8888', notebook_path='/Untitled.ipynb', auth_token='', ws_port=8766):
if self.kernel_id is None:
server_process = Process(target=run_server, args=(self.comm.comm_id, base, notebook_path, auth_token, ws_port),daemon=True)
else:
server_process = Process(target=run_server_from_id, args=(self.comm.comm_id, self.kernel_id, auth_token, ws_port),daemon=True)
server_process.start()
#guido forgive me coz i know this is ugly
static_server_process = Process(target=subprocess.call, args=('python -m http.server 8000',),kwargs={"shell":True})
static_server_process.start()
#TODO: nep.stop() !!
def _on_msg(self,msg):
#handler for message recived for this Nep
#we update the value of the variable
msg=msg["content"]["data"]
i = msg.index("/")
msg_format_correct = (i != -1)
if msg_format_correct:
varname = msg[:i]
if varname in self.vars_to_update:
val_str = msg[i+1:]
if self.var_types[varname] == "float":
varvalue = float(val_str)
elif self.var_types[varname] == "int":
varvalue = int(val_str)
elif self.var_types[varname] == "float_vec":
val_str = val_str[1:-1]
varvalue = tuple([float(x) for x in val_str.split(";")])
elif self.var_types[varname] == "int_vec":
val_str = val_str[1:-1]
varvalue = tuple([int(x) for x in val_str.split(";")])
elif self.var_types[varname] == "list":
varvalue = val_str.split("|")[:-1]
else:
varvalue = val_str
if not self.neos_updates_locked:
setattr(Variables,"_"+varname,varvalue)
else:
self.var_temp_vals[varname] = varvalue
else:
print("Warning: Neos is trying to update variable "+varname+" that is not Nep's vars_to_update")
else:
print("Warning: Neos message type not supported (it doesn't have the format varname/varvalue)")
    def _send_var(self,var_name,var_value):
        var_type = type(var_value)
        value_str = ""
        if var_type is str:
            value_str = var_value
        elif var_type is tuple:
            value_str = "[" + ";".join([str(x) for x in var_value]) + "]"
        elif var_type is list:
            # trailing "|" matches the split("|")[:-1] parsing in _on_msg
            value_str = "|".join([str(x) for x in var_value]) + "|"
        else:
            value_str = str(var_value)
        self.comm.send("updateVar/"+var_name+"/"+value_str)
def send(self, var_name, custom_name=None, value=None):
var_value = value
#IDEA: Maybe put this functionality in another method. send_custom or something!
if value is None:
frame = inspect.currentframe()
locals = frame.f_back.f_locals # local variables from calling scope
var_value = locals[var_name]
if custom_name is not None:
var_name = custom_name
self._send_var(var_name,var_value)
def bind(self,varname,callback=None,type="float",update_neos=True,update_python=True):
prop = property(fset=Variables._generate_set(varname,update_neos,callback),fget=lambda self: Variables.__dict__["_"+varname], fdel=Variables._generate_del(varname,update_neos))
setattr(Variables,"_"+varname,None)
setattr(Variables,varname,prop)
self.comm.send("addVar/"+varname)
if update_python:
if varname not in self.vars_to_update:
self.vars_to_update.append(varname)
self.var_types[varname]=type
def plot(self,plt):
plt.plot()
figname = "img/"+uuid.uuid1().hex+".png"
plt.savefig(figname)
self.comm.send("media/"+"http://localhost:8000/"+figname)
def listen(self, varname):
frame = inspect.currentframe()
locals = frame.f_back.f_locals # local variables from calling scope
#TODO: this one only upates the local variable when neos changes the variable
def lock(self):
#freeze updating of variables from Neos, and instead update to a temp storage of variables
self.neos_updates_locked = True
def unlock(self):
#unfreeze the variables from Neos, and update them according to the stored updates
self.neos_updates_locked = False
for varname in self.var_temp_vals:
setattr(Variables,"_"+varname,self.var_temp_vals[varname])
self.var_temp_vals = {}
def reactive_loop(self,function,iterable,*args,**kwargs):
#TODO: iterate function with iterable, unlocking and locking the self.vars before every iteration.
# run iteration in another thread to allow for neos to update the variables between each iteration
def loop():
for it in iterable:
self.lock()
function(it,*args,**kwargs)
self.unlock()
t = threading.Thread(target=loop)
t.start()
pass
#nep.read / user prompt. Implement with thejupyter api read-from-frontend stuff in Neos
class Variables(object):
def __init__(self,comm,nep):
self.comm = comm
self.nep = nep
@staticmethod
def _generate_set(name,update_neos,callback):
if update_neos:
def set(self,value):
setattr(Variables,"_"+name,value)
self.nep._send_var(name,value)
if callback is not None:
callback()
#IDEA: could add here a thing that updates the nep.var_types according to the value set.
else:
def set(self,value):
setattr(Variables,"_"+name,value)
if callback is not None:
callback()
return set
    @staticmethod
    def _generate_del(name,update_neos):
        if update_neos:
            def delete(self):
                delattr(Variables, "_"+name)  # a class __dict__ is a read-only mappingproxy; use delattr
                #TODO: add special message to indicate variable was deleted
        else:
            def delete(self):
                delattr(Variables, "_"+name)
        return delete
|
python
|
import sys, os, collections, copy
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
data_fn = 'data/WikiQA-train.tsv'
X = pd.read_csv(data_fn, sep='\t', header=0, dtype=str, skiprows=None, na_values='?', keep_default_na=False)
|
python
|
# -*- coding:utf-8 -*-
'''!
@file interrupt.py
@brief Interrupt detection of free fall, an interrupt signal will be generated in int1 once a free fall event occurs.
@n When a free-fall motion is detected, it will be printed on the serial port.
@n When using SPI, chip select pin can be modified by changing the value of RASPBERRY_PIN_CS
@copyright Copyright (c) 2010 DFRobot Co.Ltd (http://www.dfrobot.com)
@license The MIT License (MIT)
@author [fengli]([email protected])
@version V1.0
@date 2021-01-16
@url https://github.com/DFRobot/DFRobot_LIS
'''
import sys
sys.path.append("../..") # set system path to top
from DFRobot_LIS2DW12 import *
import time
INT1 = 26 #Interrupt pin, use BCM coding method; BCM 26 corresponds to GPIO26 (physical pin 37)
int_pad_Flag = False #intPad flag
def int_pad_callback(status):
global int_pad_Flag
int_pad_Flag = True
#If you want to use SPI to drive this module, uncomment the codes below, and connect the module with Raspberry Pi via SPI port
#RASPBERRY_PIN_CS = 27 #Chip selection pin when SPI is selected, use BCM coding method; BCM 27 corresponds to GPIO27
#acce = DFRobot_IIS2DLPC_SPI(RASPBERRY_PIN_CS)
#If you want to use I2C to drive this module, uncomment the codes below, and connect the module with Raspberry Pi via I2C port
#The I2C address can be switched through the DIP switch (gravity version) or SDO pin (Breakout version) on the board
I2C_BUS = 0x01 #default use I2C1
#ADDRESS_0 = 0x18 #sensor address 0
ADDRESS_1 = 0x19 #sensor address 1
acce = DFRobot_IIS2DLPC_I2C(I2C_BUS ,ADDRESS_1)
# set int_Pad to input
GPIO.setup(INT1, GPIO.IN)
#set int_Pad interrupt callback
GPIO.add_event_detect(INT1,GPIO.RISING,int_pad_callback)
#Chip initialization
acce.begin()
#Get chip id
print('chip id :%x'%acce.get_id())
#Software reset to restore the value of all registers
acce.soft_reset()
#Choose whether to continuously let the chip collect data
acce.contin_refresh(True)
'''
Set power mode:
HIGH_PERFORMANCE_14BIT #High-Performance Mode
CONT_LOWPWR4_14BIT #Continuous measurement,Low-Power Mode 4(14-bit resolution)
CONT_LOWPWR3_14BIT #Continuous measurement,Low-Power Mode 3(14-bit resolution)
CONT_LOWPWR2_14BIT #Continuous measurement,Low-Power Mode 2(14-bit resolution)
CONT_LOWPWR1_12BIT #Continuous measurement,Low-Power Mode 1(12-bit resolution)
SING_LELOWPWR4_14BIT #Single data conversion on demand mode,Low-Power Mode 4(14-bit resolution)
SING_LELOWPWR3_14BIT #Single data conversion on demand mode,Low-Power Mode 3(14-bit resolution
SING_LELOWPWR2_14BIT #Single data conversion on demand mode,Low-Power Mode 2(14-bit resolution)
SING_LELOWPWR1_12BIT #Single data conversion on demand mode,Low-Power Mode 1(12-bit resolution)
HIGHP_ERFORMANCELOW_NOISE_14BIT #High-Performance Mode,Low-noise enabled
CONT_LOWPWRLOWNOISE4_14BIT #Continuous measurement,Low-Power Mode 4(14-bit resolution,Low-noise enabled)
CONT_LOWPWRLOWNOISE3_14BIT #Continuous measurement,Low-Power Mode 3(14-bit resolution,Low-noise enabled)
CONT_LOWPWRLOWNOISE2_14BIT #Continuous measurement,Low-Power Mode 2(14-bit resolution,Low-noise enabled)
CONT_LOWPWRLOWNOISE1_12BIT #Continuous measurement,Low-Power Mode 1(12-bit resolution,Low-noise enabled)
SINGLE_LOWPWRLOWNOISE4_14BIT #Single data conversion on demand mode,Low-Power Mode 4(14-bit resolution),Low-noise enabled
SINGLE_LOWPWRLOWNOISE3_14BIT #Single data conversion on demand mode,Low-Power Mode 3(14-bit resolution),Low-noise enabled
SINGLE_LOWPWRLOWNOISE2_14BIT #Single data conversion on demand mode,Low-Power Mode 2(14-bit resolution),Low-noise enabled
SINGLE_LOWPWRLOWNOISE1_12BIT #Single data conversion on demand mode,Low-Power Mode 1(12-bit resolution),Low-noise enabled
'''
acce.set_power_mode(acce.CONT_LOWPWR4_14BIT)
'''
Set the sensor data collection rate:
RATE_OFF #Measurement off
RATE_1HZ6 #1.6hz, use only under low-power mode
RATE_12HZ5 #12.5hz
RATE_25HZ
RATE_50HZ
RATE_100HZ
RATE_200HZ
RATE_400HZ #Use only under High-Performance mode
RATE_800HZ #Use only under High-Performance mode
RATE_1600HZ #Use only under High-Performance mode
SETSWTRIG #The software triggers a single measurement
'''
acce.set_data_rate(acce.RATE_100HZ)
'''
Set the measurement range
RANGE_2G #±2g
RANGE_4G #±4g
RANGE_8G #±8g
RANGE_16G #±16g
'''
acce.set_range(acce.RANGE_2G)
'''
Set the free fall time (Or the number of free-fall samples. In a measurement, it will not be determined as a free fall event unless the samples are enough.)
dur duration(0 ~ 31)
time = dur * (1/rate)(unit:s)
| An example of a linear relationship between an argument and time |
|------------------------------------------------------------------------------------------------------------------------|
| | | | | |
| Data rate | 25 Hz | 100 Hz | 400 Hz | = 800 Hz |
|------------------------------------------------------------------------------------------------------------------------|
| time |dur*(1s/25)= dur*40ms| dur*(1s/100)= dur*10ms | dur*(1s/400)= dur*2.5ms | dur*(1s/800)= dur*1.25ms |
|------------------------------------------------------------------------------------------------------------------------|
'''
acce.set_free_fall_Dur(dur = 0x06)
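# With RATE_100HZ selected above, dur = 0x06 gives a free-fall window of
# 6 * (1 s / 100) = 60 ms before the event is reported.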
'''
Set the interrupt source of the int1 pin:
DOUBLE_TAP(Double tap)
FREEFALL(Free fall)
WAKEUP(wake)
SINGLE_TAP(single tap)
IA6D(Orientation change check)
'''
acce.set_int1_event(acce.FREEFALL)
time.sleep(0.1)
while True:
    if int_pad_Flag:
        #Free fall event detected
        time.sleep(0.1)
        free_fall = acce.free_fall_detected()
        if free_fall:
            print("free fall detected")
            time.sleep(0.2)
        int_pad_Flag = False
|
python
|
from django.conf.urls import url
from rest_framework_jwt.views import obtain_jwt_token
from . import views
from .views import RegisterUsernameCountAPIView, UserCenterInfoAPIView, UserEmailInfoAPIView,UserEmailVerificationAPIView, \
AddressViewSet
urlpatterns = [
url(r'^usernames/(?P<username>\w{5,20})/count/$',views.RegisterUsernameCountAPIView.as_view(),name='usernamecount'),
# url(r'^usernames/(?P<username>\w{5,20})/count/$',views.RegisterUsernameAPIView.as_view(),name='usernamecount'),
# url(r'^phones/(?P<mobile>1[345789]\d{9})/count/$',views.RegisterPhoneCountAPIView.as_view(),name='phonecount'),
url(r'^phones/(?P<mobile>1[345789]\d{9})/count/$',views.RegisterPhoneCountAPIView.as_view(), name='phonecount'),
url(r'^$',views.RegisterUserAPIView.as_view()),
    # Login
# url(r'^auths/',obtain_jwt_token),
url(r'^auths/',views.MergeLoginAPIView.as_view()),
    # User center display
url(r'^infos/$',UserCenterInfoAPIView.as_view()),
    # Email
url(r'^emails/$',UserEmailInfoAPIView.as_view()),
url(r'^emails/verification/$',UserEmailVerificationAPIView.as_view()),
# url(r'^addresses/$',AddressViewSet.as_view()),
url(r'^browerhistories/$', views.UserHistoryAPIView.as_view(), name='history'),
]
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'addresses',AddressViewSet,base_name='address')
urlpatterns += router.urls
|
python
|
## Plotting functions
# Imports
import math
import matplotlib.pyplot as plot
import pandas
import seaborn
from evolve_soft_2d import file_paths
################################################################################
def histogram(
template,
tm: str,
data: list,
t: str,
y: str,
x: str,
bins = "auto",
color: str = "b",
) -> None:
"""Plot a histogram
Parameters
----------
template
The unit template parameters
tm : str
The timestamp of the current simulation
data : list
The data to be plotted
t : str
The title of the graph
y : str
The label of the y-axis
x : str
The label of the x-axis
bins : optional
The bin settings, by default "auto"
color : str, optional
The colour of the graph, by default "b"
"""
# Determine the maximum x-axis value of the graph
bin_max = math.ceil(max(data))
# Open a figure
plot.figure()
# Plot the histogram
plot.rcParams.update({"figure.figsize":(7, 5), "figure.dpi":100})
plot.hist(data, bins = bins, color = color)
plot.gca().set(title = t, ylabel = y, xlabel = x)
plot.xlim(0, bin_max)
# # Show the plot
# plot.show()
# Save the figure
save_plot(template, t, tm)
return
################################################################################
def hist_all(
template,
tm,
data: pandas.DataFrame,
) -> None:
data_col = [i for i in data.columns]
for i in data_col:
seaborn.displot(data, x = i)
# seaborn.displot(data, x = i, bins = 20)
# Save the figure
save_plot(template, i, tm)
return
################################################################################
def hist(
template,
tm,
data: pandas.DataFrame,
x: str,
) -> None:
seaborn.displot(data, x = x)
# Save the figure
save_plot(template, x, tm)
return
################################################################################
def boxp(
template,
tm,
data: pandas.DataFrame,
x: str,
y: str,
) -> None:
seaborn.boxplot(x = x, y = y, data = data)
# Save the figure
save_plot(template, x + "_vs_" + y, tm)
return
################################################################################
def boxp_all(
template,
tm,
data: pandas.DataFrame,
x: list,
y: list,
) -> None:
for i in x:
for j in y:
seaborn.boxplot(x = i, y = j, data = data)
# Save the figure
save_plot(template, i + "_vs_" + j, tm)
return
################################################################################
def boxp_melt(
template,
tm,
data: pandas.DataFrame,
x: list,
y: list,
) -> None:
for i in x:
data_melt = data.melt(id_vars = i, value_vars = y, var_name = "Type of Energy", value_name = "Energy (mJ)")
seaborn.boxplot(x = i, y = "Energy (mJ)", hue = "Type of Energy", data = data_melt)
# Save the figure
save_plot(template, i + "_vs_" + "Energy (mJ)", tm)
return
################################################################################
def scat_all(
template,
tm,
data: pandas.DataFrame,
x: list,
y: list,
) -> None:
for i in x:
for j in y:
seaborn.relplot(x = i, y = j, data = data)
# Save the figure
save_plot(template, i + "_vs_" + j, tm)
return
################################################################################
def scatterplot(
template,
tm: str,
x_data: list,
y_data: list,
t: str,
x_l: str,
y_l: str,
color: str = "b",
marker: str = "o"
) -> None:
"""Plot a scatter plot
Parameters
----------
template
The unit template parameters
tm : str
The timestamp of the current simulation
x_data : list
The data to be plotted on the x-axis
y_data : list
The data to be plotted on the y-axis
t : str
The title of the graph
    x_l : str
        The label of the x-axis
    y_l : str
        The label of the y-axis
color : str, optional
The colour of the graph, by default "b"
marker : str, optional
The plot markers, by default "o"
"""
# Determine the maximum x-axis value of the graph
x_max = math.ceil(max(x_data))
# Open a figure
plot.figure()
# Plot the scatter plot
plot.rcParams.update({"figure.figsize":(7, 5), "figure.dpi":100})
plot.scatter(x_data, y_data, c = color, marker = marker)
plot.gca().set(title = t, ylabel = y_l, xlabel = x_l)
plot.xlim(0, x_max)
# # Show the plot
# plot.show()
# Save the figure
save_plot(template, t, tm)
return
################################################################################
def lreg_all(
template,
tm,
data: pandas.DataFrame,
x: list,
y: list,
order: int = 1
) -> None:
for i in x:
for j in y:
seaborn.regplot(x = i, y = j, data = data, order = order)
# Save the figure
save_plot(template, i + "_vs_" + j, tm)
return
################################################################################
def grid_plot(
template,
tm,
data: pandas.DataFrame,
l: str,
) -> None:
seaborn.heatmap(data, linewidths=1, linecolor='black', cmap = "Greys")
# Save the figure
save_plot(template, l, tm)
return
################################################################################
def plot_all(
template,
v: list,
n_e: list,
l: list,
tm: str,
) -> None:
"""Plot all desired figures
Parameters
----------
template
The unit template parameters
v : list
The data to be plotted
n_e : list
The list of the number of elements removed from every element
l : list
The list of labels of the data
tm : str
The timestamp of the current simulation
"""
scatterplot(template, tm, v[0], v[1], "Constraint Energy X vs Y", "Constraint Energy X (J)", "Constraint Energy Y (J)")
scatterplot(template, tm, v[3], v[4], "Internal Energy X vs Y", "Internal Energy X (J)", "Internal Energy Y (J)")
scatterplot(template, tm, n_e, v[6], "Elements Removed vs Hausdorff Distance", "Number of Elements Removed", "Hausdorff Distance")
# # Loop through the types of data
# for i in range(0, len(v)):
# # Plot the histogram
# histogram(template, tm, v[i], l[i], "Frequency", "Energy (J)")
# # Plot the scatterplot
# scatterplot(template, tm, n_e, v[i], l[i], "Energy (J)", "Number of Elements Removed")
# # Plot a scatterplot
# scatterplot(template, tm, v[0], v[1], "Constraint Energy (J)", "Y-direction", "X-direction")
return
################################################################################
def save_plot(
template,
t: str,
tm: str,
) -> None:
"""Save a figure
Parameters
----------
template
The unit template parameters
t : str
The title of the graph
tm : str
The timestamp of the current simulation
"""
# Create the file path of the figure
fp_p = file_paths.create_fp_file(template, t + tm, "g")
plot.tight_layout()
# Save the figure
plot.savefig(fp_p, dpi = 300)
# Close the figure
plot.close()
return
|
python
|
from .box_colombia import BoxColombia
from .box_daily_square import BoxDailySquare
from .box_don_juan import BoxDonJuan
from .box_don_juan_usd import BoxDonJuanUSD
from .box_office import BoxOffice
from .box_partner import BoxPartner
from .box_provisioning import BoxProvisioning
|
python
|
from mintools import ormin
class Chaininfo(ormin.Model):
bestblockhash = ormin.StringField()
blocks = ormin.IntField()
mediantime = ormin.IntField()
class Block(ormin.Model):
height = ormin.IntField(index=True)
blob = ormin.TextField()
class Tx(ormin.Model):
blockhash = ormin.StringField(index=True)
blob = ormin.TextField()
class Blockstats(ormin.Model):
height = ormin.IntField(index=True)
blob = ormin.TextField()
class Mempoolstats(ormin.Model):
time = ormin.IntField(index=True)
blob = ormin.TextField()
|
python
|
# -*- coding: utf-8 -*-
"""Simple OSC client."""
import socket
try:
from ustruct import pack
except ImportError:
from struct import pack
from uosc.common import Bundle, to_frac
if isinstance("", bytes):
have_bytes = False
unicodetype = unicode # noqa
else:
have_bytes = True
unicodetype = str
TYPE_MAP = {
int: "i",
float: "f",
bytes: "b",
bytearray: "b",
unicodetype: "s",
True: "T",
False: "F",
None: "N",
}
def pack_addr(addr):
"""Pack a (host, port) tuple into the format expected by socket methods."""
if isinstance(addr, (bytes, bytearray)):
return addr
if len(addr) != 2:
raise NotImplementedError("Only IPv4/v6 supported")
addrinfo = socket.getaddrinfo(addr[0], addr[1])
return addrinfo[0][4]
def pack_timetag(t):
"""Pack an OSC timetag into 64-bit binary blob."""
return pack(">II", *to_frac(t))
def pack_string(s, encoding="utf-8"):
"""Pack a string into a binary OSC string."""
if isinstance(s, unicodetype):
s = s.encode(encoding)
assert all(
(i if have_bytes else ord(i)) < 128 for i in s
), "OSC strings may only contain ASCII chars."
slen = len(s)
return s + b"\0" * (((slen + 4) & ~0x03) - slen)
def pack_blob(b, encoding="utf-8"):
"""Pack a bytes, bytearray or tuple/list of ints into a binary OSC blob."""
if isinstance(b, (tuple, list)):
b = bytearray(b)
elif isinstance(b, unicodetype):
b = b.encode(encoding)
blen = len(b)
b = pack(">I", blen) + bytes(b)
return b + b"\0" * (((blen + 3) & ~0x03) - blen)
def pack_bundle(bundle):
"""Return bundle data packed into a binary string."""
data = []
for msg in bundle:
if isinstance(msg, Bundle):
msg = pack_bundle(msg)
elif isinstance(msg, tuple):
msg = create_message(*msg)
data.append(pack(">I", len(msg)) + msg)
return b"#bundle\0" + pack_timetag(bundle.timetag) + b"".join(data)
def pack_midi(val):
assert not isinstance(val, unicodetype), (
"Value with tag 'm' or 'r' must be bytes, bytearray or a sequence of "
"ints, not %s" % unicodetype
)
if not have_bytes and isinstance(val, str):
val = (ord(c) for c in val)
return pack("BBBB", *tuple(val))
def create_message(address, *args):
"""Create an OSC message with given address pattern and arguments.
The OSC types are either inferred from the Python types of the arguments,
or you can pass arguments as 2-item tuples with the OSC typetag as the
first item and the argument value as the second. Python objects are mapped
to OSC typetags as follows:
* ``int``: i
* ``float``: f
* ``str``: s
* ``bytes`` / ``bytearray``: b
* ``None``: N
* ``True``: T
* ``False``: F
If you want to encode a Python object to another OSC type, you have to pass
a ``(typetag, data)`` tuple, where ``data`` must be of the appropriate type
according to the following table:
* c: ``str`` of length 1
* h: ``int``
* d: ``float``
* I: ``None`` (unused)
* m: ``tuple / list`` of 4 ``int``s or ``bytes / bytearray`` of length 4
* r: same as 'm'
* t: OSC timetag as as ``int / float`` seconds since the NTP epoch
* S: ``str``
"""
assert address.startswith("/"), "Address pattern must start with a slash."
data = []
types = [","]
for arg in args:
type_ = type(arg)
if isinstance(arg, tuple):
typetag, arg = arg
else:
typetag = TYPE_MAP.get(type_) or TYPE_MAP.get(arg)
if typetag in "ifd":
data.append(pack(">" + typetag, arg))
elif typetag in "sS":
data.append(pack_string(arg))
elif typetag == "b":
data.append(pack_blob(arg))
elif typetag in "rm":
data.append(pack_midi(arg))
elif typetag == "c":
data.append(pack(">I", ord(arg)))
elif typetag == "h":
data.append(pack(">q", arg))
elif typetag == "t":
data.append(pack_timetag(arg))
elif typetag not in "IFNT":
raise TypeError("Argument of type '%s' not supported." % type_)
types.append(typetag)
return pack_string(address) + pack_string("".join(types)) + b"".join(data)
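# Usage examples (illustrative):
#   create_message("/fader/1", 0.5)                    # typetags ",f"
#   create_message("/note", ("m", (0, 144, 60, 127)))  # explicit MIDI typetag
#   create_message("/flags", True, None)               # ",TN" -- no payload bytes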
class Client:
def __init__(self, host, port=None):
if port is None:
if isinstance(host, (list, tuple)):
host, port = host
else:
port = host
host = "127.0.0.1"
self.dest = pack_addr((host, port))
self.sock = None
def send(self, msg, *args, **kw):
dest = pack_addr(kw.get("dest", self.dest))
if not self.sock:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if isinstance(msg, Bundle):
msg = pack_bundle(msg)
elif args or isinstance(msg, unicodetype):
msg = create_message(msg, *args)
self.sock.sendto(msg, dest)
def close(self):
if self.sock:
self.sock.close()
self.sock = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def send(dest, address, *args):
with Client(dest) as client:
client.send(address, *args)
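# Usage sketch (host and port are illustrative):
#   send(("127.0.0.1", 9000), "/ping")         # one-shot convenience wrapper
#   with Client("127.0.0.1", 9000) as client:  # reuses a single UDP socket
#       client.send("/volume", 0.8)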
|
python
|
# Copyright (c) 2018 Robin Jarry
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import base64
import logging
import os
import re
from notmuch.errors import NotmuchError
LOG = logging.getLogger(__name__)
#------------------------------------------------------------------------------
class Maildir(object):
def __init__(self, config):
self.config = config
gmail_dir = os.path.join(self.config.notmuch_db_dir, 'gmail')
self.tmp_dir = os.path.join(gmail_dir, 'tmp')
self.new_dir = os.path.join(gmail_dir, 'new')
self.cur_dir = os.path.join(gmail_dir, 'cur')
def get_changes(self):
last_rev = self.config.get_last_notmuch_rev()
if last_rev is not None:
return self._search_notmuch('lastmod:%s..' % last_rev)
return {}, {}
def all_messages(self):
return self._search_notmuch('path:**')[0]
GMAIL_MESSAGE_RE = re.compile(r'^gmail\.([0-9a-f]+):2,$')
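    # Example: a file named 'gmail.16c9a3b2:2,' yields gmail_id '16c9a3b2';
    # filenames that do not match this pattern are treated as local messages.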
def _search_notmuch(self, querystring):
gmail = {}
local = {}
with self.config.notmuch_db() as db:
query = db.create_query(querystring)
for notmuch_msg in query.search_messages():
for f in notmuch_msg.get_filenames():
fname = os.path.basename(f)
match = self.GMAIL_MESSAGE_RE.match(fname)
tags = set(notmuch_msg.get_tags())
tags.difference_update(self.config.ignore_tags)
if match:
gmail_id = match.group(1)
gmail[gmail_id] = tags
else:
local[f] = tags
return gmail, local
def store(self, gmail_msg):
filename = 'gmail.{id}:2,'.format(**gmail_msg)
tmp_path = os.path.join(self.tmp_dir, filename)
if not os.path.isdir(self.tmp_dir):
os.makedirs(self.tmp_dir)
if not os.path.isdir(self.cur_dir):
os.makedirs(self.cur_dir)
msg_bytes = base64.urlsafe_b64decode(gmail_msg['raw'].encode('ascii'))
with open(tmp_path, 'wb') as f:
f.write(msg_bytes)
msg_path = os.path.join(self.new_dir, filename)
if not os.path.isdir(self.new_dir):
os.makedirs(self.new_dir)
os.rename(tmp_path, msg_path)
        try:
            utime = int(gmail_msg['internalDate']) / 1000
            os.utime(msg_path, times=(utime, utime))
        except (KeyError, ValueError, OSError):
            # Preserving the Gmail internal date on the file is best-effort.
            pass
return msg_path
def index(self, messages):
with self.config.notmuch_db() as db:
for msg_path, tags in messages.items():
msg, _ = db.add_message(msg_path, sync_maildir_flags=False)
msg.freeze()
for tag in tags:
msg.add_tag(tag, sync_maildir_flags=False)
msg.thaw()
def apply_tags(self, remote_updated):
n_updated = len(remote_updated)
counter = '[%{0}d/%{0}d]'.format(len(str(n_updated)))
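        # e.g. n_updated == 250 builds '[%3d/%3d]', so the progress prefix
        # in the log lines below stays aligned.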
n = 0
with self.config.notmuch_db() as db:
for gmail_id, tags in remote_updated.items():
n += 1
fpath = os.path.join(self.new_dir, 'gmail.{}:2,'.format(gmail_id))
msg = db.find_message_by_filename(fpath)
if msg is None:
LOG.warning(
counter + ' message %r not found in notmuch db',
n, n_updated, fpath)
continue
msg.freeze()
msg.remove_all_tags(sync_maildir_flags=False)
for tag in tags:
msg.add_tag(tag, sync_maildir_flags=False)
msg.thaw()
LOG.info(counter + ' message %r tags %s updated',
n, n_updated, gmail_id, tags)
def delete(self, remote_deleted):
n_deleted = len(remote_deleted)
counter = '[%{0}d/%{0}d]'.format(len(str(n_deleted)))
n = 0
with self.config.notmuch_db() as db:
for gmail_id in remote_deleted:
n += 1
fpath = os.path.join(self.new_dir, 'gmail.{}:2,'.format(gmail_id))
try:
db.remove_message(fpath)
except NotmuchError as e:
LOG.warning('Message %r: %s', fpath, e)
if os.path.isfile(fpath):
os.unlink(fpath)
LOG.info(counter + ' message %r deleted', n, n_deleted, gmail_id)
|
python
|
import sys
import re
from setuptools.command.test import test as TestCommand
from setuptools import setup
from setuptools import find_packages
metadata = dict(
re.findall("__([a-z]+)__ = '([^']+)'", open('consul/__init__.py').read()))
requirements = [
x.strip() for x
in open('requirements.txt').readlines() if not x.startswith('#')]
description = "Python client for Consul (http://www.consul.io/)"
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name='python-consul',
version=metadata['version'],
author='Andy Gayton',
author_email='[email protected]',
install_requires=requirements,
packages=find_packages(),
url='https://github.com/cablehead/python-consul',
license='MIT',
description=description,
long_description=open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
tests_require=['pytest'],
cmdclass={'test': PyTest},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
)
|
python
|
import uuid
from app.api.models.namespaces import NamespacePrimaryKey, NamespaceSchema, NamespaceResources, NamespacePrimaryKeyProjectID
from app.db import namespaces, database
async def post(payload: NamespaceSchema):
query = namespaces.insert().values(payload.dict())
return await database.execute(query=query)
async def get(primary_key: NamespacePrimaryKey):
query = namespaces.select().where(primary_key.name == namespaces.c.name).where(primary_key.region == namespaces.c.region)
return await database.fetch_one(query=query)
async def get_by_project(project_id: uuid.UUID):
query = namespaces.select().where(project_id == namespaces.c.project_id)
return await database.fetch_all(query=query)
async def put_cpu_mem(payload: NamespaceResources):
query = (
namespaces
.update()
.where(payload.name == namespaces.c.name).where(payload.region == namespaces.c.region)
.values(max_namespace_cpu=payload.max_namespace_cpu,
max_namespace_mem=payload.max_namespace_mem,
default_limit_pod_cpu=payload.default_limit_pod_cpu,
default_limit_pod_mem=payload.default_limit_pod_mem)
.returning(namespaces.c.name, namespaces.c.region)
)
return await database.execute(query=query)
async def put_project(payload: NamespacePrimaryKeyProjectID):
query = (
namespaces
.update()
.where(payload.name == namespaces.c.name).where(payload.region == namespaces.c.region)
.values(project_id=payload.project_id)
.returning(namespaces.c.name, namespaces.c.region)
)
return await database.execute(query=query)
async def delete(payload: NamespacePrimaryKey):
query = namespaces.delete().where(payload.name == namespaces.c.name).where(payload.region == namespaces.c.region)
return await database.execute(query=query)
async def delete_by_project(project_id: uuid.UUID):
query = namespaces.delete().where(project_id == namespaces.c.project_id)
return await database.execute(query=query)
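# Usage sketch (assumes `database` is connected; field values are
# illustrative only):
#   row = await get(NamespacePrimaryKey(name="default", region="eu-west-1"))
#   await delete(NamespacePrimaryKey(name="default", region="eu-west-1"))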
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from pyarrow.compat import unittest
import pyarrow as arrow
A = arrow
import pandas as pd
class TestColumn(unittest.TestCase):
def test_basics(self):
data = [
A.from_pylist([-10, -5, 0, 5, 10])
]
        table = A.Table.from_arrays(('a',), data, 'table_name')
column = table.column(0)
assert column.name == 'a'
assert column.length() == 5
assert len(column) == 5
assert column.shape == (5,)
def test_pandas(self):
data = [
A.from_pylist([-10, -5, 0, 5, 10])
]
        table = A.Table.from_arrays(('a',), data, 'table_name')
column = table.column(0)
series = column.to_pandas()
assert series.name == 'a'
assert series.shape == (5,)
assert series.iloc[0] == -10
|
python
|
import logging
from webapp2_extras.auth import InvalidAuthIdError, InvalidPasswordError
from authentication import BaseHandler
from authentication import app
@app.route('/login', 'login')
class LoginHandler(BaseHandler):
def get(self):
self._serve_page()
def post(self):
username = self.request.get('username')
password = self.request.get('password')
try:
u = self.auth.get_user_by_password(username, password, remember=True,
save_session=True)
self.redirect(self.uri_for('home'))
except (InvalidAuthIdError, InvalidPasswordError) as e:
logging.info('Login failed for user %s because of %s', username, type(e))
self._serve_page(True)
def _serve_page(self, failed=False):
username = self.request.get('username')
params = {
'username': username,
'failed': failed
}
self.render_template('login.html', params)
@app.route('/logout', 'logout')
class LogoutHandler(BaseHandler):
def get(self):
self.auth.unset_session()
self.redirect(self.uri_for('login'))
|
python
|
import random
class Dealer:
    '''
    Represents the dealer in the game.
    Responsible for drawing a new card and for reporting both the
    previous card and the current card.
    Attributes: last_card, current_card
    '''
def __init__(self):
#Initializes the last card and sets current card equal to it
self.last_card = random.randint(1,13)
self.current_card = self.last_card
def draw_card(self):
#Sets the last card to the current card, and then draws a new random card.
self.last_card = self.current_card
self.current_card = random.randint(1,13)
def get_last_card(self):
#Returns the last card
return self.last_card
def get_next_card(self):
#Returns the current or "next" card
return self.current_card
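# Usage sketch: draw once, then compare the new card against the previous one.
#   dealer = Dealer()
#   dealer.draw_card()
#   went_up = dealer.get_next_card() > dealer.get_last_card()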
|
python
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from twitter import updater
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
update = updater.updater()
update.check()
|
python
|
import cv2
import numpy as np
from sklearn.metrics import mutual_info_score
import time
from tkinter import Tk
from tkinter.filedialog import askopenfilename
start_time = time.time()
detector = cv2.ORB_create()
Tk().withdraw()
path1 = askopenfilename(initialdir = r"E:\Research\Datasets\MVS",
                        filetypes = (("Video File", "*.avi"), ("All Files", "*.*"), ("Video File", "*.mp4")),
                        title = "Please choose first video")
classes = 'Models/object.names'
weights = 'yolov3.weights'
config = 'Models/yolov3.cfg'
with open(classes, 'r') as f:
classes = [line.strip() for line in f.readlines()]
COLORS = np.random.uniform(0, 255, size=(len(classes), 3))
# read pre-trained model and config file
net = cv2.dnn.readNet(weights, config)
def get_output_layers(net):
    layer_names = net.getLayerNames()
    # getUnconnectedOutLayers() returns plain ints or 1-element arrays
    # depending on the OpenCV version, so flatten before indexing.
    output_layers = [layer_names[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]
    return output_layers
# function to draw bounding box on the detected object with class name
def draw_bounding_box(img, class_id, confidence, x, y, x_plus_w, y_plus_h):
label = str(classes[class_id])
color = COLORS[class_id]
cv2.rectangle(img, (x,y), (x_plus_w,y_plus_h), color, 2)
cv2.putText(img, label, (x-10,y-10), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 2)
def tinu(image):
Width = image.shape[1]
Height = image.shape[0]
scale = 0.00392
# create input blob
blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)
# set input blob for the network
net.setInput(blob)
outs = net.forward(get_output_layers(net))
# initialization
class_ids = []
confidences = []
boxes = []
conf_threshold = 0.5
nms_threshold = 0.4
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > 0.5:
center_x = int(detection[0] * Width)
center_y = int(detection[1] * Height)
w = int(detection[2] * Width)
h = int(detection[3] * Height)
x = center_x - w / 2
y = center_y - h / 2
class_ids.append(class_id)
confidences.append(float(confidence))
boxes.append([x, y, w, h])
# apply non-max suppression
indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
# # go through the detections remaining
# # after nms and draw bounding box
#
    # NMSBoxes also returns ints or 1-element arrays depending on the
    # OpenCV version, so flatten here as well.
    for i in np.array(indices).flatten():
box = boxes[i]
x = box[0]
y = box[1]
w = box[2]
h = box[3]
#draw_bounding_box(image, class_ids[i], confidences[i], round(x), round(y), round(x+w), round(y+h))
return class_ids
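# Per-frame flow (sketch): blobFromImage scales the frame to 416x416 with a
# 1/255 factor, net.forward() yields raw detections, boxes above 0.5
# confidence are kept, and NMS at IoU 0.4 prunes overlapping boxes (its
# result is unused here); the pre-NMS class ids feed the person count below.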
def main():
counter = 0
video1 = cv2.VideoCapture(path1)
status_v1, frame1_v1 = video1.read()
total_frames = int(video1.get(cv2.CAP_PROP_FRAME_COUNT))
while(counter < total_frames):
status_v1 , frame2_v1 = video1.read()
counter = counter + 1
#print (counter)
        if counter % 15 == 0:
if status_v1 is True:
class_ids = tinu(frame1_v1)
persons = class_ids.count(0)
if persons >= 1:
#cv2.imshow('pic',frame1_v1)
kp1 = detector.detect(frame1_v1, None)
kp1 , des1 = detector.compute(frame1_v1, kp1)
des1 = np.array(des1)
des1 = cv2.resize(des1, (500,32), interpolation = cv2.INTER_AREA)
des1 = np.reshape(des1, (16000))
kp2 = detector.detect(frame2_v1, None)
kp2 , des2 = detector.compute(frame2_v1, kp2)
des2 = np.array(des2)
des2 = cv2.resize(des2, (500,32), interpolation = cv2.INTER_AREA)
des2 = np.reshape(des2, (16000))
mi = mutual_info_score(des1,des2)
if mi >= 3.71:
                        name = r'0410_3\Keyframe-' + str(counter) + '.jpg'
cv2.imwrite(name,frame1_v1)
print ('mutual information = ' , mi, ', persons = ', persons)
frame1_v1 = frame2_v1
k = cv2.waitKey(1) & 0xff
if k == 27:
break
main()
end_time = time.time()
|
python
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Geant4(CMakePackage):
"""Geant4 is a toolkit for the simulation of the passage of particles
through matter. Its areas of application include high energy, nuclear
and accelerator physics, as well as studies in medical and space
science."""
homepage = "http://geant4.cern.ch/"
url = "http://geant4.cern.ch/support/source/geant4.9.6.p04.tar.gz"
version('9.6.p04', sha256='997220a5386a43ac8f533fc7d5a8360aa1fd6338244d17deeaa583fb3a0f39fd')
patch('https://files.warwick.ac.uk/supernemo/files/Cadfael/distfiles/geant4-9.6.4-data-export.patch',
sha256='6d7b50f504b53c924dfae28562726b839e191c4c78139dfa33040dfd460aebed',
when='@9.6.p04')
patch('https://files.warwick.ac.uk/supernemo/files/Cadfael/distfiles/geant4-9.6.4-xcode.patch',
sha256='0efa7f5b6c25f20493a3268dbd492ee3334f7839d2008554d57584ec9e4e7617',
when='@9.6.p04')
patch('https://files.warwick.ac.uk/supernemo/files/Cadfael/distfiles/geant4-9.6.4-c11.patch',
sha256='c99f760125f185f436a9191c5cdbad7053e7c41aaac0f6ccbacab392787f39a9',
when='@9.6.p04')
patch('https://files.warwick.ac.uk/supernemo/files/Cadfael/distfiles/geant4-9.6.4-xercesc-include.patch',
sha256='668d78b7c24efe9065a4e1aadd5441c129a454113eae96812c77a2c8861bfa64',
when='@9.6.p04')
patch('https://files.warwick.ac.uk/supernemo/files/Cadfael/distfiles/geant4-9.6.4-infinite-recursion.patch',
sha256='7ee817311d36f0b49f7af9dd5e024c406210e58cc2868e2a49387eb04c99400e',
when='@9.6.p04')
variant('cxxstd',
default='11',
values=('11','14','17'),
multi=False,
description='Compile against the specified C++ Standard.')
depends_on("[email protected]", when="@9.6.p04")
depends_on('[email protected]:', type='build')
depends_on("[email protected]:")
depends_on("xerces-c")
def cmake_args(self):
spec = self.spec
options = [
'-DGEANT4_USE_GDML=ON',
'-DGEANT4_USE_SYSTEM_CLHEP=ON',
'-DGEANT4_USE_SYSTEM_EXPAT=ON',
'-DGEANT4_USE_SYSTEM_ZLIB=OFF',
'-DGEANT4_INSTALL_DATA=ON',
'-DGEANT4_BUILD_TLS_MODEL=global-dynamic',
'-DXERCESC_ROOT_DIR:STRING=%s' %
spec['xerces-c'].prefix, ]
options.append('-DGEANT4_BUILD_CXXSTD=c++{0}'.format(
spec.variants['cxxstd'].value))
return options
def url_for_version(self, version):
"""Handle Geant4's unusual version string."""
return ("http://geant4.cern.ch/support/source/geant4.%s.tar.gz" % version)
|
python
|
# import_export_ballotpedia/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .controllers import attach_ballotpedia_election_by_district_from_api, \
retrieve_ballot_items_for_one_voter_api_v4, \
retrieve_ballot_items_from_polling_location, retrieve_ballot_items_from_polling_location_api_v4, \
retrieve_ballotpedia_candidates_by_district_from_api, retrieve_ballotpedia_measures_by_district_from_api, \
retrieve_ballotpedia_district_id_list_for_polling_location, retrieve_ballotpedia_offices_by_district_from_api
from admin_tools.views import redirect_to_sign_in_page
from ballot.models import BallotReturnedListManager, BallotReturnedManager
from config.base import get_environment_variable
from datetime import date
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.messages import get_messages
from django.urls import reverse
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.shortcuts import redirect, render
from election.models import Election, ElectionManager
from exception.models import handle_exception
from import_export_batches.controllers_batch_process import \
schedule_retrieve_ballotpedia_ballots_for_polling_locations_api_v4, \
schedule_refresh_ballotpedia_ballots_for_voters_api_v4
from import_export_batches.models import BatchProcessManager, BatchSet, \
BATCH_SET_SOURCE_IMPORT_BALLOTPEDIA_BALLOT_ITEMS, REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS, \
REFRESH_BALLOT_ITEMS_FROM_VOTERS, RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS
from polling_location.models import PollingLocation
import random
from voter.models import voter_has_authority
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, is_valid_state_code, positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
BALLOTPEDIA_API_CONTAINS_URL = get_environment_variable("BALLOTPEDIA_API_CONTAINS_URL")
BALLOTPEDIA_API_SAMPLE_BALLOT_RESULTS_URL = "https://api4.ballotpedia.org/sample_ballot_results"
CANDIDATE = 'CANDIDATE'
CONTEST_OFFICE = 'CONTEST_OFFICE'
ELECTED_OFFICE = 'ELECTED_OFFICE'
IMPORT_BALLOT_ITEM = 'IMPORT_BALLOT_ITEM'
IMPORT_VOTER = 'IMPORT_VOTER'
MEASURE = 'MEASURE'
POLITICIAN = 'POLITICIAN'
MAP_POINTS_RETRIEVED_EACH_BATCH_CHUNK = 125 # 125. Formerly 250 and 111
@login_required
def import_ballot_items_for_location_view(request):
"""
Reach out to Ballotpedia API to retrieve a short list of districts the voter can vote in.
"""
status = ""
success = True
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
polling_location_we_vote_id = request.GET.get('polling_location_we_vote_id', "")
state_code = request.GET.get('state_code', "")
if not positive_value_exists(google_civic_election_id):
messages.add_message(request, messages.ERROR,
'Google Civic Election Id missing.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
election_manager = ElectionManager()
election_day_text = ""
results = election_manager.retrieve_election(google_civic_election_id=google_civic_election_id)
if results['election_found']:
election = results['election']
election_day_text = election.election_day_text
results = retrieve_ballot_items_from_polling_location_api_v4(
google_civic_election_id,
election_day_text=election_day_text,
polling_location_we_vote_id=polling_location_we_vote_id,
state_code=state_code,
)
kind_of_batch = ""
if 'kind_of_batch' in results:
kind_of_batch = results['kind_of_batch']
if not positive_value_exists(kind_of_batch):
kind_of_batch = IMPORT_BALLOT_ITEM
batch_header_id = 0
if 'batch_saved' in results and results['batch_saved']:
messages.add_message(request, messages.INFO, 'Ballot items import batch for {google_civic_election_id} '
'election saved.'
''.format(google_civic_election_id=google_civic_election_id))
batch_header_id = results['batch_header_id']
elif 'batch_header_id' in results and results['batch_header_id']:
messages.add_message(request, messages.INFO, 'Ballot items import batch for {google_civic_election_id} '
'election saved, batch_header_id.'
''.format(google_civic_election_id=google_civic_election_id))
batch_header_id = results['batch_header_id']
else:
messages.add_message(request, messages.ERROR, results['status'])
if positive_value_exists(batch_header_id):
# Go straight to the new batch
return HttpResponseRedirect(reverse('import_export_batches:batch_action_list', args=()) +
"?batch_header_id=" + str(batch_header_id) +
"&kind_of_batch=" + str(kind_of_batch) +
"&google_civic_election_id=" + str(google_civic_election_id))
else:
# Go to the ballot_item_list_edit page
if positive_value_exists(polling_location_we_vote_id):
return HttpResponseRedirect(reverse('ballot:ballot_item_list_by_polling_location_edit',
args=(polling_location_we_vote_id,)) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&polling_location_we_vote_id=" + str(polling_location_we_vote_id) +
"&state_code=" + str(state_code)
)
else:
messages.add_message(request, messages.ERROR, "Missing polling_location_we_vote_id.")
return HttpResponseRedirect(reverse('election:election_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&polling_location_we_vote_id=" + str(polling_location_we_vote_id) +
"&state_code=" + str(state_code)
)
@login_required
def import_export_ballotpedia_index_view(request):
"""
Provide an index of import/export actions (for We Vote data maintenance)
"""
messages_on_stage = get_messages(request)
template_values = {
'messages_on_stage': messages_on_stage,
}
return render(request, 'import_export_ballotpedia/index.html', template_values)
@login_required
def attach_ballotpedia_election_view(request, election_local_id=0):
"""
Reach out to Ballotpedia and retrieve the details about this election needed to make other API calls.
:param request:
:param election_local_id:
:return:
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
state_code = request.GET.get('state_code', '')
force_district_retrieve_from_ballotpedia = request.GET.get('force_district_retrieve_from_ballotpedia', False)
polling_location_list = []
status = ""
try:
election_on_stage = Election.objects.get(id=election_local_id)
google_civic_election_id = election_on_stage.google_civic_election_id
election_state_code = election_on_stage.get_election_state()
election_name = election_on_stage.election_name
is_national_election = election_on_stage.is_national_election
except Election.MultipleObjectsReturned as e:
messages.add_message(request, messages.ERROR,
'Could not retrieve election data. More than one election found.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
except Election.DoesNotExist:
messages.add_message(request, messages.ERROR,
'Could not retrieve election data. Election could not be found.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
# Check to see if we have polling location data related to the region(s) covered by this election
# We request the ballot data for each polling location as a way to build up our local data
if not positive_value_exists(state_code) and positive_value_exists(google_civic_election_id):
state_code = election_state_code
if positive_value_exists(is_national_election) and not positive_value_exists(state_code):
messages.add_message(request, messages.ERROR,
'For National elections, a State Code is required in order to run any '
'Ballotpedia data preparation.')
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
if not is_valid_state_code(state_code):
messages.add_message(request, messages.ERROR,
'{state_code} is not a valid State Code'.format(state_code=state_code))
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
try:
polling_location_count_query = PollingLocation.objects.all()
polling_location_count_query = polling_location_count_query.filter(state__iexact=state_code)
polling_location_count_query = polling_location_count_query.exclude(polling_location_deleted=True)
polling_location_count_query = polling_location_count_query.exclude(
Q(latitude__isnull=True) | Q(latitude__exact=0.0))
polling_location_count_query = polling_location_count_query.exclude(
Q(zip_long__isnull=True) | Q(zip_long__exact='0') | Q(zip_long__exact=''))
polling_location_count = polling_location_count_query.count()
if positive_value_exists(polling_location_count):
polling_location_limited_count = 1000
polling_location_query = PollingLocation.objects.all()
polling_location_query = polling_location_query.filter(state__iexact=state_code)
polling_location_query = polling_location_query.exclude(polling_location_deleted=True)
polling_location_query = polling_location_query.exclude(
Q(latitude__isnull=True) | Q(latitude__exact=0.0))
polling_location_query = polling_location_query.exclude(
Q(zip_long__isnull=True) | Q(zip_long__exact='0') | Q(zip_long__exact=''))
# Ordering by "line1" creates a bit of (locational) random order
polling_location_list = polling_location_query.order_by('line1')[:polling_location_limited_count]
except PollingLocation.DoesNotExist:
messages.add_message(request, messages.INFO,
'Could not retrieve polling location data for the {election_name}. '
'No polling locations exist for the state \'{state}\'. '
'Data needed from VIP.'.format(
election_name=election_name,
state=state_code))
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)) +
"?state_code=" + str(state_code))
if polling_location_count == 0:
messages.add_message(request, messages.ERROR,
'Could not retrieve ballot data for the {election_name}. '
'No polling locations returned for the state \'{state}\'. '
'(error 2 - attach_ballotpedia_election_view)'.format(
election_name=election_name,
state=state_code))
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)) +
"?state_code=" + str(state_code))
# If here, we know that we have some polling_locations to use in order to retrieve ballotpedia districts
could_not_retrieve_district_id_list_for_polling_location_count = 0
merged_district_list = []
for polling_location in polling_location_list:
one_ballot_results = retrieve_ballotpedia_district_id_list_for_polling_location(
google_civic_election_id, polling_location=polling_location,
force_district_retrieve_from_ballotpedia=force_district_retrieve_from_ballotpedia)
if one_ballot_results['success']:
ballotpedia_district_id_list = one_ballot_results['ballotpedia_district_id_list']
if len(ballotpedia_district_id_list):
for one_ballotpedia_district_id in ballotpedia_district_id_list:
if one_ballotpedia_district_id not in merged_district_list:
# Build up a list of ballotpedia districts that we need to retrieve races for
merged_district_list.append(one_ballotpedia_district_id)
else:
could_not_retrieve_district_id_list_for_polling_location_count += 1
if positive_value_exists(could_not_retrieve_district_id_list_for_polling_location_count):
messages.add_message(request, messages.ERROR,
'Could not retrieve district_id list for this many Map Points: ' +
str(could_not_retrieve_district_id_list_for_polling_location_count))
# Once we have a summary of all ballotpedia districts, we want to request all of the races
if not len(merged_district_list):
messages.add_message(request, messages.ERROR,
'Could not find Ballotpedia districts. ')
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)) +
'?google_civic_election_id=' + str(google_civic_election_id) +
"&state_code=" + str(state_code))
results = attach_ballotpedia_election_by_district_from_api(election_on_stage, google_civic_election_id,
merged_district_list, state_code)
status += results['status']
status = status[:1000]
if positive_value_exists(results['election_found']):
messages.add_message(request, messages.INFO,
'Ballotpedia election information attached. status: {status} '.format(status=status))
else:
        # We limit the number of status characters we print to the screen to 1000 so we don't get
        # the error "Not all temporary messages could be stored."
messages.add_message(request, messages.ERROR,
'Ballotpedia election information not attached. status: {status} '
.format(status=status))
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)) +
'?google_civic_election_id=' + str(google_civic_election_id) +
'&state_code=' + str(state_code))
@login_required
def refresh_ballotpedia_districts_for_polling_locations_view(request):
"""
This function refreshes the Ballotpedia districts used with subsequent calls to Ballotpedia:
1) Retrieve (internally) polling locations (so we can use those addresses to retrieve a
representative set of ballots)
2) Cycle through a portion of those polling locations, enough that we are caching all of the possible ballot items
3) Ask for Ballotpedia districts for each of the polling locations being analyzed
:param request:
:return:
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
# This is 500 because we're looking for districts
import_limit = convert_to_int(request.GET.get('import_limit', 500))
polling_location_list = []
polling_location_count = 0
status = ""
if not positive_value_exists(state_code):
messages.add_message(request, messages.ERROR,
'Could not retrieve Ballotpedia data. Missing state_code.')
return HttpResponseRedirect(reverse('electoral_district:electoral_district_list', args=()))
try:
polling_location_count_query = PollingLocation.objects.all()
polling_location_count_query = polling_location_count_query.filter(state__iexact=state_code)
polling_location_count_query = polling_location_count_query.filter(use_for_bulk_retrieve=True)
polling_location_count_query = polling_location_count_query.exclude(polling_location_deleted=True)
polling_location_count = polling_location_count_query.count()
if positive_value_exists(polling_location_count):
polling_location_query = PollingLocation.objects.all()
polling_location_query = polling_location_query.filter(state__iexact=state_code)
polling_location_query = polling_location_query.filter(use_for_bulk_retrieve=True)
polling_location_query = polling_location_query.exclude(polling_location_deleted=True)
# We used to have a limit of 500 ballots to pull per election, but now retrieve all
# Ordering by "line1" creates a bit of (locational) random order
polling_location_list = polling_location_query.order_by('line1')[:import_limit]
except Exception as e:
status += "ELECTORAL_DISTRICT-COULD_NOT_FIND_POLLING_LOCATION_LIST " + str(e) + " "
if polling_location_count == 0:
# We didn't find any polling locations marked for bulk retrieve, so just retrieve up to the import_limit
try:
polling_location_count_query = PollingLocation.objects.all()
polling_location_count_query = \
polling_location_count_query.exclude(Q(latitude__isnull=True) | Q(latitude__exact=0.0))
polling_location_count_query = \
polling_location_count_query.exclude(Q(zip_long__isnull=True) | Q(zip_long__exact='0') |
Q(zip_long__exact=''))
polling_location_count_query = polling_location_count_query.filter(state__iexact=state_code)
polling_location_count_query = polling_location_count_query.exclude(polling_location_deleted=True)
polling_location_count = polling_location_count_query.count()
if positive_value_exists(polling_location_count):
polling_location_query = PollingLocation.objects.all()
polling_location_query = \
polling_location_query.exclude(Q(latitude__isnull=True) | Q(latitude__exact=0.0))
polling_location_query = \
polling_location_query.exclude(Q(zip_long__isnull=True) | Q(zip_long__exact='0') |
Q(zip_long__exact=''))
polling_location_query = polling_location_query.filter(state__iexact=state_code)
polling_location_query = polling_location_query.exclude(polling_location_deleted=True)
# Ordering by "line1" creates a bit of (locational) random order
polling_location_list = polling_location_query.order_by('line1')[:import_limit]
except PollingLocation.DoesNotExist:
messages.add_message(request, messages.INFO,
'Could not retrieve ballot data. '
'No polling locations exist for the state \'{state}\'. '
'Data needed from VIP.'.format(
state=state_code))
return HttpResponseRedirect(reverse('electoral_district:electoral_district_list', args=()))
if polling_location_count == 0:
messages.add_message(request, messages.ERROR,
'Could not retrieve ballot data. '
'No polling locations returned for the state \'{state}\'. '
'(error 2 - refresh_ballotpedia_districts_for_polling_locations_view)'.format(
state=state_code))
return HttpResponseRedirect(reverse('electoral_district:electoral_district_list', args=()))
# If here, we know that we have some polling_locations to use in order to retrieve ballotpedia districts
    # Step through our set of polling locations until we find one that contains a ballot. Some won't contain ballots
    # due to data quality issues.
polling_locations_with_data = 0
polling_locations_without_data = 0
# If here we just want to retrieve the races for this election
merged_district_list = []
google_civic_election_id = 0
force_district_retrieve_from_ballotpedia = True
for polling_location in polling_location_list:
one_ballot_results = retrieve_ballotpedia_district_id_list_for_polling_location(
google_civic_election_id, polling_location=polling_location,
force_district_retrieve_from_ballotpedia=force_district_retrieve_from_ballotpedia)
success = False
if one_ballot_results['success']:
success = True
ballotpedia_district_id_list = one_ballot_results['ballotpedia_district_id_list']
if len(ballotpedia_district_id_list):
for one_ballotpedia_district_id in ballotpedia_district_id_list:
if one_ballotpedia_district_id not in merged_district_list:
# Build up a list of ballotpedia districts that we need to retrieve races for
merged_district_list.append(one_ballotpedia_district_id)
if success:
polling_locations_with_data += 1
else:
polling_locations_without_data += 1
messages.add_message(request, messages.INFO,
'Electoral data retrieved from Ballotpedia. '
'polling_locations_with_data: {polling_locations_with_data}, '
'polling_locations_without_data: {polling_locations_without_data}. '
''.format(
polling_locations_with_data=polling_locations_with_data,
polling_locations_without_data=polling_locations_without_data))
return HttpResponseRedirect(reverse('electoral_district:electoral_district_list', args=()) +
'?state_code=' + str(state_code) +
'&google_civic_election_id=' + str(google_civic_election_id))
@login_required
def retrieve_ballotpedia_candidates_by_district_from_api_view(request):
"""
Reach out to Ballotpedia API to retrieve candidates.
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
only_retrieve_if_zero_candidates = request.GET.get('only_retrieve_if_zero_candidates', False)
state_code = request.GET.get('state_code', "")
election_manager = ElectionManager()
election_local_id = 0
is_national_election = False
results = election_manager.retrieve_election(google_civic_election_id)
if results['election_found']:
election = results['election']
election_local_id = election.id
is_national_election = election.is_national_election
if positive_value_exists(is_national_election) and not positive_value_exists(state_code):
messages.add_message(request, messages.ERROR,
'For National elections, a State Code is required in order to run any '
'Ballotpedia data preparation.')
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
results = retrieve_ballotpedia_candidates_by_district_from_api(google_civic_election_id, state_code,
only_retrieve_if_zero_candidates)
kind_of_batch = ""
if 'kind_of_batch' in results:
kind_of_batch = results['kind_of_batch']
if not positive_value_exists(kind_of_batch):
kind_of_batch = CANDIDATE
batch_header_id = 0
if 'batch_saved' in results and results['batch_saved']:
messages.add_message(request, messages.INFO, 'Import batch for {google_civic_election_id} election saved.'
''.format(google_civic_election_id=google_civic_election_id))
batch_header_id = results['batch_header_id']
elif 'batch_header_id' in results and results['batch_header_id']:
messages.add_message(request, messages.INFO, 'Import batch for {google_civic_election_id} election saved, '
'batch_header_id.'
''.format(google_civic_election_id=google_civic_election_id))
batch_header_id = results['batch_header_id']
else:
messages.add_message(request, messages.ERROR, results['status'])
if positive_value_exists(batch_header_id):
# Go straight to the new batch
return HttpResponseRedirect(reverse('import_export_batches:batch_action_list', args=()) +
"?batch_header_id=" + str(batch_header_id) +
"&kind_of_batch=" + str(kind_of_batch) +
"&google_civic_election_id=" + str(google_civic_election_id))
else:
# Go to the office listing page
return HttpResponseRedirect(reverse('office:office_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id))
@login_required
def retrieve_ballotpedia_ballots_for_entire_election_api_v4_view(request):
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code_list = []
status = ''
batch_process_manager = BatchProcessManager()
if not positive_value_exists(google_civic_election_id):
status += "GOOGLE_CIVIC_ELECTION_ID_MISSING "
messages.add_message(request, messages.INFO, status)
return HttpResponseRedirect(reverse('import_export_batches:batch_process_list', args=()))
# Retrieve list of states in this election, and then loop through each state
election_manager = ElectionManager()
election_results = election_manager.retrieve_election(google_civic_election_id)
if election_results['election_found']:
election = election_results['election']
state_code_list = election.state_code_list()
status += "STATE_CODE_LIST: " + str(state_code_list) + " "
if not positive_value_exists(len(state_code_list)):
status += "STATE_CODE_LIST_MISSING "
messages.add_message(request, messages.INFO, status)
return HttpResponseRedirect(reverse('import_export_batches:batch_process_list', args=()))
for state_code in state_code_list:
# Refresh based on polling locations
if batch_process_manager.is_batch_process_currently_scheduled(
google_civic_election_id=google_civic_election_id,
state_code=state_code,
kind_of_process=REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS):
status += "(" + str(state_code) + ")-ALREADY_SCHEDULED_REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS "
else:
results = schedule_retrieve_ballotpedia_ballots_for_polling_locations_api_v4(
google_civic_election_id=google_civic_election_id,
state_code=state_code,
refresh_ballot_returned=True)
if not positive_value_exists(results['success']):
status += results['status']
# Refresh based on voter's who requested their own address
if batch_process_manager.is_batch_process_currently_scheduled(
google_civic_election_id=google_civic_election_id,
state_code=state_code,
kind_of_process=REFRESH_BALLOT_ITEMS_FROM_VOTERS):
status += "(" + str(state_code) + ")-ALREADY_SCHEDULED_REFRESH_BALLOT_ITEMS_FROM_VOTERS "
else:
results = schedule_refresh_ballotpedia_ballots_for_voters_api_v4(
google_civic_election_id=google_civic_election_id,
state_code=state_code)
if not positive_value_exists(results['success']):
status += results['status']
# Retrieve first time for each polling location
if batch_process_manager.is_batch_process_currently_scheduled(
google_civic_election_id=google_civic_election_id,
state_code=state_code,
kind_of_process=RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS):
status += "(" + str(state_code) + ")-ALREADY_SCHEDULED_RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS "
else:
results = schedule_retrieve_ballotpedia_ballots_for_polling_locations_api_v4(
google_civic_election_id=google_civic_election_id, state_code=state_code,
refresh_ballot_returned=False)
if not positive_value_exists(results['success']):
status += results['status']
messages.add_message(request, messages.INFO, status)
return HttpResponseRedirect(reverse('import_export_batches:batch_process_list', args=()))
@login_required
def retrieve_ballotpedia_ballots_for_polling_locations_api_v4_view(request):
"""
    This is different from retrieve_ballotpedia_data_for_polling_locations_view because it gets the districts
    from lat/long, and then the ballot items. Ballotpedia API v4
Reach out to Ballotpedia and retrieve (for one election):
1) Polling locations (so we can use those addresses to retrieve a representative set of ballots)
2) Cycle through a portion of those polling locations, enough that we are caching all of the possible ballot items
:param request:
:return:
"""
status = ""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
refresh_ballot_returned = request.GET.get('refresh_ballot_returned', False)
use_batch_process = request.GET.get('use_batch_process', False)
# import_limit = convert_to_int(request.GET.get('import_limit', 1000)) # If > 1000, we get error 414 (url too long)
if positive_value_exists(use_batch_process):
results = schedule_retrieve_ballotpedia_ballots_for_polling_locations_api_v4(
google_civic_election_id=google_civic_election_id, state_code=state_code,
refresh_ballot_returned=refresh_ballot_returned)
messages.add_message(request, messages.INFO, results['status'])
return HttpResponseRedirect(reverse('import_export_batches:batch_process_list', args=()) +
'?google_civic_election_id=' + str(google_civic_election_id) +
'&state_code=' + str(state_code)
)
else:
return retrieve_ballotpedia_ballots_for_polling_locations_api_v4_internal_view(
request=request, from_browser=True, google_civic_election_id=google_civic_election_id,
state_code=state_code, refresh_ballot_returned=refresh_ballot_returned)
def retrieve_ballotpedia_ballots_for_polling_locations_api_v4_internal_view(
request=None,
from_browser=False,
google_civic_election_id="",
state_code="",
refresh_ballot_returned=False,
date_last_updated_should_not_exceed=None,
batch_process_ballot_item_chunk=None):
status = ""
success = True
batch_process_id = 0
batch_process_ballot_item_chunk_id = 0
batch_set_id = 0
retrieve_row_count = 0
ballot_returned_manager = BallotReturnedManager()
try:
if positive_value_exists(google_civic_election_id):
election_on_stage = Election.objects.using('readonly').get(google_civic_election_id=google_civic_election_id)
ballotpedia_election_id = election_on_stage.ballotpedia_election_id
election_day_text = election_on_stage.election_day_text
election_local_id = election_on_stage.id
election_state_code = election_on_stage.get_election_state()
election_name = election_on_stage.election_name
is_national_election = election_on_stage.is_national_election
else:
message = 'Could not retrieve (as opposed to refresh) Ballotpedia ballots. ' \
'Missing google_civic_election_id. '
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_list', args=()))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
except Election.MultipleObjectsReturned as e:
message = 'Could not retrieve (as opposed to refresh) Ballotpedia ballots. ' \
'More than one election found. ' + str(e) + ' '
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_list', args=()))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
except Election.DoesNotExist:
message = 'Could not retrieve (as opposed to refresh) Ballotpedia ballots. Election could not be found. '
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_list', args=()))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
except Exception as e:
message = 'Could not retrieve (as opposed to refresh) Ballotpedia ballots. ERROR: ' + str(e) + ' '
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_list', args=()))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
# Check to see if we have polling location data related to the region(s) covered by this election
# We request the ballot data for each polling location as a way to build up our local data
if not positive_value_exists(state_code) and positive_value_exists(google_civic_election_id):
state_code = election_state_code
if positive_value_exists(is_national_election) and not positive_value_exists(state_code):
message = \
'For National elections, a State Code is required in order to run any Ballotpedia ballots preparation. '
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
try:
ballot_returned_list_manager = BallotReturnedListManager()
if positive_value_exists(refresh_ballot_returned):
limit_polling_locations_retrieved = MAP_POINTS_RETRIEVED_EACH_BATCH_CHUNK # 125. Formerly 250 and 111
else:
limit_polling_locations_retrieved = 0
# Retrieve polling locations already in ballot_returned table
if positive_value_exists(is_national_election) and positive_value_exists(state_code):
status += "NATIONAL_WITH_STATE (" + str(state_code) + ") "
status += "date_last_updated_should_not_exceed: " + str(date_last_updated_should_not_exceed) + ' '
results = ballot_returned_list_manager.retrieve_polling_location_we_vote_id_list_from_ballot_returned(
google_civic_election_id=google_civic_election_id,
state_code=state_code,
limit=limit_polling_locations_retrieved,
date_last_updated_should_not_exceed=date_last_updated_should_not_exceed,
)
else:
status += "WITHOUT_STATE "
status += "date_last_updated_should_not_exceed: " + str(date_last_updated_should_not_exceed) + ' '
results = ballot_returned_list_manager.retrieve_polling_location_we_vote_id_list_from_ballot_returned(
google_civic_election_id=google_civic_election_id,
limit=limit_polling_locations_retrieved,
date_last_updated_should_not_exceed=date_last_updated_should_not_exceed,
)
status += results['status']
if results['polling_location_we_vote_id_list_found']:
polling_location_we_vote_id_list = results['polling_location_we_vote_id_list']
else:
polling_location_we_vote_id_list = []
status += "REFRESH_BALLOT_RETURNED: " + str(refresh_ballot_returned) + " "
if positive_value_exists(refresh_ballot_returned):
polling_location_query = PollingLocation.objects.using('readonly').all()
polling_location_query = polling_location_query.filter(we_vote_id__in=polling_location_we_vote_id_list)
# We don't exclude the deleted polling locations because we need to know to delete the ballot returned entry
# polling_location_query = polling_location_query.exclude(polling_location_deleted=True)
polling_location_list = list(polling_location_query)
polling_location_count = len(polling_location_list)
else:
polling_location_query = PollingLocation.objects.using('readonly').all()
polling_location_query = \
polling_location_query.exclude(Q(latitude__isnull=True) | Q(latitude__exact=0.0))
polling_location_query = \
polling_location_query.exclude(Q(zip_long__isnull=True) | Q(zip_long__exact='0') |
Q(zip_long__exact=''))
polling_location_query = polling_location_query.filter(state__iexact=state_code)
# Exclude polling locations already retrieved
polling_location_query = polling_location_query.exclude(we_vote_id__in=polling_location_we_vote_id_list)
# We don't exclude the deleted polling locations because we need to know to delete the ballot returned entry
# polling_location_query = polling_location_query.exclude(polling_location_deleted=True)
# Randomly change the sort order so we over time load different polling locations (before timeout)
random_sorting = random.randint(1, 5)
first_retrieve_limit = MAP_POINTS_RETRIEVED_EACH_BATCH_CHUNK # 125. Formerly 250 and 111
if random_sorting == 1:
# Ordering by "line1" creates a bit of (locational) random order
polling_location_list = polling_location_query.order_by('line1')[:first_retrieve_limit]
status += "RANDOM_SORTING-LINE1-ASC: " + str(random_sorting) + " "
elif random_sorting == 2:
polling_location_list = polling_location_query.order_by('-line1')[:first_retrieve_limit]
status += "RANDOM_SORTING-LINE1-DESC: " + str(random_sorting) + " "
elif random_sorting == 3:
polling_location_list = polling_location_query.order_by('city')[:first_retrieve_limit]
status += "RANDOM_SORTING-CITY-ASC: " + str(random_sorting) + " "
else:
polling_location_list = polling_location_query.order_by('-city')[:first_retrieve_limit]
status += "RANDOM_SORTING-CITY-DESC: " + str(random_sorting) + " "
polling_location_count = len(polling_location_list)
# Cycle through -- if the polling_location is deleted, delete the associated ballot_returned,
# and then remove the polling_location from the list
modified_polling_location = []
for one_polling_location in polling_location_list:
if positive_value_exists(one_polling_location.polling_location_deleted):
delete_results = ballot_returned_manager.delete_ballot_returned_by_identifier(
google_civic_election_id=google_civic_election_id,
polling_location_we_vote_id=one_polling_location.we_vote_id)
if delete_results['ballot_deleted']:
status += "BR_PL_DELETED (" + str(one_polling_location.we_vote_id) + ") "
else:
status += "BR_PL_NOT_DELETED (" + str(one_polling_location.we_vote_id) + ") "
else:
modified_polling_location.append(one_polling_location)
polling_location_list = modified_polling_location
polling_location_count = len(polling_location_list)
except PollingLocation.DoesNotExist:
message = 'Could not retrieve (as opposed to refresh) ballot data for the {election_name}. ' \
'Ballotpedia Ballots-No polling locations exist for the state \'{state}\'. ' \
''.format(
election_name=election_name,
state=state_code)
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
except Exception as e:
message = 'Could not retrieve (as opposed to refresh) ballot data for the {election_name}. ' \
'Ballotpedia Ballots-No polling locations exist for the state \'{state}\'. ERROR: {error}' \
''.format(
election_name=election_name,
error=str(e),
state=state_code)
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
if polling_location_count == 0:
message = 'Did not retrieve (as opposed to refresh) ballot data for the {election_name}. ' \
'Data for all polling locations for the state \'{state}\' has been retrieved once ' \
'date_last_updated_should_not_exceed: \'{date_last_updated_should_not_exceed}\'. ' \
'(result 2 - retrieve_ballotpedia_ballots_for_polling_locations_api_v4_view)'.format(
election_name=election_name,
date_last_updated_should_not_exceed=date_last_updated_should_not_exceed,
state=state_code)
if from_browser:
messages.add_message(request, messages.INFO, message)
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
else:
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
# If here, we know that we have some polling_locations to use in order to retrieve ballotpedia districts
ballots_retrieved = 0
ballots_not_retrieved = 0
# If here, we assume we have already retrieved races for this election, and now we want to
# put ballot items for this location onto a ballot
existing_offices_by_election_dict = {}
existing_office_objects_dict = {}
existing_candidate_objects_dict = {}
existing_measure_objects_dict = {}
new_office_we_vote_ids_list = []
new_candidate_we_vote_ids_list = []
new_measure_we_vote_ids_list = []
batch_set_id = 0
if len(polling_location_list) > 0:
status += "POLLING_LOCATIONS_FOR_THIS_BATCH_SET: " + str(len(polling_location_list)) + " "
# Create Batch Set for ballot items
import_date = date.today()
batch_set_name = "Ballot items (from Map Points v4) for " + election_name
if positive_value_exists(state_code):
batch_set_name += " (state " + str(state_code.upper()) + ")"
if positive_value_exists(ballotpedia_election_id):
batch_set_name += " - ballotpedia: " + str(ballotpedia_election_id)
batch_set_name += " - " + str(import_date)
try:
batch_process_ballot_item_chunk_id = batch_process_ballot_item_chunk.id
batch_process_id = batch_process_ballot_item_chunk.batch_process_id
batch_set_id = batch_process_ballot_item_chunk.batch_set_id
except Exception as e:
status += "BATCH_PROCESS_BALLOT_ITEM_CHUNK: " + str(e) + ' '
if not positive_value_exists(batch_set_id):
# create batch_set object
try:
batch_set = BatchSet.objects.create(
batch_set_description_text="", batch_set_name=batch_set_name,
batch_set_source=BATCH_SET_SOURCE_IMPORT_BALLOTPEDIA_BALLOT_ITEMS,
batch_process_id=batch_process_id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk_id,
google_civic_election_id=google_civic_election_id,
source_uri=BALLOTPEDIA_API_SAMPLE_BALLOT_RESULTS_URL,
import_date=import_date,
state_code=state_code)
batch_set_id = batch_set.id
status += " BATCH_SET_CREATED-BALLOTS_FOR_POLLING_LOCATIONS "
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_SET " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
success = False
try:
if positive_value_exists(batch_process_ballot_item_chunk_id) and positive_value_exists(batch_set_id):
batch_process_ballot_item_chunk.batch_set_id = batch_set_id
batch_process_ballot_item_chunk.save()
except Exception as e:
status += "UNABLE_TO_SAVE_BATCH_SET_ID_EARLY " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
if success:
for polling_location in polling_location_list:
one_ballot_results = retrieve_ballot_items_from_polling_location_api_v4(
google_civic_election_id,
election_day_text=election_day_text,
polling_location_we_vote_id=polling_location.we_vote_id,
polling_location=polling_location,
state_code=state_code,
batch_set_id=batch_set_id,
existing_offices_by_election_dict=existing_offices_by_election_dict,
existing_office_objects_dict=existing_office_objects_dict,
existing_candidate_objects_dict=existing_candidate_objects_dict,
existing_measure_objects_dict=existing_measure_objects_dict,
new_office_we_vote_ids_list=new_office_we_vote_ids_list,
new_candidate_we_vote_ids_list=new_candidate_we_vote_ids_list,
new_measure_we_vote_ids_list=new_measure_we_vote_ids_list
)
if one_ballot_results['success']:
success = True
existing_offices_by_election_dict = one_ballot_results['existing_offices_by_election_dict']
existing_office_objects_dict = one_ballot_results['existing_office_objects_dict']
existing_candidate_objects_dict = one_ballot_results['existing_candidate_objects_dict']
existing_measure_objects_dict = one_ballot_results['existing_measure_objects_dict']
new_office_we_vote_ids_list = one_ballot_results['new_office_we_vote_ids_list']
new_candidate_we_vote_ids_list = one_ballot_results['new_candidate_we_vote_ids_list']
new_measure_we_vote_ids_list = one_ballot_results['new_measure_we_vote_ids_list']
if one_ballot_results['batch_header_id']:
ballots_retrieved += 1
if ballots_retrieved < 5:
status += "BALLOT_ITEMS_RETRIEVED: [[[" + one_ballot_results['status'] + "]]] "
else:
ballots_not_retrieved += 1
if ballots_not_retrieved < 5:
status += "BALLOT_ITEMS_NOT_RETRIEVED: [[[" + one_ballot_results['status'] + "]]] "
else:
status += "CANNOT_CALL_RETRIEVE_BECAUSE_OF_ERRORS [retrieve_ballot_items_from_polling_location_api_v4] "
retrieve_row_count = ballots_retrieved
if google_civic_election_id in existing_offices_by_election_dict:
existing_offices_found = len(existing_offices_by_election_dict[google_civic_election_id])
else:
existing_offices_found = len(existing_office_objects_dict)
existing_candidates_found = len(existing_candidate_objects_dict)
existing_measures_found = len(existing_measure_objects_dict)
new_offices_found = len(new_office_we_vote_ids_list)
new_candidates_found = len(new_candidate_we_vote_ids_list)
new_measures_found = len(new_measure_we_vote_ids_list)
if from_browser:
messages.add_message(request, messages.INFO,
'Ballot data retrieved from Ballotpedia (Map Points) for the {election_name}. '
'ballots retrieved: {ballots_retrieved}, '
'ballots NOT retrieved: {ballots_not_retrieved}. '
'new offices: {new_offices_found} (existing: {existing_offices_found}) '
'new candidates: {new_candidates_found} (existing: {existing_candidates_found}) '
'new measures: {new_measures_found} (existing: {existing_measures_found}) '
''.format(
ballots_retrieved=ballots_retrieved,
ballots_not_retrieved=ballots_not_retrieved,
election_name=election_name,
existing_offices_found=existing_offices_found,
existing_candidates_found=existing_candidates_found,
existing_measures_found=existing_measures_found,
new_offices_found=new_offices_found,
new_candidates_found=new_candidates_found,
new_measures_found=new_measures_found,
))
messages.add_message(request, messages.INFO, 'status: {status}'.format(status=status))
return HttpResponseRedirect(reverse('import_export_batches:batch_set_list', args=()) +
'?kind_of_batch=IMPORT_BALLOTPEDIA_BALLOT_ITEMS' +
'&google_civic_election_id=' + str(google_civic_election_id))
else:
status += \
'Ballot data retrieved from Ballotpedia (Map Points) for the {election_name}. ' \
'ballots retrieved: {ballots_retrieved}. ' \
'ballots NOT retrieved: {ballots_not_retrieved}. ' \
'new offices: {new_offices_found} (existing: {existing_offices_found}) ' \
'new candidates: {new_candidates_found} (existing: {existing_candidates_found}) ' \
'new measures: {new_measures_found} (existing: {existing_measures_found}) ' \
''.format(
ballots_retrieved=ballots_retrieved,
ballots_not_retrieved=ballots_not_retrieved,
election_name=election_name,
existing_offices_found=existing_offices_found,
existing_candidates_found=existing_candidates_found,
existing_measures_found=existing_measures_found,
new_offices_found=new_offices_found,
new_candidates_found=new_candidates_found,
new_measures_found=new_measures_found,
)
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
'batch_process_ballot_item_chunk': batch_process_ballot_item_chunk,
}
return results
@login_required
def refresh_ballotpedia_ballots_for_voters_api_v4_view(request):
"""
:param request:
:return:
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
use_batch_process = request.GET.get('use_batch_process', False)
if positive_value_exists(use_batch_process):
results = schedule_refresh_ballotpedia_ballots_for_voters_api_v4(
google_civic_election_id=google_civic_election_id, state_code=state_code)
messages.add_message(request, messages.INFO, results['status'])
return HttpResponseRedirect(reverse('import_export_batches:batch_process_list', args=()) +
'?google_civic_election_id=' + str(google_civic_election_id) +
'&state_code=' + str(state_code)
)
else:
return refresh_ballotpedia_ballots_for_voters_api_v4_internal_view(
request=request, from_browser=True, google_civic_election_id=google_civic_election_id,
state_code=state_code)
def refresh_ballotpedia_ballots_for_voters_api_v4_internal_view(
request=None,
from_browser=False,
google_civic_election_id="",
state_code="",
date_last_updated_should_not_exceed=None,
batch_process_ballot_item_chunk=None):
status = ""
success = True
batch_process_id = 0
batch_process_ballot_item_chunk_id = 0
batch_set_id = 0
retrieve_row_count = 0
try:
if positive_value_exists(google_civic_election_id):
election_on_stage = Election.objects.using('readonly').get(google_civic_election_id=google_civic_election_id)
ballotpedia_election_id = election_on_stage.ballotpedia_election_id
election_day_text = election_on_stage.election_day_text
election_local_id = election_on_stage.id
election_state_code = election_on_stage.get_election_state()
election_name = election_on_stage.election_name
is_national_election = election_on_stage.is_national_election
else:
message = 'Could not retrieve Ballotpedia ballots. Missing google_civic_election_id.'
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_list', args=()))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
except Election.MultipleObjectsReturned as e:
message = 'Could not retrieve Ballotpedia ballots. More than one election found.'
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_list', args=()))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
except Election.DoesNotExist:
message = 'Could not retrieve Ballotpedia ballots. Election could not be found.'
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_list', args=()))
else:
success = False
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
# Check to see if we have polling location data related to the region(s) covered by this election
# We request the ballot data for each polling location as a way to build up our local data
if not positive_value_exists(state_code) and positive_value_exists(google_civic_election_id):
state_code = election_state_code
# if positive_value_exists(is_national_election) and not positive_value_exists(state_code):
# messages.add_message(request, messages.ERROR,
# 'For National elections, a State Code is required in order to run any '
# 'Ballotpedia ballots preparation.')
# return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
ballot_returned_list_manager = BallotReturnedListManager()
limit_voters_retrieved = MAP_POINTS_RETRIEVED_EACH_BATCH_CHUNK # 125. Formerly 250 and 111
# Retrieve voter_id entries from ballot_returned table, from oldest to newest
if positive_value_exists(is_national_election) and positive_value_exists(state_code):
results = ballot_returned_list_manager.retrieve_ballot_returned_list(
google_civic_election_id=google_civic_election_id,
for_voters=True,
state_code=state_code,
date_last_updated_should_not_exceed=date_last_updated_should_not_exceed,
limit=limit_voters_retrieved)
else:
results = ballot_returned_list_manager.retrieve_ballot_returned_list(
google_civic_election_id=google_civic_election_id,
for_voters=True,
date_last_updated_should_not_exceed=date_last_updated_should_not_exceed,
limit=limit_voters_retrieved)
if results['ballot_returned_list_found']:
ballot_returned_list = results['ballot_returned_list']
else:
ballot_returned_list = []
if len(ballot_returned_list) == 0:
message = 'No ballot_returned items found for {election_name} for the state \'{state}\' earlier than ' \
'date_last_updated_should_not_exceed: \'{date_last_updated_should_not_exceed}\'. ' \
'(refresh_ballotpedia_ballots_for_voters_api_v4_internal_view)'.format(
election_name=election_name,
date_last_updated_should_not_exceed=date_last_updated_should_not_exceed,
state=state_code)
if from_browser:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
else:
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
}
return results
# If here, we know that we have some polling_locations to use in order to retrieve ballotpedia districts
ballots_retrieved = 0
ballots_not_retrieved = 0
# If here, we assume we have already retrieved races for this election, and now we want to
# put ballot items for this location onto a ballot
existing_offices_by_election_dict = {}
existing_office_objects_dict = {}
existing_candidate_objects_dict = {}
existing_measure_objects_dict = {}
new_office_we_vote_ids_list = []
new_candidate_we_vote_ids_list = []
new_measure_we_vote_ids_list = []
batch_set_id = 0
# Create Batch Set for ballot items
import_date = date.today()
batch_set_name = "Ballot items (from Voters v4) for " + election_name
if positive_value_exists(state_code):
batch_set_name += " (state " + str(state_code.upper()) + ")"
if positive_value_exists(ballotpedia_election_id):
batch_set_name += " - ballotpedia: " + str(ballotpedia_election_id)
batch_set_name += " - " + str(import_date)
try:
batch_process_ballot_item_chunk_id = batch_process_ballot_item_chunk.id
batch_process_id = batch_process_ballot_item_chunk.batch_process_id
batch_set_id = batch_process_ballot_item_chunk.batch_set_id
except Exception as e:
pass
if not positive_value_exists(batch_set_id):
# create batch_set object
try:
batch_set = BatchSet.objects.create(batch_set_description_text="", batch_set_name=batch_set_name,
batch_set_source=BATCH_SET_SOURCE_IMPORT_BALLOTPEDIA_BALLOT_ITEMS,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk_id,
batch_process_id=batch_process_id,
google_civic_election_id=google_civic_election_id,
source_uri=BALLOTPEDIA_API_SAMPLE_BALLOT_RESULTS_URL,
import_date=import_date,
state_code=state_code)
batch_set_id = batch_set.id
if positive_value_exists(batch_set_id):
status += " BATCH_SET_SAVED-BALLOTS_FOR_VOTERS "
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_SET " + str(e) + " "
try:
if positive_value_exists(batch_process_ballot_item_chunk_id):
batch_process_ballot_item_chunk.batch_set_id = batch_set_id
batch_process_ballot_item_chunk.save()
except Exception as e:
status += "UNABLE_TO_SAVE_BATCH_SET_ID_EARLY " + str(e) + " "
for ballot_returned in ballot_returned_list:
one_ballot_results = retrieve_ballot_items_for_one_voter_api_v4(
google_civic_election_id,
election_day_text=election_day_text,
ballot_returned=ballot_returned,
state_code=state_code,
batch_set_id=batch_set_id,
existing_offices_by_election_dict=existing_offices_by_election_dict,
existing_office_objects_dict=existing_office_objects_dict,
existing_candidate_objects_dict=existing_candidate_objects_dict,
existing_measure_objects_dict=existing_measure_objects_dict,
new_office_we_vote_ids_list=new_office_we_vote_ids_list,
new_candidate_we_vote_ids_list=new_candidate_we_vote_ids_list,
new_measure_we_vote_ids_list=new_measure_we_vote_ids_list
)
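        # Note: success is recomputed for each ballot_returned, so the value carried
        # into the final results reflects the most recent retrieval attempt.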
success = False
if one_ballot_results['success']:
success = True
if len(status) < 1024:
status += one_ballot_results['status']
existing_offices_by_election_dict = one_ballot_results['existing_offices_by_election_dict']
existing_office_objects_dict = one_ballot_results['existing_office_objects_dict']
existing_candidate_objects_dict = one_ballot_results['existing_candidate_objects_dict']
existing_measure_objects_dict = one_ballot_results['existing_measure_objects_dict']
new_office_we_vote_ids_list = one_ballot_results['new_office_we_vote_ids_list']
new_candidate_we_vote_ids_list = one_ballot_results['new_candidate_we_vote_ids_list']
new_measure_we_vote_ids_list = one_ballot_results['new_measure_we_vote_ids_list']
if success:
ballots_retrieved += 1
else:
ballots_not_retrieved += 1
    existing_offices_found = 0
    if google_civic_election_id in existing_offices_by_election_dict:
        existing_offices_found = len(existing_offices_by_election_dict[google_civic_election_id])
    else:
        existing_offices_found = len(existing_office_objects_dict)
existing_candidates_found = len(existing_candidate_objects_dict)
existing_measures_found = len(existing_measure_objects_dict)
new_offices_found = len(new_office_we_vote_ids_list)
new_candidates_found = len(new_candidate_we_vote_ids_list)
new_measures_found = len(new_measure_we_vote_ids_list)
retrieve_row_count = ballots_retrieved
message = \
'Ballot data retrieved from Ballotpedia (Voters) for the {election_name}. ' \
'ballots retrieved: {ballots_retrieved}. ' \
'ballots not retrieved: {ballots_not_retrieved}. ' \
'new offices: {new_offices_found} (existing: {existing_offices_found}) ' \
'new candidates: {new_candidates_found} (existing: {existing_candidates_found}) ' \
'new measures: {new_measures_found} (existing: {existing_measures_found}) ' \
''.format(
ballots_retrieved=ballots_retrieved,
ballots_not_retrieved=ballots_not_retrieved,
election_name=election_name,
existing_offices_found=existing_offices_found,
existing_candidates_found=existing_candidates_found,
existing_measures_found=existing_measures_found,
new_offices_found=new_offices_found,
new_candidates_found=new_candidates_found,
new_measures_found=new_measures_found,
)
if from_browser:
messages.add_message(request, messages.INFO, message)
messages.add_message(request, messages.INFO, 'status: {status}'.format(status=status))
return HttpResponseRedirect(reverse('import_export_batches:batch_set_list', args=()) +
'?kind_of_batch=IMPORT_BALLOTPEDIA_BALLOT_ITEMS' +
'&google_civic_election_id=' + str(google_civic_election_id))
else:
status += message + " "
results = {
'status': status,
'success': success,
'batch_set_id': batch_set_id,
'retrieve_row_count': retrieve_row_count,
'batch_process_ballot_item_chunk': batch_process_ballot_item_chunk,
}
return results
@login_required
def retrieve_ballotpedia_data_for_polling_locations_view(request, election_local_id=0):
"""
Reach out to Ballotpedia and retrieve (for one election):
1) Polling locations (so we can use those addresses to retrieve a representative set of ballots)
2) Cycle through a portion of those polling locations, enough that we are caching all of the possible ballot items
:param request:
:param election_local_id:
:return:
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
force_district_retrieve_from_ballotpedia = request.GET.get('force_district_retrieve_from_ballotpedia', False)
state_code = request.GET.get('state_code', '')
retrieve_races = positive_value_exists(request.GET.get('retrieve_races', False))
retrieve_measures = positive_value_exists(request.GET.get('retrieve_measures', False))
import_limit = convert_to_int(request.GET.get('import_limit', 1000)) # If > 1000, we get error 414 (url too long)
polling_location_list = []
polling_location_count = 0
status = ""
try:
if positive_value_exists(election_local_id):
election_on_stage = Election.objects.get(id=election_local_id)
ballotpedia_election_id = election_on_stage.ballotpedia_election_id
google_civic_election_id = election_on_stage.google_civic_election_id
election_state_code = election_on_stage.get_election_state()
election_name = election_on_stage.election_name
is_national_election = election_on_stage.is_national_election
else:
messages.add_message(request, messages.ERROR,
'Could not retrieve Ballotpedia data. Missing election_local_id.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
except Election.MultipleObjectsReturned as e:
messages.add_message(request, messages.ERROR, 'Could not retrieve Ballotpedia data. '
'More than one election found.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
except Election.DoesNotExist:
messages.add_message(request, messages.ERROR, 'Could not retrieve Ballotpedia data. '
'Election could not be found.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
# Check to see if we have polling location data related to the region(s) covered by this election
# We request the ballot data for each polling location as a way to build up our local data
if not positive_value_exists(state_code) and positive_value_exists(google_civic_election_id):
state_code = election_state_code
if positive_value_exists(is_national_election) and not positive_value_exists(state_code):
messages.add_message(request, messages.ERROR,
'For National elections, a State Code is required in order to run any '
'Ballotpedia data preparation.')
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
try:
polling_location_count_query = PollingLocation.objects.all()
polling_location_count_query = polling_location_count_query.filter(state__iexact=state_code)
polling_location_count_query = polling_location_count_query.filter(use_for_bulk_retrieve=True)
polling_location_count_query = polling_location_count_query.exclude(polling_location_deleted=True)
polling_location_count = polling_location_count_query.count()
if positive_value_exists(polling_location_count):
polling_location_query = PollingLocation.objects.all()
polling_location_query = polling_location_query.filter(state__iexact=state_code)
polling_location_query = polling_location_query.filter(use_for_bulk_retrieve=True)
polling_location_query = polling_location_query.exclude(polling_location_deleted=True)
# We used to have a limit of 500 ballots to pull per election, but now retrieve all
# Ordering by "line1" creates a bit of (locational) random order
polling_location_list = polling_location_query.order_by('line1')[:import_limit]
except Exception as e:
status += "COULD_NOT_FIND_POLLING_LOCATION_LIST " + str(e) + " "
if polling_location_count == 0:
# We didn't find any polling locations marked for bulk retrieve, so just retrieve up to the import_limit
try:
polling_location_count_query = PollingLocation.objects.all()
polling_location_count_query = \
polling_location_count_query.exclude(Q(latitude__isnull=True) | Q(latitude__exact=0.0))
polling_location_count_query = \
polling_location_count_query.exclude(Q(zip_long__isnull=True) | Q(zip_long__exact='0') |
Q(zip_long__exact=''))
polling_location_count_query = polling_location_count_query.filter(state__iexact=state_code)
polling_location_count_query = polling_location_count_query.exclude(polling_location_deleted=True)
polling_location_count = polling_location_count_query.count()
if positive_value_exists(polling_location_count):
polling_location_query = PollingLocation.objects.all()
polling_location_query = \
polling_location_query.exclude(Q(latitude__isnull=True) | Q(latitude__exact=0.0))
polling_location_query = \
polling_location_query.exclude(Q(zip_long__isnull=True) | Q(zip_long__exact='0') |
Q(zip_long__exact=''))
polling_location_query = polling_location_query.filter(state__iexact=state_code)
polling_location_query = polling_location_query.exclude(polling_location_deleted=True)
# Ordering by "line1" creates a bit of (locational) random order
polling_location_list = polling_location_query.order_by('line1')[:import_limit]
except PollingLocation.DoesNotExist:
messages.add_message(request, messages.INFO,
'Could not retrieve ballot data for the {election_name}. '
'No polling locations exist for the state \'{state}\'. '
'Data needed from VIP.'.format(
election_name=election_name,
state=state_code))
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
if polling_location_count == 0:
messages.add_message(request, messages.ERROR,
'Could not retrieve ballot data for the {election_name}. '
'No polling locations returned for the state \'{state}\'. '
'(error 2 - retrieve_ballotpedia_data_for_polling_locations_view)'.format(
election_name=election_name,
state=state_code))
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
# If here, we know that we have some polling_locations to use in order to retrieve ballotpedia districts
ballots_retrieved = 0
ballots_not_retrieved = 0
# Step though our set of polling locations, until we find one that contains a ballot. Some won't contain ballots
# due to data quality issues.
if retrieve_races or retrieve_measures or force_district_retrieve_from_ballotpedia:
polling_locations_with_data = 0
polling_locations_without_data = 0
# If here we just want to retrieve the races for this election
merged_district_list = []
for polling_location in polling_location_list:
one_ballot_results = retrieve_ballotpedia_district_id_list_for_polling_location(
google_civic_election_id, polling_location=polling_location,
force_district_retrieve_from_ballotpedia=force_district_retrieve_from_ballotpedia)
success = False
if one_ballot_results['success']:
success = True
ballotpedia_district_id_list = one_ballot_results['ballotpedia_district_id_list']
if len(ballotpedia_district_id_list):
for one_ballotpedia_district_id in ballotpedia_district_id_list:
if one_ballotpedia_district_id not in merged_district_list:
# Build up a list of ballotpedia districts that we need to retrieve races for
merged_district_list.append(one_ballotpedia_district_id)
if success:
polling_locations_with_data += 1
else:
polling_locations_without_data += 1
# Once we have a summary of all ballotpedia districts, we want to request all of the races or measures
if len(merged_district_list):
kind_of_batch = "Unknown"
results = {}
if retrieve_races:
results = retrieve_ballotpedia_offices_by_district_from_api(google_civic_election_id, state_code,
merged_district_list)
kind_of_batch = ""
if 'kind_of_batch' in results:
kind_of_batch = results['kind_of_batch']
if not positive_value_exists(kind_of_batch):
kind_of_batch = CONTEST_OFFICE
status += results['status']
elif retrieve_measures:
results = retrieve_ballotpedia_measures_by_district_from_api(google_civic_election_id, state_code,
merged_district_list)
kind_of_batch = ""
if 'kind_of_batch' in results:
kind_of_batch = results['kind_of_batch']
if not positive_value_exists(kind_of_batch):
kind_of_batch = MEASURE
status += results['status']
batch_header_id = 0
if 'batch_saved' in results and results['batch_saved']:
messages.add_message(request, messages.INFO,
kind_of_batch +
' import batch for {google_civic_election_id} election saved. '
'status: {status}'
''.format(google_civic_election_id=google_civic_election_id,
status=status))
batch_header_id = results['batch_header_id']
elif 'multiple_batches_found' in results and results['multiple_batches_found']:
messages.add_message(request, messages.INFO,
kind_of_batch +
' multiple import batches for {google_civic_election_id} election saved.'
' status: {status}'
''.format(google_civic_election_id=google_civic_election_id,
status=status))
batch_header_id = results['batch_header_id']
# Go straight to the list of batches
return HttpResponseRedirect(reverse('import_export_batches:batch_list', args=()) +
"?kind_of_batch=" + str(kind_of_batch) +
"&google_civic_election_id=" + str(google_civic_election_id))
elif 'batch_header_id' in results and results['batch_header_id']:
messages.add_message(request, messages.INFO,
kind_of_batch +
' import batch for {google_civic_election_id} election saved, '
'batch_header_id. status: {status}'
''.format(google_civic_election_id=google_civic_election_id,
status=status))
batch_header_id = results['batch_header_id']
else:
messages.add_message(request, messages.ERROR, results['status'])
if positive_value_exists(batch_header_id):
# Go straight to the new batch
return HttpResponseRedirect(reverse('import_export_batches:batch_action_list', args=()) +
"?batch_header_id=" + str(batch_header_id) +
"&kind_of_batch=" + str(kind_of_batch) +
"&google_civic_election_id=" + str(google_civic_election_id))
else:
if retrieve_races:
# Go to the office listing page
return HttpResponseRedirect(reverse('office:office_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id))
elif retrieve_measures:
# Go to the measure listing page
return HttpResponseRedirect(reverse('measure:measure_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id))
messages.add_message(request, messages.INFO,
'Races or measures retrieved from Ballotpedia for the {election_name}. '
'polling_locations_with_data: {polling_locations_with_data}, '
'polling_locations_without_data: {polling_locations_without_data}. '
''.format(
polling_locations_with_data=polling_locations_with_data,
                             polling_locations_without_data=polling_locations_without_data,
election_name=election_name))
return HttpResponseRedirect(reverse('import_export_batches:batch_set_list', args=()) +
'?kind_of_batch=IMPORT_BALLOTPEDIA_BALLOT_ITEMS' +
'&google_civic_election_id=' + str(google_civic_election_id))
else:
# Create Batch Set for ballot items
import_date = date.today()
batch_set_id = 0
batch_set_name = "Ballotpedia ballot items (from Map Points v3) for " + election_name
if positive_value_exists(state_code):
batch_set_name += " (state " + str(state_code.upper()) + ")"
if positive_value_exists(ballotpedia_election_id):
batch_set_name += " - ballotpedia: " + str(ballotpedia_election_id)
batch_set_name += " - " + str(import_date)
# create batch_set object
try:
batch_set = BatchSet.objects.create(batch_set_description_text="", batch_set_name=batch_set_name,
batch_set_source=BATCH_SET_SOURCE_IMPORT_BALLOTPEDIA_BALLOT_ITEMS,
google_civic_election_id=google_civic_election_id,
source_uri=BALLOTPEDIA_API_CONTAINS_URL, import_date=import_date,
state_code=state_code)
batch_set_id = batch_set.id
if positive_value_exists(batch_set_id):
status += " BATCH_SET_SAVED-POLLING_OLD "
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_SET " + str(e) + " "
# If here, we assume we have already retrieved races for this election, and now we want to
# put ballot items for this location onto a ballot
for polling_location in polling_location_list:
one_ballot_results = retrieve_ballot_items_from_polling_location(
google_civic_election_id, polling_location=polling_location, batch_set_id=batch_set_id,
state_code=state_code)
success = False
if one_ballot_results['success']:
success = True
if success:
ballots_retrieved += 1
else:
ballots_not_retrieved += 1
# We used to only retrieve up to 500 locations from each state, but we don't limit now
# # Break out of this loop, assuming we have a minimum number of ballots with contests retrieved
# # If we don't achieve the minimum number of ballots_with_contests_retrieved, break out at the emergency level
# emergency = (ballots_retrieved + ballots_not_retrieved) >= (3 * number_of_polling_locations_to_retrieve)
# if ((ballots_retrieved + ballots_not_retrieved) >= number_of_polling_locations_to_retrieve and
# ballots_with_contests_retrieved > 20) or emergency:
# break
messages.add_message(request, messages.INFO,
'Ballot data retrieved from Ballotpedia v3 for the {election_name}. '
'ballots retrieved: {ballots_retrieved}. '
'ballots not retrieved: {ballots_not_retrieved}. '
''.format(
ballots_retrieved=ballots_retrieved,
ballots_not_retrieved=ballots_not_retrieved,
election_name=election_name))
return HttpResponseRedirect(reverse('import_export_batches:batch_set_list', args=()) +
'?kind_of_batch=IMPORT_BALLOTPEDIA_BALLOT_ITEMS' +
'&google_civic_election_id=' + str(google_civic_election_id))
# @login_required
# def retrieve_ballotpedia_offices_by_election_from_api_view(request):
# """
# Reach out to Ballotpedia API to retrieve offices.
# """
# # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
# authority_required = {'political_data_manager'}
# if not voter_has_authority(request, authority_required):
# return redirect_to_sign_in_page(request, authority_required)
#
# google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
#
# results = retrieve_ballotpedia_offices_by_election_from_api(google_civic_election_id)
#
# kind_of_batch = ""
# if 'kind_of_batch' in results:
# kind_of_batch = results['kind_of_batch']
# if not positive_value_exists(kind_of_batch):
# kind_of_batch = CONTEST_OFFICE
#
# batch_header_id = 0
# if 'batch_saved' in results and results['batch_saved']:
# messages.add_message(request, messages.INFO, 'Import batch for {google_civic_election_id} election saved.'
# ''.format(google_civic_election_id=google_civic_election_id))
# batch_header_id = results['batch_header_id']
# elif 'batch_header_id' in results and results['batch_header_id']:
# messages.add_message(request, messages.INFO, 'Import batch for {google_civic_election_id} election saved, '
# 'batch_header_id.'
# ''.format(google_civic_election_id=google_civic_election_id))
# batch_header_id = results['batch_header_id']
# else:
# messages.add_message(request, messages.ERROR, results['status'])
#
# if positive_value_exists(batch_header_id):
# # Go straight to the new batch
# return HttpResponseRedirect(reverse('import_export_batches:batch_action_list', args=()) +
# "?batch_header_id=" + str(batch_header_id) +
# "&kind_of_batch=" + str(kind_of_batch) +
# "&google_civic_election_id=" + str(google_civic_election_id))
# else:
# # Go to the office listing page
# return HttpResponseRedirect(reverse('office:office_list', args=()) +
# "?google_civic_election_id=" + str(google_civic_election_id))
|
python
|
from django.contrib import admin
from . import models
admin.site.site_header = 'FAIR Data Management'
class BaseAdmin(admin.ModelAdmin):
"""
Base model for admin views.
"""
readonly_fields = ('updated_by', 'last_updated')
list_display = ('last_updated',)
def save_model(self, request, obj, form, change):
"""
Customising the admin save behaviour to add the current user as the updated_by user on the model.
"""
obj.updated_by = request.user
return super().save_model(request, obj, form, change)
class IssueAdmin(BaseAdmin):
"""
Admin view for the Issue model.
"""
readonly_fields = ('updated_by', 'last_updated', 'linked_objects')
list_display = ('short_desc', 'severity', 'last_updated')
@classmethod
def linked_objects(cls, issue):
"""
Return all the Objects and ObjectComponents that this Issue has been assigned to.
"""
return list(issue.object_issues.all()) + list(issue.component_issues.all())
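# Register every model: Issue subclasses get the custom IssueAdmin; all other
# models get a dynamically generated admin class exposing their ADMIN_LIST_FIELDS.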
for name, cls in models.all_models.items():
if issubclass(cls, models.Issue):
admin.site.register(cls, IssueAdmin)
else:
data = {'list_display': cls.ADMIN_LIST_FIELDS + ('updated_by', 'last_updated')}
admin_cls = type(name + 'Admin', (BaseAdmin,), data)
admin.site.register(cls, admin_cls)
|
python
|
#!/usr/bin/env python
from I3Tray import *
from os.path import expandvars
from icecube import icetray,dataclasses,dataio,phys_services
amageofile = expandvars("$I3_SRC/phys-services/resources/amanda.geo")
icecubegeofile = expandvars("$I3_SRC/phys-services/resources/icecube.geo")
tray = I3Tray()
tray.AddModule("I3InfiniteSource","streams",stream=icetray.I3Frame.Physics)
def set_time(fr):
fr['DrivingTime'] = dataclasses.I3Time(2006, 0)
tray.AddModule(set_time, 'time')
tray.AddService("I3TextFileGeometryServiceFactory","geometry")(
("AmandaGeoFile",amageofile),
("IceCubeGeoFile",icecubegeofile),
)
tray.AddModule("I3MetaSynth","muxme")
tray.AddModule("Dump","dump")
tray.AddModule("FrameCheck","check")(
("ensure_physics_has", ["I3Geometry", "DrivingTime"])
)
tray.Execute(2)
|
python
|
import pytest
import numpy as np
from mcalf.utils.smooth import moving_average, gaussian_kern_3d, smooth_cube, mask_classifications
from ..helpers import class_map
def test_moving_average():
x = np.array([0.4, 1.2, 5.4, 8, 1.47532, 23.42, 63, 21, 14.75, 6, 2.64, 0.142])
res = moving_average(x, 2)
assert res == pytest.approx(np.array([0.4, 0.8, 3.3, 6.7, 4.73766, 12.44766, 43.21, 42., 17.875, 10.375, 4.32,
1.391]))
res = moving_average(x, 3)
assert res == pytest.approx(np.array([0.8, 2.33333333, 4.86666667, 4.95844, 10.96510667, 29.29844, 35.80666667,
32.91666667, 13.91666667, 7.79666667, 2.92733333, 1.391]))
res = moving_average(x, 5)
assert res == pytest.approx(np.array([2.33333333, 3.75, 3.295064, 7.899064, 20.259064, 23.379064, 24.729064,
25.634, 21.478, 8.9064, 5.883, 2.92733333]))
res = moving_average(x, 12)
assert res == pytest.approx(np.array([6.64922, 14.69933143, 15.486915, 15.40503556, 14.464532, 13.38957455,
12.28561, 13.36612, 14.582732, 15.60303556, 16.553415, 18.70742857]))
for w in [3.5, 0, -3, 13]: # Test invalid widths
with pytest.raises(ValueError):
moving_average(x, w)
def test_gaussian_kern_3d():
# With default parameters of width=5 and sigma=(1, 1, 1)
res = gaussian_kern_3d()
truth = np.array([[[0.22313016, 0.32465247, 0.36787944, 0.32465247, 0.22313016],
[0.32465247, 0.47236655, 0.53526143, 0.47236655, 0.32465247],
[0.36787944, 0.53526143, 0.60653066, 0.53526143, 0.36787944],
[0.32465247, 0.47236655, 0.53526143, 0.47236655, 0.32465247],
[0.22313016, 0.32465247, 0.36787944, 0.32465247, 0.22313016]],
[[0.32465247, 0.47236655, 0.53526143, 0.47236655, 0.32465247],
[0.47236655, 0.68728928, 0.77880078, 0.68728928, 0.47236655],
[0.53526143, 0.77880078, 0.8824969, 0.77880078, 0.53526143],
[0.47236655, 0.68728928, 0.77880078, 0.68728928, 0.47236655],
[0.32465247, 0.47236655, 0.53526143, 0.47236655, 0.32465247]],
[[0.36787944, 0.53526143, 0.60653066, 0.53526143, 0.36787944],
[0.53526143, 0.77880078, 0.8824969, 0.77880078, 0.53526143],
[0.60653066, 0.8824969, 1., 0.8824969, 0.60653066],
[0.53526143, 0.77880078, 0.8824969, 0.77880078, 0.53526143],
[0.36787944, 0.53526143, 0.60653066, 0.53526143, 0.36787944]],
[[0.32465247, 0.47236655, 0.53526143, 0.47236655, 0.32465247],
[0.47236655, 0.68728928, 0.77880078, 0.68728928, 0.47236655],
[0.53526143, 0.77880078, 0.8824969, 0.77880078, 0.53526143],
[0.47236655, 0.68728928, 0.77880078, 0.68728928, 0.47236655],
[0.32465247, 0.47236655, 0.53526143, 0.47236655, 0.32465247]],
[[0.22313016, 0.32465247, 0.36787944, 0.32465247, 0.22313016],
[0.32465247, 0.47236655, 0.53526143, 0.47236655, 0.32465247],
[0.36787944, 0.53526143, 0.60653066, 0.53526143, 0.36787944],
[0.32465247, 0.47236655, 0.53526143, 0.47236655, 0.32465247],
[0.22313016, 0.32465247, 0.36787944, 0.32465247, 0.22313016]]])
assert res == pytest.approx(truth)
res = gaussian_kern_3d(width=3, sigma=(1.5, 0.7, 0.9))
truth = np.array([[[0.15568597, 0.28862403, 0.15568597],
[0.19442824, 0.36044779, 0.19442824],
[0.15568597, 0.28862403, 0.15568597]],
[[0.43192377, 0.8007374, 0.43192377],
[0.53940751, 1., 0.53940751],
[0.43192377, 0.8007374, 0.43192377]],
[[0.15568597, 0.28862403, 0.15568597],
[0.19442824, 0.36044779, 0.19442824],
[0.15568597, 0.28862403, 0.15568597]]])
assert res == pytest.approx(truth)
def test_smooth_cube():
np.random.seed(0) # Produce identical results
cube = np.random.rand(5, 5, 5) * 100 - 50
mask = np.array([[1, 1, 1, 1, 0],
[0, 1, 0, 1, 1],
[0, 0, 1, 1, 0],
[1, 1, 1, 0, 1],
[0, 1, 1, 1, 1]], dtype=int)
res = smooth_cube(cube, mask, width=2, sigma=(1.2, 0.6, 1.4))
truth = np.array([[[4.31830133e+00, 1.02965975e+01, 1.91043418e+01, 1.76588896e-02, np.nan],
[np.nan, 9.91998647e+00, np.nan, 1.33945458e+01, -2.98884340e+01],
[np.nan, np.nan, 2.64095719e+01, 3.51530895e+01, np.nan],
[-1.81095221e+01, -5.15689778e+00, 3.61023714e+00, np.nan, 4.69101513e-01],
[np.nan, -1.00013304e+01, -7.84092032e+00, -1.05514319e+01, -2.59007402e+01]],
[[3.65691013e+00, 1.57056595e+01, 9.86134349e+00, -1.79691126e+01, np.nan],
[np.nan, 1.75151400e+01, np.nan, -1.03351476e+01, -3.68392304e+01],
[np.nan, np.nan, 2.49878480e+00, -3.88009617e+00, np.nan],
[-5.57846637e+00, -2.03151495e+00, -2.98843786e+00, np.nan, -3.35401316e+00],
[np.nan, -5.38197129e+00, 6.49031413e-01, 5.81205525e-01, 5.14871752e+00]],
[[-1.00305940e+01, -1.71083008e+00, -5.57436167e+00, -1.05334176e+01, np.nan],
[np.nan, -4.55896449e+00, np.nan, -5.26767691e+00, -9.44864769e+00],
[np.nan, np.nan, -2.17783552e+01, -2.25091513e+01, np.nan],
[3.84769782e+00, -2.88330601e+00, -5.67411131e+00, np.nan, -2.17634111e+01],
[np.nan, 1.30081927e+00, 1.07663546e+01, 4.44361511e+00, -1.28020472e+01]],
[[-1.20645968e+01, -9.75815925e+00, 4.87884633e-01, 1.15538827e+01, np.nan],
[np.nan, -5.00688220e+00, np.nan, 5.13812774e+00, 2.59675233e+01],
[np.nan, np.nan, -1.03354339e+01, -3.61697176e+00, np.nan],
[5.85709312e+00, 4.07016012e+00, 2.70320241e+00, np.nan, -1.47377948e+01],
[np.nan, 1.60071244e+00, 1.12280352e+01, -2.46298117e+00, -2.85724738e+01]],
[[1.32888138e+00, 3.24146422e+00, 1.40154733e+01, 2.12673063e+01, np.nan],
[np.nan, 1.45760603e+01, np.nan, -8.91080166e-01, 4.52749012e+01],
[np.nan, np.nan, 2.60630329e+00, -5.01572953e-01, np.nan],
[9.29777733e+00, 2.29946022e+01, 2.27115569e+01, np.nan, 5.81933193e+00],
[np.nan, 2.28704008e+01, 3.00036917e+01, 3.39226239e+00, -7.61449514e+00]]])
assert res == pytest.approx(truth, nan_ok=True)
def test_mask_classifications():
with pytest.raises(TypeError) as e:
mask_classifications([[0, 1], [1, 2]])
assert '`class_map` must be a numpy.ndarray' in str(e.value)
c = class_map(4, 5, 3, 5) # t y x n
with pytest.raises(ValueError) as e:
mask_classifications(c[0, 0])
assert '`class_map` must have either 2 or 3 dimensions, got 1' in str(e.value)
with pytest.raises(TypeError) as e:
mask_classifications(c.astype(float))
assert '`class_map` must be an array of integers' in str(e.value)
with pytest.raises(TypeError) as e:
mask_classifications(c, vmax=3.5)
assert '`vmax` must be an integer' in str(e.value)
with pytest.raises(ValueError) as e:
mask_classifications(c, vmax=-2)
assert '`vmax` must not be less than zero' in str(e.value)
with pytest.raises(TypeError) as e:
mask_classifications(c, vmin=3.5)
assert '`vmin` must be an integer' in str(e.value)
with pytest.raises(ValueError) as e:
mask_classifications(c, vmin=-2)
assert '`vmin` must not be less than zero' in str(e.value)
# vmin above vmax
with pytest.raises(ValueError) as e:
mask_classifications(c, vmin=3, vmax=1)
assert '`vmin` must be less than `vmax`' in str(e.value)
# no processing needed: 2D with original range
assert np.array_equal(mask_classifications(class_map(1, 5, 3, 5)[0])[0], class_map(1, 5, 3, 5)[0])
# no processing requested: 3D with original range
assert np.array_equal(mask_classifications(class_map(4, 5, 3, 5), reduce=False)[0], class_map(4, 5, 3, 5))
# test vmin and vmax calculated correctly
c = class_map(4, 5, 3, 6) # t y x n
c[c == 0] = -1 # move all classification 0 -> -1
assert mask_classifications(c)[1:3] == (1, 5)
# test vmin and vmax used correctly
truth = np.array([[1, -1, 1, -1],
[1, 2, -1, 2],
[2, -1, -1, -1]], dtype=int)
res = mask_classifications(class_map(1, 4, 3, 4)[0], vmin=1, vmax=2)[0]
assert np.array_equal(res, truth)
# test average calculated correctly
truth = np.array([[1, 2, 0, 3],
[1, 0, 0, 2],
[2, 0, -1, 1]], dtype=int)
res = mask_classifications(class_map(3, 4, 3, 4))[0]
assert np.array_equal(res, truth)
# test all negative
c = np.full((4, 6), -1, dtype=int)
assert np.array_equal(c, mask_classifications(c)[0])
c = np.full((3, 4, 6), -1, dtype=int)
assert np.array_equal(c[0], mask_classifications(c)[0])
c = np.full((3, 4, 6), -1, dtype=int)
assert np.array_equal(c, mask_classifications(c, reduce=False)[0])
|
python
|
def Validate(num, tar):
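    # True when num plus all of its decimal truncations (num//10, num//100, ...)
    # sums to tar.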
v = 0
while num>0:
v += num
num //= 10
return v == tar
if __name__ == '__main__':
s = input()
digits = len(s)
s = int(s)
chushu = 1
chenshu = 1
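    # chushu is the repunit with `digits` ones (1, 11, 111, ...) and chenshu is
    # 10**(digits-1); since res + res//10 + ... is roughly res * chushu / chenshu,
    # a candidate answer is res = (n * chenshu) // chushu for n near s.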
for i in range(digits-1):
chenshu*=10
chushu = chushu*10+1
success = False
res = -1
for n in range(s, s+digits):
res = (n * chenshu) // chushu
if Validate(res, s):
success = True
break
if not success:
res = -1
print(res)
|
python
|
import json
from urllib.request import urlopen
def btc_alarm():
VOL_BUY=20
VOL_DIFF=100
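    # Alarm when buying dries up while selling dominates (buy volume <= VOL_BUY BTC
    # and diff <= -VOL_DIFF), or when buying dominates by at least VOL_DIFF BTC.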
url = "https://api.bitfinex.com/v1/trades/btcusd?limit_trades=500"
    trades = json.loads(urlopen(url).read())
past_time=trades[0]['timestamp']-5*60
buy_volume=[float(trade['amount']) for trade in trades if trade['type']=='buy' and trade['timestamp']>=past_time]
sell_volume=[float(trade['amount']) for trade in trades if trade['type']=='sell' and trade['timestamp']>=past_time]
total_buy=int(sum(buy_volume))
total_sell=int(sum(sell_volume))
vol_diff=total_buy-total_sell
msg='*Bitfinex* BTCUSD (within last 5 mins):\n- Buy volume: '+"{:,}".format(total_buy)+'\n- Sell volume: '+"{:,}".format(total_sell)+'\n- Difference volume: '+"{:,}".format(vol_diff)
if total_buy<=VOL_BUY and vol_diff<=-VOL_DIFF:
alarm=True
elif vol_diff>=VOL_DIFF:
alarm=True
else:
alarm=False
return alarm,msg
|
python
|
from manim_imports_ext import *
class KmpPrefixScene(AlgoScene):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.data = "cbccbcbccb"
def compute_prefix_function(self):
t = self.data
n = len(t)
prefix = np.zeros(n, dtype=int)
prefix[0] = -1
vector_string = AlgoVector(self, list(t))
self.add(vector_string)
text = AlgoText("pattern", color=BLUE).next_to(vector_string, direction=LEFT)
self.add(text)
vector_index = AlgoVector(self, range(0, n))
self.add(vector_index)
vector_index.next_to(vector_string, direction=UP)
text = AlgoText("index", color=BLUE).next_to(vector_index, direction=LEFT)
self.add(text)
vector_prefix = AlgoVector(self, prefix)
self.add(vector_prefix)
vector_prefix.next_to(vector_string, direction=DOWN)
text = AlgoText("next", color=BLUE).next_to(vector_prefix, direction=LEFT)
self.add(text)
cursor_j = vector_index.add_arrow(0, color=RED, text="j")
cursor_k = vector_index.add_arrow(-1, color=BLUE, text="k")
j = 0
k = -1
while j<len(t)-1:
if k==-1 or t[j] == t[k]:
if t[j] == t[k] and k != -1:
self.play(ApplyMethod(vector_string.get_node(j).set_color, BLUE),
ApplyMethod(vector_string.get_node(k).set_color, BLUE))
self.show_message("比较后相等,前进一步")
self.play(ApplyMethod(vector_string.get_node(j).set_color, ALGO_NODE_COLOR),
ApplyMethod(vector_string.get_node(k).set_color, ALGO_NODE_COLOR))
if k == -1:
self.show_message("k=-1,前进一步")
j+=1
k+=1
prefix[j] = k
vector_index.move_arrow(cursor_j, j, run_time=0.5)
vector_index.move_arrow(cursor_k, k, run_time=0.5)
vector_prefix.set(j, str(k))
if k>0:
self.play(vector_prefix.submobjects[j].set_color, BLUE)
else:
self.play(vector_prefix.submobjects[j].set_color, RED)
else:
self.play(ApplyMethod(vector_string.get_node(j).set_color, RED),
ApplyMethod(vector_string.get_node(k).set_color, RED))
self.show_message("比较后不相等,回溯一步")
self.play(ApplyMethod(vector_string.get_node(j).set_color, ALGO_NODE_COLOR),
ApplyMethod(vector_string.get_node(k).set_color, ALGO_NODE_COLOR))
node = vector_prefix.get_node(k)
old_k = k
k = prefix[k]
if k != -1:
arrow = Arrow(vector_prefix.get_node(old_k).get_center(),
vector_prefix.get_node(k).get_center(), path_arc=-np.pi*0.5, thickness=0.03, color=GREEN)
else:
arrow = Arrow(vector_prefix.get_node(old_k).get_center(),
vector_prefix.get_node(old_k).get_center()+LEFT, path_arc=-np.pi*0.5, thickness=0.03, color=GREEN)
arrow.set_color(BLUE)
self.play(ShowCreation(arrow), run_time=0.5)
self.wait()
vector_index.move_arrow(cursor_k, k, run_time=0.5)
self.play(FadeOut(arrow))
self.show_message("next表计算完成")
self.prefix = prefix
print(prefix)
def construct(self):
self.start_logo(subtitle="KMP算法")
self.init_message("KMP算法 - 前缀表")
self.compute_prefix_function()
self.wait(2)
class KmpScene(AlgoScene):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.text = "acbccbccbcbccbcab"
self.pattern = "cbccbcbccb"
def compute_prefix_function(self, p):
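        # Classic KMP "next" table with next[0] = -1 as a sentinel: next[j] is the
        # pattern index to fall back to after a mismatch at position j.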
n = len(p)
next = np.zeros(n, dtype=int)
k = -1
j = 0
next[0] = -1
while j < n-1:
if k == -1 or p[j] == p[k]:
k+=1
j+=1
next[j] = k
else:
k = next[k]
return next
def kmp_matcher(self, prefix):
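        # Standard KMP scan: j walks the text and k walks the pattern; on a
        # mismatch k falls back through prefix[k] instead of rewinding j.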
t = self.text
p = self.pattern
n = len(p)
j = 0
k = 0
groups = []
vector_index = AlgoVector(self, range(0, len(t))).arrange(buff=0.15).scale(0.7).set_color(GREY)
groups.append(VGroup(*[vector_index]))
vector_text = AlgoVector(self, list(t)).arrange(buff=0.15).to_edge(edge=UP).scale(0.7).set_color(GREY)
groups.append(VGroup(*[vector_text]))
vector_pattern = AlgoVector(self, list(p)).arrange(buff=0.15).scale(0.7).set_color(GREY)
groups.append(VGroup(*[vector_pattern]))
vector_prefix = AlgoVector(self, prefix).arrange(buff=0.15).scale(0.7).set_color(TEAL_E)
groups.append(VGroup(*[vector_prefix]))
v = VGroup(*groups)
v.arrange(direction=DOWN, buff=0.7, aligned_edge=LEFT)
v.shift(RIGHT*0.5)
v.add(AlgoText("text", color=WHITE).scale(0.7).next_to(vector_text, direction=LEFT),
AlgoText("pattern", color=WHITE).scale(0.7).next_to(vector_pattern, direction=LEFT),
AlgoText("index", color=WHITE).scale(0.7).next_to(vector_index, direction=LEFT),
AlgoText("prefix", color=WHITE).scale(0.7).next_to(vector_prefix, direction=LEFT))
self.add(v)
cursor_j = vector_text.add_arrow(0, color=RED, text="j")
cursor_k = vector_pattern.add_arrow(0, color=BLUE, text="k")
while j < len(t) and k < len(p):
if k == -1 or t[j] == p[k]:
k += 1
j += 1
if j < len(t) and k < len(p):
vector_text.move_arrow(cursor_j, j, run_time=0.5)
vector_pattern.move_arrow(cursor_k, k, run_time=0.5)
g = [ApplyMethod(vector_text.submobjects[j].set_color, GREEN),
ApplyMethod(vector_pattern.submobjects[k].set_color, BLUE)]
self.play(*g)
else:
g = [ApplyMethod(vector_text.submobjects[j].set_color, RED),
ApplyMethod(vector_pattern.submobjects[k].set_color, RED)]
self.play(*g)
old_k = k
k = prefix[k]
g = []
for i in range(k+1, old_k+1):
g.append(ApplyMethod(vector_pattern.submobjects[i].set_color, GREY))
self.play(*g)
if k != -1:
arrow = Arrow(vector_prefix.get_node(old_k).get_center()-LEFT*0.2,
vector_prefix.get_node(k).get_center()-RIGHT*0.2, path_arc=-np.pi*0.6, thickness=0.02, color=WHITE)
else:
arrow = Arrow(vector_prefix.get_node(old_k).get_center()-LEFT*0.2,
vector_prefix.get_node(old_k).get_center()+LEFT*0.7, path_arc=-np.pi*0.6, thickness=0.02, color=WHITE)
arrow.set_color(BLUE)
self.play(ShowCreation(arrow), run_time=0.5)
vector_pattern.move_arrow(cursor_k, k)
if k == len(p):
return j - len(p)
return -1
def construct(self):
self.start_logo(animate=False)
self.init_message("KMP算法")
prefix = self.compute_prefix_function(self.pattern)
self.kmp_matcher(prefix)
self.wait(2)
|
python
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import abstractmethod, ABC
from typing import Generic, TypeVar
K = TypeVar('K')
N = TypeVar('N')
class InternalTimerService(Generic[N], ABC):
"""
Interface for working with time and timers.
This is the internal version of TimerService that allows to specify a key and a namespace to
which timers should be scoped.
"""
@abstractmethod
def current_processing_time(self):
"""
Returns the current processing time.
"""
pass
@abstractmethod
def current_watermark(self):
"""
Returns the current event-time watermark.
"""
pass
@abstractmethod
def register_processing_time_timer(self, namespace: N, t: int):
"""
Registers a timer to be fired when processing time passes the given time. The namespace you
pass here will be provided when the timer fires.
:param namespace: The namespace you pass here will be provided when the timer fires.
:param t: The processing time of the timer to be registered.
"""
pass
@abstractmethod
def register_event_time_timer(self, namespace: N, t: int):
"""
Registers a timer to be fired when event time watermark passes the given time. The namespace
you pass here will be provided when the timer fires.
:param namespace: The namespace you pass here will be provided when the timer fires.
:param t: The event time of the timer to be registered.
"""
pass
def delete_processing_time_timer(self, namespace: N, t: int):
"""
Deletes the timer for the given key and namespace.
        :param namespace: The namespace of the timer to be deleted.
        :param t: The trigger time of the timer to be deleted.
"""
pass
def delete_event_time_timer(self, namespace: N, t: int):
"""
Deletes the timer for the given key and namespace.
        :param namespace: The namespace of the timer to be deleted.
        :param t: The trigger time of the timer to be deleted.
"""
pass
class InternalTimer(Generic[K, N], ABC):
@abstractmethod
def get_timestamp(self) -> int:
"""
Returns the timestamp of the timer. This value determines the point in time when the timer
will fire.
"""
pass
@abstractmethod
def get_key(self) -> K:
"""
Returns the key that is bound to this timer.
"""
pass
@abstractmethod
def get_namespace(self) -> N:
"""
Returns the namespace that is bound to this timer.
"""
pass
|
python
|
from itertools import permutations
from typing import List
class Solution:
def getProbability(self, balls: List[int]) -> float:
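        # Brute force: enumerate every ordering of the balls and compare the number
        # of distinct colours in each half; exponential, so only viable for tiny inputs.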
setOfBalls = []
currentBall = 1
for b in balls:
setOfBalls.extend([currentBall] * b)
currentBall += 1
equal = 0
total = 0
for choice in permutations(setOfBalls, len(setOfBalls)):
half = len(choice) // 2
if len(set(choice[:half])) == len(set(choice[half:])): equal += 1
total += 1
return equal / total
|
python
|
from .item import get_basket_item_model
from .basket import get_basket_model, BaseBasket
from ..settings import basket_settings
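# DynamicBasketItem is only exported when dynamic baskets are enabled in settings.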
if basket_settings.is_dynamic:
from .item import DynamicBasketItem
|
python
|
import pandas as pd
import plotly.express as px
prices = pd.read_csv("prices_history.csv", index_col=0)
prices.index = pd.to_datetime(prices.index, unit="s")
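# Normalise each column by its mean so instruments at different price levels are comparable.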
prices_change = prices / prices.mean()
print(prices_change)
prices_total_change = (prices_change.iloc[-1] - prices_change.iloc[0]) * 100
print(prices_total_change["PERPEUR"])
fig = px.bar(prices_total_change)
fig.show()
|
python
|
from bitresource import resource_registry
from http_resource import HttpResource
@resource_registry.register()
class KrakenHttpResource(HttpResource):
name = 'kraken'
endpoint_url = 'https://api.kraken.com/0/public/'
CurrencyResource = KrakenHttpResource('Assets')
MarketResource = KrakenHttpResource('AssetPairs')
|
python
|
from spidriver import SPIDriver
s = SPIDriver("/dev/ttyUSB0") # change for your port
s.sel() # start command
s.write([0x9f]) # command 9F is READ JEDEC ID
print(list(s.read(3))) # read next 3 bytes
s.unsel() # end command
|
python
|
'''
Which stocks move together?
In the previous exercise, you clustered companies by their daily stock price movements. So which companies have stock prices that tend to change in the same way? You'll now inspect the cluster labels from your clustering to find out.
Your solution to the previous exercise has already been run. Recall that you constructed a Pipeline pipeline containing a KMeans model and fit it to the NumPy array movements of daily stock movements. In addition, a list companies of the company names is available.
INSTRUCTIONS
100XP
Import pandas as pd.
Use the .predict() method of the pipeline to predict the labels for movements.
Align the cluster labels with the list of company names companies by creating a DataFrame df with labels and companies as columns. This has been done for you.
Use the .sort_values() method of df to sort the DataFrame by the 'labels' column, and print the result.
Hit 'Submit Answer' and take a moment to see which companies are together in each cluster!
'''
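# Note: `pipeline`, `movements` and `companies` are supplied by the exercise
# environment. A minimal sketch of how they might have been constructed (an
# assumption for context, not part of this exercise):
#
#     from sklearn.pipeline import make_pipeline
#     from sklearn.preprocessing import Normalizer
#     from sklearn.cluster import KMeans
#     pipeline = make_pipeline(Normalizer(), KMeans(n_clusters=10))
#     pipeline.fit(movements)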
# Import pandas
import pandas as pd
# Predict the cluster labels: labels
labels = pipeline.predict(movements)
# Create a DataFrame aligning labels and companies: df
df = pd.DataFrame({'labels': labels, 'companies': companies})
# Display df sorted by cluster label
print(df.sort_values('labels'))
|
python
|
'''
Made By Sai Harsha Kottapalli
Tested on python3
About : SSH Command
Use : make a connection to SSH server and run a command
'''
import argparse
import paramiko
def command(ip_addr,username,pwd,cmd):
client = paramiko.SSHClient()
#can use keys for authentication
#client.load_host_keys("/home/archelaus/.ssh/known_hosts")
#we need to control both ends of connection so we need to set policy for server to accept key
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(ip_addr, username=username, password=pwd)
session = client.get_transport().open_session()
if session.active:
session.exec_command(cmd)
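        # recv(1024) returns at most 1024 bytes, so long command output may be truncated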
print(session.recv(1024).decode("utf-8"))
return
def main():
parser = argparse.ArgumentParser(description = "SSH Command")
parser.add_argument("-ip",action="store",dest="ip",help="ip address")
parser.add_argument("-u",action="store",dest="user",help="Username")
parser.add_argument("-p",action="store",dest="pwd",help="password")
parser.add_argument("-c",action="store",dest="cmd",help="command")
results = parser.parse_args()
command(results.ip,results.user,results.pwd,results.cmd)
if __name__ == "__main__":
main()
|
python
|
from PySide2.QtCore import QAbstractItemModel, QModelIndex, Qt
from hexrd.ui.tree_views.tree_item import TreeItem
KEY_COL = 0
class BaseTreeItemModel(QAbstractItemModel):
KEY_COL = KEY_COL
def columnCount(self, parent):
return self.root_item.column_count()
def headerData(self, section, orientation, role):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return self.root_item.data(section)
return None
def data(self, index, role):
if not index.isValid():
return
if role not in (Qt.DisplayRole, Qt.EditRole):
return
item = self.get_item(index)
return item.data(index.column())
def index(self, row, column, parent):
if not self.hasIndex(row, column, parent):
return QModelIndex()
parent_item = self.get_item(parent)
child_item = parent_item.child(row)
if not child_item:
return QModelIndex()
return self.createIndex(row, column, child_item)
def parent(self, index):
if not index.isValid():
return QModelIndex()
child_item = self.get_item(index)
parent_item = child_item.parent_item
if not parent_item or parent_item is self.root_item:
return QModelIndex()
return self.createIndex(parent_item.row(), KEY_COL, parent_item)
def rowCount(self, parent=QModelIndex()):
parent_item = self.get_item(parent)
return parent_item.child_count()
def get_item(self, index):
# If the index is valid and the internal pointer is valid,
# return the item. Otherwise, return the root item.
if index.isValid():
item = index.internalPointer()
if item:
return item
return self.root_item
def clear(self):
# Remove all of the root item children. That clears it.
root = self.root_item
self.beginRemoveRows(QModelIndex(), KEY_COL, root.child_count() - 1)
root.clear_children()
self.endRemoveRows()
def add_tree_item(self, data, parent):
return TreeItem(data, parent)
|
python
|
#!/usr/bin/env python3
"""Tests that all evidence codes seen in NCBI's gene2go have description."""
from __future__ import print_function
__copyright__ = "Copyright (C) 2016-2019, DV Klopfenstein, H Tang. All rights reserved."
__author__ = "DV Klopfenstein"
import os
from goatools.associations import dnld_ncbi_gene_file
from goatools.evidence_codes import EvidenceCodes
REPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../")
def test_ev():
"""Return GO associations from a GAF file. Download if necessary."""
evs = _get_evidencecodes('gene2go')
obj = EvidenceCodes()
missing = evs.difference(obj.code2name)
assert not missing, 'MISSING({EV})'.format(EV=missing)
def _get_evidencecodes(fin_gene2go):
"""Get all evidence codes and qualifiers."""
evs = set()
    fin_gene2go = os.path.join(REPO, fin_gene2go)  # resolve relative to the repo root
dnld_ncbi_gene_file(fin_gene2go, force_dnld=False, loading_bar=False)
with open(fin_gene2go) as ifstrm:
for line in ifstrm:
if line[0] != '#': # Line contains data. Not a comment
line = line.rstrip() # chomp
flds = line.split('\t')
if len(flds) >= 5:
# taxid_curr, geneid, go_id, evidence, qualifier = flds[:5]
evidence = flds[3]
assert len(evidence) >= 2, flds
evs.add(evidence)
print('{N} evidence codes in {FIN}'.format(N=len(evs), FIN=fin_gene2go))
return evs
if __name__ == '__main__':
test_ev()
# Copyright (C) 2016-2019, DV Klopfenstein, H Tang. All rights reserved.
|
python
|
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache License Version 2.0.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2018 Datadog, Inc.
import copy
import traceback
import re
import logging
import unicodedata
from aggregator import TextualMetricTypes
from utils.hostname import get_hostname
from utils.hash import hash_mutable
class CheckException(Exception):
pass
class AgentCheck(object):
OK, WARNING, CRITICAL, UNKNOWN = (0, 1, 2, 3)
def __init__(self, name, init_config, instance, aggregator=None):
self.name = name
self.init_config = init_config
self.instance = instance
self.signature = self.signature_hash(name, init_config, instance)
self.warnings = []
self.log = logging.getLogger('%s.%s' % (__name__, self.name))
self.hostname = get_hostname()
self.aggregator = aggregator
def check(self, instance):
raise NotImplementedError
@staticmethod
def signature_hash(name, init_config, instance):
return hash_mutable((name, init_config, instance))
def set_aggregator(self, aggregator):
self.aggregator = aggregator
def _submit_metric(self, mtype, name, value, tags=None, timestamp=None):
if not self.aggregator or value is None:
# ignore metric sample
return
tags = self._normalize_tags(tags)
source = (self.name, self.signature)
self.aggregator.submit_metric(name, float(value), mtype, tags=tags, timestamp=timestamp, source=source)
def gauge(self, name, value, tags=None, timestamp=None):
self._submit_metric(TextualMetricTypes.GAUGE, name, value, tags=tags, timestamp=timestamp)
def count(self, name, value, tags=None):
self._submit_metric(TextualMetricTypes.COUNT, name, value, tags=tags)
def monotonic_count(self, name, value, tags=None):
self._submit_metric(TextualMetricTypes.MONOTONIC_COUNT, name, value, tags=tags)
def rate(self, name, value, tags=None):
self._submit_metric(TextualMetricTypes.RATE, name, value, tags=tags)
def histogram(self, name, value, tags=None):
self._submit_metric(TextualMetricTypes.HISTOGRAM, name, value, tags=tags)
def historate(self, name, value, tags=None):
self._submit_metric(TextualMetricTypes.HISTORATE, name, value, tags=tags)
def service_check(self, name, status, tags=None, message=None):
tags = self._normalize_tags_type(tags)
if message is None:
message = ""
self.aggregator.service_check(name, status, tags, message=message)
def event(self, event):
# Enforce types of some fields, considerably facilitates handling in go bindings downstream
for key, value in list(event.items()):
# transform the unicode objects to plain strings with utf-8 encoding
if isinstance(value, str):
try:
event[key] = event[key].encode('utf-8')
except UnicodeError:
self.log.warning("Error encoding unicode field '%s' of event to utf-8 encoded string, \
can't submit event", key)
return
if event.get('tags'):
event['tags'] = self._normalize_tags_type(event['tags'])
if event.get('timestamp'):
event['timestamp'] = int(event['timestamp'])
if event.get('aggregation_key'):
event['aggregation_key'] = str(event['aggregation_key'])
self.aggregator.submit_event(self, self.name, event)
def normalize(self, metric, prefix=None, fix_case=False):
"""
Turn a metric into a well-formed metric name
prefix.b.c
:param metric The metric name to normalize
        :param prefix A prefix to add to the normalized name, default None
:param fix_case A boolean, indicating whether to make sure that
the metric name returned is in underscore_case
"""
        if isinstance(metric, str):
            # strip accents, drop non-ascii bytes, and decode back to str
            # so the regex substitutions below operate on text, not bytes
            metric_name = unicodedata.normalize('NFKD', metric).encode('ascii', 'ignore').decode('ascii')
        else:
            metric_name = metric
if fix_case:
name = self.convert_to_underscore_separated(metric_name)
if prefix is not None:
prefix = self.convert_to_underscore_separated(prefix)
else:
name = re.sub(r"[,\+\*\-/()\[\]{}\s]", "_", metric_name)
# Eliminate multiple _
name = re.sub(r"__+", "_", name)
# Don't start/end with _
name = re.sub(r"^_", "", name)
name = re.sub(r"_$", "", name)
# Drop ._ and _.
name = re.sub(r"\._", ".", name)
name = re.sub(r"_\.", ".", name)
if prefix is not None:
return prefix + "." + name
else:
return name
FIRST_CAP_RE = re.compile('(.)([A-Z][a-z]+)')
ALL_CAP_RE = re.compile('([a-z0-9])([A-Z])')
METRIC_REPLACEMENT = re.compile(r'([^a-zA-Z0-9_.]+)|(^[^a-zA-Z]+)')
DOT_UNDERSCORE_CLEANUP = re.compile(r'_*\._*')
def convert_to_underscore_separated(self, name):
"""
Convert from CamelCase to camel_case
And substitute illegal metric characters
"""
metric_name = self.FIRST_CAP_RE.sub(r'\1_\2', name)
metric_name = self.ALL_CAP_RE.sub(r'\1_\2', metric_name).lower()
metric_name = self.METRIC_REPLACEMENT.sub('_', metric_name)
return self.DOT_UNDERSCORE_CLEANUP.sub('.', metric_name).strip('_')
def _normalize_tags(self, tags):
"""
Normalize tags:
- normalize tags to type `str`
- always return a list
"""
if tags is None:
normalized_tags = []
else:
normalized_tags = list(tags) # normalize to `list` type, and make a copy
return self._normalize_tags_type(normalized_tags)
    def _normalize_tags_type(self, tags):
        """
        Normalize all the tags to strings (type `str`) so that the go bindings can handle them easily
        Doesn't mutate the passed list, returns a new list
        """
        normalized_tags = []
        if tags is not None:
            for tag in tags:
                if not isinstance(tag, str):
                    try:
                        tag = str(tag)
                    except Exception:
                        self.log.warning("Error converting tag to string, ignoring tag")
                        continue
                normalized_tags.append(tag)
        return normalized_tags
def warning(self, warning_message):
warning_message = str(warning_message)
self.log.warning(warning_message)
self.warnings.append(warning_message)
def get_warnings(self):
"""
Return the list of warnings messages to be displayed in the info page
"""
warnings = self.warnings
self.warnings = []
return warnings
def run(self):
try:
self.check(copy.deepcopy(self.instance))
result = None
except Exception as e:
result = {
"message": str(e),
"traceback": traceback.format_exc(),
}
return result
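# A minimal sketch of a custom check built on this base class (hypothetical
# metric name and tags; the aggregator is wired in by the agent at runtime):
class ExampleCheck(AgentCheck):
    def check(self, instance):
        # submit a single gauge sample each run
        self.gauge('example.queue_depth', 42, tags=['env:dev'])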
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from parglare import GLRParser, Grammar, Parser, ParseError
from parglare.exceptions import SRConflicts
def test_lr2_grammar():
grammar = """
Model: Prods EOF;
Prods: Prod | Prods Prod;
Prod: ID "=" ProdRefs;
ProdRefs: ID | ProdRefs ID;
terminals
ID: /\w+/;
"""
input_str = """
First = One Two three
Second = Foo Bar
Third = Baz
"""
g = Grammar.from_string(grammar)
    # This grammar is not LR(1) as it requires
    # at least two tokens of lookahead to decide
    # what to do on each ID from the right side.
    # If '=' comes after the ID it should reduce "Prod",
    # otherwise it should reduce the ID as ProdRefs.
with pytest.raises(SRConflicts):
Parser(g, prefer_shifts=False)
    # The prefer_shifts strategy (the default)
    # will remove the conflicts, but the resulting parser
    # will fail to parse any input as it will greedily
    # consume the next rule's ID as a body element of the previous Prod rule.
parser = Parser(g)
with pytest.raises(ParseError):
parser.parse(input_str)
# But it can be parsed unambiguously by GLR.
p = GLRParser(g)
results = p.parse(input_str)
assert len(results) == 1
def test_nops():
"""
    Test that nops (no prefer shifts) will be honored per rule.
"""
grammar = """
Program: "begin"
statements=Statements
ProgramEnd EOF;
Statements: Statements1 | EMPTY;
Statements1: Statements1 Statement | Statement;
ProgramEnd: End;
Statement: End "transaction" | "command";
terminals
End: "end";
"""
g = Grammar.from_string(grammar, ignore_case=True)
parser = GLRParser(g, build_tree=True, prefer_shifts=True)
# Here we have "end transaction" which is a statement and "end" which
# finish program. Prefer shift strategy will make parser always choose to
# shift "end" in anticipation of "end transaction" statement instead of
# reducing by "Statements" and finishing.
with pytest.raises(ParseError):
parser.parse("""
begin
command
end transaction
command
end transaction
command
end
""")
    # When {nops} is used, the GLR parser will investigate both possibilities
    # at this place and find the correct interpretation while still using the
    # prefer_shifts strategy globally.
grammar = """
Program: "begin"
statements=Statements
ProgramEnd EOF;
Statements: Statements1 {nops} | EMPTY;
Statements1: Statements1 Statement | Statement;
ProgramEnd: End;
Statement: End "transaction" | "command";
terminals
End: "end";
"""
g = Grammar.from_string(grammar, ignore_case=True)
parser = GLRParser(g, build_tree=True, prefer_shifts=True)
parser.parse("""
begin
command
end transaction
command
end transaction
command
end
""")
def test_expressions():
actions = {
"E": [
lambda _, nodes: nodes[0] + nodes[2],
lambda _, nodes: nodes[0] * nodes[2],
lambda _, nodes: nodes[1],
lambda _, nodes: int(nodes[0])
]
}
# This grammar is highly ambiguous if priorities and
# associativities are not defined to disambiguate.
grammar = """
E: E "+" E | E "*" E | "(" E ")" | Number;
terminals
Number: /\d+/;
"""
g = Grammar.from_string(grammar)
p = GLRParser(g, actions=actions, debug=True)
# Even this simple expression has 2 different interpretations
# (4 + 2) * 3 and
# 4 + (2 * 3)
results = p.parse("4 + 2 * 3")
assert len(results) == 2
assert 18 in results and 10 in results
    # Adding one more operand raises the number of interpretations to 5
results = p.parse("4 + 2 * 3 + 8")
assert len(results) == 5
# One more and there are 14 interpretations
results = p.parse("4 + 2 * 3 + 8 * 5")
assert len(results) == 14
    # The number of interpretations is the Catalan number C(n),
    # where n is the number of operations.
    # https://en.wikipedia.org/wiki/Catalan_number
    # This number grows very fast. For 10 operations the number of
    # interpretations is 16796!
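    # (Illustrative check, not part of the original test: C(n) = comb(2n, n) // (n + 1)
    # yields 2, 5, 14 for n = 2, 3, 4, matching the counts asserted above,
    # and C(10) is indeed 16796.)
    # from math import comb; [comb(2 * n, n) // (n + 1) for n in (2, 3, 4)] == [2, 5, 14]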
# If we rise priority for multiplication operation we reduce ambiguity.
# Default production priority is 10. Here we will raise it to 15 for
# multiplication.
grammar = """
E: E "+" E | E "*" E {15}| "(" E ")" | Number;
terminals
Number: /\d+/;
"""
g = Grammar.from_string(grammar)
p = GLRParser(g, actions=actions)
    # This expression now has 2 interpretations:
# (4 + (2*3)) + 8
# 4 + ((2*3) + 8)
    # This is due to the associativity of the + operation, which is not defined.
results = p.parse("4 + 2 * 3 + 8")
assert len(results) == 2
# If we define associativity for both + and * we have resolved all
# ambiguities in the grammar.
grammar = """
E: E "+" E {left}| E "*" E {left, 15}| "(" E ")" | Number;
terminals
Number: /\d+/;
"""
g = Grammar.from_string(grammar)
p = GLRParser(g, actions=actions)
results = p.parse("4 + 2 * 3 + 8 * 5 * 3")
assert len(results) == 1
assert results[0] == 4 + 2 * 3 + 8 * 5 * 3
def test_epsilon_grammar():
grammar = """
Model: Prods EOF;
Prods: Prod | Prods Prod | EMPTY;
Prod: ID "=" ProdRefs;
ProdRefs: ID | ProdRefs ID;
terminals
ID: /\w+/;
"""
g = Grammar.from_string(grammar)
p = GLRParser(g, debug=True)
txt = """
First = One Two three
Second = Foo Bar
Third = Baz
"""
results = p.parse(txt)
assert len(results) == 1
results = p.parse("")
assert len(results) == 1
def test_non_eof_grammar_nonempty():
"""
    Grammar that is not anchored by EOF at the end might
    result in multiple trees that are produced by successful
    parses of the incomplete input.
"""
grammar_nonempty = """
Model: Prods;
Prods: Prod | Prods Prod;
Prod: ID "=" ProdRefs;
ProdRefs: ID | ProdRefs ID;
terminals
ID: /\w+/;
"""
g_nonempty = Grammar.from_string(grammar_nonempty)
txt = """
First = One Two three
Second = Foo Bar
Third = Baz
"""
p = GLRParser(g_nonempty, debug=True)
results = p.parse(txt)
    # There are three successful parses.
    # E.g. one would be the production 'First = One Two three Second' and the
    # parser could not continue as the next token is '=', but it succeeds as
    # we haven't terminated our model with EOF, so we allow partial parses.
assert len(results) == 3
def test_non_eof_grammar_empty():
"""
    Grammar that is not anchored by EOF at the end might
    result in multiple trees that are produced by successful
    parses of the incomplete input.
"""
grammar_empty = """
Model: Prods;
Prods: Prod | Prods Prod | EMPTY;
Prod: ID "=" ProdRefs;
ProdRefs: ID | ProdRefs ID;
terminals
ID: /\w+/;
"""
g_empty = Grammar.from_string(grammar_empty)
txt = """
First = One Two three
Second = Foo Bar
Third = Baz
"""
p = GLRParser(g_empty, debug=True)
results = p.parse(txt)
assert len(results) == 3
results = p.parse("")
assert len(results) == 1
|
python
|
from hatano.errors import HatanoError
import boto3
import zipfile
import shutil
import subprocess as sp
import os
import random
import string
import sys
import json
global_conf_file = './hatano_settings.json'
region = boto3.session.Session().region_name
class ZipSrc:
def __init__(self, src, stage):
self.src = src
self.stage = stage
self.name = ""
def __enter__(self):
tmp_dir = '.' + temp_name()
self.tmp_dir = tmp_dir
shutil.copytree(self.src, tmp_dir)
cmd = f"pip install -r requirements-{self.stage}.txt"\
f" -t {tmp_dir} -q"
sp.call(cmd.split())
src = tmp_dir
#if not os.path.isdir(src):
# return False
head = f"{src}{os.path.sep}"
zip_name = temp_name('.zip')
zf = zipfile.ZipFile(zip_name, 'x')
for dirname,subdirs,fnames in os.walk(src):
for name in subdirs+fnames:
zpath = os.path.join(dirname, name)
path = zpath
if path.startswith(head):
path = path[len(head):]
zf.write(zpath, path)
zf.close()
self.name = zip_name
return zip_name
def __exit__(self, typ, value, traceback):
if self.name and os.path.isfile(self.name):
try:
os.remove(self.name)
            except OSError:
pass
try:
shutil.rmtree(self.tmp_dir)
        except OSError:
pass
def temp_name(ext=""):
allow = string.ascii_lowercase + string.ascii_uppercase
lets = [random.choice(allow) for _ in range(8)]
return ''.join(lets) + f"{ext}"
class Conf:
def __init__(self, conf=global_conf_file):
self.conf = conf
#def get_stage(self, stage):
# if not os.path.isfile(self.conf):
# raise HatanoError(f"No {self.conf} found")
#
# with open(self.conf) as f:
# conf = json.load(f)
# if not conf:
# return "", {}
# project = conf.get("project", "")
# stages = conf.get("stages", {})
#
# return project, stages.get(stage, {})
def exists(self):
return os.path.isfile(self.conf)
def touch(self):
with open(self.conf, 'a') as f:
pass
def read(self):
if not self.exists():
return
with open(self.conf) as f:
conf = json.load(f)
return conf
def write(self, conf):
with open(self.conf, 'w') as f:
json.dump(conf, f, indent=4)
def show(self):
conf = self.read()
return json.dumps(conf, indent=4)
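# Hedged usage sketch (hypothetical settings content):
# conf = Conf()
# if not conf.exists():
#     conf.write({"project": "demo", "stages": {}})
# print(conf.show())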
|
python
|
########################################################
# neweggQuickSearch.py is a script which automates
# the process of searching for products
# and returns important information back in a CSV.
# Libraries Used: urllib.request and bs4 (BeautifulSoup)
# neweggQuickSearch.py asks the user to input information
# such as the Newegg region, the product search and the sort filter.
# neweggQuickSearch.py currently supports the US and CA
# regions, however more regions will be added as time
# passes.
# neweggQuickSearch.py is completely safe and legal to use.
# Please report any issues to my github immediately.
# Any known error in the program will only happen if the
# intended search has 0 results. (Plan to Fix in the Future)
# neweggQuickSearch.py can be reused or edited in any way.
# Alex Kotov 09/07/2020
########################################################
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
from time import sleep
# Newegg Region Choices
options = ['US','CA']
# Ask user for Newegg Region
country = input("Which Newegg would you \nlike to use? CA or US?: ")
while country.upper() not in options:
country = input("'" + str(country) + "' is not an option.\nplease choose from CA or US?: ")
# creates URL based on Region
if country.upper() == 'US':
url = 'https://www.newegg.com/p/pl?d='
else:
url = 'https://www.newegg.ca/p/pl?d='
# creates the final URL
search = input("What would you like to search?: ")
searchurl = str(url) + str(search.replace(' ', '+'))
def choices():
# a list of additional search options
print("Type F to Sort Results by Featured Items (Default).")
print("Type L to Sort Results by Lowest Price to Highest.")
print("Type H to Sort Results by Highest Price to Lowest.")
print("Type S to Sort Results by Best Selling.")
print("Type R to Sort Results by Best Ratings.")
print("Type M to Sort Results by Most Reviews.")
print("Type N to Sort Results by Newest Results.")
# ask the user for additional search options
choice = 'default'
SortOptions = ['F','L','H','S','R','M','N']
while choice.upper() not in SortOptions:
print("Please type a letter corresponding to the options below.")
choices()
choice = input("choice: ")
# add to url based on search option
if choice.upper() == 'F':
searchurl += '&Order=0'
if choice.upper() == 'L':
searchurl += '&Order=1'
if choice.upper() == 'H':
searchurl += '&Order=2'
if choice.upper() == 'S':
searchurl += '&Order=3'
if choice.upper() == 'R':
searchurl += '&Order=4'
if choice.upper() == 'M':
searchurl += '&Order=5'
if choice.upper() == 'N':
searchurl += '&Order=6'
# read URL
uClient = uReq(searchurl)
page_html = uClient.read()
# close URL
uClient.close()
# create csv file
filename = 'products.csv'
f = open(filename, 'w')
# write default headers
headers = 'Product Name, Price, Star Rating, Shipping cost \n'
f.write(headers)
# html parsing
pagesoup = soup(page_html, "html.parser")
containers = pagesoup.findAll("div",{"class":"item-container"})
# loop for each product container
for container in containers:
# Get Full Product Name
brand = container.find("a",{"class":"item-title"})
productname = brand.text
# Get Price
pricedollar = container.find("li",{"class":"price-current"})
price = pricedollar.text.split()
# Parse Rating
rating = container.find("a",{"class":"item-rating"})
# Create Rating String for Reviews
    if rating is not None:
amofrating = container.find("span",{"class":"item-rating-num"})
amofratingtxt = str(amofrating.text)
chartoremove = '()'
for character in chartoremove:
amofratingtxt = amofratingtxt.replace(character,'')
# Parse Star Rating
starrating = rating['title'].split()[2]
# Plural Detection
if int(amofratingtxt) > 1:
ratingmessage = str(starrating) + " star rating with " + str(amofratingtxt) + " reviews"
else:
ratingmessage = str(starrating) + " star rating with " + str(amofratingtxt) + " review"
# Create Rating String for No Reviews
else:
ratingmessage = 'No Reviews'
# Parse Shipping
shipping = container.find("li",{"class":"price-ship"})
# write each product detail in csv
f.write(productname.replace(",", "|") + ',' + price[0] + ',' + ratingmessage + ',' + shipping.text + "\n")
# close file to make it accessible to the user
f.close()
# inform the user the csv has been created.
print("The CSV Has Been Created, this program will close in 10 seconds.")
sleep(10)
|
python
|
from django.forms import ModelForm
from workouts.models import WorkoutSession
class WorkoutSessionForm(ModelForm):
class Meta:
model = WorkoutSession
fields = [
'name',
'description',
'location',
'workout_date',
]
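# Minimal usage sketch in a view (hypothetical; not part of this module):
# form = WorkoutSessionForm(request.POST or None)
# if form.is_valid():
#     form.save()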
|
python
|
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import dash
dash.__version__
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input,Output
import dash_bootstrap_components as dbc
import plotly.io as pio
import os
print(os.getcwd())
df_input_large=pd.read_csv('C:/Users/Nitin/ds-covid19/data/processed/COVID_final_set.csv',sep=';')
df_SIR_large=pd.read_csv('C:/Users/Nitin/ds-covid19/data/processed/COVID_JH_flat_table_confirmed.csv',sep=';',parse_dates=[0])
df_SIR_large=df_SIR_large.sort_values('date',ascending=True)
fig=go.Figure()
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
app.title = 'COVID-19 Dashboard based on Applied Data Science'
app.layout = html.Div([
dbc.Row(dbc.Col(html.H1('COVID-19 Data Dashboard Visualization using Applied Data Science'),
width={'size': 8, 'offset': 1},
),
),
dbc.Row(dbc.Col(html.Div('''
    The goal of the project is to teach data science by applying a cross-industry standard process.
    It covers the full walkthrough: automated data gathering, data transformations,
    filtering and machine learning to approximate the doubling time, and
    (static) deployment of a responsive dashboard along with a SIR simulation.
'''),
width={'size': 8, 'offset': 1},
)
),
dbc.Row(dbc.Col(html.H5('Select a single country for SIR simulation curve'),
width={'size': 5, 'offset': 1},
),
),
dbc.Row(
[
dbc.Col(
dcc.Dropdown( id='single_select_country',
options=[{'label':each,'value':each} for each in df_SIR_large.columns[1:]],
value='Germany',
multi=False),
width={'size': 5, "offset": 1, 'order': 'second'}),
],
),
dbc.Row(dbc.Col(html.H5('In order to manipulate the SIR curve, vary the values regarding the measures and press enter:'),
width={'size': 5, 'offset': 1},
),
),
dbc.Row(
[
#For changing beta ,gamma, t_initial, t_intro_measures,t_hold,t_relax
dbc.Row(children=[
html.Br(),
html.Br(),
html.Label(["No measures introduced (in days):",
dcc.Input(id='t_initial',
type='number',
value=28,debounce=True)],style={"margin-left": "30px"}),
html.Label(["Measures introduced over (in days):",
dcc.Input(id='t_intro_measures',
type='number',
value=14,debounce=True)],style={"margin-left": "30px"}),
html.Label(["Introduced measures hold time (in days):",
dcc.Input(id='t_hold',
type='number',
value=21,debounce=True)],style={"margin-left": "30px"}),
html.Br(),
html.Br(),
html.Label(["Introduced measures relaxed (in days):",
dcc.Input(id='t_relax',
type='number',
value=21,debounce=True)],style={"margin-left": "30px"}),
html.Label(["Beta max:",
dcc.Input(id='beta_max',
type='number',
value=0.4,debounce=True)],style={"margin-left": "30px"}),
html.Label(["Beta min:",
dcc.Input(id='beta_min',
type='number',
value=0.11,debounce=True)],style={"margin-left": "30px"}),
html.Label(["Gamma:",
dcc.Input(id='gamma',
type='number',
value=0.1,debounce=True)],style={"margin-left": "30px"}),
html.Br(),
html.Br(),
]
),
dbc.Col(dcc.Graph(
figure=fig,
id='SIR_curve'),
width=6, md={'size': 10, "offset": 1, 'order': 'last'}
),
]
),
dbc.Row(dbc.Col(html.H5('Multi - Select Country for Visualization'),
width={'size': 5, 'offset': 1},
),
),
dbc.Row(
[
dbc.Col(
dcc.Dropdown(
id='country_drop_down',
options=[ {'label': each,'value':each} for each in df_input_large['country'].unique()],
value=['US', 'Germany','Italy'], # which are pre-selected
multi=True),
width={'size': 5, "offset": 1, 'order': 'first'}
),
], no_gutters=True
),
dbc.Row(dbc.Col(html.H5('Select Timeline of confirmed COVID-19 cases or the approximated doubling time'),
width={'size': 5, 'offset': 1},
),
),
dbc.Row(
[
dbc.Col(
dcc.Dropdown(
id='doubling_time',
options=[
{'label': 'Timeline Confirmed ', 'value': 'confirmed'},
{'label': 'Timeline Confirmed Filtered', 'value': 'confirmed_filtered'},
{'label': 'Timeline Doubling Rate', 'value': 'confirmed_DR'},
{'label': 'Timeline Doubling Rate Filtered', 'value': 'confirmed_filtered_DR'},
],
value='confirmed',
multi=False),
width={'size': 3, "offset": 1, 'order': 'first'}
),
],
),
dbc.Row(
[
dbc.Col(dcc.Graph(
id='main_window_slope'
),
width=6, md={'size': 8, "offset": 1, 'order': 'first'}
),
],
),
])
@app.callback(
Output('SIR_curve', 'figure'),
[Input('single_select_country', 'value'),
Input('t_initial','value'),
Input('t_intro_measures','value'),
Input('t_hold','value'),
Input('t_relax','value'),
Input('beta_max','value'),
Input('beta_min','value'),
Input('gamma','value')])
def SIR_figure(country,initial_time,intro_measures,hold_time,relax_time,max_beta,min_beta,gamma_max):
ydata=df_SIR_large[country][df_SIR_large[country]>=30]
xdata=np.arange(len(ydata))
N0=5000000
I0=30
S0=N0-I0
R0=0
gamma=gamma_max
SIR=np.array([S0,I0,R0])
t_initial=initial_time
t_intro_measures=intro_measures
t_hold=hold_time
t_relax=relax_time
beta_max=max_beta
beta_min=min_beta
    propagation_rates = pd.DataFrame(columns=['susceptible', 'infected', 'recovered'])
pd_beta=np.concatenate((np.array(t_initial*[beta_max]),
np.linspace(beta_max,beta_min,t_intro_measures),
np.array(t_hold*[beta_min]),
np.linspace(beta_min,beta_max,t_relax),
))
def SIR_model(SIR,beta,gamma):
        """SIR model for simulating spread.
        S: Susceptible population, I: Infected population, R: Recovered population.
        S + I + R = N remains constant, so dS + dI + dR = 0 must hold at all times.
        """
S,I,R=SIR
dS_dt=-beta*S*I/N0
dI_dt=beta*S*I/N0-gamma*I
dR_dt=gamma*I
return ([dS_dt,dI_dt,dR_dt])
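    # The loop below integrates the ODEs with one explicit Euler step per
    # day (dt = 1): SIR(t+1) = SIR(t) + dSIR/dt, with beta varying per day
    # according to the pd_beta schedule built above.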
for each_beta in pd_beta:
new_delta_vec=SIR_model(SIR,each_beta,gamma)
SIR=SIR+new_delta_vec
propagation_rates=propagation_rates.append({'susceptible':SIR[0],'infected':SIR[1],'recovered':SIR[2]},ignore_index=True)
fig=go.Figure()
fig.add_trace(go.Bar(x=xdata,
y=ydata,
marker=dict(color='Lightseagreen'),
name='Confirmed Cases'
))
fig.add_trace(go.Scatter(x=xdata,
y=propagation_rates.infected,
mode='lines',
marker=dict(color='DarkRed'),
name='Simulated curve'))
fig.update_layout(shapes=[
dict(type='rect',xref='x',yref='paper',x0=0,y0=0,x1=t_initial,y1=1,fillcolor="MediumPurple",opacity=0.4,layer="below",line_width=0,),
dict(type='rect',xref='x',yref='paper',x0=t_initial,y0=0,x1=t_initial+t_intro_measures,y1=1,fillcolor="MediumPurple",opacity=0.5,layer="below",line_width=0,),
dict(type='rect',xref='x',yref='paper',x0=t_initial+t_intro_measures,y0=0,x1=t_initial+t_intro_measures+t_hold,y1=1,fillcolor="MediumPurple",opacity=0.6,layer='below',line_width=0,),
dict(type='rect',xref='x',yref='paper',x0=t_initial+t_intro_measures+t_hold,y0=0,x1=t_initial+t_intro_measures+t_hold+t_relax,y1=1,fillcolor='MediumPurple',opacity=0.7,layer='below',line_width=0,)
],
title='SIR Simulation Model for COVID19',
title_x=0.5,
xaxis=dict(title='Time (in days)',
titlefont_size=16),
yaxis=dict(title='Confirmed cases based on Johns Hopkins Data, log scale ',
type='log',
titlefont_size=16,
),
width=1280,
height=600,
)
return fig
@app.callback(
Output('main_window_slope', 'figure'),
[Input('country_drop_down', 'value'),
Input('doubling_time', 'value')])
def update_figure(country_list,show_doubling):
    # the dropdown values for doubling rates end in '_DR'
    if 'DR' in show_doubling:
        my_yaxis={'type':"log",
                  'title':'Approximated doubling rate over 3 days (larger numbers are better #stayathome)'
                  }
    else:
        my_yaxis={'type':"log",
                  'title':'Confirmed infected people (source johns hopkins csse, log-scale)'
                  }
traces = []
for each in country_list:
df_plot=df_input_large[df_input_large['country']==each]
        if show_doubling in ('confirmed_DR', 'confirmed_filtered_DR'):  # average doubling rates across states
df_plot=df_plot[['state','country','confirmed','confirmed_filtered','confirmed_DR','confirmed_filtered_DR','date']].groupby(['country','date']).agg(np.mean).reset_index()
else:
df_plot=df_plot[['state','country','confirmed','confirmed_filtered','confirmed_DR','confirmed_filtered_DR','date']].groupby(['country','date']).agg(np.sum).reset_index()
#print(show_doubling)
traces.append(dict(x=df_plot.date,
y=df_plot[show_doubling],
mode='markers+lines',
opacity=0.9,
name=each
)
)
return {
'data': traces,
'layout': dict (
height=720,
xaxis={'title':'Timeline',
'tickangle':-45,
'nticks':20,
'tickfont':dict(size=14,color="#7f7f7f"),
},
yaxis=my_yaxis
)
}
if __name__ == '__main__':
app.run_server(debug=True,use_reloader=False)
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 12 22:57:49 2021
@author: kylei
"""
import matplotlib.pyplot as plt
import time
import numpy as np
import matplotlib as mpl
import copy
# plt.style.use("science")
mpl.rcParams["figure.dpi"] = 300
plt.style.use("ieee")
class GAAP:
def __init__(
self,
*,
termination,
selection,
main_crossover,
aux_crossover,
mutation,
wrapped_model,
):
self.termination = termination
# Operation related
self.selection = selection
self.main_crossover = main_crossover
self.aux_crossover = aux_crossover
self.mutation = mutation
self.wrapped_model = wrapped_model
self.mutation.SetBounds(
self.wrapped_model.main_UB,
self.wrapped_model.main_LB,
self.wrapped_model.aux_UB,
self.wrapped_model.aux_LB,
)
# Population related
self.population = []
self.aux_population = []
self.fitness = []
"""Testing information"""
self.fitness_gen_ = [] # To store best fitness for each generation
self.resample_gen_ = []
self.time = 0
def optimize(
self,
starting_size,
replacement=False,
seed=192,
save_gen=[],
save_name=None,
resample_gen=[],
resample_amount=1,
hover_gens=100
):
np.random.seed(seed)
# For resampling
resampling_fitness_past = 1
resampling_fitness_present = 0
        # Initialize population
if self.wrapped_model.mode == "ANN":
pops = self.wrapped_model.GenerateAroundOrig(starting_size)
elif self.wrapped_model.mode == "CNN":
pops = self.wrapped_model.GenerateAroundOrigCNN(starting_size)
# Not sure why this doesn't return both pops normally
self.population = pops[0]
self.aux_population = pops[1]
start_time = time.time()
cur_gen = 0
number_to_delete = int(self.selection.num_mating / 2)
while cur_gen < self.termination:
self.fitness = list(
map(
self.wrapped_model.CalculateFitness,
self.population,
self.aux_population,
)
)
self.fitness = np.array(self.fitness, dtype="float64")
# Find best and worst samples
best = np.amax(self.fitness)
self.fitness_gen_.append(best)
worst = np.argsort(self.fitness, kind="heapsort")
if cur_gen in resample_gen:
resampling_fitness_present = best
self.wrapped_model.Resample(
worst, self.population, self.aux_population, resample_amount,
resampling_fitness_past, resampling_fitness_present
)
self.resample_gen_.append([cur_gen, best])
resampling_fitness_past = best
self.mutation.SetBounds(self.wrapped_model.main_UB,
self.wrapped_model.main_LB,
self.wrapped_model.aux_UB,
self.wrapped_model.aux_LB)
else:
worst = worst[:number_to_delete]
# Delete all relevant information about worst samples
self.population = np.delete(self.population, worst, axis=0)
self.aux_population = np.delete(self.aux_population, worst, axis=0)
self.fitness = np.delete(self.fitness, worst, axis=0)
# Try to keep complexity low by removing some solutions
if self.population.shape[0] > hover_gens:
number_to_delete = self.selection.num_mating * 2
else:
number_to_delete = int(self.selection.num_mating / 2)
if cur_gen in save_gen:
self.SaveBestInner(cur_gen, save_name)
# Verbose
print("Generation: " + str(cur_gen))
print("Fitness = %.6f" % best)
print("Population = " + str(self.population.shape[0]))
print("-"*10)
print("Fitness len: " + str(len(self.fitness)))
print("Main len: " + str(self.population.shape[0]))
print("Aux len: " + str(self.aux_population.shape[0]) + "\n")
# Select samples
fit_copy = copy.deepcopy(self.fitness)
indices = self.selection.operate(fit_copy)
# Perform crossover
main_children = self.main_crossover.operate(self.population, indices)
aux_children = self.aux_crossover.operate(self.aux_population, indices)
# Replace parents with children
if replacement:
self.population = np.delete(self.population, indices, axis=0)
self.aux_population = np.delete(self.aux_population, indices, axis=0)
# Append children to population
self.population = np.append(self.population, main_children, axis=0)
self.aux_population = np.append(self.aux_population, aux_children, axis=0)
# Perform mutation
self.mutation.operate(self.population, self.aux_population)
cur_gen += 1
np.random.seed(seed + cur_gen)
self.time = time.time() - start_time
return None
    def SaveBestInner(self, cur_gen, save_name):
"""For saving a model at indicated generations"""
        best = np.argmax(self.fitness)  # index of the fittest individual
        best_main = self.population[best]
        best_aux = self.aux_population[best]
model = self.wrapped_model.BuildBest(best_main, best_aux)
model.save(f"{save_name}-{cur_gen}")
def GetBest(self, num_solutions=1):
self.fitness = list(
map(
self.wrapped_model.CalculateFitness,
self.population,
self.aux_population,
)
)
best = np.argsort(self.fitness, kind="heapsort")[::-1][:num_solutions]
model_list = []
best_mains = []
best_auxs = []
for idx in best:
best_main = self.population[idx]
best_aux = self.aux_population[idx]
best_mains.append(best_main)
            best_auxs.append(best_aux)
model = self.wrapped_model.BuildBest(best_main, best_aux)
model_list.append(model)
return model_list, (best_main, best_aux)
def troubleshoot_sizes(self, fitness):
"""Only used to troubleshoot populations and or fitness desyncing.
Args:
fitness ([float64]): Array containing fitness values of individuals
"""
print("Population before deletion: " + str(self.population.shape))
print("Aux Population before deletion: " + str(self.aux_population.shape))
print("Fitness before deletion: " + str(len(fitness)))
print("\n")
print("Population after deletion: " + str(self.population.shape))
print("Aux Population after deletion: " + str(self.aux_population.shape))
print("Fitness after deletion: " + str(len(fitness)))
print("\n")
print("-" * 60)
def plot_time_gen(self, title="GAAP"):
plt.plot(
np.arange(0, self.termination),
self.fitness_gen_,
label="%.2f sec" % self.time,
)
if len(self.resample_gen_) != 0:
self.resample_gen_ = np.array(self.resample_gen_)
plt.scatter(
self.resample_gen_[:, [0]],
self.resample_gen_[:, [1]],
label="Resampling",
marker="o",
color="red",
)
plt.legend(loc="upper left")
plt.xlabel("Generations")
plt.ylabel("Fitness")
plt.title(title)
plt.show()
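# Hedged wiring sketch (every operator object below is an assumption; the
# class only shows the interfaces it expects: selection.operate(fitness),
# crossover.operate(population, indices), mutation.operate(main, aux)):
# ga = GAAP(termination=200, selection=sel, main_crossover=cx_main,
#           aux_crossover=cx_aux, mutation=mut, wrapped_model=model)
# ga.optimize(starting_size=50)
# models, (best_main, best_aux) = ga.GetBest()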
|
python
|
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""decent_q module"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.decent_q.python.ops.fix_neuron_ops import fix_neuron
from tensorflow.contrib.decent_q.utils import DecentQTransform
from tensorflow.contrib.decent_q.python import utils
from tensorflow.contrib.decent_q.python.quantize_graph import QuantizeConfig
from tensorflow.contrib.decent_q.python.quantize_graph import ConvertConstantsToVariables
from tensorflow.contrib.decent_q.python.quantize_graph import CreateOptimizedGraphDef
from tensorflow.contrib.decent_q.python.quantize_graph import CreateQuantizeTrainingGraphDef
from tensorflow.contrib.decent_q.python.quantize_graph import CreateQuantizeCalibrationGraphDef
from tensorflow.contrib.decent_q.python.quantize_graph import CreateQuantizeEvaluationGraphDef
from tensorflow.contrib.decent_q.python.quantize_graph import CreateQuantizeDeployGraphDef
from tensorflow.contrib.decent_q.python.quantize_graph import CreateQuantizeTrainingGraph
from tensorflow.contrib.decent_q.python.quantize_graph import CreateQuantizeEvaluationGraph
from tensorflow.contrib.decent_q.python.quantize_graph import ConvertFoldedBatchnorms
from tensorflow.contrib.decent_q.python.quantize_graph import CreateQuantizeDeployGraph
from tensorflow.contrib.decent_q.python.decent_q import inspect
from tensorflow.contrib.decent_q.python.decent_q import quantize_frozen
from tensorflow.contrib.decent_q.python.decent_q import quantize_train
from tensorflow.contrib.decent_q.python.decent_q import quantize_evaluate
from tensorflow.contrib.decent_q.python.decent_q import deploy_checkpoint
from tensorflow.contrib.decent_q.python.decent_q import check_float_graph
|
python
|
# Kivy stuff
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.lang import Builder
from kivy.properties import ObjectProperty, ListProperty
from kivy.graphics import Color, Ellipse, Line
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.core.window import Window
# For type hinting
from kivy.input import MotionEvent
from typing import Tuple, TYPE_CHECKING
if TYPE_CHECKING:
from persimmon.view.pins.pin import Pin # MYPY HACK
import numpy as np
# Others
from math import pi
import logging
#TODO: Info must be before everything
Builder.load_string("""
<Connection>:
<Info>:
#pos: self.pos
text: 'Spawn new block'
font_size: '15dp'
size_hint: None, None
size: self.texture_size
color: 1, 1, 1, 0.7
padding: 5, 5
canvas.before:
Color:
rgba: 0, 0, 0, 0.7
Rectangle:
pos: self.pos
size: self.texture_size
""")
logger = logging.getLogger(__name__)
class Info(Label):
pass
class Connection(Widget):
start = ObjectProperty(allownone=True)
end = ObjectProperty(allownone=True)
color = ListProperty()
lin = ObjectProperty()
def __init__(self, **kwargs):
""" On this initializer the connection has to check whether the
connection is being made forward or backwards. """
super().__init__(**kwargs)
if self.start:
self.forward = True
# The value is repeated for correctness sake
self.bez_start, self.bez_end = [self.start.center] * 2
with self.canvas.before:
Color(*self.color)
self.lin = Line(bezier=self.bez_start * 4, width=1.5)
self._bind_pin(self.start)
else:
self.forward = False
self.bez_start, self.bez_end = [self.end.center] * 2
with self.canvas.before:
Color(*self.color)
self.lin = Line(bezier=self.bez_end * 4, width=1.5)
self._bind_pin(self.end)
self.warned = False
self.info = Factory.Info(pos=self.bez_start)
Window.add_widget(self.info)
def finish_connection(self, pin: 'Pin'):
""" This functions finishes a connection that has only start or end and
is being currently dragged """
self.remove_info()
if self.forward:
self.end = pin
self._bind_pin(self.end)
else:
self.start = pin
self._bind_pin(self.start)
# Kivy touch override
def on_touch_down(self, touch: MotionEvent) -> bool:
""" On touch down on connection means we are modifying an already
existing connection, not creating a new one. """
# TODO: remove start check?
if self.start and self.start.collide_point(*touch.pos):
self.forward = False
# Remove start edge
self._unbind_pin(self.start)
self.start.on_connection_delete(self)
self.start = None
# This signals that we are dragging a connection
touch.ud['cur_line'] = self
Window.add_widget(self.info)
return True
elif self.end and self.end.collide_point(*touch.pos):
# Same as before but with the other edge
self.forward = True
self._unbind_pin(self.end)
self.end.on_connection_delete(self)
self.end = None
touch.ud['cur_line'] = self
Window.add_widget(self.info)
return True
else:
return False
def follow_cursor(self, newpos, blackboard):
""" This functions makes sure the current end being dragged follows the
cursor. It also checks for type safety and changes the line color
if needed."""
if self.forward:
fixed_edge = self.start
self.bez_end = [*newpos]
self._rebezier()
else:
fixed_edge = self.end
self.bez_start = [*newpos]
self._rebezier()
self.info.pos = [*newpos]
# The conditionals are so complicated because it is necessary to check
# whether or not a pin in a block has been touched, and then check
# the typesafety.
if (self._in_pin(blackboard, newpos) and
not self._in_pin(blackboard, newpos).typesafe(fixed_edge)):
# This conditional represents that the cursor stepped out the pin
self.info.text = 'Connection is not possible'
self._warn()
elif (self._in_pin(blackboard, newpos) and
self._in_pin(blackboard, newpos).typesafe(fixed_edge)):
self.info.text = 'Connect'
if self.warned:
self._unwarn()
else:
self.info.text = 'Spawn new block'
if self.warned:
self._unwarn()
def delete_connection(self):
""" This function deletes both ends (if they exist) and the connection
itself. """
self.parent.remove_widget(self) # Self-destruct
self.remove_info()
if self.start:
self._unbind_pin(self.start)
self.start.on_connection_delete(self)
if self.end:
self._unbind_pin(self.end)
self.end.on_connection_delete(self)
def pulse(self):
""" Makes a connection appear to pulse by modifying its width
continuosly. """
self.it = self._change_width()
next(self.it)
Clock.schedule_interval(lambda _: next(self.it), 0.05) # 20 FPS
def stop_pulse(self):
""" Stops vibrating a connection. It will throw an execution if
the connection is not pulsing right now. """
self.it.throw(StopIteration)
def remove_info(self):
Window.remove_widget(self.info)
# Auxiliary methods
def _in_pin(self, blackboard, pos):
block = blackboard.in_block(*pos)
if block:
pin = block.in_pin(*pos)
if pin:
return pin
return False
# Binding methods
def _unbind_pin(self, pin: 'Pin'):
""" Undos pin's circle and line binding. """
pin.funbind('pos', self._line_bind)
def _bind_pin(self, pin: 'Pin'):
""" Performs pin circle and line binding. """
pin.fbind('pos', self._line_bind)
self._line_bind(pin, pin.pos)
def _line_bind(self, pin: 'Pin', new_pos: Tuple[float, float]):
if pin == self.start:
self.bez_start = pin.center
self._rebezier()
elif pin == self.end:
self.bez_end = pin.center
self._rebezier()
else:
logger.error('No line associated with pin')
# Pulsing methods
def _change_width(self):
""" Ok, so let me explain what is going on, this generator/coroutine
changes the width of the line continuosly using the width_gen
generator. We use it by calling it 20 times per second. The tricky
part is stopping the scheduled calls. The way to tell Kivy to stop
calling is to return a False value, and to do that we need to call
this coroutine itself, which may be executing or not at that
precise moment.
That is where throw comes in, allowing for exceptions to be thrown
on during the execution, hijacking the current execution (like a
fast interruption), we need to return from this exception, in which
we do not care about the value, and then return False on the
regular execution in order to stop the calls."""
try:
for value in self._width_gen():
self.lin.width = value
yield
except StopIteration:
self.lin.width = 2
yield
yield False
def _width_gen(self):
""" Infinity oscillating generator (between 2 and 4) """
val = 0
while True:
yield np.sin(val) + 3
val += pi / 20
# Warn methods
def _warn(self):
""" Changes the current line to a red thick connection. """
self.warned = True
self.canvas.before.remove(self.lin)
with self.canvas.before:
Color(1, 0, 0)
self.lin = Line(points=self.lin.points, width=3)
self._rebezier()
def _unwarn(self):
""" Returns the red thick connection to its normal state. """
self.warned = False
self.canvas.before.remove(self.lin)
with self.canvas.before:
Color(*self.color)
self.lin = Line(points=self.lin.points, width=1.5)
self._rebezier()
# Bezier refreshing
def _rebezier(self):
""" Refreshes bezier curve according to start and end.
It uses the arctan to force the bèzier curve always going a bit
forward before drifting."""
arc_tan = np.arctan2(self.bez_start[1] - self.bez_end[1],
self.bez_start[0] - self.bez_end[0])
abs_angle = np.abs(np.degrees(arc_tan))
# We use the angle value plus a fixed amount to steer the line a bit
start_right = [self.bez_start[0] - 5 - 0.6 * abs_angle,
self.bez_start[1]]
end_left = [self.bez_end[0] + 5 + 0.6 * abs_angle, self.bez_end[1]]
# Y distance to mid point
dist = (min(self.bez_start[0], self.bez_end[0]) +
abs(self.bez_start[0] - self.bez_end[0]) / 2)
        # This updates the Bézier curve graphics
self.lin.bezier = (self.bez_start + start_right +
[dist, self.bez_start[1]] + [dist, self.bez_end[1]] +
end_left + self.bez_end)
def _search_window(self):
return Window
|