max_stars_repo_path (string, 3-269) | max_stars_repo_name (string, 4-119) | max_stars_count (int64, 0-191k) | id (string, 1-7) | content (string, 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
verteste/ui/ui_about.py | Chum4k3r/Verteste | 0 | 7200 | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'aboutdialog.ui'
##
## Created by: Qt User Interface Compiler version 6.1.1
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import * # type: ignore
from PySide6.QtGui import * # type: ignore
from PySide6.QtWidgets import * # type: ignore
class Ui_AboutDialog(QDialog):
    # Dialog box used for creating or editing rows
def __init__(self, parent=None):
QDialog.__init__(self, parent=parent)
self.setupUi(self)
return
def setupUi(self, Dialog):
if not Dialog.objectName():
Dialog.setObjectName(u"Dialog")
Dialog.resize(400, 300)
self.verticalLayout = QVBoxLayout(Dialog)
self.verticalLayout.setObjectName(u"verticalLayout")
self.label = QLabel(Dialog)
self.label.setObjectName(u"label")
font = QFont()
font.setFamilies([u"Sandoval"])
font.setPointSize(18)
self.label.setFont(font)
self.label.setAlignment(Qt.AlignCenter)
self.verticalLayout.addWidget(self.label)
self.label_4 = QLabel(Dialog)
self.label_4.setObjectName(u"label_4")
self.label_4.setTextFormat(Qt.AutoText)
self.label_4.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
self.verticalLayout.addWidget(self.label_4)
self.label_2 = QLabel(Dialog)
self.label_2.setObjectName(u"label_2")
self.verticalLayout.addWidget(self.label_2)
self.label_3 = QLabel(Dialog)
self.label_3.setObjectName(u"label_3")
self.label_3.setAlignment(Qt.AlignCenter)
self.verticalLayout.addWidget(self.label_3)
self.label_5 = QLabel(Dialog)
self.label_5.setObjectName(u"label_5")
self.verticalLayout.addWidget(self.label_5)
self.label_6 = QLabel(Dialog)
self.label_6.setObjectName(u"label_6")
self.label_6.setTextFormat(Qt.MarkdownText)
self.label_6.setAlignment(Qt.AlignCenter)
self.verticalLayout.addWidget(self.label_6)
self.retranslateUi(Dialog)
QMetaObject.connectSlotsByName(Dialog)
# setupUi
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QCoreApplication.translate("Dialog", u"Sobre", None))
self.label.setText(QCoreApplication.translate("Dialog", u"Verteste", None))
self.label_4.setText(QCoreApplication.translate("Dialog", u"Vers\u00e3o 1.0.0", None))
self.label_2.setText(QCoreApplication.translate("Dialog", u"Desenvolvido por:", None))
self.label_3.setText(QCoreApplication.translate("Dialog", u"Jo\u00e3o <NAME>", None))
self.label_5.setText(QCoreApplication.translate("Dialog", u"C\u00f3digo fonte dispon\u00edvel em:", None))
self.label_6.setText(QCoreApplication.translate("Dialog", u"https://github.com/Chum4k3r/Verteste.git", None))
# retranslateUi
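# --- Illustrative usage sketch (not part of the generated file); assumes this
# --- module is imported as-is and that a QApplication exists before any widget.
if __name__ == "__main__":
    import sys
    from PySide6.QtWidgets import QApplication
    app = QApplication(sys.argv)
    dialog = Ui_AboutDialog()  # __init__ above already calls setupUi()
    dialog.exec()              # shows the About dialog modally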
| 2.15625 | 2 |
tests/test_base_protocol.py | Qix-/aiohttp | 3 | 7201 | import asyncio
from contextlib import suppress
from unittest import mock
import pytest
from aiohttp.base_protocol import BaseProtocol
async def test_loop() -> None:
loop = asyncio.get_event_loop()
asyncio.set_event_loop(None)
pr = BaseProtocol(loop)
assert pr._loop is loop
async def test_pause_writing() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop)
assert not pr._paused
pr.pause_writing()
assert pr._paused
async def test_resume_writing_no_waiters() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
pr.pause_writing()
assert pr._paused
pr.resume_writing()
assert not pr._paused
async def test_connection_made() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
assert pr.transport is None
pr.connection_made(tr)
assert pr.transport is not None
async def test_connection_lost_not_paused() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
assert not pr._connection_lost
pr.connection_lost(None)
assert pr.transport is None
assert pr._connection_lost
async def test_connection_lost_paused_without_waiter() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
assert not pr._connection_lost
pr.pause_writing()
pr.connection_lost(None)
assert pr.transport is None
assert pr._connection_lost
async def test_drain_lost() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.connection_lost(None)
with pytest.raises(ConnectionResetError):
await pr._drain_helper()
async def test_drain_not_paused() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
assert pr._drain_waiter is None
await pr._drain_helper()
assert pr._drain_waiter is None
async def test_resume_drain_waited() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.pause_writing()
t = loop.create_task(pr._drain_helper())
await asyncio.sleep(0)
assert pr._drain_waiter is not None
pr.resume_writing()
assert (await t) is None
assert pr._drain_waiter is None
async def test_lost_drain_waited_ok() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.pause_writing()
t = loop.create_task(pr._drain_helper())
await asyncio.sleep(0)
assert pr._drain_waiter is not None
pr.connection_lost(None)
assert (await t) is None
assert pr._drain_waiter is None
async def test_lost_drain_waited_exception() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.pause_writing()
t = loop.create_task(pr._drain_helper())
await asyncio.sleep(0)
assert pr._drain_waiter is not None
exc = RuntimeError()
pr.connection_lost(exc)
with pytest.raises(RuntimeError) as cm:
await t
assert cm.value is exc
assert pr._drain_waiter is None
async def test_lost_drain_cancelled() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.pause_writing()
fut = loop.create_future()
async def wait():
fut.set_result(None)
await pr._drain_helper()
t = loop.create_task(wait())
await fut
t.cancel()
assert pr._drain_waiter is not None
pr.connection_lost(None)
with suppress(asyncio.CancelledError):
await t
assert pr._drain_waiter is None
async def test_resume_drain_cancelled() -> None:
loop = asyncio.get_event_loop()
pr = BaseProtocol(loop=loop)
tr = mock.Mock()
pr.connection_made(tr)
pr.pause_writing()
fut = loop.create_future()
async def wait():
fut.set_result(None)
await pr._drain_helper()
t = loop.create_task(wait())
await fut
t.cancel()
assert pr._drain_waiter is not None
pr.resume_writing()
with suppress(asyncio.CancelledError):
await t
assert pr._drain_waiter is None
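# Illustrative sketch (not an aiohttp test): the write/drain pattern the tests
# above exercise. pause_writing() parks _drain_helper() on a waiter future that
# resume_writing() or connection_lost() releases. _drain_helper is a private
# API; this only shows the control flow, not a supported public usage.
async def _write_with_backpressure(pr: BaseProtocol, payload: bytes) -> None:
    pr.transport.write(payload)  # a full transport buffer triggers pause_writing()
    await pr._drain_helper()     # suspends here until the protocol is resumed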
| 2.203125 | 2 |
main.py | marcusviniciusteixeira/RPAPython | 1 | 7202 |
import PySimpleGUI as sg
import os
import time
import pyautogui
class TelaPython:
def __init__(self):
layout = [
[sg.Text('Usuário',size=(10,0)), sg.Input(size=(20,0),key='usuario')],
[sg.Text('Senha',size=(10,0)), sg.Input(size=(20,0),key='senha')],
[sg.Text('Número',size=(10,0)), sg.Input(size=(20,0),key='num')],
[sg.Text('Time1',size=(10,0)), sg.Slider(range=(0,30), default_value=0, orientation='h',size=(10,15),key='time1')],
[sg.Text('Time2',size=(10,0)), sg.Slider(range=(0,30), default_value=0, orientation='h',size=(10,15),key='time2')],
[sg.Button('Executar')]
]
janela = sg.Window("Macro Portal CLARO").layout(layout)
self.button, self.values = janela.read()
def Iniciar(self):
usuario = self.values['usuario']
senha = self.values['senha']
num = self.values['num']
time1 = self.values['time1']
time2 = self.values['time2']
os.startfile('PortalClaro.exe')
time.sleep(time1)
        pyautogui.moveTo(571, 409)  # USERNAME field
        pyautogui.click()
        pyautogui.write(usuario)
        pyautogui.press('tab')  # PASSWORD field
        pyautogui.write(senha)
pyautogui.moveTo(672, 530)
pyautogui.click()
time.sleep(time2)
pyautogui.moveTo(556, 472)#NUM
pyautogui.click()
pyautogui.write(num)
pyautogui.moveTo(683, 505)
pyautogui.click()
time.sleep(1)
pyautogui.moveTo(576, 437)
pyautogui.click()
tela = TelaPython()
tela.Iniciar() | 2.71875 | 3 |
logistic-regression/plot_binary_losses.py | eliben/deep-learning-samples | 183 | 7203 | # Helper code to plot binary losses.
#
# <NAME> (http://eli.thegreenplace.net)
# This code is in the public domain
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
fig, ax = plt.subplots()
fig.set_tight_layout(True)
xs = np.linspace(-2, 2, 500)
# plot L0/1 loss
ax.plot(xs, np.where(xs < 0, np.ones_like(xs), np.zeros_like(xs)),
color='r', linewidth=2.0, label='$L_{01}$')
# plot square loss
ax.plot(xs, (xs - 1) ** 2, linestyle='-.', label='$L_2$')
# plot hinge loss
ax.plot(xs, np.maximum(np.zeros_like(xs), 1 - xs),
color='g', linewidth=2.0, label='$L_h$')
ax.grid(True)
plt.ylim((-1, 4))
ax.legend()
fig.savefig('loss.png', dpi=80)
plt.show()
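# Reference definitions (sketch) of the three losses plotted above, written as
# functions of the margin m = y * f(x); they match the curves drawn with pyplot.
def zero_one_loss(m):
    return np.where(m < 0, np.ones_like(m), np.zeros_like(m))
def square_loss(m):
    return (m - 1) ** 2
def hinge_loss(m):
    return np.maximum(np.zeros_like(m), 1 - m)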
| 2.984375 | 3 |
utils/watch-less.py | K-Fitzpatrick/crop_planner | 91 | 7204 | #!/usr/bin/env python3
################################
# Development tool
# Auto-compiles style.less to style.css
#
# Requires lessc and less clean css to be installed:
# npm install -g less
# npm install -g less-plugin-clean-css
################################
import os, time
from os import path
from math import floor
from _helper import *
# Main application
class Main:
style_less = "style.less"
style_css = "style.css"
def __init__(self):
clear()
os.chdir("../")
header("Watching style.less for changes\nctrl+c to exit")
print()
while True:
if not os.path.exists(self.style_less):
print(self.style_less + " does not exist. Exiting.")
return
if not os.path.exists(self.style_css):
self.compile()
elif path.getmtime(self.style_less) > path.getmtime(self.style_css):
self.compile()
time.sleep(.2)
def compile(self):
start = time.time()
os.system("lessc " + self.style_less + " " + self.style_css + " --clean-css")
touch(self.style_css, path.getmtime(self.style_less))
print("Recompiled [" + str(floor((time.time() - start) * 100)) + " ms]")
print()
# Run application
if __name__ == "__main__":
try:
app = Main()
except KeyboardInterrupt:
print("Exiting") | 2.953125 | 3 |
testapp/app/app/tests/test_export_action.py | instituciones-abiertas/django-admin-export-action | 5 | 7205 | # -- encoding: UTF-8 --
import json
import uuid
from admin_export_action import report
from admin_export_action.admin import export_selected_objects
from admin_export_action.config import default_config, get_config
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, RequestFactory
from django.urls import reverse
from django.utils.http import urlencode
from news.models import Attachment, Category, News, NewsTag, Video
from news.admin import NewsAdmin
class FakeDict(object):
def __getitem__(self, key):
return object()
class WS(object):
def __init__(self):
self.rows = []
self.cells = []
self.column_dimensions = FakeDict()
def cell(self, row, column):
pass
def append(self, row):
self.rows.append(row)
class FakeQueryset(object):
def __init__(self, num):
self.num = num
self.model = News
def values_list(self, field, flat=True):
return [i for i in range(1, self.num)]
class AdminExportActionTest(TestCase):
fixtures = ["tests.json"]
def test_config(self):
self.assertEqual(default_config.get('ENABLE_SITEWIDE'), True)
self.assertEqual(get_config('ENABLE_SITEWIDE'), False)
with self.settings(ADMIN_EXPORT_ACTION=None):
self.assertEqual(get_config('ENABLE_SITEWIDE'), True)
def test_export_selected_objects_session(self):
factory = RequestFactory()
request = factory.get('/news/admin/')
request.session = {}
modeladmin = NewsAdmin(model=News, admin_site=AdminSite())
qs = FakeQueryset(2000)
self.assertEqual(len(request.session), 0)
export_selected_objects(modeladmin, request, qs)
self.assertEqual(len(request.session), 1)
els = list(request.session.items())
self.assertEqual(els[0][1], qs.values_list('id'))
def test_get_field_verbose_name(self):
res = report.get_field_verbose_name(News.objects, 'tags__name')
assert res == 'all tags verbose name'
res = report.get_field_verbose_name(News.objects, 'share')
assert res == 'share'
def test_list_to_method_response_should_return_200_and_correct_values(
self):
admin = User.objects.get(pk=1)
data, messages = report.report_to_list(News.objects.all(),
['id', 'title', 'status'],
admin)
method = getattr(report, 'list_to_{}_response'.format('html'))
res = method(data)
assert res.status_code == 200
method = getattr(report, 'list_to_{}_response'.format('csv'))
res = method(data)
assert res.status_code == 200
assert res.content == b'1,<NAME>,published\r\n2,La mano de Dios,draft\r\n'
method = getattr(report, 'list_to_{}_response'.format('xlsx'))
res = method(data)
assert res.status_code == 200
method = getattr(report, 'list_to_{}_response'.format('json'))
res = method(data, header=['id', 'title', 'status'])
d = json.loads(res.content)
assert d[0]['id'] == 1
assert d[0]['title'] == "<NAME>"
assert d[0]['status'] == 'published'
assert d[1]['id'] == 2
assert d[1]['title'] == "La mano de Dios"
assert d[1]['status'] == 'draft'
assert res.status_code == 200
data, messages = report.report_to_list(News.objects.all(),
['id', 'title', 'status'],
admin,
raw_choices=True)
method = getattr(report, 'list_to_{}_response'.format('json'))
res = method(data, header=['id', 'title', 'status'])
d = json.loads(res.content)
assert d[0]['id'] == 1
assert d[0]['title'] == "<NAME>"
assert d[0]['status'] == 2
assert d[1]['id'] == 2
assert d[1]['title'] == "La mano de Dios"
assert d[1]['status'] == 1
assert res.status_code == 200
def test_list_to_csv_response_should_have_expected_content(self):
admin = User.objects.get(pk=1)
data, messages = report.report_to_list(News.objects.all(),
['id', 'title'], admin)
method = getattr(report, 'list_to_{}_response'.format('csv'))
res = method(data)
assert res.status_code == 200
assert res.content == b'1,<NAME>\r\n2,La mano de Dios\r\n'
def test_list_to_json_response_should_have_expected_content(self):
admin = User.objects.get(pk=1)
data, messages = report.report_to_list(News.objects.all(),
['id', 'title'], admin)
method = getattr(report, 'list_to_{}_response'.format('json'))
res = method(data, header=['id', 'title'])
d = json.loads(res.content)
assert d[0]['id'] == 1
assert d[0]['title'] == "<NAME>"
assert d[1]['id'] == 2
assert d[1]['title'] == "La mano de Dios"
assert res.status_code == 200
def test_admin_export_post_should_return_200(self):
for output_format in ['html', 'csv', 'xslx', 'json']:
params = {
'ct':
ContentType.objects.get_for_model(News).pk,
'ids':
','.join(
repr(pk)
for pk in News.objects.values_list('pk', flat=True))
}
data = {
"title": "on",
"__format": output_format,
}
url = "{}?{}".format(reverse('admin_export_action:export'),
urlencode(params))
self.client.login(username='admin', password='<PASSWORD>')
response = self.client.post(url, data=data)
assert response.status_code == 200
def test_admin_export_get_should_return_200(self):
params = {
'ct':
ContentType.objects.get_for_model(News).pk,
'ids':
','.join(
repr(pk) for pk in News.objects.values_list('pk', flat=True))
}
url = "{}?{}".format(reverse('admin_export_action:export'),
urlencode(params))
self.client.login(username='admin', password='<PASSWORD>')
response = self.client.get(url)
assert response.status_code == 200
def test_admin_export_with_related_get_should_return_200(self):
params = {
'related': True,
'model_ct': ContentType.objects.get_for_model(News).pk,
'field': 'category',
'path': 'category.name',
}
url = "{}?{}".format(reverse('admin_export_action:export'),
urlencode(params))
self.client.login(username='admin', password='<PASSWORD>')
response = self.client.get(url)
assert response.status_code == 200
def test_admin_export_with_related_of_indirect_field_get_should_return_200(
self):
params = {
'related': True,
'model_ct': ContentType.objects.get_for_model(News).pk,
'field': 'newstag',
'path': 'newstag.id',
}
url = "{}?{}".format(reverse('admin_export_action:export'),
urlencode(params))
self.client.login(username='admin', password='<PASSWORD>')
response = self.client.get(url)
assert response.status_code == 200
def test_admin_export_with_unregistered_model_should_raise_ValueError(
self):
params = {
'ct':
ContentType.objects.get_for_model(NewsTag).pk,
'ids':
','.join(
repr(pk)
for pk in NewsTag.objects.values_list('pk', flat=True))
}
url = "{}?{}".format(reverse('admin_export_action:export'),
urlencode(params))
self.client.login(username='admin', password='<PASSWORD>')
try:
self.client.get(url)
self.fail()
except ValueError:
pass
def test_admin_action_should_redirect_to_export_view(self):
objects = News.objects.all()
ids = [repr(obj.pk) for obj in objects]
data = {
"action": "export_selected_objects",
"_selected_action": ids,
}
url = reverse('admin:news_news_changelist')
self.client.login(username='admin', password='<PASSWORD>')
response = self.client.post(url, data=data)
expected_url = "{}?ct={ct}&ids={ids}".format(
reverse('admin_export_action:export'),
ct=ContentType.objects.get_for_model(News).pk,
ids=','.join(reversed(ids)))
assert response.status_code == 302
assert response.url.endswith(expected_url)
def test_export_with_related_should_return_200(self):
for output_format in ['html', 'csv', 'xslx', 'json']:
news = News.objects.all()
params = {
'ct':
ContentType.objects.get_for_model(News).pk,
'ids':
','.join(
repr(pk)
for pk in News.objects.values_list('pk', flat=True))
}
data = {
'id': 'on',
'title': 'on',
'status': 'on',
'category__name': 'on',
'tags__name': 'on',
'newstag__created_on': 'on',
"__format": output_format,
}
url = "{}?{}".format(reverse('admin_export_action:export'),
urlencode(params))
self.client.login(username='admin', password='<PASSWORD>')
response = self.client.post(url, data=data)
assert response.status_code == 200
assert response.content
def test_build_sheet_convert_function(self):
data = [
['1', 5, 'convert', 9, {"foo": "bar"}, [1, 2], uuid.UUID("12345678123456781234567812345678")],
]
ws = WS()
report.build_sheet(data, ws, sheet_name='report', header=None, widths=None)
self.assertEqual(ws.rows, [['1', 5, 'converted', 9, "{'foo': 'bar'}", '[1, 2]', '12345678-1234-5678-1234-567812345678']])
| 2.09375 | 2 |
shapeshifter/tests/conftest.py | martinogden/django-shapeshifter | 164 | 7206 |
from pytest_djangoapp import configure_djangoapp_plugin
pytest_plugins = configure_djangoapp_plugin(
extend_INSTALLED_APPS=[
'django.contrib.sessions',
'django.contrib.messages',
],
extend_MIDDLEWARE=[
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
)
| 1.289063 | 1 |
face_attribute_verification.py | seymayucer/FacialPhenotypes | 2 | 7207 |
import argparse
import numpy as np
from sklearn.model_selection import StratifiedKFold
import sklearn
import sklearn.metrics
import sklearn.preprocessing
import cv2
import datetime
import mxnet as mx
from mxnet import ndarray as nd
import pandas as pd
from numpy import linalg as line
import logging
logging.basicConfig(
format="%(asctime)s %(message)s", datefmt="%m/%d/%Y %I:%M:%S %p", level=logging.INFO
)
class FaceVerification:
def __init__(self, model=None, batch_size=32, data_dir=None):
super().__init__()
logging.info("Face Verification for RFW.")
self.data_dir = data_dir
self.image_size = 112
self.batch_size = batch_size
self.model = model
def load_model(self, model_dir=None):
logging.info("Model Loading")
ctx = mx.gpu(0)
sym, arg_params, aux_params = mx.model.load_checkpoint(model_dir, 1)
all_layers = sym.get_internals()
sym = all_layers["fc1_output"]
self.model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
self.model.bind(
data_shapes=[
("data", (self.batch_size, 3, self.image_size, self.image_size))
]
)
self.model.set_params(arg_params, aux_params)
return self.model
def load_images(self, inp_csv_file):
logging.info("Image Data Loading")
issame_list, data_list = [], []
pairs = pd.read_csv(inp_csv_file)
# data_list = list(
# np.empty((2, pairs.shape[0] * 2, 3, self.image_size, self.image_size))
# )
for flip in [0, 1]:
data = nd.empty((pairs.shape[0] * 2, 3, self.image_size, self.image_size))
data_list.append(data)
j = 0
for i, row in pairs.iterrows():
if i % 1000 == 0:
logging.info("processing {}".format(i))
issame_list.append(row.issame)
path1 = "{}/{}/{}_{:04d}.jpg".format(
self.data_dir,
row.Class_ID_s1,
row.Class_ID_s1.split("/")[1],
int(row.img_id_s1),
)
path2 = "{}/{}/{}_{:04d}.jpg".format(
self.data_dir,
row.Class_ID_s2,
row.Class_ID_s2.split("/")[1],
int(row.img_id_s2),
)
im1 = cv2.imread(path1)
im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2RGB)
im1 = np.transpose(im1, (2, 0, 1)) # 3*112*112, RGB
im1 = mx.nd.array(im1)
im2 = cv2.imread(path2)
im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2RGB)
im2 = np.transpose(im2, (2, 0, 1)) # 3*112*112, RGB
im2 = mx.nd.array(im2)
for flip in [0, 1]:
if flip == 1:
im1 = mx.ndarray.flip(im1, 2)
data_list[flip][j][:] = im1
for flip in [0, 1]:
if flip == 1:
im2 = mx.ndarray.flip(im2, 2)
data_list[flip][j + 1][:] = im2
# data_list[flip][i][:] = img
j = j + 2
# bins shape should be 2,12000,3,112,112
# data = np.asarray(data_list)
self.issame = np.asarray(issame_list)
self.data = data_list
logging.info("Pairs are loaded, shape: 2x{}.".format(self.data[0].shape))
return self.data, self.issame, pairs.shape
def clean_data(self):
self.data = None
self.issame = None
def verify(self, model=None):
data_list = self.data
embeddings_list = []
time_consumed = 0
_label = nd.ones((self.batch_size,))
for i in range(len(data_list)):
data = data_list[i]
embeddings = None
ba = 0
while ba < data.shape[0]:
bb = min(ba + self.batch_size, data.shape[0])
count = bb - ba
_data = nd.slice_axis(data, axis=0, begin=bb - self.batch_size, end=bb)
time0 = datetime.datetime.now()
db = mx.io.DataBatch(data=(_data,), label=(_label,))
self.model.forward(db, is_train=False)
net_out = self.model.get_outputs()
_embeddings = net_out[0].asnumpy()
time_now = datetime.datetime.now()
diff = time_now - time0
time_consumed += diff.total_seconds()
if embeddings is None:
embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
embeddings[ba:bb, :] = _embeddings[(self.batch_size - count) :, :]
ba = bb
embeddings_list.append(embeddings)
_xnorm = 0.0
_xnorm_cnt = 0
for embed in embeddings_list:
for i in range(embed.shape[0]):
_em = embed[i]
_norm = np.linalg.norm(_em)
_xnorm += _norm
_xnorm_cnt += 1
_xnorm /= _xnorm_cnt
acc1 = 0.0
std1 = 0.0
embeddings = embeddings_list[0] + embeddings_list[1]
embeddings = sklearn.preprocessing.normalize(embeddings)
print(embeddings.shape)
print("infer time", time_consumed)
tpr, fpr, accuracy, best_thresholds = self.evaluate(
embeddings, self.issame, nrof_folds=10
)
acc2, std2 = np.mean(accuracy), np.std(accuracy)
logging.info("Accuracy {}".format(acc2))
return tpr, fpr, acc2, std2
def evaluate(self, embeddings, actual_issame, nrof_folds=10):
# Calculate evaluation metrics
thresholds = np.arange(-1, 1, 0.001)
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
tpr, fpr, accuracy, best_thresholds = self.calculate_roc(
thresholds,
embeddings1,
embeddings2,
np.asarray(actual_issame),
nrof_folds=nrof_folds,
)
return tpr, fpr, accuracy, best_thresholds
def calculate_roc(
self, thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10
):
assert embeddings1.shape[1] == embeddings2.shape[1]
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
# k_fold = LFold(n_splits=nrof_folds, shuffle=False)
k_fold = StratifiedKFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds, nrof_thresholds))
fprs = np.zeros((nrof_folds, nrof_thresholds))
tnrs = np.zeros((nrof_folds, nrof_thresholds))
fnrs = np.zeros((nrof_folds, nrof_thresholds))
f1s = np.zeros((nrof_folds))
accuracy = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
veclist = np.concatenate((embeddings1, embeddings2), axis=0)
meana = np.mean(veclist, axis=0)
embeddings1 -= meana
embeddings2 -= meana
dist = np.sum(embeddings1 * embeddings2, axis=1)
dist = dist / line.norm(embeddings1, axis=1) / line.norm(embeddings2, axis=1)
for fold_idx, (train_set, test_set) in enumerate(
k_fold.split(indices, actual_issame)
):
# print(train_set.shape, actual_issame[train_set].sum())
# print(test_set.shape, actual_issame[test_set].sum())
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, _, _, acc_train[threshold_idx], f1 = self.calculate_accuracy(
threshold, dist[train_set], actual_issame[train_set]
)
best_threshold_index = np.argmax(acc_train)
# print('threshold', thresholds[best_threshold_index])
for threshold_idx, threshold in enumerate(thresholds):
(
tprs[fold_idx, threshold_idx],
fprs[fold_idx, threshold_idx],
tnrs[fold_idx, threshold_idx],
fnrs[fold_idx, threshold_idx],
_,
_,
) = self.calculate_accuracy(
threshold, dist[test_set], actual_issame[test_set]
)
_, _, _, _, accuracy[fold_idx], f1s[fold_idx] = self.calculate_accuracy(
thresholds[best_threshold_index],
dist[test_set],
actual_issame[test_set],
)
tpr = np.mean(tprs, 0)[best_threshold_index]
fpr = np.mean(fprs, 0)[best_threshold_index]
# tnr = np.mean(tnrs, 0)[best_threshold_index]
# fnr = np.mean(fnrs, 0)[best_threshold_index]
return tpr, fpr, accuracy, thresholds[best_threshold_index]
def calculate_accuracy(self, threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
actual_issame = np.less(actual_issame, 0.5)
tn, fp, fn, tp = sklearn.metrics.confusion_matrix(
actual_issame, predict_issame
).ravel()
tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
tnr = 0 if (fp + tn == 0) else float(tn) / float(fp + tn)
fnr = 0 if (fn + tp == 0) else float(fn) / float(fn + tp)
acc = float(tp + tn) / dist.size
f1 = sklearn.metrics.f1_score(predict_issame, actual_issame)
return tpr, fpr, tnr, fnr, acc, f1
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Face Verification for RFW")
parser.add_argument(
"--data_dir", type=str, default="RFW/test/aligned_data", help="dataset root"
)
parser.add_argument(
"--pair_file",
type=str,
default="./AttributePairs/eye_narrow_pairs_6000_selected.csv",
help="pair file to test",
)
parser.add_argument(
"--model_dir", type=str, default="/model/", help="pre-trained model directory"
)
parser.add_argument("--batch_size", type=int, default="32", help="batch_size")
args = parser.parse_args()
validation = FaceVerification(
batch_size=args.batch_size, model=None, data_dir=args.data_dir
)
validation.load_model(model_dir=args.model_dir)
_, _, _shape = validation.load_images(args.pair_file)
tpr, fpr, acc, std = validation.verify()
logging.info(
"Testing Accuracy {} for {} in shape {}".format(acc, args.pair_file, _shape[0])
)
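# Example invocation sketch assembled from the argparse defaults above; the
# model prefix and CSV path are placeholders, not files known to exist:
# python face_attribute_verification.py \
#     --data_dir RFW/test/aligned_data \
#     --pair_file ./AttributePairs/eye_narrow_pairs_6000_selected.csv \
#     --model_dir /path/to/model-prefix \
#     --batch_size 32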
| 2.359375 | 2 |
pkgs/applications/virtualization/virt-manager/custom_runner.py | mornfall/nixpkgs | 1 | 7208 | #!/usr/bin/python -t
# this script was written to use /etc/nixos/nixpkgs/pkgs/development/python-modules/generic/wrap.sh
# which already automates python executable wrapping by extending the PATH/pythonPath
# from http://docs.python.org/library/subprocess.html
# Warning Invoking the system shell with shell=True can be a security hazard if combined with untrusted input. See the warning under Frequently Used Arguments for details.
from subprocess import Popen, PIPE, STDOUT
cmd = 'PYTHON_EXECUTABLE_PATH -t THE_CUSTOM_PATH/share/virt-manager/THE_CUSTOM_PROGRAM.py'
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
output = p.stdout.read()
print(output)
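# Hedged alternative (illustrative, not in the original script): passing the
# command as an argument list avoids shell=True and the injection hazard the
# quoted warning describes; the placeholders are the same as above.
# cmd_list = ['PYTHON_EXECUTABLE_PATH', '-t',
#             'THE_CUSTOM_PATH/share/virt-manager/THE_CUSTOM_PROGRAM.py']
# p = Popen(cmd_list, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)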
| 1.882813 | 2 |
jsonform/fields.py | Pix-00/jsonform | 0 | 7209 | import base64
import datetime
from abc import ABC, abstractmethod
from .conditions import AnyValue
from .errors import FieldError, FormError
__all__ = [
'Field', 'StringField', 'IntegerField', 'FloatField', 'BooleanField',
'DateTimeField', 'DateField', 'TimeField', 'ListField','SetField', 'EnumField', 'BytesField'
]
class Field(ABC):
_default = None
def __new__(cls, *args, **kwargs):
if 'init' in kwargs:
kwargs.pop('init')
return super().__new__(cls)
return UnboundField(cls, *args, **kwargs)
def __init__(self,
condition=AnyValue(),
optional: bool = False,
default=None,
init=False):
self.condition = condition
self.optional = optional
self.default = default or self._default
self._data = None
self.is_empty = False
@property
def data(self):
return self._data
def mark_empty(self):
if not self.optional:
raise FieldError('cannot be blank')
self.is_empty = True
if callable(self.default):
self._data = self.default()
else:
self._data = self.default
@abstractmethod
def process_data(self, value):
self.condition.check(self)
class UnboundField:
def __init__(self, field_cls, *args, **kwargs):
self.field_cls = field_cls
self.args = args
self.kwargs = kwargs
self.kwargs['init'] = True
def bind(self):
return self.field_cls(*self.args, **self.kwargs)
class StringField(Field):
_default = ''
def process_data(self, value):
if not isinstance(value, str):
raise FieldError('invalid string')
self._data = value
super().process_data(value)
class IntegerField(Field):
_default = 0
def process_data(self, value):
if not isinstance(value, int):
raise FieldError('invalid integer')
self._data = value
super().process_data(value)
class FloatField(Field):
_default = 0.0
def process_data(self, value):
if not isinstance(value, float):
raise FieldError('invalid float')
self._data = value
super().process_data(value)
class BooleanField(Field):
def process_data(self, value):
if not isinstance(value, bool):
raise FieldError('invalid boolean')
self._data = value
super().process_data(value)
class DateTimeField(Field):
def __init__(self, pattern='%Y-%m-%dT%H:%M:%S', **kwargs):
super().__init__(**kwargs)
self.pattern = pattern
def process_data(self, value):
try:
self._data = datetime.datetime.strptime(value, self.pattern)
except ValueError:
raise FieldError('invalid datetime')
super().process_data(value)
class DateField(DateTimeField):
def __init__(self, pattern='%Y-%m-%d', **kwargs):
super().__init__(pattern, **kwargs)
def process_data(self, value):
try:
self._data = datetime.datetime.strptime(value, self.pattern).date()
except ValueError:
raise FieldError('invalid date')
super().process_data(value)
class TimeField(DateTimeField):
def __init__(self, pattern='%H:%M:%S', **kwargs):
super().__init__(pattern, **kwargs)
    def process_data(self, value):
try:
self._data = datetime.datetime.strptime(value, self.pattern).time()
except ValueError:
raise FieldError('invalid time')
super().process_data(value)
class EnumField(Field):
def __init__(self, enum_class, **kwargs):
super().__init__(**kwargs)
self.enum_class = enum_class
def process_data(self, value):
try:
enum_obj = self.enum_class[value]
except KeyError:
raise FieldError('invalid enum')
self._data = enum_obj
super().process_data(value)
class BytesField(Field):
def __init__(self, length, **kwargs):
super().__init__(**kwargs)
self.length = length
def process_data(self, value):
try:
            self._data = base64.decodebytes(value)
except (ValueError, TypeError):
raise FieldError('invalid base64 string')
        if len(self._data) != self.length:
raise FieldError('invalid length')
super().process_data(value)
class ListField(Field):
def __init__(self, field, default=list, **kwargs):
self.field = field
self.data_ = None
super().__init__(default=default, **kwargs)
@property
def data(self):
if not self.data_:
self.data_ = [field.data for field in self._data]
return self.data_
def process_data(self, value):
if not isinstance(value, list):
raise FieldError('invalid list')
self._data = list()
e = FieldError()
for i, val in enumerate(value):
field = self.field.bind()
try:
field.process_data(val)
except FieldError as e_:
e[i] = e_.error
self._data.append(field)
if e:
raise e
super().process_data(value)
class SetField(Field):
def __init__(self, field, default=set, **kwargs):
self.field = field
self.data_ = None
super().__init__(default=default, **kwargs)
@property
def data(self):
if not self.data_:
self.data_ = {field.data for field in self._data}
return self.data_
def process_data(self, value):
if not isinstance(value, list):
raise FieldError('invalid list')
self._data = set()
e = FieldError()
for i, val in enumerate(set(value)):
field = self.field.bind()
try:
field.process_data(val)
except FieldError as e_:
e[i] = e_.error
self._data.add(field)
if e:
raise e
super().process_data(value)
class SubForm(Field):
def __init__(self, form, **kwargs):
self.form = form
kwargs.pop('condition', None)
super().__init__(**kwargs)
def process_data(self, value):
try:
self.form.process(jsondata=value)
except FormError as e_:
e = FieldError()
if e_.error:
e['error'] = e_.error
if e_.f_errors:
e['f_errors'] = e_.f_errors
raise e
self._data = {name: self.form[name] for name in self.form.fields}
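# Minimal usage sketch (assumption: fields can be exercised outside a form; the
# library's Form class lives elsewhere and is not shown here). Field classes
# return an UnboundField at declaration time and are instantiated via bind().
def _example_list_field():
    unbound = ListField(StringField())
    field = unbound.bind()
    field.process_data(["a", "b"])
    assert field.data == ["a", "b"]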
| 2.828125 | 3 |
napari/layers/_source.py | napari/napari-gui | 7 | 7210 |
from __future__ import annotations
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Optional, Tuple
from magicgui.widgets import FunctionGui
from pydantic import BaseModel
class Source(BaseModel):
"""An object to store the provenance of a layer.
Parameters
----------
path: str, optional
        filepath/url associated with layer
reader_plugin: str, optional
name of reader plugin that loaded the file (if applicable)
sample: Tuple[str, str], optional
Tuple of (sample_plugin, sample_name), if layer was loaded via
`viewer.open_sample`.
widget: FunctionGui, optional
magicgui widget, if the layer was added via a magicgui widget.
"""
path: Optional[str] = None
reader_plugin: Optional[str] = None
sample: Optional[Tuple[str, str]] = None
widget: Optional[FunctionGui] = None
class Config:
arbitrary_types_allowed = True
frozen = True
def __deepcopy__(self, memo):
"""Custom deepcopy implementation.
this prevents deep copy. `Source` doesn't really need to be copied
(i.e. if we deepcopy a layer, it essentially has the same `Source`).
Moreover, deepcopying a widget is challenging, and maybe odd anyway.
"""
return self
# layer source context management
_LAYER_SOURCE: ContextVar[dict] = ContextVar('_LAYER_SOURCE', default={})
@contextmanager
def layer_source(**source_kwargs):
"""Creates context in which all layers will be given `source_kwargs`.
The module-level variable `_LAYER_SOURCE` holds a set of key-value pairs
that can be used to create a new `Source` object. Any routine in napari
that may result in the creation of a new layer (such as opening a file,
using a particular plugin, or calling a magicgui widget) can use this
context manager to declare that any layers created within the context
result from a specific source. (This applies even if the layer
isn't "directly" created in the context, but perhaps in some sub-function
within the context).
`Layer.__init__` will call :func:`current_source`, to query the current
state of the `_LAYER_SOURCE` variable.
Contexts may be stacked, meaning a given layer.source can reflect the
actions of multiple events (for instance, an `open_sample` call that in
turn resulted in a `reader_plugin` opening a file). However, the "deepest"
context will "win" in the case where multiple calls to `layer_source`
provide conflicting values.
Parameters
----------
**source_kwargs
keys/values should be valid parameters for :class:`Source`.
Examples
--------
>>> with layer_source(path='file.ext', reader_plugin='plugin'): # doctest: +SKIP
... points = some_function_that_creates_points()
...
>>> assert points.source == Source(path='file.ext', reader_plugin='plugin') # doctest: +SKIP
"""
token = _LAYER_SOURCE.set({**_LAYER_SOURCE.get(), **source_kwargs})
try:
yield
finally:
_LAYER_SOURCE.reset(token)
def current_source():
"""Get the current layer :class:`Source` (inferred from context).
The main place this function is used is in :meth:`Layer.__init__`.
"""
return Source(**_LAYER_SOURCE.get())
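# Minimal usage sketch (not part of the module): the stacking rule described in
# layer_source's docstring, where the deepest context wins on conflicting keys.
def _example_stacked_sources():
    with layer_source(reader_plugin='outer-plugin'):
        with layer_source(reader_plugin='inner-plugin', path='file.ext'):
            src = current_source()
            assert src.reader_plugin == 'inner-plugin'
            assert src.path == 'file.ext'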
| 2.484375 | 2 |
tests/unit/test_BaseDirection.py | vpalex999/project-mars | 0 | 7211 |
import pytest
import src.constants as cnst
from src.directions import BaseDirection
@pytest.fixture
def base_direction():
return BaseDirection()
def test_init_BaseDirection(base_direction):
assert isinstance(base_direction, BaseDirection)
def test_current_direction_is(base_direction):
assert base_direction.current == cnst.NORTH
@pytest.mark.parametrize(["turn_func", "expected_direction"], [
# turn_left
(lambda f: f.turn_left(), cnst.WEST),
(lambda f: f.turn_left().turn_left(), cnst.SOUTH),
(lambda f: f.turn_left().turn_left().turn_left(), cnst.EAST),
(lambda f: f.turn_left().turn_left().turn_left().turn_left(), cnst.NORTH),
(lambda f: f.turn_left().turn_left().turn_left().turn_left().turn_left(), cnst.WEST),
# turn_right()
(lambda f: f.turn_right(), cnst.EAST),
(lambda f: f.turn_right().turn_right(), cnst.SOUTH),
(lambda f: f.turn_right().turn_right().turn_right(), cnst.WEST),
(lambda f: f.turn_right().turn_right().turn_right().turn_right(), cnst.NORTH),
(lambda f: f.turn_right().turn_right().turn_right().turn_right().turn_right(), cnst.EAST),
# any combinations
(lambda f: f.turn_left().turn_right(), cnst.NORTH),
(lambda f: f.turn_left().turn_left().turn_right(), cnst.WEST),
(lambda f: f.turn_left().turn_right().turn_left(), cnst.WEST),
(lambda f: f.turn_left().turn_right().turn_left().turn_right().turn_right(), cnst.EAST),
]
)
def test_turn_direction(base_direction, turn_func, expected_direction):
turn_func(base_direction)
assert base_direction.current == expected_direction
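# Hedged sketch of an implementation consistent with the tests above; the real
# BaseDirection lives in src/directions.py and may differ. Directions cycle
# N -> E -> S -> W when turning right, and chained calls work because each
# turn_* method returns self.
class _SketchDirection:
    _ORDER = [cnst.NORTH, cnst.EAST, cnst.SOUTH, cnst.WEST]
    def __init__(self):
        self._idx = 0
    @property
    def current(self):
        return self._ORDER[self._idx]
    def turn_right(self):
        self._idx = (self._idx + 1) % 4
        return self
    def turn_left(self):
        self._idx = (self._idx - 1) % 4
        return self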
| 2.9375 | 3 |
OpenCV-Computer-Vision-Examples-with-Python-A-Complete-Guide-for-Dummies-master/Source Code/opencv_operations/draw-circles.py | Payal197bhadra/ComputerVision | 6 | 7212 | import numpy as np
import cv2
#define a canvas of size 300x300 px, with 3 channels (R,G,B) and data type as 8 bit unsigned integer
canvas = np.zeros((300,300,3), dtype ="uint8")
#define color
#draw a circle
#arguments are canvas/image, midpoint, radius, color, thickness(optional)
#display in cv2 window
green = (0,255,0)
cv2.circle(canvas,(100,100), 10, green)
cv2.imshow("Single circle", canvas)
cv2.waitKey(0)
# draw concentric white circles
# calculate the center point of canvas
# generate circles using for loop
# clearning the canvas
canvas = np.zeros((300,300,3), dtype ="uint8")
white = (255,255,255)
(centerX, centerY) = (canvas.shape[1]//2, canvas.shape[0]//2)
for r in range(0,175,25):
cv2.circle(canvas, (centerX,centerY), r, white)
cv2.imshow("concentric circles", canvas)
cv2.waitKey(0)
# generate random radius, center point, color
# draw circles in for loop
canvas = np.zeros((300,300,3), dtype ="uint8")
for i in range(0, 25):
radius = np.random.randint(5, high = 200)
color = np.random.randint(0, high = 256, size = (3,)).tolist()
pt = np.random.randint(0, high = 300, size = (2,))
cv2.circle(canvas, tuple(pt), radius, color, -1)
cv2.imshow("Canvas", canvas)
cv2.waitKey(0) | 3.46875 | 3 |
tmux_cssh/main.py | cscutcher/tmux_cssh | 0 | 7213 |
# -*- coding: utf-8 -*-
"""
Main Script
"""
import logging
import argh
import sarge
import tmuxp
DEV_LOGGER = logging.getLogger(__name__)
def get_current_session(server=None):
'''
Seems to be no easy way to grab current attached session in tmuxp so
this provides a simple alternative.
'''
server = tmuxp.Server() if server is None else server
session_name = sarge.get_stdout('tmux display-message -p "#S"').strip()
session = server.findWhere({"session_name": session_name})
return session
@argh.arg('commands', nargs='+')
def clustered_window(commands):
'''
Creates new clustered window on session with commands.
A clustered session is one where you operate on all panes/commands at once
using the synchronized-panes option.
:param commands: Sequence of commands. Each one will run in its own pane.
'''
session = get_current_session()
window = session.new_window()
# Create additional panes
while len(window.panes) < len(commands):
window.panes[-1].split_window()
for pane, command in zip(window.panes, commands):
pane.send_keys(command)
window.select_layout('tiled')
window.set_window_option('synchronize-panes', 'on')
return window
@argh.arg('hosts', nargs='+')
def clustered_ssh(hosts):
'''
Creates new cluster window with an ssh connection to each host.
A clustered session is one where you operate on all panes/commands at once
using the synchronized-panes option.
:param hosts: Sequence of hosts to connect to.
'''
return clustered_window(
['ssh \'{}\''.format(host) for host in hosts])
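# Hypothetical CLI wiring sketch (not in the original module): argh-decorated
# commands such as clustered_window/clustered_ssh are typically exposed through
# an ArghParser dispatcher like this.
def _example_main():
    parser = argh.ArghParser()
    parser.add_commands([clustered_window, clustered_ssh])
    parser.dispatch()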
| 2.84375 | 3 |
nautobot_capacity_metrics/management/commands/__init__.py | david-kn/nautobot-plugin-capacity-metrics | 6 | 7214 | """Additional Django management commands added by nautobot_capacity_metrics plugin."""
| 1.078125 | 1 |
nptweak/__init__.py | kmedian/nptweak | 0 | 7215 |
from .to_2darray import to_2darray
| 1.0625 | 1 |
resources/models/Image.py | sphildreth/roadie-python | 0 | 7216 | import io
from PIL import Image as PILImage
from sqlalchemy import Column, ForeignKey, LargeBinary, Index, Integer, String
from resources.models.ModelBase import Base
class Image(Base):
# If this is used then the image is stored in the database
image = Column(LargeBinary(length=16777215), default=None)
# If this is used then the image is remote and this is the url
url = Column(String(500))
caption = Column(String(100))
# This is a PhotoHash of the image for assistance in deduping
signature = Column(String(50))
artistId = Column(Integer, ForeignKey("artist.id"), index=True)
releaseId = Column(Integer, ForeignKey("release.id"), index=True)
def averageHash(self):
try:
hash_size = 8
# Open the image, resize it and convert it to black & white.
image = PILImage.open(io.BytesIO(self.image)).resize((hash_size, hash_size), PILImage.ANTIALIAS).convert(
'L')
pixels = list(image.getdata())
# Compute the hash based on each pixels value compared to the average.
avg = sum(pixels) / len(pixels)
bits = "".join(map(lambda pixel: '1' if pixel > avg else '0', pixels))
hashformat = "0{hashlength}x".format(hashlength=hash_size ** 2 // 4)
return int(bits, 2).__format__(hashformat)
except:
return None
def __unicode__(self):
return self.caption
def __str__(self):
return self.caption or self.signature
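# Illustrative helper (not part of the model): averageHash() digests support
# near-duplicate detection via the Hamming distance of their underlying bits.
def hamming_distance(hash_a: str, hash_b: str) -> int:
    return bin(int(hash_a, 16) ^ int(hash_b, 16)).count("1")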
| 2.640625 | 3 |
arch2vec/search_methods/reinforce_darts.py | gabrielasuchopar/arch2vec | 0 | 7217 | import os
import sys
import argparse
import json
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from arch2vec.models.pretraining_nasbench101 import configs
from arch2vec.utils import load_json, preprocessing, one_hot_darts
from arch2vec.preprocessing.gen_isomorphism_graphs import process
from arch2vec.models.model import Model
from torch.distributions import MultivariateNormal
from arch2vec.darts.cnn.train_search import Train
class Env(object):
def __init__(self, name, seed, cfg, data_path=None, save=False):
self.name = name
self.seed = seed
self.model = Model(input_dim=args.input_dim, hidden_dim=args.hidden_dim, latent_dim=args.dim,
num_hops=args.hops, num_mlp_layers=args.mlps, dropout=args.dropout, **cfg['GAE']).cuda()
self.dir_name = 'pretrained/dim-{}'.format(args.dim)
if not os.path.exists(os.path.join(self.dir_name, 'model-darts.pt')):
exit()
self.model.load_state_dict(torch.load(os.path.join(self.dir_name, 'model-darts.pt').format(args.dim))['model_state'])
self.visited = {}
self.features = []
self.genotype = []
self.embedding = {}
self._reset(data_path, save)
def _reset(self, data_path, save):
if not save:
print("extract arch2vec on DARTS search space ...")
dataset = load_json(data_path)
print("length of the dataset: {}".format(len(dataset)))
self.f_path = os.path.join(self.dir_name, 'arch2vec-darts.pt')
if os.path.exists(self.f_path):
print('{} is already saved'.format(self.f_path))
exit()
print('save to {}'.format(self.f_path))
counter = 0
self.model.eval()
for k, v in dataset.items():
adj = torch.Tensor(v[0]).unsqueeze(0).cuda()
ops = torch.Tensor(one_hot_darts(v[1])).unsqueeze(0).cuda()
adj, ops, prep_reverse = preprocessing(adj, ops, **cfg['prep'])
with torch.no_grad():
x, _ = self.model._encoder(ops, adj)
self.embedding[counter] = {'feature': x.squeeze(0).mean(dim=0).cpu(), 'genotype': process(v[2])}
print("{}/{}".format(counter, len(dataset)))
counter += 1
torch.save(self.embedding, self.f_path)
print("finished arch2vec extraction")
exit()
else:
self.f_path = os.path.join(self.dir_name, 'arch2vec-darts.pt')
print("load arch2vec from: {}".format(self.f_path))
self.embedding = torch.load(self.f_path)
for ind in range(len(self.embedding)):
self.features.append(self.embedding[ind]['feature'])
self.genotype.append(self.embedding[ind]['genotype'])
self.features = torch.stack(self.features, dim=0)
print('loading finished. pretrained embeddings shape: {}'.format(self.features.shape))
def get_init_state(self):
"""
:return: 1 x dim
"""
        rand_indices = random.randint(0, self.features.shape[0] - 1)  # randint's upper bound is inclusive
self.visited[rand_indices] = True
return self.features[rand_indices], self.genotype[rand_indices]
def step(self, action):
"""
action: 1 x dim
self.features. N x dim
"""
dist = torch.norm(self.features - action.cpu(), dim=1)
knn = (-1 * dist).topk(dist.shape[0])
min_dist, min_idx = knn.values, knn.indices
count = 0
while True:
if len(self.visited) == dist.shape[0]:
print("CANNOT FIND IN THE DATASET!")
exit()
if min_idx[count].item() not in self.visited:
self.visited[min_idx[count].item()] = True
break
count += 1
return self.features[min_idx[count].item()], self.genotype[min_idx[count].item()]
class Policy(nn.Module):
def __init__(self, hidden_dim1, hidden_dim2):
super(Policy, self).__init__()
self.fc1 = nn.Linear(hidden_dim1, hidden_dim2)
self.fc2 = nn.Linear(hidden_dim2, hidden_dim1)
self.saved_log_probs = []
self.rewards = []
def forward(self, input):
x = F.relu(self.fc1(input))
out = self.fc2(x)
return out
class Policy_LSTM(nn.Module):
def __init__(self, hidden_dim1, hidden_dim2):
super(Policy_LSTM, self).__init__()
self.lstm = torch.nn.LSTMCell(input_size=hidden_dim1, hidden_size=hidden_dim2)
self.fc = nn.Linear(hidden_dim2, hidden_dim1)
self.saved_log_probs = []
self.rewards = []
self.hx = None
self.cx = None
def forward(self, input):
if self.hx is None and self.cx is None:
self.hx, self.cx = self.lstm(input)
else:
self.hx, self.cx = self.lstm(input, (self.hx, self.cx))
mean = self.fc(self.hx)
return mean
def select_action(state, policy):
"""
MVN based action selection.
:param state: 1 x dim
:param policy: policy network
:return: selected action: 1 x dim
"""
mean = policy(state.view(1, state.shape[0]))
mvn = MultivariateNormal(mean, torch.eye(state.shape[0]).cuda())
action = mvn.sample()
policy.saved_log_probs.append(torch.mean(mvn.log_prob(action)))
return action
def finish_episode(policy, optimizer):
R = 0
policy_loss = []
returns = []
for r in policy.rewards:
R = r + args.gamma * R
returns.append(R)
returns = torch.Tensor(policy.rewards)
val, indices = torch.sort(returns)
print("sorted validation reward:", val)
returns = returns - args.objective
for log_prob, R in zip(policy.saved_log_probs, returns):
policy_loss.append(-log_prob * R)
optimizer.zero_grad()
policy_loss = torch.mean(torch.stack(policy_loss, dim=0))
print("average reward: {}, policy loss: {}".format(sum(policy.rewards)/len(policy.rewards), policy_loss.item()))
policy_loss.backward()
optimizer.step()
del policy.rewards[:]
del policy.saved_log_probs[:]
policy.hx = None
policy.cx = None
def query(counter, seed, genotype, epochs):
trainer = Train()
rewards, rewards_test = trainer.main(counter, seed, genotype, epochs=epochs, train_portion=args.train_portion, save=args.logging_path)
val_sum = 0
for epoch, val_acc in rewards:
val_sum += val_acc
val_avg = val_sum / len(rewards)
return val_avg / 100. , rewards_test[-1][-1] / 100.
def reinforce_search(env):
""" implementation of arch2vec-RL on DARTS Search Space """
policy = Policy_LSTM(args.dim, 128).cuda()
optimizer = optim.Adam(policy.parameters(), lr=1e-2)
counter = 0
MAX_BUDGET = args.max_budgets
state, genotype = env.get_init_state()
CURR_BEST_VALID = 0
CURR_BEST_TEST = 0
CURR_BEST_GENOTYPE = None
test_trace = []
valid_trace = []
genotype_trace = []
counter_trace = []
while counter < MAX_BUDGET:
for c in range(args.bs):
state = state.cuda()
action = select_action(state, policy)
state, genotype = env.step(action)
reward, reward_test = query(counter=counter, seed=args.seed, genotype=genotype, epochs=args.inner_epochs)
policy.rewards.append(reward)
counter += 1
print('counter: {}, validation reward: {}, test reward: {}, genotype: {}'.format(counter, reward, reward_test, genotype))
if reward > CURR_BEST_VALID:
CURR_BEST_VALID = reward
CURR_BEST_TEST = reward_test
CURR_BEST_GENOTYPE = genotype
valid_trace.append(float(CURR_BEST_VALID))
test_trace.append(float(CURR_BEST_TEST))
genotype_trace.append(CURR_BEST_GENOTYPE)
counter_trace.append(counter)
if counter >= MAX_BUDGET:
break
finish_episode(policy, optimizer)
res = dict()
res['validation_acc'] = valid_trace
res['test_acc'] = test_trace
res['genotype'] = genotype_trace
res['counter'] = counter_trace
save_path = os.path.join(args.output_path, 'dim{}'.format(args.dim))
if not os.path.exists(save_path):
os.mkdir(save_path)
print('save to {}'.format(save_path))
fh = open(os.path.join(save_path, 'run_{}_arch2vec_model_darts.json'.format(args.seed)), 'w')
json.dump(res, fh)
fh.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="arch2vec-REINFORCE")
parser.add_argument("--gamma", type=float, default=0.8, help="discount factor (default 0.99)")
parser.add_argument("--seed", type=int, default=3, help="random seed")
parser.add_argument('--cfg', type=int, default=4, help='configuration (default: 4)')
parser.add_argument('--bs', type=int, default=16, help='batch size')
parser.add_argument('--objective', type=float, default=0.95, help='rl baseline')
parser.add_argument('--max_budgets', type=int, default=100, help='number of queries')
parser.add_argument('--inner_epochs', type=int, default=50, help='inner loop epochs')
parser.add_argument('--train_portion', type=float, default=0.9, help='train/validation split portion')
parser.add_argument('--output_path', type=str, default='rl', help='rl/bo (default: rl)')
parser.add_argument('--logging_path', type=str, default='', help='search logging path')
parser.add_argument('--saved_arch2vec', action="store_true", default=False)
parser.add_argument('--input_dim', type=int, default=11)
parser.add_argument('--hidden_dim', type=int, default=128)
parser.add_argument('--dim', type=int, default=16,
help='feature dimension (default: 16)')
parser.add_argument('--hops', type=int, default=5)
parser.add_argument('--mlps', type=int, default=2)
parser.add_argument('--dropout', type=float, default=0.3)
args = parser.parse_args()
cfg = configs[args.cfg]
env = Env('REINFORCE', args.seed, cfg, data_path='data/data_darts_counter600000.json', save=args.saved_arch2vec)
torch.manual_seed(args.seed)
reinforce_search(env)
| 2.34375 | 2 |
setup.py | mentaal/r_map | 0 | 7218 |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
name='r_map', # Required
version='0.9.0', # Required
description='A data structure for working with register map information', # Required
long_description=long_description, # Optional
long_description_content_type='text/markdown', # Optional (see note above)
url='https://github.com/mentaal/r_map', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='<NAME>', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='<EMAIL>', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.6',
],
keywords='register bitfield registermap', # Optional
packages=['r_map'],
python_requires='>=3.6',
project_urls={ # Optional
'Bug Reports': 'https://github.com/mentaal/r_map/issues',
'Source': 'https://github.com/mentaal/r_map',
},
)
| 1.546875 | 2 |
dont_worry.py | karianjahi/fahrer_minijob | 0 | 7219 | class Hey:
def __init__(jose, name="mours"):
jose.name = name
def get_name(jose):
return jose.name
class Person(object):
def __init__(self, name, phone):
self.name = name
self.phone = phone
class Teenager(Person):
def __init__(self, *args, **kwargs):
self.website = kwargs.pop("website")
super(Teenager, self).__init__(*args, **kwargs)
if __name__ == "__main__":
#print(Hey().get_name())
    teen = Teenager("<NAME>", 924, website="www.fowr.gd")
print(teen.website) | 3.5 | 4 |
tests/zoo/tree.py | dynalz/odmantic | 486 | 7220 |
import enum
from typing import Dict, List
from odmantic.field import Field
from odmantic.model import Model
class TreeKind(str, enum.Enum):
BIG = "big"
SMALL = "small"
class TreeModel(Model):
name: str = Field(primary_key=True, default="<NAME> montagnes")
average_size: float = Field(mongo_name="size")
discovery_year: int
kind: TreeKind
genesis_continents: List[str]
per_continent_density: Dict[str, float]
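# Hypothetical usage sketch (not part of the fixture module): constructing one
# of the zoo documents; the field values here are made up for illustration.
def example_tree() -> TreeModel:
    return TreeModel(
        average_size=21.5,
        discovery_year=1784,
        kind=TreeKind.BIG,
        genesis_continents=["Europe", "Asia"],
        per_continent_density={"Europe": 0.12, "Asia": 0.05},
    )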
| 2.78125 | 3 |
adanet/core/estimator_test.py | eustomaqua/adanet | 0 | 7221 | """Test AdaNet estimator single graph implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import time
from absl import logging
from absl.testing import parameterized
from adanet import replay
from adanet import tf_compat
from adanet.core import testing_utils as tu
from adanet.core.estimator import Estimator
from adanet.core.evaluator import Evaluator
from adanet.core.report_materializer import ReportMaterializer
from adanet.distributed.placement import RoundRobinStrategy
from adanet.ensemble import AllStrategy
from adanet.ensemble import ComplexityRegularizedEnsembler
from adanet.ensemble import GrowStrategy
from adanet.ensemble import MixtureWeightType
from adanet.ensemble import SoloStrategy
from adanet.subnetwork import Builder
from adanet.subnetwork import Generator
from adanet.subnetwork import MaterializedReport
from adanet.subnetwork import Report
from adanet.subnetwork import SimpleGenerator
from adanet.subnetwork import Subnetwork
from adanet.subnetwork import TrainOpSpec
import numpy as np
import tensorflow as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.tools import saved_model_utils
# pylint: enable=g-direct-tensorflow-import
from tensorflow_estimator.python.estimator.canned.head import _binary_logistic_head_with_sigmoid_cross_entropy_loss as binary_class_head_v1
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.head import binary_class_head
from tensorflow_estimator.python.estimator.head import multi_head as multi_head_lib
from tensorflow_estimator.python.estimator.head import regression_head
logging.set_verbosity(logging.INFO)
XOR_FEATURES = [[1., 0.], [0., 0], [0., 1.], [1., 1.]]
XOR_LABELS = [[1.], [0.], [1.], [0.]]
class _DNNBuilder(Builder):
"""A simple DNN subnetwork builder."""
def __init__(self,
name,
learning_rate=.001,
mixture_weight_learning_rate=.001,
return_penultimate_layer=True,
layer_size=1,
subnetwork_chief_hooks=None,
subnetwork_hooks=None,
mixture_weight_chief_hooks=None,
mixture_weight_hooks=None,
seed=13):
self._name = name
self._learning_rate = learning_rate
self._mixture_weight_learning_rate = mixture_weight_learning_rate
self._return_penultimate_layer = return_penultimate_layer
self._layer_size = layer_size
self._subnetwork_chief_hooks = subnetwork_chief_hooks
self._subnetwork_hooks = subnetwork_hooks
self._mixture_weight_chief_hooks = mixture_weight_chief_hooks
self._mixture_weight_hooks = mixture_weight_hooks
self._seed = seed
@property
def name(self):
return self._name
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
seed = self._seed
if previous_ensemble:
# Increment seed so different iterations don't learn the exact same thing.
seed += 1
with tf_compat.v1.variable_scope("dnn"):
persisted_tensors = {}
with tf_compat.v1.variable_scope("hidden_layer"):
w = tf_compat.v1.get_variable(
shape=[2, self._layer_size],
initializer=tf_compat.v1.glorot_uniform_initializer(seed=seed),
name="weight")
disjoint_op = tf.constant([1], name="disjoint_op")
with tf_compat.v1.colocate_with(disjoint_op): # tests b/118865235
hidden_layer = tf.matmul(features["x"], w)
if previous_ensemble:
other_hidden_layer = previous_ensemble.weighted_subnetworks[
-1].subnetwork.persisted_tensors["hidden_layer"]
hidden_layer = tf.concat([hidden_layer, other_hidden_layer], axis=1)
# Use a leaky-relu activation so that gradients can flow even when
# outputs are negative. Leaky relu has a non-zero slope when x < 0.
# Otherwise success at learning is completely dependent on random seed.
hidden_layer = tf.nn.leaky_relu(hidden_layer, alpha=.2)
persisted_tensors["hidden_layer"] = hidden_layer
if training:
# This change will only be in the next iteration if
# `freeze_training_graph` is `True`.
persisted_tensors["hidden_layer"] = 2 * hidden_layer
last_layer = hidden_layer
with tf_compat.v1.variable_scope("logits"):
logits = tf_compat.v1.layers.dense(
hidden_layer,
logits_dimension,
kernel_initializer=tf_compat.v1.glorot_uniform_initializer(seed=seed))
summary.scalar("scalar", 3)
batch_size = features["x"].get_shape().as_list()[0]
summary.image("image", tf.ones([batch_size, 3, 3, 1]))
with tf_compat.v1.variable_scope("nested"):
summary.scalar("scalar", 5)
return Subnetwork(
last_layer=last_layer if self._return_penultimate_layer else logits,
logits=logits,
complexity=3,
persisted_tensors=persisted_tensors,
shared=persisted_tensors)
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(
learning_rate=self._learning_rate)
train_op = optimizer.minimize(loss, var_list=var_list)
if not self._subnetwork_hooks:
return train_op
return TrainOpSpec(train_op, self._subnetwork_chief_hooks,
self._subnetwork_hooks)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(
learning_rate=self._mixture_weight_learning_rate)
train_op = optimizer.minimize(loss, var_list=var_list)
if not self._mixture_weight_hooks:
return train_op
return TrainOpSpec(train_op, self._mixture_weight_chief_hooks,
self._mixture_weight_hooks)
def build_subnetwork_report(self):
return Report(
hparams={"layer_size": self._layer_size},
attributes={"complexity": tf.constant(3, dtype=tf.int32)},
metrics={
"moo": (tf.constant(3,
dtype=tf.int32), tf.constant(3, dtype=tf.int32))
})
class _SimpleBuilder(Builder):
"""A simple subnetwork builder that takes feature_columns."""
def __init__(self, name, feature_columns, seed=42):
self._name = name
self._feature_columns = feature_columns
self._seed = seed
@property
def name(self):
return self._name
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
seed = self._seed
if previous_ensemble:
# Increment seed so different iterations don't learn the exact same thing.
seed += 1
with tf_compat.v1.variable_scope("simple"):
input_layer = tf_compat.v1.feature_column.input_layer(
features=features, feature_columns=self._feature_columns)
last_layer = input_layer
with tf_compat.v1.variable_scope("logits"):
logits = tf_compat.v1.layers.dense(
last_layer,
logits_dimension,
kernel_initializer=tf_compat.v1.glorot_uniform_initializer(seed=seed))
return Subnetwork(
last_layer=last_layer,
logits=logits,
complexity=1,
persisted_tensors={},
)
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=.001)
return optimizer.minimize(loss, var_list=var_list)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=.001)
return optimizer.minimize(loss, var_list=var_list)
class _NanLossBuilder(Builder):
"""A subnetwork builder always produces a NaN loss."""
@property
def name(self):
return "nan"
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
logits = tf_compat.v1.layers.dense(
features["x"],
logits_dimension,
kernel_initializer=tf_compat.v1.glorot_uniform_initializer(
seed=42)) * np.nan
return Subnetwork(last_layer=logits, logits=logits, complexity=0)
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
return tf.no_op()
class _LinearBuilder(Builder):
"""A simple linear subnetwork builder."""
def __init__(self, name, mixture_weight_learning_rate=.001, seed=42):
self._name = name
self._mixture_weight_learning_rate = mixture_weight_learning_rate
self._seed = seed
@property
def name(self):
return self._name
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
logits = tf_compat.v1.layers.dense(
features["x"],
logits_dimension,
kernel_initializer=tf_compat.v1.glorot_uniform_initializer(
seed=self._seed))
return Subnetwork(
last_layer=features["x"],
logits=logits,
complexity=1,
persisted_tensors={},
)
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=.001)
return optimizer.minimize(loss, var_list=var_list)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(
learning_rate=self._mixture_weight_learning_rate)
return optimizer.minimize(loss, var_list=var_list)
class _FakeGenerator(Generator):
"""Generator that exposed generate_candidates' arguments."""
def __init__(self, spy_fn, subnetwork_builders):
"""Checks the arguments passed to generate_candidates.
Args:
spy_fn: (iteration_number, previous_ensemble_reports, all_reports) -> ().
Spies on the arguments passed to generate_candidates whenever it is
called.
subnetwork_builders: List of `Builder`s to return in every call to
generate_candidates.
"""
self._spy_fn = spy_fn
self._subnetwork_builders = subnetwork_builders
def generate_candidates(self, previous_ensemble, iteration_number,
previous_ensemble_reports, all_reports):
"""Spys on arguments passed in, then returns a fixed list of candidates."""
del previous_ensemble # unused
self._spy_fn(iteration_number, previous_ensemble_reports, all_reports)
return self._subnetwork_builders
class _WidthLimitingDNNBuilder(_DNNBuilder):
"""Limits the width of the previous_ensemble."""
def __init__(self,
name,
learning_rate=.001,
mixture_weight_learning_rate=.001,
return_penultimate_layer=True,
layer_size=1,
width_limit=None,
seed=13):
if width_limit is not None and width_limit == 0:
raise ValueError("width_limit must be at least 1 or None.")
super(_WidthLimitingDNNBuilder,
self).__init__(name, learning_rate, mixture_weight_learning_rate,
return_penultimate_layer, layer_size, seed)
self._width_limit = width_limit
def prune_previous_ensemble(self, previous_ensemble):
indices = range(len(previous_ensemble.weighted_subnetworks))
if self._width_limit is None:
return indices
if self._width_limit == 1:
return []
return indices[-self._width_limit + 1:] # pylint: disable=invalid-unary-operand-type
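# Worked example (comment only, not executed): with a previous ensemble of
# four weighted subnetworks, indices == range(4) and prune_previous_ensemble
# keeps:
#   width_limit=None -> range(4)               (no pruning)
#   width_limit=1    -> []                     (drop every previous subnetwork)
#   width_limit=2    -> range(4)[-1:] == [3]   (keep only the newest one,
#                       leaving room for the incoming subnetwork)
#   width_limit=3    -> range(4)[-2:] == [2, 3]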
class _FakeEvaluator(object):
"""Fakes an `adanet.Evaluator`."""
def __init__(self, input_fn):
self._input_fn = input_fn
@property
def input_fn(self):
"""Return the input_fn."""
return self._input_fn
@property
def steps(self):
"""Return the number of evaluation steps."""
return 1
@property
def metric_name(self):
"""Returns the name of the metric being optimized."""
return "adanet_loss"
@property
def objective_fn(self):
"""Always returns the minimize objective."""
return np.nanargmin
def evaluate(self, sess, ensemble_metrics):
"""Abstract method to be overridden in subclasses."""
del sess, ensemble_metrics # Unused.
raise NotImplementedError
class _AlwaysLastEvaluator(_FakeEvaluator):
def evaluate(self, sess, ensemble_metrics):
"""Always makes the last loss the smallest."""
del sess # Unused.
losses = [np.inf] * len(ensemble_metrics)
losses[-1] = 0.
return losses
class _AlwaysSecondToLastEvaluator(_FakeEvaluator):
def evaluate(self, sess, ensemble_metrics):
"""Always makes the second to last loss the smallest."""
del sess # Unused.
losses = [np.inf] * len(ensemble_metrics)
losses[-2] = 0.
return losses
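# Selection sketch (comment only): the Estimator applies objective_fn, here
# np.nanargmin, to the losses returned by evaluate() to pick the winning
# candidate. With three candidates, for instance:
#   _AlwaysLastEvaluator         -> [inf, inf, 0.] -> np.nanargmin(...) == 2
#   _AlwaysSecondToLastEvaluator -> [inf, 0., inf] -> np.nanargmin(...) == 1
# so these fakes force a choice regardless of the real AdaNet losses.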
class _EarlyStoppingHook(tf_compat.SessionRunHook):
"""Hook that immediately requests training to stop."""
def after_run(self, run_context, run_values):
run_context.request_stop()
class EstimatorTest(tu.AdanetTestCase):
@parameterized.named_parameters(
{
"testcase_name": "one_step",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"steps": 1,
"max_steps": None,
"want_loss": 0.49899703,
"want_iteration": 0,
"want_global_step": 1,
},
{
"testcase_name": "none_max_iteration_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": None,
"steps": 300,
"max_steps": None,
"want_loss": 0.32487726,
"want_iteration": 0,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_max_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"max_steps": 300,
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"steps": 300,
"max_steps": None,
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_two_max_iteration_fewer_max_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"max_iterations": 2,
"max_steps": 300,
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_no_bias",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"use_bias": False,
"want_loss": 0.496736,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name":
"single_builder_subnetwork_hooks",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder(
"dnn",
subnetwork_chief_hooks=[
tu.ModifierSessionRunHook("chief_hook_var")
],
subnetwork_hooks=[tu.ModifierSessionRunHook("hook_var")])
]),
"max_iteration_steps":
200,
"use_bias":
False,
"want_loss":
0.496736,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"single_builder_mixture_weight_hooks",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder(
"dnn",
mixture_weight_chief_hooks=[
tu.ModifierSessionRunHook("chief_hook_var")
],
mixture_weight_hooks=[
tu.ModifierSessionRunHook("hook_var")
])
]),
"max_iteration_steps":
200,
"use_bias":
False,
"want_loss":
0.496736,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"single_builder_scalar_mixture_weight",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn", return_penultimate_layer=False)]),
"max_iteration_steps":
200,
"mixture_weight_type":
MixtureWeightType.SCALAR,
"want_loss":
0.32317898,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"single_builder_vector_mixture_weight",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn", return_penultimate_layer=False)]),
"max_iteration_steps":
200,
"mixture_weight_type":
MixtureWeightType.VECTOR,
"want_loss":
0.32317898,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name": "single_builder_replicate_ensemble_in_training",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"replicate_ensemble_in_training": True,
"max_iteration_steps": 200,
"max_steps": 300,
"want_loss": 0.32420215,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_with_hook",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"hooks": [tu.ModifierSessionRunHook()],
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "high_max_iteration_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 500,
"want_loss": 0.32487726,
"want_iteration": 0,
"want_global_step": 300,
},
{
"testcase_name":
"two_builders",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", seed=99)]),
"max_iteration_steps":
200,
"want_loss":
0.27713922,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"two_builders_different_layer_sizes",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"want_loss":
0.29696745,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"two_builders_one_max_iteration_none_steps_and_none_max_steps",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"max_iterations":
1,
"steps":
None,
"max_steps":
None,
"want_loss":
0.35249719,
"want_iteration":
0,
"want_global_step":
200,
},
{
"testcase_name":
"two_builders_one_max_iteration_two_hundred_steps",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"max_iterations":
1,
"steps":
300,
"max_steps":
None,
"want_loss":
0.35249719,
"want_iteration":
0,
"want_global_step":
200,
},
{
"testcase_name":
"two_builders_two_max_iteration_none_steps_and_none_max_steps",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"max_iterations":
2,
"steps":
None,
"max_steps":
None,
"want_loss":
0.26503286,
"want_iteration":
1,
"want_global_step":
400,
},
{
"testcase_name":
"two_builders_different_layer_sizes_three_iterations",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
100,
"want_loss":
0.26433355,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"two_dnn_export_subnetworks",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
100,
"want_loss":
0.26433355,
"want_iteration":
2,
"want_global_step":
300,
"export_subnetworks":
True,
},
{
"testcase_name":
"width_limiting_builder_no_pruning",
"subnetwork_generator":
SimpleGenerator([_WidthLimitingDNNBuilder("no_pruning")]),
"max_iteration_steps":
75,
"want_loss":
0.32001898,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"width_limiting_builder_some_pruning",
"subnetwork_generator":
SimpleGenerator(
[_WidthLimitingDNNBuilder("some_pruning", width_limit=2)]),
"max_iteration_steps":
75,
"want_loss":
0.38592532,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"width_limiting_builder_prune_all",
"subnetwork_generator":
SimpleGenerator(
[_WidthLimitingDNNBuilder("prune_all", width_limit=1)]),
"max_iteration_steps":
75,
"want_loss":
0.43492866,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"width_limiting_builder_mixed",
"subnetwork_generator":
SimpleGenerator([
_WidthLimitingDNNBuilder("no_pruning"),
_WidthLimitingDNNBuilder("some_pruning", width_limit=2),
_WidthLimitingDNNBuilder("prune_all", width_limit=1)
]),
"max_iteration_steps":
75,
"want_loss":
0.32001898,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_good_input",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"evaluator":
Evaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=3),
"max_iteration_steps":
200,
"want_loss":
0.36189985,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_bad_input",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"evaluator":
Evaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[1.]]), steps=3),
"max_iteration_steps":
200,
"want_loss":
0.29696745,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_always_last",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3),
]),
"evaluator":
_AlwaysLastEvaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]])),
"max_iteration_steps":
None,
"want_loss":
0.31389591,
"want_iteration":
0,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_always_second_to_last",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3),
]),
"evaluator":
_AlwaysSecondToLastEvaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]])),
"max_iteration_steps":
None,
"want_loss":
0.32487726,
"want_iteration":
0,
"want_global_step":
300,
},
{
"testcase_name":
"report_materializer",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"report_materializer":
ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1),
"max_iteration_steps":
200,
"want_loss":
0.29696745,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"all_strategy",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [AllStrategy()],
"max_iteration_steps":
200,
"want_loss":
0.29196805,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"all_strategy_multiple_ensemblers",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [AllStrategy()],
"ensemblers": [
ComplexityRegularizedEnsembler(),
ComplexityRegularizedEnsembler(use_bias=True, name="with_bias")
],
"max_iteration_steps":
200,
"want_loss":
0.23053232,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"solo_strategy",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [SoloStrategy()],
"max_iteration_steps":
200,
"want_loss":
0.35249719,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"solo_strategy_three_iterations",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [SoloStrategy()],
"max_iteration_steps":
100,
"want_loss":
0.36163166,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"multi_ensemble_strategy",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies":
[AllStrategy(), GrowStrategy(),
SoloStrategy()],
"max_iteration_steps":
100,
"want_loss":
0.24838975,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"dataset_train_input_fn",
"subnetwork_generator":
SimpleGenerator([_DNNBuilder("dnn")]),
# pylint: disable=g-long-lambda
"train_input_fn":
lambda: tf.data.Dataset.from_tensors(({
"x": XOR_FEATURES
}, XOR_LABELS)).repeat(),
# pylint: enable=g-long-lambda
"max_iteration_steps":
100,
"want_loss":
0.32219219,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"early_stopping_subnetwork",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", subnetwork_hooks=[_EarlyStoppingHook()])
]),
"max_iteration_steps":
100,
"max_steps":
200,
"want_loss":
0.2958503,
# Since one subnetwork stops after 1 step and global step is the
# mean of iteration steps, global step will be incremented at half
# the rate.
"want_iteration":
3,
"want_global_step":
200,
})
def test_lifecycle(self,
subnetwork_generator,
want_loss,
want_iteration,
want_global_step,
max_iteration_steps,
mixture_weight_type=MixtureWeightType.MATRIX,
evaluator=None,
use_bias=True,
replicate_ensemble_in_training=False,
hooks=None,
ensemblers=None,
ensemble_strategies=None,
max_steps=300,
steps=None,
report_materializer=None,
train_input_fn=None,
max_iterations=None,
export_subnetworks=False):
"""Train entire estimator lifecycle using XOR dataset."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
def _metric_fn(predictions):
mean = tf.keras.metrics.Mean()
mean.update_state(predictions["predictions"])
return {"keras_mean": mean}
default_ensembler_kwargs = {
"mixture_weight_type": mixture_weight_type,
"mixture_weight_initializer": tf_compat.v1.zeros_initializer(),
"warm_start_mixture_weights": True,
"use_bias": use_bias,
}
if ensemblers:
default_ensembler_kwargs = {}
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=max_iteration_steps,
evaluator=evaluator,
ensemblers=ensemblers,
ensemble_strategies=ensemble_strategies,
report_materializer=report_materializer,
replicate_ensemble_in_training=replicate_ensemble_in_training,
metric_fn=_metric_fn,
model_dir=self.test_subdirectory,
config=run_config,
max_iterations=max_iterations,
export_subnetwork_logits=export_subnetworks,
export_subnetwork_last_layer=export_subnetworks,
**default_ensembler_kwargs)
if not train_input_fn:
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
# Train.
estimator.train(
input_fn=train_input_fn, steps=steps, max_steps=max_steps, hooks=hooks)
# Evaluate.
eval_results = estimator.evaluate(
input_fn=train_input_fn, steps=10, hooks=hooks)
logging.info("%s", eval_results)
self.assertAlmostEqual(want_loss, eval_results["loss"], places=3)
self.assertEqual(want_global_step, eval_results["global_step"])
self.assertEqual(want_iteration, eval_results["iteration"])
# Predict.
predictions = estimator.predict(
input_fn=tu.dataset_input_fn(features=[0., 0.], labels=None))
for prediction in predictions:
self.assertIsNotNone(prediction["predictions"])
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
return tf.estimator.export.ServingInputReceiver(
features={"x": tf.constant([[0., 0.]], name="serving_x")},
receiver_tensors=serialized_example)
export_saved_model_fn = getattr(estimator, "export_saved_model", None)
if not callable(export_saved_model_fn):
export_saved_model_fn = estimator.export_savedmodel
export_dir_base = os.path.join(self.test_subdirectory, "export")
export_saved_model_fn(
export_dir_base=export_dir_base,
serving_input_receiver_fn=serving_input_fn)
if export_subnetworks:
saved_model = saved_model_utils.read_saved_model(
os.path.join(export_dir_base,
tf.io.gfile.listdir(export_dir_base)[0]))
export_signature_def = saved_model.meta_graphs[0].signature_def
self.assertIn("subnetwork_logits", export_signature_def.keys())
self.assertIn("subnetwork_last_layer", export_signature_def.keys())
@parameterized.named_parameters(
{
"testcase_name":
"hash_bucket_with_one_hot",
"feature_column": (tf.feature_column.indicator_column(
categorical_column=(
tf.feature_column.categorical_column_with_hash_bucket(
key="human_names", hash_bucket_size=4, dtype=tf.string)))
),
}, {
"testcase_name":
"vocab_list_with_one_hot",
"feature_column": (tf.feature_column.indicator_column(
categorical_column=(
tf.feature_column.categorical_column_with_vocabulary_list(
key="human_names",
vocabulary_list=["alice", "bob"],
dtype=tf.string)))),
}, {
"testcase_name":
"hash_bucket_with_embedding",
"feature_column": (tf.feature_column.embedding_column(
categorical_column=(
tf.feature_column.categorical_column_with_hash_bucket(
key="human_names", hash_bucket_size=4, dtype=tf.string)),
dimension=2)),
}, {
"testcase_name":
"vocab_list_with_embedding",
"feature_column": (tf.feature_column.embedding_column(
categorical_column=(
tf.feature_column.categorical_column_with_vocabulary_list(
key="human_names",
vocabulary_list=["alice", "bob"],
dtype=tf.string)),
dimension=2)),
})
def test_categorical_columns(self, feature_column):
def train_input_fn():
input_features = {
"human_names": tf.constant([["alice"], ["bob"]], name="human_names")
}
input_labels = tf.constant([[1.], [0.]], name="starts_with_a")
return input_features, input_labels
report_materializer = ReportMaterializer(input_fn=train_input_fn, steps=1)
estimator = Estimator(
head=regression_head.RegressionHead(),
subnetwork_generator=SimpleGenerator(
[_SimpleBuilder(name="simple", feature_columns=[feature_column])]),
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=1,
use_bias=True,
model_dir=self.test_subdirectory)
estimator.train(input_fn=train_input_fn, max_steps=3)
@parameterized.named_parameters(
{
"testcase_name": "no_subnetwork_generator",
"subnetwork_generator": None,
"max_iteration_steps": 100,
"want_error": ValueError,
},
{
"testcase_name": "negative_max_iteration_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": -1,
"want_error": ValueError,
},
{
"testcase_name": "zero_max_iteration_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 0,
"want_error": ValueError,
},
{
"testcase_name": "negative_max_iterations",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"max_iterations": -1,
"want_error": ValueError,
},
{
"testcase_name": "zero_max_iterations",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"max_iterations": 0,
"want_error": ValueError,
},
{
"testcase_name": "steps_and_max_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"steps": 1,
"max_steps": 1,
"want_error": ValueError,
},
{
"testcase_name": "zero_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"steps": 0,
"max_steps": None,
"want_error": ValueError,
},
{
"testcase_name": "nan_loss_builder",
"subnetwork_generator": SimpleGenerator([_NanLossBuilder()]),
"max_iteration_steps": 1,
"max_steps": None,
"want_error": tf_compat.v1.estimator.NanLossDuringTrainingError,
},
{
"testcase_name":
"nan_loss_builder_first",
"subnetwork_generator":
SimpleGenerator([
_NanLossBuilder(),
_DNNBuilder("dnn"),
]),
"max_iteration_steps":
1,
"max_steps":
None,
"want_error":
tf_compat.v1.estimator.NanLossDuringTrainingError,
},
{
"testcase_name":
"nan_loss_builder_last",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_NanLossBuilder(),
]),
"max_iteration_steps":
1,
"max_steps":
None,
"want_error":
tf_compat.v1.estimator.NanLossDuringTrainingError,
},
)
def test_train_error(self,
subnetwork_generator,
max_iteration_steps,
want_error,
steps=None,
max_steps=10,
max_iterations=None):
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
with self.assertRaises(want_error):
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=max_iteration_steps,
use_bias=True,
max_iterations=max_iterations,
model_dir=self.test_subdirectory)
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.train(input_fn=train_input_fn, steps=steps, max_steps=max_steps)
def test_binary_head_asserts_are_disabled(self):
"""Tests b/140267630."""
subnetwork_generator = SimpleGenerator([
_DNNBuilder("dnn"),
_NanLossBuilder(),
])
estimator = Estimator(
head=binary_class_head_v1(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=10,
model_dir=self.test_subdirectory)
eval_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.evaluate(input_fn=eval_input_fn, steps=1)
class KerasCNNBuilder(Builder):
"""Builds a CNN subnetwork for AdaNet."""
def __init__(self, learning_rate, seed=42):
"""Initializes a `SimpleCNNBuilder`.
Args:
learning_rate: The float learning rate to use.
seed: The random seed.
Returns:
      An instance of `KerasCNNBuilder`.
"""
self._learning_rate = learning_rate
self._seed = seed
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
"""See `adanet.subnetwork.Builder`."""
seed = self._seed
if previous_ensemble:
seed += len(previous_ensemble.weighted_subnetworks)
images = list(features.values())[0]
images = tf.reshape(images, [-1, 2, 2, 1])
kernel_initializer = tf_compat.v1.keras.initializers.he_normal(seed=seed)
x = tf.keras.layers.Conv2D(
filters=3,
kernel_size=1,
padding="same",
activation="relu",
kernel_initializer=kernel_initializer)(
images)
x = tf.keras.layers.MaxPool2D(pool_size=2, strides=1)(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(
units=3, activation="relu", kernel_initializer=kernel_initializer)(
x)
logits = tf_compat.v1.layers.Dense(
units=1, activation=None, kernel_initializer=kernel_initializer)(
x)
complexity = tf.constant(1)
return Subnetwork(
last_layer=x,
logits=logits,
complexity=complexity,
persisted_tensors={})
def build_subnetwork_train_op(self,
subnetwork,
loss,
var_list,
labels,
iteration_step,
summary,
previous_ensemble=None):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(self._learning_rate)
return optimizer.minimize(loss=loss, var_list=var_list)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
return tf.no_op()
@property
def name(self):
return "simple_cnn"
class EstimatorKerasLayersTest(tu.AdanetTestCase):
def test_lifecycle(self):
"""Train entire estimator lifecycle using XOR dataset."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=SimpleGenerator(
[KerasCNNBuilder(learning_rate=.001)]),
max_iteration_steps=3,
evaluator=Evaluator(
input_fn=tu.dummy_input_fn([[1., 1., .1, .1]], [[0.]]), steps=3),
model_dir=self.test_subdirectory,
config=run_config)
xor_features = [[1., 0., 1., 0.], [0., 0., 0., 0.], [0., 1., 0., 1.],
[1., 1., 1., 1.]]
xor_labels = [[1.], [0.], [1.], [0.]]
train_input_fn = tu.dummy_input_fn(xor_features, xor_labels)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=9)
# Evaluate.
eval_results = estimator.evaluate(input_fn=train_input_fn, steps=3)
logging.info("%s", eval_results)
want_loss = 0.16915826
if tf_compat.version_greater_or_equal("1.10.0"):
      # After TF v1.10.0 the loss computed from a neural network using Keras
      # layers changed; it is not clear why.
want_loss = 0.26195815
self.assertAlmostEqual(want_loss, eval_results["loss"], places=3)
# Predict.
predictions = estimator.predict(
input_fn=tu.dataset_input_fn(features=[0., 0., 0., 0.], labels=None))
for prediction in predictions:
self.assertIsNotNone(prediction["predictions"])
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
return tf.estimator.export.ServingInputReceiver(
features={"x": tf.constant([[0., 0., 0., 0.]], name="serving_x")},
receiver_tensors=serialized_example)
export_saved_model_fn = getattr(estimator, "export_saved_model", None)
if not callable(export_saved_model_fn):
export_saved_model_fn = estimator.export_savedmodel
export_saved_model_fn(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn)
class MultiHeadBuilder(Builder):
"""Builds a subnetwork for AdaNet that uses dict labels."""
def __init__(self, learning_rate=.001, split_logits=False, seed=42):
"""Initializes a `LabelsDictBuilder`.
Args:
learning_rate: The float learning rate to use.
split_logits: Whether to return a dict of logits or a single concatenated
logits `Tensor`.
seed: The random seed.
Returns:
An instance of `MultiHeadBuilder`.
"""
self._learning_rate = learning_rate
self._split_logits = split_logits
self._seed = seed
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
"""See `adanet.subnetwork.Builder`."""
seed = self._seed
if previous_ensemble:
seed += len(previous_ensemble.weighted_subnetworks)
kernel_initializer = tf_compat.v1.keras.initializers.he_normal(seed=seed)
x = features["x"]
logits = tf_compat.v1.layers.dense(
x,
units=logits_dimension,
activation=None,
kernel_initializer=kernel_initializer)
if self._split_logits:
# Return different logits, one for each head.
logits1, logits2 = tf.split(logits, [1, 1], 1)
logits = {
"head1": logits1,
"head2": logits2,
}
complexity = tf.constant(1)
return Subnetwork(
last_layer=logits,
logits=logits,
complexity=complexity,
persisted_tensors={})
def build_subnetwork_train_op(self,
subnetwork,
loss,
var_list,
labels,
iteration_step,
summary,
previous_ensemble=None):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(self._learning_rate)
return optimizer.minimize(loss=loss, var_list=var_list)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(self._learning_rate)
return optimizer.minimize(loss=loss, var_list=var_list)
@property
def name(self):
return "multi_head"
class EstimatorMultiHeadTest(tu.AdanetTestCase):
@parameterized.named_parameters(
{
"testcase_name": "concatenated_logits",
"builders": [MultiHeadBuilder()],
"want_loss": 3.218,
}, {
"testcase_name": "split_logits_with_export_subnetworks",
"builders": [MultiHeadBuilder(split_logits=True)],
"want_loss": 3.224,
"export_subnetworks": True,
}, {
"testcase_name": "split_logits",
"builders": [MultiHeadBuilder(split_logits=True)],
"want_loss": 3.224,
})
def test_lifecycle(self, builders, want_loss, export_subnetworks=False):
"""Train entire estimator lifecycle using XOR dataset."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
xor_features = [[1., 0., 1., 0.], [0., 0., 0., 0.], [0., 1., 0., 1.],
[1., 1., 1., 1.]]
xor_labels = [[1.], [0.], [1.], [0.]]
def train_input_fn():
return {
"x": tf.constant(xor_features)
}, {
"head1": tf.constant(xor_labels),
"head2": tf.constant(xor_labels)
}
estimator = Estimator(
head=multi_head_lib.MultiHead(heads=[
regression_head.RegressionHead(
name="head1", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
regression_head.RegressionHead(
name="head2", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
]),
subnetwork_generator=SimpleGenerator(builders),
max_iteration_steps=3,
evaluator=Evaluator(input_fn=train_input_fn, steps=1),
model_dir=self.test_subdirectory,
config=run_config,
export_subnetwork_logits=export_subnetworks,
export_subnetwork_last_layer=export_subnetworks)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=9)
# Evaluate.
eval_results = estimator.evaluate(input_fn=train_input_fn, steps=3)
self.assertAlmostEqual(want_loss, eval_results["loss"], places=3)
# Predict.
predictions = estimator.predict(
input_fn=tu.dataset_input_fn(features=[0., 0., 0., 0.], labels=None))
for prediction in predictions:
self.assertIsNotNone(prediction[("head1", "predictions")])
self.assertIsNotNone(prediction[("head2", "predictions")])
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
return tf.estimator.export.ServingInputReceiver(
features={"x": tf.constant([[0., 0., 0., 0.]], name="serving_x")},
receiver_tensors=serialized_example)
export_saved_model_fn = getattr(estimator, "export_saved_model", None)
if not callable(export_saved_model_fn):
export_saved_model_fn = estimator.export_savedmodel
export_dir_base = os.path.join(self.test_subdirectory, "export")
export_saved_model_fn(
export_dir_base=export_dir_base,
serving_input_receiver_fn=serving_input_fn)
if export_subnetworks:
saved_model = saved_model_utils.read_saved_model(
os.path.join(export_dir_base,
tf.io.gfile.listdir(export_dir_base)[0]))
export_signature_def = saved_model.meta_graphs[0].signature_def
self.assertIn("subnetwork_logits_head1", export_signature_def.keys())
self.assertIn("subnetwork_logits_head2", export_signature_def.keys())
self.assertIn("subnetwork_last_layer_head1", export_signature_def.keys())
self.assertIn("subnetwork_last_layer_head2", export_signature_def.keys())
class EstimatorCallingModelFnDirectlyTest(tu.AdanetTestCase):
"""Tests b/112108745. Warn users not to call model_fn directly."""
def test_calling_model_fn_directly(self):
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
max_iteration_steps=3,
use_bias=True,
model_dir=self.test_subdirectory)
model_fn = estimator.model_fn
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
tf_compat.v1.train.create_global_step()
features, labels = train_input_fn()
with self.assertRaises(UserWarning):
model_fn(
features=features,
mode=tf.estimator.ModeKeys.TRAIN,
labels=labels,
config={})
def test_calling_model_fn_directly_for_predict(self):
with context.graph_mode():
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
max_iteration_steps=3,
use_bias=True,
model_dir=self.test_subdirectory)
model_fn = estimator.model_fn
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
tf_compat.v1.train.create_global_step()
features, labels = train_input_fn()
model_fn(
features=features,
mode=tf.estimator.ModeKeys.PREDICT,
labels=labels,
config=tf.estimator.RunConfig(
save_checkpoints_steps=1,
keep_checkpoint_max=3,
model_dir=self.test_subdirectory,
))
class EstimatorCheckpointTest(tu.AdanetTestCase):
"""Tests estimator checkpoints."""
@parameterized.named_parameters(
{
"testcase_name": "single_iteration",
"max_iteration_steps": 3,
"keep_checkpoint_max": 3,
"want_num_checkpoints": 3,
}, {
"testcase_name": "single_iteration_keep_one",
"max_iteration_steps": 3,
"keep_checkpoint_max": 1,
"want_num_checkpoints": 1,
}, {
"testcase_name": "three_iterations",
"max_iteration_steps": 1,
"keep_checkpoint_max": 3,
"want_num_checkpoints": 3,
}, {
"testcase_name": "three_iterations_keep_one",
"max_iteration_steps": 1,
"keep_checkpoint_max": 1,
"want_num_checkpoints": 1,
})
def test_checkpoints(self,
max_iteration_steps,
keep_checkpoint_max,
want_num_checkpoints,
max_steps=3):
config = tf.estimator.RunConfig(
save_checkpoints_steps=1,
keep_checkpoint_max=keep_checkpoint_max,
)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=max_iteration_steps,
use_bias=True,
config=config,
model_dir=self.test_subdirectory)
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.train(input_fn=train_input_fn, max_steps=max_steps)
checkpoints = tf.io.gfile.glob(
os.path.join(self.test_subdirectory, "*.meta"))
self.assertEqual(want_num_checkpoints, len(checkpoints))
def _check_eventfile_for_keyword(keyword, dir_):
"""Checks event files for the keyword."""
tf_compat.v1.summary.FileWriterCache.clear()
if not tf.io.gfile.exists(dir_):
raise ValueError("Directory '{}' not found.".format(dir_))
# Get last `Event` written.
filenames = os.path.join(dir_, "events*")
event_paths = tf.io.gfile.glob(filenames)
if not event_paths:
raise ValueError("Path '{}' not found.".format(filenames))
for last_event in tf_compat.v1.train.summary_iterator(event_paths[-1]):
if last_event.summary is not None:
for value in last_event.summary.value:
if keyword == value.tag:
if value.HasField("simple_value"):
return value.simple_value
if value.HasField("image"):
return (value.image.height, value.image.width,
value.image.colorspace)
if value.HasField("tensor"):
return value.tensor.string_val
raise ValueError("Keyword '{}' not found in path '{}'.".format(
keyword, filenames))
class _FakeMetric(object):
"""A fake metric."""
def __init__(self, value, dtype):
self._value = value
self._dtype = dtype
def to_metric(self):
tensor = tf.convert_to_tensor(value=self._value, dtype=self._dtype)
return (tensor, tensor)
class _EvalMetricsHead(object):
"""A fake head with the given evaluation metrics."""
def __init__(self, fake_metrics):
self._fake_metrics = fake_metrics
@property
def logits_dimension(self):
return 1
def create_estimator_spec(self,
features,
mode,
logits,
labels=None,
train_op_fn=None):
del features # Unused
metric_ops = None
if self._fake_metrics:
metric_ops = {}
for k, fake_metric in self._fake_metrics.items():
metric_ops[k] = fake_metric.to_metric()
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=logits,
loss=tf.reduce_mean(input_tensor=labels - logits),
eval_metric_ops=metric_ops,
train_op=train_op_fn(1))
def _mean_keras_metric(value):
"""Returns the mean of given value as a Keras metric."""
mean = tf.keras.metrics.Mean()
mean.update_state(value)
return mean
class EstimatorSummaryWriterTest(tu.AdanetTestCase):
"""Test that Tensorboard summaries get written correctly."""
@tf_compat.skip_for_tf2
def test_summaries(self):
"""Tests that summaries are written to candidate directory."""
run_config = tf.estimator.RunConfig(
tf_random_seed=42, log_step_count_steps=2, save_summary_steps=2)
subnetwork_generator = SimpleGenerator(
[_DNNBuilder("dnn", mixture_weight_learning_rate=.001)])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=10,
use_bias=True,
config=run_config,
model_dir=self.test_subdirectory)
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.train(input_fn=train_input_fn, max_steps=3)
ensemble_loss = 1.
self.assertAlmostEqual(
ensemble_loss,
_check_eventfile_for_keyword("loss", self.test_subdirectory),
places=3)
self.assertIsNotNone(
_check_eventfile_for_keyword("global_step/sec", self.test_subdirectory))
self.assertEqual(
0.,
_check_eventfile_for_keyword("iteration/adanet/iteration",
self.test_subdirectory))
subnetwork_subdir = os.path.join(self.test_subdirectory,
"subnetwork/t0_dnn")
self.assertAlmostEqual(
3., _check_eventfile_for_keyword("scalar", subnetwork_subdir), places=3)
self.assertEqual((3, 3, 1),
_check_eventfile_for_keyword("image/image/0",
subnetwork_subdir))
self.assertAlmostEqual(
5.,
_check_eventfile_for_keyword("nested/scalar", subnetwork_subdir),
places=3)
ensemble_subdir = os.path.join(
self.test_subdirectory, "ensemble/t0_dnn_grow_complexity_regularized")
self.assertAlmostEqual(
ensemble_loss,
_check_eventfile_for_keyword(
"adanet_loss/adanet/adanet_weighted_ensemble", ensemble_subdir),
places=3)
self.assertAlmostEqual(
0.,
_check_eventfile_for_keyword(
"complexity_regularization/adanet/adanet_weighted_ensemble",
ensemble_subdir),
places=3)
self.assertAlmostEqual(
0.,
_check_eventfile_for_keyword(
"mixture_weight_norms/adanet/"
"adanet_weighted_ensemble/subnetwork_0", ensemble_subdir),
places=3)
@tf_compat.skip_for_tf2
def test_disable_summaries(self):
"""Tests that summaries can be disabled for ensembles and subnetworks."""
run_config = tf.estimator.RunConfig(
tf_random_seed=42, log_step_count_steps=2, save_summary_steps=2)
subnetwork_generator = SimpleGenerator(
[_DNNBuilder("dnn", mixture_weight_learning_rate=.001)])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=10,
use_bias=True,
config=run_config,
model_dir=self.test_subdirectory,
enable_ensemble_summaries=False,
enable_subnetwork_summaries=False,
)
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.train(input_fn=train_input_fn, max_steps=3)
ensemble_loss = 1.
self.assertAlmostEqual(
ensemble_loss,
_check_eventfile_for_keyword("loss", self.test_subdirectory),
places=3)
self.assertIsNotNone(
_check_eventfile_for_keyword("global_step/sec", self.test_subdirectory))
self.assertEqual(
0.,
_check_eventfile_for_keyword("iteration/adanet/iteration",
self.test_subdirectory))
subnetwork_subdir = os.path.join(self.test_subdirectory,
"subnetwork/t0_dnn")
with self.assertRaises(ValueError):
_check_eventfile_for_keyword("scalar", subnetwork_subdir)
with self.assertRaises(ValueError):
_check_eventfile_for_keyword("image/image/0", subnetwork_subdir)
with self.assertRaises(ValueError):
_check_eventfile_for_keyword("nested/scalar", subnetwork_subdir)
ensemble_subdir = os.path.join(
self.test_subdirectory, "ensemble/t0_dnn_grow_complexity_regularized")
with self.assertRaises(ValueError):
_check_eventfile_for_keyword(
"adanet_loss/adanet/adanet_weighted_ensemble", ensemble_subdir)
with self.assertRaises(ValueError):
_check_eventfile_for_keyword(
"complexity_regularization/adanet/adanet_weighted_ensemble",
ensemble_subdir)
with self.assertRaises(ValueError):
_check_eventfile_for_keyword(
"mixture_weight_norms/adanet/"
"adanet_weighted_ensemble/subnetwork_0", ensemble_subdir)
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
{
"testcase_name": "none_metrics",
"head": _EvalMetricsHead(None),
"want_summaries": [],
"want_loss": -1.791,
}, {
"testcase_name":
"metrics_fn",
"head":
_EvalMetricsHead(None),
"metric_fn":
lambda predictions: {
"avg": tf_compat.v1.metrics.mean(predictions)
},
"want_summaries": ["avg"],
"want_loss":
-1.791,
}, {
"testcase_name":
"keras_metrics_fn",
"head":
_EvalMetricsHead(None),
"metric_fn":
lambda predictions: {
"avg": _mean_keras_metric(predictions)
},
"want_summaries": ["avg"],
"want_loss":
-1.791,
}, {
"testcase_name": "empty_metrics",
"head": _EvalMetricsHead({}),
"want_summaries": [],
"want_loss": -1.791,
}, {
"testcase_name":
"evaluation_name",
"head":
_EvalMetricsHead({}),
"evaluation_name":
"continuous",
"want_summaries": [],
"want_loss":
-1.791,
"global_subdir":
"eval_continuous",
"subnetwork_subdir":
"subnetwork/t0_dnn/eval_continuous",
"ensemble_subdir":
"ensemble/t0_dnn_grow_complexity_regularized/eval_continuous",
}, {
"testcase_name":
"regression_head",
"head":
regression_head.RegressionHead(
loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
"want_summaries": ["average_loss"],
"want_loss":
.256,
}, {
"testcase_name":
"binary_classification_head",
"head":
binary_class_head.BinaryClassHead(
loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
"learning_rate":
.6,
"want_summaries": ["average_loss", "accuracy", "recall"],
"want_loss":
0.122,
}, {
"testcase_name":
"all_metrics",
"head":
_EvalMetricsHead({
"float32":
_FakeMetric(1., tf.float32),
"float64":
_FakeMetric(1., tf.float64),
"serialized_summary":
_FakeMetric(
tf_compat.v1.Summary(value=[
tf_compat.v1.Summary.Value(
tag="summary_tag", simple_value=1.)
]).SerializeToString(), tf.string),
}),
"want_summaries": [
"float32",
"float64",
"serialized_summary/0",
],
"want_loss":
-1.791,
})
# pylint: enable=g-long-lambda
def test_eval_metrics(
self,
head,
want_loss,
want_summaries,
evaluation_name=None,
metric_fn=None,
learning_rate=.01,
global_subdir="eval",
subnetwork_subdir="subnetwork/t0_dnn/eval",
ensemble_subdir="ensemble/t0_dnn_grow_complexity_regularized/eval"):
"""Test that AdaNet evaluation metrics get persisted correctly."""
seed = 42
run_config = tf.estimator.RunConfig(tf_random_seed=seed)
subnetwork_generator = SimpleGenerator([
_DNNBuilder(
"dnn",
learning_rate=learning_rate,
mixture_weight_learning_rate=0.,
layer_size=8,
seed=seed)
])
estimator = Estimator(
head=head,
subnetwork_generator=subnetwork_generator,
max_iteration_steps=100,
metric_fn=metric_fn,
config=run_config,
model_dir=self.test_subdirectory)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
estimator.train(input_fn=train_input_fn, max_steps=100)
metrics = estimator.evaluate(
input_fn=train_input_fn, steps=1, name=evaluation_name)
self.assertAlmostEqual(want_loss, metrics["loss"], places=3)
global_subdir = os.path.join(self.test_subdirectory, global_subdir)
subnetwork_subdir = os.path.join(self.test_subdirectory, subnetwork_subdir)
ensemble_subdir = os.path.join(self.test_subdirectory, ensemble_subdir)
self.assertAlmostEqual(
want_loss,
_check_eventfile_for_keyword("loss", subnetwork_subdir),
places=3)
for metric in want_summaries:
self.assertIsNotNone(
_check_eventfile_for_keyword(metric, subnetwork_subdir),
msg="{} should be under 'eval'.".format(metric))
for dir_ in [global_subdir, ensemble_subdir]:
self.assertAlmostEqual(metrics["loss"],
_check_eventfile_for_keyword("loss", dir_))
self.assertEqual([b"| dnn |"],
_check_eventfile_for_keyword(
"architecture/adanet/ensembles/0", dir_))
for metric in want_summaries:
self.assertTrue(
_check_eventfile_for_keyword(metric, dir_) > 0.,
msg="{} should be under 'eval'.".format(metric))
class EstimatorMembersOverrideTest(tu.AdanetTestCase):
"""Tests b/77494544 fix."""
def test_assert_members_are_not_overridden(self):
"""Assert that AdaNet estimator does not break other estimators."""
config = tf.estimator.RunConfig()
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
adanet = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=10,
use_bias=True,
config=config)
self.assertIsNotNone(adanet)
if hasattr(tf.estimator, "LinearEstimator"):
estimator_fn = tf.estimator.LinearEstimator
else:
estimator_fn = tf.contrib.estimator.LinearEstimator
linear = estimator_fn(
head=tu.head(), feature_columns=[tf.feature_column.numeric_column("x")])
self.assertIsNotNone(linear)
def _dummy_feature_dict_input_fn(features, labels):
"""Returns an input_fn that returns feature and labels `Tensors`."""
def _input_fn():
input_features = {}
for key, feature in features.items():
input_features[key] = tf.constant(feature, name=key)
input_labels = tf.constant(labels, name="labels")
return input_features, input_labels
return _input_fn
class EstimatorDifferentFeaturesPerModeTest(tu.AdanetTestCase):
"""Tests b/109751254."""
@parameterized.named_parameters(
{
"testcase_name": "extra_train_features",
"train_features": {
"x": [[1., 0.]],
"extra": [[1., 0.]],
},
"eval_features": {
"x": [[1., 0.]],
},
"predict_features": {
"x": [[1., 0.]],
},
}, {
"testcase_name": "extra_eval_features",
"train_features": {
"x": [[1., 0.]],
},
"eval_features": {
"x": [[1., 0.]],
"extra": [[1., 0.]],
},
"predict_features": {
"x": [[1., 0.]],
},
}, {
"testcase_name": "extra_predict_features",
"train_features": {
"x": [[1., 0.]],
},
"eval_features": {
"x": [[1., 0.]],
},
"predict_features": {
"x": [[1., 0.]],
"extra": [[1., 0.]],
},
})
def test_different_features_per_mode(self, train_features, eval_features,
predict_features):
"""Tests tests different numbers of features per mode."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=1,
use_bias=True,
model_dir=self.test_subdirectory,
config=run_config)
labels = [[1.]]
train_input_fn = _dummy_feature_dict_input_fn(train_features, labels)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=2)
# Evaluate.
eval_input_fn = _dummy_feature_dict_input_fn(eval_features, labels)
estimator.evaluate(input_fn=eval_input_fn, steps=1)
# Predict.
predict_input_fn = _dummy_feature_dict_input_fn(predict_features, None)
estimator.predict(input_fn=predict_input_fn)
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
features = {}
for key, value in predict_features.items():
features[key] = tf.constant(value)
return tf.estimator.export.ServingInputReceiver(
features=features, receiver_tensors=serialized_example)
export_saved_model_fn = getattr(estimator, "export_saved_model", None)
if not callable(export_saved_model_fn):
export_saved_model_fn = estimator.export_savedmodel
export_saved_model_fn(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn)
class EstimatorExportSavedModelTest(tu.AdanetTestCase):
def test_export_saved_model_for_predict(self):
"""Tests SavedModel exporting functionality for predict (b/110435640)."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=1,
use_bias=True,
model_dir=self.test_subdirectory,
config=run_config)
features = {"x": [[1., 0.]]}
labels = [[1.]]
train_input_fn = _dummy_feature_dict_input_fn(features, labels)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=2)
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
for key, value in features.items():
features[key] = tf.constant(value)
return tf.estimator.export.ServingInputReceiver(
features=features, receiver_tensors=serialized_example)
estimator.export_saved_model(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn,
experimental_mode=tf.estimator.ModeKeys.PREDICT)
@test_util.run_in_graph_and_eager_modes
def test_export_saved_model_for_eval(self):
"""Tests SavedModel exporting functionality for eval (b/110991908)."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator(
[_DNNBuilder("dnn", layer_size=8, learning_rate=1.)])
estimator = Estimator(
head=binary_class_head.BinaryClassHead(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=100,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=300)
metrics = estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertAlmostEqual(.067, metrics["average_loss"], places=3)
self.assertAlmostEqual(1., metrics["recall"], places=3)
self.assertAlmostEqual(1., metrics["accuracy"], places=3)
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
return export.SupervisedInputReceiver(
features={"x": tf.constant(XOR_FEATURES)},
labels=tf.constant(XOR_LABELS),
receiver_tensors=serialized_example)
export_dir_base = os.path.join(self.test_subdirectory, "export")
try:
estimator.export_saved_model(
export_dir_base=export_dir_base,
serving_input_receiver_fn=serving_input_fn,
experimental_mode=tf.estimator.ModeKeys.EVAL)
except AttributeError:
pass
try:
tf.contrib.estimator.export_saved_model_for_mode(
estimator,
export_dir_base=export_dir_base,
input_receiver_fn=serving_input_fn,
mode=tf.estimator.ModeKeys.EVAL)
except AttributeError:
pass
subdir = tf.io.gfile.listdir(export_dir_base)[0]
with context.graph_mode(), self.test_session() as sess:
meta_graph_def = tf_compat.v1.saved_model.loader.load(
sess, ["eval"], os.path.join(export_dir_base, subdir))
signature_def = meta_graph_def.signature_def.get("eval")
# Read zero metric.
self.assertAlmostEqual(
0.,
sess.run(
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/average_loss/value"])),
places=3)
# Run metric update op.
sess.run((tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/average_loss/update_op"]),
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/accuracy/update_op"]),
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/recall/update_op"])))
# Read metric again; it should no longer be zero.
self.assertAlmostEqual(
0.067,
sess.run(
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/average_loss/value"])),
places=3)
self.assertAlmostEqual(
1.,
sess.run(
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/recall/value"])),
places=3)
self.assertAlmostEqual(
1.,
sess.run(
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/accuracy/value"])),
places=3)
def test_export_saved_model_always_uses_replication_placement(self):
"""Tests b/137675014."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator(
[_DNNBuilder("dnn1"), _DNNBuilder("dnn2")])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
model_dir=self.test_subdirectory,
config=run_config,
experimental_placement_strategy=RoundRobinStrategy())
features = {"x": [[1., 0.]]}
labels = [[1.]]
train_input_fn = _dummy_feature_dict_input_fn(features, labels)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=2)
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
tensor_features = {}
for key, value in features.items():
tensor_features[key] = tf.constant(value)
return tf.estimator.export.ServingInputReceiver(
features=tensor_features, receiver_tensors=serialized_example)
# Fake the number of PS replicas so RoundRobinStrategy will be used.
estimator._config._num_ps_replicas = 2
# If we're still using RoundRobinStrategy, this call will fail by trying
# to place ops on non-existent devices.
# Check all three export methods.
estimator.export_saved_model(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn,
experimental_mode=tf.estimator.ModeKeys.PREDICT)
try:
estimator.export_savedmodel(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn)
except AttributeError as error:
# Log deprecation errors.
logging.warning("Testing estimator#export_savedmodel: %s", error)
estimator.experimental_export_all_saved_models(
export_dir_base=self.test_subdirectory,
input_receiver_fn_map={
tf.estimator.ModeKeys.PREDICT: serving_input_fn,
})
class EstimatorReportTest(tu.AdanetTestCase):
"""Tests report generation and usage."""
def compare_report_lists(self, report_list1, report_list2):
    # Essentially assertEqual(report_list1, report_list2), except that only the
    # metric keys present in the first list's reports are compared.
def make_qualified_name(iteration_number, name):
return "iteration_{}/{}".format(iteration_number, name)
report_dict_1 = {
make_qualified_name(report.iteration_number, report.name): report
for report in report_list1
}
report_dict_2 = {
make_qualified_name(report.iteration_number, report.name): report
for report in report_list2
}
self.assertEqual(len(report_list1), len(report_list2))
for qualified_name in report_dict_1.keys():
report_1 = report_dict_1[qualified_name]
report_2 = report_dict_2[qualified_name]
self.assertEqual(
report_1.hparams,
report_2.hparams,
msg="{} vs. {}".format(report_1, report_2))
self.assertEqual(
report_1.attributes,
report_2.attributes,
msg="{} vs. {}".format(report_1, report_2))
self.assertEqual(
report_1.included_in_final_ensemble,
report_2.included_in_final_ensemble,
msg="{} vs. {}".format(report_1, report_2))
for metric_key, metric_value in report_1.metrics.items():
self.assertEqual(
metric_value,
report_2.metrics[metric_key],
msg="{} vs. {}".format(report_1, report_2))
@parameterized.named_parameters(
{
"testcase_name": "one_iteration_one_subnetwork",
"subnetwork_builders": [_DNNBuilder("dnn", layer_size=1),],
"num_iterations": 1,
"want_materialized_iteration_reports": [[
MaterializedReport(
iteration_number=0,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
]],
"want_previous_ensemble_reports": [],
"want_all_reports": [],
},
{
"testcase_name": "one_iteration_three_subnetworks",
"subnetwork_builders": [
# learning_rate is set to 0 for all but one Builder
# to make sure that only one of them can learn.
_DNNBuilder(
"dnn_1",
layer_size=1,
learning_rate=0.,
mixture_weight_learning_rate=0.),
_DNNBuilder(
"dnn_2",
layer_size=2,
learning_rate=0.,
mixture_weight_learning_rate=0.),
# fixing the match for dnn_3 to win.
_DNNBuilder("dnn_3", layer_size=3),
],
"num_iterations": 1,
"want_materialized_iteration_reports": [[
MaterializedReport(
iteration_number=0,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
]],
"want_previous_ensemble_reports": [],
"want_all_reports": [],
},
{
"testcase_name":
"three_iterations_one_subnetwork",
"subnetwork_builders": [_DNNBuilder("dnn", layer_size=1),],
"num_iterations":
3,
"want_materialized_iteration_reports": [
[
MaterializedReport(
iteration_number=0,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
)
],
[
MaterializedReport(
iteration_number=1,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
[
MaterializedReport(
iteration_number=2,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=2,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
],
"want_previous_ensemble_reports": [
MaterializedReport(
iteration_number=0,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
MaterializedReport(
iteration_number=1,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
"want_all_reports": [
MaterializedReport(
iteration_number=0,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
MaterializedReport(
iteration_number=1,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
},
{
"testcase_name":
"three_iterations_three_subnetworks",
"subnetwork_builders": [
# learning_rate is set to 0 for all but one Builder
# to make sure that only one of them can learn.
_DNNBuilder(
"dnn_1",
layer_size=1,
learning_rate=0.,
mixture_weight_learning_rate=0.),
_DNNBuilder(
"dnn_2",
layer_size=2,
learning_rate=0.,
mixture_weight_learning_rate=0.),
# fixing the match for dnn_3 to win in every iteration.
_DNNBuilder("dnn_3", layer_size=3),
],
"num_iterations":
3,
"want_materialized_iteration_reports": [
[
MaterializedReport(
iteration_number=0,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
[
MaterializedReport(
iteration_number=1,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
[
MaterializedReport(
iteration_number=2,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=2,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=2,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=2,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
],
"want_previous_ensemble_reports": [
MaterializedReport(
iteration_number=0,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
MaterializedReport(
iteration_number=1,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
"want_all_reports": [
MaterializedReport(
iteration_number=0,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
MaterializedReport(
iteration_number=1,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
},
)
def test_report_generation_and_usage(self, subnetwork_builders,
num_iterations,
want_materialized_iteration_reports,
want_previous_ensemble_reports,
want_all_reports):
    # Stores the iteration_number, previous_ensemble_reports and all_reports
    # arguments in the spied_iteration_reports dictionary, overwriting what
    # was seen in previous iterations.
spied_iteration_reports = {}
def _spy_fn(iteration_number, previous_ensemble_reports, all_reports):
spied_iteration_reports[iteration_number] = {
"previous_ensemble_reports": previous_ensemble_reports,
"all_reports": all_reports,
}
subnetwork_generator = _FakeGenerator(
spy_fn=_spy_fn, subnetwork_builders=subnetwork_builders)
max_iteration_steps = 5
max_steps = max_iteration_steps * num_iterations + 1
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=max_iteration_steps,
use_bias=True,
report_materializer=ReportMaterializer(
input_fn=train_input_fn, steps=1),
model_dir=self.test_subdirectory)
report_accessor = estimator._report_accessor
estimator.train(input_fn=train_input_fn, max_steps=max_steps)
materialized_iteration_reports = list(
report_accessor.read_iteration_reports())
self.assertEqual(num_iterations, len(materialized_iteration_reports))
for i in range(num_iterations):
want_materialized_reports = (want_materialized_iteration_reports[i])
materialized_reports = materialized_iteration_reports[i]
self.compare_report_lists(want_materialized_reports, materialized_reports)
# Compute argmin adanet loss.
argmin_adanet_loss = 0
smallest_known_adanet_loss = float("inf")
for j, materialized_subnetwork_report in enumerate(materialized_reports):
if (smallest_known_adanet_loss >
materialized_subnetwork_report.metrics["adanet_loss"]):
smallest_known_adanet_loss = (
materialized_subnetwork_report.metrics["adanet_loss"])
argmin_adanet_loss = j
# Check that the subnetwork with the lowest adanet loss is the one
# that is included in the final ensemble.
      for j, materialized_report in enumerate(materialized_reports):
        self.assertEqual(j == argmin_adanet_loss,
                         materialized_report.included_in_final_ensemble)
# Check the arguments passed into the generate_candidates method of the
# Generator.
iteration_report = spied_iteration_reports[num_iterations - 1]
self.compare_report_lists(want_previous_ensemble_reports,
iteration_report["previous_ensemble_reports"])
self.compare_report_lists(want_all_reports, iteration_report["all_reports"])
class EstimatorForceGrowTest(tu.AdanetTestCase):
"""Tests the force_grow override.
Uses linear subnetworks with the same seed. They will produce identical
outputs, so unless the `force_grow` override is set, none of the new
subnetworks will improve the AdaNet objective, and AdaNet will not add them to
the ensemble.
"""
@parameterized.named_parameters(
{
"testcase_name": "one_builder_no_force_grow",
"builders":
[_LinearBuilder("linear", mixture_weight_learning_rate=0.)],
"force_grow": False,
"want_subnetworks": 1,
}, {
"testcase_name": "one_builder",
"builders":
[_LinearBuilder("linear", mixture_weight_learning_rate=0.)],
"force_grow": True,
"want_subnetworks": 2,
}, {
"testcase_name": "two_builders",
"builders": [
_LinearBuilder("linear", mixture_weight_learning_rate=0.),
_LinearBuilder("linear2", mixture_weight_learning_rate=0.)
],
"force_grow": True,
"want_subnetworks": 2,
}, {
"testcase_name":
"two_builders_with_evaluator",
"builders": [
_LinearBuilder("linear", mixture_weight_learning_rate=0.),
_LinearBuilder("linear2", mixture_weight_learning_rate=0.)
],
"force_grow":
True,
"evaluator":
Evaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1),
"want_subnetworks":
3,
})
def test_force_grow(self,
builders,
force_grow,
want_subnetworks,
evaluator=None):
"""Train entire estimator lifecycle using XOR dataset."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator(builders)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
evaluator=evaluator,
force_grow=force_grow,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
# Train for four iterations.
estimator.train(input_fn=train_input_fn, max_steps=3)
# Evaluate.
eval_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertEqual(
want_subnetworks,
str(eval_results["architecture/adanet/ensembles"]).count(" linear "))
class EstimatorDebugTest(tu.AdanetTestCase):
"""Tests b/125483534. Detect NaNs in input_fns."""
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
{
"testcase_name":
"nan_features",
"head":
regression_head.RegressionHead(
name="y", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
"input_fn":
lambda: ({
"x": tf.math.log([[1., 0.]])
}, tf.zeros([1, 1]))
}, {
"testcase_name":
"nan_label",
"head":
regression_head.RegressionHead(
name="y", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
"input_fn":
lambda: ({
"x": tf.ones([1, 2])
}, tf.math.log([[0.]]))
}, {
"testcase_name":
"nan_labels_dict",
"head":
multi_head_lib.MultiHead(heads=[
regression_head.RegressionHead(
name="y", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
]),
"input_fn":
lambda: ({
"x": tf.ones([1, 2])
}, {
"y": tf.math.log([[0.]])
})
})
# pylint: enable=g-long-lambda
def test_nans_from_input_fn(self, head, input_fn):
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
estimator = Estimator(
head=head,
subnetwork_generator=subnetwork_generator,
max_iteration_steps=3,
model_dir=self.test_subdirectory,
debug=True)
with self.assertRaises(tf.errors.InvalidArgumentError):
estimator.train(input_fn=input_fn, max_steps=3)
class EstimatorEvaluateDuringTrainHookTest(tu.AdanetTestCase):
"""Tests b/129000842 with a hook that calls estimator.evaluate()."""
def test_train(self):
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
class EvalTrainHook(tf.estimator.SessionRunHook):
def end(self, session):
estimator.evaluate(input_fn=train_input_fn, steps=1)
    # This should not loop forever.
estimator.train(
input_fn=train_input_fn, max_steps=3, hooks=[EvalTrainHook()])
class CheckpointSaverHookDuringTrainingTest(tu.AdanetTestCase):
"""Tests b/139057887."""
def test_checkpoint_saver_hooks_not_decorated_during_training(self):
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
saver_hook = tf_compat.v1.train.CheckpointSaverHook(
checkpoint_dir=self.test_subdirectory, save_steps=10)
listener = tf_compat.v1.train.CheckpointSaverListener()
estimator.train(
input_fn=train_input_fn,
max_steps=3,
hooks=[saver_hook],
saving_listeners=[listener])
# If CheckpointSaverHook was not recognized during training then all
# saving_listeners would be attached to a default CheckpointSaverHook that
# Estimator creates.
self.assertLen(saver_hook._listeners, 1)
self.assertIs(saver_hook._listeners[0], listener)
class EstimatorTFLearnRunConfigTest(tu.AdanetTestCase):
"""Tests b/129483642 for tf.contrib.learn.RunConfig.
Checks that TF_CONFIG is overwritten correctly when no cluster is specified
in the RunConfig and the only task is of type chief.
"""
def test_train(self):
try:
run_config = tf.contrib.learn.RunConfig(tf_random_seed=42)
# Removed in TF 1.15 (nightly). See
# https://travis-ci.org/tensorflow/adanet/jobs/583471908
_ = run_config._session_creation_timeout_secs
except AttributeError:
self.skipTest("There is no tf.contrib in TF 2.0.")
try:
tf_config = {
"task": {
"type": "chief",
"index": 0
},
}
os.environ["TF_CONFIG"] = json.dumps(tf_config)
run_config = tf.contrib.learn.RunConfig(tf_random_seed=42)
run_config._is_chief = True # pylint: disable=protected-access
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
# Will fail if TF_CONFIG is not overwritten correctly in
# Estimator#prepare_next_iteration.
estimator.train(input_fn=train_input_fn, max_steps=3)
finally:
# Revert TF_CONFIG environment variable in order to not break other tests.
del os.environ["TF_CONFIG"]
class EstimatorReplayTest(tu.AdanetTestCase):
@parameterized.named_parameters(
{
"testcase_name": "no_evaluator",
"evaluator": None,
"replay_evaluator": None,
"want_architecture": " dnn3 | dnn3 | dnn ",
}, {
"testcase_name":
"evaluator",
"evaluator":
Evaluator(
input_fn=tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS),
steps=1),
"replay_evaluator":
Evaluator(
input_fn=tu.dummy_input_fn([[0., 0.], [0., 0], [0., 0.],
[0., 0.]], [[0], [0], [0], [0]]),
steps=1),
"want_architecture":
" dnn3 | dnn3 | dnn ",
})
def test_replay(self, evaluator, replay_evaluator, want_architecture):
"""Train entire estimator lifecycle using Replay."""
original_model_dir = os.path.join(self.test_subdirectory, "original")
run_config = tf.estimator.RunConfig(
tf_random_seed=42, model_dir=original_model_dir)
subnetwork_generator = SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3),
_DNNBuilder("dnn3", layer_size=5),
])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=10,
evaluator=evaluator,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
# Train for three iterations.
estimator.train(input_fn=train_input_fn, max_steps=30)
# Evaluate.
eval_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertIn(want_architecture,
str(eval_results["architecture/adanet/ensembles"]))
replay_run_config = tf.estimator.RunConfig(
tf_random_seed=42,
model_dir=os.path.join(self.test_subdirectory, "replayed"))
# Use different features and labels to represent a shift in the data
# distribution.
different_features = [[0., 0.], [0., 0], [0., 0.], [0., 0.]]
different_labels = [[0], [0], [0], [0]]
replay_estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=10,
evaluator=replay_evaluator,
config=replay_run_config,
replay_config=replay.Config(best_ensemble_indices=[2, 3, 1]))
train_input_fn = tu.dummy_input_fn(different_features, different_labels)
# Train for three iterations.
replay_estimator.train(input_fn=train_input_fn, max_steps=30)
# Evaluate.
eval_results = replay_estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertIn(want_architecture,
str(eval_results["architecture/adanet/ensembles"]))
if __name__ == "__main__":
tf.test.main()
| 1.421875 | 1 |
ua_roomseeker/uploader.py | nyg1/classroom-finder | 1 | 7222 | <gh_stars>1-10
from seeker.models import Building, Classroom, Time
import json
import os
os.chdir('../data')
fileList = os.listdir()
#loops through each json file
for jsonfile in fileList:
#opens the jsonfile and loads the data
    with open(jsonfile, 'r') as f:
        data = f.read()
    jsondata = json.loads(data)
#create the building
building = Building(BuildingName=os.path.splitext(jsonfile)[0])
building.save()
for day in jsondata:
for room in jsondata[day].keys():
#creates each classroom, adding one only if one doesn't exist
classroom = Classroom.objects.get_or_create(building = Building.objects.get(BuildingName = os.path.splitext(jsonfile)[0]), ClassroomName = os.path.splitext(jsonfile)[0] + ' - ' + room)
for time in jsondata[day][room]:
#creates each time
time = Time(building=Building.objects.get(BuildingName = os.path.splitext(jsonfile)[0]), classroom=Classroom.objects.get(ClassroomName = os.path.splitext(jsonfile)[0] + ' - ' + room), DayofWeek=day, TimeValue=time)
time.save()
#IMPORTANT!!!!!!!
# This program must be run inside a python manage.py shell for it to work, in the future a fix may be found,
# but for the time being, follow these steps:
# 1. open powershell and navigate to the folder that contains this file
# 2. type in "python manage.py shell"
# 3. copy and paste the code into the shell and press enter
# 4. wait time is around 5 minutes
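# A standalone alternative (untested sketch): instead of pasting this file into
# "python manage.py shell", Django can be bootstrapped at the top of the script
# before the ORM imports. The settings module name below is an assumption --
# replace it with this project's actual settings path.
#
#   import os
#   import django
#   os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings')  # assumed name
#   django.setup()
#   # ...then the imports and loops above run as a normal "python uploader.py" script.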
| 2.828125 | 3 |
dash_daq/Slider.py | luiztauffer/dash-daq | 0 | 7223 | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Slider(Component):
"""A Slider component.
A slider component with support for
a target value.
Keyword arguments:
- id (string; optional):
The ID used to identify this component in Dash callbacks.
- className (string; optional):
Additional CSS class for the root DOM node.
- color (dict; default colors.DARKER_PRIMARY):
Color configuration for the slider's track.
`color` is a string | dict with keys:
- default (string; optional):
Fallback color to use when color.ranges has gaps.
- gradient (boolean; optional):
Display ranges as a gradient between given colors. Requires
color.ranges to be contiguous along the entirety of the
gauge's range of values.
- ranges (dict; optional):
Define multiple color ranges on the slider's track. The key
determines the color of the range and the value is the
start,end of the range itself.
`ranges` is a dict with keys:
- color (list of numbers; optional)
- disabled (boolean; optional):
If True, the handles can't be moved.
- dots (boolean; optional):
When the step value is greater than 1, you can set the dots to
True if you want to render the slider with dots. Note: dots are
disabled automatically when using color.ranges.
- handleLabel (dict; optional):
Configuration of the slider handle's label. Passing falsy value
will disable the label.
`handleLabel` is a string | dict with keys:
- color (string; optional)
- label (string; optional)
- showCurrentValue (boolean; optional)
- style (dict; optional)
- included (boolean; optional):
If the value is True, it means a continuous value is included.
Otherwise, it is an independent value.
- labelPosition (a value equal to: 'top', 'bottom'; default 'bottom'):
Where the component label is positioned.
- marks (dict; optional):
Marks on the slider. The key determines the position, and the
value determines what will show. If you want to set the style of a
specific mark point, the value should be an object which contains
style and label properties.
`marks` is a dict with keys:
- number (dict; optional)
`number` is a string
Or dict with keys:
- label (string; optional)
- style (dict; optional)
- max (number; optional):
Maximum allowed value of the slider.
- min (number; default 0):
Minimum allowed value of the slider.
- persisted_props (list of a value equal to: 'value's; default ['value']):
Properties whose user interactions will persist after refreshing
the component or the page. Since only `value` is allowed this prop
can normally be ignored.
- persistence (boolean | string | number; optional):
Used to allow user interactions in this component to be persisted
when the component - or the page - is refreshed. If `persisted` is
truthy and hasn't changed from its previous value, a `value` that
the user has changed while using the app will keep that change, as
long as the new `value` also matches what was given originally.
Used in conjunction with `persistence_type`.
- persistence_type (a value equal to: 'local', 'session', 'memory'; default 'local'):
Where persisted user changes will be stored: memory: only kept in
memory, reset on page refresh. local: window.localStorage, data is
kept after the browser quit. session: window.sessionStorage, data
is cleared once the browser quit.
- size (number; default 265):
Size of the slider in pixels.
- step (number; optional):
Value by which increments or decrements are made.
- targets (dict; optional):
Targets on the slider. The key determines the position, and the
value determines what will show. If you want to set the style of a
specific target point, the value should be an object which
contains style and label properties.
`targets` is a dict with keys:
- number (dict; optional)
`number` is a string
Or dict with keys:
- color (string; optional)
- label (string; optional)
- showCurrentValue (boolean; optional)
- style (dict; optional)
- theme (dict; default light):
Theme configuration to be set by a ThemeProvider.
- updatemode (a value equal to: 'mouseup', 'drag'; default 'mouseup'):
Determines when the component should update its value. If
`mouseup`, then the slider will only trigger its value when the
user has finished dragging the slider. If `drag`, then the slider
will update its value continuously as it is being dragged. Only
use `drag` if your updates are fast.
- value (number; optional):
The value of the input.
- vertical (boolean; optional):
If True, the slider will be vertical."""
@_explicitize_args
def __init__(self, id=Component.UNDEFINED, marks=Component.UNDEFINED, color=Component.UNDEFINED, value=Component.UNDEFINED, className=Component.UNDEFINED, labelPosition=Component.UNDEFINED, disabled=Component.UNDEFINED, dots=Component.UNDEFINED, included=Component.UNDEFINED, min=Component.UNDEFINED, max=Component.UNDEFINED, step=Component.UNDEFINED, vertical=Component.UNDEFINED, size=Component.UNDEFINED, targets=Component.UNDEFINED, theme=Component.UNDEFINED, handleLabel=Component.UNDEFINED, updatemode=Component.UNDEFINED, persistence=Component.UNDEFINED, persisted_props=Component.UNDEFINED, persistence_type=Component.UNDEFINED, **kwargs):
self._prop_names = ['id', 'className', 'color', 'disabled', 'dots', 'handleLabel', 'included', 'labelPosition', 'marks', 'max', 'min', 'persisted_props', 'persistence', 'persistence_type', 'size', 'step', 'targets', 'theme', 'updatemode', 'value', 'vertical']
self._type = 'Slider'
self._namespace = 'dash_daq'
self._valid_wildcard_attributes = []
self.available_properties = ['id', 'className', 'color', 'disabled', 'dots', 'handleLabel', 'included', 'labelPosition', 'marks', 'max', 'min', 'persisted_props', 'persistence', 'persistence_type', 'size', 'step', 'targets', 'theme', 'updatemode', 'value', 'vertical']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Slider, self).__init__(**args)
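# Illustrative usage sketch (not produced by the Dash generator; added for
# documentation). It shows a minimal Dash 2.x app wiring this Slider to a
# callback. The ids, layout and values below are assumptions for demonstration
# only, and running it requires the `dash` and `dash_daq` packages.
if __name__ == '__main__':
    import dash
    from dash import html
    from dash.dependencies import Input, Output
    import dash_daq as daq

    app = dash.Dash(__name__)
    app.layout = html.Div([
        daq.Slider(
            id='demo-slider',
            min=0,
            max=100,
            value=40,
            handleLabel={'showCurrentValue': True, 'label': 'VALUE'},
        ),
        html.Div(id='demo-slider-output'),
    ])

    @app.callback(Output('demo-slider-output', 'children'),
                  Input('demo-slider', 'value'))
    def show_value(value):
        # Echo the current slider value beneath the component.
        return 'Slider value: {}'.format(value)

    app.run_server(debug=True)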
| 2.578125 | 3 |
src/util/__init__.py | ooshyun/filterdesign | 1 | 7224 | <reponame>ooshyun/filterdesign
"""Utility function for process to raw data
"""
from .util import (
cvt_pcm2wav,
cvt_float2fixed,
cvt_char2num,
plot_frequency_response,
plot_pole_zero_analysis,
)
from .fi import fi
__all__ = [
"fi",
"cvt_pcm2wav",
"cvt_float2fixed",
"cvt_char2num",
"plot_frequency_response",
"plot_pole_zero_analysis",
]
| 1.101563 | 1 |
infra/apps/catalog/tests/views/distribution_upload_tests.py | datosgobar/infra.datos.gob.ar | 1 | 7225 | <gh_stars>1-10
import pytest
from django.core.files import File
from django.urls import reverse
from freezegun import freeze_time
from infra.apps.catalog.tests.helpers.open_catalog import open_catalog
pytestmark = pytest.mark.django_db
@pytest.fixture(autouse=True)
def give_user_edit_rights(user, node):
node.admins.add(user)
def _call(client, distribution):
return client.get(reverse('catalog:distribution_uploads',
kwargs={'node_id': distribution.catalog.id,
'identifier': distribution.identifier}))
def test_older_versions_listed(logged_client, distribution_upload):
distribution = distribution_upload.distribution
with freeze_time('2019-01-01'):
with open_catalog('test_data.csv') as fd:
other = distribution.distributionupload_set \
.create(file=File(fd))
response = _call(logged_client, distribution)
assert str(other.uploaded_at) in response.content.decode('utf-8')
def test_catalog_identifier_in_page(logged_client, distribution):
response = _call(logged_client, distribution)
assert distribution.catalog.identifier in response.content.decode('utf-8')
| 2.15625 | 2 |
examples/model_zoo/build_binaries.py | Embracing/unrealcv | 1,617 | 7226 | import subprocess, os
ue4_win = r"C:\Program Files\Epic Games\UE_4.16"
ue4_linux = "/home/qiuwch/workspace/UE416"
ue4_mac = '/Users/Shared/Epic Games/UE_4.16'
win_uprojects = [
r'C:\qiuwch\workspace\uprojects\UE4RealisticRendering\RealisticRendering.uproject',
r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene1\ArchinteriorsVol2Scene1.uproject',
r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene2\ArchinteriorsVol2Scene2.uproject',
r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene3\ArchinteriorsVol2Scene3.uproject',
r'C:\qiuwch\workspace\uprojects\UE4UrbanCity\UrbanCity.uproject',
r'D:\workspace\uprojects\Matinee\Matinee.uproject',
r'D:\workspace\uprojects\PhotorealisticCharacter\PhotorealisticCharacter2.uproject',
]
linux_uprojects = [
os.path.expanduser('~/workspace/uprojects/UE4RealisticRendering/RealisticRendering.uproject'),
os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene1/ArchinteriorsVol2Scene1.uproject'),
os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene2/ArchinteriorsVol2Scene2.uproject'),
os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene3/ArchinteriorsVol2Scene3.uproject'),
os.path.expanduser("~/workspace/uprojects/UE4UrbanCity/UrbanCity.uproject"),
]
mac_uprojects = [
os.path.expanduser('~/workspace/UnrealEngine/Templates/FP_FirstPerson/FP_FirstPerson.uproject'),
os.path.expanduser('~/uprojects/RealisticRendering/RealisticRendering.uproject'),
os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene1/ArchinteriorsVol2Scene1.uproject'),
os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene2/ArchinteriorsVol2Scene2.uproject'),
os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene3/ArchinteriorsVol2Scene3.uproject'),
os.path.expanduser('~/uprojects/UE4UrbanCity/UrbanCity.uproject'),
]
uprojects = []
for uproject_path in win_uprojects:
uproject_name = os.path.basename(uproject_path).split('.')[0]
uprojects.append(
dict(
uproject_path = uproject_path,
ue4_path = ue4_win,
log_file = 'log/win_%s.log' % uproject_name
),
)
for uproject_path in linux_uprojects:
uproject_name = os.path.basename(uproject_path).split('.')[0]
uprojects.append(
dict(
uproject_path = uproject_path,
ue4_path = ue4_linux,
log_file = 'log/linux_%s.log' % uproject_name
),
)
for uproject_path in mac_uprojects:
uproject_name = os.path.basename(uproject_path).split('.')[0]
uprojects.append(
dict(
uproject_path = uproject_path,
ue4_path = ue4_mac,
log_file = 'log/mac_%s.log' % uproject_name
),
)
if __name__ == '__main__':
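    # Note: each build's stdout is written to a file under the local "log/"
    # directory (see the call below); that directory must already exist,
    # since open(..., 'w') does not create missing directories.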
for uproject in uprojects:
uproject_path = uproject['uproject_path']
if not os.path.isfile(uproject_path):
print("Can not find uproject file %s, skip this project" % uproject_path)
continue
cmd = [
'python', 'build.py',
'--UE4', uproject['ue4_path'],
# '--output', uproject['output_folder'],
uproject['uproject_path']
]
print(cmd)
        with open(uproject['log_file'], 'w') as log_file:
            subprocess.call(cmd, stdout=log_file)
with open(uproject['log_file']) as f:
lines = f.readlines()
print(''.join(lines[-10:])) # Print the last few lines
| 1.476563 | 1 |
__init__.py | NeonJarbas/skill-ddg | 0 | 7227 | <gh_stars>0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ovos_utils.gui import can_use_gui
from adapt.intent import IntentBuilder
from mycroft.skills.common_query_skill import CommonQuerySkill, CQSMatchLevel
from mycroft.skills.core import intent_handler
from neon_solver_ddg_plugin import DDGSolver
class DuckDuckGoSkill(CommonQuerySkill):
def __init__(self):
super().__init__()
self.duck = DDGSolver()
# for usage in tell me more / follow up questions
self.idx = 0
self.results = []
self.image = None
# intents
@intent_handler("search_duck.intent")
def handle_search(self, message):
query = message.data["query"]
summary = self.ask_the_duck(query)
if summary:
self.speak_result()
else:
self.speak_dialog("no_answer")
@intent_handler(IntentBuilder("DuckMore").require("More").
require("DuckKnows"))
def handle_tell_more(self, message):
""" Follow up query handler, "tell me more"."""
# query = message.data["DuckKnows"]
# data, related_queries = self.duck.get_infobox(query)
# TODO maybe do something with the infobox data ?
self.speak_result()
# common query
def CQS_match_query_phrase(self, utt):
summary = self.ask_the_duck(utt)
if summary:
self.idx += 1 # spoken by common query
return (utt, CQSMatchLevel.GENERAL, summary,
{'query': utt,
'image': self.image,
'answer': summary})
def CQS_action(self, phrase, data):
""" If selected show gui """
self.display_ddg(data["answer"], data["image"])
# duck duck go api
def ask_the_duck(self, query):
# context for follow up questions
self.set_context("DuckKnows", query)
self.idx = 0
self.results = self.duck.long_answer(query, lang=self.lang)
self.image = self.duck.get_image(query)
if self.results:
return self.results[0]["summary"]
def display_ddg(self, summary=None, image=None):
if not can_use_gui(self.bus):
return
image = image or \
self.image or \
"https://github.com/JarbasSkills/skill-ddg/raw/master/ui/logo.png"
if image.startswith("/"):
image = "https://duckduckgo.com" + image
self.gui['summary'] = summary or ""
self.gui['imgLink'] = image
self.gui.show_page("DuckDelegate.qml", override_idle=60)
def speak_result(self):
if self.idx + 1 > len(self.results):
self.speak_dialog("thats all")
self.remove_context("DuckKnows")
self.idx = 0
else:
self.display_ddg(self.results[self.idx]["summary"],
self.results[self.idx]["img"])
self.speak(self.results[self.idx]["summary"])
self.idx += 1
def create_skill():
return DuckDuckGoSkill()
| 2.15625 | 2 |
openstack_lease_it/openstack_lease_it/settings.py | LAL/openstack-lease-it | 0 | 7228 | """
Django settings for openstack_lease_it project.
Generated by 'django-admin startproject' using Django 1.8.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import ast
import logging
from openstack_lease_it.config import GLOBAL_CONFIG, load_config
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load configuration
load_config()
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = GLOBAL_CONFIG['DJANGO_SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = ast.literal_eval(GLOBAL_CONFIG['DJANGO_DEBUG'])
# ALLOWED_HOSTS restricts which host/domain names this Django app will serve
ALLOWED_HOSTS = []
# An email address must match this regular expression to be considered valid
EMAIL_REGEXP = r"^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\.-]+\.[A-Za-z]*$"
# Application definition
INSTALLED_APPS = (
'openstack_auth',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'openstack_lease_it',
'lease_it',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'openstack_lease_it.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'openstack_lease_it.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DEFAULT_CHARSET = 'utf-8'
# We use memcached as cache backend
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '{MEMCACHED_HOST}:{MEMCACHED_PORT}'.format(**GLOBAL_CONFIG),
}
}
SESSION_COOKIE_SECURE = False
SESSION_TIMEOUT = 1800
# A token can be near the end of its validity when a page starts loading and
# become invalid during rendering, which can cause errors while the page loads.
# TOKEN_TIMEOUT_MARGIN defines a time in seconds that is subtracted from the
# token validity to avoid this issue. You can adjust this time depending on
# the performance of the infrastructure.
TOKEN_TIMEOUT_MARGIN = 100
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = '/'
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
if GLOBAL_CONFIG['BACKEND_PLUGIN'] == 'Openstack':
# UserId on django-openstack_auth need specific User model
AUTH_USER_MODEL = 'openstack_auth.User'
# Define keystone URL for authentification
OPENSTACK_KEYSTONE_URL = GLOBAL_CONFIG['OS_AUTH_URL']
# We use keystone v3 API
OPENSTACK_API_VERSIONS = {
"identity": GLOBAL_CONFIG['OS_IDENTITY_API_VERSION'],
}
# We use multidomain
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
# We load Openstack_auth backend
AUTHENTICATION_BACKENDS = (
'openstack_auth.backend.KeystoneBackend',
'django.contrib.auth.backends.ModelBackend',
)
else:
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
# Configure logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(levelname)s %(asctime)s: %(message)s'
},
},
'handlers': {
'django': {
'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
'class': 'logging.FileHandler',
'filename': os.path.join(GLOBAL_CONFIG['DJANGO_LOGDIR'], 'django.log'),
'formatter': 'simple'
},
'main': {
'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
'class': 'logging.FileHandler',
'filename': os.path.join(GLOBAL_CONFIG['DJANGO_LOGDIR'], 'main.log'),
'formatter': 'simple'
},
'notification': {
'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
'class': 'logging.FileHandler',
'filename': os.path.join(GLOBAL_CONFIG['DJANGO_LOGDIR'], 'notification.log'),
'formatter': 'simple'
},
'instances': {
'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
'class': 'logging.FileHandler',
'filename': os.path.join(GLOBAL_CONFIG['DJANGO_LOGDIR'], 'instances.log'),
'formatter': 'simple'
},
},
'loggers': {
'django': {
'handlers': ['django'],
'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
'propagate': True,
},
'main': {
'handlers': ['main'],
'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
'propagate': True,
},
'notification': {
'handlers': ['notification'],
'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
'propagate': True,
},
'instances': {
'handlers': ['instances'],
'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'],
'propagate': True,
},
},
}
LOGGER = logging.getLogger('main')
LOGGER_NOTIFICATION = logging.getLogger('notification')
LOGGER_INSTANCES = logging.getLogger('instances')
| 1.765625 | 2 |
rigl/experimental/jax/pruning/pruning.py | vishalbelsare/rigl | 276 | 7229 | <filename>rigl/experimental/jax/pruning/pruning.py
# coding=utf-8
# Copyright 2021 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Functions for pruning FLAX masked models."""
import collections.abc
from typing import Any, Callable, Mapping, Optional, Union
import flax
import jax.numpy as jnp
from rigl.experimental.jax.pruning import masked
def weight_magnitude(weights):
"""Creates weight magnitude-based saliencies, given a weight matrix."""
return jnp.absolute(weights)
def prune(
model,
pruning_rate,
saliency_fn = weight_magnitude,
mask = None,
compare_fn = jnp.greater):
"""Returns a mask for a model where the params in each layer are pruned using a saliency function.
Args:
model: The model to create a pruning mask for.
pruning_rate: The fraction of lowest magnitude saliency weights that are
pruned. If a float, the same rate is used for all layers, otherwise if it
is a mapping, it must contain a rate for all masked layers in the model.
saliency_fn: A function that returns a float number used to rank
the importance of individual weights in the layer.
mask: If the model has an existing mask, the mask will be applied before
pruning the model.
compare_fn: A pairwise operator to compare saliency with threshold, and
return True if the saliency indicates the value should not be masked.
Returns:
A pruned mask for the given model.
"""
if not mask:
mask = masked.simple_mask(model, jnp.ones, masked.WEIGHT_PARAM_NAMES)
  if not isinstance(pruning_rate, collections.abc.Mapping):
pruning_rate_dict = {}
for param_name, _ in masked.iterate_mask(mask):
# Get the layer name from the parameter's full name/path.
layer_name = param_name.split('/')[-2]
pruning_rate_dict[layer_name] = pruning_rate
pruning_rate = pruning_rate_dict
for param_path, param_mask in masked.iterate_mask(mask):
split_param_path = param_path.split('/')
layer_name = split_param_path[-2]
param_name = split_param_path[-1]
# If we don't have a pruning rate for the given layer, don't mask it.
if layer_name in pruning_rate and mask[layer_name][param_name] is not None:
param_value = model.params[layer_name][
masked.MaskedModule.UNMASKED][param_name]
# Here any existing mask is first applied to weight matrix.
# Note: need to check explicitly is not None for np array.
if param_mask is not None:
saliencies = saliency_fn(param_mask * param_value)
else:
saliencies = saliency_fn(param_value)
# TODO: Use partition here (partial sort) instead of sort,
# since it's O(N), not O(N log N), however JAX doesn't support it.
sorted_param = jnp.sort(jnp.abs(saliencies.flatten()))
# Figure out the weight magnitude threshold.
threshold_index = jnp.round(pruning_rate[layer_name] *
sorted_param.size).astype(jnp.int32)
threshold = sorted_param[threshold_index]
mask[layer_name][param_name] = jnp.array(
compare_fn(saliencies, threshold), dtype=jnp.int32)
return mask
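# A minimal, self-contained sketch (not part of the library) of the thresholding
# rule implemented above: rank weights by absolute magnitude, take the magnitude
# at the pruning-rate quantile as the threshold, and keep only weights whose
# saliency exceeds it. The toy array and rate below are illustrative only.
if __name__ == '__main__':
  toy_weights = jnp.array([[0.05, -0.8, 0.3], [1.2, -0.02, 0.6]])
  toy_rate = 0.5
  saliencies = weight_magnitude(toy_weights)
  sorted_saliencies = jnp.sort(saliencies.flatten())
  threshold_index = jnp.round(toy_rate * sorted_saliencies.size).astype(jnp.int32)
  threshold = sorted_saliencies[threshold_index]
  toy_mask = jnp.array(jnp.greater(saliencies, threshold), dtype=jnp.int32)
  print(toy_mask)  # 1 = kept weight, 0 = pruned weight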
| 2.25 | 2 |
venv/Lib/site-packages/dash_bootstrap_components/_components/CardLink.py | hanzzhu/chadle | 0 | 7230 | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class CardLink(Component):
"""A CardLink component.
Use card link to add consistently styled links to your cards. Links can be
used like buttons, external links, or internal Dash style links.
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional):
The children of this component.
- id (string; optional):
The ID of this component, used to identify dash components in
callbacks. The ID needs to be unique across all of the components
in an app.
- className (string; optional):
Often used with CSS to style elements with common properties.
- external_link (boolean; optional):
If True, the browser will treat this as an external link, forcing
a page refresh at the new location. If False, this just changes
the location without triggering a page refresh. Use this if you
are observing dcc.Location, for instance. Defaults to True for
absolute URLs and False otherwise.
- href (string; optional):
URL of the resource to link to.
- key (string; optional):
A unique identifier for the component, used to improve performance
by React.js while rendering components See
https://reactjs.org/docs/lists-and-keys.html for more info.
- loading_state (dict; optional):
Object that holds the loading state object coming from
dash-renderer.
`loading_state` is a dict with keys:
- component_name (string; optional):
Holds the name of the component that is loading.
- is_loading (boolean; optional):
Determines if the component is loading or not.
- prop_name (string; optional):
Holds which property is loading.
- n_clicks (number; default 0):
An integer that represents the number of times that this element
has been clicked on.
- n_clicks_timestamp (number; default -1):
An integer that represents the time (in ms since 1970) at which
n_clicks changed. This can be used to tell which button was
changed most recently.
- style (dict; optional):
Defines CSS styles which will override styles previously set.
- target (string; optional):
Target attribute to pass on to the link. Only applies to external
links."""
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, style=Component.UNDEFINED, className=Component.UNDEFINED, key=Component.UNDEFINED, href=Component.UNDEFINED, external_link=Component.UNDEFINED, n_clicks=Component.UNDEFINED, n_clicks_timestamp=Component.UNDEFINED, loading_state=Component.UNDEFINED, target=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'className', 'external_link', 'href', 'key', 'loading_state', 'n_clicks', 'n_clicks_timestamp', 'style', 'target']
self._type = 'CardLink'
self._namespace = 'dash_bootstrap_components'
self._valid_wildcard_attributes = []
self.available_properties = ['children', 'id', 'className', 'external_link', 'href', 'key', 'loading_state', 'n_clicks', 'n_clicks_timestamp', 'style', 'target']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(CardLink, self).__init__(children=children, **args)
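# Illustrative usage sketch (not produced by the Dash generator; added for
# documentation). It shows a minimal Dash 2.x layout combining CardLink with
# Card and CardBody. The ids, text and URLs are assumptions for demonstration,
# and running it requires the `dash` and `dash-bootstrap-components` packages.
if __name__ == '__main__':
    import dash
    from dash import html
    import dash_bootstrap_components as dbc

    app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
    app.layout = dbc.Card(
        dbc.CardBody([
            html.H5('Example card', className='card-title'),
            # External link: forces a full page refresh at the new location.
            dbc.CardLink('Project home', href='https://example.com',
                         external_link=True),
            # Internal Dash-style link: changes location without a refresh.
            dbc.CardLink('About page', href='/about'),
        ])
    )
    app.run_server(debug=True)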
| 2.84375 | 3 |
plugins/hanlp_demo/hanlp_demo/zh/tf/train/train_ctb9_pos_electra.py | antfootAlex/HanLP | 3 | 7231 | <reponame>antfootAlex/HanLP<filename>plugins/hanlp_demo/hanlp_demo/zh/tf/train/train_ctb9_pos_electra.py
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-28 23:15
from hanlp.components.taggers.transformers.transformer_tagger_tf import TransformerTaggerTF
from tests import cdroot
cdroot()
tagger = TransformerTaggerTF()
save_dir = 'data/model/pos/ctb9_electra_small_zh_epoch_20'
tagger.fit('data/pos/ctb9/train.tsv',
'data/pos/ctb9/test.tsv',
save_dir,
transformer='hfl/chinese-electra-small-discriminator',
max_seq_length=130,
warmup_steps_ratio=0.1,
epochs=20,
learning_rate=5e-5)
tagger.load(save_dir)
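# Tag a sample sentence ("my hope is to hope for peace"), a classic example in
# which 希望 occurs first as a noun and then as a verb.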
print(tagger(['我', '的', '希望', '是', '希望', '和平']))
tagger.evaluate('data/pos/ctb9/test.tsv', save_dir=save_dir)
print(f'Model saved in {save_dir}')
| 1.765625 | 2 |
src/app/main.py | Wedding-APIs-System/Backend-APi | 0 | 7232 | <filename>src/app/main.py
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from app.api import landing, login, attendance_confirmation
from sql_app.database import orm_connection
app = FastAPI(title="Sergio's wedding backend API",
description="REST API which serves login, attendance confirmation and other features",
version="1.0",)
origins = [
"*"
# "http://172.16.17.32:5500",
# "192.168.3.11"
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
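# Note: per the CORS spec, a wildcard "*" origin cannot be combined with
# credentialed requests (cookies, Authorization headers); list explicit origins
# if credentials actually need to be shared.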
app.include_router(landing.router)
app.include_router(login.router)
app.include_router(attendance_confirmation.router)
@app.get("/ping")
async def pong():
return {"ping": "pong!"}
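# Illustrative check (a sketch, not part of the app): assuming uvicorn is
# installed, the service can be started with `uvicorn app.main:app --reload`.
# The in-process smoke test below uses FastAPI's TestClient and assumes its
# test-client dependency (httpx or requests, depending on the version) is present.
if __name__ == "__main__":
    from fastapi.testclient import TestClient

    client = TestClient(app)
    response = client.get("/ping")
    assert response.json() == {"ping": "pong!"}
    print("GET /ping ->", response.json())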
| 2.4375 | 2 |
tests/test_pydora/test_utils.py | NextGenTechBar/twandora | 0 | 7233 | from unittest import TestCase
from pandora.client import APIClient
from pandora.errors import InvalidAuthToken, ParameterMissing
from pandora.models.pandora import Station, AdItem, PlaylistItem
from pandora.py2compat import Mock, patch
from pydora.utils import iterate_forever
class TestIterateForever(TestCase):
def setUp(self):
self.transport = Mock(side_effect=[InvalidAuthToken(), None])
self.client = APIClient(self.transport, None, None, None, None)
self.client._authenticate = Mock()
def test_handle_missing_params_exception_due_to_missing_ad_tokens(self):
with patch.object(APIClient, 'get_playlist') as get_playlist_mock:
with patch.object(APIClient, 'register_ad', side_effect=ParameterMissing("ParameterMissing")):
station = Station.from_json(self.client, {'stationToken': 'token_mock'})
ad_mock = AdItem.from_json(self.client, {'station_id': 'id_mock'})
get_playlist_mock.return_value=iter([ad_mock])
station_iter = iterate_forever(station.get_playlist)
next_track = next(station_iter)
self.assertEqual(ad_mock, next_track)
def test_reraise_missing_params_exception(self):
with patch.object(APIClient, 'get_playlist', side_effect=ParameterMissing("ParameterMissing")) as get_playlist_mock:
with self.assertRaises(ParameterMissing):
station = Station.from_json(self.client, {'stationToken': 'token_mock'})
track_mock = PlaylistItem.from_json(self.client, {'token': 'token_mock'})
get_playlist_mock.return_value=iter([track_mock])
station_iter = iterate_forever(station.get_playlist)
next(station_iter)
| 2.375 | 2 |
PyDSTool/PyCont/BifPoint.py | mdlama/pydstool | 2 | 7234 | """ Bifurcation point classes. Each class locates and processes bifurcation points.
* _BranchPointFold is a version based on BranchPoint location algorithms
* BranchPoint: Branch process is broken (can't find alternate branch -- see MATCONT notes)
<NAME>, March 2006
"""
from __future__ import absolute_import, print_function
from .misc import *
from PyDSTool.common import args
from .TestFunc import DiscreteMap, FixedPointMap
from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, \
subtract, divide, transpose, eye, real, imag, \
conjugate, average
from scipy import optimize, linalg
from numpy import dot as matrixmultiply
from numpy import array, float, complex, int, float64, complex64, int32, \
zeros, divide, subtract, reshape, argsort, nonzero
#####
_classes = ['BifPoint', 'BPoint', 'BranchPoint', 'FoldPoint', 'HopfPoint',
'BTPoint', 'ZHPoint', 'CPPoint',
'BranchPointFold', '_BranchPointFold', 'DHPoint',
'GHPoint', 'LPCPoint', 'PDPoint', 'NSPoint', 'SPoint']
__all__ = _classes
#####
class BifPoint(object):
def __init__(self, testfuncs, flagfuncs, label='Bifurcation', stop=False):
self.testfuncs = []
self.flagfuncs = []
self.found = []
self.label = label
self.stop = stop
self.data = args()
if not isinstance(testfuncs, list):
testfuncs = [testfuncs]
if not isinstance(flagfuncs, list):
flagfuncs = [flagfuncs]
self.testfuncs.extend(testfuncs)
self.flagfuncs.extend(flagfuncs)
self.tflen = len(self.testfuncs)
def locate(self, P1, P2, C):
pointlist = []
for i, testfunc in enumerate(self.testfuncs):
if self.flagfuncs[i] == iszero:
for ind in range(testfunc.m):
X, V = testfunc.findzero(P1, P2, ind)
pointlist.append((X,V))
X = average([point[0] for point in pointlist], axis=0)
V = average([point[1] for point in pointlist], axis=0)
C.Corrector(X,V)
return X, V
def process(self, X, V, C):
data = args()
data.X = todict(C, X)
data.V = todict(C, V)
self.found.append(data)
def info(self, C, ind=None, strlist=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
if C.verbosity >= 1:
print(self.label + ' Point found ')
if C.verbosity >= 2:
print('========================== ')
for n, i in enumerate(ind):
print(n, ': ')
Xd = self.found[i].X
for k, j in Xd.items():
print(k, ' = ', j)
print('')
if hasattr(self.found[i], 'eigs'):
print('Eigenvalues = \n')
for x in self.found[i].eigs:
print(' (%f,%f)' % (x.real, x.imag))
print('\n')
if strlist is not None:
for string in strlist:
print(string)
print('')
class SPoint(BifPoint):
"""Special point that represents user-selected free parameter values."""
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'S', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
self.info(C, -1)
return True
class BPoint(BifPoint):
"""Special point that represents boundary of computational domain."""
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'B', stop=stop)
def locate(self, P1, P2, C):
# Find location that triggered testfunc and initialize testfunc to that index
val1 = (P1[0]-self.testfuncs[0].lower)*(self.testfuncs[0].upper-P1[0])
val2 = (P2[0]-self.testfuncs[0].lower)*(self.testfuncs[0].upper-P2[0])
ind = nonzero(val1*val2 < 0)
self.testfuncs[0].ind = ind
self.testfuncs[0].func = self.testfuncs[0].one
X, V = BifPoint.locate(self, P1, P2, C)
# Set testfunc back to monitoring all
self.testfuncs[0].ind = None
self.testfuncs[0].func = self.testfuncs[0].all
return X, V
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
BifPoint.info(self, C, ind)
class BranchPoint(BifPoint):
"""May only work for EquilibriumCurve ... (needs fixing)"""
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'BP', stop=stop)
def __locate_newton(self, X, C):
"""x[0:self.dim] = (x,alpha)
x[self.dim] = beta
x[self.dim+1:2*self.dim] = p
"""
J_coords = C.CorrFunc.jac(X[0:C.dim], C.coords)
J_params = C.CorrFunc.jac(X[0:C.dim], C.params)
return r_[C.CorrFunc(X[0:C.dim]) + X[C.dim]*X[C.dim+1:], \
matrixmultiply(transpose(J_coords),X[C.dim+1:]), \
matrixmultiply(transpose(X[C.dim+1:]),J_params), \
matrixmultiply(transpose(X[C.dim+1:]),X[C.dim+1:]) - 1]
def locate(self, P1, P2, C):
        # Initialize p vector to eigenvector with smallest eigenvalue
X, V = P1
X2, V2 = P2
J_coords = C.CorrFunc.jac(X, C.coords)
W, VL = linalg.eig(J_coords, left=1, right=0)
ind = argsort([abs(eig) for eig in W])[0]
p = real(VL[:,ind])
initpoint = zeros(2*C.dim, float)
initpoint[0:C.dim] = X
initpoint[C.dim+1:] = p
X = optimize.fsolve(self.__locate_newton, initpoint, C)
self.data.psi = X[C.dim+1:]
X = X[0:C.dim]
V = 0.5*(V+V2)
return X, V
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
# Finds the new branch
J_coords = C.CorrFunc.jac(X, C.coords)
J_params = C.CorrFunc.jac(X, C.params)
singular = True
perpvec = r_[1,zeros(C.dim-1)]
d = 1
while singular and d <= C.dim:
try:
v0 = linalg.solve(r_[c_[J_coords, J_params],
[perpvec]], \
r_[zeros(C.dim-1),1])
except:
perpvec = r_[0., perpvec[0:(C.dim-1)]]
d += 1
else:
singular = False
if singular:
raise PyDSTool_ExistError("Problem in _compute: Failed to compute tangent vector.")
v0 /= linalg.norm(v0)
V = sign([x for x in v0 if abs(x) > 1e-8][0])*v0
A = r_[c_[J_coords, J_params], [V]]
W, VR = linalg.eig(A)
W0 = [ind for ind, eig in enumerate(W) if abs(eig) < 5e-5]
V1 = real(VR[:,W0[0]])
H = C.CorrFunc.hess(X, C.coords+C.params, C.coords+C.params)
c11 = matrixmultiply(self.data.psi,[bilinearform(H[i,:,:], V, V) for i in range(H.shape[0])])
c12 = matrixmultiply(self.data.psi,[bilinearform(H[i,:,:], V, V1) for i in range(H.shape[0])])
c22 = matrixmultiply(self.data.psi,[bilinearform(H[i,:,:], V1, V1) for i in range(H.shape[0])])
beta = 1
alpha = -1*c22/(2*c12)
V1 = alpha*V + beta*V1
V1 /= linalg.norm(V1)
self.found[-1].eigs = W
self.found[-1].branch = todict(C, V1)
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
strlist = []
for n, i in enumerate(ind):
strlist.append('branch angle = ' + repr(matrixmultiply(tocoords(C, self.found[i].V), \
tocoords(C, self.found[i].branch))))
X = tocoords(C, self.found[-1].X)
V = tocoords(C, self.found[-1].V)
C._preTestFunc(X, V)
strlist.append('Test function #1: ' + repr(self.testfuncs[0](X,V)[0]))
BifPoint.info(self, C, ind, strlist)
class FoldPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'LP', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
# Compute normal form coefficient
        # NOTE: These are for free when using the bordering technique!
# NOTE: Does not agree with MATCONT output! (if |p| = |q| = 1, then it does)
J_coords = C.CorrFunc.jac(X, C.coords)
W, VL, VR = linalg.eig(J_coords, left=1, right=1)
minW = min(abs(W))
ind = [(abs(eig) < minW+1e-8) and (abs(eig) > minW-1e-8) for eig in W].index(True)
p, q = real(VL[:,ind]), real(VR[:,ind])
p /= matrixmultiply(p,q)
B = C.CorrFunc.hess(X, C.coords, C.coords)
self.found[-1].a = abs(0.5*matrixmultiply(p,[bilinearform(B[i,:,:], q, q) for i in range(B.shape[0])]))
self.found[-1].eigs = W
numzero = len([eig for eig in W if abs(eig) < 1e-4])
if numzero > 1:
if C.verbosity >= 2:
print('Fold-Fold!\n')
del self.found[-1]
return False
elif numzero == 0:
if C.verbosity >= 2:
print('False positive!\n')
del self.found[-1]
return False
if C.verbosity >= 2:
print('\nChecking...')
print(' |q| = %f' % linalg.norm(q))
print(' <p,q> = %f' % matrixmultiply(p,q))
print(' |Aq| = %f' % linalg.norm(matrixmultiply(J_coords,q)))
print(' |transpose(A)p| = %f\n' % linalg.norm(matrixmultiply(transpose(J_coords),p)))
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
strlist = []
for n, i in enumerate(ind):
strlist.append('a = ' + repr(self.found[i].a))
BifPoint.info(self, C, ind, strlist)
class HopfPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'H', stop=stop)
def process(self, X, V, C):
"""Tolerance for eigenvalues a possible problem when checking for neutral saddles."""
BifPoint.process(self, X, V, C)
J_coords = C.CorrFunc.jac(X, C.coords)
eigs, LV, RV = linalg.eig(J_coords,left=1,right=1)
# Check for neutral saddles
found = False
for i in range(len(eigs)):
if abs(imag(eigs[i])) < 1e-5:
for j in range(i+1,len(eigs)):
if C.verbosity >= 2:
if abs(eigs[i]) < 1e-5 and abs(eigs[j]) < 1e-5:
print('Fold-Fold point found in Hopf!\n')
elif abs(imag(eigs[j])) < 1e-5 and abs(real(eigs[i]) + real(eigs[j])) < 1e-5:
print('Neutral saddle found!\n')
elif abs(real(eigs[i])) < 1e-5:
for j in range(i+1, len(eigs)):
if abs(real(eigs[j])) < 1e-5 and abs(real(eigs[i]) - real(eigs[j])) < 1e-5:
found = True
w = abs(imag(eigs[i]))
if imag(eigs[i]) > 0:
p = conjugate(LV[:,j])/linalg.norm(LV[:,j])
q = RV[:,i]/linalg.norm(RV[:,i])
else:
p = conjugate(LV[:,i])/linalg.norm(LV[:,i])
q = RV[:,j]/linalg.norm(RV[:,j])
if not found:
del self.found[-1]
return False
direc = conjugate(1/matrixmultiply(conjugate(p),q))
p = direc*p
# Alternate way to compute 1st lyapunov coefficient (from Kuznetsov [4])
#print (1./(w*w))*real(1j*matrixmultiply(conjugate(p),b1)*matrixmultiply(conjugate(p),b3) + \
# w*matrixmultiply(conjugate(p),trilinearform(D,q,q,conjugate(q))))
self.found[-1].w = w
self.found[-1].l1 = firstlyapunov(X, C.CorrFunc, w, J_coords=J_coords, p=p, q=q, check=(C.verbosity==2))
self.found[-1].eigs = eigs
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
strlist = []
for n, i in enumerate(ind):
strlist.append('w = ' + repr(self.found[i].w))
strlist.append('l1 = ' + repr(self.found[i].l1))
BifPoint.info(self, C, ind, strlist)
# Codimension-2 bifurcations
class BTPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'BT', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
J_coords = C.CorrFunc.sysfunc.jac(X, C.coords)
W, VL, VR = linalg.eig(J_coords, left=1, right=1)
self.found[-1].eigs = W
if C.verbosity >= 2:
if C.CorrFunc.testfunc.data.B.shape[1] == 2:
b = matrixmultiply(transpose(J_coords), C.CorrFunc.testfunc.data.w[:,0])
c = matrixmultiply(J_coords, C.CorrFunc.testfunc.data.v[:,0])
else:
b = C.CorrFunc.testfunc.data.w[:,0]
c = C.CorrFunc.testfunc.data.v[:,0]
print('\nChecking...')
print(' <b,c> = %f' % matrixmultiply(transpose(b), c))
print('\n')
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
BifPoint.info(self, C, ind)
class ZHPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'ZH', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
J_coords = C.CorrFunc.sysfunc.jac(X, C.coords)
W, VL, VR = linalg.eig(J_coords, left=1, right=1)
self.found[-1].eigs = W
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
BifPoint.info(self, C, ind)
class CPPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'CP', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
J_coords = C.CorrFunc.sysfunc.jac(X, C.coords)
B = C.CorrFunc.sysfunc.hess(X, C.coords, C.coords)
W, VL, VR = linalg.eig(J_coords, left=1, right=1)
q = C.CorrFunc.testfunc.data.C/linalg.norm(C.CorrFunc.testfunc.data.C)
p = C.CorrFunc.testfunc.data.B/matrixmultiply(transpose(C.CorrFunc.testfunc.data.B),q)
self.found[-1].eigs = W
a = 0.5*matrixmultiply(transpose(p), reshape([bilinearform(B[i,:,:], q, q) \
for i in range(B.shape[0])],(B.shape[0],1)))[0][0]
if C.verbosity >= 2:
print('\nChecking...')
print(' |a| = %f' % a)
print('\n')
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
BifPoint.info(self, C, ind)
class BranchPointFold(BifPoint):
"""Check Equilibrium.m in MATCONT"""
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'BP', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
pind = self.testfuncs[0].pind
# Finds the new branch
J_coords = C.CorrFunc.jac(X, C.coords)
J_params = C.CorrFunc.jac(X, C.params)
A = r_[c_[J_coords, J_params[:,pind]]]
#A = r_[c_[J_coords, J_params], [V]]
W, VR = linalg.eig(A)
W0 = [ind for ind, eig in enumerate(W) if abs(eig) < 5e-5]
tmp = real(VR[:,W0[0]])
V1 = r_[tmp[:-1], 0, 0]
V1[len(tmp)-1+pind] = tmp[-1]
"""NEED TO FIX THIS!"""
H = C.CorrFunc.hess(X, C.coords+C.params, C.coords+C.params)
# c11 = matrixmultiply(self.data.psi,[bilinearform(H[i,:,:], V, V) for i in range(H.shape[0])])
# c12 = matrixmultiply(self.data.psi,[bilinearform(H[i,:,:], V, V1) for i in range(H.shape[0])])
# c22 = matrixmultiply(self.data.psi,[bilinearform(H[i,:,:], V1, V1) for i in range(H.shape[0])])
# beta = 1
# alpha = -1*c22/(2*c12)
# V1 = alpha*V + beta*V1
# V1 /= linalg.norm(V1)
self.found[-1].eigs = W
self.found[-1].branch = None
self.found[-1].par = C.freepars[self.testfuncs[0].pind]
# self.found[-1].branch = todict(C, V1)
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
strlist = []
#for n, i in enumerate(ind):
# strlist.append('branch angle = ' + repr(matrixmultiply(tocoords(C, self.found[i].V), \
# tocoords(C, self.found[i].branch))))
X = tocoords(C, self.found[-1].X)
V = tocoords(C, self.found[-1].V)
C._preTestFunc(X, V)
strlist.append('Test function #1: ' + repr(self.testfuncs[0](X,V)[0]))
BifPoint.info(self, C, ind, strlist)
class _BranchPointFold(BifPoint):
"""Check Equilibrium.m in MATCONT"""
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'BP', stop=stop)
def __locate_newton(self, X, C):
"""Note: This is redundant!! B is a column of A!!! Works for now, though..."""
pind = self.testfuncs[0].pind
J_coords = C.CorrFunc.jac(X[0:C.dim], C.coords)
J_params = C.CorrFunc.jac(X[0:C.dim], C.params)
A = c_[J_coords, J_params[:,pind]]
B = J_params[:,pind]
return r_[C.CorrFunc(X[0:C.dim]) + X[C.dim]*X[C.dim+1:], \
matrixmultiply(transpose(A),X[C.dim+1:]), \
matrixmultiply(transpose(X[C.dim+1:]),B), \
matrixmultiply(transpose(X[C.dim+1:]),X[C.dim+1:]) - 1]
def locate(self, P1, P2, C):
        # Initialize p vector to eigenvector with smallest eigenvalue
X, V = P1
pind = self.testfuncs[0].pind
J_coords = C.CorrFunc.jac(X, C.coords)
J_params = C.CorrFunc.jac(X, C.params)
A = r_[c_[J_coords, J_params[:,pind]]]
W, VL = linalg.eig(A, left=1, right=0)
ind = argsort([abs(eig) for eig in W])[0]
p = real(VL[:,ind])
initpoint = zeros(2*C.dim, float)
initpoint[0:C.dim] = X
initpoint[C.dim+1:] = p
X = optimize.fsolve(self.__locate_newton, initpoint, C)
self.data.psi = X[C.dim+1:]
X = X[0:C.dim]
return X, V
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
pind = self.testfuncs[0].pind
# Finds the new branch
J_coords = C.CorrFunc.jac(X, C.coords)
J_params = C.CorrFunc.jac(X, C.params)
A = r_[c_[J_coords, J_params[:,pind]]]
#A = r_[c_[J_coords, J_params], [V]]
W, VR = linalg.eig(A)
W0 = [ind for ind, eig in enumerate(W) if abs(eig) < 5e-5]
tmp = real(VR[:,W0[0]])
V1 = r_[tmp[:-1], 0, 0]
V1[len(tmp)-1+pind] = tmp[-1]
"""NEED TO FIX THIS!"""
H = C.CorrFunc.hess(X, C.coords+C.params, C.coords+C.params)
c11 = matrixmultiply(self.data.psi,[bilinearform(H[i,:,:], V, V) for i in range(H.shape[0])])
c12 = matrixmultiply(self.data.psi,[bilinearform(H[i,:,:], V, V1) for i in range(H.shape[0])])
c22 = matrixmultiply(self.data.psi,[bilinearform(H[i,:,:], V1, V1) for i in range(H.shape[0])])
beta = 1
alpha = -1*c22/(2*c12)
V1 = alpha*V + beta*V1
V1 /= linalg.norm(V1)
self.found[-1].eigs = W
self.found[-1].branch = None
self.found[-1].par = C.freepars[self.testfuncs[0].pind]
self.found[-1].branch = todict(C, V1)
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
strlist = []
#for n, i in enumerate(ind):
# strlist.append('branch angle = ' + repr(matrixmultiply(tocoords(C, self.found[i].V), \
# tocoords(C, self.found[i].branch))))
X = tocoords(C, self.found[-1].X)
V = tocoords(C, self.found[-1].V)
C._preTestFunc(X, V)
strlist.append('Test function #1: ' + repr(self.testfuncs[0](X,V)[0]))
BifPoint.info(self, C, ind, strlist)
class DHPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'DH', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
J_coords = C.CorrFunc.sysfunc.jac(X, C.coords)
eigs, LV, RV = linalg.eig(J_coords,left=1,right=1)
self.found[-1].eigs = eigs
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
BifPoint.info(self, C, ind)
class GHPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'GH', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
J_coords = C.CorrFunc.sysfunc.jac(X, C.coords)
eigs, LV, RV = linalg.eig(J_coords,left=1,right=1)
# Check for neutral saddles
found = False
for i in range(len(eigs)):
if abs(imag(eigs[i])) < 1e-5:
for j in range(i+1,len(eigs)):
if C.verbosity >= 2:
if abs(eigs[i]) < 1e-5 and abs(eigs[j]) < 1e-5:
print('Fold-Fold point found in Hopf!\n')
elif abs(imag(eigs[j])) < 1e-5 and abs(real(eigs[i]) + real(eigs[j])) < 1e-5:
print('Neutral saddle found!\n')
elif abs(real(eigs[i])) < 1e-5:
for j in range(i+1, len(eigs)):
if abs(real(eigs[j])) < 1e-5 and abs(real(eigs[i]) - real(eigs[j])) < 1e-5:
found = True
w = abs(imag(eigs[i]))
if imag(eigs[i]) > 0:
p = conjugate(LV[:,j]/linalg.norm(LV[:,j]))
q = RV[:,i]/linalg.norm(RV[:,i])
else:
p = conjugate(LV[:,i]/linalg.norm(LV[:,i]))
q = RV[:,j]/linalg.norm(RV[:,j])
if not found:
del self.found[-1]
return False
direc = conjugate(1/matrixmultiply(conjugate(p),q))
p = direc*p
# Alternate way to compute 1st lyapunov coefficient (from Kuznetsov [4])
#print (1./(w*w))*real(1j*matrixmultiply(conjugate(p),b1)*matrixmultiply(conjugate(p),b3) + \
# w*matrixmultiply(conjugate(p),trilinearform(D,q,q,conjugate(q))))
self.found[-1].w = w
self.found[-1].l1 = firstlyapunov(X, C.CorrFunc.sysfunc, w, J_coords=J_coords, p=p, q=q, check=(C.verbosity==2))
self.found[-1].eigs = eigs
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
strlist = []
for n, i in enumerate(ind):
strlist.append('w = ' + repr(self.found[i].w))
strlist.append('l1 = ' + repr(self.found[i].l1))
BifPoint.info(self, C, ind, strlist)
# Discrete maps
class LPCPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'LPC', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
J_coords = C.sysfunc.jac(X, C.coords)
W, VL, VR = linalg.eig(J_coords, left=1, right=1)
self.found[-1].eigs = W
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
strlist = []
X = tocoords(C, self.found[-1].X)
V = tocoords(C, self.found[-1].V)
C._preTestFunc(X, V)
strlist.append('Test function #1: ' + repr(self.testfuncs[0](X,V)[0]))
strlist.append('Test function #2: ' + repr(self.testfuncs[1](X,V)[0]))
BifPoint.info(self, C, ind, strlist)
class PDPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'PD', stop=stop)
def process(self, X, V, C):
"""Do I need to compute the branch, or will it always be in the direction of freepar = constant?"""
BifPoint.process(self, X, V, C)
F = DiscreteMap(C.sysfunc, period=2*C.sysfunc.period)
FP = FixedPointMap(F)
J_coords = FP.jac(X, C.coords)
J_params = FP.jac(X, C.params)
# Locate branch of double period map
W, VL = linalg.eig(J_coords, left=1, right=0)
ind = argsort([abs(eig) for eig in W])[0]
psi = real(VL[:,ind])
A = r_[c_[J_coords, J_params], [V]]
W, VR = linalg.eig(A)
W0 = argsort([abs(eig) for eig in W])[0]
V1 = real(VR[:,W0])
H = FP.hess(X, C.coords+C.params, C.coords+C.params)
c11 = matrixmultiply(psi,[bilinearform(H[i,:,:], V, V) for i in range(H.shape[0])])
c12 = matrixmultiply(psi,[bilinearform(H[i,:,:], V, V1) for i in range(H.shape[0])])
c22 = matrixmultiply(psi,[bilinearform(H[i,:,:], V1, V1) for i in range(H.shape[0])])
beta = 1
alpha = -1*c22/(2*c12)
V1 = alpha*V + beta*V1
V1 /= linalg.norm(V1)
J_coords = C.sysfunc.jac(X, C.coords)
W = linalg.eig(J_coords, right=0)
self.found[-1].eigs = W
self.found[-1].branch_period = 2*C.sysfunc.period
self.found[-1].branch = todict(C, V1)
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
strlist = []
for n, i in enumerate(ind):
strlist.append('Period doubling branch angle = ' + repr(matrixmultiply(tocoords(C, self.found[i].V), \
tocoords(C, self.found[i].branch))))
BifPoint.info(self, C, ind, strlist)
class NSPoint(BifPoint):
def __init__(self, testfuncs, flagfuncs, stop=False):
BifPoint.__init__(self, testfuncs, flagfuncs, 'NS', stop=stop)
def process(self, X, V, C):
BifPoint.process(self, X, V, C)
J_coords = C.sysfunc.jac(X, C.coords)
eigs, VL, VR = linalg.eig(J_coords, left=1, right=1)
# Check for nonreal multipliers
found = False
for i in range(len(eigs)):
for j in range(i+1,len(eigs)):
if abs(imag(eigs[i])) > 1e-10 and \
abs(imag(eigs[j])) > 1e-10 and \
abs(eigs[i]*eigs[j] - 1) < 1e-5:
found = True
if not found:
del self.found[-1]
return False
self.found[-1].eigs = eigs
self.info(C, -1)
return True
def info(self, C, ind=None):
if ind is None:
ind = list(range(len(self.found)))
elif isinstance(ind, int):
ind = [ind]
BifPoint.info(self, C, ind)
| 2.5 | 2 |
src/marion/marion/urls/__init__.py | OmenApps/marion | 7 | 7235 | <filename>src/marion/marion/urls/__init__.py
"""Urls for the marion application"""
from django.urls import include, path
from rest_framework import routers
from .. import views
router = routers.DefaultRouter()
router.register(r"requests", views.DocumentRequestViewSet)
urlpatterns = [
path("", include(router.urls)),
]
| 1.9375 | 2 |
setup.py | TanKingsley/pyxll-jupyter | 1 | 7236 | """
PyXLL-Jupyter
This package integrates Jupyter notebooks into Microsoft Excel.
To install it, first install PyXLL (see https://www.pyxll.com).
Briefly, to install PyXLL do the following::
pip install pyxll
pyxll install
Once PyXLL is installed then installing this package will add a
button to the PyXLL ribbon toolbar that will start a Jupyter
notebook browser as a custom task pane in Excel.
To install this package use::
pip install pyxll_jupyter
"""
from setuptools import setup, find_packages
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="pyxll_jupyter",
description="Adds Jupyter notebooks to Microsoft Excel using PyXLL.",
long_description=long_description,
long_description_content_type='text/markdown',
version="0.1.11",
packages=find_packages(),
include_package_data=True,
package_data={
"pyxll_jupyter": [
"pyxll_jupyter/resources/ribbon.xml",
"pyxll_jupyter/resources/jupyter.png",
]
},
project_urls={
"Source": "https://github.com/pyxll/pyxll-jupyter",
"Tracker": "https://github.com/pyxll/pyxll-jupyter/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: Microsoft :: Windows"
],
entry_points={
"pyxll": [
"modules = pyxll_jupyter.pyxll:modules",
"ribbon = pyxll_jupyter.pyxll:ribbon"
]
},
install_requires=[
"pyxll >= 5.0.0",
"jupyter >= 1.0.0",
"PySide2"
]
)
| 3.125 | 3 |
board/views.py | albi23/Pyra | 0 | 7237 | from typing import List
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import JsonResponse
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views import generic, View
from board.forms import SignUpForm
from .const import BOARD_VIEW_COLUMN_COUNT
from .models import Board, Priority, Membership, Contribution
from .models import Task
@login_required
def index(request):
board_col, row_count = Board.objects.get_user_split_boards(request.user, BOARD_VIEW_COLUMN_COUNT)
context = {
'board_col': board_col,
'row_count': row_count
}
return render(request, 'index.html', context)
@login_required
def board(request, board_id):
_board = Board.objects.get(id=board_id)
todo_tasks: List[Task] = Task.objects.filter(board=_board, status='TODO')
doing_tasks = Task.objects.filter(board=_board, status='DOING')
done_tasks = Task.objects.filter(board=_board, status='DONE')
context = {
'board': _board,
'todo_tasks': todo_tasks,
'doing_tasks': doing_tasks,
'done_tasks': done_tasks,
'user': request.user,
}
return render(request, 'board.html', context)
@login_required
def update_task_state(request):
if request.method == "POST":
task_id = request.POST['task_id']
new_state = request.POST['new_state']
this_task = Task.objects.get(id=task_id)
this_task.status = new_state
this_task.save()
return JsonResponse({"success": True})
class SignUp(generic.CreateView):
form_class = SignUpForm
success_url = reverse_lazy('login')
template_name = 'signup.html'
class CreateBoard(View):
def post(self, request):
name = request.POST['name']
description = request.POST['description']
if name:
new_board = Board.objects.create(
name=name,
description=description,
)
Membership.objects.create(
board=new_board,
user=request.user,
role=Membership.Role.SUPER_USER
)
return JsonResponse({"success": True})
return JsonResponse({"success": False})
class CreateTask(View):
def post(self, request):
title = request.POST['title']
description = request.POST['description']
status = request.POST['status']
priority = int(request.POST['priority'])
board_id = int(request.POST['board_id'])
if title and request.user in Board.objects.get(id=board_id).members.all():
Task.objects.create(
title=title,
description=description,
status=status,
priority=Priority.choices[-int(priority) - 1][0],
created_by=request.user,
board_id=board_id
)
return JsonResponse({"success": True})
return JsonResponse({"success": False})
class CreateBoardMembership(View):
def post(self, request):
username = request.POST['username']
board_id = int(request.POST['board_id'])
if username and board_id:
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return JsonResponse(
status=404,
data={'message': 'User doesn\'t exist'}
)
try:
membership = Membership.objects.get(board=board_id, user=user.id)
except Membership.DoesNotExist:
membership = None
if membership is not None:
return JsonResponse(
status=400,
data={'message': 'user already added'}
)
Membership.objects.create(
user=user,
board_id=board_id
)
return JsonResponse({'message': 'success'})
return JsonResponse(
status=400,
data={'message': 'username or board_id can\'t be empty'}
)
def parse_priority(value: str):
choices = Priority.choices
for i in range(0, len(choices)):
if value == choices[i][1].lower():
return choices[i][0]
@login_required
def update_task(request):
this_task = Task.objects.get(id=request.POST['id'])
this_task.title = request.POST['title']
this_task.description = request.POST['description']
this_task.status = request.POST['status']
this_task.priority = parse_priority(request.POST['priority'].lower())
this_task.save()
assigned_user_id = request.POST['user']
if assigned_user_id:
Contribution.objects.create(
task=this_task,
user_id=assigned_user_id,
)
return JsonResponse({"success": True})
@login_required
def get_available_users(request):
users = User.objects.filter(
membership__board_id=request.GET['board']
).exclude(
contribution__task_id=request.GET['task']
)
response_users = list(map(
lambda user: {
'id': user.id,
'username': user.username
},
users
))
return JsonResponse({'users': response_users})
@login_required
def delete_task(request):
    if request.GET.get('task'):
        task = Task.objects.get(id=request.GET['task'])
if request.user in task.board.members.all():
task.delete()
return JsonResponse({"success": True})
return JsonResponse({"success": False})
| 2.046875 | 2 |
setup.py | lazmond3/pylib-instagram-type | 0 | 7238 | <reponame>lazmond3/pylib-instagram-type
# -*- coding: utf-8 -*-
# Learn more: https://github.com/kennethreitz/setup.py
from setuptools import setup, find_packages
import os
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
def take_package_name(name):
if name.startswith("-e"):
return name[name.find("=")+1:name.rfind("-")]
else:
return name.strip()
def load_requires_from_file(filepath):
with open(filepath) as fp:
return [take_package_name(pkg_name) for pkg_name in fp.readlines()]
setup(
name='lazmond3-pylib-instagram-type',
version='1.0.8',
    description='update from 1.0.8: hasattr: 1.0.7: multiple medias, str get multiple + init.py',
long_description=readme,
author='lazmond3',
author_email='<EMAIL>',
url='https://github.com/lazmond3/pylib-instagram-type.git',
install_requires=["lazmond3-pylib-debug"],
license=license,
packages=find_packages(exclude=('tests', 'docs')),
test_suite='tests'
)
| 1.578125 | 2 |
tbx/core/migrations/0111_move_sign_up_form_into_new_app.py | arush15june/wagtail-torchbox | 0 | 7239 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-01-15 22:49
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wagtailsearchpromotions', '0002_capitalizeverbose'),
('wagtailcore', '0040_page_draft_title'),
('wagtailredirects', '0006_redirect_increase_max_length'),
('wagtailforms', '0003_capitalizeverbose'),
('torchbox', '0110_rename_blogpagetaglist_to_tag'),
]
database_operations = [
migrations.AlterModelTable('SignUpFormPageResponse', 'sign_up_form_signupformpageresponse'),
migrations.AlterModelTable('SignUpFormPage', 'sign_up_form_signupformpage'),
migrations.AlterModelTable('SignUpFormPageBullet', 'sign_up_form_signupformpagebullet'),
migrations.AlterModelTable('SignUpFormPageLogo', 'sign_up_form_signupformpagelogo'),
migrations.AlterModelTable('SignUpFormPageQuote', 'sign_up_form_signupformpagequote'),
]
state_operations = [
migrations.RemoveField(
model_name='signupformpage',
name='call_to_action_image',
),
migrations.RemoveField(
model_name='signupformpage',
name='email_attachment',
),
migrations.RemoveField(
model_name='signupformpage',
name='page_ptr',
),
migrations.RemoveField(
model_name='signupformpagebullet',
name='page',
),
migrations.RemoveField(
model_name='signupformpagelogo',
name='logo',
),
migrations.RemoveField(
model_name='signupformpagelogo',
name='page',
),
migrations.RemoveField(
model_name='signupformpagequote',
name='page',
),
migrations.DeleteModel(
name='SignUpFormPageResponse',
),
migrations.DeleteModel(
name='SignUpFormPage',
),
migrations.DeleteModel(
name='SignUpFormPageBullet',
),
migrations.DeleteModel(
name='SignUpFormPageLogo',
),
migrations.DeleteModel(
name='SignUpFormPageQuote',
),
]
operations = [
migrations.SeparateDatabaseAndState(
database_operations=database_operations,
state_operations=state_operations,
)
]
| 1.59375 | 2 |
tests/test_webframe.py | zsolt-beringer/osm-gimmisn | 0 | 7240 | #!/usr/bin/env python3
#
# Copyright (c) 2019 <NAME> and contributors.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The test_webframe module covers the webframe module."""
from typing import List
from typing import TYPE_CHECKING
from typing import Tuple
from typing import cast
import configparser
import datetime
import os
import unittest
import unittest.mock
import time
# pylint: disable=unused-import
import yattag
import webframe
if TYPE_CHECKING:
# pylint: disable=no-name-in-module,import-error,unused-import
from wsgiref.types import StartResponse # noqa: F401
class TestHandleStatic(unittest.TestCase):
"""Tests handle_static()."""
def test_happy(self) -> None:
"""Tests the happy path: css case."""
content, content_type = webframe.handle_static("/osm/static/osm.css")
self.assertTrue(len(content))
self.assertEqual(content_type, "text/css")
def test_javascript(self) -> None:
"""Tests the javascript case."""
content, content_type = webframe.handle_static("/osm/static/sorttable.js")
self.assertTrue(len(content))
self.assertEqual(content_type, "application/x-javascript")
def test_else(self) -> None:
"""Tests the case when the content type is not recognized."""
content, content_type = webframe.handle_static("/osm/static/test.xyz")
self.assertFalse(len(content))
self.assertFalse(len(content_type))
class TestHandleException(unittest.TestCase):
"""Tests handle_exception()."""
def test_happy(self) -> None:
"""Tests the happy path."""
environ = {
"PATH_INFO": "/"
}
def start_response(status: str, response_headers: List[Tuple[str, str]]) -> None:
self.assertTrue(status.startswith("500"))
header_dict = dict(response_headers)
self.assertEqual(header_dict["Content-type"], "text/html; charset=utf-8")
try:
int("a")
# pylint: disable=broad-except
except Exception:
callback = cast('StartResponse', start_response)
output_iterable = webframe.handle_exception(environ, callback)
output_list = cast(List[bytes], output_iterable)
self.assertTrue(output_list)
output = output_list[0].decode('utf-8')
self.assertIn("ValueError", output)
return
self.fail()
class TestLocalToUiTz(unittest.TestCase):
"""Tests local_to_ui_tz()."""
def test_happy(self) -> None:
"""Tests the happy path."""
def get_abspath(path: str) -> str:
if os.path.isabs(path):
return path
return os.path.join(os.path.dirname(__file__), path)
def get_config() -> configparser.ConfigParser:
config = configparser.ConfigParser()
config.read_dict({"wsgi": {"timezone": "Europe/Budapest"}})
return config
with unittest.mock.patch('util.get_abspath', get_abspath):
with unittest.mock.patch('webframe.get_config', get_config):
local_dt = datetime.datetime.fromtimestamp(0)
ui_dt = webframe.local_to_ui_tz(local_dt)
if time.strftime('%Z%z') == "CET+0100":
self.assertEqual(ui_dt.timestamp(), 0)
class TestFillMissingHeaderItems(unittest.TestCase):
"""Tests fill_missing_header_items()."""
def test_happy(self) -> None:
"""Tests the happy path."""
streets = "no"
relation_name = "gazdagret"
items: List[yattag.doc.Doc] = []
webframe.fill_missing_header_items(streets, relation_name, items)
html = items[0].getvalue()
self.assertIn("Missing house numbers", html)
self.assertNotIn("Missing streets", html)
if __name__ == '__main__':
unittest.main()
| 2.15625 | 2 |
spotify.py | nimatest1234/telegram_spotify_downloader_bot | 0 | 7241 | from __future__ import unicode_literals
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import requests
from youtube_search import YoutubeSearch
import youtube_dl
import eyed3.id3
import eyed3
import lyricsgenius
import telepot
spotifyy = spotipy.Spotify(
client_credentials_manager=SpotifyClientCredentials(client_id='a145db3dcd564b9592dacf10649e4ed5',
client_secret='<KEY>'))
genius = lyricsgenius.Genius('<KEY>')
token = '<PASSWORD>'
bot = telepot.Bot(token)
def DOWNLOADMP3(link,chat_id):
#Get MetaData
results = spotifyy.track(link)
song = results['name']
print('[Spotify]MetaData Found!')
artist = results['artists'][0]['name']
YTSEARCH = str(song + " " + artist)
artistfinder = results['artists']
tracknum = results['track_number']
album = results['album']['name']
realese_date = int(results['album']['release_date'][:4])
if len(artistfinder) > 1:
fetures = "( Ft."
for lomi in range(0, len(artistfinder)):
try:
if lomi < len(artistfinder) - 2:
artistft = artistfinder[lomi + 1]['name'] + ", "
fetures += artistft
else:
artistft = artistfinder[lomi + 1]['name'] + ")"
fetures += artistft
except:
pass
else:
fetures = ""
time_duration = ""
time_duration1 = ""
time_duration2 = ""
time_duration3 = ""
millis = results['duration_ms']
millis = int(millis)
seconds = (millis / 1000) % 60
minutes = (millis / (1000 * 60)) % 60
seconds = int(seconds)
minutes = int(minutes)
if seconds >= 10:
if seconds < 59:
time_duration = "{0}:{1}".format(minutes, seconds)
time_duration1 = "{0}:{1}".format(minutes, seconds + 1)
time_duration2 = "{0}:{1}".format(minutes, seconds - 1)
if seconds == 10:
time_duration2 = "{0}:0{1}".format(minutes, seconds - 1)
time_duration3 = "{0}:{1}".format(minutes, seconds + 2)
elif seconds < 58:
time_duration3 = "{0}:{1}".format(minutes, seconds + 2)
time_duration2 = "{0}:{1}".format(minutes, seconds - 1)
elif seconds == 58:
time_duration3 = "{0}:0{1}".format(minutes + 1, seconds - 58)
time_duration2 = "{0}:{1}".format(minutes, seconds - 1)
else:
time_duration2 = "{0}:{1}".format(minutes, seconds - 1)
else:
time_duration1 = "{0}:0{1}".format(minutes + 1, seconds - 59)
if seconds == 59:
time_duration3 = "{0}:0{1}".format(minutes + 1, seconds - 58)
else:
time_duration = "{0}:0{1}".format(minutes, seconds)
time_duration1 = "{0}:0{1}".format(minutes, seconds + 1)
if seconds < 8:
time_duration3 = "{0}:0{1}".format(minutes, seconds + 2)
time_duration2 = "{0}:0{1}".format(minutes, seconds - 1)
elif seconds == 9 or seconds == 8:
time_duration3 = "{0}:{1}".format(minutes, seconds + 2)
elif seconds == 0:
time_duration2 = "{0}:{1}".format(minutes - 1, seconds + 59)
time_duration3 = "{0}:0{1}".format(minutes, seconds + 2)
else:
time_duration2 = "{0}:0{1}".format(minutes, seconds - 1)
time_duration3 = "{0}:0{1}".format(minutes, seconds + 2)
trackname = song + fetures
#Download Cover
response = requests.get(results['album']['images'][0]['url'])
DIRCOVER = "songpicts//" + trackname + ".png"
file = open(DIRCOVER, "wb")
file.write(response.content)
file.close()
#search for music on youtube
results = list(YoutubeSearch(str(YTSEARCH)).to_dict())
LINKASLI = ''
for URLSSS in results:
timeyt = URLSSS["duration"]
print(URLSSS['title'])
if timeyt == time_duration or timeyt == time_duration1:
LINKASLI = URLSSS['url_suffix']
break
elif timeyt == time_duration2 or timeyt == time_duration3:
LINKASLI = URLSSS['url_suffix']
break
YTLINK = str("https://www.youtube.com/" + LINKASLI)
print('[Youtube]song found!')
print(f'[Youtube]Link song on youtube : {YTLINK}')
    #Download Music from youtube
options = {
# PERMANENT options
'format': 'bestaudio/best',
'keepvideo': False,
'outtmpl': f'song//{trackname}.*',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '320'
}]
}
with youtube_dl.YoutubeDL(options) as mp3:
mp3.download([YTLINK])
aud = eyed3.load(f"song//{trackname}.mp3")
print('[Youtube]Song Downloaded!')
aud.tag.artist = artist
aud.tag.album = album
aud.tag.album_artist = artist
aud.tag.title = trackname
aud.tag.track_num = tracknum
aud.tag.year = realese_date
try:
songok = genius.search_song(song, artist)
aud.tag.lyrics.set(songok.lyrics)
print('[Genius]Song lyric Found!')
except:
print('[Genius]Song lyric NOT Found!')
aud.tag.images.set(3, open("songpicts//" + trackname + ".png", 'rb').read(), 'image/png')
aud.tag.save()
bot.sendAudio(chat_id, open(f'song//{trackname}.mp3', 'rb'), title=trackname)
print('[Telegram]Song sent!')
def album(link):
results = spotifyy.album_tracks(link)
albums = results['items']
while results['next']:
results = spotifyy.next(results)
albums.extend(results['items'])
print('[Spotify]Album Found!')
return albums
def artist(link):
results = spotifyy.artist_top_tracks(link)
albums = results['tracks']
print('[Spotify]Artist Found!')
return albums
def searchalbum(track):
results = spotifyy.search(track)
return results['tracks']['items'][0]['album']['external_urls']['spotify']
def playlist(link):
results = spotifyy.playlist_tracks(link)
print('[Spotify]Playlist Found!')
return results['items']
def searchsingle(track):
results = spotifyy.search(track)
return results['tracks']['items'][0]['href']
def searchartist(searchstr):
results = spotifyy.search(searchstr)
return results['tracks']['items'][0]['artists'][0]["external_urls"]['spotify']
| 2.625 | 3 |
tests/test_atomdict.py | Tillsten/atom | 0 | 7242 | #------------------------------------------------------------------------------
# Copyright (c) 2018-2019, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
"""Test the typed dictionary.
"""
import sys
import pytest
from atom.api import Atom, Dict, Int, atomdict
@pytest.fixture
def atom_dict():
"""Atom with different Dict members.
"""
class DictAtom(Atom):
untyped = Dict()
keytyped = Dict(Int())
valuetyped = Dict(value=Int())
fullytyped = Dict(Int(), Int())
untyped_default = Dict(default={1: 1})
keytyped_default = Dict(Int(), default={1: 1})
valuetyped_default = Dict(value=Int(), default={1: 1})
fullytyped_default = Dict(Int(), Int(), default={1: 1})
return DictAtom()
MEMBERS = ['untyped', 'keytyped', 'valuetyped', 'fullytyped',
'untyped_default', 'keytyped_default', 'valuetyped_default',
'fullytyped_default']
@pytest.mark.parametrize('member', MEMBERS)
def test_instance(atom_dict, member):
"""Test the repr.
"""
assert isinstance(getattr(atom_dict, member), atomdict)
@pytest.mark.parametrize('member', MEMBERS)
def test_repr(atom_dict, member):
"""Test the repr.
"""
d = getattr(atom_dict.__class__, member).default_value_mode[1]
if not d:
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert repr(getattr(atom_dict, member)) == repr(d)
@pytest.mark.parametrize('member', MEMBERS)
def test_len(atom_dict, member):
"""Test the len.
"""
d = getattr(atom_dict.__class__, member).default_value_mode[1]
if not d:
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert len(getattr(atom_dict, member)) == len(d)
@pytest.mark.parametrize('member', MEMBERS)
def test_contains(atom_dict, member):
"""Test __contains__.
"""
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert 5 in getattr(atom_dict, member)
del getattr(atom_dict, member)[5]
assert 5 not in getattr(atom_dict, member)
@pytest.mark.parametrize('member', MEMBERS)
def test_keys(atom_dict, member):
"""Test the keys.
"""
d = getattr(atom_dict.__class__, member).default_value_mode[1]
if not d:
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert getattr(atom_dict, member).keys() == d.keys()
@pytest.mark.parametrize('member', MEMBERS)
def test_copy(atom_dict, member):
"""Test copy.
"""
d = getattr(atom_dict.__class__, member).default_value_mode[1]
if not d:
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert getattr(atom_dict, member).copy() == d
def test_setitem(atom_dict):
"""Test setting items.
"""
atom_dict.untyped[''] = 1
assert atom_dict.untyped[''] == 1
atom_dict.keytyped[1] = ''
assert atom_dict.keytyped[1] == ''
with pytest.raises(TypeError):
atom_dict.keytyped[''] = 1
atom_dict.valuetyped[1] = 1
assert atom_dict.valuetyped[1] == 1
with pytest.raises(TypeError):
atom_dict.valuetyped[''] = ''
atom_dict.fullytyped[1] = 1
assert atom_dict.fullytyped[1] == 1
with pytest.raises(TypeError):
atom_dict.fullytyped[''] = 1
with pytest.raises(TypeError):
atom_dict.fullytyped[1] = ''
def test_setdefault(atom_dict):
"""Test using setdefault.
"""
assert atom_dict.untyped.setdefault('', 1) == 1
assert atom_dict.untyped.setdefault('', 2) == 1
assert atom_dict.untyped[''] == 1
assert atom_dict.keytyped.setdefault(1, '') == ''
assert atom_dict.keytyped[1] == ''
with pytest.raises(TypeError):
atom_dict.keytyped.setdefault('', 1)
assert atom_dict.valuetyped.setdefault(1, 1) == 1
assert atom_dict.valuetyped.setdefault(1, '') == 1
assert atom_dict.valuetyped[1] == 1
with pytest.raises(TypeError):
atom_dict.valuetyped.setdefault(2, '')
assert atom_dict.fullytyped.setdefault(1, 1) == 1
assert atom_dict.fullytyped.setdefault(1, '') == 1
assert atom_dict.fullytyped[1] == 1
with pytest.raises(TypeError):
atom_dict.fullytyped.setdefault('', 1)
with pytest.raises(TypeError):
atom_dict.fullytyped.setdefault(2, '')
def test_update(atom_dict):
"""Test update a dict.
"""
atom_dict.untyped.update({'': 1})
assert atom_dict.untyped[''] == 1
atom_dict.untyped.update([('1', 1)])
assert atom_dict.untyped['1'] == 1
atom_dict.keytyped.update({1: 1})
assert atom_dict.keytyped[1] == 1
atom_dict.keytyped.update([(2, 1)])
assert atom_dict.keytyped[1] == 1
with pytest.raises(TypeError):
atom_dict.keytyped.update({'': 1})
atom_dict.valuetyped.update({1: 1})
assert atom_dict.valuetyped[1] == 1
atom_dict.valuetyped.update([(2, 1)])
assert atom_dict.valuetyped[1] == 1
with pytest.raises(TypeError):
atom_dict.valuetyped.update({'': ''})
atom_dict.fullytyped.update({1: 1})
assert atom_dict.fullytyped[1] == 1
atom_dict.fullytyped.update([(2, 1)])
assert atom_dict.fullytyped[1] == 1
with pytest.raises(TypeError):
atom_dict.fullytyped.update({'': 1})
with pytest.raises(TypeError):
atom_dict.fullytyped.update({'': ''})
| 2.453125 | 2 |
dippy/core/timestamp.py | eggveloper/dippy.core | 4 | 7243 | <gh_stars>1-10
from datetime import datetime
class Timestamp(float):
def __new__(cls, value=None):
return super().__new__(
cls, datetime.utcnow().timestamp() if value is None else value
)
def to_date(self) -> datetime:
return datetime.utcfromtimestamp(self)
def __repr__(self):
return f"<{type(self).__name__} {self}>"
def __str__(self):
return self.to_date().isoformat(" ")
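# Example usage sketch (illustrative only, not part of the original file):
#
#     now = Timestamp()            # current UTC time as a float timestamp
#     later = Timestamp(now + 60)  # sixty seconds later
#     later.to_date()              # -> datetime.datetime in UTC
#     str(later)                   # -> ISO 8601 string with a space separator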
| 3.046875 | 3 |
bible/admin.py | tushortz/biblelover | 0 | 7244 | from django.contrib import admin
from bible.models import Bible, VerseOfTheDay
@admin.register(Bible)
class BibleAdmin(admin.ModelAdmin):
list_display = ['__str__', 'text']
readonly_fields = ['book', 'chapter', 'verse', 'text', 'category']
search_fields = ['text', 'book', 'chapter']
list_filter = ['category', 'book']
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
def has_change_permission(self, request, obj=None):
return False
@admin.register(VerseOfTheDay)
class VerseOfTheDayAdmin(admin.ModelAdmin):
autocomplete_fields = ['verse']
raw_id_fields = ['verse']
| 2.21875 | 2 |
typy/nodes.py | Procrat/typy | 3 | 7245 | <reponame>Procrat/typy<filename>typy/nodes.py
"""
Our own implementation of an abstract syntax tree (AST).
The convert function recursively converts a Python AST (from the module `ast`)
to our own AST (of the class `Node`).
"""
import ast
from logging import debug
from typy.builtin import data_types
from typy.exceptions import NotYetSupported, NoSuchAttribute, NotIterable
from typy import types
class Node:
def __init__(self, type_map, ast_node):
self.type_map = type_map
self._ast_fields = ast_node._fields
def check(self):
"""Must be overriden in subtype."""
raise NotYetSupported('check call to', self)
def iter_fields(self):
for field in self._ast_fields:
try:
yield field, getattr(self, field)
except AttributeError:
pass
def iter_child_nodes(self):
for _name, field in self.iter_fields():
if isinstance(field, Node):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, Node):
yield item
class FunctionDef(Node):
def __init__(self, type_map, ast_node):
if (ast_node.args.vararg is not None or
len(ast_node.args.kwonlyargs) > 0 or
len(ast_node.args.kw_defaults) > 0 or
ast_node.args.kwarg is not None or
len(ast_node.args.defaults) > 0):
raise NotYetSupported('default arguments and keyword arguments')
super().__init__(type_map, ast_node)
self.name = ast_node.name
self.params = [arg.arg for arg in ast_node.args.args]
self.body = [convert(type_map, stmt) for stmt in ast_node.body]
self._ast_fields = ('name', 'params', 'body')
def check(self):
debug('checking func def %s', self.name)
function = types.Function(self, self.type_map)
self.type_map.add_variable(self.name, function)
return data_types.None_()
def __repr__(self):
return 'def ' + self.name + '()'
class ClassDef(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.name = ast_node.name
self.body = [convert(type_map, stmt) for stmt in ast_node.body]
def check(self):
debug('checking class def %s', self.name)
class_namespace = self.type_map.enter_namespace(self.name)
for stmt in self.body:
stmt.check()
self.type_map.exit_namespace()
class_ = types.Class(self, self.type_map, class_namespace)
self.type_map.add_variable(self.name, class_)
return data_types.None_()
def __repr__(self):
return 'def ' + self.name
class Attribute(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.value = convert(type_map, ast_node.value)
self.attr = ast_node.attr
self.ctx = ast_node.ctx
def check(self):
debug('checking attr %s', self)
value_type = self.value.check()
debug('attr %r = %r', self, value_type)
if isinstance(self.ctx, ast.Load):
return value_type.get_attribute(self.attr)
elif isinstance(self.ctx, ast.Store):
return (value_type, self.attr)
else:
# TODO implement for Del, AugLoad, AugStore, Param
raise NotYetSupported('name context', self.ctx)
def __repr__(self):
return repr(self.value) + '.' + self.attr
class Name(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.id = ast_node.id
self.ctx = ast_node.ctx
def check(self):
debug('checking name %s', self.id)
if isinstance(self.ctx, ast.Load):
return self.type_map.find(self.id)
elif isinstance(self.ctx, ast.Store):
return self
else:
# TODO implement for Del, AugLoad, AugStore, Param
raise NotYetSupported('name context', self.ctx)
def __repr__(self):
return self.id
class Call(Node):
def __init__(self, type_map, ast_node):
if (len(ast_node.keywords) > 0 or
ast_node.starargs is not None or
ast_node.kwargs is not None):
raise NotYetSupported('keyword arguments and star arguments')
super().__init__(type_map, ast_node)
self.func = convert(type_map, ast_node.func)
self.args = [convert(type_map, expr) for expr in ast_node.args]
def check(self):
debug('checking call')
func = self.func.check()
args = [arg.check() for arg in self.args]
return func.check_call(args)
def __repr__(self):
return repr(self.func) + \
'(' + ', '.join(repr(x) for x in self.args) + ')'
class Expr(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.value = convert(type_map, ast_node.value)
def check(self):
debug('checking expr')
self.value.check()
return data_types.None_()
def __repr__(self):
return repr(self.value)
class Return(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.value = convert(type_map, ast_node.value)
def check(self):
debug('checking return')
return self.value.check()
def __repr__(self):
return 'return ' + repr(self.value)
class Module(Node, types.Type):
def __init__(self, type_map, ast_node):
Node.__init__(self, type_map, ast_node)
types.Type.__init__(self, type_map)
self.body = [convert(type_map, stmt) for stmt in ast_node.body]
def check(self):
debug('checking module')
self.module_namespace = self.type_map.enter_namespace('__main__')
debug('entering %r', self.type_map.current_namespace)
for stmt in self.body:
debug('still in %r', self.type_map.current_namespace)
stmt.check()
debug('leaving %r', self.type_map.current_namespace)
self.type_map.exit_namespace()
def get_attribute(self, name):
try:
return self.module_namespace[name]
except KeyError:
            return types.Type.get_attribute(self, name)
class Assign(Node):
def __init__(self, type_map, ast_node):
# TODO handle multiple targets
if len(ast_node.targets) > 1:
raise NotYetSupported('assignment with multiple targets')
super().__init__(type_map, ast_node)
self.target = convert(type_map, ast_node.targets[0])
self.value = convert(type_map, ast_node.value)
self._ast_fields = ('target', 'value')
def check(self):
debug('checking assign %r', self.target)
_assign(self.target, self.value, self.type_map)
return data_types.None_()
def __repr__(self):
return repr(self.target) + ' = ' + repr(self.value)
class Pass(Node):
def check(self):
debug('checking pass')
return data_types.None_()
def __repr__(self):
return 'pass'
class Not(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.value = convert(type_map, ast_node.value)
def check(self):
debug('checking not')
self.value.check()
return data_types.Bool()
def __repr__(self):
return 'not ' + repr(self.value)
class BoolOp(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.op = ast_node.op
self.values = [convert(type_map, value) for value in ast_node.values]
def check(self):
debug('checking boolop')
for value in self.values:
value.check()
# TODO return intersection van types?
return data_types.Bool()
def __repr__(self):
op_name = ' {} '.format(self.op)
return '(' + op_name.join(repr(val) for val in self.values) + ')'
class In(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.element = convert(type_map, ast_node.element)
self.container = convert(type_map, ast_node.container)
def check(self):
debug('checking in')
element = self.element.check()
container = self.container.check()
try:
container.call_magic_method('__contains__', element)
except NoSuchAttribute:
if not container.is_iterable():
raise NotIterable(container)
return data_types.Bool()
def __repr__(self):
return '{!r} in {!r}'.format(self.element, self.container)
class For(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.target = convert(type_map, ast_node.target)
self.iter = convert(type_map, ast_node.iter)
self.body = [convert(type_map, stmt) for stmt in ast_node.body]
self.orelse = [convert(type_map, clause) for clause in ast_node.orelse]
def check(self):
debug('checking for')
iterator = self.iter.check()
enclosed_type = iterator.get_enclosed_type()
_assign(self.target, enclosed_type, self.type_map)
for stmt in self.body:
stmt.check()
for stmt in self.orelse:
stmt.check()
# TODO return intersection of values of both branches
return data_types.None_()
def __repr__(self):
s = 'for {!r} in {!r}:\n '.format(self.target, self.iter)
s += '\n '.join(repr(stmt) for stmt in self.body)
if self.orelse:
s += 'else:\n '
s += '\n '.join(repr(stmt) for stmt in self.orelse)
return s
class If(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.test = convert(type_map, ast_node.test)
self.body = [convert(type_map, stmt) for stmt in ast_node.body]
self.orelse = [convert(type_map, stmt) for stmt in ast_node.orelse]
def check(self):
debug('checking if')
# TODO take isinstance into account (?)
# TODO real branching?
self.test.check()
for stmt in self.body:
stmt.check()
for stmt in self.orelse:
stmt.check()
# TODO return intersection of values of both branches
return data_types.None_()
def __repr__(self):
s = 'if {!r}:\n '.format(self.test)
s += '\n '.join(repr(stmt) for stmt in self.body)
if self.orelse:
s += 'else:\n '
s += '\n '.join(repr(stmt) for stmt in self.orelse)
return s
class IfExp(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.test = convert(type_map, ast_node.test)
self.body = convert(type_map, ast_node.body)
self.orelse = convert(type_map, ast_node.orelse)
def check(self):
debug('checking ifexp')
# TODO take isinstance into account (?)
self.test.check()
value1 = self.body.check()
value2 = self.orelse.check()
return types.Intersection(value1, value2)
def __repr__(self):
template = '{!r} if {!r} else {!r}'
return template.format(self.test, self.body, self.orelse)
class NameConstant(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.value = ast_node.value
def check(self):
debug('checking name constant %r', self.value)
if self.value is None:
return data_types.None_()
elif self.value is True or self.value is False:
return data_types.Bool()
else:
raise NotYetSupported('name constant', self.value)
def __repr__(self):
return repr(self.value)
class While(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.test = convert(type_map, ast_node.test)
self.body = [convert(type_map, stmt) for stmt in ast_node.body]
self.orelse = [convert(type_map, stmt) for stmt in ast_node.orelse]
def check(self):
debug('checking while')
# TODO take isinstance into account (?)
# TODO real branching?
self.test.check()
for stmt in self.body:
stmt.check()
for stmt in self.orelse:
stmt.check()
# TODO return intersection of values of both branches
return data_types.None_()
def __repr__(self):
s = 'while {!r}:\n '.format(self.test)
s += '\n '.join(repr(stmt) for stmt in self.body)
if self.orelse:
s += 'else:\n '
s += '\n '.join(repr(stmt) for stmt in self.orelse)
return s
class Break(Node):
def check(self):
debug('checking break')
return data_types.None_()
def __repr__(self):
return 'break'
class Continue(Node):
def check(self):
debug('checking continue')
return data_types.None_()
def __repr__(self):
return 'continue'
class Num(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.number_type = {
int: data_types.Int,
# float: data_types.Float,
# complex: data_types.Complex,
}[type(ast_node.n)]
def check(self):
debug('checking num')
return self.number_type()
class Tuple(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.elts = [convert(type_map, el) for el in ast_node.elts]
self.ctx = ast_node.ctx
def check(self):
debug('checking tuple %r', self)
if isinstance(self.ctx, ast.Load):
el_types = (el.check() for el in self.elts)
return types.Tuple(self.type_map, *el_types)
elif isinstance(self.ctx, ast.Store):
return self
else:
# TODO implement for Del, AugLoad, AugStore, Param
raise NotYetSupported('name context', self.ctx)
def __repr__(self):
return '(' + ', '.join(repr(el) for el in self.elts) + ')'
def _assign(target, value, type_map):
value_type = value.check()
if isinstance(target, Name):
target_type = target.check()
type_map.add_variable(target_type.id, value_type)
elif isinstance(target, Attribute):
target_type, attr = target.check()
target_type.set_attribute(attr, value_type)
else:
raise NotYetSupported('assignment to', target)
def convert(type_map, node):
class_name = node.__class__.__name__
try:
# Try to convert to a node
class_ = globals()[class_name]
return class_(type_map, node)
except KeyError:
try:
# Try to convert to a builtin type
class_ = getattr(data_types, class_name)
return class_()
except AttributeError:
raise NotYetSupported('node', node)
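# Illustrative sketch (not part of the original file): one way the convert()/
# check() pair above could be driven over a whole source string. The `type_map`
# argument is assumed to provide the interface the Node classes use
# (e.g. add_variable()).
def _example_check_source(type_map, source):
    """Convert every top-level statement of `source` and run its check()."""
    import ast  # the classes above already rely on ast (ast.Load / ast.Store)
    return [convert(type_map, stmt).check() for stmt in ast.parse(source).body]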
| 2.390625 | 2 |
anonlink-entity-service/backend/entityservice/integrationtests/objectstoretests/test_objectstore.py | Sam-Gresh/linkage-agent-tools | 1 | 7246 | <reponame>Sam-Gresh/linkage-agent-tools
"""
Testing:
- uploading over existing files
- using deleted credentials
- using expired credentials
"""
import io
import minio
from minio import Minio
import pytest
from minio.credentials import AssumeRoleProvider, Credentials
from entityservice.object_store import connect_to_object_store, connect_to_upload_object_store
from entityservice.settings import Config
restricted_upload_policy = """{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:PutObject"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::uploads/2020/*"
],
"Sid": "Upload-access-to-specific-bucket-only"
}
]
}
"""
class TestAssumeRole:
def test_temp_credentials_minio(self):
upload_endpoint = Config.UPLOAD_OBJECT_STORE_SERVER
bucket_name = "uploads"
root_mc_client = connect_to_object_store()
upload_restricted_minio_client = connect_to_upload_object_store()
if not root_mc_client.bucket_exists(bucket_name):
root_mc_client.make_bucket(bucket_name)
with pytest.raises(minio.error.AccessDenied):
upload_restricted_minio_client.list_buckets()
# Should be able to put an object though
upload_restricted_minio_client.put_object(bucket_name, 'testobject', io.BytesIO(b'data'), length=4)
credentials_provider = AssumeRoleProvider(upload_restricted_minio_client,
Policy=restricted_upload_policy
)
temp_creds = Credentials(provider=credentials_provider)
newly_restricted_mc_client = Minio(upload_endpoint, credentials=temp_creds, region='us-east-1', secure=False)
with pytest.raises(minio.error.AccessDenied):
newly_restricted_mc_client.list_buckets()
# Note this put object worked with the earlier credentials
# But should fail if we have applied the more restrictive policy
with pytest.raises(minio.error.AccessDenied):
newly_restricted_mc_client.put_object(bucket_name, 'testobject2', io.BytesIO(b'data'), length=4)
# this path is allowed in the policy however
newly_restricted_mc_client.put_object(bucket_name, '2020/testobject', io.BytesIO(b'data'), length=4)
| 1.929688 | 2 |
soil/build/lib/soil/db/sqlalchemy/api.py | JackDan9/soil | 1 | 7247 | # Copyright 2020 Soil, Inc.
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import collections
import copy
import datetime
import functools
import inspect
import sys
import threading
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log as logging
import soil.conf
from soil.i18n import _
CONF = soil.conf.CONF
LOG = logging.getLogger(__name__)
_LOCK = threading.Lock()
_FACADE = None
def _create_facade_lazily():
global _LOCK
with _LOCK:
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade(
CONF.database.connection,
**dict(CONF.database)
)
return _FACADE
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(**kwargs):
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
def dispose_engine():
get_engine().dispose()
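# Sketch (assumption, not in the original file): the oslo.db-style backend hook
# commonly paired with modules like this one; it returns the module itself so a
# DB API loader can dispatch to the functions above.
def get_backend():
    """Return this module as the database backend implementation."""
    return sys.modules[__name__]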
| 1.796875 | 2 |
tests/test_models.py | kykrueger/redash | 1 | 7248 | <reponame>kykrueger/redash
import calendar
import datetime
from unittest import TestCase
import pytz
from dateutil.parser import parse as date_parse
from tests import BaseTestCase
from redash import models, redis_connection
from redash.models import db, types
from redash.utils import gen_query_hash, utcnow
class DashboardTest(BaseTestCase):
def test_appends_suffix_to_slug_when_duplicate(self):
d1 = self.factory.create_dashboard()
db.session.flush()
self.assertEqual(d1.slug, 'test')
d2 = self.factory.create_dashboard(user=d1.user)
db.session.flush()
self.assertNotEqual(d1.slug, d2.slug)
d3 = self.factory.create_dashboard(user=d1.user)
db.session.flush()
self.assertNotEqual(d1.slug, d3.slug)
self.assertNotEqual(d2.slug, d3.slug)
class ShouldScheduleNextTest(TestCase):
def test_interval_schedule_that_needs_reschedule(self):
now = utcnow()
two_hours_ago = now - datetime.timedelta(hours=2)
self.assertTrue(models.should_schedule_next(two_hours_ago, now, "3600"))
def test_interval_schedule_that_doesnt_need_reschedule(self):
now = utcnow()
half_an_hour_ago = now - datetime.timedelta(minutes=30)
self.assertFalse(models.should_schedule_next(half_an_hour_ago, now, "3600"))
def test_exact_time_that_needs_reschedule(self):
now = utcnow()
yesterday = now - datetime.timedelta(days=1)
scheduled_datetime = now - datetime.timedelta(hours=3)
scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
self.assertTrue(models.should_schedule_next(yesterday, now, "86400",
scheduled_time))
def test_exact_time_that_doesnt_need_reschedule(self):
now = date_parse("2015-10-16 20:10")
yesterday = date_parse("2015-10-15 23:07")
schedule = "23:00"
self.assertFalse(models.should_schedule_next(yesterday, now, "86400", schedule))
def test_exact_time_with_day_change(self):
now = utcnow().replace(hour=0, minute=1)
previous = (now - datetime.timedelta(days=2)).replace(hour=23,
minute=59)
        schedule = "23:59"
self.assertTrue(models.should_schedule_next(previous, now, "86400", schedule))
def test_exact_time_every_x_days_that_needs_reschedule(self):
now = utcnow()
four_days_ago = now - datetime.timedelta(days=4)
three_day_interval = "259200"
scheduled_datetime = now - datetime.timedelta(hours=3)
scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
self.assertTrue(models.should_schedule_next(four_days_ago, now, three_day_interval,
scheduled_time))
def test_exact_time_every_x_days_that_doesnt_need_reschedule(self):
now = utcnow()
four_days_ago = now - datetime.timedelta(days=2)
three_day_interval = "259200"
scheduled_datetime = now - datetime.timedelta(hours=3)
scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
self.assertFalse(models.should_schedule_next(four_days_ago, now, three_day_interval,
scheduled_time))
def test_exact_time_every_x_days_with_day_change(self):
now = utcnow().replace(hour=23, minute=59)
previous = (now - datetime.timedelta(days=2)).replace(hour=0, minute=1)
schedule = "23:58"
three_day_interval = "259200"
self.assertTrue(models.should_schedule_next(previous, now, three_day_interval, schedule))
def test_exact_time_every_x_weeks_that_needs_reschedule(self):
# Setup:
#
# 1) The query should run every 3 weeks on Tuesday
# 2) The last time it ran was 3 weeks ago from this week's Thursday
# 3) It is now Wednesday of this week
#
# Expectation: Even though less than 3 weeks have passed since the
# last run 3 weeks ago on Thursday, it's overdue since
# it should be running on Tuesdays.
this_thursday = utcnow() + datetime.timedelta(days=list(calendar.day_name).index("Thursday") - utcnow().weekday())
three_weeks_ago = this_thursday - datetime.timedelta(weeks=3)
now = this_thursday - datetime.timedelta(days=1)
three_week_interval = "1814400"
scheduled_datetime = now - datetime.timedelta(hours=3)
scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
self.assertTrue(models.should_schedule_next(three_weeks_ago, now, three_week_interval,
scheduled_time, "Tuesday"))
def test_exact_time_every_x_weeks_that_doesnt_need_reschedule(self):
# Setup:
#
        # 1) The query should run every 3 weeks on Thursday
# 2) The last time it ran was 3 weeks ago from this week's Tuesday
# 3) It is now Wednesday of this week
#
# Expectation: Even though more than 3 weeks have passed since the
# last run 3 weeks ago on Tuesday, it's not overdue since
# it should be running on Thursdays.
this_tuesday = utcnow() + datetime.timedelta(days=list(calendar.day_name).index("Tuesday") - utcnow().weekday())
three_weeks_ago = this_tuesday - datetime.timedelta(weeks=3)
now = this_tuesday + datetime.timedelta(days=1)
three_week_interval = "1814400"
scheduled_datetime = now - datetime.timedelta(hours=3)
scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
self.assertFalse(models.should_schedule_next(three_weeks_ago, now, three_week_interval,
scheduled_time, "Thursday"))
def test_backoff(self):
now = utcnow()
two_hours_ago = now - datetime.timedelta(hours=2)
self.assertTrue(models.should_schedule_next(two_hours_ago, now, "3600",
failures=5))
self.assertFalse(models.should_schedule_next(two_hours_ago, now,
"3600", failures=10))
def test_next_iteration_overflow(self):
now = utcnow()
two_hours_ago = now - datetime.timedelta(hours=2)
self.assertFalse(models.should_schedule_next(two_hours_ago, now, "3600", failures=32))
class QueryOutdatedQueriesTest(BaseTestCase):
# TODO: this test can be refactored to use mock version of should_schedule_next to simplify it.
def test_outdated_queries_skips_unscheduled_queries(self):
query = self.factory.create_query(schedule={'interval':None, 'time': None, 'until':None, 'day_of_week':None})
query_with_none = self.factory.create_query(schedule=None)
queries = models.Query.outdated_queries()
self.assertNotIn(query, queries)
self.assertNotIn(query_with_none, queries)
def test_outdated_queries_works_with_ttl_based_schedule(self):
two_hours_ago = utcnow() - datetime.timedelta(hours=2)
query = self.factory.create_query(schedule={'interval':'3600', 'time': None, 'until':None, 'day_of_week':None})
query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=two_hours_ago)
query.latest_query_data = query_result
queries = models.Query.outdated_queries()
self.assertIn(query, queries)
def test_outdated_queries_works_scheduled_queries_tracker(self):
two_hours_ago = utcnow() - datetime.timedelta(hours=2)
query = self.factory.create_query(schedule={'interval':'3600', 'time': None, 'until':None, 'day_of_week':None})
query_result = self.factory.create_query_result(query=query, retrieved_at=two_hours_ago)
query.latest_query_data = query_result
models.scheduled_queries_executions.update(query.id)
queries = models.Query.outdated_queries()
self.assertNotIn(query, queries)
def test_skips_fresh_queries(self):
half_an_hour_ago = utcnow() - datetime.timedelta(minutes=30)
query = self.factory.create_query(schedule={'interval':'3600', 'time': None, 'until':None, 'day_of_week':None})
query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=half_an_hour_ago)
query.latest_query_data = query_result
queries = models.Query.outdated_queries()
self.assertNotIn(query, queries)
def test_outdated_queries_works_with_specific_time_schedule(self):
half_an_hour_ago = utcnow() - datetime.timedelta(minutes=30)
query = self.factory.create_query(schedule={'interval':'86400', 'time':half_an_hour_ago.strftime('%H:%M'), 'until':None, 'day_of_week':None})
query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=half_an_hour_ago - datetime.timedelta(days=1))
query.latest_query_data = query_result
queries = models.Query.outdated_queries()
self.assertIn(query, queries)
def test_enqueues_query_only_once(self):
"""
Only one query per data source with the same text will be reported by
Query.outdated_queries().
"""
query = self.factory.create_query(schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None})
query2 = self.factory.create_query(
schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, query_text=query.query_text,
query_hash=query.query_hash)
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(
retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
query2.latest_query_data = query_result
self.assertEqual(list(models.Query.outdated_queries()), [query2])
def test_enqueues_query_with_correct_data_source(self):
"""
Queries from different data sources will be reported by
Query.outdated_queries() even if they have the same query text.
"""
query = self.factory.create_query(
schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, data_source=self.factory.create_data_source())
query2 = self.factory.create_query(
schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, query_text=query.query_text,
query_hash=query.query_hash)
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(
retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
query2.latest_query_data = query_result
outdated_queries = models.Query.outdated_queries()
self.assertEqual(len(outdated_queries), 2)
self.assertIn(query, outdated_queries)
self.assertIn(query2, outdated_queries)
def test_enqueues_only_for_relevant_data_source(self):
"""
If multiple queries with the same text exist, only ones that are
scheduled to be refreshed are reported by Query.outdated_queries().
"""
query = self.factory.create_query(schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None})
query2 = self.factory.create_query(
schedule={'interval':'3600', 'until':None, 'time': None, 'day_of_week':None}, query_text=query.query_text,
query_hash=query.query_hash)
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(
retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
query2.latest_query_data = query_result
self.assertEqual(list(models.Query.outdated_queries()), [query])
def test_failure_extends_schedule(self):
"""
Execution failures recorded for a query result in exponential backoff
for scheduling future execution.
"""
query = self.factory.create_query(schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, schedule_failures=4)
retrieved_at = utcnow() - datetime.timedelta(minutes=16)
query_result = self.factory.create_query_result(
retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
self.assertEqual(list(models.Query.outdated_queries()), [])
query_result.retrieved_at = utcnow() - datetime.timedelta(minutes=17)
self.assertEqual(list(models.Query.outdated_queries()), [query])
def test_schedule_until_after(self):
"""
Queries with non-null ``schedule['until']`` are not reported by
Query.outdated_queries() after the given time is past.
"""
one_day_ago = (utcnow() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
two_hours_ago = utcnow() - datetime.timedelta(hours=2)
query = self.factory.create_query(schedule={'interval':'3600', 'until':one_day_ago, 'time':None, 'day_of_week':None})
query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=two_hours_ago)
query.latest_query_data = query_result
queries = models.Query.outdated_queries()
self.assertNotIn(query, queries)
def test_schedule_until_before(self):
"""
Queries with non-null ``schedule['until']`` are reported by
Query.outdated_queries() before the given time is past.
"""
one_day_from_now = (utcnow() + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
two_hours_ago = utcnow() - datetime.timedelta(hours=2)
query = self.factory.create_query(schedule={'interval':'3600', 'until':one_day_from_now, 'time': None, 'day_of_week':None})
query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=two_hours_ago)
query.latest_query_data = query_result
queries = models.Query.outdated_queries()
self.assertIn(query, queries)
class QueryArchiveTest(BaseTestCase):
def test_archive_query_sets_flag(self):
query = self.factory.create_query()
db.session.flush()
query.archive()
self.assertEqual(query.is_archived, True)
def test_archived_query_doesnt_return_in_all(self):
query = self.factory.create_query(schedule={'interval':'1', 'until':None, 'time': None, 'day_of_week':None})
yesterday = utcnow() - datetime.timedelta(days=1)
query_result = models.QueryResult.store_result(
query.org_id, query.data_source, query.query_hash, query.query_text,
"1", 123, yesterday)
query.latest_query_data = query_result
groups = list(models.Group.query.filter(models.Group.id.in_(query.groups)))
self.assertIn(query, list(models.Query.all_queries([g.id for g in groups])))
self.assertIn(query, models.Query.outdated_queries())
db.session.flush()
query.archive()
self.assertNotIn(query, list(models.Query.all_queries([g.id for g in groups])))
self.assertNotIn(query, models.Query.outdated_queries())
def test_removes_associated_widgets_from_dashboards(self):
widget = self.factory.create_widget()
query = widget.visualization.query_rel
db.session.commit()
query.archive()
db.session.flush()
self.assertEqual(models.Widget.query.get(widget.id), None)
def test_removes_scheduling(self):
query = self.factory.create_query(schedule={'interval':'1', 'until':None, 'time': None, 'day_of_week':None})
query.archive()
self.assertIsNone(query.schedule)
def test_deletes_alerts(self):
subscription = self.factory.create_alert_subscription()
query = subscription.alert.query_rel
db.session.commit()
query.archive()
db.session.flush()
self.assertEqual(models.Alert.query.get(subscription.alert.id), None)
self.assertEqual(models.AlertSubscription.query.get(subscription.id), None)
class TestUnusedQueryResults(BaseTestCase):
def test_returns_only_unused_query_results(self):
two_weeks_ago = utcnow() - datetime.timedelta(days=14)
qr = self.factory.create_query_result()
self.factory.create_query(latest_query_data=qr)
db.session.flush()
unused_qr = self.factory.create_query_result(retrieved_at=two_weeks_ago)
self.assertIn(unused_qr, list(models.QueryResult.unused()))
self.assertNotIn(qr, list(models.QueryResult.unused()))
def test_returns_only_over_a_week_old_results(self):
two_weeks_ago = utcnow() - datetime.timedelta(days=14)
unused_qr = self.factory.create_query_result(retrieved_at=two_weeks_ago)
db.session.flush()
new_unused_qr = self.factory.create_query_result()
self.assertIn(unused_qr, list(models.QueryResult.unused()))
self.assertNotIn(new_unused_qr, list(models.QueryResult.unused()))
class TestQueryAll(BaseTestCase):
def test_returns_only_queries_in_given_groups(self):
ds1 = self.factory.create_data_source()
ds2 = self.factory.create_data_source()
group1 = models.Group(name="g1", org=ds1.org, permissions=['create', 'view'])
group2 = models.Group(name="g2", org=ds1.org, permissions=['create', 'view'])
q1 = self.factory.create_query(data_source=ds1)
q2 = self.factory.create_query(data_source=ds2)
db.session.add_all([
ds1, ds2,
group1, group2,
q1, q2,
models.DataSourceGroup(
group=group1, data_source=ds1),
models.DataSourceGroup(group=group2, data_source=ds2)
])
db.session.flush()
self.assertIn(q1, list(models.Query.all_queries([group1.id])))
self.assertNotIn(q2, list(models.Query.all_queries([group1.id])))
self.assertIn(q1, list(models.Query.all_queries([group1.id, group2.id])))
self.assertIn(q2, list(models.Query.all_queries([group1.id, group2.id])))
def test_skips_drafts(self):
q = self.factory.create_query(is_draft=True)
self.assertNotIn(q, models.Query.all_queries([self.factory.default_group.id]))
def test_includes_drafts_of_given_user(self):
q = self.factory.create_query(is_draft=True)
self.assertIn(q, models.Query.all_queries([self.factory.default_group.id], user_id=q.user_id))
def test_order_by_relationship(self):
u1 = self.factory.create_user(name='alice')
u2 = self.factory.create_user(name='bob')
self.factory.create_query(user=u1)
self.factory.create_query(user=u2)
db.session.commit()
# have to reset the order here with None since all_queries orders by
# created_at by default
base = models.Query.all_queries([self.factory.default_group.id]).order_by(None)
qs1 = base.order_by(models.User.name)
self.assertEqual(['alice', 'bob'], [q.user.name for q in qs1])
qs2 = base.order_by(models.User.name.desc())
self.assertEqual(['bob', 'alice'], [q.user.name for q in qs2])
class TestGroup(BaseTestCase):
def test_returns_groups_with_specified_names(self):
org1 = self.factory.create_org()
org2 = self.factory.create_org()
matching_group1 = models.Group(id=999, name="g1", org=org1)
matching_group2 = models.Group(id=888, name="g2", org=org1)
non_matching_group = models.Group(id=777, name="g1", org=org2)
groups = models.Group.find_by_name(org1, ["g1", "g2"])
self.assertIn(matching_group1, groups)
self.assertIn(matching_group2, groups)
self.assertNotIn(non_matching_group, groups)
def test_returns_no_groups(self):
org1 = self.factory.create_org()
models.Group(id=999, name="g1", org=org1)
self.assertEqual([], models.Group.find_by_name(org1, ["non-existing"]))
class TestQueryResultStoreResult(BaseTestCase):
def setUp(self):
super(TestQueryResultStoreResult, self).setUp()
self.data_source = self.factory.data_source
self.query = "SELECT 1"
self.query_hash = gen_query_hash(self.query)
self.runtime = 123
self.utcnow = utcnow()
self.data = '{"a": 1}'
def test_stores_the_result(self):
query_result = models.QueryResult.store_result(
self.data_source.org_id, self.data_source, self.query_hash,
self.query, self.data, self.runtime, self.utcnow)
self.assertEqual(query_result._data, self.data)
self.assertEqual(query_result.runtime, self.runtime)
self.assertEqual(query_result.retrieved_at, self.utcnow)
self.assertEqual(query_result.query_text, self.query)
self.assertEqual(query_result.query_hash, self.query_hash)
self.assertEqual(query_result.data_source, self.data_source)
class TestEvents(BaseTestCase):
def raw_event(self):
timestamp = 1411778709.791
user = self.factory.user
created_at = datetime.datetime.utcfromtimestamp(timestamp)
db.session.flush()
raw_event = {"action": "view",
"timestamp": timestamp,
"object_type": "dashboard",
"user_id": user.id,
"object_id": 1,
"org_id": 1}
return raw_event, user, created_at
def test_records_event(self):
raw_event, user, created_at = self.raw_event()
event = models.Event.record(raw_event)
db.session.flush()
self.assertEqual(event.user, user)
self.assertEqual(event.action, "view")
self.assertEqual(event.object_type, "dashboard")
self.assertEqual(event.object_id, 1)
self.assertEqual(event.created_at, created_at)
def test_records_additional_properties(self):
raw_event, _, _ = self.raw_event()
additional_properties = {'test': 1, 'test2': 2, 'whatever': "abc"}
raw_event.update(additional_properties)
event = models.Event.record(raw_event)
self.assertDictEqual(event.additional_properties, additional_properties)
def _set_up_dashboard_test(d):
d.g1 = d.factory.create_group(name='First', permissions=['create', 'view'])
d.g2 = d.factory.create_group(name='Second', permissions=['create', 'view'])
d.ds1 = d.factory.create_data_source()
d.ds2 = d.factory.create_data_source()
db.session.flush()
d.u1 = d.factory.create_user(group_ids=[d.g1.id])
d.u2 = d.factory.create_user(group_ids=[d.g2.id])
db.session.add_all([
models.DataSourceGroup(group=d.g1, data_source=d.ds1),
models.DataSourceGroup(group=d.g2, data_source=d.ds2)
])
d.q1 = d.factory.create_query(data_source=d.ds1)
d.q2 = d.factory.create_query(data_source=d.ds2)
d.v1 = d.factory.create_visualization(query_rel=d.q1)
d.v2 = d.factory.create_visualization(query_rel=d.q2)
d.w1 = d.factory.create_widget(visualization=d.v1)
d.w2 = d.factory.create_widget(visualization=d.v2)
d.w3 = d.factory.create_widget(visualization=d.v2, dashboard=d.w2.dashboard)
d.w4 = d.factory.create_widget(visualization=d.v2)
d.w5 = d.factory.create_widget(visualization=d.v1, dashboard=d.w4.dashboard)
d.w1.dashboard.is_draft = False
d.w2.dashboard.is_draft = False
d.w4.dashboard.is_draft = False
class TestDashboardAll(BaseTestCase):
def setUp(self):
super(TestDashboardAll, self).setUp()
_set_up_dashboard_test(self)
def test_requires_group_or_user_id(self):
d1 = self.factory.create_dashboard()
self.assertNotIn(d1, list(models.Dashboard.all(
d1.user.org, d1.user.group_ids, None)))
l2 = list(models.Dashboard.all(
d1.user.org, [0], d1.user.id))
self.assertIn(d1, l2)
def test_returns_dashboards_based_on_groups(self):
self.assertIn(self.w1.dashboard, list(models.Dashboard.all(
self.u1.org, self.u1.group_ids, None)))
self.assertIn(self.w2.dashboard, list(models.Dashboard.all(
self.u2.org, self.u2.group_ids, None)))
self.assertNotIn(self.w1.dashboard, list(models.Dashboard.all(
self.u2.org, self.u2.group_ids, None)))
self.assertNotIn(self.w2.dashboard, list(models.Dashboard.all(
self.u1.org, self.u1.group_ids, None)))
def test_returns_each_dashboard_once(self):
dashboards = list(models.Dashboard.all(self.u2.org, self.u2.group_ids, None))
self.assertEqual(len(dashboards), 2)
def test_returns_dashboard_you_have_partial_access_to(self):
self.assertIn(self.w5.dashboard, models.Dashboard.all(self.u1.org, self.u1.group_ids, None))
def test_returns_dashboards_created_by_user(self):
d1 = self.factory.create_dashboard(user=self.u1)
db.session.flush()
self.assertIn(d1, list(models.Dashboard.all(self.u1.org, self.u1.group_ids, self.u1.id)))
self.assertIn(d1, list(models.Dashboard.all(self.u1.org, [0], self.u1.id)))
self.assertNotIn(d1, list(models.Dashboard.all(self.u2.org, self.u2.group_ids, self.u2.id)))
def test_returns_dashboards_with_text_widgets(self):
w1 = self.factory.create_widget(visualization=None)
self.assertIn(w1.dashboard, models.Dashboard.all(self.u1.org, self.u1.group_ids, None))
self.assertIn(w1.dashboard, models.Dashboard.all(self.u2.org, self.u2.group_ids, None))
def test_returns_dashboards_from_current_org_only(self):
w1 = self.factory.create_widget(visualization=None)
user = self.factory.create_user(org=self.factory.create_org())
self.assertIn(w1.dashboard, models.Dashboard.all(self.u1.org, self.u1.group_ids, None))
self.assertNotIn(w1.dashboard, models.Dashboard.all(user.org, user.group_ids, None))
| 2.515625 | 3 |
.history/List of Capstone Projects/FibonacciSequence_20200516134123.py | EvanthiosPapadopoulos/Python3 | 1 | 7249 | '''
Fibonacci Sequence
'''
import HeaderOfFiles
def fibonacciSeq(number):
'''
Generate Fibonacci Sequence to the given number.
'''
a = 1
b = 1
for i in range(number):
yield a
a,b = b,a+b
while True:
try:
f = int(input("Enter a number for Fibonacci: "))
break
except:
print("Give me a number please!")
for number in fibonacciSeq(f):
    print(number)
| 4.5 | 4 |
composer/algorithms/mixup/__init__.py | jacobfulano/composer | 2 | 7250 | <reponame>jacobfulano/composer
# Copyright 2021 MosaicML. All Rights Reserved.
from composer.algorithms.mixup.mixup import MixUp as MixUp
from composer.algorithms.mixup.mixup import MixUpHparams as MixUpHparams
from composer.algorithms.mixup.mixup import mixup_batch as mixup_batch
_name = 'MixUp'
_class_name = 'MixUp'
_functional = 'mixup_batch'
_tldr = 'Blends pairs of examples and labels'
_attribution = '(Zhang et al, 2017)'
_link = 'https://arxiv.org/abs/1710.09412'
_method_card = ''
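# Hedged note (not part of the original file): per the cited paper above
# (Zhang et al., 2017), MixUp trains on convex combinations of example pairs,
#   x_mix = lam * x_i + (1 - lam) * x_j
#   y_mix = lam * y_i + (1 - lam) * y_j
# with lam drawn from a Beta(alpha, alpha) distribution.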
| 0.890625 | 1 |
tests/simple_gan_test.py | alanpeixinho/NiftyNet | 0 | 7251 | from __future__ import absolute_import, print_function
import unittest
import os
import tensorflow as tf
from tensorflow.keras import regularizers
from niftynet.network.simple_gan import SimpleGAN
from tests.niftynet_testcase import NiftyNetTestCase
class SimpleGANTest(NiftyNetTestCase):
def test_3d_reg_shape(self):
input_shape = (2, 32, 32, 32, 1)
noise_shape = (2, 512)
x = tf.ones(input_shape)
r = tf.ones(noise_shape)
simple_gan_instance = SimpleGAN()
out = simple_gan_instance(r, x, is_training=True)
with self.cached_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
out = sess.run(out)
self.assertAllClose(input_shape, out[0].shape)
self.assertAllClose((2, 1), out[1].shape)
self.assertAllClose((2, 1), out[2].shape)
def test_2d_reg_shape(self):
input_shape = (2, 64, 64, 1)
noise_shape = (2, 512)
x = tf.ones(input_shape)
r = tf.ones(noise_shape)
simple_gan_instance = SimpleGAN()
out = simple_gan_instance(r, x, is_training=True)
with self.cached_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
out = sess.run(out)
self.assertAllClose(input_shape, out[0].shape)
self.assertAllClose((2, 1), out[1].shape)
self.assertAllClose((2, 1), out[2].shape)
if __name__ == "__main__":
tf.test.main()
| 2.4375 | 2 |
tests/test.py | N4S4/thingspeak_wrapper | 0 | 7252 | import time
import thingspeak_wrapper as tsw
# Initiate the class ThingWrapper with (CHANNEL_ID, WRITE_API__KEY, READ_API_KEY)
# if it is a public channel, just pass the CHANNEL_ID argument; the api_key arguments default to None
my_channel = tsw.wrapper.ThingWrapper(501309, '6TQDNWJQ44FA0GAQ', '10EVD2N6YIHI5O7Z')
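# Hedged example (hypothetical channel id): per the note above, a public channel
# can be wrapped without API keys by passing only the channel id, e.g.
# public_channel = tsw.wrapper.ThingWrapper(9)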
# all set of functions are:
# my_channel.sender()
# my_channel.multiple_sender()
# my_channel.get_json_feeds()
# my_channel.get_json_feeds_from()
# my_channel.get_xml_feeds()
# my_channel.get_xml_feeds_from()
# my_channel.get_csv_feeds()
# my_channel.get_csv_feeds_from()
# ---------------------------
# Now you can use all the possible functions
# Send a value to a single field
my_channel.sender(1, 4)
# this delay is due to a limitation of the ThingSpeak free account, which allows at most one update every 15 seconds
time.sleep(15)
# ---------------------------
# Send data to multiple field
# It take 2 input as lists ([..], [..])
# Create lists of fields and values
fields = [1, 2, 3]
values = [22.0, 1029, 700]
# pass them to the function
my_channel.multiple_sender(fields, values)
# ---------------------------
# The get-data functions return data as json, xml or csv
# optionally csv can be returned as Pandas Data frame
# pass arguments to the function (field, data_quantity)
# default values are ( fields='feeds', results_quantity=None)
# you will get all fields and all values (max 8000)
json_field1 = my_channel.get_json_feeds(1, 300)
print(json_field1)
# get xml data; pass the same values as in the previous function
xml_field1 = my_channel.get_xml_feeds(1, 300)
print(xml_field1)
# get csv data
# this function requires to specify (field, pandas_format=True, result_quantity=None)
# defaults are (fields='feeds', pandas_format=True, result_quantity=None)
csv_field1 = my_channel.get_csv_feeds(1, pandas_format=True,
results_quantity=300)
print(csv_field1)
# data without pandas_format
csv_no_pandas = my_channel.get_csv_feeds(1, pandas_format=False,
results_quantity=300)
print(csv_no_pandas)
# there is the possibility to request data from and to specific dates
# set date and time as strings YYYY-MM-DD HH:NN:SS
start_date, start_time = '2018-05-21', '12:00:00'
stop_date, stop_time = '2018-05-21', '23:59:59'
# pass values to the function
# defaults are (start_date, start_time, stop_date=None, stop_time=None, fields='feeds')
values_from_date = my_channel.get_json_feeds_from(start_date, start_time, stop_date, stop_time, 1)
print(values_from_date)
| 2.84375 | 3 |
neptunecontrib/monitoring/skopt.py | neptune-ai/neptune-contrib | 22 | 7253 | #
# Copyright (c) 2019, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
import matplotlib.pyplot as plt
import neptune
import numpy as np
import skopt.plots as sk_plots
from skopt.utils import dump
from neptunecontrib.monitoring.utils import axes2fig, expect_not_a_run
class NeptuneCallback:
"""Logs hyperparameter optimization process to Neptune.
Specifically using NeptuneCallback will log: run metrics and run parameters, best run metrics so far, and
the current results checkpoint.
Examples:
Initialize NeptuneCallback::
import neptune
import neptunecontrib.monitoring.skopt as sk_utils
neptune.init(api_token='<PASSWORD>',
project_qualified_name='shared/showroom')
neptune.create_experiment(name='optuna sweep')
neptune_callback = sk_utils.NeptuneCallback()
Run skopt training passing neptune_callback as a callback::
...
results = skopt.forest_minimize(objective, space, callback=[neptune_callback],
base_estimator='ET', n_calls=100, n_random_starts=10)
You can explore an example experiment in Neptune:
https://ui.neptune.ai/o/shared/org/showroom/e/SHOW-1065/logs
"""
def __init__(self, experiment=None, log_checkpoint=True):
self._exp = experiment if experiment else neptune
expect_not_a_run(self._exp)
self.log_checkpoint = log_checkpoint
self._iteration = 0
def __call__(self, res):
self._exp.log_metric('run_score', x=self._iteration, y=res.func_vals[-1])
self._exp.log_metric('best_so_far_run_score', x=self._iteration, y=np.min(res.func_vals))
self._exp.log_text('run_parameters', x=self._iteration, y=NeptuneCallback._get_last_params(res))
if self.log_checkpoint:
self._exp.log_artifact(_export_results_object(res), 'results.pkl')
self._iteration += 1
@staticmethod
def _get_last_params(res):
param_vals = res.x_iters[-1]
named_params = _format_to_named_params(param_vals, res)
return str(named_params)
def log_results(results, experiment=None, log_plots=True, log_pickle=True):
"""Logs runs results and parameters to neptune.
Logs all hyperparameter optimization results to Neptune. Those include best score ('best_score' metric),
best parameters ('best_parameters' property), convergence plot ('diagnostics' log),
evaluations plot ('diagnostics' log), and objective plot ('diagnostics' log).
Args:
results('scipy.optimize.OptimizeResult'): Results object that is typically an output
| of the function like `skopt.forest_minimize(...)`
experiment(`neptune.experiments.Experiment`): Neptune experiment. Default is None.
log_plots: ('bool'): If True skopt plots will be logged to Neptune.
log_pickle: ('bool'): if True pickled skopt results object will be logged to Neptune.
Examples:
Run skopt training::
...
results = skopt.forest_minimize(objective, space,
base_estimator='ET', n_calls=100, n_random_starts=10)
Initialize Neptune::
import neptune
neptune.init(api_token='<PASSWORD>',
project_qualified_name='shared/showroom')
neptune.create_experiment(name='optuna sweep')
Send best parameters to Neptune::
import neptunecontrib.monitoring.skopt as sk_utils
sk_utils.log_results(results)
You can explore an example experiment in Neptune:
https://ui.neptune.ai/o/shared/org/showroom/e/SHOW-1065/logs
"""
_exp = experiment if experiment else neptune
expect_not_a_run(_exp)
_log_best_score(results, _exp)
_log_best_parameters(results, _exp)
if log_plots:
_log_plot_convergence(results, _exp)
_log_plot_evaluations(results, _exp)
_log_plot_regret(results, _exp)
_log_plot_objective(results, _exp)
if log_pickle:
_log_results_object(results, _exp)
def NeptuneMonitor(*args, **kwargs):
message = """NeptuneMonitor was renamed to NeptuneCallback and will be removed in future releases.
"""
warnings.warn(message)
return NeptuneCallback(*args, **kwargs)
def _log_best_parameters(results, experiment):
expect_not_a_run(experiment)
named_params = ([(dimension.name, param) for dimension, param in zip(results.space, results.x)])
experiment.set_property('best_parameters', str(named_params))
def _log_best_score(results, experiment):
experiment.log_metric('best_score', results.fun)
def _log_plot_convergence(results, experiment, name='diagnostics'):
expect_not_a_run(experiment)
fig, ax = plt.subplots()
sk_plots.plot_convergence(results, ax=ax)
experiment.log_image(name, fig)
def _log_plot_regret(results, experiment, name='diagnostics'):
expect_not_a_run(experiment)
fig, ax = plt.subplots()
sk_plots.plot_regret(results, ax=ax)
experiment.log_image(name, fig)
def _log_plot_evaluations(results, experiment, name='diagnostics'):
expect_not_a_run(experiment)
fig = plt.figure(figsize=(16, 12))
fig = axes2fig(sk_plots.plot_evaluations(results, bins=10), fig=fig)
experiment.log_image(name, fig)
def _log_plot_objective(results, experiment, name='diagnostics'):
try:
expect_not_a_run(experiment)
fig = plt.figure(figsize=(16, 12))
fig = axes2fig(sk_plots.plot_objective(results), fig=fig)
experiment.log_image(name, fig)
except Exception as e:
print('Could not create the objective chart due to error: {}'.format(e))
def _log_results_object(results, experiment=None):
expect_not_a_run(experiment)
experiment.log_artifact(_export_results_object(results), 'results.pkl')
def _export_results_object(results):
from io import BytesIO
results.specs['args'].pop('callback', None)
buffer = BytesIO()
dump(results, buffer, store_objective=False)
buffer.seek(0)
return buffer
def _format_to_named_params(params, result):
return [(dimension.name, param) for dimension, param in zip(result.space, params)]
| 2.25 | 2 |
snoopy/server/transforms/Maltego.py | aiddenkeli/Snoopy | 432 | 7254 | #!/usr/bin/python
#
# This might be horrible code...
# ...but it works
# Feel free to re-write in a better way
# And if you want to - send it to us, we'll update ;)
# <EMAIL> (2010/10/18)
#
import sys
from xml.dom import minidom
class MaltegoEntity(object):
value = "";
weight = 100;
displayInformation = "";
additionalFields = [];
iconURL = "";
entityType = "Phrase"
def __init__(self,eT=None,v=None):
if (eT is not None):
self.entityType = eT;
if (v is not None):
self.value = v;
self.additionalFields = None;
self.additionalFields = [];
self.weight = 100;
self.displayInformation = "";
self.iconURL = "";
def setType(self,eT=None):
if (eT is not None):
self.entityType = eT;
def setValue(self,eV=None):
if (eV is not None):
self.value = eV;
def setWeight(self,w=None):
if (w is not None):
self.weight = w;
def setDisplayInformation(self,di=None):
if (di is not None):
self.displayInformation = di;
def addAdditionalFields(self,fieldName=None,displayName=None,matchingRule=False,value=None):
self.additionalFields.append([fieldName,displayName,matchingRule,value]);
def setIconURL(self,iU=None):
if (iU is not None):
self.iconURL = iU;
def returnEntity(self):
print "<Entity Type=\"" + str(self.entityType) + "\">";
print "<Value>" + str(self.value) + "</Value>";
print "<Weight>" + str(self.weight) + "</Weight>";
if (self.displayInformation is not None):
print "<DisplayInformation><Label Name=\"\" Type=\"text/html\"><![CDATA[" + str(self.displayInformation) + "]]></Label></DisplayInformation>";
if (len(self.additionalFields) > 0):
print "<AdditionalFields>";
for i in range(len(self.additionalFields)):
if (str(self.additionalFields[i][2]) <> "strict"):
print "<Field Name=\"" + str(self.additionalFields[i][0]) + "\" DisplayName=\"" + str(self.additionalFields[i][1]) + "\">" + str(self.additionalFields[i][3]) + "</Field>";
else:
print "<Field MatchingRule=\"" + str(self.additionalFields[i][2]) + "\" Name=\"" + str(self.additionalFields[i][0]) + "\" DisplayName=\"" + str(self.additionalFields[i][1]) + "\">" + str(self.additionalFields[i][3]) + "</Field>";
print "</AdditionalFields>";
if (len(self.iconURL) > 0):
print "<IconURL>" + self.iconURL + "</IconURL>";
print "</Entity>";
class MaltegoTransform(object):
entities = []
exceptions = []
UIMessages = []
#def __init__(self):
#empty.
def addEntity(self,enType,enValue):
me = MaltegoEntity(enType,enValue);
self.addEntityToMessage(me);
return self.entities[len(self.entities)-1];
def addEntityToMessage(self,maltegoEntity):
self.entities.append(maltegoEntity);
def addUIMessage(self,message,messageType="Inform"):
self.UIMessages.append([messageType,message]);
def addException(self,exceptionString):
self.exceptions.append(exceptionString);
def throwExceptions(self):
print "<MaltegoMessage>";
print "<MaltegoTransformExceptionMessage>";
print "<Exceptions>"
for i in range(len(self.exceptions)):
print "<Exception>" + self.exceptions[i] + "</Exceptions>";
print "</Exceptions>"
print "</MaltegoTransformExceptionMessage>";
print "</MaltegoMessage>";
def returnOutput(self):
print "<MaltegoMessage>";
print "<MaltegoTransformResponseMessage>";
print "<Entities>"
for i in range(len(self.entities)):
self.entities[i].returnEntity();
print "</Entities>"
print "<UIMessages>"
for i in range(len(self.UIMessages)):
print "<UIMessage MessageType=\"" + self.UIMessages[i][0] + "\">" + self.UIMessages[i][1] + "</UIMessage>";
print "</UIMessages>"
print "</MaltegoTransformResponseMessage>";
print "</MaltegoMessage>";
def writeSTDERR(self,msg):
sys.stderr.write(str(msg));
def heartbeat(self):
self.writeSTDERR("+");
def progress(self,percent):
self.writeSTDERR("%" + str(percent));
def debug(self,msg):
self.writeSTDERR("D:" + str(msg));
class MaltegoMsg:
def __init__(self,MaltegoXML=""):
xmldoc = minidom.parseString(MaltegoXML)
#read the easy stuff like value, limits etc
self.Value = self.i_getNodeValue(xmldoc,"Value")
self.Weight = self.i_getNodeValue(xmldoc,"Weight")
self.Slider = self.i_getNodeAttributeValue(xmldoc,"Limits","SoftLimit")
self.Type = self.i_getNodeAttributeValue(xmldoc,"Entity","Type")
#read additional fields
AdditionalFields = {}
try:
AFNodes= xmldoc.getElementsByTagName("AdditionalFields")[0]
Settings = AFNodes.getElementsByTagName("Field")
for node in Settings:
AFName = node.attributes["Name"].value;
AFValue = self.i_getText(node.childNodes);
AdditionalFields[AFName] = AFValue
except:
#sure this is not the right way...;)
dontcare=1
#parse transform settings
TransformSettings = {}
try:
TSNodes= xmldoc.getElementsByTagName("TransformFields")[0]
Settings = TSNodes.getElementsByTagName("Field")
for node in Settings:
TSName = node.attributes["Name"].value;
TSValue = self.i_getText(node.childNodes);
TransformSettings[TSName] = TSValue
except:
dontcare=1
#load back into object
self.AdditionalFields = AdditionalFields
self.TransformSettings = TransformSettings
def i_getText(self,nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def i_getNodeValue(self,node,Tag):
return self.i_getText(node.getElementsByTagName(Tag)[0].childNodes)
def i_getNodeAttributeValue(self,node,Tag,Attribute):
return node.getElementsByTagName(Tag)[0].attributes[Attribute].value;
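# Illustrative sketch (not part of the original file): a minimal local transform
# that echoes the selected entity value back as a Phrase entity. It assumes
# Maltego passes the entity value as the first command-line argument.
if __name__ == '__main__':
    mt = MaltegoTransform()
    entity = mt.addEntity("Phrase", sys.argv[1])
    entity.setWeight(100)
    mt.addUIMessage("Echo transform completed")
    mt.returnOutput()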
| 2.25 | 2 |
metadeploy/api/migrations/0050_add_clickthrough_agreement.py | sfdc-qbranch/MetaDeploy | 33 | 7255 | # Generated by Django 2.1.5 on 2019-02-12 21:18
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("api", "0049_add_all_other_translations")]
operations = [
migrations.CreateModel(
name="ClickThroughAgreement",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("text", models.TextField()),
],
),
migrations.AddField(
model_name="job",
name="click_through_agreement",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.PROTECT,
to="api.ClickThroughAgreement",
),
),
]
| 1.648438 | 2 |
invenio_iiif/config.py | dfdan/invenio-iiif | 3 | 7256 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""IIIF API for Invenio."""
IIIF_API_PREFIX = '/iiif/'
"""URL prefix to IIIF API."""
IIIF_UI_URL = '/api{}'.format(IIIF_API_PREFIX)
"""URL to IIIF API endpoint (allow hostname)."""
IIIF_PREVIEWER_PARAMS = {
'size': '750,'
}
"""Parameters for IIIF image previewer extension."""
IIIF_PREVIEW_TEMPLATE = 'invenio_iiif/preview.html'
"""Template for IIIF image preview."""
IIIF_API_DECORATOR_HANDLER = 'invenio_iiif.handlers:protect_api'
"""Image opener handler decorator."""
IIIF_IMAGE_OPENER_HANDLER = 'invenio_iiif.handlers:image_opener'
"""Image opener handler function."""
| 1.34375 | 1 |
pub_ingest.py | mconlon17/vivo-pub-ingest | 0 | 7257 | #!/user/bin/env/python
"""
pub_ingest.py -- Read a bibtex file and make VIVO RDF
The following objects will be made as needed:
-- publisher
-- journal
-- information resource
-- timestamp for the information resource
-- people
-- authorships
-- concepts
The resulting ADD and SUB RDF file can then be read into VIVO
To Do
-- Complete refactor as an update process. Create reusable parts so that
a publication can be created from bibtex, doi or pmid
-- Improve DateTimeValue accuracy. Currently all publications are entered
as yearMonth precision. Sometimes we have more information, sometimes
we have less. We should use the information as presented by the
publisher, not overstate (yearMonth when there is only year) and not
understate (yearMonth when we know the day).
-- Reuse date objects -- only create dates when the appropriate date entity
is not already in VIVO
-- Update for VIVO-ISF
-- Update for vivofoundation and vivopubs
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2014, University of Florida"
__license__ = "BSD 3-Clause license"
__version__ = "1.3"
import sys
from datetime import datetime, date
from pybtex.database.input import bibtex
import tempita
import vivotools
MAX_AUTHORS = 50
publisher_report = {}
journal_report = {}
title_report = {}
author_report = {}
disambiguation_report = {}
dictionaries = []
journal_dictionary = {}
publisher_dictionary = {}
title_dictionary = {}
def open_files(bibtex_file_name):
"""
Give the name of the bibitex file to be used as input, generate the file
names for rdf, rpt and lst. Return the open file handles
"""
base = bibtex_file_name[:bibtex_file_name.find('.')]
rpt_file = open(base+'.rpt', 'w')
lst_file = open(base+'.lst', 'w')
rdf_file = open(base+'.rdf', 'w')
return [rdf_file, rpt_file, lst_file]
def update_disambiguation_report(authors, publication_uri):
"""
Given the authors structure and thte publication_uri, add to the report
if any of the authors need to be disambiguated
"""
for value in authors.values():
if value[8] == "Disambig":
if publication_uri in disambiguation_report:
result = disambiguation_report[publication_uri]
result[len(result.keys())+1] = value
disambiguation_report[publication_uri] = result
else:
disambiguation_report[publication_uri] = {1:value}
return
# start here. Create a parser for bibtex and use it to read the file of
# bibtex entries. open the output files
print datetime.now(), "Read the BibTex"
bibtex_file_name = sys.argv[1]
[rdf_file, rpt_file, lst_file] = open_files(bibtex_file_name)
parser = bibtex.Parser()
bib_data = parser.parse_file(bibtex_file_name)
bib_sorted = sorted(bib_data.entries.items(),
key=lambda x: x[1].fields['title'])
print >>rdf_file, "<!--", len(bib_data.entries.keys()),\
"publications to be processed -->"
print datetime.now(), len(bib_data.entries.keys()),\
"publications to be processed."
# make dictionaries for people, papers, publishers, journals, concepts
print datetime.now(), "Creating the dictionaries"
print datetime.now(), "Publishers"
publisher_dictionary = vivotools.make_publisher_dictionary()
print datetime.now(), "Journals"
journal_dictionary = vivotools.make_journal_dictionary()
print datetime.now(), "People"
dictionaries = make_people_dictionaries()
print datetime.now(), "Titles"
title_dictionary = vivotools.make_title_dictionary()
print datetime.now(), "Concepts"
vivotools.make_concept_dictionary()
# process the papers
print >>rdf_file, vivotools.rdf_header()
for key, value in bib_sorted:
try:
title = value.fields['title'].title() + " "
except:
title_report["No title"] = ["No Title", None, 1]
print >>rdf_file, "<!-- No title found. No RDF necessary -->"
continue
title = abbrev_to_words(title)
title = title[0:-1]
if title in title_report:
print >>rdf_file, "<!-- Title", title,\
"handled previously. No RDF necessary -->"
title_report[title][2] = title_report[title][2] + 1
continue
else:
print >>rdf_file, "<!-- Begin RDF for " + title + " -->"
print datetime.now(), "<!-- Begin RDF for " + title + " -->"
document = {}
document['title'] = title
title_report[title] = ["Start", None, 1]
[found, uri] = vivotools.find_title(title, title_dictionary)
if not found:
title_report[title][0] = "Create" # Create
# Authors
[author_rdf, authors] = make_author_rdf(value)
document['authors'] = make_document_authors(authors)
if count_uf_authors(authors) == 0:
print >>rdf_file, "<!-- End RDF. No UF authors for " +\
title + " No RDF necessary -->"
title_report[title][0] = "No UF Auth"
continue
update_author_report(authors)
# Datetime
[datetime_rdf, datetime_uri] = make_datetime_rdf(value, title)
# Publisher
[journal_create, journal_name, journal_uri] =\
make_journal_uri(value)
[publisher_create, publisher, publisher_uri, publisher_rdf] =\
make_publisher_rdf(value)
# Journal
[journal_rdf, journal_uri] = make_journal_rdf(value,\
journal_create, journal_name, journal_uri)
# Publisher/Journal bi-directional links
publisher_journal_rdf = ""
if journal_uri != "" and publisher_uri != "" and\
(journal_create or publisher_create):
publisher_journal_rdf = \
make_publisher_journal_rdf(publisher_uri, journal_uri)
# Authorships
publication_uri = vivotools.get_vivo_uri()
title_report[title][1] = publication_uri
[authorship_rdf, authorship_uris] = make_authorship_rdf(authors,\
publication_uri)
# AuthorInAuthorships
author_in_authorship_rdf = make_author_in_authorship_rdf(authors,\
authorship_uris)
# Journal/Publication bi-directional links
if journal_uri != "" and publication_uri != "":
journal_publication_rdf = \
make_journal_publication_rdf(journal_uri, publication_uri)
# PubMed values
pubmed_rdf = ""
if 'doi' in value.fields:
[pubmed_rdf, sub] = vivotools.update_pubmed(publication_uri,\
value.fields['doi'])
if sub != "":
raise Exception("Non empty subtraction RDF"+\
"for Update PubMed")
# Publication
publication_rdf = make_publication_rdf(value,\
title,publication_uri,datetime_uri,authorship_uris)
print >>rdf_file, datetime_rdf, publisher_rdf, journal_rdf,\
publisher_journal_rdf, author_rdf, authorship_rdf,\
author_in_authorship_rdf, journal_publication_rdf,\
publication_rdf, pubmed_rdf
print >>rdf_file, "<!-- End RDF for " + title + " -->"
print >>lst_file, vivotools.string_from_document(document),\
'VIVO uri', publication_uri, '\n'
update_disambiguation_report(authors, publication_uri)
else:
title_report[title][0] = "Found"
title_report[title][1] = uri
print >>rdf_file, "<!-- Found: " + title + " No RDF necessary -->"
print >>rdf_file, vivotools.rdf_footer()
#
# Reports
#
print >>rpt_file,"""
Publisher Report
Lists the publishers that appear in the bibtex file in alphabetical order. For
each publisher, show the improved name, the number of papers in journals of this publisher,
the action to be taken for the publisher and the VIVO URI -- the URI is the new
URI to be created if Action is Create, otherwise it is the URI of the found publisher
in VIVO.
Publisher Papers Action VIVO URI
---------------------------------------------------------------------------------"""
publisher_count = 0
actions = {}
for publisher in sorted(publisher_report.keys()):
publisher_count = publisher_count + 1
[create,uri,count] = publisher_report[publisher]
if create:
result = "Create"
else:
result = "Found "
actions[result] = actions.get(result,0) + 1
print >>rpt_file, "{0:40}".format(publisher[0:40]),"{0:>3}".format(count),result,uri
print >>rpt_file,""
print >>rpt_file, "Publisher count by action"
print >>rpt_file, ""
for action in sorted(actions):
print >>rpt_file, action,actions[action]
print >>rpt_file, publisher_count,"publisher(s)"
print >>rpt_file, """
Journal Report
Lists the journals that appear in the bibtex file in alphabetical order. For
each journal, show the improved name, the number of papers to be linked to the journal,
the action to be taken for the journal and the VIVO URI -- the URI is the new
URI to be created if Action is Create, otherwise it is the URI of the found journal
in VIVO.
Journal Papers Action VIVO URI
---------------------------------------------------------------------------------"""
journal_count = 0
actions = {}
for journal in sorted(journal_report.keys()):
journal_count = journal_count + 1
[create,uri,count] = journal_report[journal]
if create:
result = "Create"
else:
result = "Found "
actions[result] = actions.get(result,0) + 1
print >>rpt_file, "{0:40}".format(journal[0:40]),"{0:>3}".format(count),result,uri
print >>rpt_file, ""
print >>rpt_file, "Journal count by action"
print >>rpt_file, ""
for action in sorted(actions):
print >>rpt_file, action,actions[action]
print >>rpt_file, journal_count,"journal(s)"
print >>rpt_file, """
Title Report
Lists the titles that appear in the bibtex file in alphabetical order. For
each title, show the action to be taken, the number of times the title appears in
the bibtex, the improved title and the VIVO URI of the publication -- the URI is the new
URI to be created if action is Create, otherwise it is the URI of the found publication
in VIVO.
Action # Title and VIVO URI
---------------------------------------------------------------------------------"""
title_count = 0
actions = {}
for title in sorted(title_report.keys()):
title_count = title_count +1
[action,uri,count] = title_report[title]
actions[action] = actions.get(action,0) + 1
print >>rpt_file, "{0:>10}".format(action),title,uri
print >>rpt_file, ""
print >>rpt_file, "Title count by action"
print >>rpt_file, ""
for action in sorted(actions):
print >>rpt_file, action,actions[action]
print >>rpt_file, title_count,"title(s)"
print >>rpt_file, """
Author Report
For each author found in the bibtex file, show the author's name followed by the number of papers
for the author in the bibtex to be entered, followed by
a pair of results for each time the author appears on a paper in the bibtex. The result
pair contains an action and a URI. The action is "non UF" if a non-UF author stub will
be created; the URI is the URI of the new author stub. The action is "Make UF" if a new UF author
stub will be created, with the URI of the new author stub. "Found UF" indicates the author was
found at the URI. "Disambig" if multiple UF people were found with the given name. The URI
is the URI of one of the found people. Follow-up is needed to determine if correct and
reassign author if not correct.
Author Action URI Action URI
----------------------------------------------------------------------------------------------"""
author_count = 0
actions = {}
for author in sorted(author_report.keys()):
author_count = author_count + 1
results = ""
papers = len(author_report[author])
action = author_report[author][1][8] # 1st report, 8th value is action
actions[action] = actions.get(action,0) + 1
for key in author_report[author].keys():
value = author_report[author][key]
results = results + value[8] + " " + "{0:45}".format(value[9])
print >>rpt_file, "{0:25}".format(author),"{0:>3}".format(papers),results
print >>rpt_file, ""
print >>rpt_file, "Author count by action"
print >>rpt_file, ""
for action in sorted(actions):
print >>rpt_file, action,actions[action]
print >>rpt_file, author_count,"authors(s)"
print >>rpt_file, """
Disambiguation Report
For each publication with one or more authors to disambiguate, show the URI of the paper,
and then, for each author that needs to be disambiguated on the paper, show
the last name, first name and middle initial, and all the URIs in VIVO for UF persons
with the same names.
"""
for uri in disambiguation_report.keys():
print >>rpt_file,"The publication at",uri,"has one or more authors in question"
for key,value in disambiguation_report[uri].items():
uris = value[9].split(";")
print >>rpt_file," ",value[4],value[5],value[6],":"
for u in uris:
person = vivotools.get_person(u)
if 'last_name' not in person:
person['last_name'] = "No last name"
if 'middle_name' not in person:
person['middle_name'] = "No middle name"
if 'first_name' not in person:
person['first_name'] = "No first name"
if 'home_department_name' not in person:
person['home_department_name'] = "No home department"
npubs = len(person['authorship_uris'])
print >>rpt_file," ",u,person['last_name'], \
person['first_name'],person['middle_name'], \
person['home_department_name'],"Number of pubs = ",npubs
print >>rpt_file
print >>rpt_file
#
# Close the files, we're done
#
rpt_file.close()
rdf_file.close()
lst_file.close()
| 2.734375 | 3 |
port/platform/common/automation/u_utils.py | u-blox/ubxlib | 91 | 7258 | #!/usr/bin/env python
'''Generally useful bits and bobs.'''
import queue # For PrintThread and exe_run
from time import sleep, time, gmtime, strftime # For lock timeout, exe_run timeout and logging
from multiprocessing import RLock
from copy import copy
import threading # For PrintThread
import sys
import os # For ChangeDir, has_admin
import stat # To help deltree out
from collections import deque # For storing a window of debug
from telnetlib import Telnet # For talking to JLink server
import socket
import shutil # To delete a directory tree
import signal # For CTRL_C_EVENT
import subprocess
import platform # Figure out current OS
import re # Regular Expression
import serial # Pyserial (make sure to do pip install pyserial)
import psutil # For killing things (make sure to do pip install psutil)
import requests # For HTTP comms with a KMTronic box (do pip install requests)
import u_settings
# Since this function is used by the global variables below it needs
# to be placed here.
def is_linux():
'''Returns True when system is Linux'''
return platform.system() == 'Linux'
# Since this function is used by the global variables below it needs
# to be placed here.
def pick_by_os(linux=None, other=None):
'''
This is a convenience function for selecting a value based on platform.
As an example the line below will print out "Linux" when running on a
Linux platform and "Not Linux" when running on some other platform:
print( u_utils.pick_by_os(linux="Linux", other="Not Linux") )
'''
if is_linux():
return linux
return other
# The port that this agent service runs on
# Deliberately NOT a setting, we need to be sure
# everyone uses the same value
AGENT_SERVICE_PORT = 17003
# The maximum number of characters that an agent will
# use from controller_name when constructing a directory
# name for a ubxlib branch to be checked out into
AGENT_WORKING_SUBDIR_CONTROLLER_NAME_MAX_LENGTH = 4
# How long to wait for an install lock in seconds
INSTALL_LOCK_WAIT_SECONDS = u_settings.INSTALL_LOCK_WAIT_SECONDS #(60 * 60)
# The URL for Unity, the unit test framework
UNITY_URL = u_settings.UNITY_URL #"https://github.com/ThrowTheSwitch/Unity"
# The sub-directory that Unity is usually put in
# (off the working directory)
UNITY_SUBDIR = u_settings.UNITY_SUBDIR #"Unity"
# The path to DevCon, a Windows tool that allows
# USB devices to be reset, amongst other things
DEVCON_PATH = u_settings.DEVCON_PATH #"devcon.exe"
# The path to jlink.exe (or just the name 'cos it's on the path)
JLINK_PATH = u_settings.JLINK_PATH #"jlink.exe"
# The port number for SWO trace capture out of JLink
JLINK_SWO_PORT = u_settings.JLINK_SWO_PORT #19021
# The port number for GDB control of ST-LINK GDB server
STLINK_GDB_PORT = u_settings.STLINK_GDB_PORT #61200
# The port number for SWO trace capture out of ST-LINK GDB server
STLINK_SWO_PORT = u_settings.STLINK_SWO_PORT #61300
# The format string passed to strftime()
# for logging prints
TIME_FORMAT = u_settings.TIME_FORMAT #"%Y-%m-%d_%H:%M:%S"
# The default guard time waiting for a platform lock in seconds
PLATFORM_LOCK_GUARD_TIME_SECONDS = u_settings.PLATFORM_LOCK_GUARD_TIME_SECONDS #60 * 60
# The default guard time for downloading to a target in seconds
DOWNLOAD_GUARD_TIME_SECONDS = u_settings.DOWNLOAD_GUARD_TIME_SECONDS #60
# The default guard time for running tests in seconds
RUN_GUARD_TIME_SECONDS = u_settings.RUN_GUARD_TIME_SECONDS #60 * 60
# The default inactivity timer for running tests in seconds
RUN_INACTIVITY_TIME_SECONDS = u_settings.RUN_INACTIVITY_TIME_SECONDS #60 * 5
# The name of the #define that forms the filter string
# for which tests to run
FILTER_MACRO_NAME = u_settings.FILTER_MACRO_NAME #"U_CFG_APP_FILTER"
# The name of the environment variable that indicates we're running under automation
ENV_UBXLIB_AUTO = "U_UBXLIB_AUTO"
# The time for which to wait for something from the
# queue in exe_run(). If this is too short, in a
# multiprocessing world or on a slow machine, it is
# possible to miss things as the task putting things
# on the queue may be blocked from doing so until
# we've decided the queue has been completely emptied
# and moved on
EXE_RUN_QUEUE_WAIT_SECONDS = u_settings.EXE_RUN_QUEUE_WAIT_SECONDS #1
# The number of seconds a USB cutter and the bit positions of
# a KMTronic box are switched off for
HW_RESET_DURATION_SECONDS = u_settings.HW_RESET_DURATION_SECONDS # e.g. 5
# Executable file extension. This will be "" for Linux
# and ".exe" for Windows
EXE_EXT = pick_by_os(linux="", other=".exe")
def keep_going(flag, printer=None, prompt=None):
'''Check a keep_going flag'''
do_not_stop = True
if flag is not None and not flag.is_set():
do_not_stop = False
if printer and prompt:
printer.string("{}aborting as requested.".format(prompt))
return do_not_stop
# subprocess arguments behaves a little differently on Linux and Windows
# depending if a shell is used or not, which can be read here:
# https://stackoverflow.com/a/15109975
# This function will compensate for these deviations
def subprocess_osify(cmd, shell=True):
''' expects an array of strings being [command, param, ...] '''
if is_linux() and shell:
line = ''
for item in cmd:
# Put everything in a single string and quote args containing spaces
if ' ' in item:
line += '\"{}\" '.format(item)
else:
line += '{} '.format(item)
cmd = line
return cmd
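# Example (illustrative sketch, not output captured from a real run): on Linux
# with shell=True the list is flattened into one string and any argument
# containing a space is quoted; on Windows, or with shell=False, the list is
# returned unchanged.
#
#   subprocess_osify(["git", "commit", "-m", "fix build"], shell=True)
#   # Linux   -> 'git commit -m "fix build" '
#   # Windows -> ["git", "commit", "-m", "fix build"]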
def split_command_line_args(cmd_line):
''' Will split a command line string into a list of arguments.
Quoted arguments will be preserved as one argument '''
return [p for p in re.split("( |\\\".*?\\\"|'.*?')", cmd_line) if p.strip()]
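# Example (illustrative sketch): quoted arguments survive as single items.
#
#   split_command_line_args('nrfjprog --program "my file.hex" --verify')
#   # -> ['nrfjprog', '--program', '"my file.hex"', '--verify']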
def get_actual_path(path):
'''Given a drive number return real path if it is a subst'''
actual_path = path
if is_linux():
return actual_path
if os.name == 'nt':
# Get a list of substs
text = subprocess.check_output("subst",
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
# Lines should look like this:
# Z:\: => C:\projects\ubxlib_priv
# So, in this example, if we were given z:\blah
# then the actual path should be C:\projects\ubxlib_priv\blah
text = line.decode()
bits = text.rsplit(": => ")
if (len(bits) > 1) and (len(path) > 1) and \
(bits[0].lower()[0:2] == path[0:2].lower()):
actual_path = bits[1] + path[2:]
break
return actual_path
def get_instance_text(instance):
'''Return the instance as a text string'''
instance_text = ""
for idx, item in enumerate(instance):
if idx == 0:
instance_text += str(item)
else:
instance_text += "." + str(item)
return instance_text
# Get a list of instances as a text string separated
# by spaces.
def get_instances_text(instances):
'''Return the instances as a text string'''
instances_text = ""
for instance in instances:
if instance:
instances_text += " {}".format(get_instance_text(instance))
return instances_text
def remove_readonly(func, path, exec_info):
'''Help deltree out'''
del exec_info
os.chmod(path, stat.S_IWRITE)
func(path)
def deltree(directory, printer, prompt):
'''Remove an entire directory tree'''
tries = 3
success = False
if os.path.isdir(directory):
# Retry this as sometimes Windows complains
# that the directory is not empty when it
# it really should be, some sort of internal
# Windows race condition
while not success and (tries > 0):
try:
# Need the onerror bit on Winders, see
# this Stack Overflow post:
# https://stackoverflow.com/questions/1889597/deleting-directory-in-python
shutil.rmtree(directory, onerror=remove_readonly)
success = True
except OSError as ex:
if printer and prompt:
printer.string("{}ERROR unable to delete \"{}\" {}: \"{}\"".
format(prompt, directory,
ex.errno, ex.strerror))
sleep(1)
tries -= 1
else:
success = True
return success
# Some list types aren't quite list types: for instance,
# the lists returned by RPyC look like lists but they
# aren't of type list and so "in", for instance, will fail.
# This converts an instance list (i.e. a list-like object
# containing items that are each another list-like object)
# into a plain-old two-level list.
def copy_two_level_list(instances_in):
'''Convert instances_in into a true list'''
instances_out = []
if instances_in:
for item1 in instances_in:
instances_out1 = []
for item2 in item1:
instances_out1.append(item2)
instances_out.append(copy(instances_out1))
return instances_out
# Check if admin privileges are available, from:
# https://stackoverflow.com/questions/2946746/python-checking-if-a-user-has-administrator-privileges
def has_admin():
'''Check for administrator privileges'''
admin = False
if os.name == 'nt':
try:
# only Windows users with admin privileges can read the C:\windows\temp
if os.listdir(os.sep.join([os.environ.get("SystemRoot", "C:\\windows"), "temp"])):
admin = True
except PermissionError:
pass
else:
# Pylint will complain about the following line but
# that's OK, it is only executed if we're NOT on Windows
# and there the geteuid() method will exist
if "SUDO_USER" in os.environ and os.geteuid() == 0:
admin = True
return admin
# Reset a USB port with the given Device Description
def usb_reset(device_description, printer, prompt):
''' Reset a device'''
instance_id = None
found = False
success = False
try:
# Run devcon and parse the output to find the given device
printer.string("{}running {} to look for \"{}\"...". \
format(prompt, DEVCON_PATH, device_description))
cmd = [DEVCON_PATH, "hwids", "=ports"]
text = subprocess.check_output(subprocess_osify(cmd),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
# The format of a devcon entry is this:
#
# USB\VID_1366&PID_1015&MI_00\6&38E81674&0&0000
# Name: JLink CDC UART Port (COM45)
# Hardware IDs:
# USB\VID_1366&PID_1015&REV_0100&MI_00
# USB\VID_1366&PID_1015&MI_00
# Compatible IDs:
# USB\Class_02&SubClass_02&Prot_00
# USB\Class_02&SubClass_02
# USB\Class_02
#
# Grab what we hope is the instance ID
line = line.decode()
if line.startswith("USB"):
instance_id = line
else:
# If the next line is the Name we want then we're done
if instance_id and ("Name: " + device_description in line):
found = True
printer.string("{}\"{}\" found with instance ID \"{}\"". \
format(prompt, device_description,
instance_id))
break
instance_id = None
if found:
# Now run devcon to reset the device
printer.string("{}running {} to reset device \"{}\"...". \
format(prompt, DEVCON_PATH, instance_id))
cmd = [DEVCON_PATH, "restart", "@" + instance_id]
text = subprocess.check_output(subprocess_osify(cmd),
stderr=subprocess.STDOUT,
shell=False) # Has to be False or devcon won't work
for line in text.splitlines():
printer.string("{}{}".format(prompt, line.decode()))
success = True
else:
printer.string("{}device with description \"{}\" not found.". \
format(prompt, device_description))
except subprocess.CalledProcessError:
printer.string("{} unable to find and reset device.".format(prompt))
return success
# Open the required serial port.
def open_serial(serial_name, speed, printer, prompt):
'''Open serial port'''
serial_handle = None
text = "{}: trying to open \"{}\" as a serial port...". \
format(prompt, serial_name)
try:
return_value = serial.Serial(serial_name, speed, timeout=0.05)
serial_handle = return_value
printer.string("{} opened.".format(text))
except (ValueError, serial.SerialException) as ex:
printer.string("{}{} while accessing port {}: {}.".
format(prompt, type(ex).__name__,
serial_handle.name, str(ex)))
return serial_handle
def open_telnet(port_number, printer, prompt):
'''Open telnet port on localhost'''
telnet_handle = None
text = "{}trying to open \"{}\" as a telnet port on localhost...". \
format(prompt, port_number)
try:
telnet_handle = Telnet("localhost", int(port_number), timeout=5)
if telnet_handle is not None:
printer.string("{} opened.".format(text))
else:
printer.string("{} failed.".format(text))
except (socket.error, socket.timeout, ValueError) as ex:
printer.string("{}{} failed to open telnet {}: {}.".
format(prompt, type(ex).__name__,
port_number, str(ex)))
return telnet_handle
def install_lock_acquire(install_lock, printer, prompt, keep_going_flag=None):
'''Attempt to acquire install lock'''
timeout_seconds = INSTALL_LOCK_WAIT_SECONDS
success = False
if install_lock:
printer.string("{}waiting for install lock...".format(prompt))
while not install_lock.acquire(False) and (timeout_seconds > 0) and \
keep_going(keep_going_flag, printer, prompt):
sleep(1)
timeout_seconds -= 1
if timeout_seconds > 0:
printer.string("{}got install lock.".format(prompt))
success = True
else:
printer.string("{}failed to aquire install lock.".format(prompt))
else:
printer.string("{}warning, there is no install lock.".format(prompt))
return success
def install_lock_release(install_lock, printer, prompt):
'''Release install lock'''
if install_lock:
install_lock.release()
printer.string("{}install lock released.".format(prompt))
def fetch_repo(url, directory, branch, printer, prompt, submodule_init=True, force=False):
'''Fetch a repo: directory can be relative or absolute, branch can be a hash'''
got_code = False
success = False
dir_text = directory
if dir_text == ".":
dir_text = "this directory"
if printer and prompt:
printer.string("{}in directory {}, fetching"
" {} to {}.".format(prompt, os.getcwd(),
url, dir_text))
if not branch:
branch = "master"
if os.path.isdir(directory):
# Update existing code
with ChangeDir(directory):
if printer and prompt:
printer.string("{}updating code in {}...".
format(prompt, dir_text))
target = branch
if branch.startswith("#"):
# Actually been given a branch, lose the
# preceding #
target = branch[1:len(branch)]
# Try this once and, if it fails and force is set,
# do a git reset --hard and try again
tries = 1
if force:
tries += 1
while tries > 0:
try:
call_list = []
call_list.append("git")
call_list.append("fetch")
call_list.append("origin")
call_list.append(target)
if printer and prompt:
text = ""
for item in call_list:
if text:
text += " "
text += item
printer.string("{}in {} calling {}...".
format(prompt, os.getcwd(), text))
# Try to pull the code
text = subprocess.check_output(subprocess_osify(call_list),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
if printer and prompt:
printer.string("{}{}".format(prompt, line.decode()))
got_code = True
except subprocess.CalledProcessError as error:
if printer and prompt:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
if got_code:
tries = 0
else:
if force:
# git reset --hard
printer.string("{}in directory {} calling git reset --hard...". \
format(prompt, os.getcwd()))
try:
text = subprocess.check_output(subprocess_osify(["git", "reset",
"--hard"]),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
if printer and prompt:
printer.string("{}{}".format(prompt, line.decode()))
except subprocess.CalledProcessError as error:
if printer and prompt:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
force = False
tries -= 1
if not got_code:
# If we still haven't got the code, delete the
# directory for a true clean start
deltree(directory, printer, prompt)
if not os.path.isdir(directory):
# Clone the repo
if printer and prompt:
printer.string("{}cloning from {} into {}...".
format(prompt, url, dir_text))
try:
text = subprocess.check_output(subprocess_osify(["git", "clone", "-q",
url, directory]),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
if printer and prompt:
printer.string("{}{}".format(prompt, line.decode()))
got_code = True
except subprocess.CalledProcessError as error:
if printer and prompt:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
if got_code and os.path.isdir(directory):
# Check out the correct branch and recurse submodules
with ChangeDir(directory):
target = "origin/" + branch
if branch.startswith("#"):
# Actually been given a branch, so lose the
# "origin/" and the preceding #
target = branch[1:len(branch)]
if printer and prompt:
printer.string("{}checking out {}...".
format(prompt, target))
try:
call_list = ["git", "-c", "advice.detachedHead=false",
"checkout", "--no-progress"]
if submodule_init:
call_list.append("--recurse-submodules")
printer.string("{}also recursing sub-modules (can take some time" \
" and gives no feedback).".format(prompt))
call_list.append(target)
if printer and prompt:
text = ""
for item in call_list:
if text:
text += " "
text += item
printer.string("{}in {} calling {}...".
format(prompt, os.getcwd(), text))
text = subprocess.check_output(subprocess_osify(call_list),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
if printer and prompt:
printer.string("{}{}".format(prompt, line.decode()))
success = True
except subprocess.CalledProcessError as error:
if printer and prompt:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
return success
def exe_where(exe_name, help_text, printer, prompt):
'''Find an executable using where.exe or which on linux'''
success = False
try:
printer.string("{}looking for \"{}\"...". \
format(prompt, exe_name))
# See here:
# https://stackoverflow.com/questions/14928860/passing-double-quote-shell-commands-in-python-to-subprocess-popen
# ...for why the construction "".join() is necessary when
# passing things which might have spaces in them.
# It is the only thing that works.
if is_linux():
cmd = ["which {}".format(exe_name.replace(":", "/"))]
printer.string("{}detected linux, calling \"{}\"...".format(prompt, cmd))
else:
cmd = ["where", "".join(exe_name)]
printer.string("{}detected nonlinux, calling \"{}\"...".format(prompt, cmd))
text = subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
printer.string("{}{} found in {}".format(prompt, exe_name,
line.decode()))
success = True
except subprocess.CalledProcessError:
if help_text:
printer.string("{}ERROR {} not found: {}". \
format(prompt, exe_name, help_text))
else:
printer.string("{}ERROR {} not found". \
format(prompt, exe_name))
return success
def exe_version(exe_name, version_switch, printer, prompt):
'''Print the version of a given executable'''
success = False
if not version_switch:
version_switch = "--version"
try:
text = subprocess.check_output(subprocess_osify(["".join(exe_name), version_switch]),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
printer.string("{}{}".format(prompt, line.decode()))
success = True
except subprocess.CalledProcessError:
printer.string("{}ERROR {} either not found or didn't like {}". \
format(prompt, exe_name, version_switch))
return success
def exe_terminate(process_pid):
'''Jonathan's killer'''
process = psutil.Process(process_pid)
for proc in process.children(recursive=True):
proc.terminate()
process.terminate()
def read_from_process_and_queue(process, read_queue):
'''Read from a process, non-blocking'''
while process.poll() is None:
string = process.stdout.readline().decode()
if string and string != "":
read_queue.put(string)
else:
sleep(0.1)
def queue_get_no_exception(the_queue, block=True, timeout=None):
'''A version of queue.get() that doesn't throw an Empty exception'''
thing = None
try:
thing = the_queue.get(block=block, timeout=timeout)
except queue.Empty:
pass
return thing
def capture_env_var(line, env, printer, prompt):
'''A bit of exe_run that needs to be called from two places'''
# Find a KEY=VALUE bit in the line,
# parse it out and put it in the dictionary
# we were given
pair = line.split('=', 1)
if len(pair) == 2:
env[pair[0]] = pair[1].rstrip()
else:
printer.string("{}WARNING: not an environment variable: \"{}\"".
format(prompt, line))
# Note: if returned_env is given then "set"
# will be executed after the exe and the environment
# variables will be returned in it. The down-side
# of this is that the return value of the exe is,
# of course, lost.
def exe_run(call_list, guard_time_seconds=None, printer=None, prompt=None,
shell_cmd=False, set_env=None, returned_env=None,
bash_cmd=False, keep_going_flag=None):
'''Call an executable, printing out what it does'''
success = False
start_time = time()
flibbling = False
kill_time = None
read_time = start_time
if returned_env is not None:
# The caller wants the environment after the
# command has run, so, from this post:
# https://stackoverflow.com/questions/1214496/how-to-get-environment-from-a-subprocess
# append a tag that we can detect
# to the command and then call set,
# from which we can parse the environment
call_list.append("&&")
call_list.append("echo")
call_list.append("flibble")
call_list.append("&&")
if is_linux():
call_list.append("env")
bash_cmd = True
else:
call_list.append("set")
# I've seen output from set get lost,
# possibly because the process ending
# is asynchronous with stdout,
# so add a delay here as well
call_list.append("&&")
call_list.append("sleep")
call_list.append("2")
try:
popen_keywords = {
'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT,
'shell': shell_cmd,
'env': set_env,
'executable': "bin/bash" if bash_cmd else None
}
# Call the thang
# Note: used to have bufsize=1 here but it turns out
# that is ignored 'cos the output is considered
# binary. Seems to work in any case, I guess
# Winders, at least, is in any case line-buffered.
process = subprocess.Popen(subprocess_osify(call_list, shell=shell_cmd),
**popen_keywords)
if printer:
printer.string("{}{}, pid {} started with guard time {} second(s)". \
format(prompt, call_list[0], process.pid,
guard_time_seconds))
# This is over complex but, unfortunately, necessary.
# At least one thing that we try to run, nrfjprog, can
# crash silently: just hangs and sends no output. However
# it also doesn't flush and close stdout and so read(1)
# will hang, meaning we can't read its output as a means
# to check that it has hung.
# So, here we poll for the return value, which is normally
# how things will end, and we start another thread which
# reads from the process's stdout. If the thread sees
# nothing for guard_time_seconds then we terminate the
# process.
read_queue = queue.Queue()
read_thread = threading.Thread(target=read_from_process_and_queue,
args=(process, read_queue))
read_thread.start()
while process.poll() is None:
if keep_going_flag is None or keep_going(keep_going_flag, printer, prompt):
if guard_time_seconds and (kill_time is None) and \
((time() - start_time > guard_time_seconds) or
(time() - read_time > guard_time_seconds)):
kill_time = time()
if printer:
printer.string("{}guard time of {} second(s)." \
" expired, stopping {}...".
format(prompt, guard_time_seconds,
call_list[0]))
exe_terminate(process.pid)
else:
exe_terminate(process.pid)
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
read_time = time()
while line is not None:
line = line.rstrip()
if flibbling:
capture_env_var(line, returned_env, printer, prompt)
else:
if returned_env is not None and "flibble" in line:
flibbling = True
else:
printer.string("{}{}".format(prompt, line))
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
read_time = time()
sleep(0.1)
# Can't join() read_thread here as it might have
# blocked on a read() (if nrfjprog has anything to
# do with it). It will be tidied up when this process
# exits.
# There may still be stuff on the queue, read it out here
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
while line is not None:
line = line.rstrip()
if flibbling:
capture_env_var(line, returned_env, printer, prompt)
else:
if returned_env is not None and "flibble" in line:
flibbling = True
else:
printer.string("{}{}".format(prompt, line))
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
# There may still be stuff in the buffer after
# the application has finished running so flush that
# out here
line = process.stdout.readline().decode()
while line:
line = line.rstrip()
if flibbling:
capture_env_var(line, returned_env, printer, prompt)
else:
if returned_env is not None and "flibble" in line:
flibbling = True
else:
printer.string("{}{}".format(prompt, line))
line = process.stdout.readline().decode()
if (process.poll() == 0) and kill_time is None:
success = True
if printer:
printer.string("{}{}, pid {} ended with return value {}.". \
format(prompt, call_list[0],
process.pid, process.poll()))
except ValueError as ex:
if printer:
printer.string("{}failed: {} while trying to execute {}.". \
format(prompt, type(ex).__name__, str(ex)))
except KeyboardInterrupt as ex:
process.kill()
raise KeyboardInterrupt from ex
return success
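# Example usage of exe_run() (a minimal sketch; the command, guard time and
# prompt are arbitrary illustrations, not values used by the automation):
#
#   printer = PrintToQueue(None, None)   # defined further down in this file
#   success = exe_run(["git", "--version"], guard_time_seconds=60,
#                     printer=printer, prompt="example: ", shell_cmd=True)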
def set_process_prio_high():
'''Set the priority of the current process to high'''
if is_linux():
print("Setting process priority currently not supported for Linux")
# It should be possible to set prio with:
# psutil.Process().nice(-10)
# However we get "[Errno 13] Permission denied" even when run as root
else:
psutil.Process().nice(psutil.HIGH_PRIORITY_CLASS)
def set_process_prio_normal():
'''Set the priority of the current process to normal'''
if is_linux():
print("Setting process priority currently not supported for Linux")
# It should be possible to set prio with:
# psutil.Process().nice(0)
# However we get "[Errno 13] Permission denied" even when run as root
else:
psutil.Process().nice(psutil.NORMAL_PRIORITY_CLASS)
class ExeRun():
'''Run an executable as a "with:"'''
def __init__(self, call_list, printer=None, prompt=None, shell_cmd=False, with_stdin=False):
self._call_list = call_list
self._printer = printer
self._prompt = prompt
self._shell_cmd = shell_cmd
self._with_stdin=with_stdin
self._process = None
def __enter__(self):
if self._printer:
text = ""
for idx, item in enumerate(self._call_list):
if idx == 0:
text = item
else:
text += " {}".format(item)
self._printer.string("{}starting {}...".format(self._prompt,
text))
try:
# Start exe
popen_keywords = {
'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT,
'shell': self._shell_cmd
}
if not is_linux():
popen_keywords['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
if self._with_stdin:
popen_keywords['stdin'] = subprocess.PIPE
self._process = subprocess.Popen(self._call_list, **popen_keywords)
if self._printer:
self._printer.string("{}{} pid {} started".format(self._prompt,
self._call_list[0],
self._process.pid))
except (OSError, subprocess.CalledProcessError, ValueError) as ex:
if self._printer:
self._printer.string("{}failed: {} to start {}.". \
format(self._prompt,
type(ex).__name__, str(ex)))
except KeyboardInterrupt as ex:
self._process.kill()
raise KeyboardInterrupt from ex
return self._process
def __exit__(self, _type, value, traceback):
del _type
del value
del traceback
# Stop exe
if self._printer:
self._printer.string("{}stopping {}...". \
format(self._prompt,
self._call_list[0]))
return_value = self._process.poll()
if not return_value:
retry = 5
while (self._process.poll() is None) and (retry > 0):
# Try to stop with CTRL-C
if is_linux():
sig = signal.SIGINT
else:
sig = signal.CTRL_BREAK_EVENT
self._process.send_signal(sig)
sleep(1)
retry -= 1
return_value = self._process.poll()
if not return_value:
# Terminate with a vengeance
self._process.terminate()
while self._process.poll() is None:
sleep(0.1)
if self._printer:
self._printer.string("{}{} pid {} terminated".format(self._prompt,
self._call_list[0],
self._process.pid))
else:
if self._printer:
self._printer.string("{}{} pid {} CTRL-C'd".format(self._prompt,
self._call_list[0],
self._process.pid))
else:
if self._printer:
self._printer.string("{}{} pid {} already ended".format(self._prompt,
self._call_list[0],
self._process.pid))
return return_value
# Simple SWO decoder: only handles single bytes of application
# data at a time, i.e. what ITM_SendChar() sends.
class SwoDecoder():
'''Take the contents of a byte_array and decode it as SWO'''
def __init__(self, address, replaceLfWithCrLf=False):
self._address = address
self._replace_lf_with_crlf = replaceLfWithCrLf
self._expecting_swit = True
def decode(self, swo_byte_array):
'''Do the decode'''
decoded_byte_array = bytearray()
if swo_byte_array:
for data_byte in swo_byte_array:
# We're looking only for "address" and we also know
# that CMSIS only offers ITM_SendChar(), so packet length
# is always 1, and we only send ASCII characters,
# so the top bit of the data byte must be 0.
#
# For the SWO protocol, see:
#
# https://developer.arm.com/documentation/ddi0314/h/
# instrumentation-trace-macrocell/
# about-the-instrumentation-trace-macrocell/trace-packet-format
#
# When we see SWIT (SoftWare Instrumentation Trace
# I think, anyway, the bit that carries our prints
# off the target) which is 0bBBBBB0SS, where BBBBB is
# address and SS is the size of payload to follow,
# in our case 0x01, we know that the next
# byte is probably data and if it is ASCII then
# it is data. Anything else is ignored.
# The reason for doing it this way is that the
# ARM ITM only sends out sync packets under
# special circumstances so it is not a recovery
# mechanism for simply losing a byte in the
# transfer, which does happen occasionally.
if self._expecting_swit:
if ((data_byte & 0x03) == 0x01) and ((data_byte & 0xf8) >> 3 == self._address):
# Trace packet type is SWIT, i.e. our
# application logging
self._expecting_swit = False
else:
if data_byte & 0x80 == 0:
if (data_byte == 10) and self._replace_lf_with_crlf:
decoded_byte_array.append(13)
decoded_byte_array.append(data_byte)
self._expecting_swit = True
return decoded_byte_array
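# Example (illustrative sketch): a SWIT header byte for stimulus port 0 with a
# one-byte payload is 0x01, so the two-byte stream below decodes to "A".
#
#   decoder = SwoDecoder(0, replaceLfWithCrLf=True)
#   decoder.decode(bytearray([0x01, ord("A")]))
#   # -> bytearray(b'A')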
class PrintThread(threading.Thread):
'''Print thread to organise prints nicely'''
def __init__(self, print_queue, file_handle=None,
window_file_handle=None, window_size=10000,
window_update_period_seconds=1):
self._queue = print_queue
self._lock = RLock()
self._queue_forwards = []
self._running = False
self._file_handle = file_handle
self._window = None
self._window_file_handle = window_file_handle
if self._window_file_handle:
self._window = deque(self._window_file_handle, maxlen=window_size)
self._window_update_pending = False
self._window_update_period_seconds = window_update_period_seconds
self._window_next_update_time = time()
threading.Thread.__init__(self)
def _send_forward(self, flush=False):
# Send from any forwarding buffers
# self._lock should be acquired before this is called
queue_idxes_to_remove = []
for idx, queue_forward in enumerate(self._queue_forwards):
if flush or time() > queue_forward["last_send"] + queue_forward["buffer_time"]:
string_forward = ""
len_queue_forward = len(queue_forward["buffer"])
count = 0
for item in queue_forward["buffer"]:
count += 1
if count < len_queue_forward:
item += "\n"
if queue_forward["prefix_string"]:
item = queue_forward["prefix_string"] + item
string_forward += item
queue_forward["buffer"] = []
if string_forward:
try:
queue_forward["queue"].put(string_forward)
except TimeoutError:
pass
except (OSError, EOFError, BrokenPipeError):
queue_idxes_to_remove.append(idx)
queue_forward["last_send"] = time()
for idx in queue_idxes_to_remove:
self._queue_forwards.pop(idx)
def add_forward_queue(self, queue_forward, prefix_string=None, buffer_time=0):
'''Forward things received on the print queue to another queue'''
self._lock.acquire()
already_done = False
for item in self._queue_forwards:
if item["queue"] == queue_forward:
already_done = True
break
if not already_done:
item = {}
item["queue"] = queue_forward
item["prefix_string"] = prefix_string
item["buffer"] = []
item["buffer_time"] = buffer_time
item["last_send"] = time()
self._queue_forwards.append(item)
self._lock.release()
def remove_forward_queue(self, queue_forward):
'''Stop forwarding things received on the print queue to another queue'''
self._lock.acquire()
queues = []
self._send_forward(flush=True)
for item in self._queue_forwards:
if item["queue"] != queue_forward:
queues.append(item)
self._queue_forwards = queues
self._lock.release()
def stop_thread(self):
'''Helper function to stop the thread'''
self._lock.acquire()
self._running = False
# Write anything remaining to the window file
if self._window_update_pending:
self._window_file_handle.seek(0)
for item in self._window:
self._window_file_handle.write(item)
self._window_file_handle.flush()
self._window_update_pending = False
self._window_next_update_time = time() + self._window_update_period_seconds
self._lock.release()
def run(self):
'''Worker thread'''
self._running = True
while self._running:
# Print locally and store in any forwarding buffers
try:
my_string = self._queue.get(block=False, timeout=0.5)
print(my_string)
if self._file_handle:
self._file_handle.write(my_string + "\n")
self._lock.acquire()
if self._window is not None:
# Note that my_string can contain multiple lines,
# hence the need to split it here to maintain the
# window
for line in my_string.splitlines():
self._window.append(line + "\n")
self._window_update_pending = True
for queue_forward in self._queue_forwards:
queue_forward["buffer"].append(my_string)
self._lock.release()
except queue.Empty:
sleep(0.1)
except (OSError, EOFError, BrokenPipeError):
# Try to restore stdout
sleep(0.1)
sys.stdout = sys.__stdout__
self._lock.acquire()
# Send from any forwarding buffers
self._send_forward()
# Write the window to file if required
if self._window_update_pending and time() > self._window_next_update_time:
# If you don't do this you can end up with garbage
# at the end of the file
self._window_file_handle.truncate()
self._window_file_handle.seek(0)
for item in self._window:
self._window_file_handle.write(item)
self._window_update_pending = False
self._window_next_update_time = time() + self._window_update_period_seconds
self._lock.release()
class PrintToQueue():
'''Print to a queue, if there is one'''
def __init__(self, print_queue, file_handle, include_timestamp=False):
self._queues = []
self._lock = RLock()
if print_queue:
self._queues.append(print_queue)
self._file_handle = file_handle
self._include_timestamp = include_timestamp
def add_queue(self, print_queue):
'''Add a queue to the list of places to print to'''
self._lock.acquire()
already_done = False
for item in self._queues:
if item == print_queue:
already_done = True
break
if not already_done:
self._queues.append(print_queue)
self._lock.release()
def remove_queue(self, print_queue):
'''Remove a queue from the list of places to print to'''
self._lock.acquire()
queues = []
for item in self._queues:
if item != print_queue:
queues.append(item)
self._queues = queues
self._lock.release()
def string(self, string, file_only=False):
'''Print a string to the queue(s)'''
if self._include_timestamp:
string = strftime(TIME_FORMAT, gmtime()) + " " + string
if not file_only:
self._lock.acquire()
queue_idxes_to_remove = []
if self._queues:
for idx, print_queue in enumerate(self._queues):
try:
print_queue.put(string)
except (EOFError, BrokenPipeError):
queue_idxes_to_remove.append(idx)
for idx in queue_idxes_to_remove:
self._queues.pop(idx)
else:
print(string)
self._lock.release()
if self._file_handle:
self._file_handle.write(string + "\n")
self._file_handle.flush()
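# Example of wiring PrintThread and PrintToQueue together (a minimal sketch,
# not taken from the scripts that normally set this up):
#
#   print_queue = queue.Queue()
#   print_thread = PrintThread(print_queue)
#   print_thread.start()
#   printer = PrintToQueue(print_queue, None, include_timestamp=True)
#   printer.string("example: hello")
#   print_thread.stop_thread()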
# This stolen from here:
# https://stackoverflow.com/questions/431684/how-do-i-change-the-working-directory-in-python
class ChangeDir():
'''Context manager for changing the current working directory'''
def __init__(self, new_path):
self._new_path = os.path.expanduser(new_path)
self._saved_path = None
def __enter__(self):
'''CD to new_path'''
self._saved_path = os.getcwd()
os.chdir(self._new_path)
def __exit__(self, etype, value, traceback):
'''CD back to saved_path'''
os.chdir(self._saved_path)
class Lock():
'''Hold a lock as a "with:"'''
def __init__(self, lock, guard_time_seconds,
lock_type, printer, prompt, keep_going_flag=None):
self._lock = lock
self._guard_time_seconds = guard_time_seconds
self._lock_type = lock_type
self._printer = printer
self._prompt = prompt
self._keep_going_flag = keep_going_flag
self._locked = False
def __enter__(self):
if not self._lock:
return True
# Wait on the lock
if not self._locked:
timeout_seconds = self._guard_time_seconds
self._printer.string("{}waiting up to {} second(s)" \
" for a {} lock...". \
format(self._prompt,
self._guard_time_seconds,
self._lock_type))
count = 0
while not self._lock.acquire(False) and \
((self._guard_time_seconds == 0) or (timeout_seconds > 0)) and \
keep_going(self._keep_going_flag, self._printer, self._prompt):
sleep(1)
timeout_seconds -= 1
count += 1
if count == 30:
self._printer.string("{}still waiting {} second(s)" \
" for a {} lock (locker is" \
" currently {}).". \
format(self._prompt, timeout_seconds,
self._lock_type, self._lock))
count = 0
if (self._guard_time_seconds == 0) or (timeout_seconds > 0):
self._locked = True
self._printer.string("{}{} lock acquired ({}).". \
format(self._prompt, self._lock_type,
self._lock))
return self._locked
def __exit__(self, _type, value, traceback):
del _type
del value
del traceback
if self._lock and self._locked:
try:
self._lock.release()
self._locked = False
self._printer.string("{}released a {} lock.".format(self._prompt,
self._lock_type))
except RuntimeError:
self._locked = False
self._printer.string("{}{} lock was already released.". \
format(self._prompt, self._lock_type))
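# Example of holding a lock as a "with:" (an illustrative sketch; in real use
# the lock object is shared with other processes by the controlling scripts):
#
#   platform_lock = RLock()
#   printer = PrintToQueue(None, None)
#   with Lock(platform_lock, PLATFORM_LOCK_GUARD_TIME_SECONDS, "platform",
#             printer, "example: ") as locked:
#       if locked:
#           pass  # do the work that needs the platform lock here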
def wait_for_completion(_list, purpose, guard_time_seconds,
printer, prompt, keep_going_flag):
'''Wait for a completion list to empty'''
completed = False
if len(_list) > 0:
timeout_seconds = guard_time_seconds
printer.string("{}waiting up to {} second(s)" \
" for {} completion...". \
format(prompt, guard_time_seconds, purpose))
count = 0
while (len(_list) > 0) and \
((guard_time_seconds == 0) or (timeout_seconds > 0)) and \
keep_going(keep_going_flag, printer, prompt):
sleep(1)
timeout_seconds -= 1
count += 1
if count == 30:
list_text = ""
for item in _list:
if list_text:
list_text += ", "
list_text += str(item)
printer.string("{}still waiting {} second(s)" \
" for {} to complete (waiting" \
" for {}).". \
format(prompt, timeout_seconds,
purpose, list_text))
count = 0
if len(_list) == 0:
completed = True
printer.string("{}{} completed.".format(prompt, purpose))
return completed
def reset_nrf_target(connection, printer, prompt):
'''Reset a Nordic NRFxxx target'''
call_list = []
printer.string("{}resetting target...".format(prompt))
# Assemble the call list
call_list.append("nrfjprog")
call_list.append("--reset")
if connection and "debugger" in connection and connection["debugger"]:
call_list.append("-s")
call_list.append(connection["debugger"])
# Print what we're gonna do
tmp = ""
for item in call_list:
tmp += " " + item
printer.string("{}in directory {} calling{}". \
format(prompt, os.getcwd(), tmp))
# Call it
return exe_run(call_list, 60, printer, prompt)
def usb_cutter_reset(usb_cutter_id_strs, printer, prompt):
'''Cut and then un-cut USB cables using Cleware USB cutters'''
# First switch the USB cutters off
action = "1"
count = 0
call_list_root = ["usbswitchcmd"]
call_list_root.append("-s")
call_list_root.append("-n")
while count < 2:
for usb_cutter_id_str in usb_cutter_id_strs:
call_list = call_list_root.copy()
call_list.append(usb_cutter_id_str)
call_list.append(action)
# Print what we're gonna do
tmp = ""
for item in call_list:
tmp += " " + item
if printer:
printer.string("{}in directory {} calling{}". \
format(prompt, os.getcwd(), tmp))
# Set shell to keep Jenkins happy
exe_run(call_list, 0, printer, prompt, shell_cmd=True)
# Wait 5ish seconds
if printer:
printer.string("{}waiting {} second(s)...". \
format(prompt, HW_RESET_DURATION_SECONDS))
sleep(HW_RESET_DURATION_SECONDS)
# "0" to switch the USB cutters on again
action = "0"
count += 1
def kmtronic_reset(ip_address, hex_bitmap, printer, prompt):
'''Cut and then un-cut power using a KMTronic box'''
# KMTronic is a web relay box which will be controlling
# power to, for instance, EVKs The last byte of the URL
# is a hex bitmap of the outputs where 0 sets off and 1
# sets on
# Take only the last two digits of the hex bitmap
hex_bitmap_len = len(hex_bitmap)
hex_bitmap = hex_bitmap[hex_bitmap_len - 2:hex_bitmap_len]
kmtronic_off = "http://" + ip_address + "FFE0" + hex_bitmap
kmtronic_on = "http://" + ip_address + "FFE0" + "{0:x}".format(int(hex_bitmap, 16) ^ 0xFF)
try:
# First switch the given bit positions off
if printer:
printer.string("{}sending {}". \
format(prompt, kmtronic_off))
response = requests.get(kmtronic_off)
# Wait 5ish seconds
if printer:
printer.string("{}...received response {}, waiting {} second(s)...". \
format(prompt, response.status_code, HW_RESET_DURATION_SECONDS))
sleep(HW_RESET_DURATION_SECONDS)
# Switch the given bit positions on
if printer:
printer.string("{}sending {}".format(prompt, kmtronic_on))
response = requests.get(kmtronic_on)
if printer:
printer.string("{}...received response {}.". \
format(prompt, response.status_code))
except requests.ConnectionError:
if printer:
printer.string("{}unable to connect to KMTronic box at {}.". \
format(prompt, ip_address))
# Look for a single line anywhere in message
# beginning with "test: ". This must be followed by
# "x.y.z a.b.c m.n.o" (i.e. instance IDs space separated)
# and then an optional "blah" filter string, or just "*"
# and an optional "blah" filter string or "None".
# Valid examples are:
#
# test: 1
# test: 1 3 7
# test: 1.0.3 3 7.0
# test: 1 2 example
# test: 1.1 8 portInit
# test: *
# test: * port
# test: none
#
# Filter strings must NOT begin with a digit.
# There cannot be more than one * or a * with any other instance.
# There can only be one filter string.
# Only whitespace is expected after this on the line.
# Anything else is ignored.
# Populates instances with the "0 4.5 13.5.1" bit as instance
# entries [[0], [4, 5], [13, 5, 1]] and returns the filter
# string, if any.
def commit_message_parse(message, instances, printer=None, prompt=None):
'''Find stuff in a commit message'''
instances_all = False
instances_local = []
filter_string_local = None
found = False
if message:
# Search through message for a line beginning
# with "test:"
if printer:
printer.string("{}### parsing message to see if it contains a test directive...". \
format(prompt))
lines = message.split("\\n")
for idx1, line in enumerate(lines):
if printer:
printer.string("{}text line {}: \"{}\"".format(prompt, idx1 + 1, line))
if line.lower().startswith("test:"):
found = True
instances_all = False
# Pick through what follows
parts = line[5:].split()
for part in parts:
if instances_all and (part[0].isdigit() or part == "*" or part.lower() == "none"):
# If we've had a "*" and this is another one
# or it begins with a digit then this is
# obviously not a "test:" line,
# leave the loop and try again.
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
break
if filter_string_local:
# If we've had a filter string then nothing
# must follow so this is not a "test:" line,
# leave the loop and try again.
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...extraneous characters after test directive," \
" ignoring.".format(prompt))
found = False
break
if part[0].isdigit():
# If this part begins with a digit it could
# be an instance containing numbers
instance = []
bad = False
for item in part.split("."):
try:
instance.append(int(item))
except ValueError:
# Some rubbish, not a test line so
# leave the loop and try the next
# line
bad = True
break
if bad:
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
break
if instance:
instances_local.append(instance[:])
elif part == "*":
if instances_local:
# If we've already had any instances
# this is obviously not a test line,
# leave the loop and try again
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
break
# If we haven't had any instances and
# this is a * then it means "all"
instances_local.append(part)
instances_all = True
elif part.lower() == "none":
if instances_local:
# If we've already had any instances
# this is obviously not a test line,
# leave the loop and try again
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
instances_local = []
filter_string_local = None
break
elif instances_local and not part == "*":
# If we've had an instance and this
# is not a "*" then this must be a
# filter string
filter_string_local = part
else:
# Found some rubbish, not a "test:"
# line after all, leave the loop
# and try the next line
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
break
if found:
text = "found test directive with"
if instances_local:
text += " instance(s)" + get_instances_text(instances_local)
if filter_string_local:
text += " and filter \"" + filter_string_local + "\""
else:
text += " instances \"None\""
if printer:
printer.string("{}{}.".format(prompt, text))
break
if printer:
printer.string("{}no test directive found".format(prompt))
if found and instances_local:
instances.extend(instances_local[:])
return found, filter_string_local
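# Example (illustrative sketch): note that the message is expected to contain
# literal "\n" sequences rather than real newline characters.
#
#   instances = []
#   found, filter_str = commit_message_parse("fix stuff\\ntest: 1 2.3 port",
#                                            instances)
#   # -> found is True, instances is [[1], [2, 3]], filter_str is "port"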
| 2.46875 | 2 |
faigler_mazeh.py | tcjansen/beer | 0 | 7259 | import numpy as np
import astropy.modeling.blackbody as bb
import astropy.constants as const
from astropy.io import fits
from scipy.interpolate import interp2d
class FaiglerMazehFit():
def __init__(self, P_orb, inc, R_star, M_star, T_star, A_ellip=False, A_beam=False,
R_p=False, a=False, u=False, g=0.65, logg=None, tele='TESS', M_p=False,
K=False):
self.P_orb = P_orb # orbital period in days
self.inc = inc * np.pi / 180 # inclination converted to radians
self.R_star = R_star # radius of the star in solar units
self.M_star = M_star # mass of the star in solar units
self.T_star = T_star # temperature of the star [K]
self.A_ellip = A_ellip # ellipsoidal amplitude in ppm
self.A_beam = A_beam # beaming amplitude in ppm
self.g = g # gravity-darkening coefficient, expected range is 0.3-1.0
self.logg = logg # log surface gravity of the star [cm s^-2]
self.tele = tele.lower() # observation instrument used, default is TESS. Only other
# other option (for now) is Kepler.
self.R_p = R_p # radius of the planet in jupiter radii
self.a = a
self.u = u # the limb-darkening coefficient, range is 0-1
self.g = g
self.M_p = M_p
self.K = K
# get the mass from the ellipsoidal amplitude, if given.
# u is the limb-darkening coefficient, range is 0-1
if not M_p and not not A_ellip and not not logg:
self.u = self.LDC()
self.M_p = self.m_from_ellip()
# star-planet separation [au] assuming a circular orbit
if not a and not not M_p:
self.a = get_a(self.P_orb * 86400, self.M_star * const.M_sun.value, \
self.M_p * const.M_jup.value) / const.au.value
def alpha_ellip(self):
if not self.u:
self.u = self.LDC()
if not self.g:
self.g = self.GDC()
a = 15 + self.u
b = 1 + self.g
c = 3 - self.u
return 0.15 * a * b / c
def RV_amp(self):
"""
Returns the radial velocity amplitude [m/s] of the star given a companion mass.
"""
return 27 / 40 * const.c.value \
* self.M_star ** (-2/3) \
* self.P_orb ** (-1/3) \
* self.M_p * np.sin(self.inc)
def doppler_shift(self, K):
"""
Returns the shift in wavelength for a given radial velocity amplitude.
"""
return K / const.c.value
def response_convolution(self, lambdas, response):
return response * bb.blackbody_lambda(lambdas, self.T_star).value
def alpha_beam(self, K):
"""
Returns the factor that accounts for the flux lost when a star gets Doppler shifted
in and out of the observer's bandpass.
"""
print(K)
rest_lambdas, response = response_func(self.tele)
flux_rest = np.trapz(self.response_convolution(rest_lambdas, response), \
x=rest_lambdas)
blueshifted_lambdas = rest_lambdas - self.doppler_shift(K=K)
flux_blueshift = np.trapz(self.response_convolution(blueshifted_lambdas, response), \
x=rest_lambdas)
redshifted_lambdas = rest_lambdas + self.doppler_shift(K=K)
flux_redshift = np.trapz(self.response_convolution(redshifted_lambdas, response), \
x=rest_lambdas)
alpha_blue = abs( (flux_rest - flux_blueshift) / flux_rest )
alpha_red = abs( (flux_rest - flux_redshift) / flux_rest )
return 1 - np.mean([alpha_red, alpha_blue])
def m_from_ellip(self):
return self.A_ellip \
* self.R_star ** (-3) \
* self.M_star ** 2 \
* self.P_orb ** 2 \
/ (12.8 * self.alpha_ellip() * np.sin(self.inc) ** 2)
def ellip_from_m(self):
return self.M_p * 12.8 * self.alpha_ellip() * np.sin(self.inc) ** 2 \
* self.R_star ** 3 \
* self.M_star ** (-2) \
* self.P_orb ** (-2)
def m_from_beam(self, K=False, alpha_beam=False):
if not alpha_beam and not K and not not self.M_p:
alpha_beam = self.alpha_beam(K=self.RV_amp())
elif not alpha_beam and not not K:
alpha_beam = self.alpha_beam(K=K)
elif not not K and not not alpha_beam:
raise ValueError("Please only specify either K or alpha_beam, not both.")
elif not K and not alpha_beam:
raise ValueError("Please specify a radial velocity (K) or alpha_beam parameter")
return self.A_beam \
* self.M_star ** (2/3) \
* self.P_orb ** (1/3) \
/ (alpha_beam * np.sin(self.inc) * 2.7)
def beam_from_m(self):
"""
Returns the expected Doppler beaming amplitude [ppm] for a given mass.
"""
if not self.M_p:
raise ValueError("Argument 'M_p' must be specified if you're trying to " +
"derive a beaming amplitude from a mass.")
if not self.K:
			self.K = self.RV_amp()
return 2.7 * self.alpha_beam(K=self.K) \
* self.M_star ** (-2/3) \
* self.P_orb ** (-1/3) \
* self.M_p * np.sin(self.inc)
def Ag_from_thermref(self, A_thermref):
"""
Return the geometric albedo derived from the thermal + ref amplitude.
"""
return A_thermref * (self.R_p / self.a) ** -2 * (const.au / const.R_jup) ** 2
def mass(self, derived_from=None, K=False, alpha_beam=False):
if derived_from == "ellip":
return self.m_from_ellip()
elif derived_from == "beam":
return self.m_from_beam(K=K, alpha_beam=alpha_beam)
else:
raise ValueError("derived_from must equal either 'ellip' or 'beam'")
def nearest_neighbors(self, value, array, max_difference):
"""
Returns a set of nearest neighbor indices of the given array.
"""
return set(list((np.where(abs(array - value) < max_difference))[0]))
def correct_maxdiff(self, value, array, guess):
while len(self.nearest_neighbors(value, array, guess)) > 0:
guess -= 0.01 * guess
return guess
def shared_neighbor(self, value1, array1, max_diff1, value2, array2, max_diff2):
set1 = self.nearest_neighbors(value1, array1, max_diff1)
set2 = self.nearest_neighbors(value2, array2, max_diff2)
nearest = list(set1.intersection(set2))
# if len(nearest) > 1:
# newmax_diff1 = self.correct_maxdiff(value1, array1, max_diff1)
# newmax_diff2 = self.correct_maxdiff(value2, array2, max_diff2)
# print(newmax_diff1, newmax_diff2)
# if newmax_diff2 > newmax_diff1:
# max_diff2 = newmax_diff2
# else:
# max_diff1 = newmax_diff1
# set1 = self.nearest_neighbors(value1, array1, max_diff1)
# set2 = self.nearest_neighbors(value2, array2, max_diff2)
# nearest = list(set1.intersection(set2))
# print(nearest)
# # if len(nearest) > 1:
# # raise ValueError("Multiple shared nearest neighbors, indices = ", nearest)
# # else:
# # return nearest[0]
return nearest[0]
def tess_warning(self):
if self.tele != 'tess':
raise ValueError("This function is only appropriate for observations done with " +
"the TESS satellite")
def claret_LDC(self):
"""
Returns the mu coefficient and the four-parameters used in the Claret four-parameter
limb-darkening law (Claret 2000). These are obtained by finding the nearest neighbor
in the model limb-darkening of TESS from Claret 2018.
"""
# print("claret_LDC is still garbage, sorry. Quitting now...")
# exit()
self.tess_warning()
logg, Teff, a1, a2, a3, a4, mu, mod = np.genfromtxt('../claret_ldc.dat',
usecols=(0,1,4,5,6,7,8,10),
unpack=True)
mod = np.genfromtxt('../claret_ldc.dat', usecols=(10,), dtype='str')
if self.T_star <= 3000:
# the PC model is meant for cool stars, and if we break it up this way we can do an
# easier 2D interpolation.
mask = mod == 'PD'
else:
mask = mod == 'PC'
logg = logg[mask]
Teff = Teff[mask]
a1 = a1[mask]
a2 = a2[mask]
a3 = a3[mask]
a4 = a4[mask]
mu = mu[mask]
nearest = self.shared_neighbor(self.T_star, Teff, 100, self.logg, logg, 0.25)
mu = mu[nearest]
a_coeffs = [a1[nearest], a2[nearest], a3[nearest], a4[nearest]]
return mu, a_coeffs
def GDC(self):
"""
Returns the gravity-darkening coefficient from the Claret 2017 model
"""
self.tess_warning()
logg, log_Teff, g = np.genfromtxt('../claret_gdc.dat', usecols=(2,3,4), unpack=True)
nearest = self.shared_neighbor(np.log10(self.T_star), log_Teff, .01, self.logg,
logg, 0.25)
return g[nearest]
def LDC(self):
"""
Returns the limb-darkening coefficient of the host star.
"""
mu, a_coeffs = self.claret_LDC()
return 1 - sum([a_coeffs[k] * (1 - mu ** ((k+1) / 2)) for k in range(4)])
def get_response_specs(tele):
if tele=="tess":
return "../tess-response-function-v1.0.csv", ',', 1e1
elif tele=="kepler":
return "../kepler_hires.dat", '\t', 1e4
def response_func(tele):
file, delimiter, to_AA = get_response_specs(tele)
lambdas, response = np.genfromtxt(file, delimiter=delimiter, usecols=(0,1), unpack=True)
return lambdas * to_AA, response
def get_a(P, M_star, M_p):
"""
Use Kepler's third law to derive the star-planet separation.
"""
return (P ** 2 * const.G.value * (M_star + M_p) / (4 * np.pi ** 2)) ** (1/3)
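# Example (illustrative numbers only): a Jupiter-mass planet on a 3.5 day orbit
# around a solar-mass star sits at roughly 0.045 au.
#
#   a_m = get_a(3.5 * 86400, const.M_sun.value, const.M_jup.value)
#   a_au = a_m / const.au.value   # ~0.045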
| 2.46875 | 2 |
src/vanilla_pytorch/prune_model.py | f2010126/LTH_Master | 0 | 7260 |
import torch.nn.utils.prune as prune
import torch
from src.vanilla_pytorch.utils import count_rem_weights
from src.vanilla_pytorch.models.linearnets import LeNet, init_weights
from src.vanilla_pytorch.models.resnets import Resnets
def remove_pruning(model):
for i, (name, module) in enumerate(model.named_modules()):
# name and val
if any([isinstance(module, cl) for cl in [torch.nn.Conv2d, torch.nn.Linear]]):
prune.remove(module, 'weight')
def get_masks(model, prune_amts=None):
"""
    Prune the lowest-magnitude weights in each layer and return the resulting masks.
    :param model: model to prune
    :param prune_amts: dictionary of prune rates per layer type
                       ("linear", "conv", "last"); defaults to 0.2 each, as per the paper
    :return: the created masks; the model itself has served its purpose.
"""
# TODO: Adjust pruning with output layer
if prune_amts is None: # ie dict is empty, use the default prune rate = 0.2
prune_amts = {"linear": 0.2, "conv": 0.2, "last": 0.2}
for i, (name, module) in enumerate(model.named_modules()):
# prune 20% of connections in all 2D-conv layers
if isinstance(module, torch.nn.Conv2d):
module = prune.l1_unstructured(module, name='weight', amount=prune_amts['conv'])
# prune 20% of connections in all linear layers
elif isinstance(module, torch.nn.Linear):
module = prune.l1_unstructured(module, name='weight', amount=prune_amts['linear'])
masks = list(model.named_buffers())
remove_pruning(model)
return masks
def update_apply_masks(model, masks):
# doesn't seem to be needed.
# for key, val in masks.items():
# print(f"key {key}")
# layer = getattr(model, key.split('.')[0])
# layer.weight_mask = val
    # get_masks() returns a list of (name, tensor) pairs from named_buffers();
    # convert it to a dict so masks can be looked up by buffer name below.
    mask_dict = dict(masks)
    for name, module in model.named_modules():
        if any([isinstance(module, cl) for cl in [torch.nn.Conv2d, torch.nn.Linear]]):
            module = prune.custom_from_mask(module, name='weight',
                                            mask=mask_dict[name + ".weight_mask"])
# remove_pruning(model)
return model
def prune_random(model, prune_amts=None):
if prune_amts is None: # ie dict is empty, use the default prune rate =0.2
prune_amts = {"linear": 0.2, "conv": 0.2, "last": 0.2}
for name, module in model.named_modules():
# prune 20% of connections in all 2D-conv layers
if isinstance(module, torch.nn.Conv2d):
module = prune.random_unstructured(module, name='weight', amount=prune_amts['conv'])
# prune 20% of connections in all linear layers
elif isinstance(module, torch.nn.Linear):
module = prune.random_unstructured(module, name='weight', amount=prune_amts['linear'])
remove_pruning(model)
if __name__ == '__main__':
net = Resnets(in_channels=3)
net.apply(init_weights)
prune_rate = 0.8
prune_custom = {"linear": 0.2, "conv": 0.2, "last": 0.1}
for i in range(3):
masks = get_masks(net, prune_amts=prune_custom)
print(f"Count zero : {count_rem_weights(net)}")
| 2.359375 | 2 |
Grid-neighbor-search/GNS/read_instance_2layer_2LMM_L.py | CitrusAqua/mol-infer | 0 | 7261 |
"""
read_instance_2layer_2LMM_L.py
"""
'''
[seed graph]
V_C : "V_C"
E_C : "E_C"
[core specification]
ell_LB : "\ell_{\rm LB}"
ell_UB : "\ell_{\rm UB}"
cs_LB : "\textsc{cs}_{\rm LB}"
cs_UB : "\textsc{cs}_{\rm UB}"
'''
import sys
def read_pmax_file(filename):
with open(filename,'r') as f:
F = [line.rstrip('\n') for line in f if line[0]!='#']
p_max = int(F.pop(0))
s = F.pop(0)
delta = list(map(float, s.split(' ')))
s = F.pop(0)
r = list(map(int, s.split(' ')))
return p_max, delta, r
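# Illustrative sketch of the layout read_pmax_file() expects (reconstructed
# from the parsing code above, not an actual file from the repository):
# comment lines start with '#', then one line with p_max, one line of
# space-separated floats (delta) and one line of space-separated integers (r):
#
#   # example pmax file
#   3
#   0.1 0.2 0.3
#   1 2 3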
def read_seed_graph(filename):
with open(filename,'r') as f:
F = [line.rstrip('\n') for line in f if line[0]!='#']
### read V_C ###
num_V_C = int(F.pop(0))
V_C = tuple(range(1,num_V_C+1))
### read E_C ###
num_E_C = int(F.pop(0))
E_C = {}
for e in range(num_E_C):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
E_C[arr[0]] = (arr[0], arr[1], arr[2]) # Add arr[0] to distinguish two edges with same starting and ending vertices, by Zhu
### read ell_LB and ell_UB ###
ell_LB = {}
ell_UB = {}
for e in range(num_E_C):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
ell_LB[arr[0]] = arr[1]
ell_UB[arr[0]] = arr[2]
### compute E_ge_two, E_ge_one, E_zero_one, E_equal_one ###
E_ge_two = []
E_ge_one = []
E_zero_one = []
E_equal_one = []
I_ge_two = []
I_ge_one = []
I_zero_one = []
I_equal_one = []
for e in E_C:
if ell_LB[e] >= 2:
E_ge_two.append(E_C[e])
I_ge_two.append(e)
elif ell_LB[e] == 1 and ell_UB[e] >= 2:
E_ge_one.append(E_C[e])
I_ge_one.append(e)
elif ell_LB[e] == 0 and ell_UB[e] == 1:
E_zero_one.append(E_C[e])
I_zero_one.append(e)
elif ell_LB[e] == 1 and ell_UB[e] == 1:
E_equal_one.append(E_C[e])
I_equal_one.append(e)
else:
sys.stderr.write('error: a strange edge is found.\n')
sys.exit(1)
### read n_LB_int and n_UB_int ###
n_LB_int = int(F.pop(0))
n_UB_int = int(F.pop(0))
# read n_LB and n_star
n_LB = int(F.pop(0))
n_star = int(F.pop(0))
# read rho
rho = int(F.pop(0))
### read ch_LB and ch_UB ###
ch_LB = {}
ch_UB = {}
for v in range(num_V_C):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
ch_LB[arr[0]] = arr[1]
ch_UB[arr[0]] = arr[2]
for e in range(len(E_ge_two + E_ge_one)):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
ch_LB[E_C[arr[0]]] = arr[1]
ch_UB[E_C[arr[0]]] = arr[2]
### read bl_LB and bl_UB ###
bl_LB = {}
bl_UB = {}
for v in range(num_V_C):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
bl_LB[arr[0]] = arr[1]
bl_UB[arr[0]] = arr[2]
for e in range(len(E_ge_two + E_ge_one)):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
bl_LB[E_C[arr[0]]] = arr[1]
bl_UB[E_C[arr[0]]] = arr[2]
# read Lambda
s = F.pop(0)
Lambda = list(s.split(' '))
# read Lambda_dg_int
s = F.pop(0)
num = int(s)
Lambda_dg_int = list()
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
Lambda_dg_int.append((arr[0], int(arr[1])))
# read Gamma_int_ac
s = F.pop(0)
num = int(s)
Gamma_int_ac = list()
nu_int = list()
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
tmp_1 = (arr[0], arr[1], int(arr[2]))
tmp_2 = (arr[1], arr[0], int(arr[2]))
nu_int.append(tmp_1)
if tmp_1 not in Gamma_int_ac:
Gamma_int_ac.append(tmp_1)
if tmp_2 not in Gamma_int_ac:
Gamma_int_ac.append(tmp_2)
# read Gamma_int
s = F.pop(0)
num = int(s)
Gamma_int = list()
gam_int = list()
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
tmp_1 = ((arr[0], int(arr[1])), (arr[2], int(arr[3])), int(arr[4]))
tmp_2 = ((arr[2], int(arr[3])), (arr[0], int(arr[1])), int(arr[4]))
gam_int.append(tmp_1)
if tmp_1 not in Gamma_int:
Gamma_int.append(tmp_1)
if tmp_2 not in Gamma_int:
Gamma_int.append(tmp_2)
# read Lambda_star
Lambda_star = {i: set() for i in range(1, num_V_C + 1)}
for i in range(1, num_V_C + 1):
s = F.pop(0)
arr = list(s.split(' '))
ind = int(arr[0])
arr.pop(0)
for a in arr:
Lambda_star[ind].add(a)
Lambda_int = list()
# read na_LB and na_UB
s = F.pop(0)
num = int(s)
na_LB = {}
na_UB = {}
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
na_LB[arr[0]] = int(arr[1])
na_UB[arr[0]] = int(arr[2])
# read na_LB_int and na_UB_int
s = F.pop(0)
num = int(s)
na_LB_int = {}
na_UB_int = {}
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
na_LB_int[arr[0]] = int(arr[1])
na_UB_int[arr[0]] = int(arr[2])
Lambda_int.append(arr[0])
# read ns_LB_int and ns_UB_int
s = F.pop(0)
num = int(s)
ns_LB_int = {}
ns_UB_int = {}
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
ns_LB_int[(arr[0], int(arr[1]))] = int(arr[2])
ns_UB_int[(arr[0], int(arr[1]))] = int(arr[3])
# read ac_LB_int and ac_UB_int
s = F.pop(0)
num = int(s)
ac_LB_int = {}
ac_UB_int = {}
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
a1, a2, m = nu_int[int(arr[0]) - 1]
ac_LB_int[(a1, a2, m)] = int(arr[1])
ac_LB_int[(a2, a1, m)] = int(arr[1])
ac_UB_int[(a1, a2, m)] = int(arr[2])
ac_UB_int[(a2, a1, m)] = int(arr[2])
# read ec_LB_int and ec_UB_int
s = F.pop(0)
num = int(s)
ec_LB_int = {}
ec_UB_int = {}
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
a1, a2, m = gam_int[int(arr[0]) - 1]
ec_LB_int[(a1, a2, m)] = int(arr[1])
ec_LB_int[(a2, a1, m)] = int(arr[1])
ec_UB_int[(a1, a2, m)] = int(arr[2])
ec_UB_int[(a2, a1, m)] = int(arr[2])
# read bd2_LB and bd2_UB
bd2_LB = {}
bd2_UB = {}
for e in range(len(E_C)):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
bd2_LB[E_C[arr[0]]] = arr[1]
bd2_UB[E_C[arr[0]]] = arr[2]
# read bd3_LB and bd3_UB
bd3_LB = {}
bd3_UB = {}
for e in range(len(E_C)):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
bd3_LB[E_C[arr[0]]] = arr[1]
bd3_UB[E_C[arr[0]]] = arr[2]
# read ac_LB_lf and ac_UB_lf
s = F.pop(0)
num = int(s)
ac_LB_lf = dict()
ac_UB_lf = dict()
for e in range(num):
s = F.pop(0)
arr = list(s.split(' '))
ac_LB_lf[(arr[0], arr[1], int(arr[2]))] = int(arr[3])
ac_UB_lf[(arr[0], arr[1], int(arr[2]))] = int(arr[4])
s = F.pop(0)
arr = list(map(int, s.split(' ')))
ac_LB_lf_common = arr[0]
ac_UB_lf_common = arr[1]
####################################
# # Undefined constants for instances but used in MILP
r_GC = num_E_C - (num_V_C - 1)
dg_LB = [0,0,0,0,0]
dg_UB = [n_star,n_star,n_star,n_star,n_star]
return V_C, E_C, \
E_ge_two, E_ge_one, E_zero_one, E_equal_one, \
I_ge_two, I_ge_one, I_zero_one, I_equal_one, \
ell_LB, ell_UB, n_LB_int, n_UB_int, \
n_LB, n_star, rho, \
ch_LB, ch_UB, bl_LB, bl_UB, \
Lambda, Lambda_dg_int, Gamma_int_ac, Gamma_int, \
Lambda_star, na_LB, na_UB, Lambda_int, \
na_LB_int, na_UB_int, ns_LB_int, ns_UB_int, \
ac_LB_int, ac_UB_int, ec_LB_int, ec_UB_int, \
bd2_LB, bd2_UB, bd3_LB, bd3_UB, \
dg_LB, dg_UB, ac_LB_lf, ac_UB_lf, ac_LB_lf_common, ac_UB_lf_common, r_GC
def get_value(filename):
y_min = 0
y_max = 0
ind = 0
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
if len(line.split(",")) < 2:
continue
if line.split(",")[0] == "CID":
continue
if ind == 0:
y_min = float(line.split(",")[1])
y_max = float(line.split(",")[1])
ind = 1
else:
y_tmp = float(line.split(",")[1])
if y_tmp > y_max:
y_max = y_tmp
if y_tmp < y_min:
y_min = y_tmp
return y_min, y_max
# prepare a set of chemical rooted tree
class chemicalRootedTree():
def __init__(self):
self.root = ("e", 0)
self.index = 0
self.vertex = []
self.adj = []
self.alpha = []
self.beta = []
self.height = 0
self.chg = []
def prepare_fringe_trees(fringe_filename, Lambda):
# modified for 2LMM, 0527
set_F = list()
strF = dict()
fc_LB = dict()
fc_UB = dict()
with open(fringe_filename,'r') as f:
lines = f.readlines()
for line in lines:
if len(line.split(",")) < 4:
continue
ind = int(line.split(",")[0])
str1 = line.split(",")[1]
str2 = line.split(",")[2]
str3 = line.split(",")[3].replace('\n', '')
if len(line.split(",")) > 4:
LB_tmp = line.split(",")[4].replace('\n', '')
LB_tmp = LB_tmp.replace(' ', '')
fc_LB[ind] = int(LB_tmp)
UB_tmp = line.split(",")[5].replace('\n', '')
UB_tmp = UB_tmp.replace(' ', '')
fc_UB[ind] = int(UB_tmp)
else:
fc_LB[ind] = 0
fc_UB[ind] = 10
psi = chemicalRootedTree()
seq1 = str1.split()
seq2 = [int(mul) for mul in line.split(",")[2].split()]
seq3 = [int(chg) for chg in line.split(",")[3].split()]
psi.index = ind
psi.vertex = [(seq1[j], int(seq1[j + 1])) for j in range(0, len(seq1), 2)]
psi.root = psi.vertex[0]
psi.height = max(psi.vertex[v][1] for v in range(len(psi.vertex)) if psi.vertex[v][0] != "H1")
psi.adj = [set() for _ in range(len(psi.vertex))]
psi.beta = [[0 for _ in range(len(psi.vertex))] for _ in range(len(psi.vertex))]
psi.chg = [chg for chg in seq3]
for j in range(len(seq2)):
cld = j + 1
prt = max(v for v in range(j + 1) if psi.vertex[v][1] == psi.vertex[cld][1] - 1)
psi.adj[prt].add(cld)
psi.adj[cld].add(prt)
psi.beta[prt][cld] = seq2[j]
psi.beta[cld][prt] = seq2[j]
# print(str(prt) + " " + str(cld) + " " + str(j) + " " + str(seq2[j]))
flag = True
for (a, d) in psi.vertex:
if a not in Lambda:
flag = False
break
if flag:
strF[ind] = (str1, str2, str3)
set_F.append(psi)
Lambda_ex = list()
for psi in set_F:
for (a, d) in psi.vertex[1:]:
if a not in Lambda_ex and a in Lambda:
Lambda_ex.append(a)
return set_F, Lambda_ex, strF, fc_LB, fc_UB
if __name__=="__main__":
V_C, E_C, \
E_ge_two, E_ge_one, E_zero_one, E_equal_one, \
I_ge_two, I_ge_one, I_zero_one, I_equal_one, \
ell_LB, ell_UB, n_LB_int, n_UB_int, \
n_LB, n_star, rho, \
ch_LB, ch_UB, bl_LB, bl_UB, \
Lambda, Lambda_dg_int, Gamma_int_ac, Gamma_int, \
Lambda_star, na_LB, na_UB, Lambda_int, \
na_LB_int, na_UB_int, ns_LB_int, ns_UB_int, \
ac_LB_int, ac_UB_int, ec_LB_int, ec_UB_int, \
bd2_LB, bd2_UB, bd3_LB, bd3_UB, dg_LB, dg_UB = read_seed_graph(sys.argv[1])
set_F, psi_epsilon, Code_F, n_psi, deg_r, \
beta_r, atom_r, ht, Lambda_ex = prepare_fringe_trees(sys.argv[2])
# print(V_C)
# print(E_C)
# print(E_ge_two)
# print(E_ge_one)
# print(E_zero_one)
# print(E_equal_one)
# print(ell_LB)
# print(ell_UB)
# print(bl_UB)
for psi in (set_F + [psi_epsilon]):
print(str(Code_F[psi]) + " " + str(n_psi[Code_F[psi]]) + " " + \
str(ht[Code_F[psi]]) + " " + str(atom_r[Code_F[psi]]) + " " + \
str(deg_r[Code_F[psi]]) + " " + str(beta_r[Code_F[psi]]))
# print(Lambda_ex)
# set_F_v = {v : set_F for v in V_C}
# set_F_E = set_F
# n_C = max(psi.numVertex - 1 for v in V_C for psi in set_F_v[v])
# n_T = max(psi.numVertex - 1 for psi in set_F_E)
# n_F = max(psi.numVertex - 1 for psi in set_F_E)
# print(str(n_C) + " " + str(n_T) + " " + str(n_F))
MAX_VAL = 4
val = {"C": 4, "O": 2, "N": 3}
n_H = dict()
na_alpha_ex = {ele : {i + 1 : 0} for i in range(len(set_F)) for ele in Lambda_ex}
for i, psi in enumerate(set_F):
n_H_tmp = {d : 0 for d in range(MAX_VAL)}
na_ex_tmp = {ele : 0 for ele in Lambda_ex}
for u, (ele, dep) in enumerate(psi.vertex[1:]):
beta_tmp = 0
na_ex_tmp[ele] += 1
for v in psi.adj[u + 1]:
beta_tmp += psi.beta[u + 1][v]
d_tmp = val[ele] - beta_tmp
n_H_tmp[d_tmp] += 1
for ele, d in na_alpha_ex.items():
d[i + 1] = na_ex_tmp[ele]
n_H[i + 1] = n_H_tmp
print(n_H)
print(na_alpha_ex)
| 2.171875 | 2 |
gamma/system_input.py | ArtBIT/gamma | 15 | 7262 | from .system import *
from .colours import *
class InputSystem(System):
def init(self):
self.key = 'input'
def setRequirements(self):
self.requiredComponents = ['input']
def updateEntity(self, entity, scene):
# don't allow input during a cutscene
if scene.cutscene is not None:
return
# run the stored input context
if entity.getComponent('input').inputContext is not None:
entity.getComponent('input').inputContext(entity)
| 2.6875 | 3 |
tensorflow_federated/python/research/utils/checkpoint_utils_test.py | mcognetta/federated | 0 | 7263 | # Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ServerState save."""
import functools
import os
import attr
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_federated.python.examples.mnist import models
from tensorflow_federated.python.research.utils import checkpoint_utils
@attr.s(cmp=False, frozen=False)
class Obj(object):
"""Container for all state that need to be stored in the checkpoint.
Attributes:
model: A ModelWeights structure, containing Tensors or Variables.
optimizer_state: A list of Tensors or Variables, in the order returned by
optimizer.variables().
round_num: Training round_num.
"""
model = attr.ib()
optimizer_state = attr.ib()
round_num = attr.ib()
@classmethod
def from_anon_tuple(cls, anon_tuple, round_num):
# TODO(b/130724878): These conversions should not be needed.
return cls(
model=anon_tuple.model._asdict(recursive=True),
optimizer_state=list(anon_tuple.optimizer_state),
round_num=round_num)
class SavedStateTest(tf.test.TestCase):
def test_save_and_load(self):
server_optimizer_fn = functools.partial(
tf.keras.optimizers.SGD, learning_rate=0.1, momentum=0.9)
iterative_process = tff.learning.build_federated_averaging_process(
models.model_fn, server_optimizer_fn=server_optimizer_fn)
server_state = iterative_process.initialize()
# TODO(b/130724878): These conversions should not be needed.
obj = Obj.from_anon_tuple(server_state, 1)
export_dir = os.path.join(self.get_temp_dir(), 'ckpt_1')
checkpoint_utils.save(obj, export_dir)
loaded_obj = checkpoint_utils.load(export_dir, obj)
self.assertAllClose(tf.nest.flatten(obj), tf.nest.flatten(loaded_obj))
def test_load_latest_state(self):
server_optimizer_fn = functools.partial(
tf.keras.optimizers.SGD, learning_rate=0.1, momentum=0.9)
iterative_process = tff.learning.build_federated_averaging_process(
models.model_fn, server_optimizer_fn=server_optimizer_fn)
server_state = iterative_process.initialize()
# TODO(b/130724878): These conversions should not be needed.
obj_1 = Obj.from_anon_tuple(server_state, 1)
export_dir = os.path.join(self.get_temp_dir(), 'ckpt_1')
checkpoint_utils.save(obj_1, export_dir)
# TODO(b/130724878): These conversions should not be needed.
obj_2 = Obj.from_anon_tuple(server_state, 2)
export_dir = os.path.join(self.get_temp_dir(), 'ckpt_2')
checkpoint_utils.save(obj_2, export_dir)
export_dir = checkpoint_utils.latest_checkpoint(self.get_temp_dir())
loaded_obj = checkpoint_utils.load(export_dir, obj_1)
self.assertEqual(os.path.join(self.get_temp_dir(), 'ckpt_2'), export_dir)
self.assertAllClose(tf.nest.flatten(obj_2), tf.nest.flatten(loaded_obj))
if __name__ == '__main__':
tf.compat.v1.enable_v2_behavior()
tf.test.main()
| 1.96875 | 2 |
website/models/user.py | alexli0707/pyforum | 4 | 7264 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import peewee
from flask import current_app,abort
from flask.ext.login import AnonymousUserMixin, UserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from peewee import Model, IntegerField, CharField,PrimaryKeyField
from website.app import db_wrapper, login_manager
from website.http.main_exception import MainException
from werkzeug.security import check_password_hash,generate_password_hash
class User(UserMixin, db_wrapper.Model):
id = PrimaryKeyField()
email = CharField(index=True)
username = CharField(index=True)
password_hash = CharField()
role_id = IntegerField()
confirmed = IntegerField()
class Meta:
db_table = 'users'
def register(self,email,password,username):
user = User(email=email, username=username, password_hash=generate_password_hash(password))
try:
user.save()
except peewee.IntegrityError as err:
print(err.args)
if err.args[0] == 1062:
if 'ix_users_email' in err.args[1]:
raise MainException.DUPLICATE_EMAIL
if 'ix_users_username' in err.args[1]:
raise MainException.DUPLICATE_USERNAME
return user
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=3600):
"""生成验证邮箱的token"""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm(self, token):
"""验证邮箱"""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
print(data)
except:
return False
if data.get('confirm') != self.id:
return False
        # verification succeeded, persist to the database
self.confirmed = True
self.save()
return True
def generate_reset_token(self, expiration=3600):
"""生成重置密码的token"""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
def reset_password(self, token, new_password):
"""重置密码"""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('reset') != self.id:
return False
        # token verified, persist the new password to the database
self.password = <PASSWORD>
self.save()
return True
"""
Anonymous user
"""
class AnonymousUser(AnonymousUserMixin):
def can(self, permissions):
return False
def is_administrator(self):
return False
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
user = User.get(User.id == int(user_id))
if not user:
abort(404)
else:
return user
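# Hedged usage sketch (illustrative values only; assumes a Flask app context and
# a provisioned 'users' table):
#   user = User().register('user@example.com', 'secret', 'someone')
#   user.verify_password('secret') # -> True
#   token = user.generate_confirmation_token()
#   user.confirm(token) # -> True and marks the account as confirmed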
| 2.515625 | 3 |
FlaskApp/__init__.py | robertavram/project5 | 7 | 7265 | # application
import application | 1.101563 | 1 |
sim2net/speed/constant.py | harikuts/dsr_optimization | 12 | 7266 | <reponame>harikuts/dsr_optimization
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2012 <NAME> <mkalewski at cs.put.poznan.pl>
#
# This file is a part of the Simple Network Simulator (sim2net) project.
# USE, MODIFICATION, COPYING AND DISTRIBUTION OF THIS SOFTWARE IS SUBJECT TO
# THE TERMS AND CONDITIONS OF THE MIT LICENSE. YOU SHOULD HAVE RECEIVED A COPY
# OF THE MIT LICENSE ALONG WITH THIS SOFTWARE; IF NOT, YOU CAN DOWNLOAD A COPY
# FROM HTTP://WWW.OPENSOURCE.ORG/.
#
# For bug reports, feature and support requests please visit
# <https://github.com/mkalewski/sim2net/issues>.
"""
Provides an implementation of a constant node speed. In this case a speed of a
node is constant at a given value.
"""
from math import fabs
from sim2net.speed._speed import Speed
from sim2net.utility.validation import check_argument_type
__docformat__ = 'reStructuredText'
class Constant(Speed):
"""
This class implements a constant node speed fixed at a given value.
"""
def __init__(self, speed):
"""
*Parameters*:
- **speed** (`float`): a value of the node speed.
*Example*:
.. testsetup::
from sim2net.speed.constant import Constant
.. doctest::
>>> speed = Constant(5.0)
>>> speed.current
5.0
>>> speed.get_new()
5.0
>>> speed = Constant(-5.0)
>>> speed.current
5.0
>>> speed.get_new()
5.0
"""
super(Constant, self).__init__(Constant.__name__)
check_argument_type(Constant.__name__, 'speed', float, speed,
self.logger)
self.__current_speed = fabs(float(speed))
@property
def current(self):
"""
(*Property*) The absolute value of the current speed of type `float`.
"""
return self.__current_speed
def get_new(self):
"""
Returns the absolute value of the given node speed of type `float`.
"""
return self.current
| 2.90625 | 3 |
nexula/nexula_utility/utility_extract_func.py | haryoa/nexula | 3 | 7267 | <reponame>haryoa/nexula
from nexula.nexula_utility.utility_import_var import import_class
class NexusFunctionModuleExtractor():
"""
    Used for constructing the data preprocessing and feature representer pipeline
"""
def __init__(self, module_class_list, args_dict, **kwargs):
"""
        Instantiate the class objects used in the pipeline
Parameters
----------
module_class_list
args_dict
kwargs
"""
# self.list_of_cls = self._search_module_function(module_class_list)
self.list_of_cls = module_class_list
if 'logger' in kwargs:
self.logger = kwargs['logger']
self.logger.debug(args_dict) if 'logger' in self.__dict__ else None
self.args_init = [arg['init'] for arg in args_dict]
self.args_call = [arg['call'] for arg in args_dict]
self._construct_object()
# Extract call
def _construct_object(self):
"""
        Instantiate an object for every step of the pipeline
"""
import logging
logger = logging.getLogger('nexula')
logger.debug(self.list_of_cls)
new_list_of_cls = []
for i, cls in enumerate(self.list_of_cls): # REFACTOR
logger.debug(cls)
new_list_of_cls.append(cls(**self.args_init[i]))
self.list_of_cls = new_list_of_cls
def _search_module_function(self, module_function_list):
"""
Search the module in the library
Parameters
----------
module_function_list
Returns
-------
"""
list_of_cls = []
for module, function in module_function_list:
# TODO Raise exception if empty
list_of_cls.append(import_class(function, module))
return list_of_cls
def __call__(self, x, y, *args, **kwargs):
"""
        Run all pipeline steps by invoking each object's __call__ method
Returns
-------
"""
for i,cls in enumerate(self.list_of_cls):
current_args = self.args_call[i]
x, y = cls(x, y, **kwargs, **current_args)
return x, y
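# Hedged usage sketch: LowercaseStep is an illustrative stand-in, not a nexula
# component. Each step is a class whose instances are called as
# step(x, y, **call_args) and return the transformed (x, y) pair.
if __name__ == '__main__':
    class LowercaseStep:
        def __init__(self, **init_args):
            pass
        def __call__(self, x, y, **call_args):
            return [doc.lower() for doc in x], y
    pipeline = NexusFunctionModuleExtractor([LowercaseStep], [{'init': {}, 'call': {}}])
    x, y = pipeline(['Hello Nexula'], [1])
    print(x, y)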
| 2.40625 | 2 |
marbas/preprocessing.py | MJ-Jang/Marbas | 0 | 7268 | <filename>marbas/preprocessing.py
import os
from configparser import ConfigParser
cfg = ConfigParser()
#PATH_CUR = os.getcwd() + '/pynori'
PATH_CUR = os.path.dirname(__file__)
cfg.read(PATH_CUR+'/config.ini')
# PREPROCESSING
ENG_LOWER = cfg.getboolean('PREPROCESSING', 'ENG_LOWER')
class Preprocessing(object):
"""Preprocessing modules before tokenizing
    It requires no initialization arguments.
"""
def __init__(self):
pass
def pipeline(self, input_str):
if ENG_LOWER:
input_str = self.lower(input_str)
return input_str
def lower(self, input_str):
return input_str.lower()
def typo(self, input_str):
"""To correct typing errors"""
pass
def spacing(self, input_str):
"""To correct spacing errors"""
pass
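# Hedged usage sketch: with ENG_LOWER set to true in config.ini, pipeline()
# lower-cases the input text; otherwise the text passes through unchanged.
if __name__ == '__main__':
    preprocessor = Preprocessing()
    print(preprocessor.pipeline('Hello MARBAS'))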
| 2.8125 | 3 |
pravash/servicenowplugin/xlr-servicenow-plugin-master/src/main/resources/servicenow/ServiceNowQueryTile.py | amvasudeva/rapidata | 0 | 7269 | #
# THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS
# FOR A PARTICULAR PURPOSE. THIS CODE AND INFORMATION ARE NOT SUPPORTED BY XEBIALABS.
#
import sys
import com.xhaus.jyson.JysonCodec as json
if not servicenowServer:
raise Exception("ServiceNow server ID must be provided")
if not username:
username = servicenowServer["username"]
if not password:
password = servicenowServer["password"]
servicenowUrl = servicenowServer['url']
credentials = CredentialsFallback(servicenowServer, username, password).getCredentials()
content = None
RESPONSE_OK_STATUS = 200
print "Sending content %s" % content
def get_row_data(item):
row_map = {}
for column in detailsViewColumns:
if detailsViewColumns[column] and "." in detailsViewColumns[column]:
json_col = detailsViewColumns[column].split('.')
if item[json_col[0]]:
row_map[column] = item[json_col[0]][json_col[1]]
else:
row_map[column] = item[column]
row_map['link'] = servicenowUrl + "nav_to.do?uri=%s.do?sys_id=%s" % (tableName, item['sys_id'])
return row_map
servicenowAPIUrl = servicenowUrl + '/api/now/v1/table/%s?sysparm_display_value=true&sysparm_limit=1000&sysparm_query=%s' % (tableName, query)
servicenowResponse = XLRequest(servicenowAPIUrl, 'GET', content, credentials['username'], credentials['password'], 'application/json').send()
if servicenowResponse.status == RESPONSE_OK_STATUS:
json_data = json.loads(servicenowResponse.read())
rows = {}
for item in json_data['result']:
row = item['number']
rows[row] = get_row_data(item)
data = rows
else:
error = json.loads(servicenowResponse.read())
if 'Invalid table' in error['error']['message']:
print "Invalid Table Name"
data = {"Invalid table name"}
servicenowResponse.errorDump()
else:
print "Failed to run query in Service Now"
servicenowResponse.errorDump()
sys.exit(1) | 2.0625 | 2 |
bc/recruitment/migrations/0022_merge_20200331_1633.py | Buckinghamshire-Digital-Service/buckinghamshire-council | 1 | 7270 | <reponame>Buckinghamshire-Digital-Service/buckinghamshire-council<filename>bc/recruitment/migrations/0022_merge_20200331_1633.py
# Generated by Django 2.2.10 on 2020-03-31 15:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("recruitment", "0021_merge_20200331_1503"),
("recruitment", "0013_button_block"),
]
operations = []
| 1.304688 | 1 |
Stage_3/Task11_Graph/depth_first_search.py | Pyabecedarian/Algorithms-and-Data-Structures-using-Python | 0 | 7271 | """
The Depth First Search (DFS)
The goal of a dfs is to search as deeply as possible, connecting as many nodes in the graph as possible and
branching where necessary. Whereas BFS builds a search tree one level at a time, DFS creates a search
tree by exploring one branch of the tree as deeply as possible.
As with BFS, DFS makes use of `predecessor` links to construct the tree. In
addition, DFS makes use of two additional instance variables in the Vertex class, `discovery` and
`finish_time`.
    predecessor : same as BFS
    discovery : the number of steps taken in the algorithm before a vertex is first encountered
    finish_time : the number of steps taken in the algorithm before a vertex is colored black
"""
from datastruct.graph import Vertex, Graph
class DFSGraph(Graph):
def __init__(self):
super(DFSGraph, self).__init__()
self.time = 0
def reset(self):
self.time = 0
for v in self:
v.color = 'white'
v.predecessor = None
def dfs(self):
self.reset()
for v in self:
if v.color == 'white':
self._dfs_visit(v)
def _dfs_visit(self, vert: Vertex):
vert.color = 'gray'
self.time += 1
vert.discovery = self.time
for nextv in vert.get_connections():
if nextv.color == 'white':
nextv.predecessor = vert
self._dfs_visit(nextv)
vert.color = 'black'
self.time += 1
vert.finish_time = self.time
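# A minimal usage sketch. It assumes the Graph base class from datastruct.graph
# exposes an add_edge() method and iteration over Vertex objects, as used above;
# adjust the calls to the actual datastruct.graph API.
if __name__ == '__main__':
    g = DFSGraph()
    for frm, to in [('A', 'B'), ('B', 'C'), ('A', 'D')]:
        g.add_edge(frm, to)
    g.dfs()
    for v in g:
        print(v.discovery, v.finish_time)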
| 4.125 | 4 |
salt/_modules/freebsd_common.py | rbtcollins/rusty_rail | 16 | 7272 | def sysrc(value):
"""Call sysrc.
CLI Example:
.. code-block:: bash
salt '*' freebsd_common.sysrc sshd_enable=YES
salt '*' freebsd_common.sysrc static_routes
"""
return __salt__['cmd.run_all']("sysrc %s" % value)
| 1.789063 | 2 |
auth0/v3/management/blacklists.py | jhunken/auth0-python | 0 | 7273 | from .rest import RestClient
class Blacklists(object):
"""Auth0 blacklists endpoints
Args:
domain (str): Your Auth0 domain, e.g: 'username.auth0.com'
token (str): Management API v2 Token
telemetry (bool, optional): Enable or disable Telemetry
(defaults to True)
"""
def __init__(self, domain, token, telemetry=True):
self.url = 'https://{}/api/v2/blacklists/tokens'.format(domain)
self.client = RestClient(jwt=token, telemetry=telemetry)
def get(self, aud=None):
"""Retrieves the jti and aud of all tokens in the blacklist.
Args:
aud (str, optional): The JWT's aud claim. The client_id of the
application for which it was issued.
See: https://auth0.com/docs/api/management/v2#!/Blacklists/get_tokens
"""
params = {
'aud': aud
}
return self.client.get(self.url, params=params)
def create(self, jti, aud=''):
"""Adds a token to the blacklist.
Args:
jti (str): the jti of the JWT to blacklist.
aud (str, optional): The JWT's aud claim. The client_id of the
application for which it was issued.
See: https://auth0.com/docs/api/management/v2#!/Blacklists/post_tokens
"""
return self.client.post(self.url, data={'jti': jti, 'aud': aud})
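# Hedged usage sketch (placeholder domain and token; requires a valid
# Management API v2 token with blacklist scopes):
#   blacklist = Blacklists('my-tenant.auth0.com', 'MGMT_API_TOKEN')
#   blacklist.create(jti='jwt-id', aud='client-id')
#   blacklist.get(aud='client-id')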
| 2.734375 | 3 |
test_backtest/simplebacktest.py | qzm/QUANTAXIS | 1 | 7274 | <gh_stars>1-10
# coding=utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import QUANTAXIS as QA
import random
"""
This code aims to provide an extremely easy-to-implement mini backtest: efficient and not event-driven
"""
B = QA.QA_BacktestBroker()
AC = QA.QA_Account()
"""
# set the account's initial assets
AC.reset_assets(assets)
# send an order
Order=AC.send_order(code='000001',amount=1000,time='2018-03-21',towards=QA.ORDER_DIRECTION.BUY,price=0,order_model=QA.ORDER_MODEL.MARKET,amount_model=QA.AMOUNT_MODEL.BY_AMOUNT)
# match (fill) the order
dealmes=B.receive_order(QA.QA_Event(order=Order,market_data=data))
# update the account
AC.receive_deal(dealmes)
# analyse the results
risk=QA.QA_Risk(AC)
"""
AC.reset_assets(20000000)  # set the initial assets
def simple_backtest(AC, code, start, end):
DATA = QA.QA_fetch_stock_day_adv(code, start, end).to_qfq()
    for items in DATA.panel_gen:  # one day has passed
for item in items.security_gen:
            if random.random() > 0.5:  # add a random choice to simulate buying/selling
if AC.sell_available.get(item.code[0], 0) == 0:
order=AC.send_order(
code=item.data.code[0], time=item.data.date[0], amount=1000, towards=QA.ORDER_DIRECTION.BUY, price=0, order_model=QA.ORDER_MODEL.MARKET, amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
)
AC.receive_deal(B.receive_order(QA.QA_Event(order=order,market_data=item)))
else:
AC.receive_deal(B.receive_order(QA.QA_Event(order=AC.send_order(
code=item.data.code[0], time=item.data.date[0], amount=1000, towards=QA.ORDER_DIRECTION.SELL, price=0, order_model=QA.ORDER_MODEL.MARKET, amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
),market_data=item)))
AC.settle()
simple_backtest(AC, QA.QA_fetch_stock_block_adv(
).code[0:10], '2017-01-01', '2018-01-31')
print(AC.message)
AC.save()
risk = QA.QA_Risk(AC)
print(risk.message)
risk.save() | 1.554688 | 2 |
artview/components/field.py | jjhelmus/artview | 0 | 7275 | """
field.py
A class used to modify the displayed field via the Display window.
"""
# Load the needed packages
from functools import partial
from ..core import Variable, Component, QtGui, QtCore
class FieldButtonWindow(Component):
'''Class to display a Window with Field name radio buttons.'''
Vradar = None #: see :ref:`shared_variable`
Vfield = None #: see :ref:`shared_variable`
def __init__(self, Vradar=None, Vfield=None, name="FieldButtons",
parent=None):
'''
Initialize the class to create the interface.
Parameters
----------
[Optional]
Vradar : :py:class:`~artview.core.core.Variable` instance
Radar signal variable. If None start new one with None
Vfield : :py:class:`~artview.core.core.Variable` instance
Field signal variable. If None start new one empty string
name : string
Field Radiobutton window name.
parent : PyQt instance
Parent instance to associate to FieldButtonWindow.
If None, then Qt owns, otherwise associated with parent PyQt
instance.
Notes
-----
        This class records the selected button and passes the
        changed value back to the shared variable.
'''
super(FieldButtonWindow, self).__init__(name=name, parent=parent)
# Set up signal, so that DISPLAY can react to external
# (or internal) changes in field (Core.Variable instances expected)
# The change is sent through Vfield
if Vradar is None:
self.Vradar = Variable(None)
else:
self.Vradar = Vradar
if Vfield is None:
self.Vfield = Variable('')
else:
self.Vfield = Vfield
self.sharedVariables = {"Vradar": self.NewRadar,
"Vfield": self.NewField}
self.connectAllVariables()
self.CreateFieldWidget()
self.SetFieldRadioButtons()
self.show()
########################
# Button methods #
########################
def FieldSelectCmd(self, field):
'''Captures a selection and updates field variable.'''
self.Vfield.change(field)
def CreateFieldWidget(self):
        '''Create a widget to hold the radio buttons that control field selection.'''
self.radioBox = QtGui.QGroupBox("Field Selection", parent=self)
self.rBox_layout = QtGui.QVBoxLayout(self.radioBox)
self.radioBox.setLayout(self.rBox_layout)
self.setCentralWidget(self.radioBox)
def SetFieldRadioButtons(self):
'''Set a field selection using radio buttons.'''
# Instantiate the buttons into a list for future use
self.fieldbutton = {}
if self.Vradar.value is None:
return
# Loop through and create each field button and
# connect a value when selected
for field in self.Vradar.value.fields.keys():
button = QtGui.QRadioButton(field, self.radioBox)
self.fieldbutton[field] = button
QtCore.QObject.connect(button, QtCore.SIGNAL("clicked()"),
partial(self.FieldSelectCmd, field))
self.rBox_layout.addWidget(button)
# set Checked the current field
self.NewField(self.Vfield, self.Vfield.value, True)
def NewField(self, variable, value, strong):
'''Slot for 'ValueChanged' signal of
:py:class:`Vfield <artview.core.core.Variable>`.
This will:
* Update radio check
'''
if (self.Vradar.value is not None and
value in self.Vradar.value.fields):
self.fieldbutton[value].setChecked(True)
def NewRadar(self, variable, value, strong):
'''Slot for 'ValueChanged' signal of
:py:class:`Vradar <artview.core.core.Variable>`.
This will:
* Recreate radio items
'''
self.CreateFieldWidget()
self.SetFieldRadioButtons()
| 2.96875 | 3 |
rest_framework_mongoengine/fields.py | Careerleaf/django-rest-framework-mongoengine | 0 | 7276 | from bson.errors import InvalidId
from django.core.exceptions import ValidationError
from django.utils.encoding import smart_str
from mongoengine import dereference
from mongoengine.base.document import BaseDocument
from mongoengine.document import Document
from rest_framework import serializers
from mongoengine.fields import ObjectId
import sys
if sys.version_info[0] >= 3:
def unicode(val):
return str(val)
class MongoDocumentField(serializers.WritableField):
MAX_RECURSION_DEPTH = 5 # default value of depth
def __init__(self, *args, **kwargs):
try:
self.model_field = kwargs.pop('model_field')
self.depth = kwargs.pop('depth', self.MAX_RECURSION_DEPTH)
except KeyError:
raise ValueError("%s requires 'model_field' kwarg" % self.type_label)
super(MongoDocumentField, self).__init__(*args, **kwargs)
def transform_document(self, document, depth):
data = {}
# serialize each required field
for field in document._fields:
if hasattr(document, smart_str(field)):
# finally check for an attribute 'field' on the instance
obj = getattr(document, field)
else:
continue
val = self.transform_object(obj, depth-1)
if val is not None:
data[field] = val
return data
def transform_dict(self, obj, depth):
return dict([(key, self.transform_object(val, depth-1))
for key, val in obj.items()])
def transform_object(self, obj, depth):
"""
Models to natives
Recursion for (embedded) objects
"""
if depth == 0:
# Return primary key if exists, else return default text
return str(getattr(obj, 'pk', "Max recursion depth exceeded"))
elif isinstance(obj, BaseDocument):
# Document, EmbeddedDocument
return self.transform_document(obj, depth-1)
elif isinstance(obj, dict):
# Dictionaries
return self.transform_dict(obj, depth-1)
elif isinstance(obj, list):
# List
return [self.transform_object(value, depth-1) for value in obj]
elif obj is None:
return None
else:
return unicode(obj) if isinstance(obj, ObjectId) else obj
class ReferenceField(MongoDocumentField):
type_label = 'ReferenceField'
def from_native(self, value):
try:
dbref = self.model_field.to_python(value)
except InvalidId:
raise ValidationError(self.error_messages['invalid'])
instance = dereference.DeReference().__call__([dbref])[0]
# Check if dereference was successful
if not isinstance(instance, Document):
msg = self.error_messages['invalid']
raise ValidationError(msg)
return instance
def to_native(self, obj):
return self.transform_object(obj, self.depth)
class ListField(MongoDocumentField):
type_label = 'ListField'
def from_native(self, value):
return self.model_field.to_python(value)
def to_native(self, obj):
return self.transform_object(obj, self.depth)
class EmbeddedDocumentField(MongoDocumentField):
type_label = 'EmbeddedDocumentField'
def __init__(self, *args, **kwargs):
try:
self.document_type = kwargs.pop('document_type')
except KeyError:
raise ValueError("EmbeddedDocumentField requires 'document_type' kwarg")
super(EmbeddedDocumentField, self).__init__(*args, **kwargs)
def get_default_value(self):
return self.to_native(self.default())
def to_native(self, obj):
if obj is None:
return None
else:
return self.model_field.to_mongo(obj)
def from_native(self, value):
return self.model_field.to_python(value)
class DynamicField(MongoDocumentField):
type_label = 'DynamicField'
def to_native(self, obj):
return self.model_field.to_python(obj) | 2.125 | 2 |
tests/conftest.py | bbhunter/fuzz-lightyear | 169 | 7277 | import pytest
from fuzz_lightyear.datastore import _ALL_POST_FUZZ_HOOKS_BY_OPERATION
from fuzz_lightyear.datastore import _ALL_POST_FUZZ_HOOKS_BY_TAG
from fuzz_lightyear.datastore import _RERUN_POST_FUZZ_HOOKS_BY_OPERATION
from fuzz_lightyear.datastore import _RERUN_POST_FUZZ_HOOKS_BY_TAG
from fuzz_lightyear.datastore import get_excluded_operations
from fuzz_lightyear.datastore import get_included_tags
from fuzz_lightyear.datastore import get_non_vulnerable_operations
from fuzz_lightyear.datastore import get_user_defined_mapping
from fuzz_lightyear.plugins import get_enabled_plugins
from fuzz_lightyear.request import get_victim_session_factory
from fuzz_lightyear.supplements.abstraction import get_abstraction
@pytest.fixture(autouse=True)
def clear_caches():
get_abstraction.cache_clear()
get_user_defined_mapping.cache_clear()
get_enabled_plugins.cache_clear()
get_victim_session_factory.cache_clear()
get_excluded_operations.cache_clear()
get_non_vulnerable_operations.cache_clear()
get_included_tags.cache_clear()
_ALL_POST_FUZZ_HOOKS_BY_OPERATION.clear()
_ALL_POST_FUZZ_HOOKS_BY_TAG.clear()
_RERUN_POST_FUZZ_HOOKS_BY_OPERATION.clear()
_RERUN_POST_FUZZ_HOOKS_BY_TAG.clear()
@pytest.fixture(autouse=True)
def ignore_hypothesis_non_interactive_example_warning():
"""In theory we're not supposed to use hypothesis'
strategy.example(), but fuzz-lightyear isn't using
hypothesis in a normal way.
"""
import warnings
from hypothesis.errors import NonInteractiveExampleWarning
warnings.filterwarnings(
'ignore',
category=NonInteractiveExampleWarning,
)
| 1.796875 | 2 |
src/diepvries/field.py | michael-the1/diepvries | 67 | 7278 | """Module for a Data Vault field."""
from typing import Optional
from . import (
FIELD_PREFIX,
FIELD_SUFFIX,
METADATA_FIELDS,
TABLE_PREFIXES,
UNKNOWN,
FieldDataType,
FieldRole,
TableType,
)
class Field:
"""A field in a Data Vault model."""
def __init__(
self,
parent_table_name: str,
name: str,
data_type: FieldDataType,
position: int,
is_mandatory: bool,
precision: int = None,
scale: int = None,
length: int = None,
):
"""Instantiate a Field.
Convert both name and parent_table_name to lower case.
Args:
parent_table_name: Name of parent table in the database.
name: Column name in the database.
data_type: Column data type in the database.
position: Column position in the database.
is_mandatory: Column is mandatory in the database.
precision: Numeric precision (maximum number of digits before the decimal
separator). Only applicable when `self.data_type==FieldDataType.NUMBER`.
scale: Numeric scale (maximum number of digits after the decimal
separator). Only applicable when `self.data_type==FieldDataType.NUMBER`.
length: Character length (maximum number of characters allowed). Only
applicable when `self.data_type==FieldDataType.TEXT`.
"""
self.parent_table_name = parent_table_name.lower()
self.name = name.lower()
self.data_type = data_type
self.position = position
self.is_mandatory = is_mandatory
self.precision = precision
self.scale = scale
self.length = length
def __hash__(self):
"""Hash of a Data Vault field."""
return hash(self.name_in_staging)
def __eq__(self, other):
"""Equality of a Data Vault field."""
return self.name_in_staging == other.name_in_staging
def __str__(self) -> str:
"""Representation of a Field object as a string.
This helps the tracking of logging events per entity.
Returns:
String representation for the `Field` object.
"""
return f"{type(self).__name__}: {self.name}"
@property
def data_type_sql(self) -> str:
"""Build SQL expression to represent the field data type."""
if self.data_type == FieldDataType.NUMBER:
return f"{self.data_type.value} ({self.precision}, {self.scale})"
if self.data_type == FieldDataType.TEXT and self.length:
return f"{self.data_type.value} ({self.length})"
return f"{self.data_type.name}"
@property
def hash_concatenation_sql(self) -> str:
"""Build SQL expression to deterministically represent the field as a string.
This expression is needed to produce hashes (hashkey/hashdiff) that are
consistent, independently on the data type used to store the field in the
extraction table.
The SQL expression does the following steps:
1. Cast field to its data type in the DV model.
2. Produce a consistent string representation of the result of step 1, depending
on the field data type.
3. Ensure the result of step 2 never returns NULL.
Returns:
SQL expression to deterministically represent the field as a string.
"""
hash_concatenation_sql = ""
date_format = "yyyy-mm-dd"
time_format = "hh24:mi:ss.ff9"
timezone_format = "tzhtzm"
cast_expression = (
f"CAST({self.name} AS {self.data_type_sql})"
if self.data_type != FieldDataType.GEOGRAPHY
else f"TO_GEOGRAPHY({self.name})"
)
if self.data_type in (FieldDataType.TIMESTAMP_LTZ, FieldDataType.TIMESTAMP_TZ):
hash_concatenation_sql = (
f"TO_CHAR({cast_expression}, "
f"'{date_format} {time_format} {timezone_format}')"
)
elif self.data_type == FieldDataType.TIMESTAMP_NTZ:
hash_concatenation_sql = (
f"TO_CHAR({cast_expression}, '{date_format} {time_format}')"
)
elif self.data_type == FieldDataType.DATE:
hash_concatenation_sql = f"TO_CHAR({cast_expression}, '{date_format}')"
elif self.data_type == FieldDataType.TIME:
hash_concatenation_sql = f"TO_CHAR({cast_expression}, '{time_format}')"
elif self.data_type == FieldDataType.TEXT:
hash_concatenation_sql = cast_expression
elif self.data_type == FieldDataType.GEOGRAPHY:
hash_concatenation_sql = f"ST_ASTEXT({cast_expression})"
else:
hash_concatenation_sql = f"CAST({cast_expression} AS TEXT)"
default_value = UNKNOWN if self.role == FieldRole.BUSINESS_KEY else ""
return f"COALESCE({hash_concatenation_sql}, '{default_value}')"
@property
def suffix(self) -> str:
"""Get field suffix.
Returns:
Field suffix.
"""
return self.name.split("_").pop()
@property
def prefix(self) -> str:
"""Get field prefix.
Returns:
Field prefix.
"""
return next(split_part for split_part in self.name.split("_"))
@property
def parent_table_type(self) -> TableType:
"""Get parent table type, based on table prefix.
Returns:
Table type (HUB, LINK or SATELLITE).
"""
table_prefix = next(
split_part for split_part in self.parent_table_name.split("_")
)
if table_prefix in TABLE_PREFIXES[TableType.LINK]:
return TableType.LINK
if table_prefix in TABLE_PREFIXES[TableType.SATELLITE]:
return TableType.SATELLITE
return TableType.HUB
@property
def name_in_staging(self) -> str:
"""Get the name that this field should have, when created in a staging table.
In most cases this function will return `self.name`, but for hashdiffs the name
is <parent_table_name>_hashdiff (every Satellite has one hashdiff field, named
s_hashdiff).
Returns:
Name of the field in staging.
"""
if self.role == FieldRole.HASHDIFF:
return f"{self.parent_table_name}_{FIELD_SUFFIX[FieldRole.HASHDIFF]}"
return self.name
@property
def ddl_in_staging(self) -> str:
"""Get DDL expression to create this field in the staging table.
Returns:
The DDL expression for this field.
"""
return (
f"{self.name_in_staging} {self.data_type_sql}"
f"{' NOT NULL' if self.is_mandatory else ''}"
)
@property
def role(self) -> FieldRole:
"""Get the role of the field in a Data Vault model.
See `FieldRole` enum for more information.
Returns:
Field role in a Data Vault model.
Raises:
RuntimeError: When no field role can be attributed.
"""
found_role: Optional[FieldRole] = None
if self.name in METADATA_FIELDS.values():
found_role = FieldRole.METADATA
elif (
self.name == f"{self.parent_table_name}_{self.suffix}"
and self.suffix == FIELD_SUFFIX[FieldRole.HASHKEY]
):
found_role = FieldRole.HASHKEY
elif self.suffix == FIELD_SUFFIX[FieldRole.HASHKEY]:
found_role = FieldRole.HASHKEY_PARENT
elif self.prefix == FIELD_PREFIX[FieldRole.CHILD_KEY]:
found_role = FieldRole.CHILD_KEY
elif (
self.parent_table_type != TableType.SATELLITE
and self.prefix not in FIELD_PREFIX.values()
and self.position != 1
):
found_role = FieldRole.BUSINESS_KEY
elif self.suffix == FIELD_SUFFIX[FieldRole.HASHDIFF]:
found_role = FieldRole.HASHDIFF
elif self.parent_table_type == TableType.SATELLITE:
found_role = FieldRole.DESCRIPTIVE
if found_role is not None:
return found_role
raise RuntimeError(
(
f"{self.name}: It was not possible to assign a valid field role "
f" (validate FieldRole and FIELD_PREFIXES configuration)"
)
)
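# A minimal sketch of inspecting a Field; the table and column names below are
# illustrative only and not part of any real model.
if __name__ == "__main__":
    example_field = Field(
        parent_table_name="hub_customer",
        name="customer_id",
        data_type=FieldDataType.TEXT,
        position=2,
        is_mandatory=True,
        length=32,
    )
    # ddl_in_staging renders "<name> <data type>[ NOT NULL]" for staging tables.
    print(example_field.ddl_in_staging)
    print(example_field.role)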
| 3.0625 | 3 |
mmdet/datasets/deepscoresV2.py | tuggeluk/mmdetection | 1 | 7279 | <reponame>tuggeluk/mmdetection
"""DEEPSCORESV2
Provides access to the DEEPSCORESV2 database with a COCO-like interface. The
only changes made compared to the coco.py file are the class labels.
Author:
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
Created on:
November 23, 2019
"""
from .coco import *
import os
import json
from obb_anns import OBBAnns
@DATASETS.register_module
class DeepScoresV2Dataset(CocoDataset):
def load_annotations(self, ann_file):
self.obb = OBBAnns(ann_file)
self.obb.load_annotations()
self.obb.set_annotation_set_filter(['deepscores'])
self.obb.set_class_blacklist(["staff"])
self.cat_ids = list(self.obb.get_cats().keys())
self.cat2label = {
cat_id: i
for i, cat_id in enumerate(self.cat_ids)
}
self.label2cat = {v: k for k, v in self.cat2label.items()}
self.CLASSES = tuple([v["name"] for (k, v) in self.obb.get_cats().items()])
self.img_ids = [id['id'] for id in self.obb.img_info]
return self.obb.img_info
def get_ann_info(self, idx):
return self._parse_ann_info(*self.obb.get_img_ann_pair(idxs=[idx]))
def _filter_imgs(self, min_size=32):
valid_inds = []
for i, img_info in enumerate(self.obb.img_info):
if self.filter_empty_gt and len(img_info['ann_ids']) == 0:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
img_info, ann_info = img_info[0], ann_info[0]
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
for i, ann in ann_info.iterrows():
# we have no ignore feature
if ann['area'] <= 0:
continue
bbox = ann['a_bbox']
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['cat_id'][0]])
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=None,
seg_map=None)
return ann
def prepare_json_dict(self, results):
json_results = {"annotation_set": "deepscores", "proposals": []}
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['img_id'] = img_id
data['bbox'] = [str(nr) for nr in bboxes[i][0:-1]]
data['score'] = str(bboxes[i][-1])
data['cat_id'] = self.label2cat[label]
json_results["proposals"].append(data)
return json_results
def write_results_json(self, results, filename=None):
if filename is None:
filename = "deepscores_results.json"
json_results = self.prepare_json_dict(results)
with open(filename, "w") as fo:
json.dump(json_results, fo)
return filename
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=True,
proposal_nums=(100, 300, 1000),
iou_thrs=np.arange(0.5, 0.96, 0.05),
average_thrs=False):
"""Evaluation in COCO protocol.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
classwise (bool): Whether to evaluating the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float]): IoU threshold used for evaluating
recalls. If set to a list, the average recall of all IoUs will
also be computed. Default: 0.5.
Returns:
dict[str: float]
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
filename = self.write_results_json(results)
self.obb.load_proposals(filename)
metric_results = self.obb.calculate_metrics(iou_thrs=iou_thrs, classwise=classwise, average_thrs=average_thrs)
metric_results = {self.CLASSES[self.cat2label[key]]: value for (key, value) in metric_results.items()}
# add occurences
occurences_by_class = self.obb.get_class_occurences()
for (key, value) in metric_results.items():
value.update(no_occurences=occurences_by_class[key])
if True:
import pickle
pickle.dump(metric_results, open('evaluation_renamed_rcnn.pickle', 'wb'))
print(metric_results)
return metric_results
| 2.296875 | 2 |
tests/go_cd_configurator_test.py | agsmorodin/gomatic | 0 | 7280 | #!/usr/bin/env python
import unittest
from xml.dom.minidom import parseString
import xml.etree.ElementTree as ET
from decimal import Decimal
from gomatic import GoCdConfigurator, FetchArtifactDir, RakeTask, ExecTask, ScriptExecutorTask, FetchArtifactTask, \
FetchArtifactFile, Tab, GitMaterial, PipelineMaterial, Pipeline
from gomatic.fake import FakeHostRestClient, empty_config_xml, config, empty_config
from gomatic.gocd.pipelines import DEFAULT_LABEL_TEMPLATE
from gomatic.gocd.artifacts import Artifact
from gomatic.xml_operations import prettify
def find_with_matching_name(things, name):
return [thing for thing in things if thing.name == name]
def standard_pipeline_group():
return GoCdConfigurator(config('config-with-typical-pipeline')).ensure_pipeline_group('P.Group')
def typical_pipeline():
return standard_pipeline_group().find_pipeline('typical')
def more_options_pipeline():
return GoCdConfigurator(config('config-with-more-options-pipeline')).ensure_pipeline_group('P.Group').find_pipeline('more-options')
def empty_pipeline():
return GoCdConfigurator(empty_config()).ensure_pipeline_group("pg").ensure_pipeline("pl").set_git_url("gurl")
def empty_stage():
return empty_pipeline().ensure_stage("deploy-to-dev")
class TestAgents(unittest.TestCase):
def _agents_from_config(self):
return GoCdConfigurator(config('config-with-just-agents')).agents
def test_could_have_no_agents(self):
agents = GoCdConfigurator(empty_config()).agents
self.assertEquals(0, len(agents))
def test_agents_have_resources(self):
agents = self._agents_from_config()
self.assertEquals(2, len(agents))
self.assertEquals({'a-resource', 'b-resource'}, agents[0].resources)
def test_agents_have_names(self):
agents = self._agents_from_config()
self.assertEquals('go-agent-1', agents[0].hostname)
self.assertEquals('go-agent-2', agents[1].hostname)
def test_agent_could_have_no_resources(self):
agents = self._agents_from_config()
self.assertEquals(0, len(agents[1].resources))
def test_can_add_resource_to_agent_with_no_resources(self):
agent = self._agents_from_config()[1]
agent.ensure_resource('a-resource-that-it-does-not-already-have')
self.assertEquals(1, len(agent.resources))
def test_can_add_resource_to_agent(self):
agent = self._agents_from_config()[0]
self.assertEquals(2, len(agent.resources))
agent.ensure_resource('a-resource-that-it-does-not-already-have')
self.assertEquals(3, len(agent.resources))
class TestJobs(unittest.TestCase):
def test_jobs_have_resources(self):
stages = typical_pipeline().stages
job = stages[0].jobs[0]
resources = job.resources
self.assertEquals(1, len(resources))
self.assertEquals({'a-resource'}, resources)
def test_job_has_nice_tostring(self):
job = typical_pipeline().stages[0].jobs[0]
self.assertEquals("Job('compile', [ExecTask(['make', 'options', 'source code'])])", str(job))
def test_jobs_can_have_timeout(self):
job = typical_pipeline().ensure_stage("deploy").ensure_job("upload")
self.assertEquals(True, job.has_timeout)
self.assertEquals('20', job.timeout)
def test_can_set_timeout(self):
job = empty_stage().ensure_job("j")
j = job.set_timeout("42")
self.assertEquals(j, job)
self.assertEquals(True, job.has_timeout)
self.assertEquals('42', job.timeout)
def test_jobs_do_not_have_to_have_timeout(self):
stages = typical_pipeline().stages
job = stages[0].jobs[0]
self.assertEquals(False, job.has_timeout)
try:
timeout = job.timeout
self.fail("should have thrown exception")
except RuntimeError:
pass
def test_jobs_can_run_on_all_agents(self):
job = more_options_pipeline().ensure_stage("earlyStage").ensure_job("earlyWorm")
self.assertEquals(True, job.runs_on_all_agents)
def test_jobs_do_not_have_to_run_on_all_agents(self):
job = typical_pipeline().ensure_stage("build").ensure_job("compile")
self.assertEquals(False, job.runs_on_all_agents)
def test_jobs_can_be_made_to_run_on_all_agents(self):
job = typical_pipeline().ensure_stage("build").ensure_job("compile")
j = job.set_runs_on_all_agents()
self.assertEquals(j, job)
self.assertEquals(True, job.runs_on_all_agents)
def test_jobs_can_be_made_to_not_run_on_all_agents(self):
job = typical_pipeline().ensure_stage("build").ensure_job("compile")
j = job.set_runs_on_all_agents(False)
self.assertEquals(j, job)
self.assertEquals(False, job.runs_on_all_agents)
def test_can_ensure_job_has_resource(self):
stages = typical_pipeline().stages
job = stages[0].jobs[0]
j = job.ensure_resource('moo')
self.assertEquals(j, job)
self.assertEquals(2, len(job.resources))
self.assertEquals({'a-resource', 'moo'}, job.resources)
def test_jobs_have_artifacts(self):
job = more_options_pipeline().ensure_stage("earlyStage").ensure_job("earlyWorm")
artifacts = job.artifacts
self.assertEquals({
Artifact.get_build_artifact("target/universal/myapp*.zip", "artifacts"),
Artifact.get_build_artifact("scripts/*", "files"),
Artifact.get_test_artifact("from", "to")},
artifacts)
def test_job_that_has_no_artifacts_has_no_artifacts_element_to_reduce_thrash(self):
go_cd_configurator = GoCdConfigurator(empty_config())
job = go_cd_configurator.ensure_pipeline_group("g").ensure_pipeline("p").ensure_stage("s").ensure_job("j")
job.ensure_artifacts(set())
self.assertEquals(set(), job.artifacts)
xml = parseString(go_cd_configurator.config)
self.assertEquals(0, len(xml.getElementsByTagName('artifacts')))
def test_artifacts_might_have_no_dest(self):
job = more_options_pipeline().ensure_stage("s1").ensure_job("rake-job")
artifacts = job.artifacts
self.assertEquals(1, len(artifacts))
self.assertEquals({Artifact.get_build_artifact("things/*")}, artifacts)
def test_can_add_build_artifacts_to_job(self):
job = more_options_pipeline().ensure_stage("earlyStage").ensure_job("earlyWorm")
job_with_artifacts = job.ensure_artifacts({
Artifact.get_build_artifact("a1", "artifacts"),
Artifact.get_build_artifact("a2", "others")})
self.assertEquals(job, job_with_artifacts)
artifacts = job.artifacts
self.assertEquals(5, len(artifacts))
self.assertTrue({Artifact.get_build_artifact("a1", "artifacts"), Artifact.get_build_artifact("a2", "others")}.issubset(artifacts))
def test_can_add_test_artifacts_to_job(self):
job = more_options_pipeline().ensure_stage("earlyStage").ensure_job("earlyWorm")
job_with_artifacts = job.ensure_artifacts({
Artifact.get_test_artifact("a1"),
Artifact.get_test_artifact("a2")})
self.assertEquals(job, job_with_artifacts)
artifacts = job.artifacts
self.assertEquals(5, len(artifacts))
self.assertTrue({Artifact.get_test_artifact("a1"), Artifact.get_test_artifact("a2")}.issubset(artifacts))
def test_can_ensure_artifacts(self):
job = more_options_pipeline().ensure_stage("earlyStage").ensure_job("earlyWorm")
job.ensure_artifacts({
Artifact.get_test_artifact("from", "to"),
Artifact.get_build_artifact("target/universal/myapp*.zip", "somewhereElse"),
Artifact.get_test_artifact("another", "with dest"),
Artifact.get_build_artifact("target/universal/myapp*.zip", "artifacts")})
self.assertEquals({
Artifact.get_build_artifact("target/universal/myapp*.zip", "artifacts"),
Artifact.get_build_artifact("scripts/*", "files"),
Artifact.get_test_artifact("from", "to"),
Artifact.get_build_artifact("target/universal/myapp*.zip", "somewhereElse"),
Artifact.get_test_artifact("another", "with dest")
},
job.artifacts)
def test_jobs_have_tasks(self):
job = more_options_pipeline().ensure_stage("s1").jobs[2]
tasks = job.tasks
self.assertEquals(4, len(tasks))
self.assertEquals('rake', tasks[0].type)
self.assertEquals('sometarget', tasks[0].target)
self.assertEquals('passed', tasks[0].runif)
self.assertEquals('fetchartifact', tasks[1].type)
self.assertEquals('more-options', tasks[1].pipeline)
self.assertEquals('earlyStage', tasks[1].stage)
self.assertEquals('earlyWorm', tasks[1].job)
self.assertEquals(FetchArtifactDir('sourceDir'), tasks[1].src)
self.assertEquals('destDir', tasks[1].dest)
self.assertEquals('passed', tasks[1].runif)
def test_runif_defaults_to_passed(self):
pipeline = typical_pipeline()
tasks = pipeline.ensure_stage("build").ensure_job("compile").tasks
self.assertEquals("passed", tasks[0].runif)
def test_jobs_can_have_rake_tasks(self):
job = more_options_pipeline().ensure_stage("s1").jobs[0]
tasks = job.tasks
self.assertEquals(1, len(tasks))
self.assertEquals('rake', tasks[0].type)
self.assertEquals("boo", tasks[0].target)
def test_can_ensure_rake_task(self):
job = more_options_pipeline().ensure_stage("s1").jobs[0]
job.ensure_task(RakeTask("boo"))
self.assertEquals(1, len(job.tasks))
def test_can_add_rake_task(self):
job = more_options_pipeline().ensure_stage("s1").jobs[0]
job.ensure_task(RakeTask("another"))
self.assertEquals(2, len(job.tasks))
self.assertEquals("another", job.tasks[1].target)
def test_script_executor_task(self):
script = '''
echo This is script
echo 'This is a string in single quotes'
echo "This is a string in double quotes"
'''
job = more_options_pipeline().ensure_stage("script-executor").\
ensure_job('test-script-executor')
job.ensure_task(ScriptExecutorTask(script, runif='any'))
self.assertEquals(1, len(job.tasks))
self.assertEquals('script', job.tasks[0].type)
self.assertEquals(script, job.tasks[0].script)
self.assertEquals('any', job.tasks[0].runif)
job.ensure_task(ScriptExecutorTask(script, runif='failed'))
self.assertEquals(2, len(job.tasks))
self.assertEquals('script', job.tasks[1].type)
self.assertEquals(script, job.tasks[1].script)
self.assertEquals('failed', job.tasks[1].runif)
job.ensure_task(ScriptExecutorTask(script))
self.assertEquals(3, len(job.tasks))
self.assertEquals('script', job.tasks[2].type)
self.assertEquals(script, job.tasks[2].script)
self.assertEquals('passed', job.tasks[2].runif)
def test_can_add_exec_task_with_runif(self):
stages = typical_pipeline().stages
job = stages[0].jobs[0]
added_task = job.add_task(ExecTask(['ls', '-la'], 'some/dir', "failed"))
self.assertEquals(2, len(job.tasks))
task = job.tasks[1]
self.assertEquals(task, added_task)
self.assertEquals(['ls', '-la'], task.command_and_args)
self.assertEquals('some/dir', task.working_dir)
self.assertEquals('failed', task.runif)
def test_can_add_exec_task(self):
stages = typical_pipeline().stages
job = stages[0].jobs[0]
added_task = job.add_task(ExecTask(['ls', '-la'], 'some/dir'))
self.assertEquals(2, len(job.tasks))
task = job.tasks[1]
self.assertEquals(task, added_task)
self.assertEquals(['ls', '-la'], task.command_and_args)
self.assertEquals('some/dir', task.working_dir)
def test_can_ensure_exec_task(self):
stages = typical_pipeline().stages
job = stages[0].jobs[0]
t1 = job.ensure_task(ExecTask(['ls', '-la'], 'some/dir'))
t2 = job.ensure_task(ExecTask(['make', 'options', 'source code']))
job.ensure_task(ExecTask(['ls', '-la'], 'some/otherdir'))
job.ensure_task(ExecTask(['ls', '-la'], 'some/dir'))
self.assertEquals(3, len(job.tasks))
self.assertEquals(t2, job.tasks[0])
self.assertEquals(['make', 'options', 'source code'], (job.tasks[0]).command_and_args)
self.assertEquals(t1, job.tasks[1])
self.assertEquals(['ls', '-la'], (job.tasks[1]).command_and_args)
self.assertEquals('some/dir', (job.tasks[1]).working_dir)
self.assertEquals(['ls', '-la'], (job.tasks[2]).command_and_args)
self.assertEquals('some/otherdir', (job.tasks[2]).working_dir)
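    # XML escaping is expected to be transparent here: args read from config come back unescaped,
    # and args passed in survive unchanged.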
def test_exec_task_args_are_unescaped_as_appropriate(self):
job = more_options_pipeline().ensure_stage("earlyStage").ensure_job("earlyWorm")
task = job.tasks[1]
self.assertEquals(["bash", "-c",
'curl "http://domain.com/service/check?target=one+two+three&key=2714_beta%40domain.com"'],
task.command_and_args)
def test_exec_task_args_are_escaped_as_appropriate(self):
job = empty_stage().ensure_job("j")
task = job.add_task(ExecTask(["bash", "-c",
'curl "http://domain.com/service/check?target=one+two+three&key=2714_beta%40domain.com"']))
self.assertEquals(["bash", "-c",
'curl "http://domain.com/service/check?target=one+two+three&key=2714_beta%40domain.com"'],
task.command_and_args)
def test_can_have_no_tasks(self):
self.assertEquals(0, len(empty_stage().ensure_job("empty_job").tasks))
def test_can_add_fetch_artifact_task_to_job(self):
stages = typical_pipeline().stages
job = stages[0].jobs[0]
added_task = job.add_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('d'), runif="any"))
self.assertEquals(2, len(job.tasks))
task = job.tasks[1]
self.assertEquals(added_task, task)
self.assertEquals('p', task.pipeline)
self.assertEquals('s', task.stage)
self.assertEquals('j', task.job)
self.assertEquals(FetchArtifactDir('d'), task.src)
self.assertEquals('any', task.runif)
def test_fetch_artifact_task_can_have_src_file_rather_than_src_dir(self):
job = more_options_pipeline().ensure_stage("s1").ensure_job("variety-of-tasks")
tasks = job.tasks
self.assertEquals(4, len(tasks))
self.assertEquals('more-options', tasks[1].pipeline)
self.assertEquals('earlyStage', tasks[1].stage)
self.assertEquals('earlyWorm', tasks[1].job)
self.assertEquals(FetchArtifactFile('someFile'), tasks[2].src)
self.assertEquals('passed', tasks[1].runif)
self.assertEquals(['true'], tasks[3].command_and_args)
def test_fetch_artifact_task_can_have_dest(self):
pipeline = more_options_pipeline()
job = pipeline.ensure_stage("s1").ensure_job("variety-of-tasks")
tasks = job.tasks
self.assertEquals(FetchArtifactTask("more-options",
"earlyStage",
"earlyWorm",
FetchArtifactDir("sourceDir"),
dest="destDir"),
tasks[1])
def test_can_ensure_fetch_artifact_tasks(self):
job = more_options_pipeline().ensure_stage("s1").ensure_job("variety-of-tasks")
job.ensure_task(FetchArtifactTask("more-options", "middleStage", "middleJob", FetchArtifactFile("someFile")))
first_added_task = job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('dir')))
self.assertEquals(5, len(job.tasks))
self.assertEquals(first_added_task, job.tasks[4])
self.assertEquals('p', (job.tasks[4]).pipeline)
self.assertEquals('s', (job.tasks[4]).stage)
self.assertEquals('j', (job.tasks[4]).job)
self.assertEquals(FetchArtifactDir('dir'), (job.tasks[4]).src)
self.assertEquals('passed', (job.tasks[4]).runif)
job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactFile('f')))
self.assertEquals(FetchArtifactFile('f'), (job.tasks[5]).src)
job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('dir'), dest="somedest"))
self.assertEquals("somedest", (job.tasks[6]).dest)
job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('dir'), runif="failed"))
self.assertEquals('failed', (job.tasks[7]).runif)
def test_tasks_run_if_defaults_to_passed(self):
job = empty_stage().ensure_job("j")
job.add_task(ExecTask(['ls', '-la'], 'some/dir'))
job.add_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('dir')))
job.add_task(RakeTask('x'))
self.assertEquals('passed', (job.tasks[0]).runif)
self.assertEquals('passed', (job.tasks[1]).runif)
self.assertEquals('passed', (job.tasks[2]).runif)
def test_tasks_run_if_variants(self):
job = more_options_pipeline().ensure_stage("s1").ensure_job("run-if-variants")
tasks = job.tasks
self.assertEquals('t-passed', tasks[0].command_and_args[0])
self.assertEquals('passed', tasks[0].runif)
self.assertEquals('t-none', tasks[1].command_and_args[0])
self.assertEquals('passed', tasks[1].runif)
self.assertEquals('t-failed', tasks[2].command_and_args[0])
self.assertEquals('failed', tasks[2].runif)
self.assertEquals('t-any', tasks[3].command_and_args[0])
self.assertEquals('any', tasks[3].runif)
self.assertEquals('t-both', tasks[4].command_and_args[0])
self.assertEquals('any', tasks[4].runif)
def test_cannot_set_runif_to_random_things(self):
try:
ExecTask(['x'], runif='whatever')
self.fail("should have thrown exception")
except RuntimeError as e:
self.assertTrue(e.message.count("whatever") > 0)
def test_can_set_runif_to_particular_values(self):
self.assertEquals('passed', ExecTask(['x'], runif='passed').runif)
self.assertEquals('failed', ExecTask(['x'], runif='failed').runif)
self.assertEquals('any', ExecTask(['x'], runif='any').runif)
def test_tasks_dest_defaults_to_none(self): # TODO: maybe None could be avoided
job = empty_stage().ensure_job("j")
job.add_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('dir')))
self.assertEquals(None, (job.tasks[0]).dest)
def test_can_add_exec_task_to_empty_job(self):
job = empty_stage().ensure_job("j")
added_task = job.add_task(ExecTask(['ls', '-la'], 'some/dir', "any"))
self.assertEquals(1, len(job.tasks))
task = job.tasks[0]
self.assertEquals(task, added_task)
self.assertEquals(['ls', '-la'], task.command_and_args)
self.assertEquals('some/dir', task.working_dir)
self.assertEquals('any', task.runif)
def test_can_remove_all_tasks(self):
stages = typical_pipeline().stages
job = stages[0].jobs[0]
self.assertEquals(1, len(job.tasks))
j = job.without_any_tasks()
self.assertEquals(j, job)
self.assertEquals(0, len(job.tasks))
def test_can_have_encrypted_environment_variables(self):
pipeline = GoCdConfigurator(config('config-with-encrypted-variable')).ensure_pipeline_group("defaultGroup").find_pipeline("example")
job = pipeline.ensure_stage('defaultStage').ensure_job('defaultJob')
self.assertEquals({"MY_JOB_PASSWORD": "<PASSWORD>=="}, job.encrypted_environment_variables)
def test_can_set_encrypted_environment_variables(self):
job = empty_stage().ensure_job("j")
job.ensure_encrypted_environment_variables({'one': 'blah=='})
self.assertEquals({"one": "blah=="}, job.encrypted_environment_variables)
def test_can_add_environment_variables(self):
job = typical_pipeline() \
.ensure_stage("build") \
.ensure_job("compile")
j = job.ensure_environment_variables({"new": "one"})
self.assertEquals(j, job)
self.assertEquals({"CF_COLOR": "false", "new": "one"}, job.environment_variables)
def test_environment_variables_get_added_in_sorted_order_to_reduce_config_thrash(self):
go_cd_configurator = GoCdConfigurator(empty_config())
job = go_cd_configurator\
.ensure_pipeline_group('P.Group')\
.ensure_pipeline('P.Name') \
.ensure_stage("build") \
.ensure_job("compile")
job.ensure_environment_variables({"ant": "a", "badger": "a", "zebra": "a"})
xml = parseString(go_cd_configurator.config)
names = [e.getAttribute('name') for e in xml.getElementsByTagName('variable')]
self.assertEquals([u'ant', u'badger', u'zebra'], names)
def test_can_remove_all_environment_variables(self):
job = typical_pipeline() \
.ensure_stage("build") \
.ensure_job("compile")
j = job.without_any_environment_variables()
self.assertEquals(j, job)
self.assertEquals({}, job.environment_variables)
    def test_job_can_have_tabs(self):
job = typical_pipeline() \
.ensure_stage("build") \
.ensure_job("compile")
self.assertEquals([Tab("Time_Taken", "artifacts/test-run-times.html")], job.tabs)
    def test_can_add_tab(self):
job = typical_pipeline() \
.ensure_stage("build") \
.ensure_job("compile")
j = job.ensure_tab(Tab("n", "p"))
self.assertEquals(j, job)
self.assertEquals([Tab("Time_Taken", "artifacts/test-run-times.html"), Tab("n", "p")], job.tabs)
def test_can_ensure_tab(self):
job = typical_pipeline() \
.ensure_stage("build") \
.ensure_job("compile")
job.ensure_tab(Tab("Time_Taken", "artifacts/test-run-times.html"))
self.assertEquals([Tab("Time_Taken", "artifacts/test-run-times.html")], job.tabs)
class TestStages(unittest.TestCase):
def test_pipelines_have_stages(self):
self.assertEquals(2, len(typical_pipeline().stages))
def test_stages_have_names(self):
stages = typical_pipeline().stages
self.assertEquals('build', stages[0].name)
self.assertEquals('deploy', stages[1].name)
def test_stages_can_have_manual_approval(self):
self.assertEquals(False, typical_pipeline().stages[0].has_manual_approval)
self.assertEquals(True, typical_pipeline().stages[1].has_manual_approval)
def test_can_set_manual_approval(self):
stage = typical_pipeline().stages[0]
s = stage.set_has_manual_approval()
self.assertEquals(s, stage)
self.assertEquals(True, stage.has_manual_approval)
def test_stages_have_fetch_materials_flag(self):
stage = typical_pipeline().ensure_stage("build")
self.assertEquals(True, stage.fetch_materials)
stage = more_options_pipeline().ensure_stage("s1")
self.assertEquals(False, stage.fetch_materials)
def test_can_set_fetch_materials_flag(self):
stage = typical_pipeline().ensure_stage("build")
s = stage.set_fetch_materials(False)
self.assertEquals(s, stage)
self.assertEquals(False, stage.fetch_materials)
stage = more_options_pipeline().ensure_stage("s1")
stage.set_fetch_materials(True)
self.assertEquals(True, stage.fetch_materials)
def test_stages_have_jobs(self):
stages = typical_pipeline().stages
jobs = stages[0].jobs
self.assertEquals(1, len(jobs))
self.assertEquals('compile', jobs[0].name)
def test_can_add_job(self):
stage = typical_pipeline().ensure_stage("deploy")
self.assertEquals(1, len(stage.jobs))
ensured_job = stage.ensure_job("new-job")
self.assertEquals(2, len(stage.jobs))
self.assertEquals(ensured_job, stage.jobs[1])
self.assertEquals("new-job", stage.jobs[1].name)
def test_can_add_job_to_empty_stage(self):
stage = empty_stage()
self.assertEquals(0, len(stage.jobs))
ensured_job = stage.ensure_job("new-job")
self.assertEquals(1, len(stage.jobs))
self.assertEquals(ensured_job, stage.jobs[0])
self.assertEquals("new-job", stage.jobs[0].name)
def test_can_ensure_job_exists(self):
stage = typical_pipeline().ensure_stage("deploy")
self.assertEquals(1, len(stage.jobs))
ensured_job = stage.ensure_job("upload")
self.assertEquals(1, len(stage.jobs))
self.assertEquals("upload", ensured_job.name)
def test_can_have_encrypted_environment_variables(self):
pipeline = GoCdConfigurator(config('config-with-encrypted-variable')).ensure_pipeline_group("defaultGroup").find_pipeline("example")
stage = pipeline.ensure_stage('defaultStage')
self.assertEquals({"MY_STAGE_PASSWORD": "<PASSWORD>/s=="}, stage.encrypted_environment_variables)
def test_can_set_encrypted_environment_variables(self):
stage = typical_pipeline().ensure_stage("deploy")
stage.ensure_encrypted_environment_variables({'one': 'blah=='})
self.assertEquals({"one": "blah=="}, stage.encrypted_environment_variables)
def test_can_set_environment_variables(self):
stage = typical_pipeline().ensure_stage("deploy")
s = stage.ensure_environment_variables({"new": "one"})
self.assertEquals(s, stage)
self.assertEquals({"BASE_URL": "http://myurl", "new": "one"}, stage.environment_variables)
def test_can_remove_all_environment_variables(self):
stage = typical_pipeline().ensure_stage("deploy")
s = stage.without_any_environment_variables()
self.assertEquals(s, stage)
self.assertEquals({}, stage.environment_variables)
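# Pipeline-level behaviour: stages, materials, environment variables, parameters, timers, labels and templates.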
class TestPipeline(unittest.TestCase):
def test_pipelines_have_names(self):
pipeline = typical_pipeline()
self.assertEquals('typical', pipeline.name)
def test_can_add_stage(self):
pipeline = empty_pipeline()
self.assertEquals(0, len(pipeline.stages))
new_stage = pipeline.ensure_stage("some_stage")
self.assertEquals(1, len(pipeline.stages))
self.assertEquals(new_stage, pipeline.stages[0])
self.assertEquals("some_stage", new_stage.name)
def test_can_ensure_stage(self):
pipeline = typical_pipeline()
self.assertEquals(2, len(pipeline.stages))
ensured_stage = pipeline.ensure_stage("deploy")
self.assertEquals(2, len(pipeline.stages))
self.assertEquals("deploy", ensured_stage.name)
def test_can_remove_stage(self):
pipeline = typical_pipeline()
self.assertEquals(2, len(pipeline.stages))
p = pipeline.ensure_removal_of_stage("deploy")
self.assertEquals(p, pipeline)
self.assertEquals(1, len(pipeline.stages))
self.assertEquals(0, len([s for s in pipeline.stages if s.name == "deploy"]))
def test_can_ensure_removal_of_stage(self):
pipeline = typical_pipeline()
self.assertEquals(2, len(pipeline.stages))
pipeline.ensure_removal_of_stage("stage-that-has-already-been-deleted")
self.assertEquals(2, len(pipeline.stages))
def test_can_ensure_initial_stage(self):
pipeline = typical_pipeline()
stage = pipeline.ensure_initial_stage("first")
self.assertEquals(stage, pipeline.stages[0])
self.assertEquals(3, len(pipeline.stages))
def test_can_ensure_initial_stage_if_already_exists_as_initial(self):
pipeline = typical_pipeline()
stage = pipeline.ensure_initial_stage("build")
self.assertEquals(stage, pipeline.stages[0])
self.assertEquals(2, len(pipeline.stages))
def test_can_ensure_initial_stage_if_already_exists(self):
pipeline = typical_pipeline()
stage = pipeline.ensure_initial_stage("deploy")
self.assertEquals(stage, pipeline.stages[0])
self.assertEquals("build", pipeline.stages[1].name)
self.assertEquals(2, len(pipeline.stages))
def test_can_set_stage_clean_policy(self):
pipeline = empty_pipeline()
stage1 = pipeline.ensure_stage("some_stage1").set_clean_working_dir()
stage2 = pipeline.ensure_stage("some_stage2")
self.assertEquals(True, pipeline.stages[0].clean_working_dir)
self.assertEquals(True, stage1.clean_working_dir)
self.assertEquals(False, pipeline.stages[1].clean_working_dir)
self.assertEquals(False, stage2.clean_working_dir)
def test_pipelines_can_have_git_urls(self):
pipeline = typical_pipeline()
self.assertEquals("<EMAIL>:springersbm/gomatic.git", pipeline.git_url)
def test_git_is_polled_by_default(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
pipeline.set_git_url("some git url")
self.assertEquals(True, pipeline.git_material.polling)
def test_pipelines_can_have_git_material_with_material_name(self):
pipeline = more_options_pipeline()
self.assertEquals("<EMAIL>:springersbm/gomatic.git", pipeline.git_url)
self.assertEquals("some-material-name", pipeline.git_material.material_name)
def test_git_material_can_ignore_sources(self):
pipeline = GoCdConfigurator(config('config-with-source-exclusions')).ensure_pipeline_group("P.Group").find_pipeline("with-exclusions")
self.assertEquals({"excluded-folder", "another-excluded-folder"}, pipeline.git_material.ignore_patterns)
def test_can_set_pipeline_git_url(self):
pipeline = typical_pipeline()
p = pipeline.set_git_url("<EMAIL>:springersbm/changed.git")
self.assertEquals(p, pipeline)
self.assertEquals("<EMAIL>:springersbm/changed.git", pipeline.git_url)
self.assertEquals('master', pipeline.git_branch)
def test_can_set_pipeline_git_url_with_options(self):
pipeline = typical_pipeline()
p = pipeline.set_git_material(GitMaterial(
"<EMAIL>:springersbm/changed.git",
branch="branch",
destination_directory="foo",
material_name="material-name",
ignore_patterns={"ignoreMe", "ignoreThisToo"},
polling=False))
self.assertEquals(p, pipeline)
self.assertEquals("branch", pipeline.git_branch)
self.assertEquals("foo", pipeline.git_material.destination_directory)
self.assertEquals("material-name", pipeline.git_material.material_name)
self.assertEquals({"ignoreMe", "ignoreThisToo"}, pipeline.git_material.ignore_patterns)
self.assertFalse(pipeline.git_material.polling, "git polling")
def test_throws_exception_if_no_git_url(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
self.assertEquals(False, pipeline.has_single_git_material)
try:
url = pipeline.git_url
self.fail("should have thrown exception")
except RuntimeError:
pass
def test_git_url_throws_exception_if_multiple_git_materials(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
pipeline.ensure_material(GitMaterial("<EMAIL>:springersbm/one.git"))
pipeline.ensure_material(GitMaterial("<EMAIL>:springersbm/two.git"))
self.assertEquals(False, pipeline.has_single_git_material)
try:
url = pipeline.git_url
self.fail("should have thrown exception")
except RuntimeError:
pass
def test_set_git_url_throws_exception_if_multiple_git_materials(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
pipeline.ensure_material(GitMaterial("<EMAIL>:springersbm/one.git"))
pipeline.ensure_material(GitMaterial("<EMAIL>:springersbm/two.git"))
try:
pipeline.set_git_url("<EMAIL>:springersbm/three.git")
self.fail("should have thrown exception")
except RuntimeError:
pass
def test_can_add_git_material(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
p = pipeline.ensure_material(GitMaterial("<EMAIL>:springersbm/changed.git"))
self.assertEquals(p, pipeline)
self.assertEquals("<EMAIL>:springersbm/changed.git", pipeline.git_url)
def test_can_ensure_git_material(self):
pipeline = typical_pipeline()
pipeline.ensure_material(GitMaterial("<EMAIL>:springersbm/gomatic.git"))
self.assertEquals("<EMAIL>:springersbm/gomatic.git", pipeline.git_url)
self.assertEquals([GitMaterial("<EMAIL>:springersbm/gomatic.git")], pipeline.materials)
def test_can_have_multiple_git_materials(self):
pipeline = typical_pipeline()
pipeline.ensure_material(GitMaterial("<EMAIL>:springersbm/changed.git"))
self.assertEquals([GitMaterial("<EMAIL>:springersbm/gomatic.git"), GitMaterial("<EMAIL>:springersbm/changed.git")],
pipeline.materials)
def test_pipelines_can_have_pipeline_materials(self):
pipeline = more_options_pipeline()
self.assertEquals(2, len(pipeline.materials))
self.assertEquals(GitMaterial('<EMAIL>:springersbm/gomatic.git', branch="a-branch", material_name="some-material-name", polling=False),
pipeline.materials[0])
def test_pipelines_can_have_more_complicated_pipeline_materials(self):
pipeline = more_options_pipeline()
self.assertEquals(2, len(pipeline.materials))
self.assertEquals(True, pipeline.materials[0].is_git)
self.assertEquals(PipelineMaterial('pipeline2', 'build'), pipeline.materials[1])
def test_pipelines_can_have_no_materials(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
self.assertEquals(0, len(pipeline.materials))
def test_can_add_pipeline_material(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
p = pipeline.ensure_material(PipelineMaterial('deploy-qa', 'baseline-user-data'))
self.assertEquals(p, pipeline)
self.assertEquals(PipelineMaterial('deploy-qa', 'baseline-user-data'), pipeline.materials[0])
def test_can_add_more_complicated_pipeline_material(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
p = pipeline.ensure_material(PipelineMaterial('p', 's', 'm'))
self.assertEquals(p, pipeline)
self.assertEquals(PipelineMaterial('p', 's', 'm'), pipeline.materials[0])
def test_can_ensure_pipeline_material(self):
pipeline = more_options_pipeline()
self.assertEquals(2, len(pipeline.materials))
pipeline.ensure_material(PipelineMaterial('pipeline2', 'build'))
self.assertEquals(2, len(pipeline.materials))
def test_can_remove_all_pipeline_materials(self):
pipeline = more_options_pipeline()
pipeline.remove_materials()
self.assertEquals(0, len(pipeline.materials))
def test_materials_are_sorted(self):
go_cd_configurator = GoCdConfigurator(empty_config())
pipeline = go_cd_configurator.ensure_pipeline_group("g").ensure_pipeline("p")
pipeline.ensure_material(PipelineMaterial('zeta', 'build'))
pipeline.ensure_material(GitMaterial('<EMAIL>:springersbm/zebra.git'))
pipeline.ensure_material(PipelineMaterial('alpha', 'build'))
pipeline.ensure_material(GitMaterial('<EMAIL>:springersbm/art.git'))
pipeline.ensure_material(PipelineMaterial('theta', 'build'))
pipeline.ensure_material(GitMaterial('<EMAIL>:springersbm/this.git'))
xml = parseString(go_cd_configurator.config)
materials = xml.getElementsByTagName('materials')[0].childNodes
self.assertEquals('git', materials[0].tagName)
self.assertEquals('git', materials[1].tagName)
self.assertEquals('git', materials[2].tagName)
self.assertEquals('pipeline', materials[3].tagName)
self.assertEquals('pipeline', materials[4].tagName)
self.assertEquals('pipeline', materials[5].tagName)
self.assertEquals('<EMAIL>:springersbm/art.git', materials[0].attributes['url'].value)
self.assertEquals('<EMAIL>:springersbm/this.git', materials[1].attributes['url'].value)
self.assertEquals('<EMAIL>:springersbm/zebra.git', materials[2].attributes['url'].value)
self.assertEquals('alpha', materials[3].attributes['pipelineName'].value)
self.assertEquals('theta', materials[4].attributes['pipelineName'].value)
self.assertEquals('zeta', materials[5].attributes['pipelineName'].value)
def test_can_set_pipeline_git_url_for_new_pipeline(self):
pipeline_group = standard_pipeline_group()
new_pipeline = pipeline_group.ensure_pipeline("some_name")
new_pipeline.set_git_url("<EMAIL>:springersbm/changed.git")
self.assertEquals("<EMAIL>:springersbm/changed.git", new_pipeline.git_url)
def test_pipelines_do_not_have_to_be_based_on_template(self):
pipeline = more_options_pipeline()
self.assertFalse(pipeline.is_based_on_template)
def test_pipelines_can_be_based_on_template(self):
pipeline = GoCdConfigurator(config('pipeline-based-on-template')).ensure_pipeline_group('defaultGroup').find_pipeline('siberian')
assert isinstance(pipeline, Pipeline)
self.assertTrue(pipeline.is_based_on_template)
template = GoCdConfigurator(config('pipeline-based-on-template')).templates[0]
self.assertEquals(template, pipeline.template)
def test_pipelines_can_be_created_based_on_template(self):
configurator = GoCdConfigurator(empty_config())
configurator.ensure_template('temple').ensure_stage('s').ensure_job('j')
pipeline = configurator.ensure_pipeline_group("g").ensure_pipeline('p').set_template_name('temple')
self.assertEquals('temple', pipeline.template.name)
def test_pipelines_have_environment_variables(self):
pipeline = typical_pipeline()
self.assertEquals({"JAVA_HOME": "/opt/java/jdk-1.8"}, pipeline.environment_variables)
def test_pipelines_have_encrypted_environment_variables(self):
pipeline = GoCdConfigurator(config('config-with-encrypted-variable')).ensure_pipeline_group("defaultGroup").find_pipeline("example")
self.assertEquals({"MY_SECURE_PASSWORD": "<PASSWORD>=="}, pipeline.encrypted_environment_variables)
def test_pipelines_have_unencrypted_secure_environment_variables(self):
pipeline = GoCdConfigurator(config('config-with-unencrypted-secure-variable')).ensure_pipeline_group("defaultGroup").find_pipeline("example")
self.assertEquals({"MY_SECURE_PASSWORD": "<PASSWORD>"}, pipeline.unencrypted_secure_environment_variables)
def test_can_add_environment_variables_to_pipeline(self):
pipeline = empty_pipeline()
p = pipeline.ensure_environment_variables({"new": "one", "again": "two"})
self.assertEquals(p, pipeline)
self.assertEquals({"new": "one", "again": "two"}, pipeline.environment_variables)
def test_can_add_encrypted_secure_environment_variables_to_pipeline(self):
pipeline = empty_pipeline()
pipeline.ensure_encrypted_environment_variables({"new": "one", "again": "two"})
self.assertEquals({"new": "one", "again": "two"}, pipeline.encrypted_environment_variables)
def test_can_add_unencrypted_secure_environment_variables_to_pipeline(self):
pipeline = empty_pipeline()
pipeline.ensure_unencrypted_secure_environment_variables({"new": "one", "again": "two"})
self.assertEquals({"new": "one", "again": "two"}, pipeline.unencrypted_secure_environment_variables)
def test_can_add_environment_variables_to_new_pipeline(self):
pipeline = typical_pipeline()
pipeline.ensure_environment_variables({"new": "one"})
self.assertEquals({"JAVA_HOME": "/opt/java/jdk-1.8", "new": "one"}, pipeline.environment_variables)
def test_can_modify_environment_variables_of_pipeline(self):
pipeline = typical_pipeline()
pipeline.ensure_environment_variables({"new": "one", "JAVA_HOME": "/opt/java/jdk-1.1"})
self.assertEquals({"JAVA_HOME": "/opt/java/jdk-1.1", "new": "one"}, pipeline.environment_variables)
def test_can_remove_all_environment_variables(self):
pipeline = typical_pipeline()
p = pipeline.without_any_environment_variables()
self.assertEquals(p, pipeline)
self.assertEquals({}, pipeline.environment_variables)
def test_can_remove_specific_environment_variable(self):
pipeline = empty_pipeline()
pipeline.ensure_encrypted_environment_variables({'a': 's'})
pipeline.ensure_environment_variables({'c': 'v', 'd': 'f'})
pipeline.remove_environment_variable('d')
p = pipeline.remove_environment_variable('unknown')
self.assertEquals(p, pipeline)
self.assertEquals({'a': 's'}, pipeline.encrypted_environment_variables)
self.assertEquals({'c': 'v'}, pipeline.environment_variables)
def test_environment_variables_get_added_in_sorted_order_to_reduce_config_thrash(self):
go_cd_configurator = GoCdConfigurator(empty_config())
pipeline = go_cd_configurator \
.ensure_pipeline_group('P.Group') \
.ensure_pipeline('P.Name')
pipeline.ensure_environment_variables({"badger": "a", "xray": "a"})
pipeline.ensure_environment_variables({"ant": "a2", "zebra": "a"})
xml = parseString(go_cd_configurator.config)
names = [e.getAttribute('name') for e in xml.getElementsByTagName('variable')]
self.assertEquals([u'ant', u'badger', u'xray', u'zebra'], names)
def test_encrypted_environment_variables_get_added_in_sorted_order_to_reduce_config_thrash(self):
go_cd_configurator = GoCdConfigurator(empty_config())
pipeline = go_cd_configurator \
.ensure_pipeline_group('P.Group') \
.ensure_pipeline('P.Name')
pipeline.ensure_encrypted_environment_variables({"badger": "a", "xray": "a"})
pipeline.ensure_encrypted_environment_variables({"ant": "a2", "zebra": "a"})
xml = parseString(go_cd_configurator.config)
names = [e.getAttribute('name') for e in xml.getElementsByTagName('variable')]
self.assertEquals([u'ant', u'badger', u'xray', u'zebra'], names)
def test_unencrypted_environment_variables_do_not_have_secure_attribute_in_order_to_reduce_config_thrash(self):
go_cd_configurator = GoCdConfigurator(empty_config())
pipeline = go_cd_configurator \
.ensure_pipeline_group('P.Group') \
.ensure_pipeline('P.Name')
pipeline.ensure_environment_variables({"ant": "a"})
xml = parseString(go_cd_configurator.config)
secure_attributes = [e.getAttribute('secure') for e in xml.getElementsByTagName('variable')]
# attributes that are missing are returned as empty
self.assertEquals([''], secure_attributes, "should not have any 'secure' attributes")
def test_cannot_have_environment_variable_which_is_both_secure_and_insecure(self):
go_cd_configurator = GoCdConfigurator(empty_config())
pipeline = go_cd_configurator \
.ensure_pipeline_group('P.Group') \
.ensure_pipeline('P.Name')
pipeline.ensure_unencrypted_secure_environment_variables({"ant": "a"})
pipeline.ensure_environment_variables({"ant": "b"}) # not secure
self.assertEquals({"ant": "b"}, pipeline.environment_variables)
self.assertEquals({}, pipeline.unencrypted_secure_environment_variables)
def test_can_change_environment_variable_from_secure_to_insecure(self):
go_cd_configurator = GoCdConfigurator(empty_config())
pipeline = go_cd_configurator \
.ensure_pipeline_group('P.Group') \
.ensure_pipeline('P.Name')
pipeline.ensure_unencrypted_secure_environment_variables({"ant": "a", "badger": "b"})
pipeline.ensure_environment_variables({"ant": "b"})
self.assertEquals({"ant": "b"}, pipeline.environment_variables)
self.assertEquals({"badger": "b"}, pipeline.unencrypted_secure_environment_variables)
def test_pipelines_have_parameters(self):
pipeline = more_options_pipeline()
self.assertEquals({"environment": "qa"}, pipeline.parameters)
def test_pipelines_have_no_parameters(self):
pipeline = typical_pipeline()
self.assertEquals({}, pipeline.parameters)
def test_can_add_params_to_pipeline(self):
pipeline = typical_pipeline()
p = pipeline.ensure_parameters({"new": "one", "again": "two"})
self.assertEquals(p, pipeline)
self.assertEquals({"new": "one", "again": "two"}, pipeline.parameters)
def test_can_modify_parameters_of_pipeline(self):
pipeline = more_options_pipeline()
pipeline.ensure_parameters({"new": "one", "environment": "qa55"})
self.assertEquals({"environment": "qa55", "new": "one"}, pipeline.parameters)
def test_can_remove_all_parameters(self):
pipeline = more_options_pipeline()
p = pipeline.without_any_parameters()
self.assertEquals(p, pipeline)
self.assertEquals({}, pipeline.parameters)
def test_can_have_timer(self):
pipeline = more_options_pipeline()
self.assertEquals(True, pipeline.has_timer)
self.assertEquals("0 15 22 * * ?", pipeline.timer)
self.assertEquals(False, pipeline.timer_triggers_only_on_changes)
def test_can_have_timer_with_onlyOnChanges_option(self):
pipeline = GoCdConfigurator(config('config-with-more-options-pipeline')).ensure_pipeline_group('P.Group').find_pipeline('pipeline2')
self.assertEquals(True, pipeline.has_timer)
self.assertEquals("0 0 22 ? * MON-FRI", pipeline.timer)
self.assertEquals(True, pipeline.timer_triggers_only_on_changes)
def test_need_not_have_timer(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
self.assertEquals(False, pipeline.has_timer)
try:
timer = pipeline.timer
self.fail('should have thrown an exception')
except RuntimeError:
pass
def test_can_set_timer(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
p = pipeline.set_timer("one two three")
self.assertEquals(p, pipeline)
self.assertEquals("one two three", pipeline.timer)
def test_can_set_timer_with_only_on_changes_flag_off(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
p = pipeline.set_timer("one two three", only_on_changes=False)
self.assertEquals(p, pipeline)
self.assertEquals("one two three", pipeline.timer)
self.assertEquals(False, pipeline.timer_triggers_only_on_changes)
def test_can_set_timer_with_only_on_changes_flag(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
p = pipeline.set_timer("one two three", only_on_changes=True)
self.assertEquals(p, pipeline)
self.assertEquals("one two three", pipeline.timer)
self.assertEquals(True, pipeline.timer_triggers_only_on_changes)
def test_can_remove_timer(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
pipeline.set_timer("one two three")
p = pipeline.remove_timer()
self.assertEquals(p, pipeline)
self.assertFalse(pipeline.has_timer)
def test_can_have_label_template(self):
pipeline = typical_pipeline()
self.assertEquals("something-${COUNT}", pipeline.label_template)
self.assertEquals(True, pipeline.has_label_template)
def test_might_not_have_label_template(self):
pipeline = more_options_pipeline() # TODO swap label with typical
self.assertEquals(False, pipeline.has_label_template)
try:
label_template = pipeline.label_template
self.fail('should have thrown an exception')
except RuntimeError:
pass
def test_can_set_label_template(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
p = pipeline.set_label_template("some label")
self.assertEquals(p, pipeline)
self.assertEquals("some label", pipeline.label_template)
def test_can_set_default_label_template(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
p = pipeline.set_default_label_template()
self.assertEquals(p, pipeline)
self.assertEquals(DEFAULT_LABEL_TEMPLATE, pipeline.label_template)
def test_can_set_automatic_pipeline_locking(self):
configurator = GoCdConfigurator(empty_config())
pipeline = configurator.ensure_pipeline_group("new_group").ensure_pipeline("some_name")
p = pipeline.set_automatic_pipeline_locking()
self.assertEquals(p, pipeline)
self.assertEquals(True, pipeline.has_automatic_pipeline_locking)
def test_pipelines_to_dict(self):
pipeline = typical_pipeline()
pp_dict = pipeline.to_dict("P.Group")
self.assertEquals('typical', pp_dict['name'])
self.assertEquals({'JAVA_HOME': '/opt/java/jdk-1.8'},
pp_dict['environment_variables'])
self.assertEquals({}, pp_dict['encrypted_environment_variables'])
self.assertEquals({}, pp_dict['parameters'])
self.assertEquals(2, len(pp_dict['stages']))
self.assertEquals(1, len(pp_dict['materials']))
self.assertFalse(pp_dict.has_key('template'))
self.assertTrue(pp_dict['cron_timer_spec'] is None)
self.assertFalse(pp_dict['automatic_pipeline_locking'])
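# Pipeline-group behaviour: finding, adding, replacing and removing pipelines within a group.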
class TestPipelineGroup(unittest.TestCase):
def _pipeline_group_from_config(self):
return GoCdConfigurator(config('config-with-two-pipelines')).ensure_pipeline_group('P.Group')
def test_pipeline_groups_have_names(self):
pipeline_group = standard_pipeline_group()
self.assertEquals("P.Group", pipeline_group.name)
def test_pipeline_groups_have_pipelines(self):
pipeline_group = self._pipeline_group_from_config()
self.assertEquals(2, len(pipeline_group.pipelines))
def test_can_add_pipeline(self):
configurator = GoCdConfigurator(empty_config())
pipeline_group = configurator.ensure_pipeline_group("new_group")
new_pipeline = pipeline_group.ensure_pipeline("some_name")
self.assertEquals(1, len(pipeline_group.pipelines))
self.assertEquals(new_pipeline, pipeline_group.pipelines[0])
self.assertEquals("some_name", new_pipeline.name)
self.assertEquals(False, new_pipeline.has_single_git_material)
self.assertEquals(False, new_pipeline.has_label_template)
self.assertEquals(False, new_pipeline.has_automatic_pipeline_locking)
def test_can_find_pipeline(self):
found_pipeline = self._pipeline_group_from_config().find_pipeline("pipeline2")
self.assertEquals("pipeline2", found_pipeline.name)
self.assertTrue(self._pipeline_group_from_config().has_pipeline("pipeline2"))
def test_does_not_find_missing_pipeline(self):
self.assertFalse(self._pipeline_group_from_config().has_pipeline("unknown-pipeline"))
try:
self._pipeline_group_from_config().find_pipeline("unknown-pipeline")
self.fail("should have thrown exception")
except RuntimeError as e:
self.assertTrue(e.message.count("unknown-pipeline"))
def test_can_remove_pipeline(self):
pipeline_group = self._pipeline_group_from_config()
pipeline_group.ensure_removal_of_pipeline("pipeline1")
self.assertEquals(1, len(pipeline_group.pipelines))
try:
pipeline_group.find_pipeline("pipeline1")
self.fail("should have thrown exception")
except RuntimeError:
pass
def test_ensuring_replacement_of_pipeline_leaves_it_empty_but_in_same_place(self):
pipeline_group = self._pipeline_group_from_config()
self.assertEquals("pipeline1", pipeline_group.pipelines[0].name)
pipeline = pipeline_group.find_pipeline("pipeline1")
pipeline.set_label_template("something")
self.assertEquals(True, pipeline.has_label_template)
p = pipeline_group.ensure_replacement_of_pipeline("pipeline1")
self.assertEquals(p, pipeline_group.pipelines[0])
self.assertEquals("pipeline1", p.name)
self.assertEquals(False, p.has_label_template)
def test_can_ensure_pipeline_removal(self):
pipeline_group = self._pipeline_group_from_config()
pg = pipeline_group.ensure_removal_of_pipeline("already-removed-pipeline")
self.assertEquals(pg, pipeline_group)
self.assertEquals(2, len(pipeline_group.pipelines))
try:
pipeline_group.find_pipeline("already-removed-pipeline")
self.fail("should have thrown exception")
except RuntimeError:
pass
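# Top-level configurator behaviour: change detection, server settings, pipeline groups, agents,
# templates, and the element ordering the GoCD schema requires.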
class TestGoCdConfigurator(unittest.TestCase):
def test_can_tell_if_there_is_no_change_to_save(self):
configurator = GoCdConfigurator(config('config-with-two-pipeline-groups'))
p = configurator.ensure_pipeline_group('Second.Group').ensure_replacement_of_pipeline('smoke-tests')
p.set_git_url('<EMAIL>:springersbm/gomatic.git')
p.ensure_stage('build').ensure_job('compile').ensure_task(ExecTask(['make', 'source code']))
self.assertFalse(configurator.has_changes)
def test_can_tell_if_there_is_a_change_to_save(self):
configurator = GoCdConfigurator(config('config-with-two-pipeline-groups'))
p = configurator.ensure_pipeline_group('Second.Group').ensure_replacement_of_pipeline('smoke-tests')
p.set_git_url('<EMAIL>:springersbm/gomatic.git')
p.ensure_stage('moo').ensure_job('bar')
self.assertTrue(configurator.has_changes)
def test_keeps_schema_version(self):
empty_config = FakeHostRestClient(empty_config_xml.replace('schemaVersion="72"', 'schemaVersion="73"'), "empty_config()")
configurator = GoCdConfigurator(empty_config)
self.assertEquals(1, configurator.config.count('schemaVersion="73"'))
def test_can_find_out_server_settings(self):
configurator = GoCdConfigurator(config('config-with-server-settings'))
self.assertEquals("/some/dir", configurator.artifacts_dir)
self.assertEquals("http://10.20.30.40/", configurator.site_url)
self.assertEquals("my_ci_server", configurator.agent_auto_register_key)
self.assertEquals(Decimal("55.0"), configurator.purge_start)
self.assertEquals(Decimal("75.0"), configurator.purge_upto)
def test_can_find_out_server_settings_when_not_set(self):
configurator = GoCdConfigurator(config('config-with-no-server-settings'))
self.assertEquals(None, configurator.artifacts_dir)
self.assertEquals(None, configurator.site_url)
self.assertEquals(None, configurator.agent_auto_register_key)
self.assertEquals(None, configurator.purge_start)
self.assertEquals(None, configurator.purge_upto)
def test_can_set_server_settings(self):
configurator = GoCdConfigurator(config('config-with-no-server-settings'))
configurator.artifacts_dir = "/a/dir"
configurator.site_url = "http://192.168.127.12/"
configurator.agent_auto_register_key = "a_ci_server"
configurator.purge_start = Decimal("44.0")
configurator.purge_upto = Decimal("88.0")
self.assertEquals("/a/dir", configurator.artifacts_dir)
self.assertEquals("http://1.2.3.4/", configurator.site_url)
self.assertEquals("a_ci_server", configurator.agent_auto_register_key)
self.assertEquals(Decimal("44.0"), configurator.purge_start)
self.assertEquals(Decimal("88.0"), configurator.purge_upto)
def test_can_have_no_pipeline_groups(self):
self.assertEquals(0, len(GoCdConfigurator(empty_config()).pipeline_groups))
def test_gets_all_pipeline_groups(self):
self.assertEquals(2, len(GoCdConfigurator(config('config-with-two-pipeline-groups')).pipeline_groups))
def test_can_get_initial_config_md5(self):
configurator = GoCdConfigurator(empty_config())
self.assertEquals("42", configurator._initial_md5)
def test_config_is_updated_as_result_of_updating_part_of_it(self):
configurator = GoCdConfigurator(config('config-with-just-agents'))
agent = configurator.agents[0]
self.assertEquals(2, len(agent.resources))
agent.ensure_resource('a-resource-that-it-does-not-already-have')
configurator_based_on_new_config = GoCdConfigurator(FakeHostRestClient(configurator.config))
self.assertEquals(3, len(configurator_based_on_new_config.agents[0].resources))
def test_can_remove_agent(self):
configurator = GoCdConfigurator(config('config-with-just-agents'))
self.assertEquals(2, len(configurator.agents))
configurator.ensure_removal_of_agent('go-agent-1')
self.assertEquals(1, len(configurator.agents))
self.assertEquals('go-agent-2', configurator.agents[0].hostname)
def test_can_add_pipeline_group(self):
configurator = GoCdConfigurator(empty_config())
self.assertEquals(0, len(configurator.pipeline_groups))
new_pipeline_group = configurator.ensure_pipeline_group("a_new_group")
self.assertEquals(1, len(configurator.pipeline_groups))
self.assertEquals(new_pipeline_group, configurator.pipeline_groups[-1])
self.assertEquals("a_new_group", new_pipeline_group.name)
def test_can_ensure_pipeline_group_exists(self):
configurator = GoCdConfigurator(config('config-with-two-pipeline-groups'))
self.assertEquals(2, len(configurator.pipeline_groups))
pre_existing_pipeline_group = configurator.ensure_pipeline_group('Second.Group')
self.assertEquals(2, len(configurator.pipeline_groups))
self.assertEquals('Second.Group', pre_existing_pipeline_group.name)
def test_can_remove_all_pipeline_groups(self):
configurator = GoCdConfigurator(config('config-with-two-pipeline-groups'))
s = configurator.remove_all_pipeline_groups()
self.assertEquals(s, configurator)
self.assertEquals(0, len(configurator.pipeline_groups))
def test_can_remove_pipeline_group(self):
configurator = GoCdConfigurator(config('config-with-two-pipeline-groups'))
s = configurator.ensure_removal_of_pipeline_group('P.Group')
self.assertEquals(s, configurator)
self.assertEquals(1, len(configurator.pipeline_groups))
def test_can_ensure_removal_of_pipeline_group(self):
configurator = GoCdConfigurator(config('config-with-two-pipeline-groups'))
configurator.ensure_removal_of_pipeline_group('pipeline-group-that-has-already-been-removed')
self.assertEquals(2, len(configurator.pipeline_groups))
def test_can_have_templates(self):
templates = GoCdConfigurator(config('config-with-just-templates')).templates
self.assertEquals(2, len(templates))
self.assertEquals('api-component', templates[0].name)
self.assertEquals('deploy-stack', templates[1].name)
self.assertEquals('deploy-components', templates[1].stages[0].name)
def test_can_have_no_templates(self):
self.assertEquals(0, len(GoCdConfigurator(empty_config()).templates))
def test_can_add_template(self):
configurator = GoCdConfigurator(empty_config())
template = configurator.ensure_template('foo')
self.assertEquals(1, len(configurator.templates))
self.assertEquals(template, configurator.templates[0])
        self.assertTrue(isinstance(configurator.templates[0], Pipeline), "so all methods used to configure a pipeline don't need to be tested separately for templates")
def test_can_ensure_template(self):
configurator = GoCdConfigurator(config('config-with-just-templates'))
template = configurator.ensure_template('deploy-stack')
self.assertEquals('deploy-components', template.stages[0].name)
def test_can_ensure_replacement_of_template(self):
configurator = GoCdConfigurator(config('config-with-just-templates'))
template = configurator.ensure_replacement_of_template('deploy-stack')
self.assertEquals(0, len(template.stages))
def test_can_remove_template(self):
configurator = GoCdConfigurator(config('config-with-just-templates'))
self.assertEquals(2, len(configurator.templates))
configurator.ensure_removal_of_template('deploy-stack')
self.assertEquals(1, len(configurator.templates))
def test_if_remove_all_templates_also_remove_templates_element(self):
configurator = GoCdConfigurator(config('config-with-just-templates'))
self.assertEquals(2, len(configurator.templates))
configurator.ensure_removal_of_template('api-component')
configurator.ensure_removal_of_template('deploy-stack')
self.assertEquals(0, len(configurator.templates))
xml = configurator.config
root = ET.fromstring(xml)
self.assertEqual(['server'], [element.tag for element in root])
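    # The GoCD schema requires top-level elements in a fixed order, so the configurator is expected
    # to reorder them on write; the next few tests pin that order down.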
def test_top_level_elements_get_reordered_to_please_go(self):
configurator = GoCdConfigurator(config('config-with-agents-and-templates-but-without-pipelines'))
configurator.ensure_pipeline_group("some_group").ensure_pipeline("some_pipeline")
xml = configurator.config
root = ET.fromstring(xml)
self.assertEquals("pipelines", root[0].tag)
self.assertEquals("templates", root[1].tag)
self.assertEquals("agents", root[2].tag)
def test_top_level_elements_with_environment_get_reordered_to_please_go(self):
configurator = GoCdConfigurator(config('config-with-pipelines-environments-and-agents'))
configurator.ensure_pipeline_group("P.Group").ensure_pipeline("some_pipeline")
xml = configurator.config
root = ET.fromstring(xml)
self.assertEqual(['server', 'pipelines', 'environments', 'agents'], [element.tag for element in root])
def test_top_level_elements_that_cannot_be_created_get_reordered_to_please_go(self):
configurator = GoCdConfigurator(config('config-with-many-of-the-top-level-elements-that-cannot-be-added'))
configurator.ensure_pipeline_group("P.Group").ensure_pipeline("some_pipeline")
xml = configurator.config
root = ET.fromstring(xml)
self.assertEqual(['server', 'repositories', 'scms', 'pipelines', 'environments', 'agents'],
[element.tag for element in root])
def test_elements_can_be_created_in_order_to_please_go(self):
configurator = GoCdConfigurator(empty_config())
pipeline = configurator.ensure_pipeline_group("some_group").ensure_pipeline("some_pipeline")
pipeline.ensure_parameters({'p': 'p'})
pipeline.set_timer("some timer")
pipeline.ensure_environment_variables({'pe': 'pe'})
pipeline.set_git_url("gurl")
stage = pipeline.ensure_stage("s")
stage.ensure_environment_variables({'s': 's'})
job = stage.ensure_job("j")
job.ensure_environment_variables({'j': 'j'})
job.ensure_task(ExecTask(['ls']))
job.ensure_tab(Tab("n", "p"))
job.ensure_resource("r")
job.ensure_artifacts({Artifact.get_build_artifact('s', 'd')})
xml = configurator.config
pipeline_root = ET.fromstring(xml).find('pipelines').find('pipeline')
self.assertEquals("params", pipeline_root[0].tag)
self.assertEquals("timer", pipeline_root[1].tag)
self.assertEquals("environmentvariables", pipeline_root[2].tag)
self.assertEquals("materials", pipeline_root[3].tag)
self.assertEquals("stage", pipeline_root[4].tag)
self.__check_stage(pipeline_root)
def test_elements_are_reordered_in_order_to_please_go(self):
configurator = GoCdConfigurator(empty_config())
pipeline = configurator.ensure_pipeline_group("some_group").ensure_pipeline("some_pipeline")
pipeline.set_git_url("gurl")
pipeline.ensure_environment_variables({'pe': 'pe'})
pipeline.set_timer("some timer")
pipeline.ensure_parameters({'p': 'p'})
self.__configure_stage(pipeline)
self.__configure_stage(configurator.ensure_template('templ'))
xml = configurator.config
pipeline_root = ET.fromstring(xml).find('pipelines').find('pipeline')
self.assertEquals("params", pipeline_root[0].tag)
self.assertEquals("timer", pipeline_root[1].tag)
self.assertEquals("environmentvariables", pipeline_root[2].tag)
self.assertEquals("materials", pipeline_root[3].tag)
self.assertEquals("stage", pipeline_root[4].tag)
self.__check_stage(pipeline_root)
template_root = ET.fromstring(xml).find('templates').find('pipeline')
self.assertEquals("stage", template_root[0].tag)
self.__check_stage(template_root)
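    # Asserts the element order GoCD expects inside a stage and its job:
    # environmentvariables, jobs; then environmentvariables, tasks, tabs, resources, artifacts.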
def __check_stage(self, pipeline_root):
stage_root = pipeline_root.find('stage')
self.assertEquals("environmentvariables", stage_root[0].tag)
self.assertEquals("jobs", stage_root[1].tag)
job_root = stage_root.find('jobs').find('job')
self.assertEquals("environmentvariables", job_root[0].tag)
self.assertEquals("tasks", job_root[1].tag)
self.assertEquals("tabs", job_root[2].tag)
self.assertEquals("resources", job_root[3].tag)
self.assertEquals("artifacts", job_root[4].tag)
def __configure_stage(self, pipeline):
stage = pipeline.ensure_stage("s")
job = stage.ensure_job("j")
stage.ensure_environment_variables({'s': 's'})
job.ensure_tab(Tab("n", "p"))
job.ensure_artifacts({Artifact.get_build_artifact('s', 'd')})
job.ensure_task(ExecTask(['ls']))
job.ensure_resource("r")
job.ensure_environment_variables({'j': 'j'})
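# Strips whitespace, newlines and line-continuation backslashes so strings (e.g. generated python)
# can be compared independently of layout.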
def simplified(s):
return s.strip().replace("\t", "").replace("\n", "").replace("\\", "").replace(" ", "")
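# Serialises a pipeline (or a template) back to XML via its underlying element, so that before/after
# configurations can be compared as text in the round-trip tests below.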
def sneakily_converted_to_xml(pipeline):
if pipeline.is_template:
return ET.tostring(pipeline.element)
else:
return ET.tostring(pipeline.parent.element)
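# Round-trip tests: reverse-engineer a pipeline into python with as_python(), exec that python,
# and check the resulting configuration is XML-identical to the original.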
class TestReverseEngineering(unittest.TestCase):
def check_round_trip_pipeline(self, configurator, before, show=False):
reverse_engineered_python = configurator.as_python(before, with_save=False)
if show:
print('r' * 88)
print(reverse_engineered_python)
pipeline = "evaluation failed"
template = "evaluation failed"
exec reverse_engineered_python
# exec reverse_engineered_python.replace("from gomatic import *", "from gomatic.go_cd_configurator import *")
xml_before = sneakily_converted_to_xml(before)
# noinspection PyTypeChecker
xml_after = sneakily_converted_to_xml(pipeline)
if show:
print('b' * 88)
print(prettify(xml_before))
print('a' * 88)
print(prettify(xml_after))
self.assertEquals(xml_before, xml_after)
if before.is_based_on_template:
# noinspection PyTypeChecker
self.assertEquals(sneakily_converted_to_xml(before.template), sneakily_converted_to_xml(template))
def test_can_round_trip_simplest_pipeline(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_standard_label(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_default_label_template()
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_non_standard_label(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_label_template("non standard")
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_automatic_pipeline_locking(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_automatic_pipeline_locking()
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_pipeline_material(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").ensure_material(PipelineMaterial("p", "s", "m"))
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_multiple_git_materials(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
before.ensure_material(GitMaterial("giturl1", "b", "m1"))
before.ensure_material(GitMaterial("giturl2"))
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_git_url(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_url("some git url")
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_git_extras(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_material(
GitMaterial("some git url",
branch="some branch",
material_name="some material name",
polling=False,
ignore_patterns={"excluded", "things"},
destination_directory='foo/bar'))
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_git_branch_only(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_material(GitMaterial("some git url", branch="some branch"))
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_git_material_only(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_material(GitMaterial("some git url", material_name="m name"))
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_git_polling_only(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_material(GitMaterial("some git url", polling=False))
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_git_ignore_patterns_only_ISSUE_4(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_material(GitMaterial("git url", ignore_patterns={"ex", "cluded"}))
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_git_destination_directory_only(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_material(GitMaterial("git url", destination_directory='foo/bar'))
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_pipeline_parameters(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").ensure_parameters({"p": "v"})
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_pipeline_environment_variables(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").ensure_environment_variables({"p": "v"})
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_pipeline_encrypted_environment_variables(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").ensure_encrypted_environment_variables({"p": "v"})
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_pipeline_unencrypted_secure_environment_variables(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").ensure_unencrypted_secure_environment_variables({"p": "v"})
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_timer(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_timer("some timer")
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_timer_only_on_changes(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_timer("some timer", only_on_changes=True)
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_stage_bits(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
before.ensure_stage("stage1").ensure_environment_variables({"k": "v"}).set_clean_working_dir().set_has_manual_approval().set_fetch_materials(False)
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_stages(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
before.ensure_stage("stage1")
before.ensure_stage("stage2")
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_job(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
before.ensure_stage("stage").ensure_job("job")
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_job_bits(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
before.ensure_stage("stage").ensure_job("job") \
.ensure_artifacts({Artifact.get_build_artifact("s", "d"), Artifact.get_test_artifact("sauce")}) \
.ensure_environment_variables({"k": "v"}) \
.ensure_resource("r") \
.ensure_tab(Tab("n", "p")) \
.set_timeout("23") \
.set_runs_on_all_agents()
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_jobs(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
stage = before.ensure_stage("stage")
stage.ensure_job("job1")
stage.ensure_job("job2")
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_tasks(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
job = before.ensure_stage("stage").ensure_job("job")
job.add_task(ExecTask(["one", "two"], working_dir="somewhere", runif="failed"))
job.add_task(ExecTask(["one", "two"], working_dir="somewhere", runif="failed"))
job.ensure_task(ExecTask(["one"], working_dir="somewhere else"))
job.ensure_task(ExecTask(["two"], runif="any"))
job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactFile('f'), runif="any"))
job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('d')))
job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('d'), dest="somewhere-else"))
job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('d'), dest="somewhere-else", runif="any"))
job.ensure_task(RakeTask('t1', runif="any"))
job.ensure_task(RakeTask('t2'))
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_pipeline_base_on_template(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_template_name("temple")
configurator.ensure_template("temple").ensure_stage("stage").ensure_job("job")
self.check_round_trip_pipeline(configurator, before)
def test_can_reverse_engineer_pipeline(self):
configurator = GoCdConfigurator(config('config-with-more-options-pipeline'))
actual = configurator.as_python(more_options_pipeline(), with_save=False)
expected = """#!/usr/bin/env python
from gomatic import *
configurator = GoCdConfigurator(FakeConfig(whatever))
pipeline = configurator\
.ensure_pipeline_group("P.Group")\
.ensure_replacement_of_pipeline("more-options")\
.set_timer("0 15 22 * * ?")\
.set_git_material(GitMaterial("<EMAIL>:springersbm/gomatic.git", branch="a-branch", material_name="some-material-name", polling=False))\
.ensure_material(PipelineMaterial("pipeline2", "build")).ensure_environment_variables({'JAVA_HOME': '/opt/java/jdk-1.7'})\
.ensure_parameters({'environment': 'qa'})
stage = pipeline.ensure_stage("earlyStage")
job = stage.ensure_job("earlyWorm").ensure_artifacts(set([BuildArtifact("scripts/*", "files"), BuildArtifact("target/universal/myapp*.zip", "artifacts"), TestArtifact("from", "to")])).set_runs_on_all_agents()
job.add_task(ExecTask(['ls']))
job.add_task(ExecTask(['bash', '-c', 'curl "http://domain.com/service/check?target=one+two+three&key=2714_beta%40domain.com"']))
stage = pipeline.ensure_stage("middleStage")
job = stage.ensure_job("middleJob")
stage = pipeline.ensure_stage("s1").set_fetch_materials(False)
job = stage.ensure_job("rake-job").ensure_artifacts({BuildArtifact("things/*")})
job.add_task(RakeTask("boo", "passed"))
job = stage.ensure_job("run-if-variants")
job.add_task(ExecTask(['t-passed']))
job.add_task(ExecTask(['t-none']))
job.add_task(ExecTask(['t-failed'], runif="failed"))
job.add_task(ExecTask(['t-any'], runif="any"))
job.add_task(ExecTask(['t-both'], runif="any"))
job = stage.ensure_job("variety-of-tasks")
job.add_task(RakeTask("sometarget", "passed"))
job.add_task(FetchArtifactTask("more-options", "earlyStage", "earlyWorm", FetchArtifactDir("sourceDir"), dest="destDir"))
job.add_task(FetchArtifactTask("more-options", "middleStage", "middleJob", FetchArtifactFile("someFile")))
job.add_task(ExecTask(['true']))
"""
self.assertEquals(simplified(expected), simplified(actual))
class TestXmlFormatting(unittest.TestCase):
def test_can_format_simple_xml(self):
expected = '<?xml version="1.0" ?>\n<top>\n\t<middle>stuff</middle>\n</top>'
non_formatted = "<top><middle>stuff</middle></top>"
formatted = prettify(non_formatted)
self.assertEquals(expected, formatted)
def test_can_format_more_complicated_xml(self):
expected = '<?xml version="1.0" ?>\n<top>\n\t<middle>\n\t\t<innermost>stuff</innermost>\n\t</middle>\n</top>'
non_formatted = "<top><middle><innermost>stuff</innermost></middle></top>"
formatted = prettify(non_formatted)
self.assertEquals(expected, formatted)
def test_can_format_actual_config(self):
formatted = prettify(open("test-data/config-unformatted.xml").read())
expected = open("test-data/config-formatted.xml").read()
def head(s):
return "\n".join(s.split('\n')[:10])
self.assertEquals(expected, formatted, "expected=\n%s\n%s\nactual=\n%s" % (head(expected), "=" * 88, head(formatted)))
| 2.03125 | 2 |
gui/wellplot/settings/style/wellplotstylehandler.py | adriangrepo/qreservoir | 2 | 7281 | import logging
from qrutilities.imageutils import ImageUtils
from PyQt4.QtGui import QColor
logger = logging.getLogger('console')
class WellPlotStyleHandler(object):
    '''
    Copies style settings from the well plot style widget into a WellPlotData model.
    '''
def saveDataState(self, wellPlotData, wellPlotStyleWidget):
if wellPlotStyleWidget.plotTitleOnCheckBox.isChecked():
wellPlotData.title_on = True
else:
wellPlotData.title_on = False
wellPlotData.title = wellPlotStyleWidget.plotTitleLineEdit.text()
r,g,b,a = QColor(wellPlotStyleWidget.trackBackgroundColorPushButton.color()).getRgb()
rgbString = ImageUtils.rgbToString(r,g,b)
wellPlotData.plot_background_rgb = rgbString
wellPlotData.plot_background_alpha = wellPlotStyleWidget.trackBackgroundOpacitySpinBox.value()
r,g,b,a = QColor(wellPlotStyleWidget.labelBackgroundColorPushButton.color()).getRgb()
rgbString = ImageUtils.rgbToString(r,g,b)
wellPlotData.label_background_rgb = rgbString
wellPlotData.label_background_alpha = wellPlotStyleWidget.labelBackgroundOpacitySpinBox.value()
r,g,b,a = QColor(wellPlotStyleWidget.labelForegroundColorPushButton.color()).getRgb()
rgbString = ImageUtils.rgbToString(r,g,b)
wellPlotData.label_foreground_rgb = rgbString
wellPlotData.label_foreground_alpha = wellPlotStyleWidget.labelForegroundOpacitySpinBox.value()
if wellPlotStyleWidget.singleRowLabelsCheckBox.isChecked():
wellPlotData.single_row_header_labels = True
else:
wellPlotData.single_row_header_labels = False | 2.3125 | 2 |
utm_messages/urls.py | geoffreynyaga/ANGA-UTM | 7 | 7282 | from django.conf.urls import url
from . import views
app_name = "messages"
urlpatterns = [
url(r'^$', views.InboxListView.as_view(), name='inbox'),
url(r'^sent/$', views.SentMessagesListView.as_view(), name='sent'),
url(r'^compose/$', views.MessagesCreateView.as_view(), name='compose'),
# url(r'^compose-all/$', views.SendToAll.as_view(), name='compose_to_all'),
url(r'^(?P<pk>\d+)/$', views.MessageDetailView.as_view(), name='message_detail'),
url(r'^calendar/$', views.CalendarView.as_view(), name='calendar'),
]
| 1.828125 | 2 |
server/apps/datablock/tests/test_create_worker.py | iotile/iotile_cloud | 0 | 7283 | import datetime
import json
import dateutil.parser
from django.contrib.auth import get_user_model
from django.test import Client, TestCase
from django.utils import timezone
from apps.devicelocation.models import DeviceLocation
from apps.physicaldevice.models import Device
from apps.property.models import GenericProperty
from apps.report.models import GeneratedUserReport
from apps.sqsworker.exceptions import WorkerActionHardError
from apps.stream.models import StreamId, StreamVariable
from apps.streamdata.models import StreamData
from apps.streamevent.models import StreamEventData
from apps.streamfilter.models import *
from apps.streamnote.models import StreamNote
from apps.utils.data_mask.mask_utils import get_data_mask_event, set_data_mask
from apps.utils.gid.convert import *
from apps.utils.test_util import TestMixin
from ..models import *
from ..worker.archive_device_data import ArchiveDeviceDataAction
user_model = get_user_model()
class DataBlockCreateWorkerTests(TestMixin, TestCase):
def setUp(self):
self.usersTestSetup()
self.orgTestSetup()
self.deviceTemplateTestSetup()
self.v1 = StreamVariable.objects.create_variable(
name='Var A', project=self.p1, created_by=self.u2, lid=1,
)
self.v2 = StreamVariable.objects.create_variable(
name='Var B', project=self.p1, created_by=self.u3, lid=2,
)
self.pd1 = Device.objects.create_device(project=self.p1, label='d1', template=self.dt1, created_by=self.u2)
self.pd2 = Device.objects.create_device(project=self.p1, label='d2', template=self.dt1, created_by=self.u2)
StreamId.objects.create_after_new_device(self.pd1)
StreamId.objects.create_after_new_device(self.pd2)
self.s1 = StreamId.objects.filter(variable=self.v1).first()
self.s2 = StreamId.objects.filter(variable=self.v2).first()
def tearDown(self):
StreamFilterAction.objects.all().delete()
StreamFilterTrigger.objects.all().delete()
StreamFilter.objects.all().delete()
StreamId.objects.all().delete()
StreamVariable.objects.all().delete()
GenericProperty.objects.all().delete()
Device.objects.all().delete()
StreamData.objects.all().delete()
StreamEventData.objects.all().delete()
self.deviceTemplateTestTearDown()
self.orgTestTearDown()
self.userTestTearDown()
def testDataBlockActionBadArguments(self):
with self.assertRaises(WorkerActionHardError):
ArchiveDeviceDataAction.schedule(args={})
with self.assertRaises(WorkerActionHardError):
ArchiveDeviceDataAction.schedule(args={'foobar': 5})
with self.assertRaises(WorkerActionHardError):
ArchiveDeviceDataAction.schedule(args={'data_block_slug': 'b--0000-0000-0000-0001', 'extra-bad-arg': 'foo'})
self.assertTrue(ArchiveDeviceDataAction._arguments_ok({'data_block_slug': 'b--0000-0000-0000-0001'}))
action = ArchiveDeviceDataAction()
self.assertIsNotNone(action)
with self.assertRaises(WorkerActionHardError):
action.execute(arguments={'foobar': 5})
def testDataBlockActionNoDataBlock(self):
action = ArchiveDeviceDataAction()
self.assertIsNotNone(action)
with self.assertRaises(WorkerActionHardError):
action.execute({'data_block_slug': 'b--0000-0000-0000-0001'})
def testDataBlockActionMigrateProperties(self):
db1 = DataBlock.objects.create(org=self.o1, title='test', device=self.pd1, block=1, created_by=self.u1)
GenericProperty.objects.create_int_property(slug=self.pd1.slug,
created_by=self.u1,
name='prop1', value=4)
GenericProperty.objects.create_str_property(slug=self.pd1.slug,
created_by=self.u1,
name='prop2', value='4')
GenericProperty.objects.create_bool_property(slug=self.pd1.slug,
created_by=self.u1, is_system=True,
name='@prop3', value=True)
self.assertEqual(GenericProperty.objects.object_properties_qs(self.pd1).count(), 3)
self.assertEqual(GenericProperty.objects.object_properties_qs(db1).count(), 0)
action = ArchiveDeviceDataAction()
action._block = db1
action._device = self.pd1
action._migrate_properties()
self.assertEqual(GenericProperty.objects.object_properties_qs(self.pd1).count(), 1)
self.assertEqual(GenericProperty.objects.object_properties_qs(db1).count(), 3)
def testDataBlockActionMigrateStreams(self):
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
stream1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=device, created_by=self.u2
)
stream2 = StreamId.objects.create_stream(
project=self.p1, variable=self.v2, device=device, created_by=self.u2
)
var3 = StreamVariable.objects.create_variable(
name='Var C', project=self.p1, created_by=self.u2, lid=3,
)
stream3 = StreamId.objects.create_stream(
project=self.p1, variable=var3, device=device, created_by=self.u2
)
self.assertEqual(self.p1.variables.count(), 3)
count0 = StreamId.objects.count()
self.assertEqual(device.streamids.count(), 3)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
action._clone_streams()
self.assertEqual(StreamId.objects.count(), count0 + 3)
def testDataBlockActionMigrateStreamData(self):
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
stream1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=device, created_by=self.u2
)
stream2 = StreamId.objects.create_stream(
project=self.p1, variable=self.v2, device=device, created_by=self.u2
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=5,
int_value=5
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=6,
int_value=6
)
StreamData.objects.create(
stream_slug=stream2.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=7,
int_value=7
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=8,
int_value=8
)
StreamData.objects.create(
stream_slug=stream2.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=9,
int_value=9
)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
action._clone_streams()
self.assertEqual(StreamData.objects.filter(stream_slug=stream1.slug).count(), 3)
self.assertEqual(StreamData.objects.filter(stream_slug=stream2.slug).count(), 2)
action._migrate_stream_data()
self.assertEqual(StreamData.objects.filter(stream_slug=stream1.slug).count(), 0)
self.assertEqual(StreamData.objects.filter(stream_slug=stream2.slug).count(), 0)
new_stream1 = block.get_stream_slug_for(self.v1.formatted_lid)
self.assertEqual(StreamData.objects.filter(stream_slug=new_stream1).count(), 3)
new_stream2 = block.get_stream_slug_for(self.v2.formatted_lid)
self.assertEqual(StreamData.objects.filter(stream_slug=new_stream2).count(), 2)
self.assertEqual(StreamData.objects.filter(stream_slug=new_stream1).first().project_slug, '')
def testDataBlockActionMigrateStreamEvents(self):
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
stream1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=device, created_by=self.u2
)
stream2 = StreamId.objects.create_stream(
project=self.p1, variable=self.v2, device=device, created_by=self.u2
)
StreamEventData.objects.create(
timestamp=timezone.now(),
device_timestamp=10,
stream_slug=stream1.slug,
streamer_local_id=2
)
StreamEventData.objects.create(
timestamp=timezone.now(),
device_timestamp=10,
stream_slug=stream1.slug,
streamer_local_id=3
)
StreamEventData.objects.create(
timestamp=timezone.now(),
device_timestamp=10,
stream_slug=stream2.slug,
streamer_local_id=4
)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
action._clone_streams()
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream1.slug).count(), 2)
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream2.slug).count(), 1)
action._migrate_stream_events()
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream1.slug).count(), 0)
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream2.slug).count(), 0)
new_stream1 = block.get_stream_slug_for(self.v1.formatted_lid)
self.assertEqual(StreamEventData.objects.filter(stream_slug=new_stream1).count(), 2)
new_stream2 = block.get_stream_slug_for(self.v2.formatted_lid)
self.assertEqual(StreamEventData.objects.filter(stream_slug=new_stream2).count(), 1)
def testDataBlockActionMigrateStreamNote(self):
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
stream1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=device, created_by=self.u2
)
StreamId.objects.create_stream(
project=self.p1, variable=self.v2, device=device, created_by=self.u2
)
StreamNote.objects.create(
target_slug=device.slug,
timestamp=timezone.now(),
created_by=self.u2,
note='System 1',
type='sc'
)
StreamNote.objects.create(
target_slug=stream1.slug,
timestamp=timezone.now(),
created_by=self.u2,
note='Note 2'
)
StreamNote.objects.create(
target_slug=stream1.slug,
timestamp=timezone.now(),
created_by=self.u1,
note='Note 3'
)
StreamNote.objects.create(
target_slug=device.slug,
timestamp=timezone.now(),
created_by=self.u2,
note='Note 4'
)
self.assertEqual(StreamNote.objects.count(), 4)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
action._clone_streams()
self.assertEqual(StreamNote.objects.filter(target_slug=stream1.slug).count(), 2)
self.assertEqual(StreamNote.objects.filter(target_slug=device.slug).count(), 2)
action._migrate_stream_notes()
self.assertEqual(StreamNote.objects.filter(target_slug=stream1.slug).count(), 0)
self.assertEqual(StreamNote.objects.filter(target_slug=device.slug).count(), 1)
new_stream1 = block.get_stream_slug_for(self.v1.formatted_lid)
self.assertEqual(StreamNote.objects.count(), 4)
self.assertEqual(StreamNote.objects.filter(target_slug=new_stream1).count(), 2)
self.assertEqual(StreamNote.objects.filter(target_slug=block.slug).count(), 1)
def testDataBlockActionMigrateDeviceLocations(self):
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
DeviceLocation.objects.create(
timestamp=timezone.now(),
target_slug=device.slug,
lat=12.1234, lon=10.000,
user=self.u2
)
DeviceLocation.objects.create(
timestamp=timezone.now(),
target_slug=device.slug,
lat=12.1234, lon=11.000,
user=self.u2
)
DeviceLocation.objects.create(
timestamp=timezone.now(),
target_slug=device.slug,
lat=12.1234, lon=12.000,
user=self.u2
)
self.assertEqual(DeviceLocation.objects.count(), 3)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
self.assertEqual(DeviceLocation.objects.filter(target_slug=device.slug).count(), 3)
action._migrate_device_locations()
self.assertEqual(DeviceLocation.objects.filter(target_slug=device.slug).count(), 0)
self.assertEqual(DeviceLocation.objects.filter(target_slug=block.slug).count(), 3)
def testDataBlockActionMigrateReports(self):
db1 = DataBlock.objects.create(org=self.pd1.org, title='test', device=self.pd1, block=1, created_by=self.u2)
GeneratedUserReport.objects.create(
org=self.pd1.org,
label='My report 1',
source_ref=self.pd1.slug,
created_by=self.u2
)
GeneratedUserReport.objects.create(
org=self.pd1.org,
label='My report 2',
source_ref=self.pd1.slug,
created_by=self.u2
)
self.assertEqual(GeneratedUserReport.objects.filter(source_ref=self.pd1.slug).count(), 2)
self.assertEqual(GeneratedUserReport.objects.filter(source_ref=db1.slug).count(), 0)
action = ArchiveDeviceDataAction()
action._block = db1
action._device = self.pd1
action._migrate_reports()
self.assertEqual(GeneratedUserReport.objects.filter(source_ref=self.pd1.slug).count(), 0)
self.assertEqual(GeneratedUserReport.objects.filter(source_ref=db1.slug).count(), 2)
def testDataBlockActionTestAll(self):
sg = SensorGraph.objects.create(name='SG 1',
major_version=1,
created_by=self.u1, org=self.o1)
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, sg=sg, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
stream1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=device, created_by=self.u2
)
stream2 = StreamId.objects.create_stream(
project=self.p1, variable=self.v2, device=device, created_by=self.u2
)
GenericProperty.objects.create_int_property(slug=device.slug,
created_by=self.u1,
name='prop1', value=4)
GenericProperty.objects.create_str_property(slug=device.slug,
created_by=self.u1,
name='prop2', value='4')
GenericProperty.objects.create_bool_property(slug=device.slug,
created_by=self.u1,
name='prop3', value=True)
StreamEventData.objects.create(
timestamp=timezone.now(),
device_timestamp=10,
stream_slug=stream1.slug,
streamer_local_id=2
)
StreamEventData.objects.create(
timestamp=timezone.now(),
device_timestamp=10,
stream_slug=stream1.slug,
streamer_local_id=3
)
StreamEventData.objects.create(
timestamp=timezone.now(),
device_timestamp=10,
stream_slug=stream2.slug,
streamer_local_id=4
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=5,
int_value=5
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=6,
int_value=6
)
StreamData.objects.create(
stream_slug=stream2.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=7,
int_value=7
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=8,
int_value=8
)
StreamData.objects.create(
stream_slug=stream2.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=9,
int_value=9
)
StreamNote.objects.create(
target_slug=stream1.slug,
timestamp=timezone.now(),
created_by=self.u2,
note='Note 1'
)
StreamNote.objects.create(
target_slug=stream1.slug,
timestamp=timezone.now(),
created_by=self.u2,
note='Note 2'
)
StreamNote.objects.create(
target_slug=stream1.slug,
timestamp=timezone.now(),
created_by=self.u2,
note='Note 3'
)
StreamNote.objects.create(
target_slug=device.slug,
timestamp=timezone.now(),
created_by=self.u1,
note='Note 4'
)
self.assertEqual(GenericProperty.objects.object_properties_qs(device).count(), 3)
self.assertEqual(GenericProperty.objects.object_properties_qs(block).count(), 0)
self.assertEqual(device.streamids.count(), 2)
self.assertEqual(StreamData.objects.filter(stream_slug=stream1.slug).count(), 3)
self.assertEqual(StreamData.objects.filter(stream_slug=stream2.slug).count(), 2)
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream1.slug).count(), 2)
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream2.slug).count(), 1)
self.assertEqual(StreamNote.objects.filter(target_slug=stream1.slug).count(), 3)
self.assertEqual(StreamNote.objects.filter(target_slug=device.slug).count(), 1)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
action.execute(arguments={'data_block_slug': block.slug})
self.assertEqual(GenericProperty.objects.object_properties_qs(device).count(), 0)
self.assertEqual(GenericProperty.objects.object_properties_qs(block).count(), 3)
self.assertEqual(device.streamids.count(), 4)
self.assertEqual(StreamData.objects.filter(stream_slug=stream1.slug).count(), 0)
self.assertEqual(StreamData.objects.filter(stream_slug=stream2.slug).count(), 0)
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream1.slug).count(), 0)
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream2.slug).count(), 0)
self.assertEqual(StreamNote.objects.filter(target_slug=stream1.slug).count(), 0)
self.assertEqual(StreamNote.objects.filter(target_slug=device.slug).count(), 1)
new_stream1 = block.get_stream_slug_for(self.v1.formatted_lid)
self.assertEqual(StreamId.objects.filter(slug=new_stream1).count(), 1)
new_stream2 = block.get_stream_slug_for(self.v2.formatted_lid)
self.assertEqual(StreamId.objects.filter(slug=new_stream2).count(), 1)
self.assertEqual(StreamData.objects.filter(stream_slug=new_stream1).count(), 3)
self.assertEqual(StreamEventData.objects.filter(stream_slug=new_stream1).count(), 2)
self.assertEqual(StreamNote.objects.filter(target_slug=new_stream1).count(), 3)
self.assertEqual(StreamData.objects.filter(stream_slug=new_stream2).count(), 2)
self.assertEqual(StreamEventData.objects.filter(stream_slug=new_stream2).count(), 1)
block = DataBlock.objects.first()
self.assertIsNotNone(block.completed_on)
self.assertIsNotNone(block.sg)
self.assertEqual(block.sg, sg)
def testDataBlockActionTestDataMask(self):
sg = SensorGraph.objects.create(name='SG 1',
major_version=1,
created_by=self.u1, org=self.o1)
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, sg=sg, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
stream1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=device, created_by=self.u2
)
dt1 = dateutil.parser.parse('2017-09-28T10:00:00Z')
dt2 = dateutil.parser.parse('2017-09-28T11:00:00Z')
dt3 = dateutil.parser.parse('2017-09-30T10:00:00Z')
dt4 = dateutil.parser.parse('2017-09-30T10:10:00Z')
dt5 = dateutil.parser.parse('2017-09-30T10:20:00Z')
set_data_mask(device, '2017-09-28T10:30:00Z', '2017-09-30T10:15:00Z', [], [], self.u1)
StreamData.objects.create(
stream_slug=stream1.slug,
type='Num',
timestamp=dt1,
int_value=5
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='Num',
timestamp=dt2,
int_value=6
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='Num',
timestamp=dt3,
int_value=7
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='Num',
timestamp=dt4,
int_value=8
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='Num',
timestamp=dt5,
int_value=9
)
self.assertEqual(device.streamids.count(), 1)
data_mask_event = get_data_mask_event(device)
mask_slug = data_mask_event.stream_slug
self.assertEqual(StreamData.objects.filter(stream_slug=stream1.slug).count(), 5)
self.assertEqual(StreamEventData.objects.filter(stream_slug=mask_slug).count(), 1)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
action.execute(arguments={'data_block_slug': block.slug})
self.assertEqual(device.streamids.count(), 2)
self.assertEqual(StreamData.objects.filter(stream_slug=stream1.slug).count(), 0)
self.assertEqual(StreamEventData.objects.filter(stream_slug=mask_slug).count(), 0)
data_mask_event = get_data_mask_event(block)
self.assertEqual(StreamEventData.objects.filter(stream_slug=data_mask_event.stream_slug).count(), 1)
| 1.914063 | 2 |
nova/policies/servers.py | maya2250/nova | 0 | 7284 | <reponame>maya2250/nova<gh_stars>0
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
RULE_AOO = base.RULE_ADMIN_OR_OWNER
SERVERS = 'os_compute_api:servers:%s'
NETWORK_ATTACH_EXTERNAL = 'network:attach_external_network'
ZERO_DISK_FLAVOR = SERVERS % 'create:zero_disk_flavor'
REQUESTED_DESTINATION = 'compute:servers:create:requested_destination'
CROSS_CELL_RESIZE = 'compute:servers:resize:cross_cell'
rules = [
policy.DocumentedRuleDefault(
SERVERS % 'index',
RULE_AOO,
"List all servers",
[
{
'method': 'GET',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'detail',
RULE_AOO,
"List all servers with detailed information",
[
{
'method': 'GET',
'path': '/servers/detail'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'index:get_all_tenants',
base.RULE_ADMIN_API,
"List all servers for all projects",
[
{
'method': 'GET',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'detail:get_all_tenants',
base.RULE_ADMIN_API,
"List all servers with detailed information for all projects",
[
{
'method': 'GET',
'path': '/servers/detail'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'allow_all_filters',
base.RULE_ADMIN_API,
"Allow all filters when listing servers",
[
{
'method': 'GET',
'path': '/servers'
},
{
'method': 'GET',
'path': '/servers/detail'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'show',
RULE_AOO,
"Show a server",
[
{
'method': 'GET',
'path': '/servers/{server_id}'
}
]),
# the details in host_status are pretty sensitive, only admins
# should do that by default.
policy.DocumentedRuleDefault(
SERVERS % 'show:host_status',
base.RULE_ADMIN_API,
"""
Show a server with additional host status information.
This means host_status will be shown irrespective of status value. If showing
only host_status UNKNOWN is desired, use the
``os_compute_api:servers:show:host_status:unknown-only`` policy rule.
Microversion 2.75 added the ``host_status`` attribute in the
``PUT /servers/{server_id}`` and ``POST /servers/{server_id}/action (rebuild)``
API responses which are also controlled by this policy rule, like the
``GET /servers*`` APIs.
""",
[
{
'method': 'GET',
'path': '/servers/{server_id}'
},
{
'method': 'GET',
'path': '/servers/detail'
},
{
'method': 'PUT',
'path': '/servers/{server_id}'
},
{
'method': 'POST',
'path': '/servers/{server_id}/action (rebuild)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'show:host_status:unknown-only',
base.RULE_ADMIN_API,
"""
Show a server with additional host status information, only if host status is
UNKNOWN.
This policy rule will only be enforced when the
``os_compute_api:servers:show:host_status`` policy rule does not pass for the
request. An example policy configuration could be where the
``os_compute_api:servers:show:host_status`` rule is set to allow admin-only and
the ``os_compute_api:servers:show:host_status:unknown-only`` rule is set to
allow everyone.
""",
[
{
'method': 'GET',
'path': '/servers/{server_id}'
},
{
'method': 'GET',
'path': '/servers/detail'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create',
RULE_AOO,
"Create a server",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create:forced_host',
base.RULE_ADMIN_API,
"""
Create a server on the specified host and/or node.
In this case, the server is forced to launch on the specified
host and/or node by bypassing the scheduler filters unlike the
``compute:servers:create:requested_destination`` rule.
""",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
REQUESTED_DESTINATION,
base.RULE_ADMIN_API,
"""
Create a server on the requested compute service host and/or
hypervisor_hostname.
In this case, the requested host and/or hypervisor_hostname is
validated by the scheduler filters unlike the
``os_compute_api:servers:create:forced_host`` rule.
""",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create:attach_volume',
RULE_AOO,
"Create a server with the requested volume attached to it",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create:attach_network',
RULE_AOO,
"Create a server with the requested network attached to it",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create:trusted_certs',
RULE_AOO,
"Create a server with trusted image certificate IDs",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
ZERO_DISK_FLAVOR,
base.RULE_ADMIN_API,
"""
This rule controls the compute API validation behavior of creating a server
with a flavor that has 0 disk, indicating the server should be volume-backed.
For a flavor with disk=0, the root disk will be set to exactly the size of the
image used to deploy the instance. However, in this case the filter_scheduler
cannot select the compute host based on the virtual image size. Therefore, 0
should only be used for volume booted instances or for testing purposes.
WARNING: It is a potential security exposure to enable this policy rule
if users can upload their own images since repeated attempts to
create a disk=0 flavor instance with a large image can exhaust
the local disk of the compute (or shared storage cluster). See bug
https://bugs.launchpad.net/nova/+bug/1739646 for details.
""",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
NETWORK_ATTACH_EXTERNAL,
'is_admin:True',
"Attach an unshared external network to a server",
[
# Create a server with a requested network or port.
{
'method': 'POST',
'path': '/servers'
},
# Attach a network or port to an existing server.
{
'method': 'POST',
'path': '/servers/{server_id}/os-interface'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'delete',
RULE_AOO,
"Delete a server",
[
{
'method': 'DELETE',
'path': '/servers/{server_id}'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'update',
RULE_AOO,
"Update a server",
[
{
'method': 'PUT',
'path': '/servers/{server_id}'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'confirm_resize',
RULE_AOO,
"Confirm a server resize",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (confirmResize)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'revert_resize',
RULE_AOO,
"Revert a server resize",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (revertResize)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'reboot',
RULE_AOO,
"Reboot a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (reboot)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'resize',
RULE_AOO,
"Resize a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (resize)'
}
]),
policy.DocumentedRuleDefault(
CROSS_CELL_RESIZE,
base.RULE_NOBODY,
"Resize a server across cells. By default, this is disabled for all "
"users and recommended to be tested in a deployment for admin users "
"before opening it up to non-admin users. Resizing within a cell is "
"the default preferred behavior even if this is enabled. ",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (resize)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'rebuild',
RULE_AOO,
"Rebuild a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (rebuild)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'rebuild:trusted_certs',
RULE_AOO,
"Rebuild a server with trusted image certificate IDs",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (rebuild)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create_image',
RULE_AOO,
"Create an image from a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (createImage)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create_image:allow_volume_backed',
RULE_AOO,
"Create an image from a volume backed server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (createImage)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'start',
RULE_AOO,
"Start a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (os-start)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'stop',
RULE_AOO,
"Stop a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (os-stop)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'trigger_crash_dump',
RULE_AOO,
"Trigger crash dump in a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (trigger_crash_dump)'
}
]),
]
def list_rules():
return rules
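# Illustrative sketch (not part of nova): these defaults are consumed by an
# oslo.policy Enforcer, roughly as follows. The names below are assumptions
# for the example only.
#
#     from oslo_config import cfg
#     from oslo_policy import policy as oslo_policy
#
#     enforcer = oslo_policy.Enforcer(cfg.CONF)
#     enforcer.register_defaults(list_rules())
#     enforcer.authorize(SERVERS % 'index', target={'project_id': '...'},
#                        creds=request_context.to_policy_values())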
| 1.585938 | 2 |
set-env.py | sajaldebnath/vrops-custom-group-creation | 1 | 7285 | <gh_stars>1-10
#!/usr/bin/env python
"""
#
# set-env - a small python program to setup the configuration environment for data-push.py
# data-push.py contains the python program to push attribute values to vROps
# Author <NAME> <<EMAIL>>
#
"""
# Importing the required modules
import json
import base64
import os,sys
# Getting the absolute path from where the script is being run
def get_script_path():
return os.path.dirname(os.path.realpath(sys.argv[0]))
# Getting the inputs from user
def get_the_inputs():
adapterkind = raw_input("Please enter Adapter Kind: ")
resourceKind = raw_input("Please enter Resource Kind: ")
servername = raw_input("Enter enter Server IP/FQDN: ")
serveruid = raw_input("Please enter user id: ")
serverpasswd = raw_input("Please enter vRops password: ")
encryptedvar = base64.b64encode(serverpasswd)
data = {}
data["adapterKind"] = adapterkind
data["resourceKind"] = resourceKind
serverdetails = {}
serverdetails["name"] = servername
serverdetails["userid"] = serveruid
serverdetails["password"] = encrypted<PASSWORD>
data["server"] = serverdetails
return data
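# For reference, the env.json written below ends up with roughly this shape
# (an illustrative sketch; values are placeholders, and the password is the
# base64-encoded string collected above):
#
#     {
#       "adapterKind": "...",
#       "resourceKind": "...",
#       "server": {"name": "...", "userid": "...", "password": "<base64>"}
#     }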
# Getting the path where env.json file should be kept
path = get_script_path()
fullpath = path+"/"+"env.json"
# Getting the data for the env.json file
final_data = get_the_inputs()
# Saving the data to env.json file
with open(fullpath, 'w') as outfile:
json.dump(final_data, outfile, sort_keys = True, indent = 2, separators=(',', ':'), ensure_ascii=False) | 2.578125 | 3 |
week02/day08.py | gtadeus/LeetCodeChallenge2009 | 0 | 7286 | <filename>week02/day08.py
import unittest
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def sumRootToLeaf(self, root: TreeNode) -> int:
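        # Collect every root-to-leaf path as a binary string, then add up their values.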
m = self.c(root)
r=0
for n in m:
if n != 0:
if n== 1:
r+=1
else:
r+=int(n,2)
return r
def c(self, l):
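        # Recursively build the list of root-to-leaf paths as binary strings;
        # a single leaf returns just its own value.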
if l.left is None and l.right is None:
return [l.val]
else:
p, p2 = [], []
if not l.left is None:
p=self.c(l.left)
if not l.right is None:
p2=self.c(l.right)
v=f'{l.val}'
#v = l.val << 1
for i, x in enumerate(p):
if not l.left is None:
p[i]=f'{v}{x}'
for i, x in enumerate(p2):
if not l.right is None:
p2[i]=f'{v}{x}'
return p+p2
class TestDay08(unittest.TestCase):
S = Solution()
input_ = [ TreeNode(1, TreeNode(0, TreeNode(0,None,None), TreeNode(1,None,None)), TreeNode(1, TreeNode(0,None,None), TreeNode(1,None,None))) ]
solutions = [22]
def testSumRoot(self):
for indx, val in enumerate(self.input_):
self.assertEqual(self.solutions[indx], self.S.sumRootToLeaf(val))
if __name__ == "__main__":
unittest.main()
| 3.828125 | 4 |
config.py | tiuD/cross-prom | 0 | 7287 | <filename>config.py
TOKEN = "<KEY>"
CHAT_ID = [957539786] # e.g. [1234567, 2233445, 3466123...]
| 1.109375 | 1 |
buchschloss/gui2/__init__.py | mik2k2/buchschloss | 1 | 7288 | """entry point"""
from . import main
start = main.app.launch
| 1.101563 | 1 |
src/tests/test_stop_at_task.py | francesco-p/FACIL | 243 | 7289 | <filename>src/tests/test_stop_at_task.py
from tests import run_main_and_assert
FAST_LOCAL_TEST_ARGS = "--exp-name local_test --datasets mnist" \
" --network LeNet --num-tasks 5 --seed 1 --batch-size 32" \
" --nepochs 2 --num-workers 0 --stop-at-task 3"
def test_finetuning_stop_at_task():
args_line = FAST_LOCAL_TEST_ARGS
args_line += " --approach finetuning"
run_main_and_assert(args_line)
| 1.953125 | 2 |
Python/contains-duplicate.py | shreyventure/LeetCode-Solutions | 388 | 7290 | <gh_stars>100-1000
# Author: <NAME> (@optider)
# Github Profile: https://github.com/Optider/
# Problem Link: https://leetcode.com/problems/contains-duplicate/
class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
count = {}
for n in nums :
if count.get(n) != None :
return True
count[n] = 1
return False
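# Illustrative usage (not part of the original solution):
#   Solution().containsDuplicate([1, 2, 3, 1])  # -> True
#   Solution().containsDuplicate([1, 2, 3, 4])  # -> False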
| 2.796875 | 3 |
build/android/gyp/dex.py | google-ar/chromium | 2,151 | 7291 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import optparse
import os
import sys
import tempfile
import zipfile
from util import build_utils
def _CheckFilePathEndsWithJar(parser, file_path):
if not file_path.endswith(".jar"):
# dx ignores non .jar files.
parser.error("%s does not end in .jar" % file_path)
def _CheckFilePathsEndWithJar(parser, file_paths):
for file_path in file_paths:
_CheckFilePathEndsWithJar(parser, file_path)
def _RemoveUnwantedFilesFromZip(dex_path):
iz = zipfile.ZipFile(dex_path, 'r')
tmp_dex_path = '%s.tmp.zip' % dex_path
oz = zipfile.ZipFile(tmp_dex_path, 'w', zipfile.ZIP_DEFLATED)
for i in iz.namelist():
if i.endswith('.dex'):
oz.writestr(i, iz.read(i))
os.remove(dex_path)
os.rename(tmp_dex_path, dex_path)
def _ParseArgs(args):
args = build_utils.ExpandFileArgs(args)
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--android-sdk-tools',
help='Android sdk build tools directory.')
parser.add_option('--output-directory',
default=os.getcwd(),
help='Path to the output build directory.')
parser.add_option('--dex-path', help='Dex output path.')
parser.add_option('--configuration-name',
help='The build CONFIGURATION_NAME.')
parser.add_option('--proguard-enabled',
help='"true" if proguard is enabled.')
parser.add_option('--debug-build-proguard-enabled',
help='"true" if proguard is enabled for debug build.')
parser.add_option('--proguard-enabled-input-path',
help=('Path to dex in Release mode when proguard '
'is enabled.'))
parser.add_option('--no-locals', default='0',
help='Exclude locals list from the dex file.')
parser.add_option('--incremental',
action='store_true',
help='Enable incremental builds when possible.')
parser.add_option('--inputs', help='A list of additional input paths.')
parser.add_option('--excluded-paths',
help='A list of paths to exclude from the dex file.')
parser.add_option('--main-dex-list-path',
help='A file containing a list of the classes to '
'include in the main dex.')
parser.add_option('--multidex-configuration-path',
help='A JSON file containing multidex build configuration.')
parser.add_option('--multi-dex', default=False, action='store_true',
help='Generate multiple dex files.')
options, paths = parser.parse_args(args)
required_options = ('android_sdk_tools',)
build_utils.CheckOptions(options, parser, required=required_options)
if options.multidex_configuration_path:
with open(options.multidex_configuration_path) as multidex_config_file:
multidex_config = json.loads(multidex_config_file.read())
options.multi_dex = multidex_config.get('enabled', False)
if options.multi_dex and not options.main_dex_list_path:
logging.warning('multidex cannot be enabled without --main-dex-list-path')
options.multi_dex = False
elif options.main_dex_list_path and not options.multi_dex:
logging.warning('--main-dex-list-path is unused if multidex is not enabled')
if options.inputs:
options.inputs = build_utils.ParseGnList(options.inputs)
_CheckFilePathsEndWithJar(parser, options.inputs)
if options.excluded_paths:
options.excluded_paths = build_utils.ParseGnList(options.excluded_paths)
if options.proguard_enabled_input_path:
_CheckFilePathEndsWithJar(parser, options.proguard_enabled_input_path)
_CheckFilePathsEndWithJar(parser, paths)
return options, paths
def _AllSubpathsAreClassFiles(paths, changes):
for path in paths:
if any(not p.endswith('.class') for p in changes.IterChangedSubpaths(path)):
return False
return True
def _DexWasEmpty(paths, changes):
for path in paths:
if any(p.endswith('.class')
for p in changes.old_metadata.IterSubpaths(path)):
return False
return True
def _IterAllClassFiles(changes):
for path in changes.IterAllPaths():
for subpath in changes.IterAllSubpaths(path):
if subpath.endswith('.class'):
yield path
def _MightHitDxBug(changes):
# We've seen dx --incremental fail for small libraries. It's unlikely a
# speed-up anyways in this case.
num_classes = sum(1 for x in _IterAllClassFiles(changes))
if num_classes < 10:
return True
# We've also been able to consistently produce a failure by adding an empty
# line to the top of the first .java file of a library.
# https://crbug.com/617935
first_file = next(_IterAllClassFiles(changes))
for path in changes.IterChangedPaths():
for subpath in changes.IterChangedSubpaths(path):
if first_file == subpath:
return True
return False
def _RunDx(changes, options, dex_cmd, paths):
with build_utils.TempDir() as classes_temp_dir:
# --multi-dex is incompatible with --incremental.
if options.multi_dex:
dex_cmd.append('--main-dex-list=%s' % options.main_dex_list_path)
else:
# --incremental tells dx to merge all newly dex'ed .class files with
# what that already exist in the output dex file (existing classes are
# replaced).
# Use --incremental when .class files are added or modified, but not when
# any are removed (since it won't know to remove them).
if (options.incremental
and not _MightHitDxBug(changes)
and changes.AddedOrModifiedOnly()):
changed_inputs = set(changes.IterChangedPaths())
changed_paths = [p for p in paths if p in changed_inputs]
if not changed_paths:
return
# When merging in other dex files, there's no easy way to know if
# classes were removed from them.
if (_AllSubpathsAreClassFiles(changed_paths, changes)
and not _DexWasEmpty(changed_paths, changes)):
dex_cmd.append('--incremental')
for path in changed_paths:
changed_subpaths = set(changes.IterChangedSubpaths(path))
# Note: |changed_subpaths| may be empty if nothing changed.
if changed_subpaths:
build_utils.ExtractAll(path, path=classes_temp_dir,
predicate=lambda p: p in changed_subpaths)
paths = [classes_temp_dir]
dex_cmd += paths
build_utils.CheckOutput(dex_cmd, print_stderr=False)
if options.dex_path.endswith('.zip'):
_RemoveUnwantedFilesFromZip(options.dex_path)
def _OnStaleMd5(changes, options, dex_cmd, paths):
_RunDx(changes, options, dex_cmd, paths)
build_utils.WriteJson(
[os.path.relpath(p, options.output_directory) for p in paths],
options.dex_path + '.inputs')
def main(args):
options, paths = _ParseArgs(args)
if ((options.proguard_enabled == 'true'
and options.configuration_name == 'Release')
or (options.debug_build_proguard_enabled == 'true'
and options.configuration_name == 'Debug')):
paths = [options.proguard_enabled_input_path]
if options.inputs:
paths += options.inputs
if options.excluded_paths:
# Excluded paths are relative to the output directory.
exclude_paths = options.excluded_paths
paths = [p for p in paths if not
os.path.relpath(p, options.output_directory) in exclude_paths]
input_paths = list(paths)
dx_binary = os.path.join(options.android_sdk_tools, 'dx')
# See http://crbug.com/272064 for context on --force-jumbo.
# See https://github.com/android/platform_dalvik/commit/dd140a22d for
# --num-threads.
# See http://crbug.com/658782 for why -JXmx2G was added.
dex_cmd = [dx_binary, '-JXmx2G', '--num-threads=8', '--dex', '--force-jumbo',
'--output', options.dex_path]
if options.no_locals != '0':
dex_cmd.append('--no-locals')
if options.multi_dex:
input_paths.append(options.main_dex_list_path)
dex_cmd += [
'--multi-dex',
'--minimal-main-dex',
]
output_paths = [
options.dex_path,
options.dex_path + '.inputs',
]
# An escape hatch to be able to check if incremental dexing is causing
# problems.
force = int(os.environ.get('DISABLE_INCREMENTAL_DX', 0))
build_utils.CallAndWriteDepfileIfStale(
lambda changes: _OnStaleMd5(changes, options, dex_cmd, paths),
options,
input_paths=input_paths,
input_strings=dex_cmd,
output_paths=output_paths,
force=force,
pass_changes=True)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 2.140625 | 2 |
apps/views.py | Edwardhgj/meiduo | 0 | 7292 | <reponame>Edwardhgj/meiduo
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.hashers import check_password, make_password
from django.views import View
from utils.response_code import RET, error_map
from rest_framework.views import APIView
from rest_framework.response import Response
from apps.serializers import *
from datetime import datetime
# Create your views here.
# Render the login page
def login(request):
return render(request, 'admin/login.html')
# Handle login submission
import json
class SubmitLogin(View):
    def post(self, request):  # reflection
mes = {}
name = request.POST.get('name')
passwd = request.POST.get('passwd')
# print(name,passwd)
if not all([name, passwd]):
mes['code'] = RET.DATAERR
mes['message'] = error_map[RET.DATAERR]
else:
            # Look up the user by name
admin = Sadmin.objects.filter(username=name).first()
            print(admin)
if admin:
                # Compare the password
if check_password(passwd, admin.password):
                    # Login successful
request.session['admin_id'] = admin.id
mes['code'] = RET.OK
mes['message'] = error_map[RET.OK]
else:
mes['code'] = RET.PWDERR
mes['message'] = error_map[RET.PWDERR]
else:
mes['code'] = RET.USERERR
mes['message'] = error_map[RET.USERERR]
print('sdfsdfssdf')
return HttpResponse(json.dumps(mes))
# Registration
def reg(request):
    password = make_password('<PASSWORD>')
admin = Sadmin(username='admin', password=password, is_admin=True)
admin.save()
return HttpResponse('ok')
# Render the home page
def index(request):
admin_id = request.session.get('admin_id')
if admin_id:
admin = Sadmin.objects.get(id=admin_id)
return render(request, 'admin/index.html', locals())
# Render the category list page
def showCate(request):
    return render(request, "admin/cate_list.html")
# Render the news list page
def showNews(request):
    return render(request, "admin/news_list.html")
# Render the banner (carousel) list page
def bannersCate(request):
    return render(request, "admin/point_list.html")
# Render the tag list page
def tagCate(request):
    return render(request, "admin/tag_list.html")
# Render the goods list page
def goodsCate(request):
    return render(request, "admin/goods_list.html")
# Render the news list page
def newsCate(request):
    return render(request, "admin/news_list.html")
# Render the banner (carousel) list page
def bannersCate(request):
    return render(request, "admin/point_list.html")
# Category list API
class CateList(APIView):
def get(self, request):
cate = Cate.objects.all()
c = CateModelSerializer(cate, many=True)
mes = {}
mes['code'] = RET.OK
mes['cateList'] = c.data
return Response(mes)
# Tag list API
class TagList(APIView):
def get(self, request):
tags = Tags.objects.all()
c = TagModelSerializer(tags, many=True)
mes = {}
mes['code'] = RET.OK
mes['tagList'] = c.data
return Response(mes)
# Goods list API
class GoodsList(APIView):
def get(self, request):
goods = Goods.objects.all()
g = GoodsModelSerializer(goods, many=True)
mes = {}
mes['code'] = RET.OK
mes['goodsList'] = g.data
return Response(mes)
# News list API
class NewsList(APIView):
def get(self, request):
news = News.objects.all()
n=NewsModelSerializer(news,many=True)
mes = {}
mes['code'] = RET.OK
mes['newsList'] = n.data
return Response(mes)
# Banner list API
class BannersList(APIView):
def get(self, request):
banners = Banners.objects.all()
n=BannersModelSerializer(banners,many=True)
mes = {}
mes['code'] = RET.OK
mes['bannersList'] = n.data
return Response(mes)
# Add-category page
def addCate(request):
    # Get the top-level categories
cate = Cate.objects.filter(pid=0).all()
id=request.GET.get('id')
try:
        # Editing an existing category
one_cate=Cate.objects.get(id=id)
print(one_cate)
except:
id=""
return render(request, "admin/add_cate.html", locals())
# Add-tag page
def addTag(request):
# print('sdf')
cate_list = Cate.objects.all()
id=request.GET.get('id')
try:
        # Editing an existing tag
one_tag=Tags.objects.get(id=id)
except:
id=""
return render(request, "admin/add_tag.html", locals())
# Add-goods page
def addGoods(request):
# print('ceshi')
    # Get all goods
goods = Goods.objects.all()
cates = Cate.objects.all()
tag_list=Tags.objects.all()
id=request.GET.get('id')
print(id)
try:
one_goods=Goods.objects.get(id=id)
# print(one_goods)
except:
id=""
return render(request, "admin/add_goods.html", locals())
# Add-news page
def addNews(request):
# print('ceshi')
    # Get all news items
    news = News.objects.all()
    # An id is passed when editing an existing item
id=request.GET.get('id')
print(id)
try:
one_news=News.objects.get(id=id)
# print(one_goods)
except:
id=""
return render(request, "admin/add_news.html", locals())
# Add-banner page
def addBanners(request):
# print('ceshi')
    # Get all banners
    banners = Banners.objects.all()
    # An id is passed when editing an existing item
id=request.GET.get('id')
print(id)
try:
one_banner=Banners.objects.get(id=id)
# print(one_goods)
except:
id=""
return render(request, "admin/add_banners.html", locals())
from day01.settings import UPLOADFILES
import os
# Image upload helper
def upload_img(img):
if img:
f = open(os.path.join(UPLOADFILES, '', img.name),'wb')
for chunk in img.chunks():
f.write(chunk)
f.close()
img=datetime.now().strftime("%Y-%m-%d-%H-%M-%S")+img.name
return 'http://127.0.0.1:8000/static/upload/'+img
return ' '
# Image upload endpoint for the rich-text editor
def addnews_upload(request):
files = request.FILES.get('file')
path = upload_img(files)
mes = {
'path': path,
'error': False
}
return HttpResponse(json.dumps(mes))
# Add-category API
class SubmitAddCate(APIView):
def post(self, request):
content = request.data
print(content)
        # Upload the image
img = request.FILES.get('img')
path=upload_img(img)
content['picture']=path
try:
pid=int(content['pid'])
except:
pid=0
        # Derive top_id and type from pid
if pid == 0:
type = 1
top_id = 0
else:
cate = Cate.objects.get(id=pid)
type = cate.type + 1
if cate.top_id==0:
top_id = cate.id
else:
top_id = cate.top_id
print(top_id,pid,type)
content['type'] = type
content['top_id'] = top_id
try:
id=int(content['id'])
except:
id=0
if id>0:
cc=Cate.objects.get(id=id)
c=CateSerializer(cc,data=content)
            # Editing an existing category
else:
c = CateSerializer(data=content)
mes={}
if c.is_valid():
try:
c.save()
mes['code'] = RET.OK
except:
mes['code'] = RET.DATAERR
else:
print(c.errors)
mes['code'] = RET.DATAERR
return Response(mes)
# Delete a category
def deleteCate(request):
id=request.GET.get('id')
Cate.objects.get(id=id).delete()
return render(request, "admin/cate_list.html")
# Add-tag API
class SubmitAddTag(APIView):
def post(self, request):
content = request.data
print(content)
try:
            id = int(content['id'])  # extract the id
print(id)
print('di 到这了')
except:
id = 0
if id > 0:
dd = Tags.objects.get(id=id)
d = TagSerializer(dd, data=content)
            # Editing an existing tag
else:
d = TagSerializer(data=content)
mes = {}
if d.is_valid():
try:
d.save()
mes['code'] = RET.OK
except:
mes['code'] = RET.DATAERR
else:
mes['code'] = RET.DATAERR
return Response(mes)
# Delete a tag
def deleteTag(request):
id=request.GET.get('id')
Cate.objects.get(id=id).delete()
return render(request, "admin/tag_list.html")
# Add-goods API
class SubmitAddGoods(APIView):
def post(self, request):
# print('eerw')
content = request.data
print(content)
print(content['id'])
print(content['cid_id'])
        # Upload the image
img = request.FILES.get('img')
path=upload_img(img)
content['picture']=path
one_cate=Cate.objects.get(id=int(content['cid_id']))
print(one_cate)
content['top_id'] = one_cate.top_id
try:
print('测试代码')
id=int(content['id'])
print(id)
except:
id=0
if id>0:
            # Edit an existing goods record
instance = Goods.objects.get(id=id)
c = GoodsSerializer(instance, data=content)
else:
c = GoodsSerializer(data=content)
mes={}
if c.is_valid():
c.save()
mes['code'] = RET.OK
else:
print(c.errors)
mes['code'] = RET.DATAERR
return Response(mes)
# Delete a goods record
def deleteGoods(request):
id=request.GET.get('id')
Goods.objects.get(id=id).delete()
return render(request, "admin/goods_list.html")
# Add-news API
class SubmitAddNews(APIView):
def post(self,request):
content=request.data
print(content)
try:
            id = int(content['id'])  # extract the id
except:
id = 0
if id > 0:
print(id)
nn = News.objects.get(id=id)
d = NewsSerializer(nn, data=content)
            # Editing an existing news item
else:
d = NewsSerializer(data=content)
mes = {}
if d.is_valid():
try:
d.save()
mes['code'] = RET.OK
except:
mes['code'] = RET.DATAERR
else:
mes['code'] = RET.DATAERR
return Response(mes)
# Delete a news item
def deleteNews(request):
id=request.GET.get('id')
News.objects.get(id=id).delete()
return render(request,"admin/news_list.html")
# Delete a banner
def deleteBanners(request):
id=request.GET.get('id')
Banners.objects.get(id=id).delete()
return render(request,"admin/point_list.html")
# Add-banner API
class SubmitAddBanner(APIView):
def post(self,request):
content=request.data
print(content)
try:
            id = int(content['id'])  # extract the id
except:
id = 0
if id > 0:
print(id)
nn = Banners.objects.get(id=id)
d = BannersSerializer(nn, data=content)
            # Editing an existing banner
else:
d = BannersSerializer(data=content)
mes = {}
if d.is_valid():
try:
d.save()
mes['code'] = RET.OK
except:
mes['code'] = RET.DATAERR
else:
mes['code'] = RET.DATAERR
return Response(mes)
def user_count(request):
return render(request,'admin/user_count.html')
| 2.1875 | 2 |
learnedevolution/targets/covariance/amalgam_covariance.py | realtwister/LearnedEvolution | 0 | 7293 | <filename>learnedevolution/targets/covariance/amalgam_covariance.py
import numpy as np;
from .covariance_target import CovarianceTarget;
class AMaLGaMCovariance(CovarianceTarget):
_API=2.
def __init__(self,
theta_SDR = 1.,
eta_DEC = 0.9,
alpha_Sigma = [-1.1,1.2,1.6],
NIS_MAX = 25,
tau = 0.35,
epsilon = 1e-30,
condition_number_epsilon = 1e6):
self.epsilon = epsilon;
self.theta_SDR = theta_SDR;
self.eta_DEC = eta_DEC;
self.eta_INC = 1./eta_DEC;
self.NIS_MAX = NIS_MAX;
self.alpha_Sigma = alpha_Sigma;
self.tau = tau;
self.condition_number_epsilon = condition_number_epsilon;
def _reset(self, initial_mean, initial_covariance):
self.mean = initial_mean;
self.old_mean = initial_mean;
self.covariance = initial_covariance;
self.d = len(initial_mean);
self.Sigma = initial_covariance;
self.c_multiplier = 1.;
self.NIS = 0;
self.t = 0;
self.best_f = -float('inf');
def _update_mean(self, mean):
self.old_mean = self.mean;
self.mean = mean;
def _calculate(self, population):
self.update_matrix(population);
self.update_multiplier(population);
self.t += 1;
self.best_f = max(self.best_f, np.max(population.fitness));
new_covariance = self.Sigma*self.c_multiplier;
u,s,_ = np.linalg.svd(new_covariance);
s_max = np.max(s)
s_max = np.clip(s_max, self.epsilon*self.condition_number_epsilon, 1e3);
s = np.clip(s, s_max/self.condition_number_epsilon, s_max);
        new_covariance = u * s @ u.T  # rebuild U diag(s) U^T from the clipped SVD
self.covariance = new_covariance
return self.covariance;
def update_matrix(self, population):
F = population.fitness;
sel_idx = F.argsort()[-np.ceil(self.tau*len(population)).astype(int):][::-1]
alpha = self.alpha_Sigma;
eta_Sigma = 1.-np.exp(alpha[0]*len(sel_idx)**alpha[1]/self.d**alpha[2]);
current_update = np.zeros((self.d,self.d));
selection = population.population[sel_idx];
for individual in selection:
delta = individual-self.old_mean;
current_update += np.outer(delta,delta)
current_update /= (selection.shape[0]);
self.Sigma *= (1-eta_Sigma);
self.Sigma += eta_Sigma*current_update;
# We need to ensure the condition number is OK to avoid singular matrix.
u,s,_ = np.linalg.svd(self.Sigma);
s_max = np.max(s)
s_max = np.clip(s_max, self.epsilon*self.condition_number_epsilon, None);
s = np.clip(s, s_max/self.condition_number_epsilon, s_max);
        self.Sigma = u * s @ u.T  # rebuild U diag(s) U^T from the clipped SVD
def update_multiplier(self, population):
if np.any(population.fitness>self.best_f):
self.NIS = 0;
self.c_multiplier = max(1., self.c_multiplier);
self.SDR(population);
else:
if self.c_multiplier <= 1:
self.NIS += 1;
if self.c_multiplier > 1 or self.NIS >= self.NIS_MAX:
self.c_multiplier *= self.eta_DEC;
if self.c_multiplier < 1 and self.NIS < self.NIS_MAX:
self.c_multiplier = 1;
def SDR(self, population):
x_avg = np.mean(population.population[population.fitness>self.best_f], axis=0);
delta = np.abs(self.mean-x_avg);
variances = np.abs(np.diag(self.covariance));
if np.any(delta/np.sqrt(variances)>self.theta_SDR):
self.c_multiplier *= self.eta_INC;
def _calculate_deterministic(self,population):
return self._calculate(population);
def _terminating(self, population):
pass;
@classmethod
def _get_kwargs(cls, config, key = ""):
cls._config_required(
'theta_SDR',
'eta_DEC',
'alpha_Sigma',
'NIS_MAX',
'tau',
'epsilon',
'condition_number_epsilon'
)
cls._config_defaults(
theta_SDR = 1.,
eta_DEC = 0.9,
alpha_Sigma = [-1.1,1.2,1.6],
NIS_MAX = 25,
tau = 0.35,
epsilon = 1e-30,
condition_number_epsilon = 1e6
)
return super()._get_kwargs(config, key = key);
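# --- Hedged usage sketch (illustrative, based on assumptions) ----------------
# The methods above only read `population.population` (an N x d array),
# `population.fitness` (a length-N array) and `len(population)`, so a minimal
# stand-in population is enough to exercise one covariance update.  The real
# driver lives in the surrounding framework; this also assumes the
# CovarianceTarget base class adds no extra construction requirements.
class _StubPopulation:
    def __init__(self, individuals, fitness):
        self.population = np.asarray(individuals)
        self.fitness = np.asarray(fitness)
    def __len__(self):
        return len(self.fitness)
def _demo_amalgam_update():
    d = 2
    target = AMaLGaMCovariance()
    target._reset(np.zeros(d), np.eye(d))
    rng = np.random.RandomState(0)
    individuals = rng.normal(size=(10, d))
    fitness = -np.sum(individuals ** 2, axis=1)  # toy objective: closer to 0 is better
    target._update_mean(individuals[fitness.argmax()])
    return target._calculate(_StubPopulation(individuals, fitness))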
| 2.3125 | 2 |
binding.gyp | terrorizer1980/fs-admin | 25 | 7294 | <reponame>terrorizer1980/fs-admin
{
'target_defaults': {
'win_delay_load_hook': 'false',
'conditions': [
['OS=="win"', {
'msvs_disabled_warnings': [
4530, # C++ exception handler used, but unwind semantics are not enabled
4506, # no definition for inline function
],
}],
],
},
'targets': [
{
'target_name': 'fs_admin',
'defines': [
"NAPI_VERSION=<(napi_build_version)",
],
'cflags!': [ '-fno-exceptions' ],
'cflags_cc!': [ '-fno-exceptions' ],
'xcode_settings': { 'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'CLANG_CXX_LIBRARY': 'libc++',
'MACOSX_DEPLOYMENT_TARGET': '10.7',
},
'msvs_settings': {
'VCCLCompilerTool': { 'ExceptionHandling': 1 },
},
'sources': [
'src/main.cc',
],
'include_dirs': [
'<!(node -p "require(\'node-addon-api\').include_dir")',
],
'conditions': [
['OS=="win"', {
'sources': [
'src/fs-admin-win.cc',
],
'libraries': [
'-lole32.lib',
'-lshell32.lib',
],
}],
['OS=="mac"', {
'sources': [
'src/fs-admin-darwin.cc',
],
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Security.framework',
],
}],
['OS=="linux"', {
'sources': [
'src/fs-admin-linux.cc',
],
}],
],
}
]
}
| 1.320313 | 1 |
src/botwtracker/settings.py | emoritzx/botw-tracker | 7 | 7295 | """Django settings for botwtracker project.
Copyright (c) 2017, <NAME>.
botw-tracker is an open source software project released under the MIT License.
See the accompanying LICENSE file for terms.
"""
import os
from .config_local import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_DIR = os.path.join(BASE_DIR, '..', 'data')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'quests.apps.QuestsConfig',
'user.apps.UserConfig',
]
if USE_SIGNUP:
INSTALLED_APPS.append('signup.apps.SignupConfig')
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'botwtracker.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'botwtracker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(DATA_DIR, 'sqlite3.db'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "..", "static")
]
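# --- Hedged sketch of the expected config_local module (assumption) ----------
# Everything environment-specific is pulled in by the star import at the top.
# USE_SIGNUP is clearly required by this file; SECRET_KEY, DEBUG and
# ALLOWED_HOSTS are the usual Django settings such a local config provides and
# are listed here only as an assumption.
#
#     # config_local.py
#     SECRET_KEY = 'replace-with-a-real-secret'
#     DEBUG = True
#     ALLOWED_HOSTS = ['localhost', '127.0.0.1']
#     USE_SIGNUP = True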
| 1.554688 | 2 |
app/domains/users/views.py | Geo-Gabriel/eccomerce_nestle_mongodb | 3 | 7296 | <gh_stars>1-10
from flask import Blueprint, request, jsonify
from app.domains.users.actions import get_all_users, insert_user, get_user_by_id, update_user, delete_user
app_users = Blueprint('app.users', __name__)
@app_users.route('/users', methods=['GET'])
def get_users():
return jsonify([user.serialize() for user in get_all_users()]), 200
@app_users.route('/users/<id>', methods=["GET"])
def get_by_id(id: str):
user = get_user_by_id(id_user=id)
return jsonify(user.serialize()), 200
@app_users.route('/users', methods=["POST"])
def post_user():
payload = request.get_json()
user = insert_user(payload)
return jsonify(user.serialize()), 201
@app_users.route('/users/<id>', methods=["PUT"])
def update(id: str):
payload = request.get_json()
user = update_user(id_user=id, data=payload)
return jsonify(user.serialize()), 200
@app_users.route('/users/<id>', methods=["DELETE"])
def delete(id: str):
delete_user(id_user=id)
return jsonify({"message": "user deleted"}), 200
| 2.609375 | 3 |
legacy_code/tf_cnn_siamese/model.py | PerryXDeng/project_punyslayer | 2 | 7297 | import legacy_code.tf_cnn_siamese.configurations as conf
import tensorflow as tf
import numpy as np
def construct_cnn(x, conv_weights, conv_biases, fc_weights, fc_biases,
dropout = False):
"""
constructs the convolution graph for one image
:param x: input node
:param conv_weights: convolution weights
:param conv_biases: relu biases for each convolution
:param fc_weights: fully connected weights, only one set should be used here
:param fc_biases: fully connected biases, only one set should be used here
:param dropout: whether to add a dropout layer for the fully connected layer
:return: output node
"""
k = conf.NUM_POOL
for i in range(conf.NUM_CONVS):
x = tf.nn.conv2d(x, conv_weights[i], strides=[1, 1, 1, 1], padding='SAME',
data_format=conf.DATA_FORMAT)
x = tf.nn.relu(tf.nn.bias_add(x, conv_biases[i],
data_format=conf.DATA_FORMAT))
if k > 0:
x = tf.nn.max_pool(x, ksize=conf.POOL_KDIM,strides=conf.POOL_KDIM,
padding='VALID', data_format=conf.DATA_FORMAT)
k -= 1
# Reshape the feature map cuboids into vectors for fc layers
features_shape = x.get_shape().as_list()
n = features_shape[0]
m = features_shape[1] * features_shape[2] * features_shape[3]
features = tf.reshape(x, [n, m])
# last fc_weights determine output dimensions
fc = tf.nn.sigmoid(tf.matmul(features, fc_weights[0]) + fc_biases[0])
# for actual training
if dropout:
fc = tf.nn.dropout(fc, conf.DROP_RATE)
return fc
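# --- Hedged helper sketch (illustration only) --------------------------------
# construct_cnn flattens the final feature map into conf.FEATURE_MAP_SIZE
# columns, so that constant must equal the pooled spatial area times the last
# filter count.  The helper below states that invariant explicitly; the pool
# stride argument is assumed to mirror the spatial entries of conf.POOL_KDIM.
def _expected_feature_map_size(height, width, pool_stride, num_pools, last_filters):
  pooled_h = height // (pool_stride ** num_pools)
  pooled_w = width // (pool_stride ** num_pools)
  return pooled_h * pooled_w * last_filters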
def construct_logits_model(x_1, x_2, conv_weights, conv_biases, fc_weights,
fc_biases, dropout=False):
"""
constructs the logit node before the final sigmoid activation
:param x_1: input image node 1
:param x_2: input image node 2
:param conv_weights: nodes for convolution weights
:param conv_biases: nodes for convolution relu biases
:param fc_weights: nodes for fully connected weights
:param fc_biases: nodes for fully connected biases
:param dropout: whether to include dropout layers
:return: logit node
"""
with tf.name_scope("twin_1"):
twin_1 = construct_cnn(x_1, conv_weights, conv_biases,
fc_weights, fc_biases, dropout)
with tf.name_scope("twin_2"):
twin_2 = construct_cnn(x_2, conv_weights, conv_biases,
fc_weights, fc_biases, dropout)
# logits on squared difference
sq_diff = tf.squared_difference(twin_1, twin_2)
logits = tf.matmul(sq_diff, fc_weights[1]) + fc_biases[1]
return logits
def construct_full_model(x_1, x_2, conv_weights, conv_biases,fc_weights,
fc_biases):
"""
constructs the graph for the neural network without loss node or optimizer
:param x_1: input image node 1
:param x_2: input image node 2
:param conv_weights: nodes for convolution weights
:param conv_biases: nodes for convolution relu biases
:param fc_weights: nodes for fully connected weights
:param fc_biases: nodes for fully connected biases
:return: sigmoid output node
"""
logits = construct_logits_model(x_1, x_2, conv_weights, conv_biases,
fc_weights, fc_biases, dropout=False)
return tf.nn.sigmoid(logits)
def construct_loss_optimizer(x_1, x_2, labels, conv_weights, conv_biases,
fc_weights, fc_biases, dropout=False,
lagrange=False):
"""
constructs the neural network graph with the loss and optimizer node
:param x_1: input image node 1
:param x_2: input image node 2
:param labels: expected output
:param conv_weights: nodes for convolution weights
:param conv_biases: nodes for convolution relu biases
:param fc_weights: nodes for fully connected weights
:param fc_biases: nodes for fully connected biases
:param dropout: whether to use dropout
:param lagrange: whether to apply constraints
:return: the node for the optimizer as well as the loss
"""
logits = construct_logits_model(x_1, x_2, conv_weights, conv_biases,
fc_weights, fc_biases, dropout)
# cross entropy loss on sigmoids of joined output and labels
loss_vec = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
logits=logits)
loss = tf.reduce_mean(loss_vec)
if lagrange:
# constraints on sigmoid layers
regularizers = (tf.nn.l2_loss(fc_weights[0]) + tf.nn.l2_loss(fc_biases[0]) +
tf.nn.l2_loss(fc_weights[1]) + tf.nn.l2_loss(fc_biases[1]))
loss += conf.LAMBDA * regularizers
# setting up the optimization
batch = tf.Variable(0, dtype=conf.DTYPE)
# vanilla momentum optimizer
# accumulation = momentum * accumulation + gradient
# every epoch: variable -= learning_rate * accumulation
# batch_total = labels.shape[0]
# learning_rate = tf.train.exponential_decay(
# conf.BASE_LEARNING_RATE,
# batch * conf.BATCH_SIZE, # Current index into the dataset.
# batch_total,
# conf.DECAY_RATE, # Decay rate.
# staircase=True)
# trainer = tf.train.MomentumOptimizer(learning_rate, conf.MOMENTUM)\
# .minimize(loss, global_step=batch)
# adaptive momentum estimation optimizer
# default params: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08
trainer = tf.train.AdamOptimizer().minimize(loss, global_step=batch)
return trainer, loss
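# --- Hedged illustration (not the optimizer actually used above) -------------
# The commented-out MomentumOptimizer block follows the classical update rule
# spelled out in its comments; a plain-Python restatement of one step:
def _momentum_step(variable, gradient, accumulation, learning_rate, momentum):
  accumulation = momentum * accumulation + gradient
  variable = variable - learning_rate * accumulation
  return variable, accumulation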
def construct_joined_model(twin_1, twin_2, fc_weights, fc_biases):
"""
constructs joined model for two sets of extracted features
:param twin_1: features node extracted from first image
:param twin_2: features node extracted from second image
:param fc_weights: nodes for fully connected weights
:param fc_biases: nodes for fully connected biases
:return: logit node
"""
# logits on squared difference
sq_diff = tf.squared_difference(twin_1, twin_2)
logits = tf.matmul(sq_diff, fc_weights[1]) + fc_biases[1]
return tf.nn.sigmoid(logits)
def initialize_weights():
"""
initializes the variable tensors to be trained in the neural network, decides
network dimensions
:return: nodes for the variables
"""
# twin network convolution and pooling variables
conv_weights = []
conv_biases = []
fc_weights = []
fc_biases = []
for i in range(conf.NUM_CONVS):
if i == 0:
inp = conf.NUM_CHANNELS
else:
inp = conf.NUM_FILTERS[i - 1]
out = conf.NUM_FILTERS[i]
conv_dim = [conf.FILTER_LEN, conf.FILTER_LEN, inp, out]
weight_name = "twin_conv" + str(i + 1) + "_weights"
bias_name = "twin_conv" + str(i + 1) + "_biases"
conv_weights.append(tf.Variable(tf.truncated_normal(conv_dim, stddev=0.1,
seed=conf.SEED, dtype=conf.DTYPE),
name=weight_name))
conv_biases.append(tf.Variable(tf.zeros([out], dtype=conf.DTYPE),
name=bias_name))
  # twin network fully connected variables
inp = conf.FEATURE_MAP_SIZE
out = conf.NUM_FC_NEURONS
fc_weights.append(tf.Variable(tf.truncated_normal([inp, out], stddev=0.1,
seed=conf.SEED, dtype=conf.DTYPE),
name="twin_fc_weights"))
fc_biases.append(tf.Variable(tf.constant(0.1, shape=[out], dtype=conf.DTYPE),
name="twin_fc_biases"))
# joined network fully connected variables
inp = conf.NUM_FC_NEURONS
out = 1
fc_weights.append(tf.Variable(tf.truncated_normal([inp, out], stddev=0.1,
seed=conf.SEED, dtype=conf.DTYPE),
name="joined_fc_weights"))
fc_biases.append(tf.Variable(tf.constant(0.1, shape=[out], dtype=conf.DTYPE),
name="joined_fc_biases"))
return conv_weights, conv_biases, fc_weights, fc_biases
def num_params():
"""
calculates the number of parameters in the model
:return: m, number of parameters
"""
m = 0
for i in range(conf.NUM_CONVS):
if i == 0:
inp = conf.NUM_CHANNELS
else:
inp = conf.NUM_FILTERS[i - 1]
out = conf.NUM_FILTERS[i]
conv_dim = [conf.FILTER_LEN, conf.FILTER_LEN, inp, out]
m += np.prod(conv_dim) + np.prod(out)
inp = conf.FEATURE_MAP_SIZE
out = conf.NUM_FC_NEURONS
m += inp * out + out
inp = conf.NUM_FC_NEURONS
out = 1
m += inp * out + out
return m
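# --- Hedged usage sketch (assumed wiring; TF1-style graph mode) ---------------
# Shows how the pieces above fit together: two image placeholders, a label
# placeholder, the shared variables, and the training / inference nodes.  The
# shape argument must follow conf.DATA_FORMAT; the names used here are
# assumptions, not part of the original module.
def _build_training_graph(image_shape):
  x_1 = tf.placeholder(conf.DTYPE, shape=image_shape)
  x_2 = tf.placeholder(conf.DTYPE, shape=image_shape)
  labels = tf.placeholder(conf.DTYPE, shape=(image_shape[0], 1))
  conv_w, conv_b, fc_w, fc_b = initialize_weights()
  trainer, loss = construct_loss_optimizer(x_1, x_2, labels, conv_w, conv_b,
                                           fc_w, fc_b, dropout=True)
  prediction = construct_full_model(x_1, x_2, conv_w, conv_b, fc_w, fc_b)
  return trainer, loss, prediction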
if __name__ == "__main__":
print("Number of Parameters: " + str(num_params()))
| 3.0625 | 3 |
tests/test_utils_log.py | FingerCrunch/scrapy | 41,267 | 7298 | import sys
import logging
import unittest
from testfixtures import LogCapture
from twisted.python.failure import Failure
from scrapy.utils.log import (failure_to_exc_info, TopLevelFormatter,
LogCounterHandler, StreamLogger)
from scrapy.utils.test import get_crawler
from scrapy.extensions import telnet
class FailureToExcInfoTest(unittest.TestCase):
def test_failure(self):
try:
0 / 0
except ZeroDivisionError:
exc_info = sys.exc_info()
failure = Failure()
self.assertTupleEqual(exc_info, failure_to_exc_info(failure))
def test_non_failure(self):
self.assertIsNone(failure_to_exc_info('test'))
class TopLevelFormatterTest(unittest.TestCase):
def setUp(self):
self.handler = LogCapture()
self.handler.addFilter(TopLevelFormatter(['test']))
def test_top_level_logger(self):
logger = logging.getLogger('test')
with self.handler as log:
logger.warning('test log msg')
log.check(('test', 'WARNING', 'test log msg'))
def test_children_logger(self):
logger = logging.getLogger('test.test1')
with self.handler as log:
logger.warning('test log msg')
log.check(('test', 'WARNING', 'test log msg'))
def test_overlapping_name_logger(self):
logger = logging.getLogger('test2')
with self.handler as log:
logger.warning('test log msg')
log.check(('test2', 'WARNING', 'test log msg'))
def test_different_name_logger(self):
logger = logging.getLogger('different')
with self.handler as log:
logger.warning('test log msg')
log.check(('different', 'WARNING', 'test log msg'))
class LogCounterHandlerTest(unittest.TestCase):
def setUp(self):
settings = {'LOG_LEVEL': 'WARNING'}
if not telnet.TWISTED_CONCH_AVAILABLE:
# disable it to avoid the extra warning
settings['TELNETCONSOLE_ENABLED'] = False
self.logger = logging.getLogger('test')
self.logger.setLevel(logging.NOTSET)
self.logger.propagate = False
self.crawler = get_crawler(settings_dict=settings)
self.handler = LogCounterHandler(self.crawler)
self.logger.addHandler(self.handler)
def tearDown(self):
self.logger.propagate = True
self.logger.removeHandler(self.handler)
def test_init(self):
self.assertIsNone(self.crawler.stats.get_value('log_count/DEBUG'))
self.assertIsNone(self.crawler.stats.get_value('log_count/INFO'))
self.assertIsNone(self.crawler.stats.get_value('log_count/WARNING'))
self.assertIsNone(self.crawler.stats.get_value('log_count/ERROR'))
self.assertIsNone(self.crawler.stats.get_value('log_count/CRITICAL'))
def test_accepted_level(self):
self.logger.error('test log msg')
self.assertEqual(self.crawler.stats.get_value('log_count/ERROR'), 1)
def test_filtered_out_level(self):
self.logger.debug('test log msg')
self.assertIsNone(self.crawler.stats.get_value('log_count/INFO'))
class StreamLoggerTest(unittest.TestCase):
def setUp(self):
self.stdout = sys.stdout
logger = logging.getLogger('test')
logger.setLevel(logging.WARNING)
sys.stdout = StreamLogger(logger, logging.ERROR)
def tearDown(self):
sys.stdout = self.stdout
def test_redirect(self):
with LogCapture() as log:
print('test log msg')
log.check(('test', 'ERROR', 'test log msg'))
| 2.390625 | 2 |
astar.py | jeff012345/clue-part-duo | 0 | 7299 | <reponame>jeff012345/clue-part-duo
import heapq
from typing import Dict, List, Optional
from definitions import RoomPosition, Position
import random
import sys
class PriorityQueue:
def __init__(self):
        self.elements: List = []
def empty(self) -> bool:
return len(self.elements) == 0
def put(self, item, priority: float):
heapq.heappush(self.elements, (priority, random.randint(1, 9999999999999999), item))
def get(self):
return heapq.heappop(self.elements)[2]
def heuristic(a: Position, b: Position) -> float:
if a == b:
return 0
if isinstance(a, RoomPosition):
if isinstance(b, RoomPosition):
raise Exception("Cannot calculate heuristic between two rooms")
return 1 # (1^2 + 0^2)
if isinstance(b, RoomPosition):
return 1 # (1^2 + 0^2)
# both are Space
return (a.col - b.col) ** 2 + (a.row - b.row) ** 2
def a_star_search(start: Position, goal: Position) -> List[Position]:
if start is None:
raise Exception("Start is None")
if goal is None:
raise Exception("goal is None")
if start == goal:
raise Exception('Start and goal are the same')
frontier = PriorityQueue()
frontier.put(start, 0)
came_from: Dict[Position, Optional[Position]] = {}
cost_so_far: Dict[Position, float] = {}
came_from[start] = None
cost_so_far[start] = 0
while not frontier.empty():
current: Position = frontier.get()
if current == goal:
break
for next in current.connections:
if isinstance(next, RoomPosition) and next != goal:
# once you enter a room, it's a dead end
continue
new_cost = cost_so_far[current] + 1
if next not in cost_so_far or new_cost < cost_so_far[next]:
cost_so_far[next] = new_cost
priority = new_cost + heuristic(goal, next)
frontier.put(next, priority)
came_from[next] = current
    if goal not in came_from:
print(str(start) + " to " + str(goal))
raise Exception('no path found')
shortest_path = []
prev = goal
while prev is not None:
shortest_path.append(prev)
prev = came_from[prev]
shortest_path.reverse()
return shortest_path
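# --- Hedged usage sketch (illustrative only) ----------------------------------
# Position/RoomPosition come from `definitions`, so the real board is built
# elsewhere.  The stand-in node below provides just the attributes the search
# actually reads: `row`, `col` and `connections`.
class _GridNode:
    def __init__(self, row, col):
        self.row = row
        self.col = col
        self.connections = []
def _demo_search():
    a, b, c, d = _GridNode(0, 0), _GridNode(0, 1), _GridNode(0, 2), _GridNode(1, 1)
    a.connections = [b]
    b.connections = [a, c, d]
    c.connections = [b]
    d.connections = [b]
    return a_star_search(a, c)  # expected: [a, b, c]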
| 3.5625 | 4 |