code (string, 2k to 1.04M chars) | repo_path (string, 5 to 517 chars) | parsed_code (string, 0 to 1.04M chars) | quality_prob (float64, 0.02 to 0.95) | learning_prob (float64, 0.02 to 0.93)
---|---|---|---|---
from __future__ import print_function
# summary provider for CF(Mutable)BitVector
import lldb
import ctypes
import lldb.runtime.objc.objc_runtime
import lldb.formatters.metrics
import lldb.formatters.Logger
# first define some utility functions
def byte_index(abs_pos):
logger = lldb.formatters.Logger.Logger()
return abs_pos // 8  # floor division so the byte offset stays an int under Python 3
def bit_index(abs_pos):
logger = lldb.formatters.Logger.Logger()
return abs_pos & 7
def get_bit(byte, index):
logger = lldb.formatters.Logger.Logger()
if index < 0 or index > 7:
return None
return (byte >> (7 - index)) & 1
def grab_array_item_data(pointer, index):
logger = lldb.formatters.Logger.Logger()
return pointer.GetPointeeData(index, 1)
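# Worked example for the helpers above (illustrative, not part of the original file):
# the 12th bit has abs_pos == 11, so byte_index(11) == 1 and bit_index(11) == 3;
# get_bit(0b10110000, 3) == 1 because get_bit counts bits from the most significant end.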
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
# despite the similarity to synthetic children providers, these classes are not
# trying to provide anything but a summary for a CF*BitVector, so they need not
# obey the interface specification for synthetic children providers
class CFBitVectorKnown_SummaryProvider:
def adjust_for_architecture(self):
logger = lldb.formatters.Logger.Logger()
self.uiint_size = self.sys_params.types_cache.NSUInteger.GetByteSize()
pass
def __init__(self, valobj, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not(self.sys_params.types_cache.NSUInteger):
if self.sys_params.is_64_bit:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedLong)
else:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeUnsignedInt)
if not(self.sys_params.types_cache.charptr):
self.sys_params.types_cache.charptr = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeChar).GetPointerType()
self.update()
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture()
# we skip the CFRuntimeBase
# then the next CFIndex is the count
# then we skip another CFIndex and then we get at a byte array
# that wraps the individual bits
def contents(self):
logger = lldb.formatters.Logger.Logger()
count_vo = self.valobj.CreateChildAtOffset(
"count",
self.sys_params.cfruntime_size,
self.sys_params.types_cache.NSUInteger)
count = count_vo.GetValueAsUnsigned(0)
if count == 0:
return '(empty)'
array_vo = self.valobj.CreateChildAtOffset(
"data",
self.sys_params.cfruntime_size +
2 *
self.uiint_size,
self.sys_params.types_cache.charptr)
data_list = []
cur_byte_pos = None
for i in range(0, count):
if cur_byte_pos is None:
cur_byte_pos = byte_index(i)
cur_byte = grab_array_item_data(array_vo, cur_byte_pos)
cur_byte_val = cur_byte.uint8[0]
else:
byte_pos = byte_index(i)
# do not fetch the pointee data every single time through
if byte_pos != cur_byte_pos:
cur_byte_pos = byte_pos
cur_byte = grab_array_item_data(array_vo, cur_byte_pos)
cur_byte_val = cur_byte.uint8[0]
bit = get_bit(cur_byte_val, bit_index(i))
if (i % 4) == 0:
data_list.append(' ')
if bit == 1:
data_list.append('1')
else:
data_list.append('0')
return ''.join(data_list)
class CFBitVectorUnknown_SummaryProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
self.update()
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture()
def contents(self):
logger = lldb.formatters.Logger.Logger()
return '<unable to summarize this CFBitVector>'
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
if wrapper:
return wrapper
name_string = class_data.class_name()
actual_name = name_string
logger >> "name string got was " + \
str(name_string) + " but actual name is " + str(actual_name)
if class_data.is_cftype():
# CFBitVectorRef does not expose an actual NSWrapper type, so we have to check that this is
# an NSCFType and then check we are a pointer-to CFBitVectorRef
valobj_type = valobj.GetType()
if valobj_type.IsValid() and valobj_type.IsPointerType():
valobj_type = valobj_type.GetPointeeType()
if valobj_type.IsValid():
actual_name = valobj_type.GetName()
if actual_name == '__CFBitVector' or actual_name == '__CFMutableBitVector':
wrapper = CFBitVectorKnown_SummaryProvider(
valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
else:
wrapper = CFBitVectorUnknown_SummaryProvider(
valobj, class_data.sys_params)
print(actual_name)
else:
wrapper = CFBitVectorUnknown_SummaryProvider(
valobj, class_data.sys_params)
print(name_string)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
name_string)
return wrapper
def CFBitVector_SummaryProvider(valobj, dict):
logger = lldb.formatters.Logger.Logger()
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
return provider.message()
try:
summary = provider.contents()
except:
summary = None
logger >> "summary got from provider: " + str(summary)
if summary is None or summary == '':
summary = '<variable is not CFBitVector>'
return summary
return 'Summary Unavailable'
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F CFBitVector.CFBitVector_SummaryProvider CFBitVectorRef CFMutableBitVectorRef")
|
lldb/examples/summaries/cocoa/CFBitVector.py
|
| 0.587943 | 0.32748 |
import re
import math
from time import time
HEX_RE = re.compile("#([0-9a-fA-F]{3}|[0-9a-fA-F]{6})")
class Py3status:
"""
"""
# available configuration parameters
cycle_time = 1
force = False
format = "{output}"
gradient = [
"#FF0000",
"#FFFF00",
"#00FF00",
"#00FFFF",
"#0000FF",
"#FF00FF",
"#FF0000",
]
multi_color = True
steps = 10
class Meta:
container = True
def post_config_hook(self):
def from_hex(color):
"""
Convert hex color #xxx or #xxxxxx to [r, g, b].
"""
if not HEX_RE.match(color):
color = "#FFF"
if len(color) == 7:
return (int(color[1:3], 16), int(color[3:5], 16), int(color[5:], 16))
return (
int(color[1], 16) * 17,
int(color[2], 16) * 17,
int(color[3], 16) * 17,
)
def to_hex(color):
"""
Convert [r, g, b] to hex.
"""
return "#{:02X}{:02X}{:02X}".format(
int(color[0]), int(color[1]), int(color[2])
)
def make_color(c1, c2, t):
"""
Generate a mid color between c1 and c2.
"""
def fade(i):
a = c1[i]
b = c2[i]
x = b * t
x += a * (1 - t)
return x
c1 = from_hex(c1)
c2 = from_hex(c2)
return (fade(0), fade(1), fade(2))
colors = []
if self.steps == 1:
colors = [to_hex(from_hex(x)) for x in self.gradient]
else:
for i in range(len(self.gradient) - 1):
for j in range(self.steps):
colors.append(
to_hex(
make_color(
self.gradient[i], self.gradient[i + 1], j / (self.steps)
)
)
)
self.colors = colors
self.active_color = 0
self._set_cycle_time()
def _set_cycle_time(self):
"""
Set next cycle update time synced to nearest second or 0.1 of second.
"""
now = time()
try:
cycle_time = now - self._cycle_time
if cycle_time < 0:
cycle_time = 0
except AttributeError:
cycle_time = 0
cycle_time += self.cycle_time
if cycle_time == int(cycle_time):
self._cycle_time = math.ceil(now + cycle_time)
else:
self._cycle_time = math.ceil((now + cycle_time) * 10) / 10
def _get_current_output(self):
"""
Get child modules output.
"""
output = []
for item in self.items:
out = self.py3.get_output(item)
if out and "separator" not in out[-1]:
out[-1]["separator"] = True
output += out
return output
def rainbow(self):
"""
Make a rainbow!
"""
if not self.items:
return {"full_text": "", "cached_until": self.py3.CACHE_FOREVER}
if time() >= self._cycle_time - (self.cycle_time / 10):
self.active_color = (self.active_color + 1) % len(self.colors)
self._set_cycle_time()
color = self.colors[self.active_color]
content = self._get_current_output()
output = []
if content:
step = len(self.colors) // len(content)
for index, item in enumerate(content):
if self.multi_color:
offset = (self.active_color + (index * step)) % len(self.colors)
color = self.colors[offset]
obj = item.copy()
if self.force or not obj.get("color"):
obj["color"] = color
output.append(obj)
composites = {"output": self.py3.composite_create(output)}
rainbow = self.py3.safe_format(self.format, composites)
return {"cached_until": self._cycle_time, "full_text": rainbow}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
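# Worked example of the gradient maths above (illustrative, not part of the original module):
# with the default 7-stop gradient and steps = 10, post_config_hook builds
# (7 - 1) * 10 = 60 colours; make_color("#FF0000", "#FFFF00", 0.5) fades each RGB channel
# halfway to (255, 127.5, 0), which to_hex renders as "#FF7F00".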
|
py3status/modules/rainbow.py
|
| 0.425367 | 0.282668 |
import os
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import (QDialog, QLabel, QDialogButtonBox, QLineEdit, QCheckBox,
QHBoxLayout, QVBoxLayout, QFormLayout, QFileDialog)
from lanzou.gui.qss import dialog_qss_style
from lanzou.gui.others import MyLineEdit, AutoResizingTextEdit
from lanzou.debug import SRC_DIR
class SettingDialog(QDialog):
saved = pyqtSignal()
def __init__(self, parent=None):
super(SettingDialog, self).__init__(parent)
self._config = object
self.download_threads = 3
self.max_size = 100
self.timeout = 5
self.dl_path = None
self.time_fmt = False
self.to_tray = False
self.watch_clipboard = False
self.debug = False
self.set_pwd = False
self.set_desc = False
self.upload_delay = 0
self.allow_big_file = False
self.upgrade = True
self.pwd = ""
self.desc = ""
self.initUI()
self.setStyleSheet(dialog_qss_style)
def open_dialog(self, config):
""""打开前先更新一下显示界面"""
self._config = config
if self._config.name:
self.setWindowTitle(f"设置 <{self._config.name}>")
else:
self.setWindowTitle("设置")
self.cwd = self._config.path
self.set_values()
self.exec()
def show_values(self):
"""控件显示值"""
self.download_threads_var.setText(str(self.download_threads))
self.max_size_var.setText(str(self.max_size))
self.timeout_var.setText(str(self.timeout))
self.dl_path_var.setText(str(self.dl_path))
self.time_fmt_box.setChecked(self.time_fmt)
self.to_tray_box.setChecked(self.to_tray)
self.watch_clipboard_box.setChecked(self.watch_clipboard)
self.debug_box.setChecked(self.debug)
self.set_pwd_box.setChecked(self.set_pwd)
self.set_pwd_var.setEnabled(self.set_pwd)
self.set_pwd_var.setText(self.pwd)
self.set_desc_box.setChecked(self.set_desc)
self.set_desc_var.setEnabled(self.set_desc)
self.set_desc_var.setText(self.desc)
self.upload_delay_var.setText(str(self.upload_delay))
self.big_file_box.setChecked(self.allow_big_file)
self.big_file_box.setText(f"允许上传超过 {self.max_size}MB 的大文件")
self.big_file_box.setDisabled(True)  # disable the "allow big file" option (feature retired)
self.upgrade_box.setChecked(self.upgrade)
def set_values(self, reset=False):
"""设置控件对应变量初始值"""
settings = self._config.default_settings if reset else self._config.settings
self.download_threads = settings["download_threads"]
self.max_size = settings["max_size"]
self.timeout = settings["timeout"]
self.dl_path = settings["dl_path"]
self.time_fmt = settings["time_fmt"]
self.to_tray = settings["to_tray"]
self.watch_clipboard = settings["watch_clipboard"]
self.debug = settings["debug"]
self.set_pwd = settings["set_pwd"]
self.pwd = settings["pwd"]
self.set_desc = settings["set_desc"]
self.desc = settings["desc"]
self.upload_delay = settings["upload_delay"]
if 'upgrade' in settings:
self.upgrade = settings["upgrade"]
self.show_values()
def get_values(self) -> dict:
"""读取输入控件的值"""
if self.download_threads_var.text():
self.download_threads = int(self.download_threads_var.text())
if self.max_size_var.text():
self.max_size = int(self.max_size_var.text())
if self.timeout_var.text():
self.timeout = int(self.timeout_var.text())
if self.upload_delay_var.text():
self.upload_delay = int(self.upload_delay_var.text())
self.dl_path = str(self.dl_path_var.text())
self.pwd = str(self.set_pwd_var.toPlainText())
self.desc = str(self.set_desc_var.toPlainText())
return {"download_threads": self.download_threads,
"max_size": self.max_size,
"timeout": self.timeout,
"dl_path": self.dl_path,
"time_fmt": self.time_fmt,
"to_tray": self.to_tray,
"watch_clipboard": self.watch_clipboard,
"debug": self.debug,
"set_pwd": self.set_pwd,
"pwd": self.<PASSWORD>,
"set_desc": self.set_desc,
"desc": self.desc,
"upload_delay": self.upload_delay,
"allow_big_file": self.allow_big_file,
"upgrade": self.upgrade}
def initUI(self):
self.setWindowTitle("设置")
logo = QLabel()
logo.setPixmap(QPixmap(SRC_DIR + "logo2.gif"))
logo.setStyleSheet("background-color:rgb(255,255,255);")
logo.setAlignment(Qt.AlignCenter)
self.download_threads_lb = QLabel("同时下载文件数")
self.download_threads_var = QLineEdit()
self.download_threads_var.setPlaceholderText("范围:1-9")
self.download_threads_var.setToolTip("范围:1-9")
self.download_threads_var.setInputMask("D")
self.max_size_lb = QLabel("分卷大小(MB)")
self.max_size_var = QLineEdit()
self.max_size_var.setPlaceholderText("普通用户最大100,vip用户根据具体情况设置")
self.max_size_var.setToolTip("普通用户最大100,vip用户根据具体情况设置")
self.max_size_var.setInputMask("D99")
self.timeout_lb = QLabel("请求超时(秒)")
self.timeout_var = QLineEdit()
self.timeout_var.setPlaceholderText("范围:1-99")
self.timeout_var.setToolTip("范围:1-99")
self.timeout_var.setInputMask("D9")
self.upload_delay_lb = QLabel("上传延时(秒)")
self.upload_delay_var = QLineEdit()
self.upload_delay_var.setPlaceholderText("范围:1-99")
self.upload_delay_var.setToolTip("范围:1-99")
self.upload_delay_var.setInputMask("D9")
self.dl_path_lb = QLabel("下载保存路径")
self.dl_path_var = MyLineEdit(self)
self.dl_path_var.clicked.connect(self.set_download_path)
self.time_fmt_box = QCheckBox("使用[年-月-日]时间格式")
self.time_fmt_box.setToolTip("文件上传日期显示格式")
self.to_tray_box = QCheckBox("关闭到系统托盘")
self.to_tray_box.setToolTip("点击关闭软件按钮是最小化软件至系统托盘")
self.watch_clipboard_box = QCheckBox("监听系统剪切板")
self.watch_clipboard_box.setToolTip("检测到系统剪切板中有符合规范的蓝奏链接时自动唤起软件,并提取")
self.debug_box = QCheckBox("开启调试日志")
self.debug_box.setToolTip("记录软件 debug 信息至 debug-lanzou-gui.log 文件")
self.set_pwd_box = QCheckBox("上传文件自动设置密码")
self.set_pwd_var = AutoResizingTextEdit()
self.set_pwd_var.setPlaceholderText(" 2-8 位数字或字母")
self.set_pwd_var.setToolTip("2-8 位数字或字母")
self.set_desc_box = QCheckBox("上传文件自动设置描述")
self.set_desc_var = AutoResizingTextEdit()
self.big_file_box = QCheckBox(f"允许上传超过 {self.max_size}MB 的大文件")
self.big_file_box.setToolTip("开启大文件上传支持 (功能下线)")
self.upgrade_box = QCheckBox("自动检测新版本")
self.upgrade_box.setToolTip("在软件打开时自动检测是否有新的版本发布,如有则弹出更新信息")
self.time_fmt_box.toggle()
self.time_fmt_box.stateChanged.connect(self.change_time_fmt)
self.to_tray_box.stateChanged.connect(self.change_to_tray)
self.watch_clipboard_box.stateChanged.connect(self.change_watch_clipboard)
self.debug_box.stateChanged.connect(self.change_debug)
self.set_pwd_box.stateChanged.connect(self.change_set_pwd)
self.set_pwd_var.editingFinished.connect(self.check_pwd)
self.set_desc_box.stateChanged.connect(self.change_set_desc)
self.big_file_box.stateChanged.connect(self.change_big_file)
self.upgrade_box.stateChanged.connect(self.change_upgrade)
buttonBox = QDialogButtonBox()
buttonBox.setOrientation(Qt.Horizontal)
buttonBox.setStandardButtons(QDialogButtonBox.Reset | QDialogButtonBox.Save | QDialogButtonBox.Cancel)
buttonBox.button(QDialogButtonBox.Reset).setText("重置")
buttonBox.button(QDialogButtonBox.Save).setText("保存")
buttonBox.button(QDialogButtonBox.Cancel).setText("取消")
buttonBox.button(QDialogButtonBox.Reset).clicked.connect(lambda: self.set_values(reset=True))
buttonBox.button(QDialogButtonBox.Save).clicked.connect(self.slot_save)
buttonBox.rejected.connect(self.reject)
form = QFormLayout()
form.setLabelAlignment(Qt.AlignRight)
form.setSpacing(10)
form.addRow(self.download_threads_lb, self.download_threads_var)
form.addRow(self.timeout_lb, self.timeout_var)
form.addRow(self.upload_delay_lb, self.upload_delay_var)
form.addRow(self.max_size_lb, self.max_size_var)
form.addRow(self.dl_path_lb, self.dl_path_var)
vbox = QVBoxLayout()
vbox.addWidget(logo)
vbox.addStretch(1)
vbox.addLayout(form)
vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addWidget(self.time_fmt_box)
hbox.addWidget(self.to_tray_box)
hbox.addWidget(self.watch_clipboard_box)
hbox.addWidget(self.debug_box)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox_2 = QHBoxLayout()
hbox_2.addWidget(self.set_pwd_box)
hbox_2.addWidget(self.set_pwd_var)
vbox.addLayout(hbox_2)
vbox.addStretch(1)
hbox_3 = QHBoxLayout()
hbox_3.addWidget(self.set_desc_box)
hbox_3.addWidget(self.set_desc_var)
vbox.addLayout(hbox_3)
hbox_4 = QHBoxLayout()
hbox_4.addWidget(self.big_file_box)
hbox_4.addWidget(self.upgrade_box)
vbox.addStretch(1)
vbox.addLayout(hbox_4)
vbox.addStretch(2)
vbox.addWidget(buttonBox)
self.setLayout(vbox)
self.setMinimumWidth(500)
def change_time_fmt(self, state):
if state == Qt.Checked:
self.time_fmt = True
else:
self.time_fmt = False
def change_to_tray(self, state):
if state == Qt.Checked:
self.to_tray = True
else:
self.to_tray = False
def change_watch_clipboard(self, state):
if state == Qt.Checked:
self.watch_clipboard = True
else:
self.watch_clipboard = False
def change_debug(self, state):
if state == Qt.Checked:
self.debug = True
else:
self.debug = False
def change_big_file(self, state):
if state == Qt.Checked:
self.allow_big_file = True
else:
self.allow_big_file = False
def change_upgrade(self, state):
if state == Qt.Checked:
self.upgrade = True
else:
self.upgrade = False
def change_set_pwd(self, state):
if state == Qt.Checked:
self.set_pwd = True
self.set_pwd_var.setDisabled(False)
else:
self.set_pwd = False
self.set_pwd_var.setDisabled(True)
def change_set_desc(self, state):
if state == Qt.Checked:
self.set_desc = True
self.set_desc_var.setDisabled(False)
else:
self.set_desc = False
self.set_desc_var.setDisabled(True)
def check_pwd(self):
pwd = self.set_pwd_var.toPlainText()
pwd = ''.join(list(filter(str.isalnum, pwd)))
if len(pwd) < 2:
pwd = ""
self.set_pwd_var.setText(pwd[:8])
def set_download_path(self):
"""设置下载路径"""
dl_path = QFileDialog.getExistingDirectory(self, "选择文件下载保存文件夹", self.cwd)
dl_path = os.path.normpath(dl_path) # windows backslash
if dl_path == self.dl_path or dl_path == ".":
return None
self.dl_path_var.setText(dl_path)
self.dl_path = dl_path
def slot_save(self):
"""保存槽函数"""
self._config.settings = self.get_values()
self.saved.emit()
self.close()
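# Usage sketch (hypothetical caller; the names below are assumptions for illustration only):
#   dlg = SettingDialog(parent=main_window)
#   dlg.saved.connect(reload_settings)   # re-read the config after the user presses 保存
#   dlg.open_dialog(config)              # config must expose .name, .path, .settings, .default_settings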
|
lanzou/gui/dialogs/setting.py
|
| 0.22051 | 0.08292 |
import os
import platform
import argparse
class Common():
def __init__(self, sudo_cmd):
# Assumption is nvidia-smi is installed on systems with gpu
self.is_gpu_instance = os.system("nvidia-smi") == 0
self.torch_stable_url = "https://download.pytorch.org/whl/torch_stable.html"
self.sudo_cmd = sudo_cmd
def install_java(self):
pass
def install_nodejs(self):
pass
def install_torch_packages(self, cuda_version):
if self.is_gpu_instance:
if (cuda_version is not None) and cuda_version == 'cu101':
os.system(f"pip install -U -r requirements/torch_cu101.txt -f {self.torch_stable_url}")
else:
os.system(f"pip install -U -r requirements/torch.txt -f {self.torch_stable_url}")
else:
os.system(f"pip install -U -r requirements/torch_cpu.txt -f {self.torch_stable_url}")
def install_python_packages(self, cuda_version):
self.install_torch_packages(cuda_version)
os.system("pip install -U -r requirements/developer.txt") # developer.txt also installs packages from common.txt
if os.system("conda") == 0: # If conda is available install conda-build package
os.system("conda install -y conda-build")
def install_node_packages(self):
os.system(f"{self.sudo_cmd}apt-get update")
os.system(f"{self.sudo_cmd}npm install -g newman newman-reporter-html markdown-link-check")
def install_jmeter(self):
pass
class Linux(Common):
def __init__(self, sudo_cmd):
super().__init__(sudo_cmd)
def install_java(self):
os.system(f"{self.sudo_cmd}apt-get update")
os.system(f"{self.sudo_cmd}apt-get install -y openjdk-11-jdk")
def install_nodejs(self):
os.system(f"{self.sudo_cmd}apt-get update")
os.system(f"{self.sudo_cmd}curl -sL https://deb.nodesource.com/setup_14.x | {self.sudo_cmd}bash -")
os.system(f"{self.sudo_cmd}apt-get install -y nodejs")
class Windows(Common):
def __init__(self, sudo_cmd):
super().__init__(sudo_cmd)
def install_java(self):
pass
def install_nodejs(self):
pass
class Darwin(Common):
def __init__(self, sudo_cmd):
super().__init__(sudo_cmd)
def install_java(self):
os.system("brew tap AdoptOpenJDK/openjdk")
os.system("brew cask install adoptopenjdk11")
def install_nodejs(self):
os.system("brew install node")
def install_torch_packages(self, cuda_version=''):
os.system(f"pip install -U -r requirements/torch.txt -f {self.torch_stable_url}")
def install_dependencies(sudo_cmd='sudo ', cuda_version=None):
os_map = {
"Linux": Linux,
"Windows": Windows,
"Darwin": Darwin
}
system = os_map[platform.system()](sudo_cmd)
# Sequence of installation to be maintained
system.install_java()
system.install_nodejs()
system.install_python_packages(cuda_version)
system.install_node_packages()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Install various build and test dependencies of TorchServe")
parser.add_argument('--cuda', default=None, choices=['cu101'], help="CUDA version for torch")
args = parser.parse_args()
install_dependencies('', cuda_version=args.cuda)
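# Example invocation (an assumption based on the argparse setup above, not from the repo docs):
#   python ts_scripts/install_dependencies.py --cuda cu101
# selects the CUDA 10.1 torch requirements on GPU hosts; omitting --cuda uses the default
# torch requirements, and hosts without nvidia-smi fall back to the CPU requirements file.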
|
ts_scripts/install_dependencies.py
|
| 0.294114 | 0.097648 |
import pytest
import numpy as np
from shapely.geometry import Polygon, Point, LineString
import geopandas as gpd
import earthpy.clip as cl
@pytest.fixture
def point_gdf():
"""Create a point GeoDataFrame."""
pts = np.array([[2, 2], [3, 4], [9, 8], [-12, -15]])
gdf = gpd.GeoDataFrame(
[Point(xy) for xy in pts],
columns=["geometry"],
crs="epsg:4326",
)
return gdf
@pytest.fixture
def single_rectangle_gdf():
"""Create a single rectangle for clipping."""
poly_inters = Polygon([(0, 0), (0, 10), (10, 10), (10, 0), (0, 0)])
gdf = gpd.GeoDataFrame([1], geometry=[poly_inters], crs="epsg:4326")
gdf["attr2"] = "site-boundary"
return gdf
@pytest.fixture
def two_line_gdf():
"""Create Line Objects For Testing """
linea = LineString([(1, 1), (2, 2), (3, 2), (5, 3)])
lineb = LineString([(3, 4), (5, 7), (12, 2), (10, 5), (9, 7.5)])
gdf = gpd.GeoDataFrame([1, 2], geometry=[linea, lineb], crs="epsg:4326")
return gdf
@pytest.fixture
def multi_line(two_line_gdf):
"""Create a multi-line GeoDataFrame.
This has one multi line and another regular line.
"""
# Create a single and multi line object
multiline_feat = two_line_gdf.unary_union
linec = LineString([(2, 1), (3, 1), (4, 1), (5, 2)])
out_df = gpd.GeoDataFrame(
geometry=gpd.GeoSeries([multiline_feat, linec]),
crs="epsg:4326",
)
out_df = out_df.rename(columns={0: "geometry"}).set_geometry("geometry")
out_df["attr"] = ["road", "stream"]
return out_df
@pytest.fixture
def multi_point(point_gdf):
"""Create a multi-point GeoDataFrame."""
multi_point = point_gdf.unary_union
out_df = gpd.GeoDataFrame(
gpd.GeoSeries(
[multi_point, Point(2, 5), Point(-11, -14), Point(-10, -12)]
),
crs="epsg:4326",
)
out_df = out_df.rename(columns={0: "geometry"}).set_geometry("geometry")
out_df["attr"] = ["tree", "another tree", "shrub", "berries"]
return out_df
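# Note: the tests below use pytest.raises(Warning, ...) rather than pytest.warns because the
# deprecated earthpy.clip helpers are expected to raise Warning as an exception, not merely emit it.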
def test_warning_main_clip_function(point_gdf, single_rectangle_gdf):
"""Check that clip_shp returns a deprecated warning."""
with pytest.raises(Warning, match="clip_shp is deprecated in earthpy"):
cl.clip_shp(point_gdf, single_rectangle_gdf)
def test_warning_multi_line_clip_function(multi_line, single_rectangle_gdf):
"""Check that _clip_multi_poly_line returns a deprecated warning."""
with pytest.raises(
Warning,
match="_clip_multi_poly_line is deprecated. Use the "
"_clip_line_poly()",
):
cl._clip_multi_poly_line(multi_line, single_rectangle_gdf)
def test_warning_line_clip_function(two_line_gdf, single_rectangle_gdf):
"""Check that _clip_line_poly returns a deprecated warning."""
with pytest.raises(
Warning,
match="_clip_line_poly is deprecated. Use the _clip_line_poly()",
):
cl._clip_line_poly(two_line_gdf, single_rectangle_gdf)
def test_warning_multi_point_clip_function(multi_point, single_rectangle_gdf):
"""Check that _clip_multi_point returns a deprecated warning."""
with pytest.raises(
Warning,
match="_clip_multi_point is deprecated. Use the _clip_points()",
):
cl._clip_multi_point(multi_point, single_rectangle_gdf)
def test_warning_point_clip_function(point_gdf, single_rectangle_gdf):
"""Check that _clip_points returns a deprecated warning."""
with pytest.raises(
Warning,
match="_clip_points is deprecated. Use the _clip_points()",
):
cl._clip_points(point_gdf, single_rectangle_gdf)
|
earthpy/tests/test_clip.py
|
| 0.845879 | 0.687172 |
"""Layer for modelling and scoring secondary structure."""
import os
from absl import logging
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
# The 8 secondary-structure classes (Q8)
SECONDARY_STRUCTURES = '-HETSGBI'
# Equivalence classes for 3-class (Q3) from Li & Yu 2016.
# See https://swift.cmbi.umcn.nl/gv/dssp/ for letter explanations.
# This below is a SPECIFIC Q3 map for a specific protein
Q3_MAP = ['-TSGIB', 'H', 'E']
def make_q3_matrices():
"""Generate mapping matrices for secstruct Q8:Q3 equivalence classes."""
dimension = len(SECONDARY_STRUCTURES)
q3_map_matrix = np.zeros((dimension, len(Q3_MAP)))
q3_lookup = np.zeros((dimension,), dtype=np.int32)
for i, eclass in enumerate(Q3_MAP): # equivalence classes
for m in eclass: # Members of the class.
ss_type = SECONDARY_STRUCTURES.index(m)
q3_map_matrix[ss_type, i] = 1.0
q3_lookup[ss_type] = i
return q3_map_matrix, q3_lookup
class Secstruct(object):
"""Make a layer that computes hierarchical secstruct."""
# Build static, shared structures:
q3_map_matrix, q3_lookup = make_q3_matrices()
static_dimension = len(SECONDARY_STRUCTURES)
def __init__(self, name='secstruct'):
self.name = name
self._dimension = Secstruct.static_dimension
def make_layer_new(self, activations):
"""Make the layer."""
with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
logging.info('Creating secstruct %s', activations)
self.logits = tf.contrib.layers.linear(activations, self._dimension)
self.ss_q8_probs = tf.nn.softmax(self.logits)
self.ss_q3_probs = tf.matmul(
self.ss_q8_probs, tf.constant(self.q3_map_matrix, dtype=tf.float32))
def get_q8_probs(self):
return self.ss_q8_probs
def save_secstructs(dump_dir_path, name, index, sequence, probs,
label='Deepmind secstruct'):
"""Write secstruct prob distributions to an ss2 file.
Can be overloaded to write out asa values too.
Args:
dump_dir_path: directory where to write files.
name: name of domain
index: index number of multiple samples. (or None for no index)
sequence: string of L residue labels
probs: L x D matrix of probabilities. L is length of sequence,
D is probability dimension (usually 3).
label: A label for the file.
"""
filename = os.path.join(dump_dir_path, '%s.ss2' % name)
if index is not None:
filename = os.path.join(dump_dir_path, '%s_%04d.ss2' % (name, index))
with tf.io.gfile.GFile(filename, 'w') as gf:
logging.info('Saving secstruct to %s', filename)
gf.write('# %s CLASSES [%s] %s sample %s\n\n' % (
label, ''.join(SECONDARY_STRUCTURES[:probs.shape[1]]), name, index))
for l in range(probs.shape[0]):
ss = SECONDARY_STRUCTURES[np.argmax(probs[l, :])]
gf.write('%4d %1s %1s %s\n' % (l + 1, sequence[l], ss, ''.join(
[('%6.3f' % p) for p in probs[l, :]])))
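# Illustrative ss2 row produced by save_secstructs (constructed example, not real model output):
# for sequence[0] == 'M' with Q3 probabilities [0.1, 0.8, 0.1] the format string writes
#   "   1 M H  0.100 0.800 0.100"
# i.e. residue number, residue letter, argmax class letter, then the per-class probabilities.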
|
alphafold_casp13/secstruct.py
|
"""Layer for modelling and scoring secondary structure."""
import os
from absl import logging
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
# 8-class classes (Q8)
SECONDARY_STRUCTURES = '-HETSGBI'
# Equivalence classes for 3-class (Q3) from Li & Yu 2016.
# See https://swift.cmbi.umcn.nl/gv/dssp/ for letter explanations.
# This below is a SPECIFIC Q3 map for a specific protein
Q3_MAP = ['-TSGIB', 'H', 'E']
def make_q3_matrices():
"""Generate mapping matrices for secstruct Q8:Q3 equivalence classes."""
dimension = len(SECONDARY_STRUCTURES)
q3_map_matrix = np.zeros((dimension, len(Q3_MAP)))
q3_lookup = np.zeros((dimension,), dtype=np.int32)
for i, eclass in enumerate(Q3_MAP): # equivalence classes
for m in eclass: # Members of the class.
ss_type = SECONDARY_STRUCTURES.index(m)
q3_map_matrix[ss_type, i] = 1.0
q3_lookup[ss_type] = i
return q3_map_matrix, q3_lookup
class Secstruct(object):
"""Make a layer that computes hierarchical secstruct."""
# Build static, shared structures:
q3_map_matrix, q3_lookup = make_q3_matrices()
static_dimension = len(SECONDARY_STRUCTURES)
def __init__(self, name='secstruct'):
self.name = name
self._dimension = Secstruct.static_dimension
def make_layer_new(self, activations):
"""Make the layer."""
with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
logging.info('Creating secstruct %s', activations)
self.logits = tf.contrib.layers.linear(activations, self._dimension)
self.ss_q8_probs = tf.nn.softmax(self.logits)
self.ss_q3_probs = tf.matmul(
self.ss_q8_probs, tf.constant(self.q3_map_matrix, dtype=tf.float32))
def get_q8_probs(self):
return self.ss_q8_probs
def save_secstructs(dump_dir_path, name, index, sequence, probs,
label='Deepmind secstruct'):
"""Write secstruct prob distributions to an ss2 file.
Can be overloaded to write out asa values too.
Args:
dump_dir_path: directory where to write files.
name: name of domain
index: index number of multiple samples. (or None for no index)
sequence: string of L residue labels
probs: L x D matrix of probabilities. L is length of sequence,
D is probability dimension (usually 3).
label: A label for the file.
"""
filename = os.path.join(dump_dir_path, '%s.ss2' % name)
if index is not None:
filename = os.path.join(dump_dir_path, '%s_%04d.ss2' % (name, index))
with tf.io.gfile.GFile(filename, 'w') as gf:
logging.info('Saving secstruct to %s', filename)
gf.write('# %s CLASSES [%s] %s sample %s\n\n' % (
label, ''.join(SECONDARY_STRUCTURES[:probs.shape[1]]), name, index))
for l in range(probs.shape[0]):
ss = SECONDARY_STRUCTURES[np.argmax(probs[l, :])]
gf.write('%4d %1s %1s %s\n' % (l + 1, sequence[l], ss, ''.join(
[('%6.3f' % p) for p in probs[l, :]])))
| 0.858881 | 0.445107 |
from enum import Enum
from typing import Any, Dict, List
from mypy_extensions import TypedDict
from typing_extensions import Protocol
from openslides_backend.shared.interfaces import Filter
from openslides_backend.shared.patterns import Collection, FullQualifiedId
PartialModel = Dict[str, Any]
Found = TypedDict("Found", {"exists": bool, "position": int})
Count = TypedDict("Count", {"count": int, "position": int})
Aggregate = Dict[str, Any]
class DeletedModelsBehaviour(Enum):
NO_DELETED = 1
ONLY_DELETED = 2
ALL_MODELS = 3
class GetManyRequest:
"""Encapsulates a single GetManyRequests
"""
def __init__(
self, collection: Collection, ids: List[int], mapped_fields: List[str] = None,
):
self.collection = collection
self.ids = ids
self.mapped_fields = mapped_fields
def to_dict(self) -> Dict[str, Any]:
result: Dict[str, Any] = {}
result["collection"] = str(self.collection)
if self.ids is not None:
result["ids"] = self.ids
if self.mapped_fields is not None:
result["mapped_fields"] = self.mapped_fields
return result
class Datastore(Protocol):
"""Datastore defines the interface to the datastore
"""
def get(
self,
fqid: FullQualifiedId,
mapped_fields: List[str] = None,
position: int = None,
get_deleted_models: int = None,
) -> PartialModel:
...
def getMany(
self,
get_many_requests: List[GetManyRequest],
mapped_fields: List[str] = None,
position: int = None,
get_deleted_models: int = None,
) -> Dict[str, Dict[int, PartialModel]]:
...
def getManyByFQIDs(
self, ids: List[FullQualifiedId]
) -> Dict[str, Dict[int, PartialModel]]:
...
def getAll(
self,
collection: Collection,
mapped_fields: List[str] = None,
get_deleted_models: int = None,
) -> List[PartialModel]:
...
def filter(
self,
collection: Collection,
filter: Filter,
meeting_id: int = None,
mapped_fields: List[str] = None,
) -> List[PartialModel]:
...
def exists(self, collection: Collection, filter: Filter) -> Found:
...
def count(self, collection: Collection, filter: Filter) -> Count:
...
def min(
self, collection: Collection, filter: Filter, field: str, type: str = None
) -> Aggregate:
...
def max(
self, collection: Collection, filter: Filter, field: str, type: str = None
) -> Aggregate:
...
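# Usage sketch (hypothetical, for illustration; "motion" is an assumed collection name):
#   GetManyRequest(Collection("motion"), [1, 2], mapped_fields=["title"]).to_dict()
# returns {"collection": "motion", "ids": [1, 2], "mapped_fields": ["title"]},
# assuming str(Collection("motion")) renders as "motion" in the surrounding project.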
|
openslides_backend/services/database/adapter/interface.py
|
| 0.857067 | 0.213172 |
import base64
import jwt
from allauth.socialaccount.providers.oauth2.views import OAuth2Adapter, OAuth2CallbackView, OAuth2LoginView
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from .provider import EspooADFSProvider, HelsinkiADFSProvider
x509_backend = default_backend()
class ADFSOAuth2Adapter(OAuth2Adapter):
@classmethod
def get_login_view(cls):
return OAuth2LoginView.adapter_view(cls)
@classmethod
def get_callback_view(cls):
return OAuth2CallbackView.adapter_view(cls)
def complete_login(self, request, app, token, **kwargs):
cert_der = base64.b64decode(self.cert)
x509_cert = x509.load_der_x509_certificate(cert_der, backend=x509_backend)
jwt_token = jwt.decode(token.token, key=x509_cert.public_key(),
leeway=10, options={'verify_aud': False})
data = self.clean_attributes(jwt_token)
return self.get_provider().sociallogin_from_response(request, data)
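    # The access token returned by ADFS is itself a JWT: it is decoded and
    # verified against the subclass-provided signing certificate's public key,
    # with 10 seconds of clock leeway and the audience check disabled, before
    # its claims are passed through clean_attributes().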
class HelsinkiADFSOAuth2Adapter(ADFSOAuth2Adapter):
provider_id = HelsinkiADFSProvider.id
realm = 'helsinki'
access_token_url = 'https://fs.hel.fi/adfs/oauth2/token'
authorize_url = 'https://fs.hel.fi/adfs/oauth2/authorize'
profile_url = 'https://api.hel.fi/sso/user/'
cert = (
'MIIDMDCCAhigAwIBAgIBATANBgkqhkiG9w0BAQsFADAjMSEwHwYDVQQDExhBR'
'EZTIFNpZ25pbmcgLSBmcy5oZWwuZmkwHhcNMTYwNDAzMjIxMTAwWhcNMjEwND'
'AzMjIxMTAwWjAjMSEwHwYDVQQDExhBREZTIFNpZ25pbmcgLSBmcy5oZWwuZmk'
'wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrCo9kuzljk4F8R12A'
'eIYMARztxkMojcrN1KN3KQeoxcCPaFOTMYHWk8ww1N+m0PJoLl1Eray+cMsoH'
'rdd3iVxmApcQBxD02SnGsEn/3D/sTHcoi9WzqwM8ESbtm0jGIvfWrpJtMO/g7'
'ELW0dXBcWq4LRvBtyTt3jiehIO0HohS8xfQ4+vURFpjvfD0kjPemsMJ7QB8Eo'
<KEY>2CNFO9vct1IJiQJUfRbVWk8I/JFA65ZuXrCjY//<KEY>'
'<KEY>'
'wSsMXiNXh8AitTLUMgpAgMBAAGjbzBtMAwGA1UdEwEB/wQCMAAwHQYDVR0OBB'
'YEFBDL4FpHu+kQEI7MIpSjSACaA9ajMAsGA1UdDwQEAwIFIDARBglghkgBhvh'
'CAQEEBAMCBkAwHgYJYIZIAYb4QgENBBEWD3hjYSBjZXJ0aWZpY2F0ZTANBgkq'
'hkiG9w0BAQsFAAOCAQEAISn44oOdtfdMHh0Z4nezAuDHtKqTd6iV3MY7MwTFm'
'iUFQhJADO2ezpoW3Xj64wWeg3eVXyC7iHk/SV5OVmmo4uU/1YJHiBc5jEUZ5E'
'dvaZQaDH5iaJlK6aiCTznqwu7XJS7LbLeLrVqj3H3IYsV6BiGlT4Z1rXYX+nD'
'fi46TJCKqxE0zTArQQROocfKS+7JM+JU5dLMNOOC+6tCUOP3GEjuE3PMetpbH'
'+k6Wu6d3LzhpU2QICWJnFpj1yJTAb94pWRUKNoBhpxQlWvNzRgFgJesIfkZ4C'
'qqhmHqnV/BO+7MMv/g+WXRD09fo/YIXozpWzmO9LBzEvFe7Itz6C1R4Ng==')
def clean_attributes(self, attrs_in):
attr_map = {
'primarysid': 'primary_sid',
'company': 'department_name',
'email': 'email',
'winaccountname': 'username',
'group': 'ad_groups',
'unique_name': 'last_first_name',
'given_name': 'first_name',
'family_name': 'last_name',
}
# Convert attribute names to lowercase
attrs_in = {k.lower(): v for k, v in attrs_in.items()}
attrs = {}
for in_name, out_name in attr_map.items():
val = attrs_in.get(in_name, None)
if val is not None:
if out_name in ('department_name', 'email', 'username'):
val = val.lower()
                attrs[out_name] = val
if 'last_first_name' in attrs:
names = attrs['last_first_name'].split(' ')
if 'first_name' not in attrs:
attrs['first_name'] = [names[0]]
if 'last_name' not in attrs:
attrs['last_name'] = [' '.join(names[1:])]
del attrs['last_first_name']
return attrs
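    # Note on the fallback above: when the token lacks given_name/family_name
    # claims, the combined "last_first_name" value is split on spaces, its
    # first token is used as first_name and the remainder as last_name, and
    # the combined key is dropped afterwards.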
class EspooADFSOAuth2Adapter(ADFSOAuth2Adapter):
provider_id = EspooADFSProvider.id
realm = 'espoo'
access_token_url = 'https://fs.espoo.fi/adfs/oauth2/token'
authorize_url = 'https://fs.espoo.fi/adfs/oauth2/authorize'
profile_url = 'https://api.hel.fi/sso/user/'
cert = (
'MIIG1zCCBL+gAwIBAgITGgAAfQoAbggMFZQDYAAAAAB9CjANBgkqhkiG9w0BAQsF'
'ADBaMRQwEgYKCZImiZPyLGQBGRYEY2l0eTESMBAGCgmSJomT8ixkARkWAmFkMRUw'
'<KEY>kVzcG9vIEggU3ViIENBMB4X'
'DTE3MTEyMjEzMDIxMVoXDTIyMTEyMjEzMTIxMVowKDEmMCQGA1UEAxMdQURGUyBT'
'<KEY>'
'<KEY>'
'<KEY>'
'AZVm6TxMvX4eletZT8iGdb6Al40EriFtdPrTX5NhoTG6YwcQtFa7UHstjsxDktb+'
'ZXphpPoFB65kSi948ThVPdo6UwIhLKioSw/<KEY>5CvqKdPbrhXZYRx4'
'dQY1gKScfbD1XMi+wVMwhp5Abn4D9BNbesMNsZqYHdzyANwMLqszJ6ASRuWoW4xp'
'/sjs/cs16HDOYyTHy09ppaCUx3wD7tqfAgMBAAGjggLGMIICwjA+BgkrBgEEAYI3'
'FQcEMTAvBicrBgEEAYI3FQiE3KFUgeH0QIS5mziD5egZh7aYPoEbhtfpHYSAlToC'
'AWQCAQYwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDgYDVR0PAQH/BAQDAgWgMBsGCSsG'
'AQQBgjcVCgQOMAwwCgYIKwYBBQUHAwEwHQYDVR0OBBYEFA3f0BbRJG1stycIZ+gZ'
'djezdJ3mMB8GA1UdIwQYMBaAFKnS5DPbd9hr720Fh3H1s8Djw+GXMIH+BgNVHR8E'
'<KEY>'
'<KEY>'
'<KEY>'
'PVNlcnZpY2VzLENOPUNvbmZpZ3VyYXRpb24sREM9YWQsREM9Y2l0eT9jZXJ0aWZp'
'Y2F0ZVJldm9jYXRpb25MaXN0P2Jhc2U/b2JqZWN0Q2xhc3M9Y1JMRGlzdHJpYnV0'
'aW9uUG9pbnQwgfwGCCsGAQUFBwEBBIHvMIHsMDgGCCsGAQUFBzAChixodHRwOi8v'
'cGtpLmVzcG9vLmZpL0VzcG9vJTIwSCUyMFN1YiUyMENBLmNydDCBrwYIKwYBBQUH'
'MAKGgaJsZGFwOi8vL0NOPUVzcG9vJTIwSCUyMFN1YiUyMENBLENOPUFJQSxDTj1Q'
'dWJsaWMlMjBLZXklMjBTZXJ2aWNlcyxDTj1TZXJ2aWNlcyxDTj1Db25maWd1cmF0'
'aW9uLERDPWFkLERDPWNpdHk/Y0FDZXJ0aWZpY2F0ZT9iYXNlP29iamVjdENsYXNz'
'PWNlcnRpZmljYXRpb25BdXRob3JpdHkwDQYJKoZIhvcNAQELBQADggIBAIGhXVtM'
'rRq2dNz66P1eO+NzZoV7g5RrN/tcOsBvplj4QjhIeyG9I22eESZNHrege0qZDHng'
'<KEY>'
'B4c4r8QeDXn7zcVvh0Z0FbIskAVEA9MoWdo7+uTMb/I+K6h97A9ysg9ry2bwAv/B'
'UletFRVJtMRHqDHd9QeS/G1EmkOP/PstDK5REN9TMo/EUpXYV1mNJF7k0TRtpXu1'
'pd14EaD2xI993Tf4Vzmeht34RjuKMGS3Rwn6DV4OoTr/49RlO6HARnkLrDz7hAT8'
'+CVM2iTOuDoswyP6Slbt/vZh9KJB+0g4f/GZCrcsq44DfpxEPAyomIAmSi0TPsjQ'
'mvQDQQXieY9b6ojxleHMGMD27GpTszXkmtS01Imwy2X7yeZyPEJuPyr0xW2tC6t9'
'ilyfuetzFr9cNawj2z0JvObVQ8X68Bq0MTBiMdtA/IWgzukGlFhCrLG+KCn/Idqz'
'dtXrlETkTPhKlm84Pr3MbEueS0MuIwGf6TGUt7arWJe6zDMf1/ZfBQV1kOjFOH6S'
'DNQhLHEL0mYumZUawi+EaNQOtTE8SN1tbKicI09WR0jdvNs7lvePrB/K1q19hz5m'
'U+rbNk9+8Jgpzd5ielj37oqQOJazbSxNt+xF'
)
def clean_attributes(self, attrs_in):
attr_map = {
'primarysid': 'primary_sid',
'given_name': 'first_name',
'family_name': 'last_name',
'email': 'email',
}
attrs = {}
for in_name, out_name in attr_map.items():
val = attrs_in.get(in_name, None)
if val is not None:
if out_name in ('department_name', 'email', 'username'):
val = val.lower()
                attrs[out_name] = val
return attrs
|
adfs_provider/views.py
|
| 0.396419 | 0.104752 |
# For parsing cli arguments
import argparse
# For parsing JSON files
import json
# Plotting library
import matplotlib as plt
plt.use('Agg')
import matplotlib.pyplot as pyplot
# To access more matplotlib functionality, i.e., default calculated figure
# size
from pylab import rcParams
_version = 0.2
def getVersion(parser):
    '''Return the program name, description and current version.'''
return "{} - {} - Version {}".format(parser.prog, parser.description, _version)
class PlottingConfiguration:
'''Configuration of the benchmark plot'''
def __init__(self, args):
self.inputFile = args.inputFile
self.outputFile = args.outputFile
self.plotTitle = args.plotTitle
self.timeUnit = args.timeUnit
self.xValue = args.xValue
self.yValue = args.yValue
if args.xLabel is None:
self.xLabel = args.xValue
else:
self.xLabel = args.xLabel
if args.yLabel is None:
self.yLabel = "Time in {}".format(args.timeUnit)
else:
self.yLabel = args.yLabel
self.xTickBegin = args.xTickBegin
self.xTickEnd = args.xTickEnd
self.xTickStep = args.xTickStep
self.benchmarkDescription = args.benchmarkDescription
self.xSize = args.xSize
self.ySize = args.ySize
self.dpi = args.dpi
def convertTimeUnit(value, src, dest):
'''Convert time units'''
    # This function is necessary since popular libraries like datetime cannot
    # handle nanoseconds
if src == dest:
return value
if src == "ns":
if dest == "us":
return value / 1000
if dest == "ms":
return value / 1000000
elif src == "us":
if dest == "ns":
return value * 1000
if dest == "ms":
return value / 1000
elif src == "ms":
if dest == "ns":
return value * 1000000
if dest == "us":
            return value * 1000
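# A few illustrative conversions (example values only):
#   convertTimeUnit(1500, "ns", "us")  -> 1.5
#   convertTimeUnit(2, "ms", "ns")     -> 2000000
#   convertTimeUnit(3, "us", "ms")     -> 0.003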
def parseJSON(configuration):
'''Parses JSON file containing benchmark results'''
with open(configuration.inputFile) as fd:
data = json.load(fd)
ret = []
for bench in data["benchmarks"]:
# Convert time units if necessary
if bench["time_unit"] != configuration.timeUnit:
bench[configuration.yValue] = convertTimeUnit(bench[configuration.yValue],
bench["time_unit"],
configuration.timeUnit)
ret.append((bench["benchmark_visualizer_group"], bench[configuration.xValue],
bench[configuration.yValue], configuration.timeUnit))
return ret
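# The input is expected to be Google Benchmark's JSON output where every entry
# additionally carries a "benchmark_visualizer_group" counter and the counter
# named by --x_value; roughly (placeholder values):
#   {"benchmarks": [{"benchmark_visualizer_group": 0, "time_unit": "ns",
#                    "real_time": 123.4, "<x_value counter>": 8, ...}, ...]}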
def plot(data, configuration):
benchmarkDict = dict()
for bench in data:
# If no list for this benchmark (group) exist, we create one
if bench[0] not in benchmarkDict:
benchmarkDict.update({bench[0]: ([], [])})
# Append x value if necessary
if bench[1] not in benchmarkDict[bench[0]][0]:
benchmarkDict[bench[0]][0].append(bench[1])
# Append y value
benchmarkDict[bench[0]][1].append(bench[2])
# Use passed arguments if possible, otherwise use automatically calculated
# figure size
    if configuration.xSize is None and configuration.ySize is None:
pyplot.figure(dpi=configuration.dpi)
elif configuration.xSize is None:
pyplot.figure(figsize=(rcParams['figure.figsize'][0],
float(configuration.ySize)),
dpi=configuration.dpi)
elif configuration.ySize is None:
pyplot.figure(figsize=(float(configuration.xSize),
rcParams['figure.figsize'][1]),
dpi=configuration.dpi)
else:
pyplot.figure(figsize=(float(configuration.xSize),
float(configuration.ySize)),
dpi=configuration.dpi)
for key, value in benchmarkDict.items():
# Add plotting data
pyplot.plot(value[0], value[1], marker='o',
label=configuration.benchmarkDescription[int(key)])
pyplot.title(configuration.plotTitle)
pyplot.ylabel(configuration.yLabel)
pyplot.xlabel(configuration.xLabel)
pyplot.legend()
pyplot.grid()
# If no end for the x values is set, just take the maximum of them
if configuration.xTickEnd == -1:
for key, val in benchmarkDict.items():
if max(val[0]) > configuration.xTickEnd:
configuration.xTickEnd = max(val[0])
if configuration.xTickStep != "auto":
pyplot.xticks(range(int(configuration.xTickBegin),
int(configuration.xTickEnd)+1, int(configuration.xTickStep)))
pyplot.savefig(configuration.outputFile, bbox_inches='tight')
def main():
# Parse command line arguments
parser = argparse.ArgumentParser(description = "Visualize Google Benchmark.",
prog = "Benchmark Visualizer")
parser.add_argument("--version", "-v",
version = getVersion(parser),
action = "version")
parser.add_argument("--input_file", "-i",
metavar = "FILE",
help = "Path to JSON file with benchmark results",
dest = "inputFile",
required = True)
parser.add_argument("--output_file", "-o",
metavar = "FILE",
help = "Path to file where the image of the diagram will "
"be stored.",
dest = "outputFile",
required = True)
parser.add_argument("--title",
metavar = "TITLE",
help = "Diagram title",
dest = "plotTitle",
default = "Benchmark Results")
parser.add_argument("--time_unit",
choices = ["ns", "us", "ms"],
help = "Time unit for measured durations",
dest = "timeUnit",
default = "ns")
parser.add_argument("--x_label",
metavar = "X_LABEL",
dest = "xLabel",
help = "Label on the x axis")
parser.add_argument("--y_label",
metavar = "Y_LABEL",
dest = "yLabel",
help = "Lable on the y axis")
parser.add_argument("--x_value", "-x",
metavar = "X_VALUE",
dest = "xValue",
help = "Name of the counter that stores the x value",
required = True)
parser.add_argument("--y_value", "-y",
choices = ["real_time", "cpu_time"],
metavar = "y_VALUE",
dest = "yValue",
help = "Name of the y value that will be considered",
default = "real_time")
parser.add_argument("--x_tick_begin",
metavar = "VALUE",
help = "Set the begin of the x ticks manually",
dest = "xTickBegin",
default = 0)
parser.add_argument("--x_tick_end",
metavar = "VALUE",
help = "Set the end of the x ticks manually",
dest = "xTickEnd",
default = -1)
parser.add_argument("--x_tick_step",
metavar = "VALUE",
help = "Set the steps of the x ticks manually",
dest = "xTickStep",
default = "auto")
parser.add_argument("--benchmark_description", "-d",
metavar = "DESC",
nargs='*',
help = "Description of benchmarks",
dest = "benchmarkDescription",
required = True)
parser.add_argument("--x_size",
metavar = "VALUE",
help = "The horizontal size of the produced plot in inches",
dest = "xSize")
parser.add_argument("--y_size",
metavar = "VALUE",
help = "The vertical size of the produced plot in inches",
dest = "ySize")
parser.add_argument("--dpi",
type=int,
metavar = "VALUE",
help = "DPI of the produced plot",
dest = "dpi",
default = None)
args = parser.parse_args()
configuration = PlottingConfiguration(args)
data = parseJSON(configuration)
plot(data, configuration)
if __name__ == "__main__":
main()
|
benchmark_visualizer.py
|
| 0.654453 | 0.327144 |
import numpy as np
import logging
import matplotlib.pyplot as plt
from ..common.utils import get_command_args, configure_logger
from ..common.gen_samples import read_anomaly_dataset
# DataPlotter (used for the PDF plot below) is assumed to come from the
# package's common plotting helpers, as elsewhere in ad_examples.
from ..common.data_plotter import DataPlotter
from .aad_globals import (
IFOR_SCORE_TYPE_NEG_PATH_LEN, ENSEMBLE_SCORE_LINEAR, AAD_IFOREST, INIT_UNIF
)
from .data_stream import DataStream, IdServer
from .random_split_trees import TREE_UPD_INCREMENTAL
from .forest_aad_detector import AadForest
from .anomaly_dataset_support import dataset_configs
"""
pythonw -m ad_examples.aad.test_concept_drift --debug --plot --log_file=temp/test_concept_drift.log --dataset=weather
"""
def get_iforest_model(x):
model = AadForest(n_estimators=100, # 100,
max_samples=256,
score_type=IFOR_SCORE_TYPE_NEG_PATH_LEN, random_state=42,
add_leaf_nodes_only=True,
max_depth=100,
ensemble_score=ENSEMBLE_SCORE_LINEAR,
detector_type=AAD_IFOREST, n_jobs=4,
tree_update_type=TREE_UPD_INCREMENTAL,
feature_partitions=None)
model.fit(x)
model.init_weights(init_type=INIT_UNIF)
return model
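# Sketch of the drift check below: for each new stream window the per-tree KL
# divergence of the window's node-occupancy distribution is compared against
# the alpha-quantile threshold computed on the reference window; if at least a
# 2*alpha fraction of the trees exceed it, those trees are replaced and the
# reference distribution and threshold are recomputed on the current window.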
def test_kl_data_drift():
logger = logging.getLogger(__name__)
args = get_command_args(debug=False, debug_args=["--debug",
"--plot",
"--log_file=temp/test_concept_drift.log"])
configure_logger(args)
np.random.seed(42)
dataset_config = dataset_configs[args.dataset]
stream_window = dataset_config[2]
alpha = 0.05
X_full, y_full = read_anomaly_dataset(args.dataset)
logger.debug("dataset: %s (%d, %d), stream_window: %d, alpha: %0.3f" %
(args.dataset, X_full.shape[0], X_full.shape[1], stream_window, alpha))
stream = DataStream(X_full, y_full, IdServer(initial=0))
training_set = stream.read_next_from_stream(stream_window)
x, y, ids = training_set.x, training_set.y, training_set.ids
model = get_iforest_model(x)
all_kl_q_alpha = list()
all_reference_kls = list()
all_compare_kls = list()
trees_replaced = list()
# compute KL replacement threshold *without* p
ref_kls, kl_q_alpha = model.get_KL_divergence_distribution(x, p=None, alpha=alpha)
# now initialize reference p
p = model.get_node_sample_distributions(x)
max_kl = np.max(ref_kls)
window = 0 # already read the first window
while True:
buffer = stream.read_next_from_stream(stream_window)
if buffer is None:
break
window += 1
x, y, ids = buffer.x, buffer.y, buffer.ids
# logger.debug("#new: %d" % x.shape[0])
model.add_samples(X=x)
all_kl_q_alpha.append(kl_q_alpha)
all_reference_kls.append(ref_kls)
# compare KL-divergence of current data dist against reference dist p
comp_kls, _ = model.get_KL_divergence_distribution(x, p=p)
all_compare_kls.append(comp_kls)
max_kl = max(max_kl, np.max(comp_kls))
# find which trees exceed alpha-level threshold
replace_trees_by_kl = model.get_trees_to_replace(comp_kls, kl_q_alpha)
n_trees = model.clf.n_estimators
n_replace = 0 if replace_trees_by_kl is None else len(replace_trees_by_kl)
n_threshold = int(2*alpha*n_trees)
# we will replace if 2*alpha number of trees exceed the alpha-threshold
do_replace = n_trees > 0 and n_replace >= n_threshold
logger.debug("window %d: n_replace: %d, threshold num: %d, do_replace: %s" %
(window, n_replace, n_threshold, str(do_replace)))
if do_replace:
if False:
logger.debug("window %d: #replace_trees_by_kl: %d\n%s" %
(window, len(replace_trees_by_kl), str(list(replace_trees_by_kl))))
trees_replaced.append(len(replace_trees_by_kl))
model.update_model_from_stream_buffer(replace_trees=replace_trees_by_kl)
# recompute KL replacement threshold *without* p
ref_kls, kl_q_alpha = model.get_KL_divergence_distribution(x, p=None, alpha=alpha)
max_kl = max(max_kl, np.max(ref_kls))
# now recompute reference p
p = model.get_node_sample_distributions(x)
else:
if False:
logger.debug("window %d: model not updated; replace_trees_by_kl: %s" %
(window, str(list(replace_trees_by_kl)) if replace_trees_by_kl is not None else None))
trees_replaced.append(0)
if args.plot:
legend_datasets = None
# legend_datasets = ['ann_thyroid_1v3', 'weather']
xlim = [0, window+1]
ylim = [0, max_kl+3]
dp = DataPlotter(pdfpath="./temp/test_concept_drift_%s.pdf" % args.dataset,
rows=1, cols=1)
pl = dp.get_next_plot()
plt.xlim(xlim)
plt.ylim(ylim)
plt.xlabel('window', fontsize=18)
plt.ylabel('KL-divergence', fontsize=18)
for i in range(window):
ref_label = com_label = threshold_label = replaced_label = None
ref_kls = all_reference_kls[i]
com_kls = all_compare_kls[i]
mkl = max(np.max(ref_kls), np.max(com_kls))
x_coord = i+1
replaced_y_coord = mkl+2
if i == 0:
ref_label = "ref. KL dist"
com_label = "KL-dist w.r.t ref. dist"
threshold_label = "%0.2f-alpha KL" % alpha
replaced_label = "(.) - number of trees replaced"
pl.scatter([x_coord], [replaced_y_coord], color="black", marker=".", s=0, label=replaced_label)
pl.scatter(np.ones(len(ref_kls), dtype=np.float32)*x_coord, ref_kls,
color="orange", marker="*", s=8, label=ref_label)
pl.scatter([x_coord], [all_kl_q_alpha[i]], color="red", marker="+", s=30, label=threshold_label)
pl.scatter(np.ones(len(ref_kls), dtype=np.float32)*x_coord + 0.1, com_kls,
color="green", marker="*", s=8, label=com_label)
pl.text(x_coord-0.2, replaced_y_coord, "(%d)"%trees_replaced[i], fontsize=10, label=replaced_label)
if legend_datasets is None or args.dataset in legend_datasets:
pl.legend(loc='upper left', prop={'size': 14})
dp.close()
if __name__ == "__main__":
test_kl_data_drift()
|
ad_examples/aad/test_concept_drift.py
|
| 0.384565 | 0.200734 |
import gzip
import itertools
import numpy as np
import pandas as pd
from scipy import stats
import six.moves.cPickle as pickle
def df_to_struct(df):
"""Converts a DataFrame to RPy-compatible structured array."""
struct_array = df.to_records()
arr_dtype = struct_array.dtype.descr
for i, dtype in enumerate(arr_dtype):
if dtype[1] == np.dtype('object'):
arr_dtype[i] = (dtype[0], dtype[1].replace("|O", "|S"))
struct_array = np.asarray([tuple(d) for d in struct_array],
dtype=arr_dtype)
return struct_array
def df_ttest(df, by, key, paired=False, nice=True, **kwargs):
"""Perform a T-test over a DataFrame groupby."""
test_kind = "rel" if paired else "ind"
test_func = getattr(stats, "ttest_" + test_kind)
args = [d[key] for i, d in df.groupby(by)]
t, p = test_func(*args, **kwargs)
dof = (len(df) / 2) - 1 if paired else len(df) - 2
if nice:
return "t(%d) = %.3f; p = %.3g%s" % (dof, t, p, sig_stars(p))
else:
return pd.Series([t, p], ["t", "p"])
def df_oneway(df, by, key, nice=True, **kwargs):
"""Perform a oneway analysis over variance on a DataFrame groupby."""
args = [d[key] for i, d in df.groupby(by)]
f, p = stats.f_oneway(*args, **kwargs)
dof_b = len(args) - 1
dof_w = len(df) - dof_b
if nice:
return "F(%d, %d) = %.3f; p = %.3g%s" % (dof_b, dof_w, f,
p, sig_stars(p))
else:
return pd.Series([f, p], ["F", "p"])
def product_index(values, names=None):
"""Make a MultiIndex from the combinatorial product of the values."""
iterable = itertools.product(*values)
idx = pd.MultiIndex.from_tuples(list(iterable), names=names)
return idx
def make_master_schedule(evs):
"""Take a list of event specifications and make one schedule.
Parameters
----------
evs : sequence of n x 3 arrays
        list of (onset, duration, amplitude) event specifications
Returns
-------
sched : n_event x 5 array
schedule of event specifications with
event and presentation ids
"""
evs = np.asarray(evs)
n_cond = len(evs)
# Make a vector of condition ids and stimulus indices
cond_ids = [np.ones(evs[i].shape[0]) * i for i in range(n_cond)]
cond_ids = np.concatenate(cond_ids)
stim_idxs = np.concatenate([np.arange(len(ev)) for ev in evs])
# Make a schedule of the whole run
sched = np.row_stack(evs)
sched = np.column_stack((sched, cond_ids, stim_idxs))
# Sort the master schedule by onset time
timesorter = np.argsort(sched[:, 0])
sched = sched[timesorter]
return sched
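# Illustrative note: with two condition arrays of shapes (n1, 3) and (n2, 3)
# the result has n1 + n2 rows and five columns -- onset, duration, amplitude,
# condition id, stimulus index -- sorted by onset time.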
def sig_stars(p):
"""Return a R-style significance string corresponding to p values."""
if p < 0.001:
return "***"
elif p < 0.01:
return "**"
elif p < 0.05:
return "*"
elif p < 0.1:
return "."
return ""
def iqr(a):
"""Calculate the IQR for an array of numbers."""
a = np.asarray(a)
q1 = stats.scoreatpercentile(a, 25)
q3 = stats.scoreatpercentile(a, 75)
return q3 - q1
class Results(object):
"""Extremely simple namespace for passing around and pickling data."""
def __init__(self, **kwargs):
for key, val in kwargs.items():
setattr(self, key, val)
def load_pkl(fname, zip=True):
"""Read pickled data from disk, possible decompressing."""
if zip:
open = gzip.open
with open(fname, "rb") as fid:
res = pickle.load(fid)
return res
def save_pkl(fname, res, zip=True):
"""Write pickled data to disk, possible compressing."""
if zip:
open = gzip.open
with open(fname, "wb") as fid:
pickle.dump(res, fid)
|
moss/misc.py
|
| 0.771069 | 0.400046 |
from password import Password
def create_password(flo,me,beat,joby):
new_password = Password(flo,me,beat,joby)
return new_password
def save_passwords(password):
password.save_Password()
def del_password(password):
password.delete_password()
def find_password(user_name):
return Password.find_by_user_name(user_name)
def check_existng_passwords(user_name):
return Password.password_exist(user_name)
def display_passwords():
return Password.display_passwords()
def main():
print("Hello,What is your name?")
user_name = input()
print(f"Hello {user_name}. What would u like to do?")
print ('\n')
while True:
print("Use these short codes : cc - create a credentials, del - delete credential dc - display password, fc -find a password, ex -exit the password list ")
short_code = input().lower()
if short_code == 'cc':
print("Credential")
print("-"*10)
print("first_name")
f_name = input()
print("last_name")
last_name = input()
print("user_name")
u_user_name = input()
print("password")
p_password = input()
save_passwords(create_password(f_name,last_name,u_user_name,p_password))
print ('\n')
print(f"New credential {f_name} {last_name} created")
print ('\n')
elif short_code == 'dc':
if display_passwords():
print("Here is a list of all your passwords")
print('\n')
for password in display_passwords():
print(f"{password.first_name} {password.last_name} {password.user_name} {password.password}")
print('\n')
else:
print('\n')
print("You dont seem to have any passwords saved yet")
print('\n')
elif short_code == 'del':
print("Enter the username you want to delete")
search_user_name = input()
if check_existng_passwords(search_user_name):
search_password = find_password(search_user_name)
del_password(search_password)
print("account successfully deleted!")
else:
print("That account does not exist")
elif short_code == 'fc':
print("Enter the username you want to search for")
search_user_name = input()
if check_existng_passwords(search_user_name):
search_password = find_password(search_user_name)
print(f"{search_password.first_name} {search_password.last_name}")
print('-' * 20)
print(f"user_name.......{search_password.user_name}")
print(f"password.......{<PASSWORD>}")
else:
print("That password does not exist")
elif short_code == "ex":
print("Bye")
break
else:
print("I really didn't get that. Please use the short codes")
if __name__ == '__main__':
main()
|
run.py
|
| 0.276105 | 0.118998 |
from functools import partial
from typing import Callable, List
from pyglet.window import mouse
from engine.models.ship import ShipModel
from engine.views.ship_parts.factories import ConfigViewFactory
from .base import BaseMenu, BaseButton
from .drydock import ControlConfiguration
class ControlConfigMenu(BaseMenu):
def __init__(self, heading: str, buttons, x, y, control_config: ControlConfiguration):
super().__init__(heading, buttons, x, y)
self.control_config = control_config
self.components: List[ControlConfiguration] = [control_config]
@classmethod
def manufacture_for_ship_model(cls, ship_model: ShipModel, close_menu_function: Callable, x, y,
font_size=36, screen_width=1280, screen_height=720):
left = 0
right = screen_width
bottom = 0
top = screen_height
control_config = ControlConfiguration(left, right, bottom, top, ship=ship_model,
view_factory=ConfigViewFactory())
heading = "Configure controls"
callables = [("<- Back", close_menu_function),
("Keyboard", partial(control_config.set_mode, "keyboard")),
("Gamepad", partial(control_config.set_mode, "gamepad")),
("Reset", control_config.reset),
("Save", control_config.save_all)]
height = int(font_size * 1.6)
width = int(height * 6)
height_spacing = int(height * 1.1)
buttons = []
for i, (name, func) in enumerate(callables):
i += 1
button = BaseButton.labeled_button(name, font_size=font_size, left=x, right=x + width,
bottom=y - height_spacing * i, top=y - height_spacing * i + height,
func=func)
buttons.append(button)
return cls(heading, buttons, x, y, control_config)
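    # Layout note for the loop above: each button is font_size * 1.6 tall and
    # six times as wide, and the buttons are stacked downward from (x, y) with
    # a 1.1 * height vertical spacing.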
def _component_at(self, x, y):
for component in self.components:
if component.in_area(x, y):
return component
def draw(self):
super(ControlConfigMenu, self).draw()
self.control_config.draw()
def on_mouse_motion(self, x, y, dx, dy):
super(ControlConfigMenu, self).on_mouse_motion(x, y, dx, dy)
self.control_config.highlight_at(x, y)
def on_mouse_press(self, x, y, button, modifiers):
super(ControlConfigMenu, self).on_mouse_press(x, y, button, modifiers)
self.control_config.on_mouse_press(x, y, button, modifiers)
def on_mouse_release(self, x, y, button, modifiers):
self.control_config.on_mouse_release(x, y, button, modifiers)
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
component = self._component_at(x, y)
if component:
if buttons & mouse.RIGHT:
component.translate(dx, dy)
if buttons & mouse.LEFT:
self.control_config.on_mouse_drag(x, y, dx, dy, buttons, modifiers)
def on_key_press(self, symbol, modifiers):
self.control_config.on_key_press(symbol, modifiers)
def on_joybutton_press(self, joystick, button):
self.control_config.on_joybutton_press(joystick, button)
def on_joyaxis_motion(self, joystick, axis, value):
if abs(value) > 0.9:
self.control_config.on_joyaxis_motion(joystick, axis, value)
|
engine/views/menus/control_config.py
|
| 0.707405 | 0.161221 |
import sys
import argparse
from workflow import Workflow, ICON_WEB, ICON_WARNING, ICON_NOTE, web, PasswordNotFound, Workflow3
def main(wf):
def googleFilter(filename):
return 'google' in filename
def exchangeFilter(filename):
return 'exchange' in filename
import os
from workflow.notify import notify
key = os.environ['settings_value']
value = os.environ['value_to_store']
wf.logger.debug(" Key: %s", key)
wf.logger.debug(" Value: %s", value)
if key == 'password':
wf.save_password('<PASSWORD>',value)
notify('Password updated')
else:
wf.settings[key] = {'value':value}
# wf.store_data(key, value)
text = os.environ['text_to_display']
if key == 'use_google':
wf.clear_cache(googleFilter)
if value == '0':
notify("Google Calendar Support", u'\u274C Disabled')
else:
notify("Google Calendar Support", u'\u2705 Enabled')
elif key == 'use_exchange':
wf.clear_cache(exchangeFilter)
if '0' == value:
notify("Exchange Server Support", u'\u274c Disabled')
else:
notify("Exchange Server Support", u'\u2705 Enabled')
elif key == 'use_ntlm':
def exchangeFilter(filename):
return 'exchange' in filename
# Clear outlook events because we are changing the auth type
wf.clear_cache(exchangeFilter)
if '0' == value:
notify("NTLM Authentication", u'\u274c Disabled')
else:
notify("NTLM Authentication", u'\u2705 Enabled')
elif key == 'use_ssl':
if '0' == value:
value = u'\u274c Disabled'
else:
value = u'\u2705 Enabled'
notify(text, value)
else:
notify('Updated ' + text, "To: " + value)
if __name__ == u"__main__":
wf = Workflow3(libraries=['./lib'])
wf.logger.debug(' _______________ ____ ______ ')
wf.logger.debug(' / ___/_ __/ __ \/ __ \/ ____/ ')
wf.logger.debug(' \__ \ / / / / / / /_/ / __/ ')
wf.logger.debug(' ___/ // / / /_/ / _, _/ /___ ')
wf.logger.debug(' /____//_/ \____/_/ |_/_____/ DATA ')
wf.logger.debug(' ')
sys.exit(wf.run(main))
|
src/store_data.py
|
| 0.107601 | 0.111 |
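store_data.py reads a settings key and value from environment variables, persists the pair, and shows a notification whose wording depends on the key. The dependency-free sketch below mirrors that dispatch pattern with a plain dict standing in for the Alfred-Workflow settings object and print standing in for notify; the key names are taken from the script, everything else is illustrative.
import os
def store_setting(settings, key, value):
    """Persist a key/value pair and return a human-readable status message."""
    settings[key] = {'value': value}
    if key in ('use_google', 'use_exchange', 'use_ntlm', 'use_ssl'):
        state = 'Disabled' if value == '0' else 'Enabled'
        return '%s: %s' % (key, state)
    return 'Updated %s to %s' % (key, value)
if __name__ == "__main__":
    settings = {}
    os.environ.setdefault('settings_value', 'use_google')   # illustrative defaults
    os.environ.setdefault('value_to_store', '1')
    msg = store_setting(settings, os.environ['settings_value'], os.environ['value_to_store'])
    print(msg, settings)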
import os
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from .misc import get_cifar_models
from collections import OrderedDict
__all__ = [
'load_optimizer',
'load_learning_rate_schedule',
'load_checkpoint',
# cifar
'load_transform',
'load_dataset',
'load_model',
# detection
'load_state_dict_path',
'load_checkpoint_path',
'load_ensemble_path',
]
def __process_state_dict(state_dict):
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] if k[:7] == "module." else k
new_state_dict[name] = v
return new_state_dict
# General loaders compatible with cifar and imagenet
def load_optimizer(args, model):
# Get optimiser name
opt_name = args.optim.lower()
# Print message to LOG
print("==> Creating '{}' optimiser".format(opt_name))
    # Supports SGD (with optional Nesterov), Adam and RMSprop
if opt_name.startswith("sgd"):
optimizer = torch.optim.SGD(
model.parameters(),
lr = args.lr,
momentum = args.momentum,
weight_decay = args.weight_decay,
nesterov = "nesterov" in opt_name,
)
elif opt_name.startswith("adam"):
optimizer = torch.optim.Adam(
model.parameters(),
lr = args.lr,
weight_decay = args.weight_decay,
)
elif opt_name == "rmsprop":
optimizer = torch.optim.RMSprop(
model.parameters(),
lr = args.lr,
momentum = args.momentum,
weight_decay = args.weight_decay,
eps = 0.0316,
alpha = 0.9,
)
    else:
        msg = "Invalid optimizer {}. Only SGD, Adam and RMSprop are supported."
        raise RuntimeError(msg.format(opt_name))
return optimizer
def load_learning_rate_schedule(args, optimizer):
args.lr_scheduler = args.lr_scheduler.lower()
# Print message to LOG
print("==> Creating '{}' learning rate scheduler".format(args.lr_scheduler))
    # Supports MultiStepLR, StepLR, ExponentialLR and CyclicLR schedules
if args.lr_scheduler == "multisteplr":
main_lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones = args.schedule, gamma = args.gamma)
elif args.lr_scheduler == "steplr":
main_lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=args.schedule_step, gamma = args.gamma)
elif args.lr_scheduler == "exponentiallr":
main_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
optimizer, gamma = args.gamma)
elif args.lr_scheduler == "cycliclr":
step_size_up = args.total_steps // 2
step_size_down = args.total_steps - step_size_up
main_lr_scheduler = torch.optim.lr_scheduler.CyclicLR(
optimizer, base_lr = args.base_lr, max_lr = args.max_lr,
step_size_up=step_size_up, step_size_down=step_size_down)
    else:
        raise RuntimeError(
            "Invalid lr scheduler '{}'. Only MultiStepLR, StepLR, ExponentialLR "
            "and CyclicLR are supported.".format(args.lr_scheduler)
        )
return main_lr_scheduler
# Use this when training models
def load_checkpoint(args, model, optimizer, reset = False):
# Defaults
best_acc = 0.0
start_epoch = 0
# Load checkpoint
# args.checkpoint = os.path.dirname(args.resume)
checkpoint = torch.load(args.resume)
# Extract information
if not reset:
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
    # Handle DataParallel "module." prefixes and related loading issues
try: model.load_state_dict(__process_state_dict(checkpoint['state_dict']))
except RuntimeError: model.model.load_state_dict(__process_state_dict(checkpoint['state_dict']))
# optimizer.load_state_dict(checkpoint['optimizer'])
return model, optimizer, best_acc, start_epoch
# Loaders only compatible with cifar
def load_transform(args):
# Let the normalisation layer be different for daf
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
if args.arch.startswith('daf'): normalize = transforms.Normalize((0.50, 0.50, 0.50), (0.50, 0.50, 0.50))
# Default transformation
transform_train = transform_test = transforms.Compose([
transforms.ToTensor(),
normalize,
])
# And with data augmentation
if args.augment:
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding = 4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
return transform_train, transform_test
def load_dataset(args, transform_train, transform_test, return_sets = False):
if args.dataset == 'cifar10':
dataloader = datasets.CIFAR10
num_classes = 10
else:
dataloader = datasets.CIFAR100
num_classes = 100
trainloader = None
if transform_train is not None:
trainset = dataloader(root='./data', train=True, download=True, transform=transform_train)
trainloader = data.DataLoader(trainset, batch_size=args.train_batch, shuffle=True, num_workers=args.workers)
testloader = None
if transform_test is not None:
testset = dataloader(root='./data', train=False, download=False, transform=transform_test)
testloader = data.DataLoader(testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)
if return_sets: return trainloader, testloader, num_classes, (trainset, testset)
return trainloader, testloader, num_classes
def load_model(args, models, num_classes):
if 'densenet' in args.arch:
model = models.__dict__[args.arch](args = args)
elif 'daf' in args.arch:
model = models.__dict__[args.arch](args = args)
    else: raise ValueError("==> Model architecture cannot be loaded.")
return model
# These loaders are used for detection
def load_state_dict_path(path):
# Load checkpoint
    assert os.path.isfile(path) or os.path.islink(path), 'Error: no checkpoint file found!'
# Get checkpoint dict
checkpoint = torch.load(path)
# Get attributes
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
state_dict = checkpoint['state_dict']
return __process_state_dict(state_dict), {'best_acc': best_acc, 'start_epoch': start_epoch}
def load_checkpoint_path(args, num_classes, path, use_cuda):
# Get model directory
if 'cifar' in args.dataset:
models = get_cifar_models()
model = load_model(args, models, num_classes)
if use_cuda: model = model.cuda()
# Get state dict
state_dict, info = load_state_dict_path(path)
model.load_state_dict(state_dict)
return model
def load_ensemble_path(args, num_classes, path, use_cuda):
# Load every model in ensemble
ensemble = []
for file in os.listdir(path):
# Create full path to file
filepath = os.path.join(path, file)
print("Loading model from:", filepath)
ensemble.append(load_checkpoint_path(args, num_classes, filepath, use_cuda))
return ensemble
|
utils/loaders.py
|
| 0.79657 | 0.365372 |
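load_optimizer and load_learning_rate_schedule above pick a torch optimiser and scheduler from argparse-style attributes. The sketch below mirrors that call pattern directly against torch, using a stand-in args namespace whose field names follow the loader code; the toy model exists only to supply parameters, so treat it as an illustration rather than the project's training script.
import argparse
import torch
# Stand-in for the argparse namespace the loaders expect (assumed fields).
args = argparse.Namespace(optim='sgd_nesterov', lr=0.1, momentum=0.9,
                          weight_decay=5e-4, lr_scheduler='multisteplr',
                          schedule=[150, 225], gamma=0.1)
model = torch.nn.Linear(32, 10)   # toy model, just to have parameters
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,
                            momentum=args.momentum,
                            weight_decay=args.weight_decay,
                            nesterov='nesterov' in args.optim)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=args.schedule, gamma=args.gamma)
for epoch in range(3):            # shortened training-loop skeleton
    optimizer.step()              # normally preceded by forward/backward
    scheduler.step()
    print(epoch, scheduler.get_last_lr())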
from ._fitting import __fit_single_decay__, __fit_triple_decay__
from numpy import array, unique
from pandas import Series, concat
from tqdm import tqdm
def fit_relaxation(flevel, seq_time, seq, datetime, blank=0, sat_len=100, rel_len=60, sat_flashlets=0, single_decay=False, bounds=True, single_lims=[100,50000], tau1_lims=[100, 800], tau2_lims=[800, 2000], tau3_lims=[2000, 50000], method='trf', loss='soft_l1', f_scale=0.1, max_nfev=None, xtol=1e-9):
"""
Process the raw transient data and perform the Kolber et al. 1998 relaxation model.
Parameters
----------
seq_time : np.array, dtype=float, shape=[n,]
The sequence time of the flashlets in μs.
flevel : np.array, dtype=float, shape=[n,]
The fluorescence yield of the instrument.
seq : np.array, dtype=int, shape=[n,]
The measurement number.
datetime : np.array, dtype=datetime64, shape=[n,]
The date & time of each measurement in the numpy datetime64 format.
blank : np.array, dtype=float, shape=[n,]
The blank value, must be the same length as flevel.
sat_len : int, default=100
The number of flashlets in the saturation sequence.
rel_len : int, default=60
The number of flashlets in the relaxation sequence.
sat_flashlets : int, default=0
The number of saturation flashlets to include at the start.
single_decay : bool, default=False
If True, will fit a single decay relaxation.
bounds : bool, default=True
If True, will set lower and upper limit bounds for the estimation, not suitable for methods 'lm'.
single_lims : [int, int], default=[100, 50000]
The lower and upper limit bounds for fitting τ, only required if single_decay is True.
tau1_lims: [int, int], default=[100, 800]
The lower and upper limit bounds for fitting τ1.
tau2_lims: [int, int], default=[800, 2000]
The lower and upper limit bounds for fitting τ2.
tau3_lims: [int, int], default=[2000, 50000]
The lower and upper limit bounds for fitting τ3.
	method : str, default='trf'
		The algorithm to perform minimization. See ``scipy.optimize.least_squares`` documentation for more information on non-linear least squares fitting options.
	loss : str, default='soft_l1'
		The loss function to be used. Note: Method ‘lm’ supports only ‘linear’ loss. See ``scipy.optimize.least_squares`` documentation for more information on non-linear least squares fitting options.
	f_scale : float, default=0.1
		The soft margin value between inlier and outlier residuals. See ``scipy.optimize.least_squares`` documentation for more information on non-linear least squares fitting options.
max_nfev : int, default=None
The number of iterations to perform fitting routine. If None, the value is chosen automatically. See ``scipy.optimize.least_squares`` documentation for more information on non-linear least squares fitting options.
xtol : float, default=1e-9
The tolerance for termination by the change of the independent variables. See ``scipy.optimize.least_squares`` documentation for more information on non-linear least squares fitting options.
Returns
-------
res: pandas.DataFrame
The results of the fitting routine with columns as below:
fo_r : np.array, dtype=float, shape=[n,]
The minimum fluorescence level of relaxation phase.
fm_r : np.array, dtype=float, shape=[n,]
The maximum fluorescence level of relaxation phase
tau : np.array, dtype=float, shape=[n,]
The rate of QA\ :sup:`-` reoxidation in μs, only returned if single_decay is True.
alpha1 : np.array, dtype=float, shape=[n,]
The decay coefficient of τ\ :sub:`1`, only returned if single_decay is False.
tau1 : np.array, dtype=float, shape=[n,]
The rate of QA\ :sup:`-` reoxidation in μs, only returned if single_decay is False.
alpha2 : np.array, dtype=float, shape=[n,]
The decay coefficient of τ\ :sub:`2`.
tau2 : np.array, dtype=float, shape=[n,]
The rate of QB\ :sup:`-` reoxidation in μs, only returned if single_decay is False.
alpha3 : np.array, dtype=float, shape=[n,]
The decay coefficient of τ\ :sub:`3`, only returned if single_decay is False.
tau3 : np.array, dtype=float, shape=[n,]
The rate of PQ reoxidation in μs, only returned if single_decay is False.
bias : np.array, dtype=float, shape=[n,]
The bias of fit in %.
rmse : np.array, dtype=float, shape=[n,]
The root mean squared error of the fit.
nrmse : np.array, dtype=float, shape=[n,]
The root mean squared error of the fit normalised to the mean of the fluorescence level.
fo_err : np.array, dtype=float, shape=[n,]
The fit error of Fo_relax in %.
fm_err : np.array, dtype=float, shape=[n,]
The fit error of Fm_relax in %.
tau_err : np.array, dtype=float, shape=[n,]
The fit error of τ, only returned if single_decay is True.
alpha1_err : np.array, dtype=float, shape=[n,]
The fit error of α\ :sub:`1`, only returned if single_decay is False.
tau1_err : np.array, dtype=float, shape=[n,]
The fit error of τ\ :sub:`1`, only returned if single_decay is False.
alpha2_err : np.array, dtype=float, shape=[n,]
The fit error of α\ :sub:`2`, only returned if single_decay is False.
tau2_err : np.array, dtype=float, shape=[n,]
The fit error of τ\ :sub:`2`, only returned if single_decay is False.
alpha3_err : np.array, dtype=float, shape=[n,]
The fit error of α\ :sub:`3`, only returned if single_decay is False.
tau3_err : np.array, dtype=float, shape=[n,]
The fit error of τ\ :sub:`3`, only returned if single_decay is False.
nfl : np.array, dtype=int, shape=[n,]
The number of flashlets used for fitting.
	niters : np.array, dtype=int, shape=[n,]
The number of functional evaluations done on the fitting routine.
flag : np.array, dtype=int, shape=[n,]
The code associated with the fitting routine success, positive values = SUCCESS, negative values = FAILURE.
-3 : Unable to calculate parameter errors
-2 : F\ :sub:`o` Relax > F\ :sub:`m` Relax
-1 : improper input parameters status returned from MINPACK.
0 : the maximum number of function evaluations is exceeded.
1 : gtol termination condition is satisfied.
2 : ftol termination condition is satisfied.
3 : xtol termination condition is satisfied.
4 : Both ftol and xtol termination conditions are satisfied.
success : np.array, dtype=bool, shape=[n,]
A boolean array reporting whether fit was successful (TRUE) or if not successful (FALSE)
datetime : np.array, dtype=datetime64, shape=[n,]
The date and time associated with the measurement.
Example
-------
	>>> rel = ppu.fit_relaxation(flevel, seq_time, seq, datetime, blank=0, sat_len=100, rel_len=40, single_decay=True, bounds=True, single_lims=[100, 50000])
"""
seq_time = array(seq_time)
flevel = array(flevel)
seq = array(seq)
dt = array(datetime)
if single_decay:
opts = {'sat_flashlets':sat_flashlets, 'bounds':bounds, 'single_lims':single_lims, 'method':method,'loss':loss, 'f_scale':f_scale, 'max_nfev':max_nfev, 'xtol':xtol}
else:
opts = {'sat_flashlets':sat_flashlets, 'bounds':bounds, 'tau1_lims':tau1_lims, 'tau2_lims':tau2_lims, 'tau3_lims':tau3_lims, 'method':method,'loss':loss, 'f_scale':f_scale, 'max_nfev':max_nfev, 'xtol':xtol}
res = []
for s in tqdm(unique(seq)):
i = seq == s
x = seq_time[i]
y = flevel[i]
x_min = min(x[sat_len:])
x = x[sat_len-sat_flashlets:sat_len+rel_len] - x_min
y = y[sat_len-sat_flashlets:sat_len+rel_len]
if single_decay:
rel = __fit_single_decay__(x, y, **opts)
else:
rel = __fit_triple_decay__(x, y, **opts)
res.append(Series(rel))
res = concat(res, axis=1)
res = res.T
if res.empty:
pass
else:
if single_decay:
res.columns = ['fo_r', 'fm_r', 'tau', 'bias', 'rmse', 'nrmse', 'fo_err', 'fm_err', 'tau_err', 'nfl', 'niters', 'flag', 'success']
else:
			res.columns = ['fo_r', 'fm_r', 'alpha1', 'tau1', 'alpha2', 'tau2', 'alpha3', 'tau3', 'bias', 'rmse', 'nrmse', 'for_err', 'fmr_err', 'alpha1_err', 'tau1_err', 'alpha2_err', 'tau2_err', 'alpha3_err', 'tau3_err', 'nfl', 'niters', 'flag', 'success']
res['datetime'] = unique(dt)
return res
|
phyto_photo_utils/_relaxation.py
|
| 0.864353 | 0.577972 |
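Inside fit_relaxation, the relaxation window of each sequence is cut out of the full flashlet trace as x[sat_len - sat_flashlets : sat_len + rel_len] and re-zeroed on the earliest post-saturation time. The numpy sketch below reproduces only that slicing step on synthetic data so the indexing is easy to verify; the array sizes are arbitrary.
import numpy as np
sat_len, rel_len, sat_flashlets = 100, 60, 0
seq_time = np.arange(sat_len + rel_len, dtype=float) * 2.0   # fake 2 µs spacing
flevel = np.linspace(1.0, 0.2, sat_len + rel_len)            # fake decaying yield
x_min = seq_time[sat_len:].min()                # first relaxation-phase time
x = seq_time[sat_len - sat_flashlets:sat_len + rel_len] - x_min
y = flevel[sat_len - sat_flashlets:sat_len + rel_len]
print(x.shape, y.shape)   # both (rel_len + sat_flashlets,) -> (60,)
print(x[0])               # 0.0: relaxation time axis starts at zero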
import re
import sys
import os
import subprocess
from io import StringIO
import tkinter
import IPython
from functools import reduce
#Works by itself, but not able to import it into the GUI at this time.
class IterableIPShell:
def __init__(self,argv=None,user_ns=None,user_global_ns=None,
cin=None, cout=None,cerr=None, input_func=None):
if input_func:
IPython.iplib.raw_input_original = input_func
if cin:
IPython.Shell.Term.cin = cin
if cout:
IPython.Shell.Term.cout = cout
if cerr:
IPython.Shell.Term.cerr = cerr
if argv is None:
argv=[]
# This is to get rid of the blockage that occurs during
# IPython.Shell.InteractiveShell.user_setup()
IPython.iplib.raw_input = lambda x: None
self.term = IPython.genutils.IOTerm(cin=cin, cout=cout, cerr=cerr)
os.environ['TERM'] = 'dumb'
excepthook = sys.excepthook
self.IP = IPython.Shell.make_IPython(argv,user_ns=user_ns,
user_global_ns=user_global_ns,
embedded=True,
shell_class=IPython.Shell.InteractiveShell)
self.IP.system = lambda cmd: self.shell(self.IP.var_expand(cmd),
header='IPython system call: ',
verbose=self.IP.rc.system_verbose)
sys.excepthook = excepthook
self.iter_more = 0
self.history_level = 0
    self.complete_sep = re.compile(r'[\s\{\}\[\]\(\)]')
def execute(self):
self.history_level = 0
orig_stdout = sys.stdout
sys.stdout = IPython.Shell.Term.cout
try:
line = self.IP.raw_input(None, self.iter_more)
if self.IP.autoindent:
self.IP.readline_startup_hook(None)
except KeyboardInterrupt:
self.IP.write('\nKeyboardInterrupt\n')
self.IP.resetbuffer()
# keep cache in sync with the prompt counter:
self.IP.outputcache.prompt_count -= 1
if self.IP.autoindent:
self.IP.indent_current_nsp = 0
self.iter_more = 0
except:
self.IP.showtraceback()
else:
self.iter_more = self.IP.push(line)
if (self.IP.SyntaxTB.last_syntax_error and
self.IP.rc.autoedit_syntax):
self.IP.edit_syntax_error()
if self.iter_more:
self.prompt = str(self.IP.outputcache.prompt2).strip()
if self.IP.autoindent:
self.IP.readline_startup_hook(self.IP.pre_readline)
else:
self.prompt = str(self.IP.outputcache.prompt1).strip()
sys.stdout = orig_stdout
def historyBack(self):
self.history_level -= 1
return self._getHistory()
def historyForward(self):
self.history_level += 1
return self._getHistory()
def _getHistory(self):
try:
rv = self.IP.user_ns['In'][self.history_level].strip('\n')
except IndexError:
self.history_level = 0
rv = ''
return rv
def updateNamespace(self, ns_dict):
self.IP.user_ns.update(ns_dict)
def complete(self, line):
split_line = self.complete_sep.split(line)
possibilities = self.IP.complete(split_line[-1])
if possibilities:
common_prefix = reduce(self._commonPrefix, possibilities)
completed = line[:-len(split_line[-1])]+common_prefix
else:
completed = line
return completed, possibilities
def _commonPrefix(self, str1, str2):
for i in range(len(str1)):
if not str2.startswith(str1[:i+1]):
return str1[:i]
return str1
  def shell(self, cmd,verbose=0,debug=0,header=''):
    if verbose or debug: print(header+cmd)
    # flush stdout so we don't mangle python's buffering
    if not debug:
      # os.popen4() no longer exists on Python 3; subprocess gives the same
      # combined stdout/stderr capture.
      result = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT)
      print(result.stdout.decode(errors='replace'))
ansi_colors = {'0;30': 'Black',
'0;31': 'Red',
'0;32': 'Green',
'0;33': 'Brown',
'0;34': 'Blue',
'0;35': 'Purple',
'0;36': 'Cyan',
'0;37': 'LightGray',
'1;30': 'DarkGray',
'1;31': 'DarkRed',
'1;32': 'SeaGreen',
'1;33': 'Yellow',
'1;34': 'LightBlue',
'1;35': 'MediumPurple',
'1;36': 'LightCyan',
'1;37': 'White'}
class TkConsoleView(tkinter.Text):
def __init__(self,root):
tkinter.Text.__init__(self,root)
# As the stdout,stderr etc. get fiddled about with we need to put any
# debug output into a file
self.debug=0
if self.debug:
self.o = open('debug.out','w')
# Keeps track of where the insert cursor should be on the entry line
self.mark = 'scroll_mark'
self.mark_set(self.mark,tkinter.END)
self.mark_gravity(self.mark,tkinter.RIGHT)
# Set the tags for colouring the text
for code in ansi_colors:
self.tag_config(code,
foreground=ansi_colors[code])
self.tag_config('notouch') # Tag for indicating what areas of the widget aren't editable
# colour_pat matches the colour tags and places these in a group
# match character with hex value 01 (start of heading?) zero or more times, followed by
# the hex character 1b (escape) then "[" and group ...things.. followed by m (?) and then
# hex character 02 (start of text) zero or more times
    self.color_pat = re.compile(r'\x01?\x1b\[(.*?)m\x02?')
self.line_start = 'line_start' # Tracks start of user input on the line (excluding prompt)
self.mark_set(self.line_start,tkinter.INSERT)
self.mark_gravity(self.line_start,tkinter.LEFT)
self._setBindings()
def write(self, text, editable=False):
segments = self.color_pat.split(text)
# First is blank line
segment = segments.pop(0)
# Keep track of where we started entering text so we can set as non-editable
self.start_mark = 'start_mark'
self.mark_set(self.start_mark,tkinter.INSERT)
self.mark_gravity(self.start_mark,tkinter.LEFT)
self.insert(tkinter.END, segment)
if segments:
# Just return the colour tags
ansi_tags = self.color_pat.findall(text)
for tag in ansi_tags:
i = segments.index(tag)
self.insert(tkinter.END,segments[i+1],tag)
segments.pop(i)
if not editable:
if self.debug:
print("adding notouch between %s : %s" % ( self.index(self.start_mark),\
self.index(tkinter.INSERT) ))
self.tag_add('notouch',self.start_mark,"%s-1c" % tkinter.INSERT)
self.mark_unset(self.start_mark)
#jmht self.scroll_mark_onscreen(self.mark)
def showBanner(self,banner):
"""Print the supplied banner on starting the shell"""
self.write(banner)
def showPrompt(self, prompt):
self.write(prompt)
self.mark_set(self.line_start,tkinter.INSERT)
self.see(tkinter.INSERT) #Make sure we can always see the prompt
def changeLine(self, text):
self.delete(self.line_start,"%s lineend" % self.line_start)
self.write(text, True)
def getCurrentLine(self):
rv = self.get(self.line_start,tkinter.END)
if self.debug:
print("getCurrentline: %s" % rv, file=self.o)
print("INSERT: %s" % tkinter.END, file=self.o)
print("END: %s" % tkinter.INSERT, file=self.o)
print("line_start: %s" % self.index(self.line_start), file=self.o)
return rv
def showReturned(self, text):
self.tag_add('notouch',self.line_start,"%s lineend" % self.line_start )
self.write('\n'+text)
if text:
self.write('\n')
self.showPrompt(self.prompt)
#self.mark_set(self.line_start,Tkinter.END) #jmht don't need this as showprompt sets mark
def _setBindings(self):
""" Bind the keys we require.
REM: if a bound function returns "break" then no other bindings are called
If it returns None, then the other default bindings are called.
"""
self.bind("<Key>",self.processKeyPress)
self.bind("<Return>",self.processEnterPress)
self.bind("<Up>",self.processUpPress)
self.bind("<Down>",self.processDownPress)
self.bind("<Tab>",self.processTabPress)
self.bind("<BackSpace>",self.processBackSpacePress)
def isEditable(self):
""" Scan the notouch tag range in pairs and see if the INSERT index falls
between any of them.
"""
ranges = self.tag_ranges('notouch')
first=None
for idx in ranges:
if not first:
first=idx
continue
else:
if self.debug:
print("Comparing %s between %s : %s " % (self.index(tkinter.INSERT),first,idx))
if self.compare( tkinter.INSERT,'>=',first ) and \
self.compare( tkinter.INSERT,'<=',idx ):
return False
first=None
return True
def processKeyPress(self,event):
if self.debug:
print("processKeyPress got key: %s" % event.char, file=self.o)
print("processKeyPress INSERT: %s" % self.index(tkinter.INSERT), file=self.o)
print("processKeyPress END: %s" % self.index(tkinter.END), file=self.o)
if not self.isEditable():
# Move cursor mark to start of line
self.mark_set(tkinter.INSERT,self.mark)
# Make sure line_start follows inserted text
self.mark_set(self.mark,"%s+1c" % tkinter.INSERT)
def processBackSpacePress(self,event):
if not self.isEditable():
return "break"
def processEnterPress(self,event):
self._processLine()
return "break" # Need break to stop the other bindings being called
def processUpPress(self,event):
self.changeLine(self.historyBack())
return "break"
def processDownPress(self,event):
self.changeLine(self.historyForward())
return "break"
def processTabPress(self,event):
if not self.getCurrentLine().strip():
return
completed, possibilities = self.complete(self.getCurrentLine())
if len(possibilities) > 1:
slice = self.getCurrentLine()
self.write('\n')
for symbol in possibilities:
self.write(symbol+'\n')
self.showPrompt(self.prompt)
self.changeLine(completed or slice)
return "break"
class IPythonView(TkConsoleView, IterableIPShell):
def __init__(self,root,banner=None):
TkConsoleView.__init__(self,root)
self.cout = StringIO()
IterableIPShell.__init__(self, cout=self.cout,cerr=self.cout,
input_func=self.raw_input)
if banner:
self.showBanner(banner)
self.execute()
self.cout.truncate(0)
self.showPrompt(self.prompt)
self.interrupt = False
def raw_input(self, prompt=''):
if self.interrupt:
self.interrupt = False
raise KeyboardInterrupt
return self.getCurrentLine()
def _processLine(self):
self.history_pos = 0
self.execute()
rv = self.cout.getvalue()
if self.debug:
print("_processLine got rv: %s" % rv, file=self.o)
if rv: rv = rv.strip('\n')
self.showReturned(rv)
self.cout.truncate(0)
if __name__ == "__main__":
root = tkinter.Tk()
s=IPythonView(root)
s.pack()
root.mainloop()
|
jade2/pyrosetta_toolkit/window_modules/interactive_terminal/interactive_terminal.py
|
| 0.192615 | 0.089177 |
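TkConsoleView.write splits incoming text on a capturing ANSI colour-escape pattern and then uses the captured codes as Tk tag names. The standalone snippet below shows what that split/findall pair produces for a small coloured string, which is exactly the structure write() iterates over.
import re
color_pat = re.compile(r'\x01?\x1b\[(.*?)m\x02?')
text = 'plain \x1b[0;32mgreen\x1b[0m back to plain'
segments = color_pat.split(text)    # text pieces interleaved with captured codes
codes = color_pat.findall(text)     # just the colour codes, in order
print(segments)  # ['plain ', '0;32', 'green', '0', ' back to plain']
print(codes)     # ['0;32', '0']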
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("core", "0023_add_referral_answer_attachment_with_base_class"),
]
operations = [
migrations.CreateModel(
name="ReferralUrgency",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"duration",
models.DurationField(
help_text="Expected treatment duration", verbose_name="duration"
),
),
(
"is_default",
models.BooleanField(
default=False,
help_text="Whether this urgency level is the default level for new referrals",
verbose_name="is default",
),
),
("name", models.CharField(max_length=200, verbose_name="name")),
(
"requires_justification",
models.BooleanField(
help_text="Whether to require a justification when this urgency is selected",
verbose_name="requires justification",
),
),
],
options={
"verbose_name": "referral urgency",
"db_table": "partaj_referral_urgency",
},
),
migrations.AddField(
model_name="referral",
name="urgency_level",
field=models.ForeignKey(
blank=True,
help_text="Urgency level. When is the referral answer needed?",
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="+",
to="core.ReferralUrgency",
verbose_name="urgency",
),
),
]
|
src/backend/partaj/core/migrations/0024_add_urgency_model.py
|
| 0.509276 | 0.156362 |
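The migration above stores each urgency level's expected treatment duration in a DurationField, which Django maps to a Python timedelta. The short sketch below shows the kind of due-date arithmetic such a field enables; the concrete duration and timestamp are made-up values, not taken from the project.
from datetime import datetime, timedelta
# A DurationField value arrives in Python as a timedelta (illustrative numbers).
urgency_duration = timedelta(days=21)          # e.g. a "three weeks" urgency level
referral_created_at = datetime(2021, 3, 1, 9, 30)
due_date = referral_created_at + urgency_duration
print(due_date.isoformat())                    # 2021-03-22T09:30:00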
import cs_grading as CS
import os
text_editor = 'subl'
#Options for p5
run_p5_test = 1 # Change to 0 to turn off these tests .
p5_use_valgrind = 1 # Change to 0 if you don't want valgrind to be run.
p5_source_files = '../barry.cpp' # The name and location of the student's solution file relative to this script.
p5_open_results = 1 # Change to 0 if you don't want the results files opened automatically.
p5_remove_files = 1 # Change to 0 if you don't want intermediary files to be removed.
#Options for p6
run_p6_test = 1 # Change to 0 to turn off these tests .
p6_use_valgrind = 1 # Change to 0 if you don't want valgrind to be run.
p6_source_files = '../hw1q6.cpp' # The name and location of the student's solution file relative to this script.
p6_open_results = 1 # Change to 0 if you don't want the results files opened automatically.
p6_remove_files = 1 # Change to 0 if you don't want intermediary files to be removed.
#Options for p7
run_p7_test = 1 # Change to 0 to turn off these tests .
p7_use_valgrind = 1 # Change to 0 if you don't want valgrind to be run.
p7_source_files = '../game_of_pointers.cpp' # The name and location of the student's solution file relative to this script.
p7_open_results = 1 # Change to 0 if you don't want the results files opened automatically.
p7_remove_files = 1 # Change to 0 if you don't want intermediary files to be removed.
### p5 run tests
if run_p5_test:
p5_result_file = 'p5_result.txt'
p5_valgrind_file = 'p5_valgrind.txt'
p5_target = 'barry'
if CS.check_file_existence(p5_result_file):
CS.remove_file(p5_result_file)
if CS.check_file_existence(p5_valgrind_file):
CS.remove_file(p5_valgrind_file)
CS.compile_student_code(0,
source_files=p5_source_files,
target=p5_target,
flags='-g -Wall -std=c++11')
CS.mkdir('q5_student_output')
f = open('q5_input/input.txt', 'r')
correct_test = 0
	for i in range(1, 11):
string = f.readline().strip()
output_file = 'q5_student_output/output' + str(i) + '.out'
expected_output_file = 'q5_output/output' + str(i) + '.txt'
CS.run_executable('./',
p5_target,
string + ' > ' + output_file,
use_valgrind=p5_use_valgrind,
valgrind_log_filename=p5_valgrind_file)
if CS.check_file_existence(output_file):
results = CS.compare_files_with_order( output_file,
expected_output_file,
p5_result_file,
skip_white_space=1,
detailed_results=0)
CS.write_message(p5_result_file, '\n')
if results[1] == 0 and results[2] == 0:
correct_test += 1
CS.write_message(p5_result_file, 'Test ' + str(i) + ' passed!\n\n')
else:
CS.write_message(p5_result_file, 'Test ' + str(i) + ' failed.\n\n')
if correct_test == 10:
CS.write_message(p5_result_file, '\nAll Test Cases Passed!')
else:
CS.write_message(p5_result_file, 'Failed ' + str(10 - correct_test) + ' tests!')
if p5_open_results:
CS.open_file(p5_result_file, text_editor)
if p5_use_valgrind:
CS.open_file(p5_valgrind_file, text_editor)
# Clean up
if p5_remove_files:
CS.remove_file(p5_target)
os.system('rm -r q5_student_output')
### p6 run tests
if run_p6_test:
p6_result_file = 'p6_result.txt'
p6_valgrind_file = 'p6_valgrind.txt'
p6_target = 'hw1q6'
if CS.check_file_existence(p6_result_file):
CS.remove_file(p6_result_file)
if CS.check_file_existence(p6_valgrind_file):
CS.remove_file(p6_valgrind_file)
CS.compile_student_code(0,
source_files=p6_source_files,
target=p6_target,
flags='-g -Wall -std=c++11')
CS.mkdir('q6_student_output')
correct_test = 0
	for i in range(1, 11):
string = 'q6_input/input' + str(i) + '.txt'
output_file = 'q6_student_output/output' + str(i) + '.out'
expected_output_file = 'q6_output/output' + str(i) + '.txt'
CS.run_executable('./',
p6_target,
string + ' > ' + output_file,
use_valgrind=p6_use_valgrind,
valgrind_log_filename=p6_valgrind_file)
if CS.check_file_existence(output_file):
results = CS.compare_files_with_order( output_file,
expected_output_file,
p6_result_file,
skip_white_space=1,
detailed_results=0)
CS.write_message(p6_result_file, '\n')
if results[1] == 0 and results[2] == 0:
correct_test += 1
CS.write_message(p6_result_file, 'Test ' + str(i) + ' passed!\n\n')
else:
CS.write_message(p6_result_file, 'Test ' + str(i) + ' failed.\n\n')
if correct_test == 10:
CS.write_message(p6_result_file, '\nAll Test Cases Passed!')
else:
CS.write_message(p6_result_file, 'Failed ' + str(10 - correct_test) + ' tests!')
if p6_open_results:
CS.open_file(p6_result_file, text_editor)
if p6_use_valgrind:
CS.open_file(p6_valgrind_file, text_editor)
# Clean up
if p6_remove_files:
CS.remove_file(p6_target)
os.system('rm -r q6_student_output')
if run_p7_test:
p7_result_file = 'got_result.txt'
p7_valgrind_file = 'got_valgrind.txt'
p7_target = 'game_of_pointers'
p7_test_count = 16
if CS.check_file_existence(p7_result_file):
CS.remove_file(p7_result_file)
if CS.check_file_existence(p7_valgrind_file):
CS.remove_file(p7_valgrind_file)
CS.compile_student_code(0,
source_files=p7_source_files,
target=p7_target,
flags='-g -Wall -std=c++11')
CS.mkdir('q7_student_output')
correct_test = 0
	for i in range(1, p7_test_count + 1):
parameters = 'q7_input/input' + str(i) + '.txt'
parameters += ' q7_student_output/output' + str(i) +'.txt'
output_file = 'q7_student_output/output' + str(i) +'.txt'
expected_output_file = 'q7_input/solution' + str(i) + '.txt'
CS.run_executable('./',
p7_target,
parameters,
use_valgrind=p7_use_valgrind,
valgrind_log_filename=p7_valgrind_file)
if CS.check_file_existence(output_file):
results = CS.compare_files_with_order( output_file,
expected_output_file,
p7_result_file,
skip_white_space=1,
detailed_results=0)
CS.write_message(p7_result_file, '\n')
if results[1] == 0 and results[2] == 0:
correct_test += 1
CS.write_message(p7_result_file, 'Test ' + str(i) + ' passed!\n\n')
else:
CS.write_message(p7_result_file, 'Test ' + str(i) + ' failed.\n\n')
if correct_test == p7_test_count:
CS.write_message(p7_result_file, '\nAll Test Cases Passed!')
else:
CS.write_message(p7_result_file, 'Failed ' + str(p7_test_count - correct_test) + ' tests!')
	if p7_open_results:
CS.open_file(p7_result_file, text_editor)
		if p7_use_valgrind:
CS.open_file(p7_valgrind_file, text_editor)
# Clean up
if p7_remove_files:
CS.remove_file(p7_target)
os.system('rm -r q7_student_output')
|
CSCI-104/homework-resources/hw1-test/hw1-checker.py
|
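hw1-checker.py repeats one pattern per question: run the compiled target on an input, compare its output with an expected file while ignoring whitespace, and tally passes. Because cs_grading is a course-specific helper module, the sketch below re-creates that loop with only the standard library; the file names and the commented invocation are placeholders.
import subprocess
def run_and_compare(executable, arg, expected_path, output_path):
    """Run `executable arg > output_path` and compare it with the expected file."""
    with open(output_path, 'w') as out:
        subprocess.run([executable, arg], stdout=out, check=False)
    with open(output_path) as got, open(expected_path) as want:
        # Whitespace-insensitive comparison, like compare_files_with_order(skip_white_space=1).
        return [line.split() for line in got] == [line.split() for line in want]
def run_suite(executable, cases):
    passed = 0
    for i, (arg, expected) in enumerate(cases, start=1):
        ok = run_and_compare(executable, arg, expected, 'output%d.out' % i)
        print('Test %d %s' % (i, 'passed!' if ok else 'failed.'))
        passed += ok
    print('%d/%d tests passed' % (passed, len(cases)))
# Example invocation (paths are placeholders):
# run_suite('./hw1q6', [('q6_input/input1.txt', 'q6_output/output1.txt')])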
CS.remove_file(p7_valgrind_file)
CS.compile_student_code(0,
source_files=p7_source_files,
target=p7_target,
flags='-g -Wall -std=c++11')
CS.mkdir('q7_student_output')
correct_test = 0
for i in xrange(1,p7_test_count + 1):
parameters = 'q7_input/input' + str(i) + '.txt'
parameters += ' q7_student_output/output' + str(i) +'.txt'
output_file = 'q7_student_output/output' + str(i) +'.txt'
expected_output_file = 'q7_input/solution' + str(i) + '.txt'
CS.run_executable('./',
p7_target,
parameters,
use_valgrind=p7_use_valgrind,
valgrind_log_filename=p7_valgrind_file)
if CS.check_file_existence(output_file):
results = CS.compare_files_with_order( output_file,
expected_output_file,
p7_result_file,
skip_white_space=1,
detailed_results=0)
CS.write_message(p7_result_file, '\n')
if results[1] == 0 and results[2] == 0:
correct_test += 1
CS.write_message(p7_result_file, 'Test ' + str(i) + ' passed!\n\n')
else:
CS.write_message(p7_result_file, 'Test ' + str(i) + ' failed.\n\n')
if correct_test == p7_test_count:
CS.write_message(p7_result_file, '\nAll Test Cases Passed!')
else:
CS.write_message(p7_result_file, 'Failed ' + str(p7_test_count - correct_test) + ' tests!')
if p6_open_results:
CS.open_file(p7_result_file, text_editor)
if p6_use_valgrind:
CS.open_file(p7_valgrind_file, text_editor)
# Clean up
if p7_remove_files:
CS.remove_file(p7_target)
os.system('rm -r q7_student_output')
| 0.095592 | 0.131675 |
import numpy as np
import tvm
import topi
def verify_expand_dims(in_shape, out_shape, axis, num_newaxis):
A = tvm.placeholder(shape=in_shape, name="A")
B = topi.cpp.expand_dims(A, axis, num_newaxis)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.schedule_injective(target, [B])
else:
s = topi.cpp.cuda.schedule_injective(target, [B])
foo = tvm.build(s, [A, B], device, name="expand_dims")
data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
out_npy = data_npy.reshape(out_shape)
data_nd = tvm.nd.array(data_npy, ctx)
out_nd = tvm.nd.array(np.empty(out_shape).astype(B.dtype), ctx)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy)
for device in ["llvm", "nvptx", "cuda", "opencl", "metal", "rocm"]:
check_device(device)
def verify_tranpose(in_shape, axes):
A = tvm.placeholder(shape=in_shape, name="A")
B = topi.cpp.transpose(A, axes)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.schedule_injective(target, [B])
else:
s = topi.cpp.cuda.schedule_injective(target, [B])
        foo = tvm.build(s, [A, B], device, name="tranpose")
data_npy = np.arange(np.prod(in_shape)).reshape(in_shape).astype(A.dtype)
out_npy = data_npy.transpose(axes)
data_nd = tvm.nd.array(data_npy, ctx)
out_nd = tvm.nd.empty(out_npy.shape, ctx=ctx, dtype=B.dtype)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy)
for device in ["llvm", "nvptx", "cuda", "opencl", "metal", "rocm"]:
check_device(device)
def verify_reshape(src_shape, dst_shape):
A = tvm.placeholder(shape=src_shape, name="A")
B = topi.cpp.reshape(A, dst_shape)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.schedule_injective(target, [B])
else:
s = topi.cpp.cuda.schedule_injective(target, [B])
foo = tvm.build(s, [A, B], device, name="reshape")
data_npy = np.random.normal(size=src_shape).astype(A.dtype)
out_npy = np.reshape(data_npy, newshape=dst_shape)
data_nd = tvm.nd.array(data_npy, ctx)
out_nd = tvm.nd.empty(dst_shape, ctx=ctx, dtype=B.dtype)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy)
for device in ["llvm", "nvptx", "cuda", "opencl", "metal", "rocm"]:
check_device(device)
def verify_squeeze(src_shape, axis):
A = tvm.placeholder(shape=src_shape, name="A")
B = topi.cpp.squeeze(A, axis)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.schedule_injective(target, [B])
else:
s = topi.cpp.cuda.schedule_injective(target, [B])
foo = tvm.build(s, [A, B], device, name="squeeze")
data_npy = np.random.normal(size=src_shape).astype(A.dtype)
out_npy = np.squeeze(data_npy, axis=axis)
data_nd = tvm.nd.array(data_npy, ctx)
out_nd_shape = out_npy.shape
out_nd = tvm.nd.empty(out_nd_shape, ctx=ctx, dtype=B.dtype)
foo(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy)
for device in ["llvm", "nvptx", "cuda", "opencl", "metal", "rocm"]:
check_device(device)
def verify_concatenate(shapes, axis):
tensor_l = []
for i, shape in enumerate(shapes):
tensor_l.append(tvm.placeholder(shape, name="A" + str(i)))
out_tensor = topi.cpp.concatenate(tensor_l, axis)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.schedule_injective(target, [out_tensor])
else:
s = topi.cpp.cuda.schedule_injective(target, [out_tensor])
foo = tvm.build(s, tensor_l + [out_tensor], device, name="concatenate")
data_npys = [np.random.normal(size=shape).astype(tensor_l[0].dtype) for shape in shapes]
out_npy = np.concatenate(data_npys, axis=axis)
data_nds = [tvm.nd.array(data_npy, ctx) for data_npy in data_npys]
out_nd = tvm.nd.empty(out_npy.shape, ctx=ctx, dtype=out_tensor.dtype)
foo(*(data_nds + [out_nd]))
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy)
for device in ["llvm", "nvptx", "cuda", "opencl", "metal", "rocm"]:
check_device(device)
def verify_split(src_shape, indices_or_sections, axis):
A = tvm.placeholder(shape=src_shape, name="A")
tensor_l = topi.cpp.split(A, indices_or_sections, axis)
tensor_l = list(tensor_l)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.schedule_injective(target, tensor_l)
else:
s = topi.cpp.cuda.schedule_injective(target, tensor_l)
        foo = tvm.build(s, [A] + tensor_l, device, name="split")
data_npy = np.random.normal(size=src_shape).astype(A.dtype)
out_npys = np.split(data_npy, indices_or_sections, axis=axis)
data_nd = tvm.nd.array(data_npy, ctx)
out_nds = [tvm.nd.empty(out_npy.shape, ctx=ctx, dtype=tensor_l[0].dtype) for out_npy in out_npys]
foo(*([data_nd] + out_nds))
for out_nd, out_npy in zip(out_nds, out_npys):
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy)
for device in ["llvm", "nvptx", "cuda", "opencl", "metal", "rocm"]:
check_device(device)
def verify_take(src_shape, indices_src, axis=None):
src_dtype = "float32"
indices_dtype = "int32"
indices_src = np.array(indices_src, dtype=indices_dtype)
A = tvm.placeholder(shape=src_shape, dtype=src_dtype, name="A")
indices = tvm.placeholder(shape=indices_src.shape, dtype=indices_dtype, name="indices")
if axis is None:
out_tensor = topi.cpp.take(A, indices)
else:
out_tensor = topi.cpp.take(A, indices, axis)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
s = topi.generic.schedule_injective(out_tensor)
foo = tvm.build(s, [A] + [indices] + [out_tensor] , device, name="take")
shape_size = 1
for i in range(len(src_shape)):
shape_size = shape_size * src_shape[i]
data_npy = np.arange(shape_size, dtype=src_dtype).reshape((src_shape))
if axis is None:
out_npys = np.take(data_npy, indices_src)
else:
out_npys = np.take(data_npy, indices_src, axis=axis)
data_nd = tvm.nd.array(data_npy, ctx)
indices_nd = tvm.nd.array(indices_src, ctx)
out_nd = tvm.nd.empty(out_npys.shape, ctx=ctx, dtype=src_dtype)
foo(data_nd, indices_nd, out_nd)
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npys)
for device in ["llvm", "opencl"]:
check_device(device)
def verify_where(condition, x, y):
dtype = "float32"
if len(condition.shape) == 1:
np_out = np.array([xv if c else yv for (c,xv,yv) in zip(condition,x,y)])
else:
np_out = np.where(condition, x, y)
A = tvm.placeholder(shape=condition.shape, dtype=dtype, name="condition")
B = tvm.placeholder(shape=x.shape, dtype=dtype, name="x")
C = tvm.placeholder(shape=y.shape, dtype=dtype, name="y")
out_tensor = topi.cpp.where(A, B, C)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
s = topi.generic.schedule_injective(out_tensor)
foo = tvm.build(s, [A, B, C, out_tensor], device, name="where")
tvm_out = tvm.nd.empty(x.shape, ctx=ctx, dtype=dtype)
foo(tvm.nd.array(condition, ctx), tvm.nd.array(x, ctx),
tvm.nd.array(y, ctx), tvm_out)
tvm.testing.assert_allclose(tvm_out.asnumpy(), np_out)
for device in ["llvm", "nvptx", "cuda", "opencl", "metal", "rocm"]:
check_device(device)
def verify_concatenate_split(shapes, axis, indices_or_sections):
tensor_l_concatenate = []
for i, shape in enumerate(shapes):
tensor_l_concatenate.append(tvm.placeholder(shape, name="A" + str(i)))
out_tensor = topi.cpp.concatenate(tensor_l_concatenate, axis)
tensor_l = topi.cpp.split(out_tensor, indices_or_sections, axis)
tensor_l = list(tensor_l)
def check_device(device):
if not tvm.module.enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.schedule_injective(target, tensor_l)
else:
s = topi.cpp.cuda.schedule_injective(target, tensor_l)
ctx = tvm.context(device, 0)
foo = tvm.build(s, tensor_l_concatenate + tensor_l, device, name="concatenate_split")
data_npys = [np.random.normal(size=shape).astype(tensor_l_concatenate[0].dtype) for shape in shapes]
out_npy_conc = np.concatenate(data_npys, axis=axis)
out_npys_split = np.split(out_npy_conc, indices_or_sections, axis=axis)
data_nds = [tvm.nd.array(data_npy, ctx) for data_npy in data_npys]
out_nds = [tvm.nd.empty(out_npy.shape, ctx=ctx, dtype=tensor_l[0].dtype) for out_npy in out_npys_split]
foo(*(data_nds + out_nds))
for out_nd, out_npy in zip(out_nds, out_npys_split):
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy)
for device in ["llvm", "cuda", "opencl", "metal", "rocm"]:
check_device(device)
def verify_concatenate_broadcast(shapes, axis, rhs_shape):
B = tvm.placeholder(shape=rhs_shape, name="B")
tensor_l = []
for i, shape in enumerate(shapes):
tensor_l.append(tvm.placeholder(shape, name="A" + str(i)))
out_tensor = topi.cpp.concatenate(tensor_l, axis)
C = out_tensor + B
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.schedule_injective(target, [C])
else:
s = topi.cpp.cuda.schedule_injective(target, [C])
        foo = tvm.build(s, tensor_l + [B, C], device, name="broadcast_binary_add")
data_npys = [np.random.normal(size=shape).astype(tensor_l[0].dtype) for shape in shapes]
lhs_npy = np.concatenate(data_npys, axis=axis)
rhs_npy = np.random.uniform(size=rhs_shape).astype(B.dtype)
out_npy = lhs_npy + rhs_npy
data_nds = [tvm.nd.array(data_npy, ctx) for data_npy in data_npys]
rhs_nd = tvm.nd.array(rhs_npy, ctx)
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(B.dtype), ctx)
for _ in range(1):
foo(*(data_nds + [rhs_nd] + [out_nd]))
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy, rtol=1E-4, atol=1E-4)
for device in ["llvm", "cuda", "opencl", "metal", "rocm"]:
check_device(device)
def test_expand_dims():
verify_expand_dims((3, 10), (3, 10, 1, 1), 2, 2)
verify_expand_dims((3, 10), (1, 3, 10), -3, 1)
def test_tranpose():
verify_tranpose((3, 10, 2), (1, 0, 2))
verify_tranpose((3, 10, 5), (2, 0, 1))
verify_tranpose((3, 10), None)
verify_tranpose((3, 10, 5), (2, -3, 1))
def test_reshape():
verify_reshape((1, 2, 3, 4), (2, 3, 4))
verify_reshape((4, 2, 3, 4), (2, 4, 12))
verify_reshape((4, 2, 3, 4), (2, 48))
verify_reshape((16, ), (2, 2, 2, 2))
def test_squeeze():
verify_squeeze((1, 2, 3, 4), 0)
verify_squeeze((1, 2, 1, 4), None)
verify_squeeze((1, 1, 1, 4), (1, 2))
verify_squeeze((1, 1, 1, 1), None)
def test_concatenate():
verify_concatenate([(2,), (2,), (2,)], 0)
verify_concatenate([(2, 3, 4), (2, 2, 4), (2, 5, 4)], 1)
verify_concatenate([(1, 2, 4), (1, 2, 3), (1, 2, 7), (1, 2, 8), (1, 2, 1)], -1)
verify_concatenate([(5, 6, 7, 3),
(16, 6, 7, 3),
(12, 6, 7, 3),
(8, 6, 7, 3),
(2, 6, 7, 3)], 0)
def test_split():
verify_split((2, 12, 3), 3, 1)
verify_split((2, 12, 3), 3, -1)
verify_split((2, 12, 3), [2, 4], 1)
verify_split((10, 12, 24), [5, 7, 9], -1)
def test_take():
verify_take((4,), [1])
verify_take((4,), [[0,1,2,3]])
verify_take((3,3,3), [[11,25]])
verify_take((4,), [[0,1],[2,3]])
verify_take((4,), [1], 0)
verify_take((2,2), [[[1,0],[0,1]]], 0)
verify_take((2,2), [[[1,0],[0,1]]], 1)
verify_take((4,3,5,6), [[2,1,0,0]], -2)
def test_where():
shape = (10, 3, 7, 13)
condition = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
x = np.random.uniform(size=shape).astype("float32")
y = np.random.uniform(size=shape).astype("float32")
verify_where(condition, x, y)
condition = np.random.uniform(low=-1, high=1, size=(shape[0],)).astype("float32")
x = np.random.uniform(size=shape).astype("float32")
y = np.random.uniform(size=shape).astype("float32")
verify_where(condition, x, y)
def test_regression_1():
verify_concatenate_split([(2, 3, 4), (2, 2, 4), (2, 5, 4)], 1, [3, 7])
verify_concatenate_split([(3, 4), (2, 4), (3, 4)], 0, [1, 2, 3, 4])
def test_regression_2():
verify_concatenate_broadcast([(5, 1, 3), (5, 1, 3)], 1, [2, 1])
verify_concatenate_broadcast([(5, 1, 2), (5, 1, 3)], 2, [1, 5])
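# Minimal standalone sketch (assumptions): builds and runs topi.cpp.reshape on the LLVM
# target only, mirroring verify_reshape() above. It reuses this module's imports and
# assumes TVM was built with the C++ TOPI library and an LLVM backend; call it manually,
# e.g. demo_reshape_llvm(), or wire it into the __main__ block below.
def demo_reshape_llvm(src_shape=(2, 3, 4), dst_shape=(4, 6)):
    A = tvm.placeholder(shape=src_shape, name="A")
    B = topi.cpp.reshape(A, dst_shape)
    target = topi.cpp.TEST_create_target("llvm")
    s = topi.cpp.generic.schedule_injective(target, [B])
    ctx = tvm.context("llvm", 0)
    foo = tvm.build(s, [A, B], "llvm", name="reshape_demo")
    data_npy = np.random.normal(size=src_shape).astype(A.dtype)
    data_nd = tvm.nd.array(data_npy, ctx)
    out_nd = tvm.nd.empty(dst_shape, ctx=ctx, dtype=B.dtype)
    foo(data_nd, out_nd)
    tvm.testing.assert_allclose(out_nd.asnumpy(), data_npy.reshape(dst_shape))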
if __name__ == "__main__":
test_concatenate()
test_tranpose()
test_expand_dims()
test_reshape()
test_squeeze()
test_split()
test_take()
test_where()
test_regression_1()
test_regression_2()
|
topi/tests/python_cpp/test_topi_transform.py
| 0.346541 | 0.436622 |
import numpy
import talib
class ChartFeature(object):
def __init__(self, selector):
self.selector = selector
self.supported = {"ROCP", "OROCP", "HROCP", "LROCP", "MACD", "RSI", "VROCP", "BOLL", "MA", "VMA", "PRICE_VOLUME"}
self.feature = []
def moving_extract(self, window=30, open_prices=None, close_prices=None, high_prices=None, low_prices=None,
volumes=None, with_label=True, flatten=True):
self.extract(open_prices=open_prices, close_prices=close_prices, high_prices=high_prices, low_prices=low_prices,
volumes=volumes)
feature_arr = numpy.asarray(self.feature)
p = 0
# rows = feature_arr.shape[0]
# print("feature dimension: %s" % rows)
if with_label:
moving_features = []
moving_labels = []
while p + window <= feature_arr.shape[1]:
x = feature_arr[:, p:p + window]
# y = cmp(close_prices[p + window], close_prices[p + window - 1]) + 1
if p + window < feature_arr.shape[1]:
p_change = (close_prices[p + window] - close_prices[p + window - 1]) / close_prices[p + window - 1]
else:
p_change = 0
# use percent of change as label
y = p_change
if flatten:
x = x.flatten("F")
moving_features.append(numpy.nan_to_num(x))
moving_labels.append(y)
p += 1
return numpy.asarray(moving_features), numpy.asarray(moving_labels)
else:
moving_features = []
while p + window <= feature_arr.shape[1]:
x = feature_arr[:, p:p + window]
if flatten:
x = x.flatten("F")
moving_features.append(numpy.nan_to_num(x))
p += 1
return moving_features
def extract(self, open_prices=None, close_prices=None, high_prices=None, low_prices=None, volumes=None):
self.feature = []
for feature_type in self.selector:
if feature_type in self.supported:
# print("extracting feature : %s" % feature_type)
self.extract_by_type(feature_type, open_prices=open_prices, close_prices=close_prices,
high_prices=high_prices, low_prices=low_prices, volumes=volumes)
else:
print("feature type not supported: %s" % feature_type)
# self.feature_distribution()
return self.feature
def feature_distribution(self):
k = 0
for feature_column in self.feature:
fc = numpy.nan_to_num(feature_column)
mean = numpy.mean(fc)
var = numpy.var(fc)
max_value = numpy.max(fc)
min_value = numpy.min(fc)
print("[%s_th feature] mean: %s, var: %s, max: %s, min: %s" % (k, mean, var, max_value, min_value))
k = k + 1
def extract_by_type(self, feature_type, open_prices=None, close_prices=None, high_prices=None, low_prices=None,
volumes=None):
if feature_type == 'ROCP':
rocp = talib.ROCP(close_prices, timeperiod=1)
self.feature.append(rocp)
if feature_type == 'OROCP':
orocp = talib.ROCP(open_prices, timeperiod=1)
self.feature.append(orocp)
if feature_type == 'HROCP':
hrocp = talib.ROCP(high_prices, timeperiod=1)
self.feature.append(hrocp)
if feature_type == 'LROCP':
lrocp = talib.ROCP(low_prices, timeperiod=1)
self.feature.append(lrocp)
if feature_type == 'MACD':
macd, signal, hist = talib.MACD(close_prices, fastperiod=12, slowperiod=26, signalperiod=9)
norm_signal = numpy.minimum(numpy.maximum(numpy.nan_to_num(signal), -1), 1)
norm_hist = numpy.minimum(numpy.maximum(numpy.nan_to_num(hist), -1), 1)
norm_macd = numpy.minimum(numpy.maximum(numpy.nan_to_num(macd), -1), 1)
zero = numpy.asarray([0])
macdrocp = numpy.minimum(numpy.maximum(numpy.concatenate((zero, numpy.diff(numpy.nan_to_num(macd)))), -1), 1)
signalrocp = numpy.minimum(numpy.maximum(numpy.concatenate((zero, numpy.diff(numpy.nan_to_num(signal)))), -1), 1)
histrocp = numpy.minimum(numpy.maximum(numpy.concatenate((zero, numpy.diff(numpy.nan_to_num(hist)))), -1), 1)
self.feature.append(norm_macd)
self.feature.append(norm_signal)
self.feature.append(norm_hist)
self.feature.append(macdrocp)
self.feature.append(signalrocp)
self.feature.append(histrocp)
if feature_type == 'RSI':
rsi6 = talib.RSI(close_prices, timeperiod=6)
rsi12 = talib.RSI(close_prices, timeperiod=12)
rsi24 = talib.RSI(close_prices, timeperiod=24)
rsi6rocp = talib.ROCP(rsi6 + 100., timeperiod=1)
rsi12rocp = talib.ROCP(rsi12 + 100., timeperiod=1)
rsi24rocp = talib.ROCP(rsi24 + 100., timeperiod=1)
self.feature.append(rsi6 / 100.0 - 0.5)
self.feature.append(rsi12 / 100.0 - 0.5)
self.feature.append(rsi24 / 100.0 - 0.5)
# self.feature.append(numpy.maximum(rsi6 / 100.0 - 0.8, 0))
# self.feature.append(numpy.maximum(rsi12 / 100.0 - 0.8, 0))
# self.feature.append(numpy.maximum(rsi24 / 100.0 - 0.8, 0))
# self.feature.append(numpy.minimum(rsi6 / 100.0 - 0.2, 0))
# self.feature.append(numpy.minimum(rsi6 / 100.0 - 0.2, 0))
# self.feature.append(numpy.minimum(rsi6 / 100.0 - 0.2, 0))
# self.feature.append(numpy.maximum(numpy.minimum(rsi6 / 100.0 - 0.5, 0.3), -0.3))
# self.feature.append(numpy.maximum(numpy.minimum(rsi6 / 100.0 - 0.5, 0.3), -0.3))
# self.feature.append(numpy.maximum(numpy.minimum(rsi6 / 100.0 - 0.5, 0.3), -0.3))
self.feature.append(rsi6rocp)
self.feature.append(rsi12rocp)
self.feature.append(rsi24rocp)
if feature_type == 'VROCP':
vrocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(numpy.maximum(volumes, 1), timeperiod=1)))
# norm_volumes = (volumes - numpy.mean(volumes)) / math.sqrt(numpy.var(volumes))
# vrocp = talib.ROCP(norm_volumes + numpy.max(norm_volumes) - numpy.min(norm_volumes), timeperiod=1)
# self.feature.append(norm_volumes)
self.feature.append(vrocp)
if feature_type == 'BOLL':
upperband, middleband, lowerband = talib.BBANDS(close_prices, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0)
self.feature.append((upperband - close_prices) / close_prices)
self.feature.append((middleband - close_prices) / close_prices)
self.feature.append((lowerband - close_prices) / close_prices)
if feature_type == 'MA':
ma5 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=5))
ma10 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=10))
ma20 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=20))
ma30 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=30))
ma60 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=60))
ma90 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=90))
ma120 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=120))
ma180 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=180))
ma360 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=360))
ma720 = numpy.nan_to_num(talib.MA(close_prices, timeperiod=720))
ma5rocp = talib.ROCP(ma5, timeperiod=1)
ma10rocp = talib.ROCP(ma10, timeperiod=1)
ma20rocp = talib.ROCP(ma20, timeperiod=1)
ma30rocp = talib.ROCP(ma30, timeperiod=1)
ma60rocp = talib.ROCP(ma60, timeperiod=1)
ma90rocp = talib.ROCP(ma90, timeperiod=1)
ma120rocp = talib.ROCP(ma120, timeperiod=1)
ma180rocp = talib.ROCP(ma180, timeperiod=1)
ma360rocp = talib.ROCP(ma360, timeperiod=1)
ma720rocp = talib.ROCP(ma720, timeperiod=1)
self.feature.append(ma5rocp)
self.feature.append(ma10rocp)
self.feature.append(ma20rocp)
self.feature.append(ma30rocp)
self.feature.append(ma60rocp)
self.feature.append(ma90rocp)
self.feature.append(ma120rocp)
self.feature.append(ma180rocp)
self.feature.append(ma360rocp)
self.feature.append(ma720rocp)
self.feature.append((ma5 - close_prices) / close_prices)
self.feature.append((ma10 - close_prices) / close_prices)
self.feature.append((ma20 - close_prices) / close_prices)
self.feature.append((ma30 - close_prices) / close_prices)
self.feature.append((ma60 - close_prices) / close_prices)
self.feature.append((ma90 - close_prices) / close_prices)
self.feature.append((ma120 - close_prices) / close_prices)
self.feature.append((ma180 - close_prices) / close_prices)
self.feature.append((ma360 - close_prices) / close_prices)
self.feature.append((ma720 - close_prices) / close_prices)
if feature_type == 'VMA':
ma5 = talib.MA(volumes, timeperiod=5)
ma10 = talib.MA(volumes, timeperiod=10)
ma20 = talib.MA(volumes, timeperiod=20)
ma30 = talib.MA(volumes, timeperiod=30)
ma60 = talib.MA(volumes, timeperiod=60)
ma90 = talib.MA(volumes, timeperiod=90)
ma120 = talib.MA(volumes, timeperiod=120)
ma180 = talib.MA(volumes, timeperiod=180)
ma360 = talib.MA(volumes, timeperiod=360)
ma720 = talib.MA(volumes, timeperiod=720)
ma5rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma5, timeperiod=1)))
ma10rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma10, timeperiod=1)))
ma20rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma20, timeperiod=1)))
ma30rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma30, timeperiod=1)))
ma60rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma60, timeperiod=1)))
ma90rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma90, timeperiod=1)))
ma120rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma120, timeperiod=1)))
ma180rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma180, timeperiod=1)))
ma360rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma360, timeperiod=1)))
ma720rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma720, timeperiod=1)))
self.feature.append(ma5rocp)
self.feature.append(ma10rocp)
self.feature.append(ma20rocp)
self.feature.append(ma30rocp)
self.feature.append(ma60rocp)
self.feature.append(ma90rocp)
self.feature.append(ma120rocp)
self.feature.append(ma180rocp)
self.feature.append(ma360rocp)
self.feature.append(ma720rocp)
self.feature.append(numpy.arctan(numpy.nan_to_num((ma5 - volumes) / (volumes + 1))))
self.feature.append(numpy.arctan(numpy.nan_to_num((ma10 - volumes) / (volumes + 1))))
self.feature.append(numpy.arctan(numpy.nan_to_num((ma20 - volumes) / (volumes + 1))))
self.feature.append(numpy.arctan(numpy.nan_to_num((ma30 - volumes) / (volumes + 1))))
self.feature.append(numpy.arctan(numpy.nan_to_num((ma60 - volumes) / (volumes + 1))))
self.feature.append(numpy.arctan(numpy.nan_to_num((ma90 - volumes) / (volumes + 1))))
self.feature.append(numpy.arctan(numpy.nan_to_num((ma120 - volumes) / (volumes + 1))))
self.feature.append(numpy.arctan(numpy.nan_to_num((ma180 - volumes) / (volumes + 1))))
self.feature.append(numpy.arctan(numpy.nan_to_num((ma360 - volumes) / (volumes + 1))))
self.feature.append(numpy.arctan(numpy.nan_to_num((ma720 - volumes) / (volumes + 1))))
if feature_type == 'PRICE_VOLUME':
rocp = talib.ROCP(close_prices, timeperiod=1)
# norm_volumes = (volumes - numpy.mean(volumes)) / math.sqrt(numpy.var(volumes))
# vrocp = talib.ROCP(norm_volumes + numpy.max(norm_volumes) - numpy.min(norm_volumes), timeperiod=1)
vrocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(numpy.maximum(volumes, 1), timeperiod=1)))
pv = rocp * vrocp
self.feature.append(pv)
def extract_feature(raw_data, selector, window=30, with_label=True, flatten=True):
chart_feature = ChartFeature(selector)
closes = raw_data.close.values
opens = raw_data.open.values
highs = raw_data.high.values
lows = raw_data.low.values
volumes = raw_data.volume.values
if with_label:
moving_features, moving_labels = chart_feature.moving_extract(window=window, open_prices=opens,
close_prices=closes,
high_prices=highs, low_prices=lows,
volumes=volumes, with_label=with_label,
flatten=flatten)
return moving_features, moving_labels
else:
moving_features = chart_feature.moving_extract(window=window, open_prices=opens, close_prices=closes,
high_prices=highs, low_prices=lows, volumes=volumes,
with_label=with_label, flatten=flatten)
return moving_features
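# Minimal usage sketch (assumptions): extract_feature() expects a pandas-like object exposing
# open/high/low/close/volume columns; pandas and the synthetic random-walk frame below are
# illustrative assumptions, not part of this module.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd
    n = 200
    rng = np.random.RandomState(0)
    close = np.cumsum(rng.normal(0, 1, n)) + 100.0
    demo = pd.DataFrame({
        "open": close + rng.normal(0, 0.5, n),
        "high": close + 1.0,
        "low": close - 1.0,
        "close": close,
        "volume": rng.randint(1000, 5000, n).astype(float),
    })
    X, y = extract_feature(demo, ["ROCP", "MACD", "RSI", "VROCP", "BOLL"],
                           window=30, with_label=True, flatten=True)
    print(X.shape, y.shape)  # one flattened 30-step window per sample, next-step return as label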
|
chart.py
| 0.252661 | 0.348091 |
import string
from collections import defaultdict
import torch
import torch.nn as nn
from citextract.utils.model import load_model_params
class TitleTagging(nn.Module):
"""TitleTagging model."""
def __init__(self, input_size, hidden_size, n_layers, n_classes, device):
"""Initialize the model.
Parameters
----------
input_size : int
The number of input neurons.
hidden_size : int
The number of hidden neurons.
n_layers : int
The number of layers.
n_classes : int
The number of output classes.
device : torch.device
The device to run the computations on.
"""
super(TitleTagging, self).__init__()
self.device = device
self.hidden_size = hidden_size
self.n_layers = n_layers
self.lstm = nn.LSTM(input_size, hidden_size, n_layers, batch_first=True, bidirectional=True, dropout=0.5)
self.fc = nn.Linear(hidden_size * 2, n_classes)
def forward(self, x):
"""Forward-propagate the input data.
Parameters
----------
x : torch.Tensor
The input tensor of size (batch_size, sequence_length, input_size).
Returns
-------
torch.Tensor
The output tensor of size (batch_size, sequence_length, n_classes).
"""
        # Initialize parameters for the first step
h_0 = torch.zeros(2 * self.n_layers, x.size(0), self.hidden_size).to(self.device)
c_0 = torch.zeros(2 * self.n_layers, x.size(0), self.hidden_size).to(self.device)
# Return the output and parameters for the n-th step (n=sequence_len)
lstm_output, _ = self.lstm(x, (h_0, c_0))
# Fully connected layer (hidden_size*2 --> n_classes)
fc_output = self.fc(lstm_output)
# Softmax
softmax_output = nn.Softmax(dim=2)(fc_output)
return softmax_output
def build_titlextract_model(preprocessor, embed_size=32, hidden_size=64, device=None):
"""Build an instance of the TitleXtract model.
Parameters
----------
preprocessor : TitleXtractPreprocessor
The preprocessor to use.
embed_size : int
The number of embedding neurons to use.
hidden_size : int
The number of hidden neurons to use.
device : torch.device
The device to compute on.
Returns
-------
torch.nn.modules.container.Sequential
        A TitleXtract model instance.
"""
vocab_size = len(preprocessor.chars)
n_classes = 2
return nn.Sequential(
torch.nn.Embedding(vocab_size, embed_size),
TitleTagging(input_size=embed_size, hidden_size=hidden_size, n_layers=2, n_classes=n_classes, device=device).to(
device)
).to(device)
class TitleXtractPreprocessor:
"""TitleXtract preprocessor."""
def __init__(self, device=None):
"""Initialize the preprocessor.
Parameters
----------
device : torch.device
The device to use.
"""
chars = list(string.ascii_letters + string.digits + string.punctuation + string.whitespace)
self.chars = ['<PAD>', '<UNK>'] + chars
self.device = device
self.char_mapping = defaultdict(lambda: 1)
for index, char in enumerate(self.chars):
self.char_mapping[char] = index
def map_text_chars(self, text):
"""Map text to numerical character representations.
Parameters
----------
text : str
The text to map.
Returns
-------
torch.Tensor
The tensor representing the mapped characters.
"""
mapped_chars = list(map(lambda char: self.char_mapping.get(char, 1), text))
return torch.Tensor(mapped_chars).long().view(1, -1).to(self.device)
def map_text_targets(self, text, title):
"""Align and map the targets of a text.
Parameters
----------
text : str
The text to map.
title : str
The title (substring of the text) to map.
Returns
-------
torch.Tensor
A tensor representing the characters of the text for which an element is 1 if and only if a character
is both represented by the text and by the title, 0 otherwise.
"""
start_position = text.index(title)
mapped_target = [1 if start_position <= index < start_position + len(title) else 0 for index in
range(len(text))]
return torch.Tensor(mapped_target).view(1, -1).long().to(self.device)
def __call__(self, text, title):
"""Preprocess a text and a title.
Parameters
----------
text : str
The text to preprocess.
title : str
The title to preprocess.
Returns
-------
tuple
A tuple consisting of the following elements:
- A tensor of the characters of the text.
- A tensor of the targets of the characters of the text.
"""
return self.map_text_chars(text), self.map_text_targets(text, title)
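# Hedged alignment sketch (illustrative, not part of the module): shows that the
# preprocessor returns one target per character, 1 inside the title span and 0
# elsewhere. The reference text and title below are made up.
def _demo_preprocessor_alignment():
    pre = TitleXtractPreprocessor(device=torch.device('cpu'))
    chars, targets = pre('Doe, J. A study of X. 2020.', 'A study of X')
    assert chars.shape == targets.shape  # both are (1, len(text))
    return targets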
class TitleXtractor:
"""TitleXtractor wrapper class."""
def __init__(self, model=None, preprocessor=None, device=None):
"""Initialize the TitleXtractor.
Parameters
----------
model : torch.nn.modules.container.Sequential
The model to use.
preprocessor : TitleXtractPreprocessor
The preprocessor to use.
device : torch.device
The device to use.
"""
self.device = device
self.preprocessor = preprocessor if preprocessor else TitleXtractPreprocessor(device=device)
self.model = model if model else build_titlextract_model(self.preprocessor, device=device)
def load(self, model_uri=None, ignore_cache=False):
"""Load model parameters from the internet.
Parameters
----------
model_uri : str
The model URI to load from.
ignore_cache : bool
When true, all caches are ignored and the model parameters are forcefully downloaded.
Returns
-------
TitleXtractor
The wrapper itself.
"""
self.model = load_model_params(self.model, 'titlextract', model_uri, ignore_cache=ignore_cache,
device=self.device)
return self
def __call__(self, ref):
"""Run the TitleXtract model.
Parameters
----------
ref : str
Reference to find a title for.
Returns
-------
str
The found title, or None if no title was found.
"""
result = self.model(self.preprocessor.map_text_chars(ref)).argmax(dim=2).cpu()[0].detach().numpy().tolist()
if 1 not in result:
return None
start_pos = result.index(1)
subselection = result[start_pos:]
if 0 in subselection:
length = subselection.index(0)
title = ref[start_pos:start_pos + length]
else:
title = ref[start_pos:]
return title.strip()
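# Hedged end-to-end sketch (illustrative, not part of the module): without
# calling load() the weights are random, so the returned title is meaningless;
# load() would fetch the published parameters via load_model_params.
def _demo_titlextractor():
    extractor = TitleXtractor(device=torch.device('cpu'))
    # extractor.load()  # uncomment to download pretrained parameters
    return extractor('Doe, J. A study of X. Journal of Examples, 2020.')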
|
citextract/models/titlextract.py
|
| 0.937304 | 0.602354 |
#%%
import cv2
from pathlib import Path
import matplotlib.pyplot as plt
from collections import defaultdict
import numpy as np
PATTERN_SIZE = (9, 6)
SQUARE_SIZE_CM = 3.4 # Measured from my source checkerboard
ROTATE_CAMERA_180 = True
im_paths = list(Path('./calib_data').glob('*.png'))
ims = [cv2.imread(str(p)) for p in im_paths]
if ROTATE_CAMERA_180:
ims = [cv2.rotate(im, cv2.ROTATE_180) for im in ims]
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((PATTERN_SIZE[0] * PATTERN_SIZE[1], 3), np.float32)
objp[:,:2] = np.mgrid[0:PATTERN_SIZE[0], 0:PATTERN_SIZE[1]].T.reshape(-1,2)
objp *= SQUARE_SIZE_CM
retval = [None] * len(ims)
corners = [None] * len(ims)
corners2 = [None] * len(ims)
display_ims = [None] * len(ims)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
for i, im in enumerate(ims):
im = im.copy()
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
retval[i], corners[i] = cv2.findChessboardCorners(gray, PATTERN_SIZE)
display_ims[i] = cv2.drawChessboardCorners(im, PATTERN_SIZE, corners[i], retval[i])
if retval[i] == True:
objpoints.append(objp)
corners2[i] = cv2.cornerSubPix(gray, corners[i], (11,11), (-1,-1), criteria)
imgpoints.append(corners2[i])
# draw_axis and display the corners
cv2.drawChessboardCorners(im, PATTERN_SIZE, corners2[i], retval[i])
cv2.imshow('img', im)
cv2.waitKey(500)
cv2.destroyAllWindows()
#%%
ok, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
assert ok
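#%%
# Hedged sanity-check cell (the standard OpenCV reprojection-error recipe, not
# in the original script). Note that the first value returned by
# cv2.calibrateCamera is the RMS reprojection error, so `assert ok` above only
# checks that it is non-zero.
mean_error = 0
for objp_i, imgp_i, rvec, tvec in zip(objpoints, imgpoints, rvecs, tvecs):
    projected, _ = cv2.projectPoints(objp_i, rvec, tvec, mtx, dist)
    mean_error += cv2.norm(imgp_i, projected, cv2.NORM_L2) / len(projected)
print('mean reprojection error (px):', mean_error / len(objpoints))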
#%%
def draw_axis(img, corners, imgpts):
corner = tuple(corners[0].ravel())
corner = tuple(int(x) for x in corner)
imgpts = imgpts.astype(int)
img = cv2.line(img, corner, tuple(imgpts[0].ravel()), (255,0,0), 5)
img = cv2.line(img, corner, tuple(imgpts[1].ravel()), (0,255,0), 5)
img = cv2.line(img, corner, tuple(imgpts[2].ravel()), (0,0,255), 5)
return img
axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3)
for i, im in enumerate(ims):
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
if retval[i] == True:
# Find the rotation and translation vectors.
objp = objpoints[i]
ret,rvecs, tvecs = cv2.solvePnP(objp, corners2[i], mtx, dist)
# project 3D points to image plane
imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
img = draw_axis(im, corners2[i], imgpts)
cv2.imshow('img',img)
k = cv2.waitKey(500)
cv2.destroyAllWindows()
#%%
floor_im = cv2.imread(r"C:\Users\avivh\Pictures\vlcsnap-2021-09-16-14h33m12s424.png")
if ROTATE_CAMERA_180:
floor_im = cv2.rotate(floor_im, cv2.ROTATE_180)
points = [(158, 127), (240, 121), (292, 144), (173, 151)]
plt.imshow(floor_im)
for pt in points:
plt.plot(pt[0], pt[1], marker='+')
FLOOR_TILE_SIZE_CM = 20
floor_tile_points = np.array([(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)]) * FLOOR_TILE_SIZE_CM
# Find the rotation and translation vectors.
ret, rvecs, tvecs = cv2.solvePnP(np.array(floor_tile_points).astype(float), np.array(points).astype(float), mtx, dist)
rotM = cv2.Rodrigues(rvecs.flatten())[0]
cameraPosition = -np.matrix(rotM).T @ np.matrix(tvecs.flatten()).T
uvPoint = np.array([0, 0, 1])
zConst = 0
tempMat = np.linalg.inv(rotM) @ np.linalg.inv(mtx) @ uvPoint
tempMat2 = np.linalg.inv(rotM) @ tvecs
s = zConst + tempMat2[2,0]
s /= tempMat[2]
wcPoint = np.linalg.inv(rotM) @ (s * np.linalg.inv(mtx) @ uvPoint - tvecs)
ppp = wcPoint / wcPoint.flatten()[-1]
# Plot points between camera and focus tile
minx = cameraPosition
# Draw image around tile
xs = np.arange(-20, 20, 1)
ys = np.arange(-20, 20, 1)
pts = np.meshgrid(xs, ys)
def transform_points(pts, m):
if pts.shape[1] == 2:
pts = np.hstack((pts, np.ones((len(pts), 1))))
assert pts.shape[1] == 3
res = m @ pts.T
res = res / res[-1, :]
return res
# Make a new homography that maps the image center point into 0,0
img_center_pt = np.array((320/2, 150))
img_points = np.array(points).astype(float)
img_points[:, 0] += -img_points[0, 0] + img_center_pt[0]
img_points[:, 1] += -img_points[0, 1] + img_center_pt[1]
homog, _ = cv2.findHomography(img_points, np.array(floor_tile_points).astype(float), )
# img_center_pt_world = transform_points(img_center_pt, homog)
# homog2 = homog.copy()
# homog2[0, 2] -= img_center_pt_world.flatten()[0]
# homog2[1, 2] -= img_center_pt_world.flatten()[1]
# delta = [
# [1, 0, -img_center_pt_world.flatten()[0]],
# [0, 1, -img_center_pt_world.flatten()[1]],
# [0, 0, 1]]
# delta @ np.array([[1,0,1]]).T
# homog2 = homog + delta
# homog @ np.array([0, 0, 1]).T
roi_to_render = [-20, -20, 0]
transform_points(np.array([img_center_pt]), homog)
im_dst = cv2.warpPerspective(floor_im, homog, (40, 40))
H, W = floor_im.shape[:2]
img_roi = np.array([[0, H/2], [W, H/2], [W, H], [0, H]])
floor_roi = transform_points(img_roi, homog)
transform_points(np.array([img_center_pt]), np.linalg.inv(homog))
pts = np.array(points).astype(float)
pts.shape[1]
pts2 = np.hstack((pts, np.ones((len(pts), 1))))
res = homog @ pts2.T
res = res / res[-1, :]
aaa = np.array(floor_tile_points).astype(float)
aaa = aaa[:, 0:2]
transform_points(aaa, homog)
# Cross-check of the manual mapping above; cv2.perspectiveTransform expects (N, 1, 2) input
cv2.perspectiveTransform(pts.reshape(-1, 1, 2), homog)
plt.imshow(im_dst)
plt.show()
a = homog @ np.array([240, 121, 1]).T
a /= a[2]
points
# %%
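#%%
# Hedged consolidation (not in the original script): the back-projection math
# above wrapped as a helper that maps an image pixel to floor-plane (Z=0)
# coordinates. Lens distortion is ignored, as in the manual computation above.
def pixel_to_floor(u, v, rot_m, tvec, camera_matrix, z_const=0.0):
    uv = np.array([u, v, 1.0])
    t = np.asarray(tvec).reshape(3)
    inv_r = np.linalg.inv(rot_m)
    inv_k = np.linalg.inv(camera_matrix)
    ray = inv_r @ inv_k @ uv                      # viewing ray in world coordinates
    scale = (z_const + (inv_r @ t)[2]) / ray[2]   # stretch the ray until it hits Z = z_const
    return inv_r @ (scale * (inv_k @ uv) - t)
# e.g. pixel_to_floor(240, 121, rotM, tvecs, mtx) should land near the tile
# corner (20, 0, 0) if the pose estimate above is reasonable.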
|
calib.py
|
| 0.439507 | 0.515559 |
import ipaddress
import json
import logging
import re
from tests.common.devices.base import AnsibleHostBase
logger = logging.getLogger(__name__)
def _raise_err(msg):
logger.error(msg)
raise Exception(msg)
class EosHost(AnsibleHostBase):
"""
@summary: Class for Eos switch
For running ansible module on the Eos switch
"""
def __init__(self, ansible_adhoc, hostname, eos_user, eos_passwd, shell_user=None, shell_passwd=None, gather_facts=False):
'''Initialize an object for interacting with EoS type device using ansible modules
Args:
ansible_adhoc (): The pytest-ansible fixture
hostname (string): hostname of the EOS device
eos_user (string): Username for accessing the EOS CLI interface
eos_passwd (string): Password for the <PASSWORD>
shell_user (string, optional): Username for accessing the Linux shell CLI interface. Defaults to None.
shell_passwd (string, optional): Password for the shell_user. Defaults to None.
gather_facts (bool, optional): Whether to gather some basic facts. Defaults to False.
'''
self.eos_user = eos_user
self.eos_passwd = <PASSWORD>
self.shell_user = shell_user
self.shell_passwd = <PASSWORD>
AnsibleHostBase.__init__(self, ansible_adhoc, hostname)
self.localhost = ansible_adhoc(inventory='localhost', connection='local', host_pattern="localhost")["localhost"]
def __getattr__(self, module_name):
if module_name.startswith('eos_'):
evars = {
'ansible_connection':'network_cli',
'ansible_network_os':'eos',
'ansible_user': self.eos_user,
'ansible_password': self.eos_passwd,
'ansible_ssh_user': self.eos_user,
'ansible_ssh_pass': self.eos_passwd,
'ansible_become_method': 'enable'
}
else:
if not self.shell_user or not self.shell_passwd:
raise Exception("Please specify shell_user and shell_passwd for {}".format(self.hostname))
evars = {
'ansible_connection':'ssh',
'ansible_network_os':'linux',
'ansible_user': self.shell_user,
'ansible_password': self.<PASSWORD>,
'ansible_ssh_user': self.shell_user,
'ansible_ssh_pass': self.shell_passwd,
'ansible_become_method': 'sudo'
}
self.host.options['variable_manager'].extra_vars.update(evars)
return super(EosHost, self).__getattr__(module_name)
def shutdown(self, interface_name):
out = self.eos_config(
lines=['shutdown'],
parents=['interface {}'.format(interface_name)])
logging.info('Shut interface [%s]' % interface_name)
return out
def shutdown_multiple(self, interfaces):
intf_str = ','.join(interfaces)
return self.shutdown(intf_str)
def no_shutdown(self, interface_name):
out = self.eos_config(
lines=['no shutdown'],
parents=['interface {}'.format(interface_name)])
logging.info('No shut interface [%s]' % interface_name)
return out
def no_shutdown_multiple(self, interfaces):
intf_str = ','.join(interfaces)
return self.no_shutdown(intf_str)
def check_intf_link_state(self, interface_name):
show_int_result = self.eos_command(
commands=['show interface %s' % interface_name])
return 'Up' in show_int_result['stdout_lines'][0]
def set_interface_lacp_rate_mode(self, interface_name, mode):
out = self.eos_config(
lines=['lacp rate %s' % mode],
parents='interface %s' % interface_name)
# FIXME: out['failed'] will be False even when a command is deprecated, so we have to check out['changed']
# However, if the lacp rate is already in the expected state, out['changed'] will also be False and
# would be treated as an error.
if out['failed'] == True or out['changed'] == False:
# new eos deprecate lacp rate and use lacp timer command
out = self.eos_config(
lines=['lacp timer %s' % mode],
parents='interface %s' % interface_name)
if out['changed'] == False:
logging.warning("Unable to set interface [%s] lacp timer to [%s]" % (interface_name, mode))
raise Exception("Unable to set interface [%s] lacp timer to [%s]" % (interface_name, mode))
else:
logging.info("Set interface [%s] lacp timer to [%s]" % (interface_name, mode))
else:
logging.info("Set interface [%s] lacp rate to [%s]" % (interface_name, mode))
return out
def kill_bgpd(self):
out = self.eos_config(lines=['agent Rib shutdown'])
return out
def start_bgpd(self):
out = self.eos_config(lines=['no agent Rib shutdown'])
return out
def no_shutdown_bgp(self, asn):
out = self.eos_config(
lines=['no shut'],
parents=['router bgp {}'.format(asn)])
logging.info('No shut BGP [%s]' % asn)
return out
def check_bgp_session_state(self, neigh_ips, neigh_desc, state="established"):
"""
@summary: check if current bgp session equals to the target state
@param neigh_ips: bgp neighbor IPs
@param neigh_desc: bgp neighbor description
@param state: target state
"""
neigh_ips = [ip.lower() for ip in neigh_ips]
neigh_ips_ok = []
neigh_desc_ok = []
neigh_desc_available = False
out_v4 = self.eos_command(
commands=['show ip bgp summary | json'])
logging.info("ip bgp summary: {}".format(out_v4))
out_v6 = self.eos_command(
commands=['show ipv6 bgp summary | json'])
logging.info("ipv6 bgp summary: {}".format(out_v6))
# when bgpd is inactive, the bgp summary output: [{u'vrfs': {}, u'warnings': [u'BGP inactive']}]
if 'BGP inactive' in out_v4['stdout'][0].get('warnings', '') and 'BGP inactive' in out_v6['stdout'][0].get('warnings', ''):
return False
try:
for k, v in out_v4['stdout'][0]['vrfs']['default']['peers'].items():
if v['peerState'].lower() == state.lower():
if k in neigh_ips:
neigh_ips_ok.append(k)
if 'description' in v:
neigh_desc_available = True
if v['description'] in neigh_desc:
neigh_desc_ok.append(v['description'])
for k, v in out_v6['stdout'][0]['vrfs']['default']['peers'].items():
if v['peerState'].lower() == state.lower():
if k.lower() in neigh_ips:
neigh_ips_ok.append(k)
if 'description' in v:
neigh_desc_available = True
if v['description'] in neigh_desc:
neigh_desc_ok.append(v['description'])
except KeyError:
# ignore any KeyError due to unexpected BGP summary output
pass
logging.info("neigh_ips_ok={} neigh_desc_available={} neigh_desc_ok={}"\
.format(str(neigh_ips_ok), str(neigh_desc_available), str(neigh_desc_ok)))
if neigh_desc_available:
if len(neigh_ips) == len(neigh_ips_ok) and len(neigh_desc) == len(neigh_desc_ok):
return True
else:
if len(neigh_ips) == len(neigh_ips_ok):
return True
return False
def exec_template(self, ansible_root, ansible_playbook, inventory, **kwargs):
playbook_template = 'cd {ansible_path}; ansible-playbook {playbook} -i {inventory} -l {fanout_host} --extra-vars \'{extra_vars}\' -vvvvv'
cli_cmd = playbook_template.format(ansible_path=ansible_root, playbook=ansible_playbook, inventory=inventory,
fanout_host=self.hostname, extra_vars=json.dumps(kwargs))
res = self.localhost.shell(cli_cmd)
if res["localhost"]["rc"] != 0:
raise Exception("Unable to execute template\n{}".format(res["stdout"]))
def get_route(self, prefix):
cmd = 'show ip bgp' if ipaddress.ip_network(str(prefix)).version == 4 else 'show ipv6 bgp'
return self.eos_command(commands=[{
'command': '{} {}'.format(cmd, prefix),
'output': 'json'
}])['stdout'][0]
def get_auto_negotiation_mode(self, interface_name):
output = self.eos_command(commands=[{
'command': 'show interfaces %s status' % interface_name,
'output': 'json'
}])
if self._has_cli_cmd_failed(output):
_raise_err('Failed to get auto neg state for {}: {}'.format(interface_name, output['msg']))
autoneg_enabled = output['stdout'][0]['interfaceStatuses'][interface_name]['autoNegotiateActive']
return autoneg_enabled
def _reset_port_speed(self, interface_name):
out = self.eos_config(
lines=['default speed'],
parents=['interface {}'.format(interface_name)])
logger.debug('Reset port speed for %s: %s' % (interface_name, out))
return not self._has_cli_cmd_failed(out)
def set_auto_negotiation_mode(self, interface_name, enabled):
if self.get_auto_negotiation_mode(interface_name) == enabled:
return True
if enabled:
speed_to_advertise = self.get_supported_speeds(interface_name)[-1]
speed_to_advertise = speed_to_advertise[:-3] + 'gfull'
out = self.eos_config(
lines=['speed auto %s' % speed_to_advertise],
parents=['interface {}'.format(interface_name)])
logger.debug('Set auto neg to {} for port {}: {}'.format(enabled, interface_name, out))
return not self._has_cli_cmd_failed(out)
return self._reset_port_speed(interface_name)
def get_speed(self, interface_name):
output = self.eos_command(commands=['show interfaces %s transceiver properties' % interface_name])
found_txt = re.search(r'Operational Speed: (\S+)', output['stdout'][0])
if found_txt is None:
_raise_err('Not able to extract interface %s speed from output: %s' % (interface_name, output['stdout']))
v = found_txt.groups()[0]
return v[:-1] + '000'
def _has_cli_cmd_failed(self, cmd_output_obj):
return 'failed' in cmd_output_obj and cmd_output_obj['failed']
def set_speed(self, interface_name, speed):
if not speed:
# other set_speed implementations advertise port speeds when speed=None
# but in EOS autoneg activation and speeds advertisement is done via a single CLI cmd
# so this branch is intentionally left as a no-op
return True
speed_mode = 'auto' if self.get_auto_negotiation_mode(interface_name) else 'forced'
speed = speed[:-3] + 'gfull'
out = self.host.eos_config(
lines=['speed {} {}'.format(speed_mode, speed)],
parents='interface %s' % interface_name)[self.hostname]
logger.debug('Set force speed for port {} : {}'.format(interface_name, out))
return not self._has_cli_cmd_failed(out)
def get_supported_speeds(self, interface_name):
"""Get supported speeds for a given interface
Args:
interface_name (str): Interface name
Returns:
list: A list of supported speed strings or None
"""
commands = ['show interfaces {} capabilities'.format(interface_name), 'show interface {} hardware'.format(interface_name)]
for command in commands:
output = self.eos_command(commands=[command])
found_txt = re.search("Speed/Duplex: (.+)", output['stdout'][0])
if found_txt is not None:
break
if found_txt is None:
_raise_err('Failed to find port speeds list in output: %s' % output['stdout'])
speed_list = found_txt.groups()[0]
speed_list = speed_list.split(',')
speed_list.remove('auto')
def extract_speed_only(v):
return re.match(r'\d+', v.strip()).group() + '000'
return list(map(extract_speed_only, speed_list))
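# Hedged usage sketch (illustrative, not part of the test framework): the
# hostname and credentials below are placeholders, and `ansible_adhoc` is the
# pytest-ansible fixture that real tests inject.
def _example_toggle_port(ansible_adhoc):
    eos = EosHost(ansible_adhoc, 'arista-fanout-1', 'admin', 'password',
                  shell_user='admin', shell_passwd='password')
    eos.shutdown('Ethernet1')
    assert not eos.check_intf_link_state('Ethernet1')
    eos.no_shutdown('Ethernet1')
    return eos.get_supported_speeds('Ethernet1')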
|
tests/common/devices/eos.py
|
| 0.451206 | 0.131452 |
import requests
from typing import List
from bs4 import BeautifulSoup
from src.lyrics.entity import Lyrics
class LyricsSearcher:
def __init__(self, albums_searcher, track_searcher, configurations):
self.albums_searcher = albums_searcher
self.track_searcher = track_searcher
self.__genius_search_url = 'https://api.genius.com/search'
self.__genius_token = configurations.GENIUS_ACCESS_TOKEN
def request_song_info(self, track_name, track_artist):
return requests.get(url=self.__genius_search_url,
params={'q': track_name + ' ' + track_artist},
headers={'Authorization': 'Bearer ' + self.__genius_token})
def check_hits(self, response, artist):
json = response.json()
remote_song_info = None
for hit in json['response']['hits']:
if artist.lower() in hit['result']['primary_artist']['name'].lower():
remote_song_info = hit
break
return remote_song_info
def scrape_lyrics(self, remote_song_info: dict):
page = requests.get(remote_song_info['result']['url'])
html = BeautifulSoup(page.text, 'html.parser')
lyrics = None
lyrics_one = html.find("div", class_="lyrics")
if lyrics_one:
lyrics = lyrics_one.get_text()
return lyrics
lyrics_two = html.find("div", class_="Lyrics__Container-sc-1ynbvzw-2 jgQsqn")
if lyrics_two:
lyrics = lyrics_two.get_text()
return lyrics
return lyrics
def get_breno(self, artist: str, track: str):
response = self.request_song_info(track, artist)
remote_song_info = self.check_hits(response, artist)
if remote_song_info:
lyrics = self.scrape_lyrics(remote_song_info)
return lyrics
return None
def get_lyrics(self, artist: str) -> List[Lyrics]:
albums = self.albums_searcher.get_albums(artist)
albums = self.albums_searcher.remove_remaster_and_live_albums(albums)
track_lyrics = []
albums_to_tracks = self.__get_tracks_for(albums)
self.__search_lyrics(albums_to_tracks, artist, track_lyrics)
return track_lyrics
def __search_lyrics(self, albums_to_tracks, artist, track_lyrics):
for album, tracks in albums_to_tracks.items():
for track in tracks:
lyrics = self.get_breno(artist, track)
if not lyrics:
continue
track_lyrics.append(Lyrics(artist=artist, album=album, track=track, lyrics=lyrics))
def __get_tracks_for(self, albums):
albums_to_tracks = {}
for album in albums:
if not albums_to_tracks.get(album):
albums_to_tracks[album] = []
albums_to_tracks[album] = self.track_searcher.get_tracks(album)
return albums_to_tracks
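# Hedged usage sketch (not part of the module): the searcher and configuration
# arguments are placeholders for any objects exposing get_albums,
# remove_remaster_and_live_albums, get_tracks and a GENIUS_ACCESS_TOKEN.
def _example_print_lyrics(albums_searcher, track_searcher, configurations):
    searcher = LyricsSearcher(albums_searcher, track_searcher, configurations)
    for entry in searcher.get_lyrics('Some Artist'):
        print(entry.album, '-', entry.track, len(entry.lyrics or ''), 'chars')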
|
src/lyrics/searchers/lyrics_searcher.py
|
| 0.687735 | 0.083965 |
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from ml2.tools.protos import ltl_pb2 as ml2_dot_tools_dot_protos_dot_ltl__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='ml2/tools/nuxmv/nuxmv.proto',
package='',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1bml2/tools/nuxmv/nuxmv.proto\x1a\x1aml2/tools/protos/ltl.proto\"h\n\x07Problem\x12(\n\rspecification\x18\x01 \x01(\x0b\x32\x11.LTLSpecification\x12\x0e\n\x06system\x18\x02 \x01(\t\x12\x12\n\nrealizable\x18\x03 \x01(\x08\x12\x0f\n\x07timeout\x18\x04 \x01(\x02\"x\n\x08Solution\x12 \n\x06status\x18\x01 \x01(\x0e\x32\x10.Solution.Status\"J\n\x06Status\x12\r\n\tSATISFIED\x10\x00\x12\x0c\n\x08VIOLATED\x10\x01\x12\x0b\n\x07INVALID\x10\x02\x12\x0b\n\x07TIMEOUT\x10\x03\x12\t\n\x05\x45RROR\x10\x04\x32[\n\x05nuXmv\x12#\n\nModelCheck\x12\x08.Problem\x1a\t.Solution\"\x00\x12-\n\x10ModelCheckStream\x12\x08.Problem\x1a\t.Solution\"\x00(\x01\x30\x01\x62\x06proto3'
,
dependencies=[ml2_dot_tools_dot_protos_dot_ltl__pb2.DESCRIPTOR,])
_SOLUTION_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='Solution.Status',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='SATISFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VIOLATED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INVALID', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TIMEOUT', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ERROR', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=211,
serialized_end=285,
)
_sym_db.RegisterEnumDescriptor(_SOLUTION_STATUS)
_PROBLEM = _descriptor.Descriptor(
name='Problem',
full_name='Problem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='specification', full_name='Problem.specification', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='system', full_name='Problem.system', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='realizable', full_name='Problem.realizable', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timeout', full_name='Problem.timeout', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=59,
serialized_end=163,
)
_SOLUTION = _descriptor.Descriptor(
name='Solution',
full_name='Solution',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='Solution.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_SOLUTION_STATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=165,
serialized_end=285,
)
_PROBLEM.fields_by_name['specification'].message_type = ml2_dot_tools_dot_protos_dot_ltl__pb2._LTLSPECIFICATION
_SOLUTION.fields_by_name['status'].enum_type = _SOLUTION_STATUS
_SOLUTION_STATUS.containing_type = _SOLUTION
DESCRIPTOR.message_types_by_name['Problem'] = _PROBLEM
DESCRIPTOR.message_types_by_name['Solution'] = _SOLUTION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Problem = _reflection.GeneratedProtocolMessageType('Problem', (_message.Message,), {
'DESCRIPTOR' : _PROBLEM,
'__module__' : 'ml2.tools.nuxmv.nuxmv_pb2'
# @@protoc_insertion_point(class_scope:Problem)
})
_sym_db.RegisterMessage(Problem)
Solution = _reflection.GeneratedProtocolMessageType('Solution', (_message.Message,), {
'DESCRIPTOR' : _SOLUTION,
'__module__' : 'ml2.tools.nuxmv.nuxmv_pb2'
# @@protoc_insertion_point(class_scope:Solution)
})
_sym_db.RegisterMessage(Solution)
_NUXMV = _descriptor.ServiceDescriptor(
name='nuXmv',
full_name='nuXmv',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=287,
serialized_end=378,
methods=[
_descriptor.MethodDescriptor(
name='ModelCheck',
full_name='nuXmv.ModelCheck',
index=0,
containing_service=None,
input_type=_PROBLEM,
output_type=_SOLUTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ModelCheckStream',
full_name='nuXmv.ModelCheckStream',
index=1,
containing_service=None,
input_type=_PROBLEM,
output_type=_SOLUTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_NUXMV)
DESCRIPTOR.services_by_name['nuXmv'] = _NUXMV
# @@protoc_insertion_point(module_scope)
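# Hedged usage sketch (hand-written, not produced by protoc): constructs a
# model-checking request and round-trips it through serialization. The
# specification field is left unset here because LTLSpecification is defined
# in ml2/tools/protos/ltl.proto.
def _example_problem_roundtrip():
    problem = Problem(system='MODULE main', realizable=True, timeout=10.0)
    parsed = Problem.FromString(problem.SerializeToString())
    return parsed.timeout, Solution(status=Solution.TIMEOUT)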
|
ml2/tools/nuxmv/nuxmv_pb2.py
|
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from ml2.tools.protos import ltl_pb2 as ml2_dot_tools_dot_protos_dot_ltl__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='ml2/tools/nuxmv/nuxmv.proto',
package='',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1bml2/tools/nuxmv/nuxmv.proto\x1a\x1aml2/tools/protos/ltl.proto\"h\n\x07Problem\x12(\n\rspecification\x18\x01 \x01(\x0b\x32\x11.LTLSpecification\x12\x0e\n\x06system\x18\x02 \x01(\t\x12\x12\n\nrealizable\x18\x03 \x01(\x08\x12\x0f\n\x07timeout\x18\x04 \x01(\x02\"x\n\x08Solution\x12 \n\x06status\x18\x01 \x01(\x0e\x32\x10.Solution.Status\"J\n\x06Status\x12\r\n\tSATISFIED\x10\x00\x12\x0c\n\x08VIOLATED\x10\x01\x12\x0b\n\x07INVALID\x10\x02\x12\x0b\n\x07TIMEOUT\x10\x03\x12\t\n\x05\x45RROR\x10\x04\x32[\n\x05nuXmv\x12#\n\nModelCheck\x12\x08.Problem\x1a\t.Solution\"\x00\x12-\n\x10ModelCheckStream\x12\x08.Problem\x1a\t.Solution\"\x00(\x01\x30\x01\x62\x06proto3'
,
dependencies=[ml2_dot_tools_dot_protos_dot_ltl__pb2.DESCRIPTOR,])
_SOLUTION_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='Solution.Status',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='SATISFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VIOLATED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INVALID', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TIMEOUT', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ERROR', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=211,
serialized_end=285,
)
_sym_db.RegisterEnumDescriptor(_SOLUTION_STATUS)
_PROBLEM = _descriptor.Descriptor(
name='Problem',
full_name='Problem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='specification', full_name='Problem.specification', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='system', full_name='Problem.system', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='realizable', full_name='Problem.realizable', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timeout', full_name='Problem.timeout', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=59,
serialized_end=163,
)
_SOLUTION = _descriptor.Descriptor(
name='Solution',
full_name='Solution',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='Solution.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_SOLUTION_STATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=165,
serialized_end=285,
)
_PROBLEM.fields_by_name['specification'].message_type = ml2_dot_tools_dot_protos_dot_ltl__pb2._LTLSPECIFICATION
_SOLUTION.fields_by_name['status'].enum_type = _SOLUTION_STATUS
_SOLUTION_STATUS.containing_type = _SOLUTION
DESCRIPTOR.message_types_by_name['Problem'] = _PROBLEM
DESCRIPTOR.message_types_by_name['Solution'] = _SOLUTION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Problem = _reflection.GeneratedProtocolMessageType('Problem', (_message.Message,), {
'DESCRIPTOR' : _PROBLEM,
'__module__' : 'ml2.tools.nuxmv.nuxmv_pb2'
# @@protoc_insertion_point(class_scope:Problem)
})
_sym_db.RegisterMessage(Problem)
Solution = _reflection.GeneratedProtocolMessageType('Solution', (_message.Message,), {
'DESCRIPTOR' : _SOLUTION,
'__module__' : 'ml2.tools.nuxmv.nuxmv_pb2'
# @@protoc_insertion_point(class_scope:Solution)
})
_sym_db.RegisterMessage(Solution)
_NUXMV = _descriptor.ServiceDescriptor(
name='nuXmv',
full_name='nuXmv',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=287,
serialized_end=378,
methods=[
_descriptor.MethodDescriptor(
name='ModelCheck',
full_name='nuXmv.ModelCheck',
index=0,
containing_service=None,
input_type=_PROBLEM,
output_type=_SOLUTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ModelCheckStream',
full_name='nuXmv.ModelCheckStream',
index=1,
containing_service=None,
input_type=_PROBLEM,
output_type=_SOLUTION,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_NUXMV)
DESCRIPTOR.services_by_name['nuXmv'] = _NUXMV
# @@protoc_insertion_point(module_scope)
| 0.36693 | 0.077413 |
import os
from requests_cache.backends.sqlite import DbDict, DbPickleDict
class BaseCustomDictTestCase(object):
dict_class = DbDict
pickled_dict_class = DbPickleDict
NAMESPACE = 'requests-cache-temporary-db-test-will-be-deleted'
TABLES = ['table%s' % i for i in range(5)]
def tearDown(self):
if self.dict_class is DbDict:
try:
os.unlink(self.NAMESPACE)
except Exception:
pass
return
for table in self.TABLES:
d = self.dict_class(self.NAMESPACE, table)
d.clear()
super().tearDown()
def test_set_get(self):
d1 = self.dict_class(self.NAMESPACE, self.TABLES[0])
d2 = self.dict_class(self.NAMESPACE, self.TABLES[1])
d3 = self.dict_class(self.NAMESPACE, self.TABLES[2])
d1[1] = 1
d2[2] = 2
d3[3] = 3
self.assertEqual(list(d1.keys()), [1])
self.assertEqual(list(d2.keys()), [2])
self.assertEqual(list(d3.keys()), [3])
with self.assertRaises(KeyError):
d1[4]
def test_str(self):
d = self.dict_class(self.NAMESPACE)
d.clear()
d[1] = 1
d[2] = 2
self.assertEqual(str(d), '{1: 1, 2: 2}')
def test_del(self):
d = self.dict_class(self.NAMESPACE)
d.clear()
for i in range(5):
d[i] = i
del d[0]
del d[1]
del d[2]
self.assertEqual(list(d.keys()), list(range(3, 5)))
self.assertEqual(list(d.values()), list(range(3, 5)))
with self.assertRaises(KeyError):
del d[0]
def test_picklable_dict(self):
d = self.pickled_dict_class(self.NAMESPACE)
d[1] = ForPickle()
d = self.pickled_dict_class(self.NAMESPACE)
self.assertEqual(d[1].a, 1)
self.assertEqual(d[1].b, 2)
def test_clear_and_work_again(self):
d = self.dict_class(self.NAMESPACE)
for _ in range(3):
d.clear()
d.clear()
self.assertEqual(len(d), 0)
n = 5
for i in range(n):
d[i] = i * 2
self.assertEqual(len(d), n)
self.assertEqual(d[2], 4)
d.clear()
self.assertEqual(len(d), 0)
def test_same_settings(self):
d1 = self.dict_class(self.NAMESPACE)
d2 = self.dict_class(self.NAMESPACE, connection=d1.connection)
d1.clear()
d2.clear()
d1[1] = 1
d2[2] = 2
self.assertEqual(d1, d2)
def test_len(self):
n = 5
d = self.dict_class(self.NAMESPACE)
d.clear()
for i in range(n):
d[i] = i
self.assertEqual(len(d), 5)
class ForPickle(object):
a = 1
b = 2
|
tests/test_custom_dict.py
|
| 0.278061 | 0.281198 |
class OptionalFeatures(object):
def __init__(self, conn, scenario_id):
"""
        :param conn:
:param scenario_id:
"""
of_column_names = [
n for n in get_scenario_table_columns(conn=conn)
if n.startswith("of_")
]
for of in of_column_names:
setattr(
self,
of.upper(),
db_column_to_self(
column=of, conn=conn, scenario_id=scenario_id
)
)
def get_all_available_features(self):
all_features = [
attr[3:].lower() for attr, value in self.__dict__.items()
]
return all_features
def get_active_features(self):
"""
Get list of requested features
:return:
"""
active_features = list()
for attr, value in self.__dict__.items():
if value:
active_features.append(attr[3:].lower())
return active_features
class SubScenarios(object):
"""
The subscenario IDs will be used to format SQL queries, so we set them to
"NULL" (not None) if an ID is not specified for the scenario.
"""
def __init__(self, conn, scenario_id):
"""
        :param conn:
:param scenario_id:
"""
subscenario_column_names = [
n for n in get_scenario_table_columns(conn=conn)
if n.endswith("_scenario_id")
]
for subscenario in subscenario_column_names:
setattr(
self,
subscenario.upper(),
db_column_to_self(
column=subscenario, conn=conn, scenario_id=scenario_id
)
)
def get_all_available_subscenarios(self):
all_subscenarios = [
attr.lower() for attr, value in self.__dict__.items()
if attr != "SCENARIO_ID"
]
return all_subscenarios
class SubProblems(object):
def __init__(self, conn, scenario_id):
"""
:param conn:
:param scenario_id:
"""
cursor = conn.cursor()
# TODO: make sure there is data integrity between subproblems_stages
# and inputs_temporal_horizons and inputs_temporal
subproblems = cursor.execute(
"""SELECT subproblem_id
FROM inputs_temporal_subproblems
INNER JOIN scenarios
USING (temporal_scenario_id)
WHERE scenario_id = {};""".format(scenario_id)
).fetchall()
# SQL returns a list of tuples [(1,), (2,)] so convert to simple list
self.SUBPROBLEMS = [subproblem[0] for subproblem in subproblems]
# store subproblems and stages in dict {subproblem: [stages]}
self.SUBPROBLEM_STAGE_DICT = {}
for s in self.SUBPROBLEMS:
stages = cursor.execute(
"""SELECT stage_id
FROM inputs_temporal_subproblems_stages
INNER JOIN scenarios
USING (temporal_scenario_id)
WHERE scenario_id = {}
AND subproblem_id = {};""".format(scenario_id, s)
).fetchall()
stages = [stage[0] for stage in stages] # convert to simple list
self.SUBPROBLEM_STAGE_DICT[s] = stages
class SolverOptions(object):
def __init__(self, conn, scenario_id):
"""
        :param conn:
:param scenario_id:
"""
cursor = conn.cursor()
self.SOLVER_OPTIONS_ID = cursor.execute("""
SELECT solver_options_id
FROM scenarios
WHERE scenario_id = {}
""".format(scenario_id)
).fetchone()[0]
if self.SOLVER_OPTIONS_ID is None:
self.SOLVER = None
else:
distinct_solvers = cursor.execute(
"""SELECT DISTINCT solver
FROM inputs_options_solver
WHERE solver_options_id = {}""".format(self.SOLVER_OPTIONS_ID)
).fetchall()
if len(distinct_solvers) > 1:
raise ValueError("""
ERROR: Solver options include more than one solver! Only a
single solver must be specified for solver_options_id in the
inputs_options_solver table. See solver_options_id {}.
""".format(self.SOLVER_OPTIONS_ID))
else:
self.SOLVER = distinct_solvers[0][0]
self.SOLVER_OPTIONS = \
None if self.SOLVER_OPTIONS_ID is None \
else {
row[0]: row[1]
for row in cursor.execute("""
SELECT solver_option_name, solver_option_value
FROM inputs_options_solver
WHERE solver_options_id = {};
""".format(self.SOLVER_OPTIONS_ID)
).fetchall() if row[0] is not None and row[0] != ""
}
def db_column_to_self(column, conn, scenario_id):
    of = column.startswith("of")
c = conn.cursor()
query = c.execute(
"""SELECT {}
FROM scenarios
WHERE scenario_id = ?;""".format(column),
(scenario_id,)
).fetchone()[0]
self = "NULL" if query is None and not of else query
return self
def get_scenario_table_columns(conn):
c = conn.cursor()
scenario_query = c.execute(
"""
SELECT * FROM scenarios;
"""
)
column_names = [
description[0] for description in scenario_query.description
]
return column_names
|
gridpath/auxiliary/scenario_chars.py
|
| 0.639286 | 0.345381 |
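A minimal usage sketch for gridpath/auxiliary/scenario_chars.py above, assuming a toy in-memory "scenarios" table (the real GridPath schema has many more columns); table layout and values are illustrative only:

import sqlite3

# Hypothetical minimal schema, for illustration only.
conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE scenarios "
    "(scenario_id INTEGER, of_transmission INTEGER, temporal_scenario_id INTEGER)"
)
conn.execute("INSERT INTO scenarios VALUES (1, 1, NULL)")

features = OptionalFeatures(conn=conn, scenario_id=1)
print(features.get_all_available_features())  # ['transmission']
print(features.get_active_features())         # ['transmission']

subscenarios = SubScenarios(conn=conn, scenario_id=1)
print(subscenarios.TEMPORAL_SCENARIO_ID)      # "NULL" -- unspecified IDs become the string "NULL"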
from io import BytesIO
import sys
import os
import zipfile
import argparse
import subprocess
import requests
import xml.etree.ElementTree as etree
def request_data(root, tile_id, output_folder, verbose=False):
namespaces = {"xmlns": "http://www.w3.org/2005/Atom",
"xmlns:georss": "http://www.georss.org/georss"}
tile = root.find('xmlns:entry[xmlns:id="{}.laz.zip"]'.format(tile_id),
namespaces=namespaces)
if tile is None:
return False
url = tile.find('xmlns:link', namespaces=namespaces).attrib['href']
zip_file = '{}{}.laz.zip'.format(output_folder, tile_id)
with open(zip_file, 'wb') as f:
if not verbose:
zipped_data = requests.get(url)
f.write(zipped_data.content)
else:
zipped_data = requests.get(url, stream=True, timeout=10)
total_length = zipped_data.headers.get('content-length')
if total_length is not None:
total_length = int(total_length)
else:
size = tile.find('xmlns:content', namespaces=namespaces).text
size = float(size.split(':')[1].split(
' ')[1].replace(',', '.'))
total_length = int(size * 1048576)
dl = 0
chunk = total_length//100 if total_length is not None else 1048576
for data in zipped_data.iter_content(chunk_size=chunk):
f.write(data)
dl += len(data)
if total_length is not None:
done = int(100 * dl / total_length)
sys.stdout.write("\r[{}{}] - {}% {}/{} mb".format('=' * done,
' ' *
(100 - done),
done,
dl/1048576,
total_length/1048576))
sys.stdout.flush()
elif verbose:
sys.stdout.write(
"\r {:0.1f} mb downloaded..".format(dl/1048576))
sys.stdout.flush()
if verbose:
sys.stdout.write("\n")
if verbose:
print("Download complete, unzipping..")
with zipfile.ZipFile(zip_file) as data:
data.extractall(output_folder)
os.remove(zip_file)
return True
def request_tile(tile_id, output_folder, verbose=False):
# uitgefilterd
if verbose:
print("Downloading filtered out AHN 2 data..")
r = requests.get('http://geodata.nationaalgeoregister.nl/ahn2/'
'atom/ahn2_uitgefilterd.xml')
root = etree.fromstring(r.content)
success = request_data(root, 'u{}'.format(tile_id), output_folder, verbose)
if verbose:
if success:
print("Complete.")
else:
print("Download failed. Tile not found.")
# gefilterd
if verbose:
print("Downloading filtered AHN 2 data..")
r = requests.get('http://geodata.nationaalgeoregister.nl/ahn2/'
'atom/ahn2_gefilterd.xml')
root = etree.fromstring(r.content)
success = request_data(root, 'g{}'.format(tile_id), output_folder, verbose)
if verbose:
if success:
print("Download complete.")
else:
print("Download failed. Tile not found.")
def argument_parser():
"""
Define and return the arguments.
"""
description = "Download an AHN2 data tile by tile id."
parser = argparse.ArgumentParser(description=description)
required_named = parser.add_argument_group('required named arguments')
required_named.add_argument('-t', '--tileid',
help='The ID of the tile to download.',
required=True)
required_named.add_argument('-o', '--output',
help='The folder to write the data to.',
required=True)
parser.add_argument('-m', '--merge',
help='Merge the filtered and remaining data. '
'Requires PDAL.',
action='store_true',
required=False,
default=False)
parser.add_argument('-v', '--verbose',
help='Enable to print out the progress',
action='store_true',
required=False,
default=False)
args = parser.parse_args()
return args
def main():
args = argument_parser()
    args.output = args.output.replace('\\', '/')
args.output = args.output + '/' if args.output[-1] != '/' else args.output
request_tile(args.tileid, args.output, args.verbose)
if args.merge:
if args.verbose:
print("Merging point clouds..")
output_file = '{}{}.laz'.format(args.output, args.tileid)
subprocess.call(['pdal', 'merge',
'{}g{}.laz'.format(args.output, args.tileid),
'{}u{}.laz'.format(args.output, args.tileid),
output_file])
if os.path.isfile(output_file):
if args.verbose:
print("Done, removing old files..")
os.remove('{}g{}.laz'.format(args.output, args.tileid))
os.remove('{}u{}.laz'.format(args.output, args.tileid))
if args.verbose:
print("Done!")
elif args.verbose:
print("Merging failed. File not found. Keeping original files.")
if __name__ == '__main__':
main()
|
scripts/ahn2_download/ahn2_downloader.py
|
| 0.345216 | 0.132066 |
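A hedged usage sketch for scripts/ahn2_download/ahn2_downloader.py above; the tile id below is a placeholder and has not been checked against the AHN2 Atom feeds:

import os
from ahn2_downloader import request_tile

os.makedirs("./ahn2_data", exist_ok=True)
# "25gn1" is a hypothetical tile id; the output folder must end with a slash,
# which main() normally enforces before calling request_tile().
request_tile("25gn1", "./ahn2_data/", verbose=True)
# If the tile exists, this should leave u25gn1.laz and g25gn1.laz in ./ahn2_data/.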
from nltk.stem.porter import PorterStemmer
import os
def __stem_Tokens(words):
porter_stemmer = PorterStemmer()
return [porter_stemmer.stem(x) for x in words.split(" ")]
def same_pre_post(tokens1, tokens2):
if tokens1[0] == tokens2[0] or tokens1[-1] == tokens2[-1]:
return True
return False
def single_token_same_pre_post_fix(tokens1, tokens2):
if len(tokens1) == 1 and len(tokens2) == 1:
w1 = tokens1[0]
w2 = tokens2[0]
if len(w1) > 3 and len(w2) > 3:
return w1[:3] == w2[:3] or w1[-3:] == w2[-3:]
return False
def share_tokens(tokens1, tokens2):
for tk1 in tokens1:
for tk2 in tokens2:
if tk1 == tk2:
return True
return False
def is_heuristic_ones(w1, w2):
w1_stems = __stem_Tokens(w1)
w2_stems = __stem_Tokens(w2)
if same_pre_post(w1_stems, w2_stems):
return True
return False
for file_name in os.listdir("."):
if not os.path.isfile(file_name) or (not file_name.endswith("txt") and not file_name.endswith("csv")):
continue
# file_name = "FeedForward_Result{}.txt".format(i)
tn = 0
tp = 0
fn = 0
fp = 0
with open(file_name) as fin, open("../filter_result/{}".format(file_name), "w") as fout, open(
"../filter_result/csv/{}".format(file_name), "w") as csv_fout:
cnt = 0
for line in fin:
cnt += 1
line = line.strip("\n")
if "label, correctness, w1, w2" in line:
if cnt == 1:
continue
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = (tp + tn) / (tp + tn + fn + fp)
csv_fout.write("{},{},{},{}\n".format(recall, precision, f1, accuracy))
tn = 0
tp = 0
fn = 0
fp = 0
else:
parts = [x for x in line.split("\t") if len(x) > 0]
if len(parts) < 5:
print(parts)
continue
pre_label = parts[0]
correctness = parts[1]
score = parts[2]
w1 = parts[3]
w2 = parts[4]
if is_heuristic_ones(w1, w2):
continue
if correctness == "Correct":
if pre_label == "yes":
tp += 1
else:
tn += 1
else:
if pre_label == "yes":
fp += 1
else:
fn += 1
fout.write(line + "\n")
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * (precision * recall) / (precision + recall)
accuracy = (tp + tn) / (tp + tn + fn + fp)
csv_fout.write("{},{},{},{}\n".format(recall, precision, f1, accuracy))
|
SENET/result/mergeRQ1.1ANDRQ1.2BySelectNonHeuristic(OriginVerisionIncluded)/not_filter_result/filter_result.py
|
| 0.448909 | 0.275635 |
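To make the heuristic filter in the SENET script above concrete: is_heuristic_ones() drops a pair whenever the Porter stems share the same first or last token. A few illustrative calls (the stems shown are what PorterStemmer is expected to produce):

is_heuristic_ones("testing", "tested")              # True  -- both stem to "test"
is_heuristic_ones("software testing", "unit test")  # True  -- shared last stem "test"
is_heuristic_ones("agile", "scrum")                 # False -- pair is kept and scored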
from app.extensions.redis.redis_utils import redis_db
from app.extensions.redis.redis_operation import set_multi_hash_in_redis
from app.extensions.redis.redis_operation import set_single_hash_in_redis
from app.extensions.redis.redis_operation import update_single_hash_in_redis
from app.extensions.redis.redis_operation import get_all_in_redis, get_one_in_redis
from app.models.user import User
from app.models.user_info import UserInfo
from app.models.match_game import MatchGame
from app.models.game_rule import GameRule
from app.models.user_mail import UserMail
from app.models.email_info import EmailInfo
from app.models.good_info import GoodInfo
from app.models.reward_info import RewardInfo
from app.models.prop_info import PropInfo
from app.models.income_support import IncomeSupport
def check_user_is_exist(username):
return User.check_user_is_exist(username)
def get_user_info_by_username(username):
return User.get_user_info_by_username(username)
def validate_password(username, password):
return User.validate_password(username, password)
def get_user_info_in_cache(uid):
r_key = 'hu:' + str(uid)
ret = get_one_in_redis(r_key)
return ret
def save_user_info_in_cache(uid, user_info):
r_key = 'hu:' + str(uid)
return set_single_hash_in_redis({r_key: user_info})
def update_user_in_cache(uid, user_info):
r_key = 'hu:' + str(uid)
return update_single_hash_in_redis({r_key: user_info})
def get_match_data_in_cache(uid):
r_key = 'hu:' + str(uid) + ':match'
return get_one_in_redis(r_key)
def get_match_data_by_uid(uid):
return MatchGame.get_user_match_data_by_uid(uid)
def save_match_data_in_cache(uid, match_data):
r_key = 'hu:' + str(uid) + ':match'
return set_single_hash_in_redis({r_key: match_data})
def get_game_rule_in_cache(uid):
r_key = 'hu:' + str(uid) + ':rule'
return get_one_in_redis(r_key)
def get_game_rule_by_uid(uid):
return GameRule.get_user_game_rule_by_uid(uid)
def save_game_rule_in_cache(uid, game_rule):
r_key = 'hu:' + str(uid) + ':rule'
return set_single_hash_in_redis({r_key: game_rule})
def validate_user_mail_in_table(uid):
return UserMail.validate_user_mail_in_table(uid)
def get_user_mail_by_uid(uid):
return UserMail.get_user_mail_by_uid(uid)
def get_one_user_mail_by_id(id):
return UserMail.get_one_user_mail_by_id(id)
def update_user_mail_by_id(id, data):
return UserMail.update_user_mail_by_id(id, data)
def get_email_info_by_id(id):
return EmailInfo.get_email_info_by_id(id)
def get_all_email_info():
return EmailInfo.get_all_email_info()
def get_all_good_info():
return GoodInfo.get_all_good_info()
def get_good_info_by_id(id):
return GoodInfo.get_good_info_by_id(id)
def get_all_reward_info():
return RewardInfo.get_all_reward_info()
def get_all_prop_info():
return PropInfo.get_all_prop_info()
def get_income_support_by_uid(uid):
return IncomeSupport.get_income_support_by_uid(uid)
def save_income_support(data):
return IncomeSupport.add(data)
def update_income_support_by_id(id, data):
return IncomeSupport.update_income_support_by_id(id, data)
def save_user(data):
return User.add(data)
def save_user_info(data):
return UserInfo.add(data)
def get_user_info_by_id(id):
return User.get_info_by_uid(id)
|
echecs_hall/app/data_bridge/mj_hall_bridge.py
|
| 0.41052 | 0.110807 |
import os
import time
import pytest
import zmq
from jina.excepts import RuntimeFailToStart, RuntimeRunForeverEarlyError
from jina.executors import BaseExecutor
from jina.parsers import set_gateway_parser, set_pea_parser
from jina.peapods import Pea
from jina.peapods.runtimes.zmq.zed import ZEDRuntime
from jina.types.message.common import ControlMessage
def bad_func(*args, **kwargs):
raise Exception('intentional error')
def test_base_pea_with_runtime_bad_init(mocker):
class Pea1(Pea):
def __init__(self, args):
super().__init__(args)
arg = set_pea_parser().parse_args(['--runtime-backend', 'thread'])
mocker.patch.object(ZEDRuntime, '__init__', bad_func)
teardown_spy = mocker.spy(ZEDRuntime, 'teardown')
cancel_spy = mocker.spy(Pea, '_cancel_runtime')
run_spy = mocker.spy(ZEDRuntime, 'run_forever')
with pytest.raises(RuntimeFailToStart):
with Pea1(arg):
pass
# teardown should be called, cancel should not be called
teardown_spy.assert_not_called()
run_spy.assert_not_called()
cancel_spy.assert_not_called()
@pytest.mark.slow
def test_base_pea_with_runtime_bad_run_forever(mocker):
class Pea1(Pea):
def __init__(self, args):
super().__init__(args)
def mock_run_forever(runtime):
bad_func()
arg = set_pea_parser().parse_args(['--runtime-backend', 'thread'])
mocker.patch.object(ZEDRuntime, 'run_forever', mock_run_forever)
teardown_spy = mocker.spy(ZEDRuntime, 'teardown')
cancel_spy = mocker.spy(Pea, '_cancel_runtime')
run_spy = mocker.spy(ZEDRuntime, 'run_forever')
with pytest.raises(RuntimeRunForeverEarlyError):
with Pea1(arg):
pass
# teardown should be called, cancel should not be called
teardown_spy.assert_called()
run_spy.assert_called()
cancel_spy.assert_not_called()
@pytest.mark.slow
def test_base_pea_with_runtime_bad_teardown(mocker):
class Pea1(Pea):
def __init__(self, args):
super().__init__(args)
def mock_run_forever(*args, **kwargs):
time.sleep(3)
def mock_is_ready(*args, **kwargs):
return True
def mock_cancel(*args, **kwargs):
pass
mocker.patch.object(ZEDRuntime, 'run_forever', mock_run_forever)
mocker.patch.object(ZEDRuntime, 'is_ready', mock_is_ready)
mocker.patch.object(ZEDRuntime, 'teardown', lambda x: bad_func)
mocker.patch.object(ZEDRuntime, 'cancel', lambda *args, **kwargs: mock_cancel)
teardown_spy = mocker.spy(ZEDRuntime, 'teardown')
cancel_spy = mocker.spy(Pea, '_cancel_runtime')
run_spy = mocker.spy(ZEDRuntime, 'run_forever')
arg = set_pea_parser().parse_args(['--runtime-backend', 'thread'])
with Pea1(arg):
pass
teardown_spy.assert_called()
run_spy.assert_called()
cancel_spy.assert_called_once() # 3s > .join(1), need to cancel
# run_forever cancel should all be called
def test_base_pea_with_runtime_bad_cancel(mocker):
class Pea1(Pea):
def __init__(self, args):
super().__init__(args)
def mock_run_forever(runtime):
time.sleep(3)
def mock_is_ready(*args, **kwargs):
return True
mocker.patch.object(ZEDRuntime, 'run_forever', mock_run_forever)
mocker.patch.object(ZEDRuntime, 'is_ready', mock_is_ready)
mocker.patch.object(Pea, '_cancel_runtime', bad_func)
teardown_spy = mocker.spy(ZEDRuntime, 'teardown')
cancel_spy = mocker.spy(Pea, '_cancel_runtime')
run_spy = mocker.spy(ZEDRuntime, 'run_forever')
arg = set_pea_parser().parse_args(['--runtime-backend', 'thread'])
with Pea1(arg):
time.sleep(0.1)
pass
teardown_spy.assert_called()
run_spy.assert_called()
cancel_spy.assert_called_once()
# run_forever cancel should all be called
@pytest.fixture()
def fake_env():
os.environ['key_parent'] = 'value3'
yield
os.environ.pop('key_parent', None)
class EnvChecker1(BaseExecutor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# pea/pod-specific
assert os.environ['key1'] == 'value1'
assert os.environ['key2'] == 'value2'
# inherit from parent process
assert os.environ['key_parent'] == 'value3'
def test_pea_runtime_env_setting_in_process(fake_env):
with Pea(
set_pea_parser().parse_args(
[
'--uses',
'EnvChecker1',
'--env',
'key1=value1',
'--env',
'key2=value2',
'--runtime-backend',
'process',
]
)
):
pass
# should not affect the main process
assert 'key1' not in os.environ
assert 'key2' not in os.environ
assert 'key_parent' in os.environ
class EnvChecker2(BaseExecutor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# pea/pod-specific
assert 'key1' not in os.environ
assert 'key2' not in os.environ
# inherit from parent process
assert os.environ['key_parent'] == 'value3'
def test_pea_runtime_env_setting_in_thread(fake_env):
os.environ['key_parent'] = 'value3'
with Pea(
set_pea_parser().parse_args(
[
'--uses',
'EnvChecker2',
'--env',
'key1=value1',
'--env',
'key2=value2',
'--runtime-backend',
'thread',
]
)
):
pass
# should not affect the main process
assert 'key1' not in os.environ
assert 'key2' not in os.environ
assert 'key_parent' in os.environ
os.environ.pop('key_parent')
@pytest.mark.parametrize(
'protocol, expected',
[
('grpc', 'GRPCRuntime'),
('websocket', 'WebSocketRuntime'),
('http', 'HTTPRuntime'),
],
)
def test_gateway_args(protocol, expected):
args = set_gateway_parser().parse_args(
[
'--host',
'jina-custom-gateway',
'--port-expose',
'23456',
'--protocol',
protocol,
]
)
p = Pea(args)
assert p.runtime_cls.__name__ == expected
@pytest.mark.timeout(30)
@pytest.mark.slow
@pytest.mark.parametrize(
'command, response_expected',
[
('IDLE', 0),
('CANCEL', 0),
('TERMINATE', 1),
('STATUS', 1),
('ACTIVATE', 1),
('DEACTIVATE', 1),
],
)
def test_idle_does_not_create_response(command, response_expected):
args = set_pea_parser().parse_args([])
with Pea(args) as p:
msg = ControlMessage(command, pod_name='fake_pod')
with zmq.Context().socket(zmq.PAIR) as socket:
socket.connect(f'tcp://localhost:{p.args.port_ctrl}')
socket.send_multipart(msg.dump())
assert socket.poll(timeout=1000) == response_expected
|
tests/unit/peapods/peas/test_pea.py
|
| 0.481698 | 0.190329 |
import sys, time, array
import png
import weave
from weave.base_info import custom_info
import numpy as np
from zope.interface import implements
from twisted.internet import defer, reactor
from twisted.internet.interfaces import IPushProducer
import asynqueue
from asynqueue.threads import Consumerator
from mcmandelbrot.colormap import ColorMapper
class my_info(custom_info):
_extra_compile_args = ['-Wcpp']
class MandelbrotValuer(object):
"""
Returns the values (number of iterations to escape, if at all,
inverted) of the Mandelbrot set at point cr + i*ci in the complex
plane, for a range of real values with a constant imaginary component.
C code adapted from <NAME>'s C{iterations} function at::
https://svn.enthought.com/svn/enthought/Mayavi/
branches/3.0.4/examples/mayavi/mandelbrot.py}
with periodicity testing and test-interval updating adapted from
Simpsons's code contribution at::
http://en.wikipedia.org/wiki/User:Simpsons_contributor/
periodicity_checking
and period-2 bulb testing from Wikibooks::
http://en.wikibooks.org/wiki/Fractals/
Iterations_in_the_complex_plane/Mandelbrot_set
The values are inverted, i.e., subtracted from the maximum value,
so that no-escape points (technically, the only points actually in
the Mandelbrot Set) have zero value and points that escape
immediately have the maximum value. This allows simple mapping to
the classic image with a black area in the middle. Then they are
scaled to the 0.0-1.0 range, and an exponent is applied to
emphasize changes at shorter escape times. Finally, they are
mapped to RGB triples and returned.
@ivar cm: A callable object that converts C{NumPy} array inputs in
the 0.0-1.0 range to an unsigned-int8 Python array of RGB
triples.
"""
support_code = """
bool region_test(double zr, double zr2, double zi2)
{
double q;
// (x+1)^2 + y2 < 1/16
if (zr2 + 2*zr + 1 + zi2 < 0.0625) return(true);
// q = (x-1/4)^2 + y^2
q = zr2 - 0.5*zr + 0.0625 + zi2;
// q*(q+(x-1/4)) < 1/4*y^2
q *= (q + zr - 0.25);
if (q < 0.25*zi2) return(true);
return(false);
}
int eval_point(int j, int km, double cr, double ci)
{
int k = 1;
int N = km;
double zr = cr;
double zi = ci;
double zr2 = zr * zr, zi2 = zi * zi;
// If we are in one of the two biggest "lakes," we need go no further
if (region_test(zr, zr2, zi2)) return N;
// Periodicity-testing variables
double zrp = 0, zip = 0;
int k_check = 0, N_check = 3, k_update = 0;
while ( k < N ) {
// Compute Z[n+1] = Z[n]^2 + C, with escape test
if ( zr2+zi2 > 16.0 ) return k;
zi = 2.0 * zr * zi + ci;
zr = zr2 - zi2 + cr;
k++;
// Periodicity test: If same point is reached as previously,
// there is no escape
if ( zr == zrp )
if ( zi == zip ) return N;
// Check if previous-value update needed
if ( k_check == N_check )
{
// Yes, do it
zrp = zr;
zip = zi;
// Check again after another N_check iterations, an
// interval that occasionally doubles
k_check = 0;
if ( k_update == 5 )
{
k_update = 0;
N_check *= 2;
}
k_update++;
}
k_check++;
// Compute squares for next iteration
zr2 = zr * zr;
zi2 = zi * zi;
}
return k;
}
"""
code = """
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
int j, zint;
int N = km;
signed char kx, ky;
double xk, yk;
for (j=0; j<Nx[0]; j++) {
// Evaluate five points in an X arrangement including and around the
// one specified by X1(j) and ci
zint = eval_point(j, km, X1(j), ci);
Z1(j) = zint;
kx = -1;
ky = -1;
while ((zint < km) && (kx < 2)) {
xk = X1(j) + kx * qd;
while ((zint < km) && (ky < 2)) {
yk = (double)ci + ky * qd;
zint = eval_point(j, km, xk, yk);
Z1(j) += zint;
ky += 2;
}
kx += 2;
}
if (zint == km) {
// A no-escape evaluation at one point in the X is treated
// as if there were no escape at any point in the X
Z1(j) = 5*N;
}
}
"""
vars = ['x', 'z', 'ci', 'qd', 'km']
steepness = 3
def __init__(self, N_values):
"""
Constructor:
@param N_values: The number of iterations to try, hence the
range of integer values, for a single call to
L{computeValues}. Because a 5-point star around each point
is evaluated with the values summed, the actual range of
values for each point is 5 times greater.
"""
self.N_values = N_values
self.cm = ColorMapper()
# The maximum possible escape value is mapped to 1.0, before
# exponent and then color mapping are applied
self.scale = 0.2 / N_values
self.infoObj = my_info()
def __call__(self, crMin, crMax, N, ci):
"""
Computes values for I{N} points along the real (horizontal) axis
from I{crMin} to I{crMax}, with the constant imaginary
component I{ci}.
@return: A Python B-array I{3*N} containing RGB triples for an
image representing the escape values.
"""
qd = 0.25 * (crMax - crMin) / N
x = np.linspace(crMin, crMax, N, dtype=np.float64)
z = self.computeValues(N, x, ci, qd)
# Invert the iteration values so that trapped points have zero
# value, then scale to the range [-1.0, +1.0]
z = 2*self.scale * (5*self.N_values - z) - 1.0
# Transform to emphasize details in the middle
z = self.transform(z, self.steepness)
# [-1.0, +1.0] --> [0.0, 1.0]
z = 0.5*(z + 1.0)
# Map to my RGB colormap
return self.cm(z)
def computeValues(self, N, x, ci, qd):
"""
Computes and returns a row vector of escape iterations, integer
values.
"""
km = self.N_values - 1
z = np.zeros(N, dtype=np.int)
weave.inline(
self.code, self.vars,
customize=self.infoObj, support_code=self.support_code)
return z
def transform(self, x, k):
"""
Transforms the input vector I{x} by taking it to a power, which is
zero (no transform) or odd-numbered.
"""
return np.power(x, k)
|
mcmandelbrot/valuer.py
|
| 0.648244 | 0.421135 |
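A minimal usage sketch for mcmandelbrot/valuer.py above, assuming the legacy weave package (Python 2 era) and a working C tool chain are available; coordinates and sizes are illustrative:

from mcmandelbrot.valuer import MandelbrotValuer

mv = MandelbrotValuer(N_values=1000)
# One 800-pixel scanline at imaginary component ci = 0.1, spanning real
# values -2.0 .. +1.0; returns RGB byte triples (expected length 3 * 800).
rgb_row = mv(-2.0, 1.0, 800, 0.1)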
from pathlib import Path
class Handheld:
def __init__(self, instruction_file):
self.accumulator = 0
# parse to function eval format
        self.instructions = [(inst.split()[0], inst.split()[1]) for inst in Path(instruction_file).read_text().splitlines()]
self.instruction_counter = [0] * len(self.instructions)
self.index = 0
def reset(self):
self.instruction_counter = [0] * len(self.instructions)
self.index = 0
self.accumulator = 0
def next(self):
# program finished
if self.index == len(self.instructions):
return False
elif self.index > len(self.instructions):
raise RuntimeError(f'Invalid index at {self.index}')
self.instruction_counter[self.index] += 1
if self.instruction_counter[self.index] > 1:
raise RuntimeError('Infinite loop')
next_inst = self.instructions[self.index]
eval(f'self.{next_inst[0]}({next_inst[1]})')
return True
def fix(self):
for i in range(len(self.instructions)):
orig_instruction = self.instructions[i]
if orig_instruction[0] == 'nop':
self.instructions[i] = ('jmp', orig_instruction[1])
elif orig_instruction[0] == 'jmp':
self.instructions[i] = ('nop', orig_instruction[1])
try:
self.run()
return
except RuntimeError as e:
self.instructions[i] = orig_instruction
continue
def acc(self, value):
self.accumulator += value
self.index += 1
def jmp(self, step):
self.index += step
def nop(self, value):
self.index += 1
def run(self):
self.reset()
keep_running = True
while keep_running:
keep_running = self.next()
def solve_first(input_file):
handheld = Handheld(input_file)
try:
handheld.run()
except RuntimeError as e:
print(str(e) + f' --> Accumulator at {handheld.accumulator}')
def solve_second(input_file):
handheld = Handheld(input_file)
handheld.fix()
try:
handheld.run()
print(f'Program finished --> Accumulator at {handheld.accumulator}')
except RuntimeError as e:
print(str(e) + f' --> Accumulator at {handheld.accumulator}')
if __name__ == "__main__":
input_file = 'input.txt'
print("Solution to first puzzle:")
solve_first(input_file)
print("Solution to second puzzle:")
solve_second(input_file)
|
2020/day8/solve.py
|
| 0.424173 | 0.278186 |
import tempfile
import unittest
import mock
import yaml
from py import release
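# Unit tests for py.release: build_postsubmit/build_pr are exercised with clone_repo,
# install_go_deps and build_and_push mocked out; update_values/update_chart are
# verified against temporary files.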
class ReleaseTest(unittest.TestCase):
@mock.patch("py.release.os.makedirs")
@mock.patch("py.release.os.symlink")
@mock.patch("py.release.util.install_go_deps")
@mock.patch("py.release.util.clone_repo")
@mock.patch("py.release.build_and_push")
def test_build_postsubmit(self, mock_build_and_push, mock_clone, _mock_install, _mock_os, _mock_makedirs): # pylint: disable=no-self-use
parser = release.build_parser()
args = parser.parse_args(["postsubmit", "--src_dir=/top/src_dir"])
release.build_postsubmit(args)
mock_build_and_push.assert_called_once_with(
'/top/src_dir/go', '/top/src_dir/go/src/github.com/tensorflow/k8s',
mock.ANY)
mock_clone.assert_called_once_with(
'/top/src_dir/git_tensorflow_k8s', 'tensorflow', 'k8s', None, None)
@mock.patch("py.release.os.makedirs")
@mock.patch("py.release.os.symlink")
@mock.patch("py.release.util.install_go_deps")
@mock.patch("py.release.util.clone_repo")
@mock.patch("py.release.build_and_push")
def test_build_pr(self, mock_build_and_push, mock_clone, _mock_install, _mock_os, _mock_makedirs): # pylint: disable=no-self-use
parser = release.build_parser()
args = parser.parse_args(["pr", "--pr=10", "--commit=22",
"--src_dir=/top/src_dir"])
release.build_pr(args)
mock_build_and_push.assert_called_once_with(
'/top/src_dir/go', '/top/src_dir/go/src/github.com/tensorflow/k8s',
mock.ANY)
mock_clone.assert_called_once_with(
"/top/src_dir/git_tensorflow_k8s", "tensorflow", "k8s", "22",
["pull/10/head:pr"])
def test_update_values(self):
with tempfile.NamedTemporaryFile(delete=False) as hf:
hf.write("""# Test file
image: gcr.io/image:latest
## Install Default RBAC roles and bindings
rbac:
install: false
apiVersion: v1beta1""")
values_file = hf.name
release.update_values(hf.name, "gcr.io/image:v20171019")
with open(values_file) as hf:
output = hf.read()
expected = """# Test file
image: gcr.io/image:v20171019
## Install Default RBAC roles and bindings
rbac:
install: false
apiVersion: v1beta1"""
self.assertEquals(expected, output)
def test_update_chart_file(self):
with tempfile.NamedTemporaryFile(delete=False) as hf:
hf.write("""
name: tf-job-operator-chart
home: https://github.com/jlewi/mlkube.io
version: 0.1.0
appVersion: 0.1.0
""")
chart_file = hf.name
release.update_chart(chart_file, "v20171019")
with open(chart_file) as hf:
output = yaml.load(hf)
expected = {
"name": "tf-job-operator-chart",
"home": "https://github.com/jlewi/mlkube.io",
"version": "0.1.0-v20171019",
"appVersion": "0.1.0-v20171019",
}
self.assertEquals(expected, output)
if __name__ == "__main__":
unittest.main()
|
py/release_test.py
|
| 0.44553 | 0.241389 |
# coding=utf-8
import time
from datetime import datetime
from django.db import models
from climate.models import TempHumidValue
from plugins.arduino.models import Arduino, set_command
from events.utils import event_setter
MODEL = 'SensorDS18D20'
LOCATION_TYPES = (
('inside', 'В помещении'),
('outside', 'На улице'),
('other', 'Другое'),
)
class SensorDS18D20(models.Model):
"""
    Model for registering DS18D20 temperature sensors.
"""
CONTAINER = 'climate'
TYPE = 'TempHumidSensor'
WIDGET_TYPE = 'positioned'
name = models.SlugField(
max_length=20,
verbose_name='Системное имя',
unique=True
)
controller = models.ForeignKey(
Arduino,
verbose_name='Контроллер Arduino',
)
controller_pin = models.PositiveSmallIntegerField(
verbose_name='Вывод (pin) на Arduino',
)
location_type = models.SlugField(
choices=LOCATION_TYPES,
default='inside',
verbose_name='Тип расположение датчика',
)
class Meta(object):
db_table = 'climate_sensords18d20_ext'
verbose_name = 'Датчик DS18D20'
verbose_name_plural = 'Датчики DS18D20'
def __unicode__(self):
return self.name
def set_command(self):
cmd = 'ds18d20:%d' % (self.controller_pin,)
set_command(self, cmd)
def set_result(self, result):
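        """Parse the raw Arduino reading and store it as a TempHumidValue."""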
if result is not None:
try:
temp = int(result)
                # Check the received value for obvious reading errors.
if self.check_data(temp):
                    # Store the reading in the DB table only if it differs from the
                    # previous value; otherwise just refresh the previous record's
                    # timestamp. This keeps the number of stored points small, so
                    # chart data can be fetched much faster.
try:
value = TempHumidValue.objects.filter(object_id=self.id).latest('id')
except TempHumidValue.DoesNotExist:
value = None
if value is not None and value.temperature == temp:
value.datetime = datetime.now()
value.save()
else:
TempHumidValue.objects.create(content_object=self,
temperature=temp,
humidity=0)
self.set_event(temp)
except ValueError:
pass
def set_event(self, temp):
"""
        Log readings that fall outside the normal range to the event journal.
        :param temp: int temperature value
"""
level = 2
if self.location_type == 'inside':
if 28 < temp <= 40 or 13 <= temp < 18:
msg = u'{0}: Температура вне нормы 18-28 С'.format(self.name)
event_setter('climate', msg, 3)
level = 3
elif temp > 40 or temp < 13:
msg = u'{0}: Температура за границами 13-40 С'.format(self.name)
event_setter('climate', msg, 4, email=True)
level = 4
elif self.location_type == 'outside':
if temp > 35:
msg = u'{0}: Температура на улице более 35 С'.format(self.name)
event_setter('climate', msg, 3)
level = 3
elif temp < -15:
msg = u'{0}: Температура на улице менее -15 С'.format(self.name)
event_setter('climate', msg, 3)
level = 3
self.level = level
self.save()
def check_data(self, temp):
"""
        Sanity-check a temperature reading against the sensor's hard limits.
        Needed because sensors occasionally report bogus values and the next
        poll only happens every 5 minutes (see RUN_EVERY_MINS).
        For the DS18D20: -55 < temp < 125 (+/-0.5 C).
        :param temp: int temperature value
        :returns: True if the reading is within the valid range
        """
        return -55 < temp < 125
|
Servus/plugins/arduino_ds18d20/models.py
|
| 0.309232 | 0.199522 |
from pyspark.context import SparkContext
from pyspark.sql.dataframe import DataFrame
from pyspark.rdd import RDD
from pyspark.sql import SparkSession
from h2o.frame import H2OFrame
from pysparkling.initializer import Initializer
from pysparkling.conf import H2OConf
import h2o
from pysparkling.conversions import FrameConversions as fc
import warnings
import atexit
import sys
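# PySparkling H2OContext: starts or attaches to an H2O cluster backing a Spark
# session and converts between Spark DataFrames/RDDs and H2OFrames.
# Typical use (sketch):
#   hc = H2OContext.getOrCreate(spark)
#   h2o_frame = hc.as_h2o_frame(spark_df)
#   spark_df2 = hc.as_spark_frame(h2o_frame)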
def _monkey_patch_H2OFrame(hc):
@staticmethod
def determine_java_vec_type(vec):
if vec.isCategorical():
return "enum"
elif vec.isUUID():
return "uuid"
elif vec.isString():
return "string"
elif vec.isInt():
if vec.isTime():
return "time"
else:
return "int"
else:
return "real"
def get_java_h2o_frame(self):
# Can we use cached H2O frame?
# Only if we cached it before and cache was not invalidated by rapids expression
if not hasattr(self, '_java_frame') or self._java_frame is None \
or self._ex._cache._id is None or self._ex._cache.is_empty() \
or not self._ex._cache._id == self._java_frame_sid:
# Note: self.frame_id will trigger frame evaluation
self._java_frame = hc._jhc.asH2OFrame(self.frame_id)
return self._java_frame
@staticmethod
def from_java_h2o_frame(h2o_frame, h2o_frame_id, cols_limit=100):
# Cache Java reference to the backend frame
sid = h2o_frame_id.toString()
cols = cols_limit if h2o_frame.numCols() > cols_limit else -1
fr = H2OFrame.get_frame(sid, cols=cols, light=True)
fr._java_frame = h2o_frame
fr._java_frame_sid = sid
fr._backed_by_java_obj = True
return fr
H2OFrame.determine_java_vec_type = determine_java_vec_type
H2OFrame.from_java_h2o_frame = from_java_h2o_frame
H2OFrame.get_java_h2o_frame = get_java_h2o_frame
def _is_of_simple_type(rdd):
if not isinstance(rdd, RDD):
raise ValueError('rdd is not of type pyspark.rdd.RDD')
# Python 3.6 does not contain type long
# this code ensures we are compatible with both, python 2.7 and python 3.6
if sys.version_info > (3,):
type_checks = (str, int, bool, float)
else:
type_checks = (str, int, bool, long, float)
if isinstance(rdd.first(), type_checks):
return True
else:
return False
def _get_first(rdd):
if rdd.isEmpty():
raise ValueError('rdd is empty')
return rdd.first()
class H2OContext(object):
def __init__(self, spark_session):
"""
This constructor is used just to initialize the environment. It does not start H2OContext.
To start H2OContext use one of the getOrCreate methods. This constructor is internally used in those methods
"""
try:
self.__do_init(spark_session)
_monkey_patch_H2OFrame(self)
# Load sparkling water jar only if it hasn't been already loaded
Initializer.load_sparkling_jar(self._sc)
except:
raise
def __do_init(self, spark_session):
self._spark_session = spark_session
self._sc = self._spark_session._sc
self._sql_context = self._spark_session._wrapped
self._jsql_context = self._spark_session._jwrapped
self._jspark_session = self._spark_session._jsparkSession
self._jvm = self._spark_session._jvm
self.is_initialized = False
@staticmethod
def getOrCreate(spark, conf=None, verbose=True, **kwargs):
"""
Get existing or create new H2OContext based on provided H2O configuration. If the conf parameter is set then
configuration from it is used. Otherwise the configuration properties passed to Sparkling Water are used.
If the values are not found the default values are used in most of the cases. The default cluster mode
        is internal, i.e. spark.ext.h2o.external.cluster.mode=false.
        :param spark: SparkSession (a SparkContext is accepted but deprecated)
        :param conf: optional H2OConf with the Sparkling Water configuration
        :returns: H2OContext
"""
spark_session = spark
if isinstance(spark, SparkContext):
warnings.warn("Method H2OContext.getOrCreate with argument of type SparkContext is deprecated and " +
"parameter of type SparkSession is preferred.")
spark_session = SparkSession.builder.getOrCreate()
h2o_context = H2OContext(spark_session)
jvm = h2o_context._jvm # JVM
jspark_session = h2o_context._jspark_session # Java Spark Session
if conf is not None:
selected_conf = conf
else:
selected_conf = H2OConf(spark_session)
# Create backing Java H2OContext
jhc = jvm.org.apache.spark.h2o.JavaH2OContext.getOrCreate(jspark_session, selected_conf._jconf)
h2o_context._jhc = jhc
h2o_context._conf = selected_conf
h2o_context._client_ip = jhc.h2oLocalClientIp()
h2o_context._client_port = jhc.h2oLocalClientPort()
# Create H2O REST API client
h2o.connect(ip=h2o_context._client_ip, port=h2o_context._client_port, verbose=verbose, **kwargs)
h2o_context.is_initialized = True
if verbose:
print(h2o_context)
        # Stop H2O when running standalone PySparkling scripts, but only in client deploy
        # mode, so the user does not need to explicitly shut H2O down.
        # In cluster deploy mode the application would call exit, which the Spark AM treats as a failure.
deploy_mode = spark_session.sparkContext._conf.get("spark.submit.deployMode")
if deploy_mode != "cluster":
atexit.register(lambda: h2o_context.__stop())
return h2o_context
def __stop(self):
try:
h2o.cluster().shutdown()
except:
pass
def stop(self):
warnings.warn("Stopping H2OContext from PySparkling is not fully supported. Please restart your PySpark session and create a new H2OContext.")
def __del__(self):
self.stop()
def __str__(self):
if self.is_initialized:
return self._jhc.toString()
else:
return "H2OContext: not initialized, call H2OContext.getOrCreate(spark) or H2OContext.getOrCreate(spark, conf)"
def __repr__(self):
self.show()
return ""
def show(self):
print(self)
def get_conf(self):
return self._conf
def as_spark_frame(self, h2o_frame, copy_metadata=True):
"""
Transforms given H2OFrame to Spark DataFrame
Parameters
----------
h2o_frame : H2OFrame
copy_metadata: Bool = True
Returns
-------
Spark DataFrame
"""
if isinstance(h2o_frame, H2OFrame):
j_h2o_frame = h2o_frame.get_java_h2o_frame()
jdf = self._jhc.asDataFrame(j_h2o_frame, copy_metadata, self._jsql_context)
df = DataFrame(jdf, self._sql_context)
# Attach h2o_frame to dataframe which forces python not to delete the frame when we leave the scope of this
# method.
# Without this, after leaving this method python would garbage collect the frame since it's not used
# anywhere and spark. when executing any action on this dataframe, will fail since the frame
# would be missing.
df._h2o_frame = h2o_frame
return df
def as_h2o_frame(self, dataframe, framename=None):
"""
Transforms given Spark RDD or DataFrame to H2OFrame.
Parameters
----------
dataframe : Spark RDD or DataFrame
framename : Optional name for resulting H2OFrame
Returns
-------
H2OFrame which contains data of original input Spark data structure
"""
if isinstance(dataframe, DataFrame):
return fc._as_h2o_frame_from_dataframe(self, dataframe, framename)
elif isinstance(dataframe, RDD):
# First check if the type T in RDD[T] is one of the python "primitive" types
# String, Boolean, Int and Double (Python Long is converted to java.lang.BigInteger)
if _is_of_simple_type(dataframe):
first = _get_first(dataframe)
# Make this code compatible with python 3.6 and python 2.7
global long
if sys.version_info > (3,):
long = int
if isinstance(first, str):
return fc._as_h2o_frame_from_RDD_String(self, dataframe, framename)
elif isinstance(first, bool):
return fc._as_h2o_frame_from_RDD_Bool(self, dataframe, framename)
elif (isinstance(dataframe.min(), int) and isinstance(dataframe.max(), int)) or (isinstance(dataframe.min(), long) and isinstance(dataframe.max(), long)):
if dataframe.min() >= self._jvm.Integer.MIN_VALUE and dataframe.max() <= self._jvm.Integer.MAX_VALUE:
return fc._as_h2o_frame_from_RDD_Int(self, dataframe, framename)
elif dataframe.min() >= self._jvm.Long.MIN_VALUE and dataframe.max() <= self._jvm.Long.MAX_VALUE:
return fc._as_h2o_frame_from_RDD_Long(self, dataframe, framename)
else:
raise ValueError('Numbers in RDD Too Big')
elif isinstance(first, float):
return fc._as_h2o_frame_from_RDD_Float(self, dataframe, framename)
else:
return fc._as_h2o_frame_from_complex_type(self, dataframe, framename)
|
py/pysparkling/context.py
|
| 0.596081 | 0.267242 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import tensorflow as tf
from Model import Model
class Classifier(object):
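    """Image classifier backed by a frozen TensorFlow graph.
    config() loads classifier/logs/output_graph.pb and its labels file;
    classify() feeds an image file through the graph and returns the top-5
    predictions as Model(label, score) objects.
    """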
def __init__(self):
self.graph = None
self.output_operation = None
self.input_operation = None
self.label_file = None
self.sess = None
self.config()
def classify(self, image):
input_height = 299
input_width = 299
input_mean = 0
input_std = 255
t = self.read_tensor_from_image_file(
image,
input_height=input_height,
input_width=input_width,
input_mean=input_mean,
input_std=input_std)
results = self.sess.run(self.output_operation.outputs[0], {
self.input_operation.outputs[0]: t
})
results = np.squeeze(results)
top_k = results.argsort()[-5:][::-1]
labels = self.load_labels(self.label_file)
resp = []
for i in top_k:
resp.append(Model(str(labels[i]), float(results[i])))
return resp
def load_graph(self, model_file):
graph = tf.Graph()
graph_def = tf.GraphDef()
with open(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def)
return graph
def read_tensor_from_image_file(self, file_name,
input_height=299,
input_width=299,
input_mean=0,
input_std=255):
input_name = "file_reader"
output_name = "normalized"
file_reader = tf.read_file(file_name, input_name)
if file_name.endswith(".png"):
image_reader = tf.image.decode_png(
file_reader, channels=3, name="png_reader")
elif file_name.endswith(".gif"):
image_reader = tf.squeeze(
tf.image.decode_gif(file_reader, name="gif_reader"))
elif file_name.endswith(".bmp"):
image_reader = tf.image.decode_bmp(file_reader, name="bmp_reader")
else:
image_reader = tf.image.decode_jpeg(
file_reader, channels=3, name="jpeg_reader")
float_caster = tf.cast(image_reader, tf.float32)
dims_expander = tf.expand_dims(float_caster, 0)
resized = tf.image.resize_bilinear(
dims_expander, [input_height, input_width])
normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
sess = tf.Session()
result = sess.run(normalized)
return result
def load_labels(self, label_file):
label = []
proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
for l in proto_as_ascii_lines:
label.append(l.rstrip())
return label
def config(self):
file_name = "tensorflow/examples/label_image/data/grace_hopper.jpg"
input_layer = "Placeholder"
output_layer = "final_result"
model_file = "classifier/logs/output_graph.pb"
self.label_file = "classifier/logs/output_labels.txt"
self.graph = self.load_graph(model_file)
input_name = "import/" + input_layer
output_name = "import/" + output_layer
self.input_operation = self.graph.get_operation_by_name(input_name)
self.output_operation = self.graph.get_operation_by_name(output_name)
self.sess = tf.Session(graph=self.graph)
|
classifier/Classifier.py
|
| 0.641198 | 0.105487 |
from datetime import datetime as _pydatetime, \
                     timedelta, \
                     tzinfo as _pytzinfo
import re
class datetime(_pydatetime):
"""Customized datetime class with ISO format parsing."""
_reiso = re.compile('(?P<year>[0-9]{4})'
'-(?P<month>[0-9]{1,2})'
'-(?P<day>[0-9]{1,2})'
'.'
'(?P<hour>[0-9]{2})'
':(?P<min>[0-9]{2})'
'(:(?P<sec>[0-9]{2}))?'
'(?P<tz>Z|'
'(?P<tzdirec>[-+])'
'(?P<tzhour>[0-9]{1,2})'
'(:)?'
'(?P<tzmin>[0-9]{2})?'
')?')
class _tzinfo(_pytzinfo):
def __init__(self, direc='+', hr=0, min=0):
if direc == '-':
hr = -1*int(hr)
self._offset = timedelta(hours=int(hr), minutes=int(min))
def utcoffset(self, dt):
return self._offset
def tzname(self, dt):
return ''
def dst(self, dt):
return timedelta(0)
@classmethod
def fromIso(cls, isotime, sep='T'):
match = cls._reiso.match(isotime)
if match is None:
raise TypeError("time data '%s' does not match ISO 8601 format"
% isotime)
dt = [int(a) for a in match.groups()[:5]]
if match.group('sec') is not None:
dt.append(int(match.group('sec')))
else:
dt.append(0)
if match.group('tz'):
if match.group('tz') == 'Z':
tz = cls._tzinfo()
elif match.group('tzmin'):
tz = cls._tzinfo(*match.group('tzdirec', 'tzhour', 'tzmin'))
else:
tz = cls._tzinfo(*match.group('tzdirec', 'tzhour'))
dt.append(0)
dt.append(tz)
return cls(*dt)
from request import Request
from tmdb_exceptions import *
syssession = None
def set_session(sessionid):
global syssession
syssession = Session(sessionid)
def get_session(sessionid=None):
global syssession
if sessionid:
return Session(sessionid)
elif syssession is not None:
return syssession
else:
return Session.new()
class Session(object):
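    """TMDb authentication session.
    `authtoken` requests a new request token, `callbackurl` is where the user
    must approve it, and `sessionid` exchanges the approved token for the
    session id used by authenticated requests.
    """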
@classmethod
def new(cls):
return cls(None)
def __init__(self, sessionid):
self.sessionid = sessionid
@property
def sessionid(self):
if self._sessionid is None:
if self._authtoken is None:
raise TMDBError("No Auth Token to produce Session for")
# TODO: check authtoken expiration against current time
req = Request('authentication/session/new',
request_token=self._authtoken)
req.lifetime = 0
dat = req.readJSON()
if not dat['success']:
raise TMDBError("Session generation failed")
self._sessionid = dat['session_id']
return self._sessionid
@sessionid.setter
def sessionid(self, value):
self._sessionid = value
self._authtoken = None
self._authtokenexpiration = None
if value is None:
self.authenticated = False
else:
self.authenticated = True
@property
def authtoken(self):
if self.authenticated:
raise TMDBError("Session is already authenticated")
if self._authtoken is None:
req = Request('authentication/token/new')
req.lifetime = 0
dat = req.readJSON()
if not dat['success']:
raise TMDBError("Auth Token request failed")
self._authtoken = dat['request_token']
self._authtokenexpiration = datetime.fromIso(dat['expires_at'])
return self._authtoken
@property
def callbackurl(self):
return "http://www.themoviedb.org/authenticate/"+self._authtoken
|
tmdb3/tmdb_auth.py
|
| 0.511229 | 0.183064 |
import random
from pathlib import Path
import pyglet
TILE_SIZE = 64
TILES_DIRECTORY = Path('static/snake-tiles')
class State:
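    """Game state for the snake: board size, snake segments, food positions and
    queued direction changes. move() advances the snake one tile and handles
    food, walls and self-collision.
    """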
def __init__(self):
self.snake = [(0, 0), (1, 0)]
self.snake_direction = 0, 1
self.width = 10
self.height = 10
self.food = []
self.add_food()
self.add_food()
self.snake_alive = True
self.queued_directions = []
def move(self):
if self.queued_directions:
new_direction = self.queued_directions[0]
del self.queued_directions[0]
old_x, old_y = self.snake_direction
new_x, new_y = new_direction
if (old_x, old_y) != (-new_x, -new_y):
self.snake_direction = new_direction
if not self.snake_alive:
return
old_x, old_y = self.snake[-1]
dir_x, dir_y = self.snake_direction
new_x = old_x + dir_x
new_y = old_y + dir_y
        # Check whether the snake ran off the playing field
if new_x < 0:
self.snake_alive = False
if new_y < 0:
self.snake_alive = False
if new_x >= self.width:
self.snake_alive = False
if new_y >= self.height:
self.snake_alive = False
new_head = new_x, new_y
if new_head in self.snake:
self.snake_alive = False
self.snake.append(new_head)
if new_head in self.food:
self.food.remove(new_head)
self.add_food()
else:
del self.snake[0]
def add_food(self):
for try_number in range(100):
x = random.randrange(self.width)
y = random.randrange(self.height)
position = x, y
if (position not in self.snake) and (position not in self.food):
self.food.append(position)
return
def image_name(self, index):
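        """Return the (previous, next) direction names for the snake segment
        at `index`, used to pick the matching tile image.
        """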
return ("tail", "head")
image_name = []
x, y = self.snake[index]
for index in (index-1, index+1):
if index < 0:
image_name.append('tail')
continue
if index > len(self.snake) - 1:
image_name.append('head')
continue
x1, y1 = self.snake[index]
if x1 < x:
image_name.append('left')
elif x1 > x:
image_name.append('right')
elif y1 < y:
image_name.append('bottom')
elif y1 > y:
image_name.append('top')
return image_name
red_image = pyglet.image.load('static/apple.png')
snake_tiles = {}
for path in TILES_DIRECTORY.glob('*.png'):
print(f'loading {path}')
snake_tiles[path.stem] = pyglet.image.load(path)
window = pyglet.window.Window()
state = State()
state.width = window.width // TILE_SIZE
state.height = window.height // TILE_SIZE
@window.event
def on_draw():
window.clear()
pyglet.gl.glEnable(pyglet.gl.GL_BLEND)
pyglet.gl.glBlendFunc(pyglet.gl.GL_SRC_ALPHA, pyglet.gl.GL_ONE_MINUS_SRC_ALPHA)
for i, (x, y) in enumerate(state.snake):
source, dest = state.image_name(i)
if dest == 'end' and not state.snake_alive:
dest = 'dead'
snake_tiles[source + '-' + dest].blit(
x * TILE_SIZE, y * TILE_SIZE, width=TILE_SIZE, height=TILE_SIZE)
for x, y in state.food:
red_image.blit(
x * TILE_SIZE, y * TILE_SIZE, width=TILE_SIZE, height=TILE_SIZE)
@window.event
def on_key_press(key_code, modifier):
    if key_code == pyglet.window.key.LEFT:
        new_direction = -1, 0
    elif key_code == pyglet.window.key.RIGHT:
        new_direction = 1, 0
    elif key_code == pyglet.window.key.DOWN:
        new_direction = 0, -1
    elif key_code == pyglet.window.key.UP:
        new_direction = 0, 1
    else:
        # Ignore keys that do not change the direction
        return
    state.queued_directions.append(new_direction)
def move(dt):
state.move()
pyglet.clock.schedule_interval(move, 1/6)
pyglet.app.run()
|
lessons/projects/snake/snake_game.py
|
| 0.31542 | 0.192407 |
import os
import glob
import re
import sys
import socket
import couchdb
import logging
import argparse
import ConfigParser
import yaml
import json
import distance
import operator
CONFIG = {}
logger = logging.getLogger(__name__)
def associete_samples(samples_name, mode):
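    """Print the user/NGI name (and manual status) for each sample in samples_name,
    looking it up across all projects in statusDB.
    """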
couch = setupServer(CONFIG)
projects_db = couch['projects']
for doc_id in projects_db:
#perform sanity check on statusDB project database
if 'creation_time' not in projects_db[doc_id]:
continue
if 'details' not in projects_db[doc_id] or 'customer_project_reference' not in projects_db[doc_id]['details'] or \
'project_id' not in projects_db[doc_id]:
continue
project = projects_db[doc_id]
if 'samples' not in project:
continue
for sample in project['samples']:
if 'customer_name' in project['samples'][sample] and 'details' in project['samples'][sample] and \
'status_(manual)' in project['samples'][sample]['details']:
sample_user_name = project['samples'][sample]['customer_name']
sample_NGI_name = project['samples'][sample]['scilife_name']
status = project['samples'][sample]['details']['status_(manual)']
if mode == 'user2NGI':
if sample_user_name in samples_name:
print "{},{},{}".format(sample_user_name.encode('ascii', 'ignore'), sample_NGI_name, status)
else:
if sample_NGI_name in samples_name:
print "{},{},{}".format(sample_NGI_name, sample_user_name.encode('ascii', 'ignore'), status)
def associate_projects(projects_name, mode):
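    """For each given NGI project id, print the mapping between user sample names
    and NGI sample names together with the sample status and project names.
    """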
couch = setupServer(CONFIG)
projects_db = couch['projects']
user2NGI_samples_names = {}
NGI2user_samples_names = {}
for doc_id in projects_db:
#perform sanity check on statusDB project database
if 'creation_time' not in projects_db[doc_id]:
continue
if 'details' not in projects_db[doc_id] or 'customer_project_reference' not in projects_db[doc_id]['details'] or \
'project_id' not in projects_db[doc_id]:
continue
#check the projects
project = projects_db[doc_id]
user_project_name = projects_db[doc_id]['details']['customer_project_reference']
NGI_project_name = projects_db[doc_id]['project_id']
if project['project_id'] in projects_name:
for sample in project['samples']:
sample_user_name = project['samples'][sample]['customer_name']
sample_NGI_name = project['samples'][sample]['scilife_name']
status = project['samples'][sample]['details']['status_(manual)']
if sample_user_name not in user2NGI_samples_names:
user2NGI_samples_names[sample_user_name] = []
user2NGI_samples_names[sample_user_name].append([sample_NGI_name, status, user_project_name, NGI_project_name])
if sample_NGI_name not in NGI2user_samples_names:
NGI2user_samples_names[sample_NGI_name] = []
NGI2user_samples_names[sample_NGI_name].append([sample_user_name, status, user_project_name, NGI_project_name])
if mode == 'user2NGI':
for sample in user2NGI_samples_names:
print "{}".format(sample.encode('ascii', 'ignore')), # handle unicode in sample names
for NGI_id in user2NGI_samples_names[sample]:
print " --- {},{},{},{}".format(NGI_id[0].encode('ascii', 'ignore'),NGI_id[1],NGI_id[2],NGI_id[3]),
print ""
else:
for sample in NGI2user_samples_names:
sys.stdout.write("{}".format(sample))
for user_id in NGI2user_samples_names[sample]:
sys.stdout.write(" --- {},{},{},{}".format(user_id[0].encode('ascii', 'ignore'),user_id[1],user_id[2],user_id[3]))
print ""
def setupServer(conf):
db_conf = conf['statusdb']
url="http://{0}:{1}@{2}:{3}".format(db_conf['username'], db_conf['password'], db_conf['url'], db_conf['port'])
return couchdb.Server(url)
def load_yaml_config(config_file):
"""Load YAML config file
:param str config_file: The path to the configuration file.
:returns: A dict of the parsed config file.
:rtype: dict
:raises IOError: If the config file cannot be opened.
"""
if type(config_file) is file:
CONFIG.update(yaml.load(config_file) or {})
return CONFIG
else:
try:
with open(config_file, 'r') as f:
content = yaml.load(f)
CONFIG.update(content)
return content
except IOError as e:
e.message = "Could not open configuration file \"{}\".".format(config_file)
raise e
def user2NGI(args):
if args.project is not None:
associate_projects(args.project, args.mode)
else:
associete_samples(args.sample, args.mode)
def main(args):
configuration_file = args.config
projects_name = args.project
load_yaml_config(configuration_file)
    if args.project is not None and args.sample is not None:  # mutually exclusive arguments
sys.exit("Only one between --project and --sample can be specified")
if args.project is not None:
associate_projects(args.project, args.mode)
else:
associete_samples(args.sample, args.mode)
#findNGISampleNames("2014-02321")
#findNGISampleNames("2153-08D")
#findUserSampleNames(projects_name)
if __name__ == '__main__':
    parser = argparse.ArgumentParser("""This script connects to the project database in statusDB and tries to associate user names to NGI names and vice versa""")
    parser.add_argument('--config', help="couchdb configuration file", type=str, required=True)
    parser.add_argument('--mode', help="specifies whether we want the user2NGI or the NGI2user conversion", required=True, choices=('user2NGI', 'NGI2user') )
    parser.add_argument('--project', help="project name. If specified, returns all samples associated with this project; the user-NGI conversion also outputs the status of each sample", type=str, action='append')
    parser.add_argument('--sample', help="sample name. If specified, returns the associated user/NGI name for each sample", type=str, action='append')
args = parser.parse_args()
main(args)
|
userName2NGIname_finder.py
|
| 0.158532 | 0.061368 |
import copy
import json
default_config = """{
"listeners": [
{"iface": "127.0.0.1", "port": 8080}
],
"proxy": {"use_proxy": false, "host": "", "port": 0, "is_socks": false}
}"""
class ProxyConfig:
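    """Proxy settings loaded from a JSON config file: listener addresses (with
    optional transparent-proxy destinations) and an optional upstream proxy.
    A default config is written to disk if the file does not exist.
    """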
def __init__(self):
self._listeners = [('127.0.0.1', 8080, None)]
self._proxy = {'use_proxy': False, 'host': '', 'port': 0, 'is_socks': False}
def load(self, fname):
try:
with open(fname, 'r') as f:
config_info = json.loads(f.read())
except IOError:
config_info = json.loads(default_config)
with open(fname, 'w') as f:
f.write(default_config)
# Listeners
if 'listeners' in config_info:
self._parse_listeners(config_info['listeners'])
if 'proxy' in config_info:
self._proxy = config_info['proxy']
def _parse_listeners(self, listeners):
self._listeners = []
for info in listeners:
if 'port' in info:
port = info['port']
else:
port = 8080
if 'interface' in info:
iface = info['interface']
elif 'iface' in info:
iface = info['iface']
else:
iface = '127.0.0.1'
if "transparent" in info:
trans_info = info['transparent']
transparent_dest = (trans_info.get('host', ""),
trans_info.get('port', 0),
trans_info.get('use_tls', False))
else:
transparent_dest = None
self._listeners.append((iface, port, transparent_dest))
@property
def listeners(self):
return copy.deepcopy(self._listeners)
@listeners.setter
def listeners(self, val):
self._parse_listeners(val)
@property
def proxy(self):
# don't use this, use the getters to get the parsed values
return self._proxy
@proxy.setter
def proxy(self, val):
self._proxy = val
@property
def use_proxy(self):
if self._proxy is None:
return False
if 'use_proxy' in self._proxy:
if self._proxy['use_proxy']:
return True
return False
@property
def proxy_host(self):
if self._proxy is None:
return ''
if 'host' in self._proxy:
return self._proxy['host']
return ''
@property
def proxy_port(self):
if self._proxy is None:
return ''
if 'port' in self._proxy:
return self._proxy['port']
return ''
@property
def proxy_username(self):
if self._proxy is None:
return ''
if 'username' in self._proxy:
return self._proxy['username']
return ''
@property
def proxy_password(self):
if self._proxy is None:
return ''
if 'password' in self._proxy:
return self._proxy['password']
return ''
@property
def use_proxy_creds(self):
        if self._proxy is None:
            return False
        return 'username' in self._proxy or 'password' in self._proxy
@property
def is_socks_proxy(self):
if self._proxy is None:
return False
if 'is_socks' in self._proxy:
if self._proxy['is_socks']:
return True
return False
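# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of pappyproxy itself).
# The file name "config.json" is an assumption; note that load() writes the
# default config to that path when the file does not exist yet.
if __name__ == '__main__':
    config = ProxyConfig()
    config.load('config.json')
    for iface, port, transparent_dest in config.listeners:
        print('listening on {0}:{1} (transparent={2})'.format(iface, port, transparent_dest))
    if config.use_proxy:
        print('upstream proxy {0}:{1} (socks={2})'.format(
            config.proxy_host, config.proxy_port, config.is_socks_proxy))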
|
pappyproxy/config.py
|
| 0.411584 | 0.07373 |
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
class Attn(nn.Module):
def __init__(self, method, hidden_size):
super(Attn, self).__init__()
self.use_cuda = torch.cuda.is_available()
self.method = method
self.hidden_size = hidden_size
if self.method == 'general':
self.attn = nn.Linear(self.hidden_size, hidden_size)
elif self.method == 'concat':
self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
self.v = nn.Parameter(torch.FloatTensor(1, hidden_size))
def forward(self, hidden, targets, mask=None):
this_batch_size = targets.size(0)
max_len = targets.size(1)
# Create variable to store attention energies
attn_energies = Variable(torch.zeros(this_batch_size, max_len)) # B x S
if torch.cuda.is_available():
attn_energies = attn_energies.cuda()
# For each batch of encoder outputs
for b in range(this_batch_size):
# Calculate energy for each encoder output
for i in range(max_len):
attn_energies[b, i] = self.score(hidden[:, b], targets[b, i].unsqueeze(0))
if mask is not None:
attn_energies = attn_energies + mask
        # Normalize energies to weights in range 0 to 1, resize to B x 1 x S
return F.softmax(attn_energies, dim=1).unsqueeze(1)
def score(self, hidden, target):
if self.method == 'dot':
energy = torch.dot(hidden.squeeze(0), target.squeeze(0))
return energy
elif self.method == 'general':
energy = self.attn(target)
return torch.dot(hidden.squeeze(0), energy.squeeze(0))
elif self.method == 'concat':
energy = self.attn(torch.cat((hidden, target), 1))
energy = self.v.dot(energy)
return energy
class BasicRNN(nn.Module):
def __init__(self, embedding_dim, hidden_size, lang, pretrained_embeddings,
num_layers, vocab_size, num_classes, dropout):
super(BasicRNN, self).__init__()
self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
if pretrained_embeddings is not None:
for i in range(vocab_size):
word = lang.index2word[i]
if word in pretrained_embeddings:
self.word_embeds.weight[i] = nn.Parameter(torch.FloatTensor(pretrained_embeddings[word]))
self.word_embeds = nn.Embedding.from_pretrained(self.word_embeds.weight)
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_directions = 1
self.rnn = nn.RNN(input_size=embedding_dim,
hidden_size=hidden_size, num_layers=num_layers,
batch_first=True, bidirectional=False)
self.fc1 = nn.Linear(hidden_size * self.num_directions, 64)
self.fc2 = nn.Linear(64, num_classes)
self.dropout = nn.Dropout(p=dropout)
# figure this out
self.use_cuda = torch.cuda.is_available()
def freeze_layer(self, layer):
fc = self.fc1
if layer == "fc2":
fc = self.fc2
for param in fc.parameters():
print(param)
param.requires_grad = False
def forward(self, inputs, seq_lengths):
batch_size = inputs.size(0)
inputs = self.word_embeds(inputs)
# Set initial states
h0 = Variable(torch.zeros(self.num_layers * self.num_directions, batch_size, self.hidden_size))
if self.use_cuda:
h0 = h0.cuda()
# Forward propagate RNN
outputs, _ = self.rnn(inputs, h0)
# Decode hidden state of last time step
outputs = F.relu(self.fc1(outputs[:, -1, :]))
outputs = self.dropout(outputs)
outputs = self.fc2(outputs)
return outputs
def to_cuda(self, tensor):
if torch.cuda.is_available():
return tensor.cuda()
else:
return tensor
class AttentionRNN(BasicRNN):
def __init__(self, embedding_dim, hidden_size, lang, pretrained_embeddings,
num_layers, vocab_size, num_classes, dropout):
super(AttentionRNN, self).__init__(
embedding_dim, hidden_size, lang, pretrained_embeddings,
num_layers, vocab_size, num_classes, dropout)
self.attn = Attn('general', hidden_size)
def forward(self, inputs, lang, seq_lengths):
batch_size = inputs.size(0)
embedded = self.word_embeds(inputs)
total_length = embedded.size(1) # get the max sequence length
# Set initial states
h0 = Variable(torch.zeros(self.num_layers * self.num_directions, batch_size, self.hidden_size))
if torch.cuda.is_available():
h0 = h0.cuda()
packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, seq_lengths, batch_first=True)
# Forward propagate RNN
# rnn_outputs, state = self.rnn(embedded, h0)
rnn_outputs, state = self.rnn(packed, h0)
rnn_outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(
rnn_outputs, batch_first=True, total_length=total_length) # unpack (back to padded)
encoder_mask = torch.Tensor(np.array(inputs.cpu().data.numpy() == lang.PAD_token,
dtype=float) * (-1e6)) # [b x seq]
encoder_mask = Variable(self.to_cuda(encoder_mask))
        # use attention to compute a soft alignment score between each
        # RNN output and the final hidden state of the RNN
attn_weights = self.attn(state, rnn_outputs, mask=encoder_mask)
new_state = attn_weights.bmm(rnn_outputs) # B x 1 x N
# Decode hidden state of last time step
# outputs = F.relu(self.fc1(rnn_outputs[:, -1, :]))
outputs = F.relu(self.fc1(new_state.squeeze(1)))
outputs = self.dropout(outputs)
outputs = self.fc2(outputs)
return outputs
class LSTM(BasicRNN):
def __init__(self, embedding_dim, hidden_size, lang, pretrained_embeddings,
num_layers, vocab_size, num_classes, dropout):
super(LSTM, self).__init__(embedding_dim, hidden_size, lang, pretrained_embeddings,
num_layers, vocab_size, num_classes, dropout)
self.rnn = nn.LSTM(input_size=embedding_dim,
hidden_size=hidden_size, num_layers=num_layers,
batch_first=True, bidirectional=False)
def forward(self, inputs, seq_lengths):
batch_size = inputs.size(0)
inputs = self.word_embeds(inputs)
# Set initial states
h0 = Variable(torch.zeros(self.num_layers * self.num_directions, batch_size, self.hidden_size))
c0 = Variable(torch.zeros(self.num_layers * self.num_directions, batch_size, self.hidden_size))
if torch.cuda.is_available():
h0 = h0.cuda()
c0 = c0.cuda()
# Forward propagate RNN
outputs, _ = self.rnn(inputs, (h0, c0))
# Decode hidden state of last time step
outputs = F.relu(self.fc1(outputs[:, -1, :]))
outputs = self.dropout(outputs)
outputs = self.fc2(outputs)
return outputs
class GRURNN(BasicRNN):
def __init__(self, embedding_dim, hidden_size, lang, pretrained_embeddings,
num_layers, vocab_size, num_classes, dropout):
super(GRURNN, self).__init__(embedding_dim, hidden_size, lang, pretrained_embeddings,
num_layers, vocab_size, num_classes, dropout)
self.rnn = nn.GRU(input_size=embedding_dim,
hidden_size=hidden_size, num_layers=num_layers,
batch_first=True, bidirectional=False)
class AttentionGRURNN(AttentionRNN):
def __init__(self, embedding_dim, hidden_size, lang, pretrained_embeddings,
num_layers, vocab_size, num_classes, dropout):
super(AttentionGRURNN, self).__init__(
embedding_dim, hidden_size, lang, pretrained_embeddings,
num_layers, vocab_size, num_classes, dropout)
self.rnn = nn.GRU(input_size=embedding_dim,
hidden_size=hidden_size, num_layers=num_layers,
batch_first=True, bidirectional=False)
class HighwayNetwork(nn.Module):
def __init__(self, input_size):
super(HighwayNetwork, self).__init__()
self.fc1 = nn.Linear(input_size, input_size, bias=True)
self.fc2 = nn.Linear(input_size, input_size, bias=True)
def forward(self, x):
t = F.sigmoid(self.fc1(x))
return torch.mul(t, F.relu(self.fc2(x))) + torch.mul(1 - t, x)
class CNN(nn.Module):
def __init__(self, vocab_size, output_size, embedding_dim, lang,
pretrained_embeddings, dropout=0.1):
super(CNN, self).__init__()
self.use_cuda = torch.cuda.is_available()
self.vocab_size = vocab_size
self.embedding_dim = embedding_dim
self.output_size = output_size
self.dropout = dropout
print('vocab_size:', vocab_size)
self.embedding = nn.Embedding(vocab_size, embedding_dim)
if pretrained_embeddings is not None:
for i in range(vocab_size):
word = lang.index2word[i]
if word in pretrained_embeddings:
self.embedding.weight[i] = nn.Parameter(torch.FloatTensor(pretrained_embeddings[word]))
self.embedding = nn.Embedding.from_pretrained(self.embedding.weight)
self.conv1 = None
self.conv2 = None
self.init_conv1_layer()
self.maxpool1 = nn.MaxPool2d(kernel_size=(3, 1), stride=(3, 1))
self.init_conv2_layer()
self.maxpool2 = nn.MaxPool2d(kernel_size=(3, 1), stride=(3, 1))
self.fc1 = None
self.fc2 = None
self.init_fc_layers()
# Highway Networks
self.batch_norm = nn.BatchNorm1d(num_features=128, affine=False)
self.highway1 = HighwayNetwork(input_size=128)
self.highway2 = HighwayNetwork(input_size=128)
def init_conv1_layer(self):
self.conv1 = nn.Sequential(
nn.Conv2d(1, 10, kernel_size=(5, self.embedding_dim), stride=1, padding=2),
nn.ReLU())
def init_conv2_layer(self):
self.conv2 = nn.Sequential(
nn.Conv2d(5, 20, kernel_size=(5, 3), stride=1),
nn.ReLU())
def freeze_conv1_layer(self):
for param in self.conv1.parameters():
param.requires_grad = False
def freeze_conv2_layer(self):
for param in self.conv2.parameters():
param.requires_grad = False
def init_fc_layers(self):
self.fc1 = nn.Sequential(
nn.Linear(4160, 256),
nn.ReLU(),
nn.Dropout(p=0.5)
)
self.fc2 = nn.Linear(256, self.output_size)
def forward(self, input_seqs):
x1 = self.embedding(input_seqs)
x2 = x1.unsqueeze(1)
x3 = self.conv1(x2)
x4 = x3.transpose(1, 3)
x5 = self.maxpool1(x4)
x6 = self.conv2(x5)
x7 = x6.transpose(1, 3)
x8 = self.maxpool2(x7)
x9 = x8.view(x8.size(0), -1)
x10 = self.fc1(x9)
x = self.fc2(x10)
# print('x1:', x1.size())
# print('x2:', x2.size())
# print('x3:', x3.size())
# print('x4:', x4.size())
# print('x5:', x5.size())
# print('x6:', x6.size())
# print('x7:', x7.size())
# print('x8:', x8.size())
# print('x9:', x9.size())
# print('x10:', x10.size())
# x = self.batch_norm(x)
# x = self.highway1(x)
# x = self.highway2(x)
return x
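# ---------------------------------------------------------------------------
# Hedged smoke test (added for illustration; not part of the original model
# code). It exercises only the Attn module with zero-filled tensors to show
# the shape contract of forward(): hidden is (layers x batch x hidden),
# targets is (batch x seq x hidden), and the returned weights are
# (batch x 1 x seq).
if __name__ == '__main__':
    _attn = Attn('general', hidden_size=16)
    _hidden = torch.zeros(1, 2, 16)    # single layer, batch of 2
    _targets = torch.zeros(2, 5, 16)   # batch of 2, sequence length 5
    _weights = _attn(_hidden, _targets)
    print(_weights.size())             # expected: torch.Size([2, 1, 5])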
|
models.py
|
| 0.931641 | 0.461077 |
from typing import Any, Optional, Sequence
from websockets.datastructures import Headers, HeadersLike
from websockets.exceptions import (
InvalidHeader,
InvalidStatusCode,
NegotiationError,
RedirectHandshake,
)
from websockets.extensions import ClientExtensionFactory
from websockets.headers import (
build_authorization_basic,
build_extension,
build_host,
build_subprotocol,
)
from websockets.http import USER_AGENT
from websockets.legacy.client import WebSocketClientProtocol
from websockets.legacy.handshake import build_request, check_response
from websockets.typing import LoggerLike, Origin, Subprotocol
from websockets.uri import WebSocketURI
from .auth import Auth
from .http import HTTPInterface
from .types import Request, Response
class WebsocketAuthProtocol(WebSocketClientProtocol):
"""
Adds support for HTTPX style auth flows
"""
def __init__(
self,
*,
auth: Auth = None,
follow_redirects: bool = False,
max_redirects: int = None,
logger: Optional[LoggerLike] = None,
origin: Optional[Origin] = None,
extensions: Optional[Sequence[ClientExtensionFactory]] = None,
subprotocols: Optional[Sequence[Subprotocol]] = None,
extra_headers: Optional[HeadersLike] = None,
**kwargs: Any
) -> None:
super().__init__(
logger=logger,
origin=origin,
extensions=extensions,
subprotocols=subprotocols,
extra_headers=extra_headers,
**kwargs
)
self.auth = auth or Auth()
self.follow_redirects = follow_redirects
self.max_redirects = max_redirects or 5
async def handshake(
self,
wsuri: WebSocketURI,
origin: Optional[Origin] = None,
available_extensions: Optional[Sequence[ClientExtensionFactory]] = None,
available_subprotocols: Optional[Sequence[Subprotocol]] = None,
extra_headers: Optional[HeadersLike] = None,
) -> None:
"""
Unchanged from base client protocol except HTTP req-resp handled by
auth flow
"""
request_headers = Headers()
request_headers["Host"] = build_host(wsuri.host, wsuri.port, wsuri.secure)
if wsuri.user_info:
request_headers["Authorization"] = build_authorization_basic(
*wsuri.user_info
)
self.auth = Auth()
if origin is not None:
request_headers["Origin"] = origin
key = build_request(request_headers)
if available_extensions is not None:
extensions_header = build_extension(
[
(extension_factory.name, extension_factory.get_request_params())
for extension_factory in available_extensions
]
)
request_headers["Sec-WebSocket-Extensions"] = extensions_header
if available_subprotocols is not None:
protocol_header = build_subprotocol(available_subprotocols)
request_headers["Sec-WebSocket-Protocol"] = protocol_header
extra_headers = extra_headers or self.extra_headers
if extra_headers is not None:
request_headers.update(extra_headers)
request_headers.setdefault("User-Agent", USER_AGENT)
request = (wsuri, request_headers)
try:
status_code, response_headers = await self.http_handling_auth(request)
except BaseException as err:
raise NegotiationError("Auth flow failed") from err
if status_code in (301, 302, 303, 307, 308):
if "Location" not in response_headers:
raise InvalidHeader("Location")
raise RedirectHandshake(response_headers["Location"])
elif status_code != 101:
raise InvalidStatusCode(status_code, response_headers)
check_response(response_headers, key)
self.extensions = self.process_extensions(
response_headers, available_extensions
)
self.subprotocol = self.process_subprotocol(
response_headers, available_subprotocols
)
self.logger.debug("Handshake succeeded")
self.connection_open()
# Handling is functionally equivalent to httpx.AsyncClient auth handling
# though the semantics have changed to fit within websockets framework
# https://github.com/encode/httpx/blob/master/httpx/_client.py
async def http_handling_auth(
self,
request: Request
) -> Response:
"""Create auth flow generator and execute HTTP requests"""
requires_response_body = self.auth.requires_response_body
auth_flow = self.auth.async_auth_flow(request)
interface = HTTPInterface(self)
try:
request = await auth_flow.__anext__()
while True:
response = await interface.handle_async_request(request)
                # We don't want the auth flow to continue in the event of
                # a redirect
status_code = response[0]
if status_code in (301, 302, 303, 307, 308):
return response[:2]
if requires_response_body:
content = await interface.receive_body()
response = (*response, content)
try:
try:
next_request = await auth_flow.asend(response)
except StopAsyncIteration:
return response[:2]
request = next_request
except Exception as err:
raise err
await interface.start_next_cycle()
finally:
interface.teardown()
await auth_flow.aclose()
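# ---------------------------------------------------------------------------
# Hedged usage sketch (comments only; added for illustration, not part of
# ws_auth). SomeAuth stands for any Auth subclass and the URL is a
# placeholder; the protocol is plugged into websockets.connect() through its
# create_protocol hook:
#
#   import functools
#   import websockets
#
#   factory = functools.partial(WebsocketAuthProtocol,
#                               auth=SomeAuth(), follow_redirects=True)
#   async with websockets.connect("wss://example.invalid/ws",
#                                 create_protocol=factory) as ws:
#       await ws.send("ping")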
|
ws_auth/protocol.py
|
| 0.843331 | 0.053231 |
# Python modules
from threading import Lock
import operator
# Third-party modules
from django.db import models
import cachetools
# NOC modules
from noc.config import config
from noc.core.model.base import NOCModel
from noc.core.model.decorator import on_init
from noc.main.models.notificationgroup import NotificationGroup
from noc.core.datastream.decorator import datastream
from noc.core.model.decorator import on_delete_check
from noc.core.translation import ugettext as _
from .dnsserver import DNSServer
id_lock = Lock()
@on_init
@datastream
@on_delete_check(check=[("dns.DNSZone", "profile")])
class DNSZoneProfile(NOCModel):
"""
DNS Zone profile is a set of common parameters, shared between zones.
:param name:
:param masters:
:param slaves:
:param zone_soa:
:param zone_contact:
:param zone_refresh:
:param zone_retry:
:param zone_expire:
:param zone_ttl:
:param notification_group:
:param description:
"""
class Meta(object):
verbose_name = _("DNS Zone Profile")
verbose_name_plural = _("DNS Zone Profiles")
db_table = "dns_dnszoneprofile"
app_label = "dns"
name = models.CharField(_("Name"), max_length=32, unique=True)
masters = models.ManyToManyField(
DNSServer, verbose_name=_("Masters"), related_name="masters", blank=True
)
slaves = models.ManyToManyField(
DNSServer, verbose_name=_("Slaves"), related_name="slaves", blank=True
)
zone_soa = models.CharField(_("SOA"), max_length=64)
zone_contact = models.CharField(_("Contact"), max_length=64)
zone_refresh = models.IntegerField(_("Refresh"), default=3600)
zone_retry = models.IntegerField(_("Retry"), default=900)
zone_expire = models.IntegerField(_("Expire"), default=86400)
zone_ttl = models.IntegerField(_("TTL"), default=3600)
notification_group = models.ForeignKey(
NotificationGroup,
verbose_name=_("Notification Group"),
null=True,
blank=True,
help_text=_("Notification group to use when zone group is not set"),
on_delete=models.CASCADE,
)
description = models.TextField(_("Description"), blank=True, null=True)
_id_cache = cachetools.TTLCache(maxsize=100, ttl=60)
_name_cache = cachetools.TTLCache(maxsize=100, ttl=60)
def __str__(self):
return self.name
@classmethod
@cachetools.cachedmethod(operator.attrgetter("_id_cache"), lock=lambda _: id_lock)
def get_by_id(cls, id):
mo = DNSZoneProfile.objects.filter(id=id)[:1]
if mo:
return mo[0]
else:
return None
@classmethod
@cachetools.cachedmethod(operator.attrgetter("_name_cache"), lock=lambda _: id_lock)
def get_by_name(cls, name):
mo = DNSZoneProfile.objects.filter(name=name)[:1]
if mo:
return mo[0]
else:
return None
def iter_changed_datastream(self, changed_fields=None):
if not config.datastream.enable_dnszone:
return
for z in self.dnszone_set.all():
for ds, id in z.iter_changed_datastream(changed_fields=changed_fields):
yield ds, id
@property
def authoritative_servers(self):
"""
Returns a list of DNSServer instances for all zone's master and
slave servers
"""
return list(self.masters.all()) + list(self.slaves.all())
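# Hedged usage note (added for illustration): the cached classmethods above
# are the intended lookup entry points, e.g.
#
#   profile = DNSZoneProfile.get_by_name("default")
#   if profile:
#       servers = profile.authoritative_servers  # masters followed by slaves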
|
dns/models/dnszoneprofile.py
|
| 0.749912 | 0.123709 |
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import permission_required
from django.http import JsonResponse
from hknweb.coursesemester.models import Course
from .models import (
CoursePreference,
Slot,
Tutor,
TutorCourse,
TimeSlot,
TimeSlotPreference,
Room,
RoomPreference,
)
from .forms import (
TimeSlotPreferenceForm,
CoursePreferenceForm,
TutoringAlgorithmOutputForm,
)
import json
def initialize_tutoring():
if Room.objects.all().count() == 0:
generate_all_rooms()
if Slot.objects.all().count() == 0:
generate_all_slots()
if TutorCourse.objects.all().count() == 0:
generate_all_courses()
def index(request):
initialize_tutoring()
days = [name for _, name in TimeSlot.DAY_CHOICES]
hours = TimeSlot.HOUR_CHOICES
offices = []
for room in Room.objects.all():
slot = {
hour: Slot.objects.filter(room=room, timeslot__hour=hour)
.order_by("timeslot__hour")
.order_by("timeslot__day")
for hour, _ in hours
}
office = {
"room": str(room),
"slots": slot,
}
offices.append(office)
context = {
"days": days,
"hours": hours,
"offices": offices,
"form": TutoringAlgorithmOutputForm(),
}
return render(request, "tutoring/index.html", context)
@permission_required("tutoring.add_timeslotpreference", login_url="/accounts/login/")
def tutor_course_preference(request):
if Tutor.objects.filter(user=request.user).exists():
tutor = Tutor.objects.get(user=request.user)
else:
name = request.user.get_full_name()
tutor = Tutor(user=request.user, name=name)
tutor.save()
if CoursePreference.objects.filter(tutor=tutor).count() == 0:
initialize_course_preferences(tutor)
form = CoursePreferenceForm(request.POST or None, tutor=tutor)
context = {"form": form}
if request.method == "POST":
if form.is_valid():
form.save_course_preference_data()
return render(request, "tutoring/coursepref.html", context)
@permission_required("tutoring.add_timeslotpreference", login_url="/accounts/login/")
def tutor_slot_preference(request):
if Tutor.objects.filter(user=request.user).exists():
tutor = Tutor.objects.get(user=request.user)
else:
name = request.user.get_full_name()
tutor = Tutor(user=request.user, name=name)
tutor.save()
initialize_slot_preferences(tutor)
form = TimeSlotPreferenceForm(request.POST or None, tutor=tutor)
day_of_weeks_model = TimeSlot.objects.values_list("day", flat=True).distinct()
day_of_weeks = []
for day in day_of_weeks_model:
day_of_weeks.append(TimeSlot.DAYS_OF_WEEK[day])
hours = []
for hour in TimeSlot.objects.values_list("hour", flat=True).distinct():
hours.append((hour, TimeSlot.time(hour), TimeSlot.time_nexthour(hour)))
context = {"form": form, "days": day_of_weeks, "hours": hours, "message": ""}
if request.method == "POST":
if form.is_valid():
form.save_slot_preference_data()
context[
"message"
] = "Sign up form saved! (Don't forget to screenshot your selections)"
else:
msg = "An error occured, please screenshot your current entries and contact CompServ."
msg += " " + "Also send them the following: " + str(form.errors)
context["message"] = msg
return render(request, "tutoring/slotpref.html", context)
def generate_all_rooms():
for rooms in Room.DEFAULT_ROOM_CHOICES:
room_model = Room(id=rooms[0], building=rooms[1], room_num=rooms[2])
room_model.save()
def generate_all_courses():
for course in Course.objects.all():
tutor_course = TutorCourse(course=course)
tutor_course.save()
def generate_all_slots():
id = 0
timeslot_id = 0
room_querySet = Room.objects.all()
for hour, _ in TimeSlot.HOUR_CHOICES:
for day, _ in TimeSlot.DAY_CHOICES:
timeslot = TimeSlot(hour=hour, day=day, timeslot_id=timeslot_id)
timeslot_id += 1
timeslot.save()
for room in room_querySet:
slot = Slot(timeslot=timeslot, room=room, slot_id=id)
slot.save()
id += 1
def initialize_slot_preferences(tutor):
initialize_tutoring()
if TimeSlotPreference.objects.filter(tutor=tutor).count() == 0:
for timeslot in TimeSlot.objects.all():
timeslot_pref = TimeSlotPreference(tutor=tutor, timeslot=timeslot)
timeslot_pref.save()
if RoomPreference.objects.filter(tutor=tutor).count() == 0:
for timeslot in TimeSlot.objects.all():
for room in Room.objects.all():
room_pref = RoomPreference(tutor=tutor, timeslot=timeslot, room=room)
room_pref.save()
def initialize_course_preferences(tutor):
for course in TutorCourse.objects.all():
pref = CoursePreference(tutor=tutor, course=course)
pref.save()
def get_office_course_preferences(office):
courses = TutorCourse.objects.all()
prefs = []
# Cory
if office == 0:
for course in courses:
prefs.append(course.cory_preference)
# Soda
elif office == 1:
for course in courses:
prefs.append(course.soda_preference)
# TODO: Ability to generalize for good practice, currently assumes neutral
return prefs
# Generates file that will be fed into algorithm
@permission_required("tutoring.add_slot", login_url="/accounts/login/")
def prepare_algorithm_input(request):
input_data = {}
courses = []
for course in TutorCourse.objects.all():
courses.append(str(course.course))
input_data["courseName"] = courses
tutors = []
for tutor in Tutor.objects.all():
tutor_dict = {}
tutor_dict["tid"] = tutor.id
tutor_dict["name"] = tutor.name
slot_time_prefs = []
slot_office_prefs = []
for timeslot_pref in tutor.get_timeslot_preferences():
for _ in Slot.objects.filter(timeslot=timeslot_pref.timeslot):
slot_time_prefs.append(timeslot_pref.preference)
for room_pref in tutor.get_room_preferences():
if Slot.objects.filter(timeslot=room_pref.timeslot, room=room_pref.room).count() > 0:
slot_office_prefs.append(room_pref.preference)
tutor_dict["timeSlots"] = slot_time_prefs
tutor_dict["officePrefs"] = slot_office_prefs
course_prefs = []
for pref in tutor.get_course_preferences():
course_prefs.append(pref.preference)
tutor_dict["courses"] = course_prefs
tutor_dict["adjacentPref"] = tutor.adjacent_pref
tutor_dict["numAssignments"] = tutor.num_assignments
tutors.append(tutor_dict)
input_data["tutors"] = tutors
slots = []
cory_course_prefs = get_office_course_preferences(0)
soda_office_prefs = get_office_course_preferences(1)
for slot in Slot.objects.all().order_by("slot_id"):
slot_dict = {}
slot_dict["sid"] = slot.slot_id
slot_dict["name"] = "Slot {}".format(slot.slot_id)
slot_dict["adjacentSlotIDs"] = get_adjacent_slot_ids(slot)
        if slot.room_id == 0:  # compare the FK id; a Room instance never equals the int 0
slot_dict["courses"] = cory_course_prefs
else:
slot_dict["courses"] = soda_office_prefs
slot_dict["day"] = slot.timeslot.get_day()
slot_dict["hour"] = slot.timeslot.hour
slot_dict["office"] = slot.get_office()
slots.append(slot_dict)
input_data["slots"] = slots
return JsonResponse(input_data)
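# Hedged example of the JSON emitted by prepare_algorithm_input() above
# (field names come from the code; the values are made up for illustration):
#
#   {
#     "courseName": ["CS 61A", "EE 16A"],
#     "tutors": [{"tid": 1, "name": "Alice", "timeSlots": [...],
#                 "officePrefs": [...], "courses": [...],
#                 "adjacentPref": 1, "numAssignments": 2}],
#     "slots": [{"sid": 0, "name": "Slot 0", "adjacentSlotIDs": [...],
#                "courses": [...], "day": ..., "hour": ..., "office": ...}]
#   }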
def get_adjacent_slot_ids(slot):
slots_to_check = [
slot.get_previous_hour_slot(),
slot.get_after_hour_slot(),
]
return [s.slot_id for s in slots_to_check if s]
@permission_required("tutoring.add_slot", login_url="/accounts/login/")
def generate_schedule(request):
if request.method == "POST":
form = TutoringAlgorithmOutputForm(request.POST, request.FILES)
if form.is_valid():
output = request.FILES["output"]
data = json.loads(output.read().decode("utf-8"))
for slot_id in data:
slot = Slot.objects.get(slot_id=slot_id)
tutor_ids = data[slot_id]
for id in tutor_ids:
tutor = Tutor.objects.get(id=id)
slot.tutors.add(tutor)
return redirect("/tutoring/")
|
hknweb/tutoring/views.py
|
| 0.262936 | 0.287318 |
from __future__ import print_function
import datetime
import subprocess
import sys
import os
import numpy as np
import pytz
import pygrib
from pyiem.plot import MapPlot
import pyiem.reference as ref
from pyiem.util import utc
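# Forecast length in hours, indexed by HRRR model cycle hour (0-23):
# the 0/6/12/18 UTC cycles are processed out to 36 hours, the others to 18.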
HOURS = [
36, 18, 18, 18, 18, 18,
36, 18, 18, 18, 18, 18,
36, 18, 18, 18, 18, 18,
36, 18, 18, 18, 18, 18
]
def compute_bounds(lons, lats):
"""figure out a minimum box to extract data from, save CPU"""
dist = ((lats - ref.MW_NORTH)**2 + (lons - ref.MW_WEST)**2)**0.5
x2, y1 = np.unravel_index(dist.argmin(), dist.shape)
dist = ((lats - ref.MW_SOUTH)**2 + (lons - ref.MW_EAST)**2)**0.5
x1, y2 = np.unravel_index(dist.argmin(), dist.shape)
return x1 - 100, x2 + 100, y1 - 100, y2 + 100
def run(valid, routes):
''' Generate the plot for the given UTC time '''
fn = valid.strftime(("/mesonet/ARCHIVE/data/%Y/%m/%d/model/hrrr/%H/"
"hrrr.t%Hz.refd.grib2"))
if not os.path.isfile(fn):
print("hrrr/plot_ref missing %s" % (fn, ))
return
grbs = pygrib.open(fn)
lats = None
lons = None
i = 0
for minute in range(0, HOURS[valid.hour] * 60 + 1, 15):
if minute > (18 * 60) and minute % 60 != 0:
continue
now = valid + datetime.timedelta(minutes=minute)
now = now.astimezone(pytz.timezone("America/Chicago"))
grbs.seek(0)
try:
gs = grbs.select(level=1000,
forecastTime=(minute
if minute <= (18 * 60)
else int(minute / 60)))
except ValueError:
continue
if lats is None:
lats, lons = gs[0].latlons()
x1, x2, y1, y2 = compute_bounds(lons, lats)
lats = lats[x1:x2, y1:y2]
lons = lons[x1:x2, y1:y2]
# HACK..............
if len(gs) > 1 and minute > (18*60):
reflect = gs[-1]['values'][x1:x2, y1:y2]
else:
reflect = gs[0]['values'][x1:x2, y1:y2]
mp = MapPlot(sector='midwest', axisbg='tan',
title=('%s UTC NCEP HRRR 1 km AGL Reflectivity'
) % (valid.strftime("%-d %b %Y %H"),),
subtitle=('valid: %s'
) % (now.strftime("%-d %b %Y %I:%M %p %Z"),))
mp.pcolormesh(lons, lats, reflect, np.arange(0, 75, 5), units='dBZ',
clip_on=False)
pngfn = '/tmp/hrrr_ref_%s_%03i.png' % (valid.strftime("%Y%m%d%H"), i)
mp.postprocess(filename=pngfn)
mp.close()
subprocess.call(("convert %s "
"%s.gif") % (pngfn, pngfn[:-4]), shell=True)
i += 1
# Generate anim GIF
subprocess.call(("gifsicle --loopcount=0 --delay=50 "
"/tmp/hrrr_ref_%s_???.gif > /tmp/hrrr_ref_%s.gif"
) % (valid.strftime("%Y%m%d%H"),
valid.strftime("%Y%m%d%H")),
shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
pqstr = ("plot %s %s model/hrrr/hrrr_1km_ref.gif "
"model/hrrr/hrrr_1km_ref_%02i.gif gif"
) % (routes, valid.strftime("%Y%m%d%H%M"), valid.hour)
subprocess.call(("/home/ldm/bin/pqinsert -p '%s' /tmp/hrrr_ref_%s.gif"
) % (pqstr, valid.strftime("%Y%m%d%H")),
shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
subprocess.call("rm -f /tmp/hrrr_ref_%s*" % (valid.strftime("%Y%m%d%H"), ),
shell=True)
def main(argv):
"""Go Main"""
valid = utc(int(argv[1]), int(argv[2]), int(argv[3]), int(argv[4]))
now = utc()
routes = 'a'
if (now - valid) < datetime.timedelta(hours=2):
routes = 'ac'
# See if we already have output
fn = valid.strftime(
"/mesonet/ARCHIVE/data/%Y/%m/%d/model/hrrr/hrrr_1km_ref_%H.gif"
)
if not os.path.isfile(fn):
run(valid, routes)
if __name__ == '__main__':
# go go gadget
main(sys.argv)
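# Hedged example invocation (arguments are UTC year, month, day and hour):
#   python plot_ref.py 2017 6 1 12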
|
scripts/hrrr/plot_ref.py
|
from __future__ import print_function
import datetime
import subprocess
import sys
import os
import numpy as np
import pytz
import pygrib
from pyiem.plot import MapPlot
import pyiem.reference as ref
from pyiem.util import utc
HOURS = [
36, 18, 18, 18, 18, 18,
36, 18, 18, 18, 18, 18,
36, 18, 18, 18, 18, 18,
36, 18, 18, 18, 18, 18
]
def compute_bounds(lons, lats):
"""figure out a minimum box to extract data from, save CPU"""
dist = ((lats - ref.MW_NORTH)**2 + (lons - ref.MW_WEST)**2)**0.5
x2, y1 = np.unravel_index(dist.argmin(), dist.shape)
dist = ((lats - ref.MW_SOUTH)**2 + (lons - ref.MW_EAST)**2)**0.5
x1, y2 = np.unravel_index(dist.argmin(), dist.shape)
return x1 - 100, x2 + 100, y1 - 100, y2 + 100
def run(valid, routes):
''' Generate the plot for the given UTC time '''
fn = valid.strftime(("/mesonet/ARCHIVE/data/%Y/%m/%d/model/hrrr/%H/"
"hrrr.t%Hz.refd.grib2"))
if not os.path.isfile(fn):
print("hrrr/plot_ref missing %s" % (fn, ))
return
grbs = pygrib.open(fn)
lats = None
lons = None
i = 0
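    # The loop assumes 15 minute output through forecast hour 18 and hourly output beyond that, so later non-hourly steps are skipped.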
for minute in range(0, HOURS[valid.hour] * 60 + 1, 15):
if minute > (18 * 60) and minute % 60 != 0:
continue
now = valid + datetime.timedelta(minutes=minute)
now = now.astimezone(pytz.timezone("America/Chicago"))
grbs.seek(0)
try:
gs = grbs.select(level=1000,
forecastTime=(minute
if minute <= (18 * 60)
else int(minute / 60)))
except ValueError:
continue
if lats is None:
lats, lons = gs[0].latlons()
x1, x2, y1, y2 = compute_bounds(lons, lats)
lats = lats[x1:x2, y1:y2]
lons = lons[x1:x2, y1:y2]
        # HACK: past forecast hour 18, select() may return multiple matching messages; take the last one.
if len(gs) > 1 and minute > (18*60):
reflect = gs[-1]['values'][x1:x2, y1:y2]
else:
reflect = gs[0]['values'][x1:x2, y1:y2]
mp = MapPlot(sector='midwest', axisbg='tan',
title=('%s UTC NCEP HRRR 1 km AGL Reflectivity'
) % (valid.strftime("%-d %b %Y %H"),),
subtitle=('valid: %s'
) % (now.strftime("%-d %b %Y %I:%M %p %Z"),))
mp.pcolormesh(lons, lats, reflect, np.arange(0, 75, 5), units='dBZ',
clip_on=False)
pngfn = '/tmp/hrrr_ref_%s_%03i.png' % (valid.strftime("%Y%m%d%H"), i)
mp.postprocess(filename=pngfn)
mp.close()
subprocess.call(("convert %s "
"%s.gif") % (pngfn, pngfn[:-4]), shell=True)
i += 1
# Generate anim GIF
subprocess.call(("gifsicle --loopcount=0 --delay=50 "
"/tmp/hrrr_ref_%s_???.gif > /tmp/hrrr_ref_%s.gif"
) % (valid.strftime("%Y%m%d%H"),
valid.strftime("%Y%m%d%H")),
shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
pqstr = ("plot %s %s model/hrrr/hrrr_1km_ref.gif "
"model/hrrr/hrrr_1km_ref_%02i.gif gif"
) % (routes, valid.strftime("%Y%m%d%H%M"), valid.hour)
subprocess.call(("/home/ldm/bin/pqinsert -p '%s' /tmp/hrrr_ref_%s.gif"
) % (pqstr, valid.strftime("%Y%m%d%H")),
shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
subprocess.call("rm -f /tmp/hrrr_ref_%s*" % (valid.strftime("%Y%m%d%H"), ),
shell=True)
def main(argv):
"""Go Main"""
valid = utc(int(argv[1]), int(argv[2]), int(argv[3]), int(argv[4]))
now = utc()
routes = 'a'
if (now - valid) < datetime.timedelta(hours=2):
routes = 'ac'
# See if we already have output
fn = valid.strftime(
"/mesonet/ARCHIVE/data/%Y/%m/%d/model/hrrr/hrrr_1km_ref_%H.gif"
)
if not os.path.isfile(fn):
run(valid, routes)
if __name__ == '__main__':
# go go gadget
main(sys.argv)
| 0.329392 | 0.237653 |
from __future__ import print_function
import argparse
import os
import webbrowser
from shutil import copyfile
from urllib import pathname2url
import pdfkit
from os.path import join, dirname
from rope.base.pyobjectsdef import _AssignVisitor
p = argparse.ArgumentParser()
p.add_argument('--root', help="The root folder of a dataset, which is "
"a folder. Under this folder I expect to find "
"a name (e.g. 'congo') and then a mega-facade "
"index and an image-index. For example 'congo/1/2'")
p.add_argument('--outdir', help="Folder for results, broken up into small pages")
args = p.parse_args()
counter = 1
def save_html(outpath, root, datasets):
global counter
f = open(outpath, 'w')
print("<html><body>", file=f)
print("<!--", "args.root =", root, "-->", file=f)
for dataset in datasets:
if not os.path.isdir(join(root, dataset)):
continue
print('<div style="float:top;">', file=f)
print('<h1> Dataset ', dataset, '</h1>', file=f)
for megafacade in os.listdir(join(root, dataset)):
print('<div style="float:top;white-space: nowrap;">', file=f)
print('<h2> Megafacade ', megafacade, '</h2>', file=f)
for image in os.listdir(join(root, dataset, megafacade)):
image_folder = join(root, dataset, megafacade, image)
regions_jpg = join(image_folder, 'regions.jpg')
if not os.path.isdir(image_folder):
continue
print('<div style="display:inline-block;">', file=f)
localname = "img_{:06}.jpg".format(counter)
counter += 1
copyfile(regions_jpg, join(dirname(outpath), localname))
                print('<img height="400" src="{}">'.format(localname), file=f)
print('</div>', file=f)
print('<div style="clear: both"></div>', file=f)
print('</div>', file=f)
print('</div>', file=f)
print("</body></html>", file=f)
outdir = args.outdir
try:
os.makedirs(outdir)
except OSError as e:
pass
datasets = [d for d in os.listdir(args.root) if os.path.isdir(join(args.root, d))]
n = 5
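# Break the datasets into pages of five so each generated report page stays small.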
pages = [datasets[i:min(len(datasets), i + n)] for i in range(0, len(datasets), n)]
idx = open(join(outdir, 'index.html'), 'w')
print("<html><body><ol>", file=idx)
for i, page in enumerate(pages):
print(i+1)
outpath = join(outdir, 'report-page-{:04}.html'.format(i + 1))
print("<li><a href={url}>{url}</a>".format(url=pathname2url(os.path.relpath(outpath, outdir))), file=idx)
save_html(outpath, args.root, page)
print("</ol></body></html>", file=idx)
webbrowser.open(join(outdir, 'index.html'))
|
scripts/i12-eval/report.py
|
from __future__ import print_function
import argparse
import os
import webbrowser
from shutil import copyfile
from urllib import pathname2url
import pdfkit
from os.path import join, dirname
from rope.base.pyobjectsdef import _AssignVisitor
p = argparse.ArgumentParser()
p.add_argument('--root', help="The root folder of a dataset, which is "
"a folder. Under this folder I expect to find "
"a name (e.g. 'congo') and then a mega-facade "
"index and an image-index. For example 'congo/1/2'")
p.add_argument('--outdir', help="Folder for results, broken up into small pages")
args = p.parse_args()
counter = 1
def save_html(outpath, root, datasets):
global counter
f = open(outpath, 'w')
print("<html><body>", file=f)
print("<!--", "args.root =", root, "-->", file=f)
for dataset in datasets:
if not os.path.isdir(join(root, dataset)):
continue
print('<div style="float:top;">', file=f)
print('<h1> Dataset ', dataset, '</h1>', file=f)
for megafacade in os.listdir(join(root, dataset)):
print('<div style="float:top;white-space: nowrap;">', file=f)
print('<h2> Megafacade ', megafacade, '</h2>', file=f)
for image in os.listdir(join(root, dataset, megafacade)):
image_folder = join(root, dataset, megafacade, image)
regions_jpg = join(image_folder, 'regions.jpg')
if not os.path.isdir(image_folder):
continue
print('<div style="display:inline-block;">', file=f)
localname = "img_{:06}.jpg".format(counter)
counter += 1
copyfile(regions_jpg, join(dirname(outpath), localname))
                print('<img height="400" src="{}">'.format(localname), file=f)
print('</div>', file=f)
print('<div style="clear: both"></div>', file=f)
print('</div>', file=f)
print('</div>', file=f)
print("</body></html>", file=f)
outdir = args.outdir
try:
os.makedirs(outdir)
except OSError as e:
pass
datasets = [d for d in os.listdir(args.root) if os.path.isdir(join(args.root, d))]
n = 5
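# Break the datasets into pages of five so each generated report page stays small.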
pages = [datasets[i:min(len(datasets), i + n)] for i in range(0, len(datasets), n)]
idx = open(join(outdir, 'index.html'), 'w')
print("<html><body><ol>", file=idx)
for i, page in enumerate(pages):
print(i+1)
outpath = join(outdir, 'report-page-{:04}.html'.format(i + 1))
print("<li><a href={url}>{url}</a>".format(url=pathname2url(os.path.relpath(outpath, outdir))), file=idx)
save_html(outpath, args.root, page)
print("</ol></body></html>", file=idx)
webbrowser.open(join(outdir, 'index.html'))
| 0.163646 | 0.119511 |
import json, time, zlib
from sims4.gsi.schema import GsiSchema, CLIENT_GSI_ARCHIVE_UID_FIX
from uid import UniqueIdGenerator
import sims4.gsi.dispatcher, sims4.log, sims4.reload, sims4.zone_utils
logger = sims4.log.Logger('GSI')
with sims4.reload.protected(globals()):
archive_data = {}
archive_schemas = {}
all_archivers = {}
archive_id = UniqueIdGenerator()
ARCHIVE_DEFAULT_RECORDS = 50
ARCHIVE_MAX_RECORDS = ARCHIVE_DEFAULT_RECORDS
def set_max_archive_records(max_records):
global ARCHIVE_MAX_RECORDS
ARCHIVE_MAX_RECORDS = max_records
def set_max_archive_records_default():
set_max_archive_records(ARCHIVE_DEFAULT_RECORDS)
def set_archive_enabled(archive_type, enable=True):
if archive_type in all_archivers:
all_archivers[archive_type].archive_enable_fn(enableLog=enable)
else:
logger.error('Tried to enable {} which is not a valid archive name'.format(archive_type))
def is_archive_enabled(archive_type):
if archive_type in all_archivers:
return all_archivers[archive_type].enabled
logger.error("Tried to determine if {} is enabled but doesn't exist".format(archive_type))
return False
def set_all_archivers_enabled(enable=True):
for archiver in all_archivers.values():
if archiver.enabled != enable:
if not enable or archiver._enable_on_all_enable:
archiver.archive_enable_fn(enableLog=enable)
def clear_archive_records(archive_type, sim_id=None):
if archive_type in all_archivers:
all_archivers[archive_type].clear_archive(sim_id=sim_id)
else:
logger.error('Trying to clear all archive entries from {} which is not a valid archive type.'.format(archive_type))
class BaseArchiver:
__slots__ = ('_type_name', '_custom_enable_fn', '_archive_enabled', '_enable_on_all_enable',
'__weakref__')
def __init__(self, type_name=None, enable_archive_by_default=False, add_to_archive_enable_functions=False, custom_enable_fn=None):
self._type_name = type_name
self._custom_enable_fn = custom_enable_fn
self._enable_on_all_enable = add_to_archive_enable_functions
self._archive_enabled = False
all_archivers[type_name] = self
@property
def enabled(self):
return self._archive_enabled
def archive_enable_fn(self, *args, enableLog=False, **kwargs):
self._archive_enabled = enableLog
if self._custom_enable_fn is not None:
(self._custom_enable_fn)(args, enableLog=enableLog, **kwargs)
return '{{"log_enabled":{}}}'.format('true' if enableLog else 'false')
def clear_archive(self, sim_id=None):
pass
class Archiver(BaseArchiver):
__slots__ = ('_sim_specific', '_max_records')
def __init__(self, type_name=None, schema=None, max_records=None, enable_archive_by_default=False, add_to_archive_enable_functions=False, custom_enable_fn=None):
super().__init__(type_name=type_name, enable_archive_by_default=enable_archive_by_default,
add_to_archive_enable_functions=add_to_archive_enable_functions,
custom_enable_fn=custom_enable_fn)
self._sim_specific = schema.is_sim_specific
self._max_records = max_records
sims4.gsi.dispatcher.add_handler('{}{}'.format(type_name, sims4.gsi.dispatcher.ARCHIVE_TOGGLE_SUFFIX), None, lambda *args, **kwargs: (self.archive_enable_fn)(*args, **kwargs))
register_archive_type(type_name, schema,
partition_by_obj=(self._sim_specific))
def clear_archive(self, sim_id=None):
if self._sim_specific:
if sim_id is not None:
del archive_data[self._type_name][sim_id]
archive_data[self._type_name][sim_id] = []
else:
logger.error('No Sim Id provided when trying to clear a sim specific archive.')
else:
del archive_data[self._type_name]
archive_data[self._type_name] = []
def archive(self, data=None, object_id=None, game_time=None, zone_override=None):
if zone_override is not None:
zone_id = zone_override
else:
zone_id = sims4.zone_utils.zone_id
if not zone_id:
logger.error('Archiving data to zone 0. This data will be inaccessible to the GSI.')
zone_id = 0
else:
now = int(time.time())
record = ArchiveRecord(zone_id=zone_id, object_id=object_id, timestamp=now, game_time=game_time, data=data)
if self._sim_specific:
if object_id is None:
logger.error('Archiving data to a sim_specific archive with no object ID. This data will be inaccessible to the GSI.')
archive_list = archive_data[self._type_name].get(object_id)
if archive_list is None:
archive_list = []
archive_data[self._type_name][object_id] = archive_list
else:
archive_list = archive_data[self._type_name]
archive_list.append(record)
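            # Trim the oldest records so the list stays within the cap; a per-archiver maximum may raise the global default.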
num_max_records = ARCHIVE_MAX_RECORDS
if self._max_records is not None:
if num_max_records < self._max_records:
num_max_records = self._max_records
num_records = len(archive_list)
if num_records > num_max_records:
diff = num_records - num_max_records
while diff > 0:
del archive_list[0]
diff -= 1
class ArchiveRecord:
__slots__ = ('zone_id', 'object_id', 'timestamp', 'uid', 'compressed_json')
def __init__(self, zone_id=None, object_id=None, timestamp=None, game_time=None, data=None):
self.zone_id = zone_id
self.object_id = object_id
self.timestamp = timestamp
self.uid = archive_id()
full_dict = {'zone_id':hex(zone_id),
'object_id':hex(object_id) if object_id is not None else 'None',
'timestamp':timestamp,
'game_time':game_time,
'uid':self.uid}
for key, field in data.items():
full_dict[key] = field
uncompressed_json = json.dumps(full_dict)
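        # Store the serialized record compressed; it is only decompressed when the GSI requests it.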
self.compressed_json = zlib.compress(uncompressed_json.encode())
def register_archive_type(type_name, schema, partition_by_obj=False):
if isinstance(schema, GsiSchema):
schema = schema.output
if type_name in archive_schemas:
logger.error('Replacing archive type for {}.', type_name)
del archive_schemas[type_name]
path = type_name.strip('/')
new_archive = archive_data.get(type_name)
if new_archive is None:
if partition_by_obj:
new_archive = {}
else:
new_archive = []
archive_data[type_name] = new_archive
actual_schema = {'archive':True,
'perf_toggle':True,
'unique_field':'uid',
'definition':[
{'name':'zone_id',
'type':'string', 'label':'Zone', 'hidden':True},
{'name':'object_id',
'type':'string', 'label':'Object ID', 'hidden':True},
{'name':'timestamp',
'type':'int', 'label':'Time', 'is_time':True, 'axis':'xField', 'sort_field':'uid'},
{'name':'game_time',
'type':'string', 'label':'Game Time', 'hidden':True},
{'name':'uid',
'type':'int', 'label':'UId', 'hidden':True}]}
for key, entry in schema.items():
if key == 'definition':
for definition_entry in entry:
actual_schema['definition'].append(definition_entry)
else:
actual_schema[key] = entry
for key, value in schema.items():
if key not in ('definition', 'associations'):
actual_schema[key] = value
archive_schemas[type_name] = actual_schema
def archive_handler(zone_id=None, object_id=None, sim_id=None, timestamp=None, uid=None, uncompress=True):
        if object_id is None and sim_id is not None:
            object_id = sim_id
        if partition_by_obj:
            archive_data_list = archive_data[type_name].get(object_id)
            if archive_data_list is None:
                return '[]'
        else:
            archive_data_list = archive_data[type_name]
        json_output = '[]'
        try:
            record_data = []
            for record in archive_data_list:
                if zone_id is not None:
                    if zone_id != record.zone_id:
                        continue
                elif object_id is not None and object_id != record.object_id:
                    continue
                if sims4.gsi.dispatcher.gsi_client_version < CLIENT_GSI_ARCHIVE_UID_FIX:
                    if timestamp is not None and timestamp >= record.timestamp:
                        continue
                elif uid is not None:
                    if uid >= record.uid:
                        continue
                record_data.append(record.compressed_json)
            if uncompress:
                json_output = '[{}]'.format(','.join((zlib.decompress(record).decode('utf-8') for record in record_data)))
            else:
                return record_data
        except MemoryError:
            logger.error('Archive Data[{}] has too many entries: {}', type_name, len(archive_data_list))
            json_output = '[]'
        return json_output
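    # Register the handler so the GSI dispatcher can serve this archive type's records.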
sims4.gsi.dispatcher.GsiHandler(path, actual_schema, suppress_json=True)(archive_handler)
|
Scripts/core/sims4/gsi/archive.py
|
import json, time, zlib
from sims4.gsi.schema import GsiSchema, CLIENT_GSI_ARCHIVE_UID_FIX
from uid import UniqueIdGenerator
import sims4.gsi.dispatcher, sims4.log, sims4.reload, sims4.zone_utils
logger = sims4.log.Logger('GSI')
with sims4.reload.protected(globals()):
archive_data = {}
archive_schemas = {}
all_archivers = {}
archive_id = UniqueIdGenerator()
ARCHIVE_DEFAULT_RECORDS = 50
ARCHIVE_MAX_RECORDS = ARCHIVE_DEFAULT_RECORDS
def set_max_archive_records(max_records):
global ARCHIVE_MAX_RECORDS
ARCHIVE_MAX_RECORDS = max_records
def set_max_archive_records_default():
set_max_archive_records(ARCHIVE_DEFAULT_RECORDS)
def set_archive_enabled(archive_type, enable=True):
if archive_type in all_archivers:
all_archivers[archive_type].archive_enable_fn(enableLog=enable)
else:
logger.error('Tried to enable {} which is not a valid archive name'.format(archive_type))
def is_archive_enabled(archive_type):
if archive_type in all_archivers:
return all_archivers[archive_type].enabled
logger.error("Tried to determine if {} is enabled but doesn't exist".format(archive_type))
return False
def set_all_archivers_enabled(enable=True):
for archiver in all_archivers.values():
if archiver.enabled != enable:
if not enable or archiver._enable_on_all_enable:
archiver.archive_enable_fn(enableLog=enable)
def clear_archive_records(archive_type, sim_id=None):
if archive_type in all_archivers:
all_archivers[archive_type].clear_archive(sim_id=sim_id)
else:
logger.error('Trying to clear all archive entries from {} which is not a valid archive type.'.format(archive_type))
class BaseArchiver:
__slots__ = ('_type_name', '_custom_enable_fn', '_archive_enabled', '_enable_on_all_enable',
'__weakref__')
def __init__(self, type_name=None, enable_archive_by_default=False, add_to_archive_enable_functions=False, custom_enable_fn=None):
self._type_name = type_name
self._custom_enable_fn = custom_enable_fn
self._enable_on_all_enable = add_to_archive_enable_functions
self._archive_enabled = False
all_archivers[type_name] = self
@property
def enabled(self):
return self._archive_enabled
def archive_enable_fn(self, *args, enableLog=False, **kwargs):
self._archive_enabled = enableLog
if self._custom_enable_fn is not None:
(self._custom_enable_fn)(args, enableLog=enableLog, **kwargs)
return '{{"log_enabled":{}}}'.format('true' if enableLog else 'false')
def clear_archive(self, sim_id=None):
pass
class Archiver(BaseArchiver):
__slots__ = ('_sim_specific', '_max_records')
def __init__(self, type_name=None, schema=None, max_records=None, enable_archive_by_default=False, add_to_archive_enable_functions=False, custom_enable_fn=None):
super().__init__(type_name=type_name, enable_archive_by_default=enable_archive_by_default,
add_to_archive_enable_functions=add_to_archive_enable_functions,
custom_enable_fn=custom_enable_fn)
self._sim_specific = schema.is_sim_specific
self._max_records = max_records
sims4.gsi.dispatcher.add_handler('{}{}'.format(type_name, sims4.gsi.dispatcher.ARCHIVE_TOGGLE_SUFFIX), None, lambda *args, **kwargs: (self.archive_enable_fn)(*args, **kwargs))
register_archive_type(type_name, schema,
partition_by_obj=(self._sim_specific))
def clear_archive(self, sim_id=None):
if self._sim_specific:
if sim_id is not None:
del archive_data[self._type_name][sim_id]
archive_data[self._type_name][sim_id] = []
else:
logger.error('No Sim Id provided when trying to clear a sim specific archive.')
else:
del archive_data[self._type_name]
archive_data[self._type_name] = []
def archive(self, data=None, object_id=None, game_time=None, zone_override=None):
if zone_override is not None:
zone_id = zone_override
else:
zone_id = sims4.zone_utils.zone_id
if not zone_id:
logger.error('Archiving data to zone 0. This data will be inaccessible to the GSI.')
zone_id = 0
else:
now = int(time.time())
record = ArchiveRecord(zone_id=zone_id, object_id=object_id, timestamp=now, game_time=game_time, data=data)
if self._sim_specific:
if object_id is None:
logger.error('Archiving data to a sim_specific archive with no object ID. This data will be inaccessible to the GSI.')
archive_list = archive_data[self._type_name].get(object_id)
if archive_list is None:
archive_list = []
archive_data[self._type_name][object_id] = archive_list
else:
archive_list = archive_data[self._type_name]
archive_list.append(record)
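            # Trim the oldest records so the list stays within the cap; a per-archiver maximum may raise the global default.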
num_max_records = ARCHIVE_MAX_RECORDS
if self._max_records is not None:
if num_max_records < self._max_records:
num_max_records = self._max_records
num_records = len(archive_list)
if num_records > num_max_records:
diff = num_records - num_max_records
while diff > 0:
del archive_list[0]
diff -= 1
class ArchiveRecord:
__slots__ = ('zone_id', 'object_id', 'timestamp', 'uid', 'compressed_json')
def __init__(self, zone_id=None, object_id=None, timestamp=None, game_time=None, data=None):
self.zone_id = zone_id
self.object_id = object_id
self.timestamp = timestamp
self.uid = archive_id()
full_dict = {'zone_id':hex(zone_id),
'object_id':hex(object_id) if object_id is not None else 'None',
'timestamp':timestamp,
'game_time':game_time,
'uid':self.uid}
for key, field in data.items():
full_dict[key] = field
uncompressed_json = json.dumps(full_dict)
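        # Store the serialized record compressed; it is only decompressed when the GSI requests it.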
self.compressed_json = zlib.compress(uncompressed_json.encode())
def register_archive_type(type_name, schema, partition_by_obj=False):
if isinstance(schema, GsiSchema):
schema = schema.output
if type_name in archive_schemas:
logger.error('Replacing archive type for {}.', type_name)
del archive_schemas[type_name]
path = type_name.strip('/')
new_archive = archive_data.get(type_name)
if new_archive is None:
if partition_by_obj:
new_archive = {}
else:
new_archive = []
archive_data[type_name] = new_archive
actual_schema = {'archive':True,
'perf_toggle':True,
'unique_field':'uid',
'definition':[
{'name':'zone_id',
'type':'string', 'label':'Zone', 'hidden':True},
{'name':'object_id',
'type':'string', 'label':'Object ID', 'hidden':True},
{'name':'timestamp',
'type':'int', 'label':'Time', 'is_time':True, 'axis':'xField', 'sort_field':'uid'},
{'name':'game_time',
'type':'string', 'label':'Game Time', 'hidden':True},
{'name':'uid',
'type':'int', 'label':'UId', 'hidden':True}]}
for key, entry in schema.items():
if key == 'definition':
for definition_entry in entry:
actual_schema['definition'].append(definition_entry)
else:
actual_schema[key] = entry
for key, value in schema.items():
if key not in ('definition', 'associations'):
actual_schema[key] = value
archive_schemas[type_name] = actual_schema
def archive_handler(zone_id=None, object_id=None, sim_id=None, timestamp=None, uid=None, uncompress=True):
        if object_id is None and sim_id is not None:
            object_id = sim_id
        if partition_by_obj:
            archive_data_list = archive_data[type_name].get(object_id)
            if archive_data_list is None:
                return '[]'
        else:
            archive_data_list = archive_data[type_name]
        json_output = '[]'
        try:
            record_data = []
            for record in archive_data_list:
                if zone_id is not None:
                    if zone_id != record.zone_id:
                        continue
                elif object_id is not None and object_id != record.object_id:
                    continue
                if sims4.gsi.dispatcher.gsi_client_version < CLIENT_GSI_ARCHIVE_UID_FIX:
                    if timestamp is not None and timestamp >= record.timestamp:
                        continue
                elif uid is not None:
                    if uid >= record.uid:
                        continue
                record_data.append(record.compressed_json)
            if uncompress:
                json_output = '[{}]'.format(','.join((zlib.decompress(record).decode('utf-8') for record in record_data)))
            else:
                return record_data
        except MemoryError:
            logger.error('Archive Data[{}] has too many entries: {}', type_name, len(archive_data_list))
            json_output = '[]'
        return json_output
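    # Register the handler so the GSI dispatcher can serve this archive type's records.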
sims4.gsi.dispatcher.GsiHandler(path, actual_schema, suppress_json=True)(archive_handler)
| 0.472927 | 0.112089 |
from __future__ import absolute_import
from cdsl.formats import InstructionFormat
from cdsl.operands import VALUE, VARIABLE_ARGS
from .immediates import imm64, uimm8, uimm32, ieee32, ieee64, offset32
from .immediates import boolean, intcc, floatcc, memflags, regunit, trapcode
from . import entities
from .entities import ebb, sig_ref, func_ref, stack_slot, heap, table
Unary = InstructionFormat(VALUE)
UnaryImm = InstructionFormat(imm64)
UnaryIeee32 = InstructionFormat(ieee32)
UnaryIeee64 = InstructionFormat(ieee64)
UnaryBool = InstructionFormat(boolean)
UnaryGlobalValue = InstructionFormat(entities.global_value)
Binary = InstructionFormat(VALUE, VALUE)
BinaryImm = InstructionFormat(VALUE, imm64)
# The select instructions are controlled by the second VALUE operand.
# The first VALUE operand is the controlling flag which has a derived type.
# The fma instruction has the same constraint on all inputs.
Ternary = InstructionFormat(VALUE, VALUE, VALUE, typevar_operand=1)
# Catch-all for instructions with many outputs and inputs and no immediate
# operands.
MultiAry = InstructionFormat(VARIABLE_ARGS)
NullAry = InstructionFormat()
InsertLane = InstructionFormat(VALUE, ('lane', uimm8), VALUE)
ExtractLane = InstructionFormat(VALUE, ('lane', uimm8))
IntCompare = InstructionFormat(intcc, VALUE, VALUE)
IntCompareImm = InstructionFormat(intcc, VALUE, imm64)
IntCond = InstructionFormat(intcc, VALUE)
FloatCompare = InstructionFormat(floatcc, VALUE, VALUE)
FloatCond = InstructionFormat(floatcc, VALUE)
IntSelect = InstructionFormat(intcc, VALUE, VALUE, VALUE)
Jump = InstructionFormat(ebb, VARIABLE_ARGS)
Branch = InstructionFormat(VALUE, ebb, VARIABLE_ARGS)
BranchInt = InstructionFormat(intcc, VALUE, ebb, VARIABLE_ARGS)
BranchFloat = InstructionFormat(floatcc, VALUE, ebb, VARIABLE_ARGS)
BranchIcmp = InstructionFormat(intcc, VALUE, VALUE, ebb, VARIABLE_ARGS)
BranchTable = InstructionFormat(VALUE, ebb, entities.jump_table)
BranchTableEntry = InstructionFormat(VALUE, VALUE, uimm8, entities.jump_table)
BranchTableBase = InstructionFormat(entities.jump_table)
IndirectJump = InstructionFormat(VALUE, entities.jump_table)
Call = InstructionFormat(func_ref, VARIABLE_ARGS)
CallIndirect = InstructionFormat(sig_ref, VALUE, VARIABLE_ARGS)
FuncAddr = InstructionFormat(func_ref)
Load = InstructionFormat(memflags, VALUE, offset32)
LoadComplex = InstructionFormat(memflags, VARIABLE_ARGS, offset32)
Store = InstructionFormat(memflags, VALUE, VALUE, offset32)
StoreComplex = InstructionFormat(memflags, VALUE, VARIABLE_ARGS, offset32)
StackLoad = InstructionFormat(stack_slot, offset32)
StackStore = InstructionFormat(VALUE, stack_slot, offset32)
# Accessing a WebAssembly heap.
HeapAddr = InstructionFormat(heap, VALUE, uimm32)
# Accessing a WebAssembly table.
TableAddr = InstructionFormat(table, VALUE, offset32)
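# RegMove through RegFill describe instructions inserted by register allocation: moves, copies, spills and fills.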
RegMove = InstructionFormat(VALUE, ('src', regunit), ('dst', regunit))
CopySpecial = InstructionFormat(('src', regunit), ('dst', regunit))
CopyNop = InstructionFormat(
('src', entities.stack_slot), ('dst', entities.stack_slot))
RegSpill = InstructionFormat(
VALUE, ('src', regunit), ('dst', entities.stack_slot))
RegFill = InstructionFormat(
VALUE, ('src', entities.stack_slot), ('dst', regunit))
Trap = InstructionFormat(trapcode)
CondTrap = InstructionFormat(VALUE, trapcode)
IntCondTrap = InstructionFormat(intcc, VALUE, trapcode)
FloatCondTrap = InstructionFormat(floatcc, VALUE, trapcode)
# Finally extract the names of global values in this module.
InstructionFormat.extract_names(globals())
|
cranelift-codegen/meta-python/base/formats.py
|
from __future__ import absolute_import
from cdsl.formats import InstructionFormat
from cdsl.operands import VALUE, VARIABLE_ARGS
from .immediates import imm64, uimm8, uimm32, ieee32, ieee64, offset32
from .immediates import boolean, intcc, floatcc, memflags, regunit, trapcode
from . import entities
from .entities import ebb, sig_ref, func_ref, stack_slot, heap, table
Unary = InstructionFormat(VALUE)
UnaryImm = InstructionFormat(imm64)
UnaryIeee32 = InstructionFormat(ieee32)
UnaryIeee64 = InstructionFormat(ieee64)
UnaryBool = InstructionFormat(boolean)
UnaryGlobalValue = InstructionFormat(entities.global_value)
Binary = InstructionFormat(VALUE, VALUE)
BinaryImm = InstructionFormat(VALUE, imm64)
# The select instructions are controlled by the second VALUE operand.
# The first VALUE operand is the controlling flag which has a derived type.
# The fma instruction has the same constraint on all inputs.
Ternary = InstructionFormat(VALUE, VALUE, VALUE, typevar_operand=1)
# Catch-all for instructions with many outputs and inputs and no immediate
# operands.
MultiAry = InstructionFormat(VARIABLE_ARGS)
NullAry = InstructionFormat()
InsertLane = InstructionFormat(VALUE, ('lane', uimm8), VALUE)
ExtractLane = InstructionFormat(VALUE, ('lane', uimm8))
IntCompare = InstructionFormat(intcc, VALUE, VALUE)
IntCompareImm = InstructionFormat(intcc, VALUE, imm64)
IntCond = InstructionFormat(intcc, VALUE)
FloatCompare = InstructionFormat(floatcc, VALUE, VALUE)
FloatCond = InstructionFormat(floatcc, VALUE)
IntSelect = InstructionFormat(intcc, VALUE, VALUE, VALUE)
Jump = InstructionFormat(ebb, VARIABLE_ARGS)
Branch = InstructionFormat(VALUE, ebb, VARIABLE_ARGS)
BranchInt = InstructionFormat(intcc, VALUE, ebb, VARIABLE_ARGS)
BranchFloat = InstructionFormat(floatcc, VALUE, ebb, VARIABLE_ARGS)
BranchIcmp = InstructionFormat(intcc, VALUE, VALUE, ebb, VARIABLE_ARGS)
BranchTable = InstructionFormat(VALUE, ebb, entities.jump_table)
BranchTableEntry = InstructionFormat(VALUE, VALUE, uimm8, entities.jump_table)
BranchTableBase = InstructionFormat(entities.jump_table)
IndirectJump = InstructionFormat(VALUE, entities.jump_table)
Call = InstructionFormat(func_ref, VARIABLE_ARGS)
CallIndirect = InstructionFormat(sig_ref, VALUE, VARIABLE_ARGS)
FuncAddr = InstructionFormat(func_ref)
Load = InstructionFormat(memflags, VALUE, offset32)
LoadComplex = InstructionFormat(memflags, VARIABLE_ARGS, offset32)
Store = InstructionFormat(memflags, VALUE, VALUE, offset32)
StoreComplex = InstructionFormat(memflags, VALUE, VARIABLE_ARGS, offset32)
StackLoad = InstructionFormat(stack_slot, offset32)
StackStore = InstructionFormat(VALUE, stack_slot, offset32)
# Accessing a WebAssembly heap.
HeapAddr = InstructionFormat(heap, VALUE, uimm32)
# Accessing a WebAssembly table.
TableAddr = InstructionFormat(table, VALUE, offset32)
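# RegMove through RegFill describe instructions inserted by register allocation: moves, copies, spills and fills.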
RegMove = InstructionFormat(VALUE, ('src', regunit), ('dst', regunit))
CopySpecial = InstructionFormat(('src', regunit), ('dst', regunit))
CopyNop = InstructionFormat(
('src', entities.stack_slot), ('dst', entities.stack_slot))
RegSpill = InstructionFormat(
VALUE, ('src', regunit), ('dst', entities.stack_slot))
RegFill = InstructionFormat(
VALUE, ('src', entities.stack_slot), ('dst', regunit))
Trap = InstructionFormat(trapcode)
CondTrap = InstructionFormat(VALUE, trapcode)
IntCondTrap = InstructionFormat(intcc, VALUE, trapcode)
FloatCondTrap = InstructionFormat(floatcc, VALUE, trapcode)
# Finally extract the names of global values in this module.
InstructionFormat.extract_names(globals())
| 0.676727 | 0.141815 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.keras_layers import dynamic_unroll_layer
from tensorflow.python.framework import test_util # TF internal
class AddInputAndStateKerasRNNCell(tf.keras.layers.Layer):
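  # Minimal cell whose state is the running sum of its inputs, which makes reset behaviour easy to verify.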
def __init__(self):
super(AddInputAndStateKerasRNNCell, self).__init__()
self.output_size = 1
self.state_size = 1
def call(self, input_, state):
s = input_ + state
return s, s
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
if inputs is not None:
return tf.zeros_like(inputs)
return tf.zeros([batch_size, 1], dtype)
class DynamicUnrollTest(parameterized.TestCase, tf.test.TestCase):
def testFromConfigLSTM(self):
l1 = dynamic_unroll_layer.DynamicUnroll(
tf.keras.layers.LSTMCell(units=3), parallel_iterations=10)
l2 = dynamic_unroll_layer.DynamicUnroll.from_config(l1.get_config())
self.assertEqual(l1.get_config(), l2.get_config())
@parameterized.named_parameters(
('WithMask', True,),
('NoMask', False))
def testDynamicUnrollMatchesDynamicRNNWhenNoReset(self, with_mask):
cell = tf.compat.v1.nn.rnn_cell.LSTMCell(3)
batch_size = 4
max_time = 7
inputs = tf.random.uniform((batch_size, max_time, 2), dtype=tf.float32)
layer = dynamic_unroll_layer.DynamicUnroll(cell, dtype=tf.float32)
if with_mask:
reset_mask = tf.zeros((batch_size, max_time), dtype=tf.bool)
else:
reset_mask = None
outputs_dun, final_state_dun = layer(inputs, reset_mask=reset_mask)
outputs_drnn, final_state_drnn = tf.compat.v1.nn.dynamic_rnn(
cell, inputs, dtype=tf.float32)
self.evaluate(tf.compat.v1.global_variables_initializer())
outputs_dun, final_state_dun, outputs_drnn, final_state_drnn = (
self.evaluate(
(outputs_dun, final_state_dun, outputs_drnn, final_state_drnn)))
self.assertAllClose(outputs_dun, outputs_drnn)
self.assertAllClose(final_state_dun, final_state_drnn)
@parameterized.named_parameters(
('WithMask', True,),
('NoMask', False))
def testDynamicUnrollMatchesDynamicRNNWhenNoResetSingleTimeStep(
self, with_mask):
cell = tf.compat.v1.nn.rnn_cell.LSTMCell(3)
batch_size = 4
max_time = 1
inputs = tf.random.uniform((batch_size, max_time, 2), dtype=tf.float32)
layer = dynamic_unroll_layer.DynamicUnroll(cell, dtype=tf.float32)
if with_mask:
reset_mask = tf.zeros((batch_size, max_time), dtype=tf.bool)
else:
reset_mask = None
outputs_dun, final_state_dun = layer(inputs, reset_mask=reset_mask)
outputs_drnn, final_state_drnn = tf.compat.v1.nn.dynamic_rnn(
cell, inputs, dtype=tf.float32)
self.evaluate(tf.compat.v1.global_variables_initializer())
outputs_dun, final_state_dun, outputs_drnn, final_state_drnn = (
self.evaluate(
(outputs_dun, final_state_dun, outputs_drnn, final_state_drnn)))
self.assertAllClose(outputs_dun, outputs_drnn)
self.assertAllClose(final_state_dun, final_state_drnn)
@test_util.run_in_graph_and_eager_modes()
def testDynamicUnrollResetsStateOnReset(self):
if hasattr(tf, 'contrib'):
class AddInputAndStateRNNCell(tf.contrib.rnn.LayerRNNCell):
@property
def state_size(self):
return tf.TensorShape([1])
@property
def output_size(self):
return tf.TensorShape([1])
def call(self, input_, state):
s = input_ + state
return s, s
self._testDynamicUnrollResetsStateOnReset(
AddInputAndStateRNNCell)
self._testDynamicUnrollResetsStateOnReset(
AddInputAndStateKerasRNNCell)
def _testDynamicUnrollResetsStateOnReset(self, cell_type):
cell = cell_type()
batch_size = 4
max_time = 7
inputs = tf.random.uniform((batch_size, max_time, 1))
reset_mask = (tf.random.normal((batch_size, max_time)) > 0)
layer = dynamic_unroll_layer.DynamicUnroll(cell, dtype=tf.float32)
outputs, final_state = layer(inputs, reset_mask=reset_mask)
tf.nest.assert_same_structure(outputs, cell.output_size)
tf.nest.assert_same_structure(final_state, cell.state_size)
reset_mask, inputs, outputs, final_state = self.evaluate(
(reset_mask, inputs, outputs, final_state))
self.assertAllClose(outputs[:, -1, :], final_state)
# outputs will contain cumulative sums up until a reset
expected_outputs = []
state = np.zeros_like(final_state)
for i, frame in enumerate(np.transpose(inputs, [1, 0, 2])):
state = state * np.reshape(~reset_mask[:, i], state.shape) + frame
expected_outputs.append(np.array(state))
expected_outputs = np.transpose(expected_outputs, [1, 0, 2])
self.assertAllClose(outputs, expected_outputs)
if __name__ == '__main__':
tf.test.main()
|
tf_agents/keras_layers/dynamic_unroll_layer_test.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.keras_layers import dynamic_unroll_layer
from tensorflow.python.framework import test_util # TF internal
class AddInputAndStateKerasRNNCell(tf.keras.layers.Layer):
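  # Minimal cell whose state is the running sum of its inputs, which makes reset behaviour easy to verify.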
def __init__(self):
super(AddInputAndStateKerasRNNCell, self).__init__()
self.output_size = 1
self.state_size = 1
def call(self, input_, state):
s = input_ + state
return s, s
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
if inputs is not None:
return tf.zeros_like(inputs)
return tf.zeros([batch_size, 1], dtype)
class DynamicUnrollTest(parameterized.TestCase, tf.test.TestCase):
def testFromConfigLSTM(self):
l1 = dynamic_unroll_layer.DynamicUnroll(
tf.keras.layers.LSTMCell(units=3), parallel_iterations=10)
l2 = dynamic_unroll_layer.DynamicUnroll.from_config(l1.get_config())
self.assertEqual(l1.get_config(), l2.get_config())
@parameterized.named_parameters(
('WithMask', True,),
('NoMask', False))
def testDynamicUnrollMatchesDynamicRNNWhenNoReset(self, with_mask):
cell = tf.compat.v1.nn.rnn_cell.LSTMCell(3)
batch_size = 4
max_time = 7
inputs = tf.random.uniform((batch_size, max_time, 2), dtype=tf.float32)
layer = dynamic_unroll_layer.DynamicUnroll(cell, dtype=tf.float32)
if with_mask:
reset_mask = tf.zeros((batch_size, max_time), dtype=tf.bool)
else:
reset_mask = None
outputs_dun, final_state_dun = layer(inputs, reset_mask=reset_mask)
outputs_drnn, final_state_drnn = tf.compat.v1.nn.dynamic_rnn(
cell, inputs, dtype=tf.float32)
self.evaluate(tf.compat.v1.global_variables_initializer())
outputs_dun, final_state_dun, outputs_drnn, final_state_drnn = (
self.evaluate(
(outputs_dun, final_state_dun, outputs_drnn, final_state_drnn)))
self.assertAllClose(outputs_dun, outputs_drnn)
self.assertAllClose(final_state_dun, final_state_drnn)
@parameterized.named_parameters(
('WithMask', True,),
('NoMask', False))
def testDynamicUnrollMatchesDynamicRNNWhenNoResetSingleTimeStep(
self, with_mask):
cell = tf.compat.v1.nn.rnn_cell.LSTMCell(3)
batch_size = 4
max_time = 1
inputs = tf.random.uniform((batch_size, max_time, 2), dtype=tf.float32)
layer = dynamic_unroll_layer.DynamicUnroll(cell, dtype=tf.float32)
if with_mask:
reset_mask = tf.zeros((batch_size, max_time), dtype=tf.bool)
else:
reset_mask = None
outputs_dun, final_state_dun = layer(inputs, reset_mask=reset_mask)
outputs_drnn, final_state_drnn = tf.compat.v1.nn.dynamic_rnn(
cell, inputs, dtype=tf.float32)
self.evaluate(tf.compat.v1.global_variables_initializer())
outputs_dun, final_state_dun, outputs_drnn, final_state_drnn = (
self.evaluate(
(outputs_dun, final_state_dun, outputs_drnn, final_state_drnn)))
self.assertAllClose(outputs_dun, outputs_drnn)
self.assertAllClose(final_state_dun, final_state_drnn)
@test_util.run_in_graph_and_eager_modes()
def testDynamicUnrollResetsStateOnReset(self):
if hasattr(tf, 'contrib'):
class AddInputAndStateRNNCell(tf.contrib.rnn.LayerRNNCell):
@property
def state_size(self):
return tf.TensorShape([1])
@property
def output_size(self):
return tf.TensorShape([1])
def call(self, input_, state):
s = input_ + state
return s, s
self._testDynamicUnrollResetsStateOnReset(
AddInputAndStateRNNCell)
self._testDynamicUnrollResetsStateOnReset(
AddInputAndStateKerasRNNCell)
def _testDynamicUnrollResetsStateOnReset(self, cell_type):
cell = cell_type()
batch_size = 4
max_time = 7
inputs = tf.random.uniform((batch_size, max_time, 1))
reset_mask = (tf.random.normal((batch_size, max_time)) > 0)
layer = dynamic_unroll_layer.DynamicUnroll(cell, dtype=tf.float32)
outputs, final_state = layer(inputs, reset_mask=reset_mask)
tf.nest.assert_same_structure(outputs, cell.output_size)
tf.nest.assert_same_structure(final_state, cell.state_size)
reset_mask, inputs, outputs, final_state = self.evaluate(
(reset_mask, inputs, outputs, final_state))
self.assertAllClose(outputs[:, -1, :], final_state)
# outputs will contain cumulative sums up until a reset
expected_outputs = []
state = np.zeros_like(final_state)
for i, frame in enumerate(np.transpose(inputs, [1, 0, 2])):
state = state * np.reshape(~reset_mask[:, i], state.shape) + frame
expected_outputs.append(np.array(state))
expected_outputs = np.transpose(expected_outputs, [1, 0, 2])
self.assertAllClose(outputs, expected_outputs)
if __name__ == '__main__':
tf.test.main()
| 0.857664 | 0.370539 |
from __future__ import annotations
from abc import abstractmethod, ABC
import typing
from opentrons import types
from opentrons.hardware_control.dev_types import PipetteDict
from opentrons.protocols.api_support.util import Clearances, PlungerSpeeds, \
FlowRates
from opentrons.protocols.implementations.well import WellImplementation
class InstrumentContextInterface(ABC):
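    # Abstract interface that concrete instrument (pipette) implementations must provide.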
@abstractmethod
def get_default_speed(self) -> float:
...
@abstractmethod
def set_default_speed(self, speed: float) -> None:
...
@abstractmethod
def aspirate(self,
volume: float,
rate: float = 1.0) -> None:
...
@abstractmethod
def dispense(self,
volume: float,
rate: float = 1.0) -> None:
...
@abstractmethod
def blow_out(self) -> None:
...
@abstractmethod
def touch_tip(self,
location: WellImplementation,
radius: float = 1.0,
v_offset: float = -1.0,
speed: float = 60.0) -> None:
...
@abstractmethod
def pick_up_tip(self,
well: WellImplementation,
tip_length: float,
presses: typing.Optional[int] = None,
increment: typing.Optional[float] = None) -> None:
...
@abstractmethod
def drop_tip(self,
home_after: bool = True) -> None:
...
@abstractmethod
def home(self) -> None:
...
@abstractmethod
def home_plunger(self) -> None:
...
@abstractmethod
def delay(self) -> None:
...
@abstractmethod
def move_to(self,
location: types.Location,
force_direct: bool = False,
minimum_z_height: typing.Optional[float] = None,
speed: typing.Optional[float] = None) -> None:
...
@abstractmethod
def get_mount(self) -> types.Mount:
...
@abstractmethod
def get_instrument_name(self) -> str:
...
@abstractmethod
def get_pipette_name(self) -> str:
...
@abstractmethod
def get_model(self) -> str:
...
@abstractmethod
def get_min_volume(self) -> float:
...
@abstractmethod
def get_max_volume(self) -> float:
...
@abstractmethod
def get_current_volume(self) -> float:
...
@abstractmethod
def get_available_volume(self) -> float:
...
@abstractmethod
def get_pipette(self) -> PipetteDict:
...
@abstractmethod
def get_channels(self) -> int:
...
@abstractmethod
def has_tip(self) -> bool:
...
@abstractmethod
def is_ready_to_aspirate(self) -> bool:
...
@abstractmethod
def prepare_for_aspirate(self) -> None:
...
@abstractmethod
def get_return_height(self) -> float:
...
@abstractmethod
def get_well_bottom_clearance(self) -> Clearances:
...
@abstractmethod
def get_speed(self) -> PlungerSpeeds:
...
@abstractmethod
def get_flow_rate(self) -> FlowRates:
...
@abstractmethod
def set_flow_rate(
self,
aspirate: typing.Optional[float] = None,
dispense: typing.Optional[float] = None,
blow_out: typing.Optional[float] = None) -> None:
...
@abstractmethod
def set_pipette_speed(
self,
aspirate: typing.Optional[float] = None,
dispense: typing.Optional[float] = None,
blow_out: typing.Optional[float] = None) -> None:
...
|
api/src/opentrons/protocols/implementations/interfaces/instrument_context.py
|
from __future__ import annotations
from abc import abstractmethod, ABC
import typing
from opentrons import types
from opentrons.hardware_control.dev_types import PipetteDict
from opentrons.protocols.api_support.util import Clearances, PlungerSpeeds, \
FlowRates
from opentrons.protocols.implementations.well import WellImplementation
class InstrumentContextInterface(ABC):
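    # Abstract interface that concrete instrument (pipette) implementations must provide.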
@abstractmethod
def get_default_speed(self) -> float:
...
@abstractmethod
def set_default_speed(self, speed: float) -> None:
...
@abstractmethod
def aspirate(self,
volume: float,
rate: float = 1.0) -> None:
...
@abstractmethod
def dispense(self,
volume: float,
rate: float = 1.0) -> None:
...
@abstractmethod
def blow_out(self) -> None:
...
@abstractmethod
def touch_tip(self,
location: WellImplementation,
radius: float = 1.0,
v_offset: float = -1.0,
speed: float = 60.0) -> None:
...
@abstractmethod
def pick_up_tip(self,
well: WellImplementation,
tip_length: float,
presses: typing.Optional[int] = None,
increment: typing.Optional[float] = None) -> None:
...
@abstractmethod
def drop_tip(self,
home_after: bool = True) -> None:
...
@abstractmethod
def home(self) -> None:
...
@abstractmethod
def home_plunger(self) -> None:
...
@abstractmethod
def delay(self) -> None:
...
@abstractmethod
def move_to(self,
location: types.Location,
force_direct: bool = False,
minimum_z_height: typing.Optional[float] = None,
speed: typing.Optional[float] = None) -> None:
...
@abstractmethod
def get_mount(self) -> types.Mount:
...
@abstractmethod
def get_instrument_name(self) -> str:
...
@abstractmethod
def get_pipette_name(self) -> str:
...
@abstractmethod
def get_model(self) -> str:
...
@abstractmethod
def get_min_volume(self) -> float:
...
@abstractmethod
def get_max_volume(self) -> float:
...
@abstractmethod
def get_current_volume(self) -> float:
...
@abstractmethod
def get_available_volume(self) -> float:
...
@abstractmethod
def get_pipette(self) -> PipetteDict:
...
@abstractmethod
def get_channels(self) -> int:
...
@abstractmethod
def has_tip(self) -> bool:
...
@abstractmethod
def is_ready_to_aspirate(self) -> bool:
...
@abstractmethod
def prepare_for_aspirate(self) -> None:
...
@abstractmethod
def get_return_height(self) -> float:
...
@abstractmethod
def get_well_bottom_clearance(self) -> Clearances:
...
@abstractmethod
def get_speed(self) -> PlungerSpeeds:
...
@abstractmethod
def get_flow_rate(self) -> FlowRates:
...
@abstractmethod
def set_flow_rate(
self,
aspirate: typing.Optional[float] = None,
dispense: typing.Optional[float] = None,
blow_out: typing.Optional[float] = None) -> None:
...
@abstractmethod
def set_pipette_speed(
self,
aspirate: typing.Optional[float] = None,
dispense: typing.Optional[float] = None,
blow_out: typing.Optional[float] = None) -> None:
...
| 0.871939 | 0.439266 |
from pathlib import Path
from astrality.actions import ImportContextAction
from astrality.context import Context
def test_null_object_pattern():
"""Test initializing action with no behaviour."""
import_context_action = ImportContextAction(
options={},
directory=Path('/'),
replacer=lambda x: x,
context_store=Context(),
)
import_context_action.execute()
def test_importing_entire_file(context_directory):
"""
Test importing all sections from context file.
All context sections should be imported in the absence of `from_section`.
"""
context_import_dict = {
'from_path': 'several_sections.yml',
}
context_store = Context()
import_context_action = ImportContextAction(
options=context_import_dict,
directory=context_directory,
replacer=lambda x: x,
context_store=context_store,
)
import_context_action.execute()
expected_context = {
'section1': {
'k1_1': 'v1_1',
'k1_2': 'v1_2',
},
'section2': {
'k2_1': 'v2_1',
'k2_2': 'v2_2',
},
}
assert context_store == expected_context
def test_importing_specific_section(context_directory):
"""Test importing specific sections from context file."""
context_import_dict = {
'from_path': 'several_sections.yml',
'from_section': 'section1',
}
context_store = Context({'original': 'value'})
import_context_action = ImportContextAction(
options=context_import_dict,
directory=context_directory,
replacer=lambda x: x,
context_store=context_store,
)
import_context_action.execute()
expected_context = Context({
'original': 'value',
'section1': {
'k1_1': 'v1_1',
'k1_2': 'v1_2',
},
})
assert context_store == expected_context
def test_replacer_function_being_used(context_directory):
"""
Test use of replacement function in option retrieval.
The function should be used when querying values from `options`.
"""
context_import_dict = {
'from_path': 'path',
'from_section': 'from',
'to_section': 'to',
}
context_store = Context()
def replacer(option: str) -> str:
if option == 'path':
return 'several_sections.yml'
elif option == 'from':
return 'section1'
elif option == 'to':
return 'new_section'
else:
raise AssertionError
import_context_action = ImportContextAction(
options=context_import_dict,
directory=context_directory,
replacer=replacer,
context_store=context_store,
)
import_context_action.execute()
assert context_store == {
'new_section': {
'k1_1': 'v1_1',
'k1_2': 'v1_2',
},
}
def test_that_replacer_is_run_every_time(context_directory):
"""
    The replacer should be run anew every time self.execute() is invoked.
"""
context_import_dict = {
'from_path': 'several_sections.yml',
'from_section': 'section1',
'to_section': 'whatever',
}
context_store = Context()
class Replacer:
def __init__(self) -> None:
self.invoke_number = 0
def __call__(self, option: str) -> str:
self.invoke_number += 1
return option
replacer = Replacer()
import_context_action = ImportContextAction(
options=context_import_dict,
directory=context_directory,
replacer=replacer,
context_store=context_store,
)
import_context_action.execute()
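    # Each execute() resolves the three options (from_path, from_section, to_section) through the replacer.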
assert replacer.invoke_number == 3
import_context_action.execute()
assert replacer.invoke_number == 6
|
astrality/tests/actions/test_import_context_action.py
|
from pathlib import Path
from astrality.actions import ImportContextAction
from astrality.context import Context
def test_null_object_pattern():
"""Test initializing action with no behaviour."""
import_context_action = ImportContextAction(
options={},
directory=Path('/'),
replacer=lambda x: x,
context_store=Context(),
)
import_context_action.execute()
def test_importing_entire_file(context_directory):
"""
Test importing all sections from context file.
All context sections should be imported in the absence of `from_section`.
"""
context_import_dict = {
'from_path': 'several_sections.yml',
}
context_store = Context()
import_context_action = ImportContextAction(
options=context_import_dict,
directory=context_directory,
replacer=lambda x: x,
context_store=context_store,
)
import_context_action.execute()
expected_context = {
'section1': {
'k1_1': 'v1_1',
'k1_2': 'v1_2',
},
'section2': {
'k2_1': 'v2_1',
'k2_2': 'v2_2',
},
}
assert context_store == expected_context
def test_importing_specific_section(context_directory):
"""Test importing specific sections from context file."""
context_import_dict = {
'from_path': 'several_sections.yml',
'from_section': 'section1',
}
context_store = Context({'original': 'value'})
import_context_action = ImportContextAction(
options=context_import_dict,
directory=context_directory,
replacer=lambda x: x,
context_store=context_store,
)
import_context_action.execute()
expected_context = Context({
'original': 'value',
'section1': {
'k1_1': 'v1_1',
'k1_2': 'v1_2',
},
})
assert context_store == expected_context
def test_replacer_function_being_used(context_directory):
"""
Test use of replacement function in option retrieval.
The function should be used when querying values from `options`.
"""
context_import_dict = {
'from_path': 'path',
'from_section': 'from',
'to_section': 'to',
}
context_store = Context()
def replacer(option: str) -> str:
if option == 'path':
return 'several_sections.yml'
elif option == 'from':
return 'section1'
elif option == 'to':
return 'new_section'
else:
raise AssertionError
import_context_action = ImportContextAction(
options=context_import_dict,
directory=context_directory,
replacer=replacer,
context_store=context_store,
)
import_context_action.execute()
assert context_store == {
'new_section': {
'k1_1': 'v1_1',
'k1_2': 'v1_2',
},
}
def test_that_replacer_is_run_every_time(context_directory):
"""
    The replacer should be run anew every time self.execute() is invoked.
"""
context_import_dict = {
'from_path': 'several_sections.yml',
'from_section': 'section1',
'to_section': 'whatever',
}
context_store = Context()
class Replacer:
def __init__(self) -> None:
self.invoke_number = 0
def __call__(self, option: str) -> str:
self.invoke_number += 1
return option
replacer = Replacer()
import_context_action = ImportContextAction(
options=context_import_dict,
directory=context_directory,
replacer=replacer,
context_store=context_store,
)
import_context_action.execute()
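    # Each execute() resolves the three options (from_path, from_section, to_section) through the replacer.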
assert replacer.invoke_number == 3
import_context_action.execute()
assert replacer.invoke_number == 6
| 0.8709 | 0.345436 |
from knack.arguments import ArgumentsContext
from knack.commands import CLICommandsLoader, CommandGroup
class SuperBenchCommandsLoader(CLICommandsLoader):
"""SuperBench CLI commands loader."""
def load_command_table(self, args):
"""Load commands into the command table.
Args:
args (list): List of arguments from the command line.
Returns:
collections.OrderedDict: Load commands into the command table.
"""
with CommandGroup(self, '', 'superbench.cli._handler#{}') as g:
g.command('version', 'version_command_handler')
g.command('deploy', 'deploy_command_handler')
g.command('exec', 'exec_command_handler')
g.command('run', 'run_command_handler')
with CommandGroup(self, 'node', 'superbench.cli._node_handler#{}') as g:
g.command('info', 'info_command_handler')
return super().load_command_table(args)
def load_arguments(self, command):
"""Load arguments for commands.
Args:
command: The command to load arguments for.
"""
with ArgumentsContext(self, '') as ac:
ac.argument('docker_image', options_list=('--docker-image', '-i'), type=str, help='Docker image URI.')
ac.argument('docker_username', type=str, help='Docker registry username if authentication is needed.')
ac.argument('docker_password', type=str, help='Docker registry password if authentication is needed.')
ac.argument(
'host_file', options_list=('--host-file', '-f'), type=str, help='Path to Ansible inventory host file.'
)
ac.argument('host_list', options_list=('--host-list', '-l'), type=str, help='Comma separated host list.')
ac.argument('host_username', type=str, help='Host username if needed.')
            ac.argument('host_password', type=str, help='Host password or key passphrase if needed.')
ac.argument(
'output_dir',
type=str,
help='Path to output directory, outputs/{datetime} will be used if not specified.'
)
ac.argument('private_key', type=str, help='Path to private key if needed.')
ac.argument(
'config_file', options_list=('--config-file', '-c'), type=str, help='Path to SuperBench config file.'
)
ac.argument(
'config_override',
options_list=('--config-override', '-C'),
type=str,
nargs='+',
help='Extra arguments to override config_file.'
)
super().load_arguments(command)
|
superbench/cli/_commands.py
|
from knack.arguments import ArgumentsContext
from knack.commands import CLICommandsLoader, CommandGroup
class SuperBenchCommandsLoader(CLICommandsLoader):
"""SuperBench CLI commands loader."""
def load_command_table(self, args):
"""Load commands into the command table.
Args:
args (list): List of arguments from the command line.
Returns:
collections.OrderedDict: Load commands into the command table.
"""
with CommandGroup(self, '', 'superbench.cli._handler#{}') as g:
g.command('version', 'version_command_handler')
g.command('deploy', 'deploy_command_handler')
g.command('exec', 'exec_command_handler')
g.command('run', 'run_command_handler')
with CommandGroup(self, 'node', 'superbench.cli._node_handler#{}') as g:
g.command('info', 'info_command_handler')
return super().load_command_table(args)
def load_arguments(self, command):
"""Load arguments for commands.
Args:
command: The command to load arguments for.
"""
with ArgumentsContext(self, '') as ac:
ac.argument('docker_image', options_list=('--docker-image', '-i'), type=str, help='Docker image URI.')
ac.argument('docker_username', type=str, help='Docker registry username if authentication is needed.')
ac.argument('docker_password', type=str, help='Docker registry password if authentication is needed.')
ac.argument(
'host_file', options_list=('--host-file', '-f'), type=str, help='Path to Ansible inventory host file.'
)
ac.argument('host_list', options_list=('--host-list', '-l'), type=str, help='Comma separated host list.')
ac.argument('host_username', type=str, help='Host username if needed.')
            ac.argument('host_password', type=str, help='Host password or key passphrase if needed.')
ac.argument(
'output_dir',
type=str,
help='Path to output directory, outputs/{datetime} will be used if not specified.'
)
ac.argument('private_key', type=str, help='Path to private key if needed.')
ac.argument(
'config_file', options_list=('--config-file', '-c'), type=str, help='Path to SuperBench config file.'
)
ac.argument(
'config_override',
options_list=('--config-override', '-C'),
type=str,
nargs='+',
help='Extra arguments to override config_file.'
)
super().load_arguments(command)
| 0.873134 | 0.095856 |
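# GeoJSON geometry fixtures covering every geometry type, from Point through GeometryCollection.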
point = {
"type": "Point",
"coordinates": [100.0, 0.0]
}
linestring = {
"type": "LineString",
"coordinates": [
[100.0, 0.0],
[101.0, 1.0]
]
}
polygon = {
"type": "Polygon",
"coordinates": [
[
[100.0, 0.0],
[101.0, 0.0],
[101.0, 1.0],
[100.0, 1.0],
[100.0, 0.0]
]
]
}
polygon_with_hole = {
"type": "Polygon",
"coordinates": [
[
[100.0, 0.0],
[101.0, 0.0],
[101.0, 1.0],
[100.0, 1.0],
[100.0, 0.0]
],
[
[100.8, 0.8],
[100.8, 0.2],
[100.2, 0.2],
[100.2, 0.8],
[100.8, 0.8]
]
]
}
multipoint = {
"type": "MultiPoint",
"coordinates": [
[100.0, 0.0],
[101.0, 1.0]
]
}
multilinestring = {
"type": "MultiLineString",
"coordinates": [
[
[100.0, 0.0],
[101.0, 1.0]
],
[
[102.0, 2.0],
[103.0, 3.0]
]
]
}
multipolygon = {
"type": "MultiPolygon",
"coordinates": [
[
[
[102.0, 2.0],
[103.0, 2.0],
[103.0, 3.0],
[102.0, 3.0],
[102.0, 2.0]
]
],
[
[
[100.0, 0.0],
[101.0, 0.0],
[101.0, 1.0],
[100.0, 1.0],
[100.0, 0.0]
],
[
[100.2, 0.2],
[100.2, 0.8],
[100.8, 0.8],
[100.8, 0.2],
[100.2, 0.2]
]
]
]
}
geometry_collection = {
"type": "GeometryCollection",
"geometries": [{
"type": "Point",
"coordinates": [100.0, 0.0]
}, {
"type": "LineString",
"coordinates": [
[101.0, 0.0],
[102.0, 1.0]
]
}]
}
|
stac_api_validator/geometries.py
|
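# GeoJSON geometry fixtures covering every geometry type, from Point through GeometryCollection.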
| 0.553264 | 0.698728 |
import logging
import os
import subprocess
import sys
import impersonate
import proc_util
import uac
class UpdaterTestRPCHandler():
def echo(self, message):
"""Test method to check if server is reachable."""
return message
def RunAsSystem(self, command, env=None, cwd=None, timeout=30):
"""Runs the command as SYSTEM user.
Args:
command: The command to run. This argument will be forwarded to
subprocess.Popen().
env: Environment variables to pass to command.
cwd: Working directory for the command.
timeout: How long the child process should wait before timeout.
Returns:
      (pid, exit_code, stdout, stderr) tuple.
"""
try:
process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env, cwd=cwd)
# TODO(crbug.com/1233612): `communicate()` in Python 2.7 does not support
# timeout value, pass the value here once we migrate to Python 3. Also
# don't forget to handle subprocess.TimeoutExpired exception.
stdout, stderr = process.communicate()
logging.info('Command %s stdout:\n %s', command, stdout)
if stderr:
logging.error('Command %s stderr:\n %s', command, stderr)
return (process.pid, process.returncode, stdout, stderr)
except OSError as err:
logging.exception(err)
return (None, None, None, None)
def RunAsStandardUser(self, command_line, env=None, cwd=None, timeout=30):
"""Runs the command as the non-elevated logon user on default desktop.
Args:
command_line: The command line string, includes all arguments.
env: Environment variables to pass to command.
cwd: Working directory for the command.
timeout: How long the child process should wait before timeout.
Returns:
      (pid, exit_code, stdout, stderr) tuple.
"""
return impersonate.RunAsStandardUser(command_line, env, cwd, timeout)
def AnswerUpcomingUACPrompt(self,
actions,
timeout=10,
wait_child=False,
source=''):
"""Answers upcoming UAC prompt that does not require username/password.
Args:
      actions: Actions to take, as a string such as 'AADDA': 'A' to accept,
        'D' to deny.
timeout: How long the child process should wait for each UAC click.
      wait_child: Whether this thread should wait for the completion of the
        child proc.
source: Optional name of the source that triggers this action (for logging
and debugging purpose).
Returns:
      (pid, exit_code) of the created UAC-answering process. If the sub-process
      is not created, or did not finish within the wait time, returns (None, None).
"""
uac_tool = os.path.join(os.path.dirname(__file__), 'answer_uac.py')
command = ('python %s --actions=%s --timeout=%d --source=%s' %
(uac_tool, actions, timeout, source))
logging.info('Running command: %s', command)
if wait_child:
if timeout > 0:
# Each button click could take `timeout` seconds, and add 1 second
# extra for child process to finish.
timeout = timeout * len(actions) + 1
else:
# Negative timeout has special meanings, such as win32event.INFINITE.
# Don't touch it.
pass
else:
timeout = 0 # no wait
    # There could be multiple winlogon.exe instances when there are multiple
    # login sessions, for example when there's a remote desktop session. In
    # this case, find the active session where the UAC prompt is supposed to
    # display.
winlogon_pids = proc_util.GetPIDsWithName('winlogon.exe',
proc_util.GetActiveSessionID())
if not winlogon_pids:
logging.error('Unexpected: no active session or no winlogon.exe in it.')
return (None, None)
elif len(winlogon_pids) > 1:
logging.warning('Unexpected multiple winlogon.exe instances within '
'active session, the first instance will be used.')
# Must spawn child process on the same desktop as the one that UAC prompts,
# otherwise the child process will not be able to find the UAC dialog.
    # Please note that there is a slight race condition here, as the user
    # could change the UAC desktop at any time. But we can tolerate this for
    # testing purposes.
desktop = 'winlogon' if uac.IsPromptingOnSecureDesktop() else 'default'
logging.info('Spawn process [%s] for UAC on desktop [%s].',
command, desktop)
pid, exit_code, stdout, stderr = impersonate.RunAsPidOnDeskstop(
command, winlogon_pids[0], desktop=desktop, timeout=timeout)
logging.info('Process [%s] is created to answer UAC, exit_code: %s', pid,
exit_code)
if stdout and stdout.strip():
logging.info('STDOUT: [%s]', stdout)
if stderr and stderr.strip():
logging.error('STDERR: [%s]', stderr)
return (pid, exit_code)
|
chrome/updater/test/service/win/rpc_handler.py
|
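# --- Illustration (not part of rpc_handler.py) ------------------------------
# AnswerUpcomingUACPrompt above budgets `timeout` seconds per UAC click plus
# one extra second for the child to exit when wait_child is set. A standalone
# restatement of that arithmetic, useful for sanity-checking wait times:
def uac_wait_time(actions, timeout, wait_child):
    if not wait_child:
        return 0                # fire and forget
    if timeout <= 0:
        return timeout          # special values (e.g. win32event.INFINITE) pass through
    return timeout * len(actions) + 1

print(uac_wait_time('AADDA', timeout=10, wait_child=True))   # -> 51
print(uac_wait_time('A', timeout=10, wait_child=False))      # -> 0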
| 0.52902 | 0.110279 |
from pprint import pformat
from six import iteritems
import re
class WorkflowTaskMeta(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_moid': 'str',
'ancestors': 'list[MoBaseMoRef]',
'create_time': 'datetime',
'mod_time': 'datetime',
'moid': 'str',
'object_type': 'str',
'owners': 'list[str]',
'parent': 'MoBaseMoRef',
'tags': 'list[MoTag]',
'version_context': 'MoVersionContext',
'action_type': 'str',
'description': 'str',
'input_keys': 'list[str]',
'internal': 'bool',
'name': 'str',
'output_keys': 'list[str]',
'response_timeout_sec': 'int',
'retry_count': 'int',
'retry_delay_sec': 'int',
'retry_logic': 'str',
'src': 'str',
'timeout_policy': 'str',
'timeout_sec': 'int'
}
attribute_map = {
'account_moid': 'AccountMoid',
'ancestors': 'Ancestors',
'create_time': 'CreateTime',
'mod_time': 'ModTime',
'moid': 'Moid',
'object_type': 'ObjectType',
'owners': 'Owners',
'parent': 'Parent',
'tags': 'Tags',
'version_context': 'VersionContext',
'action_type': 'ActionType',
'description': 'Description',
'input_keys': 'InputKeys',
'internal': 'Internal',
'name': 'Name',
'output_keys': 'OutputKeys',
'response_timeout_sec': 'ResponseTimeoutSec',
'retry_count': 'RetryCount',
'retry_delay_sec': 'RetryDelaySec',
'retry_logic': 'RetryLogic',
'src': 'Src',
'timeout_policy': 'TimeoutPolicy',
'timeout_sec': 'TimeoutSec'
}
def __init__(self, account_moid=None, ancestors=None, create_time=None, mod_time=None, moid=None, object_type=None, owners=None, parent=None, tags=None, version_context=None, action_type=None, description=None, input_keys=None, internal=None, name=None, output_keys=None, response_timeout_sec=None, retry_count=None, retry_delay_sec=None, retry_logic=None, src=None, timeout_policy=None, timeout_sec=None):
"""
WorkflowTaskMeta - a model defined in Swagger
"""
self._account_moid = None
self._ancestors = None
self._create_time = None
self._mod_time = None
self._moid = None
self._object_type = None
self._owners = None
self._parent = None
self._tags = None
self._version_context = None
self._action_type = None
self._description = None
self._input_keys = None
self._internal = None
self._name = None
self._output_keys = None
self._response_timeout_sec = None
self._retry_count = None
self._retry_delay_sec = None
self._retry_logic = None
self._src = None
self._timeout_policy = None
self._timeout_sec = None
if account_moid is not None:
self.account_moid = account_moid
if ancestors is not None:
self.ancestors = ancestors
if create_time is not None:
self.create_time = create_time
if mod_time is not None:
self.mod_time = mod_time
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
if owners is not None:
self.owners = owners
if parent is not None:
self.parent = parent
if tags is not None:
self.tags = tags
if version_context is not None:
self.version_context = version_context
if action_type is not None:
self.action_type = action_type
if description is not None:
self.description = description
if input_keys is not None:
self.input_keys = input_keys
if internal is not None:
self.internal = internal
if name is not None:
self.name = name
if output_keys is not None:
self.output_keys = output_keys
if response_timeout_sec is not None:
self.response_timeout_sec = response_timeout_sec
if retry_count is not None:
self.retry_count = retry_count
if retry_delay_sec is not None:
self.retry_delay_sec = retry_delay_sec
if retry_logic is not None:
self.retry_logic = retry_logic
if src is not None:
self.src = src
if timeout_policy is not None:
self.timeout_policy = timeout_policy
if timeout_sec is not None:
self.timeout_sec = timeout_sec
@property
def account_moid(self):
"""
Gets the account_moid of this WorkflowTaskMeta.
The Account ID for this managed object.
:return: The account_moid of this WorkflowTaskMeta.
:rtype: str
"""
return self._account_moid
@account_moid.setter
def account_moid(self, account_moid):
"""
Sets the account_moid of this WorkflowTaskMeta.
The Account ID for this managed object.
:param account_moid: The account_moid of this WorkflowTaskMeta.
:type: str
"""
self._account_moid = account_moid
@property
def ancestors(self):
"""
Gets the ancestors of this WorkflowTaskMeta.
Ancestors is an array containing the MO references of the ancestors in the object containment hierarchy.
:return: The ancestors of this WorkflowTaskMeta.
:rtype: list[MoBaseMoRef]
"""
return self._ancestors
@ancestors.setter
def ancestors(self, ancestors):
"""
Sets the ancestors of this WorkflowTaskMeta.
Ancestors is an array containing the MO references of the ancestors in the object containment hierarchy.
:param ancestors: The ancestors of this WorkflowTaskMeta.
:type: list[MoBaseMoRef]
"""
self._ancestors = ancestors
@property
def create_time(self):
"""
Gets the create_time of this WorkflowTaskMeta.
The time when this managed object was created.
:return: The create_time of this WorkflowTaskMeta.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""
Sets the create_time of this WorkflowTaskMeta.
The time when this managed object was created.
:param create_time: The create_time of this WorkflowTaskMeta.
:type: datetime
"""
self._create_time = create_time
@property
def mod_time(self):
"""
Gets the mod_time of this WorkflowTaskMeta.
The time when this managed object was last modified.
:return: The mod_time of this WorkflowTaskMeta.
:rtype: datetime
"""
return self._mod_time
@mod_time.setter
def mod_time(self, mod_time):
"""
Sets the mod_time of this WorkflowTaskMeta.
The time when this managed object was last modified.
:param mod_time: The mod_time of this WorkflowTaskMeta.
:type: datetime
"""
self._mod_time = mod_time
@property
def moid(self):
"""
Gets the moid of this WorkflowTaskMeta.
A unique identifier of this Managed Object instance.
:return: The moid of this WorkflowTaskMeta.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this WorkflowTaskMeta.
A unique identifier of this Managed Object instance.
:param moid: The moid of this WorkflowTaskMeta.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this WorkflowTaskMeta.
The fully-qualified type of this managed object, e.g. the class name.
:return: The object_type of this WorkflowTaskMeta.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this WorkflowTaskMeta.
The fully-qualified type of this managed object, e.g. the class name.
:param object_type: The object_type of this WorkflowTaskMeta.
:type: str
"""
self._object_type = object_type
@property
def owners(self):
"""
Gets the owners of this WorkflowTaskMeta.
An array of owners which represent effective ownership of this object.
:return: The owners of this WorkflowTaskMeta.
:rtype: list[str]
"""
return self._owners
@owners.setter
def owners(self, owners):
"""
Sets the owners of this WorkflowTaskMeta.
An array of owners which represent effective ownership of this object.
:param owners: The owners of this WorkflowTaskMeta.
:type: list[str]
"""
self._owners = owners
@property
def parent(self):
"""
Gets the parent of this WorkflowTaskMeta.
The direct ancestor of this managed object in the containment hierarchy.
:return: The parent of this WorkflowTaskMeta.
:rtype: MoBaseMoRef
"""
return self._parent
@parent.setter
def parent(self, parent):
"""
Sets the parent of this WorkflowTaskMeta.
The direct ancestor of this managed object in the containment hierarchy.
:param parent: The parent of this WorkflowTaskMeta.
:type: MoBaseMoRef
"""
self._parent = parent
@property
def tags(self):
"""
Gets the tags of this WorkflowTaskMeta.
        An array of tags, which allow adding key/value meta-data to managed objects.
:return: The tags of this WorkflowTaskMeta.
:rtype: list[MoTag]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this WorkflowTaskMeta.
        An array of tags, which allow adding key/value meta-data to managed objects.
:param tags: The tags of this WorkflowTaskMeta.
:type: list[MoTag]
"""
self._tags = tags
@property
def version_context(self):
"""
Gets the version_context of this WorkflowTaskMeta.
The versioning info for this managed object
:return: The version_context of this WorkflowTaskMeta.
:rtype: MoVersionContext
"""
return self._version_context
@version_context.setter
def version_context(self, version_context):
"""
Sets the version_context of this WorkflowTaskMeta.
The versioning info for this managed object
:param version_context: The version_context of this WorkflowTaskMeta.
:type: MoVersionContext
"""
self._version_context = version_context
@property
def action_type(self):
"""
Gets the action_type of this WorkflowTaskMeta.
A task execution type to indicate if it is a system task
:return: The action_type of this WorkflowTaskMeta.
:rtype: str
"""
return self._action_type
@action_type.setter
def action_type(self, action_type):
"""
Sets the action_type of this WorkflowTaskMeta.
A task execution type to indicate if it is a system task
:param action_type: The action_type of this WorkflowTaskMeta.
:type: str
"""
self._action_type = action_type
@property
def description(self):
"""
Gets the description of this WorkflowTaskMeta.
A description of the task
:return: The description of this WorkflowTaskMeta.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this WorkflowTaskMeta.
A description of the task
:param description: The description of this WorkflowTaskMeta.
:type: str
"""
self._description = description
@property
def input_keys(self):
"""
Gets the input_keys of this WorkflowTaskMeta.
An input key for the task
:return: The input_keys of this WorkflowTaskMeta.
:rtype: list[str]
"""
return self._input_keys
@input_keys.setter
def input_keys(self, input_keys):
"""
Sets the input_keys of this WorkflowTaskMeta.
An input key for the task
:param input_keys: The input_keys of this WorkflowTaskMeta.
:type: list[str]
"""
self._input_keys = input_keys
@property
def internal(self):
"""
Gets the internal of this WorkflowTaskMeta.
Denotes whether or not this is an internal task. Internal tasks will be hidden from the UI within a workflow.
:return: The internal of this WorkflowTaskMeta.
:rtype: bool
"""
return self._internal
@internal.setter
def internal(self, internal):
"""
Sets the internal of this WorkflowTaskMeta.
Denotes whether or not this is an internal task. Internal tasks will be hidden from the UI within a workflow.
:param internal: The internal of this WorkflowTaskMeta.
:type: bool
"""
self._internal = internal
@property
def name(self):
"""
Gets the name of this WorkflowTaskMeta.
A task name that should be unique in Conductor DB
:return: The name of this WorkflowTaskMeta.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this WorkflowTaskMeta.
A task name that should be unique in Conductor DB
:param name: The name of this WorkflowTaskMeta.
:type: str
"""
self._name = name
@property
def output_keys(self):
"""
Gets the output_keys of this WorkflowTaskMeta.
An output key for the task
:return: The output_keys of this WorkflowTaskMeta.
:rtype: list[str]
"""
return self._output_keys
@output_keys.setter
def output_keys(self, output_keys):
"""
Sets the output_keys of this WorkflowTaskMeta.
An output key for the task
:param output_keys: The output_keys of this WorkflowTaskMeta.
:type: list[str]
"""
self._output_keys = output_keys
@property
def response_timeout_sec(self):
"""
Gets the response_timeout_sec of this WorkflowTaskMeta.
        The worker response timeout value
:return: The response_timeout_sec of this WorkflowTaskMeta.
:rtype: int
"""
return self._response_timeout_sec
@response_timeout_sec.setter
def response_timeout_sec(self, response_timeout_sec):
"""
Sets the response_timeout_sec of this WorkflowTaskMeta.
        The worker response timeout value
:param response_timeout_sec: The response_timeout_sec of this WorkflowTaskMeta.
:type: int
"""
self._response_timeout_sec = response_timeout_sec
@property
def retry_count(self):
"""
Gets the retry_count of this WorkflowTaskMeta.
        The number of retries for this task
:return: The retry_count of this WorkflowTaskMeta.
:rtype: int
"""
return self._retry_count
@retry_count.setter
def retry_count(self, retry_count):
"""
Sets the retry_count of this WorkflowTaskMeta.
        The number of retries for this task
:param retry_count: The retry_count of this WorkflowTaskMeta.
:type: int
"""
self._retry_count = retry_count
@property
def retry_delay_sec(self):
"""
Gets the retry_delay_sec of this WorkflowTaskMeta.
        The time by which the retry will be delayed
:return: The retry_delay_sec of this WorkflowTaskMeta.
:rtype: int
"""
return self._retry_delay_sec
@retry_delay_sec.setter
def retry_delay_sec(self, retry_delay_sec):
"""
Sets the retry_delay_sec of this WorkflowTaskMeta.
        The time by which the retry will be delayed
:param retry_delay_sec: The retry_delay_sec of this WorkflowTaskMeta.
:type: int
"""
self._retry_delay_sec = retry_delay_sec
@property
def retry_logic(self):
"""
Gets the retry_logic of this WorkflowTaskMeta.
A logic which defines the way to handle retry (FIXED, EXPONENTIAL_BACKOFF)
:return: The retry_logic of this WorkflowTaskMeta.
:rtype: str
"""
return self._retry_logic
@retry_logic.setter
def retry_logic(self, retry_logic):
"""
Sets the retry_logic of this WorkflowTaskMeta.
A logic which defines the way to handle retry (FIXED, EXPONENTIAL_BACKOFF)
:param retry_logic: The retry_logic of this WorkflowTaskMeta.
:type: str
"""
self._retry_logic = retry_logic
@property
def src(self):
"""
Gets the src of this WorkflowTaskMeta.
        The service that owns the task metadata
:return: The src of this WorkflowTaskMeta.
:rtype: str
"""
return self._src
@src.setter
def src(self, src):
"""
Sets the src of this WorkflowTaskMeta.
        The service that owns the task metadata
:param src: The src of this WorkflowTaskMeta.
:type: str
"""
self._src = src
@property
def timeout_policy(self):
"""
Gets the timeout_policy of this WorkflowTaskMeta.
A policy which defines the way to handle timeout (RETRY, TIME_OUT_WF, ALERT_ONLY)
:return: The timeout_policy of this WorkflowTaskMeta.
:rtype: str
"""
return self._timeout_policy
@timeout_policy.setter
def timeout_policy(self, timeout_policy):
"""
Sets the timeout_policy of this WorkflowTaskMeta.
A policy which defines the way to handle timeout (RETRY, TIME_OUT_WF, ALERT_ONLY)
:param timeout_policy: The timeout_policy of this WorkflowTaskMeta.
:type: str
"""
self._timeout_policy = timeout_policy
@property
def timeout_sec(self):
"""
Gets the timeout_sec of this WorkflowTaskMeta.
        A timeout value for the task (in seconds)
:return: The timeout_sec of this WorkflowTaskMeta.
:rtype: int
"""
return self._timeout_sec
@timeout_sec.setter
def timeout_sec(self, timeout_sec):
"""
Sets the timeout_sec of this WorkflowTaskMeta.
        A timeout value for the task (in seconds)
:param timeout_sec: The timeout_sec of this WorkflowTaskMeta.
:type: int
"""
self._timeout_sec = timeout_sec
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, WorkflowTaskMeta):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
intersight/models/workflow_task_meta.py
|
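# --- Illustration (not part of workflow_task_meta.py) -----------------------
# Like other generated Swagger models, WorkflowTaskMeta is built with keyword
# arguments and inspected through to_dict()/to_str(). The import path below
# assumes the file above is packaged as intersight.models.workflow_task_meta.
from intersight.models.workflow_task_meta import WorkflowTaskMeta

task = WorkflowTaskMeta(name='SampleTask', retry_count=3, retry_logic='FIXED',
                        timeout_policy='TIME_OUT_WF', timeout_sec=600)
print(task.to_dict()['name'])        # -> 'SampleTask'
print(task == WorkflowTaskMeta(name='SampleTask', retry_count=3,
                               retry_logic='FIXED', timeout_policy='TIME_OUT_WF',
                               timeout_sec=600))  # -> True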
| 0.489015 | 0.099121 |
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import solve_ivp
import csv
def red_blue_ode(t, p):
r, b, rt, bt = p
dp = [0, 0, 0, 0]
lambda_a = 1.0
lambda_t = 1.0
p_t = 0.5
p_c = 0.5
k = 10.0
flag_r_0 = 1.0 if r > 0 else 0.0
flag_b_0 = 1.0 if b > 0 else 0.0
flag_bt_0 = 1.0 if bt > 0 else 0.0
flag_rt_0 = 1.0 if rt > 0 else 0.0
flag_r_rt_0 = 1.0 if r+rt > 0 else 0.0
flag_b_bt_0 = 1.0 if b+bt > 0 else 0.0
# R_INDEX = 0;
dp[0] = -flag_r_0 * lambda_a * k * r / (r + bt) * p_t * r - flag_r_0 * lambda_a * k * r / (r + bt) * p_t * rt \
+ flag_r_rt_0 * lambda_t * rt - flag_r_0 * lambda_t * r / (r + rt) * rt \
+ flag_b_0 * lambda_t * (b / (b + bt)) * bt \
+ flag_bt_0 * lambda_t * (bt / (b + bt)) * bt \
+ flag_rt_0 * lambda_a * p_c * k * rt / (rt + b) * (b + bt)
# B_INDEX = 1;
dp[1] = -flag_b_0 * lambda_a * k * b / (b + rt) * p_t * b \
- flag_b_0 * lambda_a * k * b / ((b + rt)) * p_t * bt \
+ flag_b_bt_0 * lambda_t * bt \
+ flag_r_0 * lambda_t * r / (r + rt) * rt \
- flag_b_0 * lambda_t * b / (b + bt) * bt \
+ flag_rt_0 * lambda_t * (rt / (r + rt)) * rt \
+ flag_bt_0 * lambda_a * p_c * k * (bt / (r + bt)) * (r + rt)
    # RT_INDEX = 2;
dp[2] = -flag_rt_0 * lambda_a * p_c * k * rt / (rt + b) * (b + bt) \
- flag_r_rt_0 * lambda_t * rt \
- flag_rt_0 * lambda_t * (rt / (r + rt)) * rt \
+ flag_r_0 * lambda_a * k * (r / (r + bt)) * p_t * r \
+ flag_r_0 * lambda_a * k * (r / (r + bt)) * p_t * rt
# BT_INDEX = 3;
dp[3] = -flag_bt_0 * lambda_a * k * (bt / (r + bt)) * p_c * (r + rt) \
- flag_b_bt_0 * lambda_t * bt \
- flag_bt_0 * lambda_t * (bt / (b + bt)) * bt \
+ flag_b_0 * lambda_a * k * b / (b + rt) * p_t * b \
+ flag_b_0 * lambda_a * k * b / (b + rt) * p_t * bt
return dp
def solve_ode( ):
return solve_ivp(red_blue_ode, [0, 10], [0.97, 0.01, 0.01, 0.01], dense_output=True)
def load_data_file(file, scale=1):
t = []
data = []
with open(file, 'r') as csvfile:
plots = csv.reader(csvfile, delimiter='\t')
for row in plots:
t.append(float(row[0]))
data.append(float(row[1])/(100*scale))
return t, data
def load_simulation_data(source_dir, scale):
t, r_data = load_data_file(source_dir+'rb_'+str(scale)+'__r_.data',scale=scale)
_, b_data = load_data_file(source_dir+'rb_'+str(scale)+'__b_.data',scale=scale)
_, rt_data = load_data_file(source_dir+'rb_'+str(scale)+'__rt_.data',scale=scale)
_, bt_data = load_data_file(source_dir+'rb_'+str(scale)+'__bt_.data',scale=scale)
return t, r_data, b_data, rt_data, bt_data
def setup_legend_and_fonts(title,file):
plt.legend(fontsize=15,loc='best')
plt.title(title,fontsize=20)
plt.ylim(-0.05, 1.1)
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
plt.xlabel('Time units',fontsize=15)
plt.ylabel('% Population',fontsize=15)
plt.savefig(file)
plt.show()
def plot_all_simulation_data(source_dir, scale):
time, r_data, b_data, rt_data, bt_data = load_simulation_data(source_dir, scale)
plt.plot(time, r_data,label='R')
plt.plot(time, b_data,label='B')
plt.plot(time, rt_data,label='RT')
plt.plot(time, bt_data,label='BT')
setup_legend_and_fonts('Simulation (N='+str(scale)+")",'ac_sim_'+str(scale)+'.png')
def plot_red_blue_simulation_data(source_dir, scale):
time, r_data, b_data, rt_data, bt_data = load_simulation_data(source_dir, scale)
red = [ r_data[i]+rt_data[i] for i in range(0,len(time))]
blue = [ b_data[i]+bt_data[i] for i in range(0,len(time))]
plt.plot(time, red, label='R+RT')
plt.plot(time, blue, label='B+BT')
setup_legend_and_fonts('Simulation (N='+str(scale)+')', 'ac_sim_rb_'+str(scale)+'.png')
def plot_red_simulation_data_with_ode(source_dir, scale):
time, r_data, _, rt_data, _ = load_simulation_data(source_dir, scale)
    sol = solve_ode()
t = np.linspace(0, 10, 100)
z = sol.sol(t)
plt.plot(time, r_data,label='R')
plt.plot(time, rt_data,label='RT')
plt.plot(t, z[0],label='R ODE')
plt.plot(t, z[2],label='RT ODE')
setup_legend_and_fonts('Fluid approximation and Simulation (N='+str(scale)+')', 'ac_sim_ode_r_rt_'+str(scale)+'.png')
def plot_blue_simulation_data_with_ode(source_dir, scale):
time, _, b_data, _, bt_data = load_simulation_data(source_dir, scale)
    sol = solve_ode()
t = np.linspace(0, 10, 100)
z = sol.sol(t)
plt.plot(time, b_data,label='B')
plt.plot(time, bt_data,label='BT')
plt.plot(t, z[1],label='B ODE')
plt.plot(t, z[3],label='BT ODE')
setup_legend_and_fonts('Fluid approximation and Simulation (N='+str(scale)+')', 'ac_sim_ode_b_bt_'+str(scale)+'.png')
def plot_red_blue_simulation_data_with_ode(source_dir, scale):
time, r_data, b_data, rt_data, bt_data = load_simulation_data(source_dir, scale)
    sol = solve_ode()
t = np.linspace(0, 10, 100)
z = sol.sol(t)
red = [ r_data[i]+rt_data[i] for i in range(0,len(time))]
blue = [ b_data[i]+bt_data[i] for i in range(0,len(time))]
red_ode = [ z[0][i]+z[2][i] for i in range(0,len(t)) ]
blue_ode = [ z[1][i]+z[3][i] for i in range(0,len(t)) ]
plt.plot(time, red, label='R+RT')
plt.plot(time, blue, label='B+BT')
plt.plot(t, red_ode, label='R+RT ODE')
plt.plot(t, blue_ode, label='B+BT ODE')
setup_legend_and_fonts('Fluid approximation and Simulation (N='+str(scale)+')', 'ac_sim_ode_rb_'+str(scale)+'.png')
if __name__=='__main__':
dir = '../data/'
#dir = '/Users/loreti/Desktop/DATA/'
plot_all_simulation_data(dir,10)
plot_red_blue_simulation_data(dir,10)
for scale in [1, 10, 100, 1000]:
plot_blue_simulation_data_with_ode(dir, scale)
plot_red_simulation_data_with_ode(dir, scale)
plot_red_blue_simulation_data_with_ode(dir, scale)
|
plotscript/plot_rb_files.py
|
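# --- Illustration (not part of plot_rb_files.py) ----------------------------
# red_blue_ode only moves mass between the four compartments (every negative
# term has a matching positive counterpart), so r + b + rt + bt should stay at
# its initial value of 1.0. A quick check through the dense_output interpolant
# returned by solve_ode(); the import assumes the file above is importable as
# plot_rb_files.
import numpy as np
from plot_rb_files import solve_ode

sol = solve_ode()
t = np.linspace(0, 10, 11)
z = sol.sol(t)                     # shape (4, len(t)): rows are r, b, rt, bt
print(np.allclose(z.sum(axis=0), 1.0, atol=1e-6))  # expected: True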
| 0.314156 | 0.477371 |
import keras
from keras.models import Sequential, Model
from keras.layers import Activation, Merge, Reshape
from keras.layers import Input, Embedding, Dense, dot
from keras.layers.core import Lambda
from keras import optimizers
from keras import backend as K
import numpy as np
import random
import utils.process as process
from utils.log_tool import data_process_logger as logger
def skipgram_model(vocab_size, embedding_dim=100, paradigm='Functional'):
# Sequential paradigm
if paradigm == 'Sequential':
target = Sequential()
target.add(Embedding(vocab_size, embedding_dim, input_length=1))
context = Sequential()
context.add(Embedding(vocab_size, embedding_dim, input_length=1))
# merge the pivot and context models
model = Sequential()
model.add(Merge([target, context], mode='dot'))
model.add(Reshape((1,), input_shape=(1,1)))
model.add(Activation('sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')
return model
# Functional paradigm
elif paradigm == 'Functional':
target = Input(shape=(1,), name='target')
context = Input(shape=(1,), name='context')
#print target.shape, context.shape
shared_embedding = Embedding(vocab_size, embedding_dim, input_length=1, name='shared_embedding')
embedding_target = shared_embedding(target)
embedding_context = shared_embedding(context)
#print embedding_target.shape, embedding_context.shape
merged_vector = dot([embedding_target, embedding_context], axes=-1)
reshaped_vector = Reshape((1,), input_shape=(1,1))(merged_vector)
#print merged_vector.shape
prediction = Dense(1, input_shape=(1,), activation='sigmoid')(reshaped_vector)
#print prediction.shape
model = Model(inputs=[target, context], outputs=prediction)
model.compile(optimizer='adam', loss='binary_crossentropy')
return model
else:
print('paradigm error')
return None
def skipgram_reader_generator(movie_dict, file_name=process.DoulistCorpusNameFile, context_window=2):
def reader():
vocabulary_size = len(movie_dict)
with open(file_name) as fopen:
for line in fopen:
line_list = line.strip().split('\t')
movie_ids = [movie_dict.get(_, movie_dict['<unk>']) for _ in line_list]
for i in range(len(movie_ids)):
target = movie_ids[i]
# generate positive sample
context_list = []
j = i - context_window
while j <= i + context_window and j < len(movie_ids):
if j >= 0 and j != i:
context_list.append(movie_ids[j])
yield ((target, movie_ids[j]), 1)
j += 1
# generate negative sample
for _ in range(len(context_list)):
ne_idx = random.randrange(0, vocabulary_size)
while ne_idx in context_list:
ne_idx = random.randrange(0, vocabulary_size)
yield ((target, ne_idx), 0)
return reader
def cbow_base_model(dict_size, emb_size=100, context_window_size=4):
model = keras.models.Sequential()
model.add(Embedding(dict_size, emb_size,
input_length=context_window_size,
embeddings_initializer=keras.initializers.TruncatedNormal(mean=0.0, stddev=0.2),
))
model.add(Lambda(lambda x: K.mean(x, axis=1), output_shape=(emb_size,)))
model.add(Dense(dict_size))
model.add(Activation('softmax')) # TODO: use nce
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd,
loss='categorical_crossentropy',)
return model
def train_cbow_base_model():
min_word_freq = 5
word_dict = process.get_movie_name_id_dict(min_word_freq=min_word_freq)
dict_size = len(word_dict)
emb_size = 100
context_window_size = 4
epochs = 20
batch_size = 128
model = cbow_base_model(dict_size, emb_size, context_window_size)
for epoch_id in xrange(epochs):
# train by batch
batch_id = 0
x_batch = []
y_batch = []
for movie_ids in process.shuffle(process.reader_creator(word_dict, ngram=context_window_size+1), 10000)():
batch_id += 1
if batch_id % (batch_size*50) == 0:
# Print evaluate log
score = model.evaluate(np.array(x_batch),
keras.utils.to_categorical(y_batch, num_classes=dict_size))
logger.info('[epoch #%d] batch #%d, train loss:%s' % (epoch_id, batch_id, score))
if batch_id % batch_size == 0:
# Convert labels to categorical one-hot encoding
model.train_on_batch(np.array(x_batch),
keras.utils.to_categorical(y_batch, num_classes=dict_size))
x_batch = []
y_batch = []
x = np.array(movie_ids[:context_window_size])
y = movie_ids[-1]
x_batch.append(x)
y_batch.append(y)
logger.info('model train done')
# store word embedding
with open('./models/keras_0804_09_cbow', 'w') as fwrite:
for idx, vec in enumerate(model.layers[0].get_weights()[0].tolist()):
fwrite.write('%d %s\n' % (idx, ' '.join([str(_) for _ in vec])))
if __name__ == '__main__':
# network conf
paradigm = 'Functional'
min_word_freq = 10
word_dict = process.get_movie_name_id_dict(min_word_freq=min_word_freq)
dict_size = len(word_dict)
emb_size = 100
context_window_size = 2
epochs = 50
batch_size = 256
model = skipgram_model(dict_size, emb_size, paradigm)
#print model.layers
for epoch_id in xrange(epochs):
# train by batch
batch_id = 0
x_batch = [[],[]]
y_batch = []
loss_list = []
for movie_ids, label in process.shuffle(skipgram_reader_generator(word_dict, context_window=context_window_size), 10000)():
batch_id += 1
x_batch[0].append(movie_ids[0])
x_batch[1].append(movie_ids[1])
y_batch.append(label)
if batch_id % (batch_size*1000) == 0:
# Print evaluate log
logger.info('[epoch #%d] batch #%d, train loss:%s' % (epoch_id, batch_id, np.mean(loss_list)))
loss_list = []
if batch_id % batch_size == 0:
X = [np.array(x_batch[0]), np.array(x_batch[1])]
loss = model.train_on_batch(X, np.array(y_batch))
loss_list.append(loss)
x_batch = [[],[]]
y_batch = []
logger.info('model train done')
# store word embedding
with open('./models/keras_0804_09_skipgram', 'w') as fwrite:
for idx, vec in enumerate(model.layers[2].get_weights()[0].tolist()):
fwrite.write('%d %s\n' % (idx, ' '.join([str(_) for _ in vec])))
|
keras_item2vec.py
|
import keras
from keras.models import Sequential, Model
from keras.layers import Activation, Merge, Reshape
from keras.layers import Input, Embedding, Dense, dot
from keras.layers.core import Lambda
from keras import optimizers
from keras import backend as K
import numpy as np
import random
import utils.process as process
from utils.log_tool import data_process_logger as logger
def skipgram_model(vocab_size, embedding_dim=100, paradigm='Functional'):
# Sequential paradigm
if paradigm == 'Sequential':
target = Sequential()
target.add(Embedding(vocab_size, embedding_dim, input_length=1))
context = Sequential()
context.add(Embedding(vocab_size, embedding_dim, input_length=1))
# merge the pivot and context models
model = Sequential()
model.add(Merge([target, context], mode='dot'))
model.add(Reshape((1,), input_shape=(1,1)))
model.add(Activation('sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')
return model
# Functional paradigm
elif paradigm == 'Functional':
target = Input(shape=(1,), name='target')
context = Input(shape=(1,), name='context')
#print target.shape, context.shape
shared_embedding = Embedding(vocab_size, embedding_dim, input_length=1, name='shared_embedding')
embedding_target = shared_embedding(target)
embedding_context = shared_embedding(context)
#print embedding_target.shape, embedding_context.shape
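        # dot product of the two single-step embeddings, squashed by a sigmoid, scores whether the (target, context) pair co-occurs (skip-gram with negative sampling)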
merged_vector = dot([embedding_target, embedding_context], axes=-1)
reshaped_vector = Reshape((1,), input_shape=(1,1))(merged_vector)
#print merged_vector.shape
prediction = Dense(1, input_shape=(1,), activation='sigmoid')(reshaped_vector)
#print prediction.shape
model = Model(inputs=[target, context], outputs=prediction)
model.compile(optimizer='adam', loss='binary_crossentropy')
return model
else:
print('paradigm error')
return None
def skipgram_reader_generator(movie_dict, file_name=process.DoulistCorpusNameFile, context_window=2):
def reader():
vocabulary_size = len(movie_dict)
with open(file_name) as fopen:
for line in fopen:
line_list = line.strip().split('\t')
movie_ids = [movie_dict.get(_, movie_dict['<unk>']) for _ in line_list]
for i in range(len(movie_ids)):
target = movie_ids[i]
# generate positive sample
context_list = []
j = i - context_window
while j <= i + context_window and j < len(movie_ids):
if j >= 0 and j != i:
context_list.append(movie_ids[j])
yield ((target, movie_ids[j]), 1)
j += 1
# generate negative sample
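                    # one random movie id is drawn per positive pair and resampled until it falls outside the true context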
for _ in range(len(context_list)):
ne_idx = random.randrange(0, vocabulary_size)
while ne_idx in context_list:
ne_idx = random.randrange(0, vocabulary_size)
yield ((target, ne_idx), 0)
return reader
def cbow_base_model(dict_size, emb_size=100, context_window_size=4):
model = keras.models.Sequential()
model.add(Embedding(dict_size, emb_size,
input_length=context_window_size,
embeddings_initializer=keras.initializers.TruncatedNormal(mean=0.0, stddev=0.2),
))
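    # average the context-word embeddings into a single vector (CBOW), then predict the target word over the whole vocabulary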
model.add(Lambda(lambda x: K.mean(x, axis=1), output_shape=(emb_size,)))
model.add(Dense(dict_size))
model.add(Activation('softmax')) # TODO: use nce
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd,
loss='categorical_crossentropy',)
return model
def train_cbow_base_model():
min_word_freq = 5
word_dict = process.get_movie_name_id_dict(min_word_freq=min_word_freq)
dict_size = len(word_dict)
emb_size = 100
context_window_size = 4
epochs = 20
batch_size = 128
model = cbow_base_model(dict_size, emb_size, context_window_size)
for epoch_id in xrange(epochs):
# train by batch
batch_id = 0
x_batch = []
y_batch = []
for movie_ids in process.shuffle(process.reader_creator(word_dict, ngram=context_window_size+1), 10000)():
batch_id += 1
if batch_id % (batch_size*50) == 0:
# Print evaluate log
score = model.evaluate(np.array(x_batch),
keras.utils.to_categorical(y_batch, num_classes=dict_size))
logger.info('[epoch #%d] batch #%d, train loss:%s' % (epoch_id, batch_id, score))
if batch_id % batch_size == 0:
# Convert labels to categorical one-hot encoding
model.train_on_batch(np.array(x_batch),
keras.utils.to_categorical(y_batch, num_classes=dict_size))
x_batch = []
y_batch = []
x = np.array(movie_ids[:context_window_size])
y = movie_ids[-1]
x_batch.append(x)
y_batch.append(y)
logger.info('model train done')
# store word embedding
with open('./models/keras_0804_09_cbow', 'w') as fwrite:
for idx, vec in enumerate(model.layers[0].get_weights()[0].tolist()):
fwrite.write('%d %s\n' % (idx, ' '.join([str(_) for _ in vec])))
if __name__ == '__main__':
# network conf
paradigm = 'Functional'
min_word_freq = 10
word_dict = process.get_movie_name_id_dict(min_word_freq=min_word_freq)
dict_size = len(word_dict)
emb_size = 100
context_window_size = 2
epochs = 50
batch_size = 256
model = skipgram_model(dict_size, emb_size, paradigm)
#print model.layers
for epoch_id in xrange(epochs):
# train by batch
batch_id = 0
x_batch = [[],[]]
y_batch = []
loss_list = []
for movie_ids, label in process.shuffle(skipgram_reader_generator(word_dict, context_window=context_window_size), 10000)():
batch_id += 1
x_batch[0].append(movie_ids[0])
x_batch[1].append(movie_ids[1])
y_batch.append(label)
if batch_id % (batch_size*1000) == 0:
# Print evaluate log
logger.info('[epoch #%d] batch #%d, train loss:%s' % (epoch_id, batch_id, np.mean(loss_list)))
loss_list = []
if batch_id % batch_size == 0:
X = [np.array(x_batch[0]), np.array(x_batch[1])]
loss = model.train_on_batch(X, np.array(y_batch))
loss_list.append(loss)
x_batch = [[],[]]
y_batch = []
logger.info('model train done')
# store word embedding
with open('./models/keras_0804_09_skipgram', 'w') as fwrite:
for idx, vec in enumerate(model.layers[2].get_weights()[0].tolist()):
fwrite.write('%d %s\n' % (idx, ' '.join([str(_) for _ in vec])))
| 0.425247 | 0.271336 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
import json
import logging
from datetime import datetime
import parsedatetime
from dateutil.parser import parse
from flask import flash, Markup
from flask_appbuilder.security.sqla import models as ab_models
from markdown import markdown as md
from sqlalchemy.types import TypeDecorator, TEXT
def flasher(msg, severity=None):
"""Flask's flash if available, logging call if not"""
try:
flash(msg, severity)
except RuntimeError:
if severity == 'danger':
logging.error(msg)
else:
logging.info(msg)
class memoized(object): # noqa
"""Decorator that caches a function's return value each time it is called
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
value = self.func(*args)
self.cache[args] = value
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def __get__(self, obj, objtype):
"""Support instance methods."""
return functools.partial(self.__call__, obj)
def list_minus(l, minus):
"""Returns l without what is in minus
>>> list_minus([1, 2, 3], [2])
[1, 3]
"""
return [o for o in l if o not in minus]
def parse_human_datetime(s):
"""
Returns ``datetime.datetime`` from human readable strings
>>> from datetime import date, timedelta
>>> from dateutil.relativedelta import relativedelta
>>> parse_human_datetime('2015-04-03')
datetime.datetime(2015, 4, 3, 0, 0)
>>> parse_human_datetime('2/3/1969')
datetime.datetime(1969, 2, 3, 0, 0)
>>> parse_human_datetime("now") <= datetime.now()
True
>>> parse_human_datetime("yesterday") <= datetime.now()
True
>>> date.today() - timedelta(1) == parse_human_datetime('yesterday').date()
True
>>> year_ago_1 = parse_human_datetime('one year ago').date()
>>> year_ago_2 = (datetime.now() - relativedelta(years=1) ).date()
>>> year_ago_1 == year_ago_2
True
"""
try:
dttm = parse(s)
except Exception:
try:
cal = parsedatetime.Calendar()
dttm = dttm_from_timtuple(cal.parse(s)[0])
except Exception as e:
logging.exception(e)
raise ValueError("Couldn't parse date string [{}]".format(s))
return dttm
def dttm_from_timtuple(d):
return datetime(
d.tm_year, d.tm_mon, d.tm_mday, d.tm_hour, d.tm_min, d.tm_sec)
def merge_perm(sm, permission_name, view_menu_name):
pv = sm.find_permission_view_menu(permission_name, view_menu_name)
if not pv:
sm.add_permission_view_menu(permission_name, view_menu_name)
def parse_human_timedelta(s):
"""
    Returns ``datetime.timedelta`` from natural language time deltas
>>> parse_human_datetime("now") <= datetime.now()
True
"""
cal = parsedatetime.Calendar()
dttm = dttm_from_timtuple(datetime.now().timetuple())
d = cal.parse(s, dttm)[0]
d = datetime(
d.tm_year, d.tm_mon, d.tm_mday, d.tm_hour, d.tm_min, d.tm_sec)
return d - dttm
class JSONEncodedDict(TypeDecorator):
"""Represents an immutable structure as a json-encoded string."""
impl = TEXT
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
def init(caravel):
"""Inits the Caravel application with security roles and such"""
db = caravel.db
models = caravel.models
sm = caravel.appbuilder.sm
alpha = sm.add_role("Alpha")
admin = sm.add_role("Admin")
merge_perm(sm, 'all_datasource_access', 'all_datasource_access')
perms = db.session.query(ab_models.PermissionView).all()
for perm in perms:
if perm.permission.name == 'datasource_access':
continue
if perm.view_menu and perm.view_menu.name not in (
'UserDBModelView', 'RoleModelView', 'ResetPasswordView',
'Security'):
sm.add_permission_role(alpha, perm)
sm.add_permission_role(admin, perm)
gamma = sm.add_role("Gamma")
for perm in perms:
if(
perm.view_menu and perm.view_menu.name not in (
'ResetPasswordView',
'RoleModelView',
'UserDBModelView',
'Security') and
perm.permission.name not in (
'all_datasource_access',
'can_add',
'can_download',
'can_delete',
'can_edit',
'can_save',
'datasource_access',
'muldelete',
)):
sm.add_permission_role(gamma, perm)
session = db.session()
table_perms = [
table.perm for table in session.query(models.SqlaTable).all()]
table_perms += [
table.perm for table in session.query(models.DruidDatasource).all()]
for table_perm in table_perms:
merge_perm(sm, 'datasource_access', table_perm)
def datetime_f(dttm):
"""Formats datetime to take less room when it is recent"""
if dttm:
dttm = dttm.isoformat()
now_iso = datetime.now().isoformat()
if now_iso[:10] == dttm[:10]:
dttm = dttm[11:]
elif now_iso[:4] == dttm[:4]:
dttm = dttm[5:]
return "<nobr>{}</nobr>".format(dttm)
def json_iso_dttm_ser(obj):
"""
json serializer that deals with dates
>>> dttm = datetime(1970, 1, 1)
>>> json.dumps({'dttm': dttm}, default=json_iso_dttm_ser)
'{"dttm": "1970-01-01T00:00:00"}'
"""
if isinstance(obj, datetime):
obj = obj.isoformat()
return obj
def markdown(s, markup_wrap=False):
s = s or ''
s = md(s, [
'markdown.extensions.tables',
'markdown.extensions.fenced_code',
'markdown.extensions.codehilite',
])
if markup_wrap:
s = Markup(s)
return s
def readfile(filepath):
with open(filepath) as f:
content = f.read()
return content
|
caravel/utils.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
import json
import logging
from datetime import datetime
import parsedatetime
from dateutil.parser import parse
from flask import flash, Markup
from flask_appbuilder.security.sqla import models as ab_models
from markdown import markdown as md
from sqlalchemy.types import TypeDecorator, TEXT
def flasher(msg, severity=None):
"""Flask's flash if available, logging call if not"""
try:
flash(msg, severity)
except RuntimeError:
if severity == 'danger':
logging.error(msg)
else:
logging.info(msg)
class memoized(object): # noqa
"""Decorator that caches a function's return value each time it is called
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
value = self.func(*args)
self.cache[args] = value
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def __get__(self, obj, objtype):
"""Support instance methods."""
return functools.partial(self.__call__, obj)
def list_minus(l, minus):
"""Returns l without what is in minus
>>> list_minus([1, 2, 3], [2])
[1, 3]
"""
return [o for o in l if o not in minus]
def parse_human_datetime(s):
"""
Returns ``datetime.datetime`` from human readable strings
>>> from datetime import date, timedelta
>>> from dateutil.relativedelta import relativedelta
>>> parse_human_datetime('2015-04-03')
datetime.datetime(2015, 4, 3, 0, 0)
>>> parse_human_datetime('2/3/1969')
datetime.datetime(1969, 2, 3, 0, 0)
>>> parse_human_datetime("now") <= datetime.now()
True
>>> parse_human_datetime("yesterday") <= datetime.now()
True
>>> date.today() - timedelta(1) == parse_human_datetime('yesterday').date()
True
>>> year_ago_1 = parse_human_datetime('one year ago').date()
>>> year_ago_2 = (datetime.now() - relativedelta(years=1) ).date()
>>> year_ago_1 == year_ago_2
True
"""
try:
dttm = parse(s)
except Exception:
try:
cal = parsedatetime.Calendar()
dttm = dttm_from_timtuple(cal.parse(s)[0])
except Exception as e:
logging.exception(e)
raise ValueError("Couldn't parse date string [{}]".format(s))
return dttm
def dttm_from_timtuple(d):
return datetime(
d.tm_year, d.tm_mon, d.tm_mday, d.tm_hour, d.tm_min, d.tm_sec)
def merge_perm(sm, permission_name, view_menu_name):
pv = sm.find_permission_view_menu(permission_name, view_menu_name)
if not pv:
sm.add_permission_view_menu(permission_name, view_menu_name)
def parse_human_timedelta(s):
"""
    Returns ``datetime.timedelta`` from natural language time deltas
>>> parse_human_datetime("now") <= datetime.now()
True
"""
cal = parsedatetime.Calendar()
dttm = dttm_from_timtuple(datetime.now().timetuple())
d = cal.parse(s, dttm)[0]
d = datetime(
d.tm_year, d.tm_mon, d.tm_mday, d.tm_hour, d.tm_min, d.tm_sec)
return d - dttm
class JSONEncodedDict(TypeDecorator):
"""Represents an immutable structure as a json-encoded string."""
impl = TEXT
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
def init(caravel):
"""Inits the Caravel application with security roles and such"""
db = caravel.db
models = caravel.models
sm = caravel.appbuilder.sm
alpha = sm.add_role("Alpha")
admin = sm.add_role("Admin")
merge_perm(sm, 'all_datasource_access', 'all_datasource_access')
perms = db.session.query(ab_models.PermissionView).all()
for perm in perms:
if perm.permission.name == 'datasource_access':
continue
if perm.view_menu and perm.view_menu.name not in (
'UserDBModelView', 'RoleModelView', 'ResetPasswordView',
'Security'):
sm.add_permission_role(alpha, perm)
sm.add_permission_role(admin, perm)
gamma = sm.add_role("Gamma")
for perm in perms:
if(
perm.view_menu and perm.view_menu.name not in (
'ResetPasswordView',
'RoleModelView',
'UserDBModelView',
'Security') and
perm.permission.name not in (
'all_datasource_access',
'can_add',
'can_download',
'can_delete',
'can_edit',
'can_save',
'datasource_access',
'muldelete',
)):
sm.add_permission_role(gamma, perm)
session = db.session()
table_perms = [
table.perm for table in session.query(models.SqlaTable).all()]
table_perms += [
table.perm for table in session.query(models.DruidDatasource).all()]
for table_perm in table_perms:
merge_perm(sm, 'datasource_access', table_perm)
def datetime_f(dttm):
"""Formats datetime to take less room when it is recent"""
if dttm:
dttm = dttm.isoformat()
now_iso = datetime.now().isoformat()
if now_iso[:10] == dttm[:10]:
dttm = dttm[11:]
elif now_iso[:4] == dttm[:4]:
dttm = dttm[5:]
return "<nobr>{}</nobr>".format(dttm)
def json_iso_dttm_ser(obj):
"""
json serializer that deals with dates
>>> dttm = datetime(1970, 1, 1)
>>> json.dumps({'dttm': dttm}, default=json_iso_dttm_ser)
'{"dttm": "1970-01-01T00:00:00"}'
"""
if isinstance(obj, datetime):
obj = obj.isoformat()
return obj
def markdown(s, markup_wrap=False):
s = s or ''
s = md(s, [
'markdown.extensions.tables',
'markdown.extensions.fenced_code',
'markdown.extensions.codehilite',
])
if markup_wrap:
s = Markup(s)
return s
def readfile(filepath):
with open(filepath) as f:
content = f.read()
return content
| 0.728169 | 0.157752 |
from sklearn import metrics
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.colors as colors
import itertools
import pandas as pd
from imblearn.metrics import sensitivity_specificity_support
import os
def multiclass_predict_1d_to_nd(y_, unique_labels):
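    # one-hot encode a label vector against unique_labels; if the values do not line up with unique_labels, treat y_ as per-class scores and take the argmax first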
if(len(np.unique(y_)) != len(unique_labels)):
y_ = y_.argmax(axis=1)
y_new = []
for y in y_:
values = []
for u in unique_labels:
if(u == y):
values.append(1)
else:
values.append(0)
y_new.append(values)
return np.array(y_new)
def multiclass_predict_nd_to_1d(y_):
return y_.argmax(axis=1)
def prc_auc(y_true, y_pred, class_names):
if(len(y_pred.shape) == 1):
y_pred = multiclass_predict_1d_to_nd(y_pred, np.unique(y_pred))
y_true = multiclass_predict_1d_to_nd(y_true, np.unique(y_true))
n_classes = len(class_names)
precision = dict()
recall = dict()
average_precision = []
for i in range(n_classes):
precision[i], recall[i], _ = metrics.precision_recall_curve(y_true[:, i],
y_pred[:, i])
average_precision.append(
metrics.average_precision_score(y_true[:, i], y_pred[:, i]))
return average_precision
def roc_auc(y_true, y_pred, class_names):
if(len(y_pred.shape) == 1):
y_pred = multiclass_predict_1d_to_nd(y_pred, np.unique(y_pred))
y_true = multiclass_predict_1d_to_nd(y_true, np.unique(y_true))
n_classes = len(class_names)
fpr = dict()
tpr = dict()
roc_auc = []
for i in range(n_classes):
fpr[i], tpr[i], _ = metrics.roc_curve(y_true[:, i], y_pred[:, i])
roc_auc.append(metrics.auc(fpr[i], tpr[i]))
return roc_auc
def recall(tp, p):
return tp/p
def specificity(tn, n):
return tn/n
def accuracy(tn, tp, p, n):
return (tn + tp) / (p + n)
def precision(tp, fp):
return tp/(fp + tp)
def f1_score(y_true, y_pred):
if(len(np.unique(y_pred)) != len(np.unique(y_true))):
y_pred = multiclass_predict_nd_to_1d(y_pred)
y_true = multiclass_predict_nd_to_1d(y_true)
return metrics.f1_score(y_true, y_pred, average=None)
def get_metrics(y_test, y_pred, class_names=None, save_path=None):
y_test = np.array(y_test)
y_pred = np.array(y_pred)
uniques = np.unique(y_test)
if(class_names is None):
class_names = list(uniques)
if(len(y_test.shape) == 1):
matrix = metrics.confusion_matrix(y_test, y_pred, labels=uniques)
#y_pred = multiclass_predict_1d_to_nd(y_pred, columns)
#y_true = multiclass_predict_1d_to_nd(y_true, columns)
else:
#y_pred = multiclass_predict_nd_to_1d(y_pred)
#y_true = multiclass_predict_nd_to_1d(y_true)
matrix = metrics.confusion_matrix(multiclass_predict_nd_to_1d(
y_test), multiclass_predict_nd_to_1d(y_pred))
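    # per-class counts from the confusion matrix: the diagonal holds TP, column sums minus TP give FP, row sums minus TP give FN, and the remainder is TN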
TP = np.diag(matrix)
FP = matrix.sum(axis=0) - TP
FN = matrix.sum(axis=1) - TP
TN = matrix.sum() - (FP + FN + TP)
P = TP+FN
N = TN+FP
metrics_ = pd.DataFrame()
rows = class_names.copy()
rows.append('Média')
metrics_['Classes'] = rows
_f1 = np.around(f1_score(y_test, y_pred), decimals=2)
_f1 = np.append(_f1, np.around(np.mean(_f1), decimals=2))
_roc_auc = np.around(roc_auc(y_test, y_pred, class_names), decimals=2)
_roc_auc = np.append(_roc_auc, np.around(np.mean(_roc_auc), decimals=2))
_prc_auc = np.around(prc_auc(y_test, y_pred, class_names), decimals=2)
_prc_auc = np.append(_prc_auc, np.around(np.mean(_prc_auc), decimals=2))
_precision = np.around(precision(TP, FP), decimals=2)
_precision = np.append(_precision, np.around(
np.mean(_precision), decimals=2))
_recall = np.around(recall(TP, P), decimals=2)
_recall = np.append(_recall, np.around(np.mean(_recall), decimals=2))
_specificity = np.around(specificity(TN, N), decimals=2)
_specificity = np.append(_specificity, np.around(
np.mean(_specificity), decimals=2))
_accuracy = np.around(accuracy(TN, TP, P, N), decimals=2)
_accuracy = np.append(_accuracy, np.around(np.mean(_accuracy), decimals=2))
metrics_["F1"] = _f1
metrics_["ROC AUC"] = _roc_auc
metrics_["PRC AUC"] = _prc_auc
metrics_["Precision"] = _precision
metrics_["Recall"] = _recall
metrics_["Specificity"] = _specificity
metrics_["Accuracy"] = _accuracy
if(save_path is not None):
if(not os.path.isdir(save_path)):
os.makedirs(save_path, exist_ok=True)
metrics_.to_csv(os.path.join(save_path, 'metrics.csv'),
index=False, header=True)
return metrics_
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
new_cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
def plot_confusion_matrix(y_test, y_pred, class_names=None, save_path=None, visualize=False, cmap=None, normalize=True, labels=True, title='Matriz de confusão'):
y_test = np.array(y_test)
y_pred = np.array(y_pred)
uniques = np.unique(y_pred)
if(len(y_pred.shape) == 1):
cm = metrics.confusion_matrix(y_test, y_pred, labels=uniques)
else:
y_test = multiclass_predict_nd_to_1d(y_test)
y_pred = multiclass_predict_nd_to_1d(y_pred)
cm = metrics.confusion_matrix(y_test, y_pred)
rotulos = []
for index, value in enumerate(uniques):
for i, v in enumerate(uniques):
rotulos.append('')
if cmap is None:
cmap = plt.get_cmap('Blues')
cmap = truncate_colormap(cmap, 0.35, 0.85)
perc_cm = None
if normalize:
perc_cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        # change by wenisten: scale the normalized matrix up to percentages.
perc_cm = perc_cm*100
fig = plt.figure(figsize=(6, 6), edgecolor='k') # (8, 6))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
#plt.clim(-5, 2.0)
plt.xlim(-0.5, len(np.unique(y_test))-0.5)
plt.ylim(len(np.unique(y_test))-0.5, -0.5)
plt.title(title, fontsize=16)
plt.colorbar()
#plt.ylim(-0.5, len(class_names) - 0.5)
thresh = cm.max() / 1.5 if normalize else cm.max() / 2
if class_names is not None:
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, fontsize=16,
rotation=45, ha='right', rotation_mode="anchor")
plt.yticks(tick_marks, class_names, fontsize=16)
contador = 0
if labels:
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
plt.text(j, i, f"{'{:0.2f}%'.format(perc_cm[i, j])}\n({cm[i, j]})", fontsize=16,
horizontalalignment='center', verticalalignment='center',
                         color='white')
contador = contador+1
else:
plt.text(j, i, '{:,}'.format(cm[i, j]), fontsize=16,
horizontalalignment='center', verticalalignment='center',
                         color='white')
plt.tight_layout()
plt.ylabel('True label', fontsize=16)
plt.xlabel('Predicted label', fontsize=16)
if(save_path is not None):
if(not os.path.isdir(save_path)):
os.makedirs(save_path, exist_ok=True)
fig.savefig(os.path.join(save_path, 'confusion_matriz.png'),
dpi=180, bbox_inches='tight')
if(visualize):
plt.show()
plt.close()
def plot_auc_roc_multi_class(y_test, y_pred, class_names, save_path=None):
y_test = np.array(y_test)
y_pred = np.array(y_pred)
if(len(y_pred.shape) == 1):
y_pred = multiclass_predict_1d_to_nd(y_pred, np.unique(y_test))
y_test = multiclass_predict_1d_to_nd(y_test, np.unique(y_test))
# else:
#y_pred = multiclass_predict_nd_to_1d(y_pred)
#y_test = multiclass_predict_nd_to_1d(y_test)
#y_pred = multiclass_predict_1d_to_nd(y_pred, class_names)
#y_test = multiclass_predict_1d_to_nd(y_test, class_names)
n_classes = len(class_names)
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = metrics.roc_curve(y_test[:, i], y_pred[:, i])
roc_auc[i] = metrics.auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = metrics.roc_curve(
y_test.ravel(), y_pred.ravel())
roc_auc["micro"] = metrics.auc(fpr["micro"], tpr["micro"])
lw = 2
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = metrics.auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure(figsize=(15, 10))
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = itertools.cycle(['aqua', 'darkorange', 'cornflowerblue'])
roc_auc_of_classes = []
for i, color in zip(range(n_classes), colors):
roc_auc_of_classes.append(roc_auc[i])
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(class_names[i], roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('AUC - ROC Curve')
plt.legend(loc="lower right")
if(save_path is not None):
if(not os.path.isdir(save_path)):
os.makedirs(save_path, exist_ok=True)
plt.savefig(os.path.join(save_path, 'AUC_ROC.png'))
plt.show()
def plot_prc_auc_multiclass(y_test, y_pred, class_names, save_path=None):
y_test = np.array(y_test)
y_pred = np.array(y_pred)
if(len(y_pred.shape) == 1):
y_pred = multiclass_predict_1d_to_nd(y_pred, np.unique(y_test))
y_test = multiclass_predict_1d_to_nd(y_test, np.unique(y_test))
# else:
#y_pred = multiclass_predict_nd_to_1d(y_pred)
#y_test = multiclass_predict_nd_to_1d(y_test)
#y_pred = multiclass_predict_1d_to_nd(y_pred, class_names)
#y_test = multiclass_predict_1d_to_nd(y_test, class_names)
n_classes = len(class_names)
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = metrics.precision_recall_curve(y_test[:, i],
y_pred[:, i])
average_precision[i] = metrics.average_precision_score(
y_test[:, i], y_pred[:, i])
# A "micro-average": quantifying score on all classes jointly
precision["micro"], recall["micro"], _ = metrics.precision_recall_curve(y_test.ravel(),
y_pred.ravel())
average_precision["micro"] = metrics.average_precision_score(y_test, y_pred,
average="micro")
# print('Average precision score, micro-averaged over all classes: {0:0.2f}'
# .format(average_precision["micro"]))
colors = itertools.cycle(
['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
plt.figure(figsize=(15, 10))
f_scores = np.linspace(0.2, 0.8, num=4)
lines = []
labels = []
for f_score in f_scores:
x = np.linspace(0.01, 1)
y = f_score * x / (2 * x - f_score)
l, = plt.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2)
plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02))
lines.append(l)
labels.append('iso-f1 curves')
l, = plt.plot(recall["micro"], precision["micro"], color='gold', lw=2)
lines.append(l)
labels.append('micro-average Precision-recall (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i, color in zip(range(n_classes), colors):
l, = plt.plot(recall[i], precision[i], color=color, lw=2)
lines.append(l)
labels.append('Precision-recall for class {0} (area = {1:0.2f})'
''.format(class_names[i], average_precision[i]))
fig = plt.gcf()
fig.subplots_adjust(bottom=0.25)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(lines, labels, loc=(0, -.38), prop=dict(size=14))
if(save_path is not None):
if(not os.path.isdir(save_path)):
os.makedirs(save_path, exist_ok=True)
plt.savefig(os.path.join(save_path, 'AUC_PRC.png'))
plt.show()
def plot_graphics(y_true, y_pred, class_names=None, save_path=None):
if(class_names is None):
class_names = np.unique(np.array(y_pred))
    # the plotting helpers above render and/or save their figures and return None,
    # so they are called directly rather than wrapped in IPython's display()
    plot_confusion_matrix(y_true, y_pred, visualize=True,
                          normalize=True, class_names=class_names, save_path=save_path)
    plot_auc_roc_multi_class(y_true, y_pred,
                             class_names=class_names, save_path=save_path)
    plot_prc_auc_multiclass(y_true, y_pred,
                            class_names=class_names, save_path=save_path)
|
pyaiutils/__init__.py
|
from sklearn import metrics
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.colors as colors
import itertools
import pandas as pd
from imblearn.metrics import sensitivity_specificity_support
import os
def multiclass_predict_1d_to_nd(y_, unique_labels):
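    # one-hot encode a label vector against unique_labels; if the values do not line up with unique_labels, treat y_ as per-class scores and take the argmax first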
if(len(np.unique(y_)) != len(unique_labels)):
y_ = y_.argmax(axis=1)
y_new = []
for y in y_:
values = []
for u in unique_labels:
if(u == y):
values.append(1)
else:
values.append(0)
y_new.append(values)
return np.array(y_new)
def multiclass_predict_nd_to_1d(y_):
return y_.argmax(axis=1)
def prc_auc(y_true, y_pred, class_names):
if(len(y_pred.shape) == 1):
y_pred = multiclass_predict_1d_to_nd(y_pred, np.unique(y_pred))
y_true = multiclass_predict_1d_to_nd(y_true, np.unique(y_true))
n_classes = len(class_names)
precision = dict()
recall = dict()
average_precision = []
for i in range(n_classes):
precision[i], recall[i], _ = metrics.precision_recall_curve(y_true[:, i],
y_pred[:, i])
average_precision.append(
metrics.average_precision_score(y_true[:, i], y_pred[:, i]))
return average_precision
def roc_auc(y_true, y_pred, class_names):
if(len(y_pred.shape) == 1):
y_pred = multiclass_predict_1d_to_nd(y_pred, np.unique(y_pred))
y_true = multiclass_predict_1d_to_nd(y_true, np.unique(y_true))
n_classes = len(class_names)
fpr = dict()
tpr = dict()
roc_auc = []
for i in range(n_classes):
fpr[i], tpr[i], _ = metrics.roc_curve(y_true[:, i], y_pred[:, i])
roc_auc.append(metrics.auc(fpr[i], tpr[i]))
return roc_auc
def recall(tp, p):
return tp/p
def specificity(tn, n):
return tn/n
def accuracy(tn, tp, p, n):
return (tn + tp) / (p + n)
def precision(tp, fp):
return tp/(fp + tp)
def f1_score(y_true, y_pred):
if(len(np.unique(y_pred)) != len(np.unique(y_true))):
y_pred = multiclass_predict_nd_to_1d(y_pred)
y_true = multiclass_predict_nd_to_1d(y_true)
return metrics.f1_score(y_true, y_pred, average=None)
def get_metrics(y_test, y_pred, class_names=None, save_path=None):
y_test = np.array(y_test)
y_pred = np.array(y_pred)
uniques = np.unique(y_test)
if(class_names is None):
class_names = list(uniques)
if(len(y_test.shape) == 1):
matrix = metrics.confusion_matrix(y_test, y_pred, labels=uniques)
#y_pred = multiclass_predict_1d_to_nd(y_pred, columns)
#y_true = multiclass_predict_1d_to_nd(y_true, columns)
else:
#y_pred = multiclass_predict_nd_to_1d(y_pred)
#y_true = multiclass_predict_nd_to_1d(y_true)
matrix = metrics.confusion_matrix(multiclass_predict_nd_to_1d(
y_test), multiclass_predict_nd_to_1d(y_pred))
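    # per-class counts from the confusion matrix: the diagonal holds TP, column sums minus TP give FP, row sums minus TP give FN, and the remainder is TN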
TP = np.diag(matrix)
FP = matrix.sum(axis=0) - TP
FN = matrix.sum(axis=1) - TP
TN = matrix.sum() - (FP + FN + TP)
P = TP+FN
N = TN+FP
metrics_ = pd.DataFrame()
rows = class_names.copy()
rows.append('Média')
metrics_['Classes'] = rows
_f1 = np.around(f1_score(y_test, y_pred), decimals=2)
_f1 = np.append(_f1, np.around(np.mean(_f1), decimals=2))
_roc_auc = np.around(roc_auc(y_test, y_pred, class_names), decimals=2)
_roc_auc = np.append(_roc_auc, np.around(np.mean(_roc_auc), decimals=2))
_prc_auc = np.around(prc_auc(y_test, y_pred, class_names), decimals=2)
_prc_auc = np.append(_prc_auc, np.around(np.mean(_prc_auc), decimals=2))
_precision = np.around(precision(TP, FP), decimals=2)
_precision = np.append(_precision, np.around(
np.mean(_precision), decimals=2))
_recall = np.around(recall(TP, P), decimals=2)
_recall = np.append(_recall, np.around(np.mean(_recall), decimals=2))
_specificity = np.around(specificity(TN, N), decimals=2)
_specificity = np.append(_specificity, np.around(
np.mean(_specificity), decimals=2))
_accuracy = np.around(accuracy(TN, TP, P, N), decimals=2)
_accuracy = np.append(_accuracy, np.around(np.mean(_accuracy), decimals=2))
metrics_["F1"] = _f1
metrics_["ROC AUC"] = _roc_auc
metrics_["PRC AUC"] = _prc_auc
metrics_["Precision"] = _precision
metrics_["Recall"] = _recall
metrics_["Specificity"] = _specificity
metrics_["Accuracy"] = _accuracy
if(save_path is not None):
if(not os.path.isdir(save_path)):
os.makedirs(save_path, exist_ok=True)
metrics_.to_csv(os.path.join(save_path, 'metrics.csv'),
index=False, header=True)
return metrics_
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
new_cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
def plot_confusion_matrix(y_test, y_pred, class_names=None, save_path=None, visualize=False, cmap=None, normalize=True, labels=True, title='Matriz de confusão'):
y_test = np.array(y_test)
y_pred = np.array(y_pred)
uniques = np.unique(y_pred)
if(len(y_pred.shape) == 1):
cm = metrics.confusion_matrix(y_test, y_pred, labels=uniques)
else:
y_test = multiclass_predict_nd_to_1d(y_test)
y_pred = multiclass_predict_nd_to_1d(y_pred)
cm = metrics.confusion_matrix(y_test, y_pred)
rotulos = []
for index, value in enumerate(uniques):
for i, v in enumerate(uniques):
rotulos.append('')
if cmap is None:
cmap = plt.get_cmap('Blues')
cmap = truncate_colormap(cmap, 0.35, 0.85)
perc_cm = None
if normalize:
perc_cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        # change by wenisten: scale the normalized matrix up to percentages.
perc_cm = perc_cm*100
fig = plt.figure(figsize=(6, 6), edgecolor='k') # (8, 6))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
#plt.clim(-5, 2.0)
plt.xlim(-0.5, len(np.unique(y_test))-0.5)
plt.ylim(len(np.unique(y_test))-0.5, -0.5)
plt.title(title, fontsize=16)
plt.colorbar()
#plt.ylim(-0.5, len(class_names) - 0.5)
thresh = cm.max() / 1.5 if normalize else cm.max() / 2
if class_names is not None:
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, fontsize=16,
rotation=45, ha='right', rotation_mode="anchor")
plt.yticks(tick_marks, class_names, fontsize=16)
contador = 0
if labels:
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
plt.text(j, i, f"{'{:0.2f}%'.format(perc_cm[i, j])}\n({cm[i, j]})", fontsize=16,
horizontalalignment='center', verticalalignment='center',
                         color='white')
contador = contador+1
else:
plt.text(j, i, '{:,}'.format(cm[i, j]), fontsize=16,
horizontalalignment='center', verticalalignment='center',
                         color='white')
plt.tight_layout()
plt.ylabel('True label', fontsize=16)
plt.xlabel('Predicted label', fontsize=16)
if(save_path is not None):
if(not os.path.isdir(save_path)):
os.makedirs(save_path, exist_ok=True)
fig.savefig(os.path.join(save_path, 'confusion_matriz.png'),
dpi=180, bbox_inches='tight')
if(visualize):
plt.show()
plt.close()
def plot_auc_roc_multi_class(y_test, y_pred, class_names, save_path=None):
y_test = np.array(y_test)
y_pred = np.array(y_pred)
if(len(y_pred.shape) == 1):
y_pred = multiclass_predict_1d_to_nd(y_pred, np.unique(y_test))
y_test = multiclass_predict_1d_to_nd(y_test, np.unique(y_test))
# else:
#y_pred = multiclass_predict_nd_to_1d(y_pred)
#y_test = multiclass_predict_nd_to_1d(y_test)
#y_pred = multiclass_predict_1d_to_nd(y_pred, class_names)
#y_test = multiclass_predict_1d_to_nd(y_test, class_names)
n_classes = len(class_names)
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = metrics.roc_curve(y_test[:, i], y_pred[:, i])
roc_auc[i] = metrics.auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = metrics.roc_curve(
y_test.ravel(), y_pred.ravel())
roc_auc["micro"] = metrics.auc(fpr["micro"], tpr["micro"])
lw = 2
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = metrics.auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure(figsize=(15, 10))
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = itertools.cycle(['aqua', 'darkorange', 'cornflowerblue'])
roc_auc_of_classes = []
for i, color in zip(range(n_classes), colors):
roc_auc_of_classes.append(roc_auc[i])
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(class_names[i], roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('AUC - ROC Curve')
plt.legend(loc="lower right")
if(save_path is not None):
if(not os.path.isdir(save_path)):
os.makedirs(save_path, exist_ok=True)
plt.savefig(os.path.join(save_path, 'AUC_ROC.png'))
plt.show()
def plot_prc_auc_multiclass(y_test, y_pred, class_names, save_path=None):
y_test = np.array(y_test)
y_pred = np.array(y_pred)
if(len(y_pred.shape) == 1):
y_pred = multiclass_predict_1d_to_nd(y_pred, np.unique(y_test))
y_test = multiclass_predict_1d_to_nd(y_test, np.unique(y_test))
# else:
#y_pred = multiclass_predict_nd_to_1d(y_pred)
#y_test = multiclass_predict_nd_to_1d(y_test)
#y_pred = multiclass_predict_1d_to_nd(y_pred, class_names)
#y_test = multiclass_predict_1d_to_nd(y_test, class_names)
n_classes = len(class_names)
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = metrics.precision_recall_curve(y_test[:, i],
y_pred[:, i])
average_precision[i] = metrics.average_precision_score(
y_test[:, i], y_pred[:, i])
# A "micro-average": quantifying score on all classes jointly
precision["micro"], recall["micro"], _ = metrics.precision_recall_curve(y_test.ravel(),
y_pred.ravel())
average_precision["micro"] = metrics.average_precision_score(y_test, y_pred,
average="micro")
# print('Average precision score, micro-averaged over all classes: {0:0.2f}'
# .format(average_precision["micro"]))
colors = itertools.cycle(
['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
plt.figure(figsize=(15, 10))
f_scores = np.linspace(0.2, 0.8, num=4)
lines = []
labels = []
for f_score in f_scores:
x = np.linspace(0.01, 1)
y = f_score * x / (2 * x - f_score)
l, = plt.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2)
plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02))
lines.append(l)
labels.append('iso-f1 curves')
l, = plt.plot(recall["micro"], precision["micro"], color='gold', lw=2)
lines.append(l)
labels.append('micro-average Precision-recall (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i, color in zip(range(n_classes), colors):
l, = plt.plot(recall[i], precision[i], color=color, lw=2)
lines.append(l)
labels.append('Precision-recall for class {0} (area = {1:0.2f})'
''.format(class_names[i], average_precision[i]))
fig = plt.gcf()
fig.subplots_adjust(bottom=0.25)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(lines, labels, loc=(0, -.38), prop=dict(size=14))
if(save_path is not None):
if(not os.path.isdir(save_path)):
os.makedirs(save_path, exist_ok=True)
plt.savefig(os.path.join(save_path, 'AUC_PRC.png'))
plt.show()
def plot_graphics(y_true, y_pred, class_names=None, save_path=None):
if(class_names is None):
class_names = np.unique(np.array(y_pred))
    # the plotting helpers above render and/or save their figures and return None,
    # so they are called directly rather than wrapped in IPython's display()
    plot_confusion_matrix(y_true, y_pred, visualize=True,
                          normalize=True, class_names=class_names, save_path=save_path)
    plot_auc_roc_multi_class(y_true, y_pred,
                             class_names=class_names, save_path=save_path)
    plot_prc_auc_multiclass(y_true, y_pred,
                            class_names=class_names, save_path=save_path)
| 0.518546 | 0.368264 |
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmse.endpoint import endpoint_data
class UpdateConfigRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'mse', '2019-05-31', 'UpdateConfig')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_OpenSuperAcl(self): # String
return self.get_body_params().get('OpenSuperAcl')
def set_OpenSuperAcl(self, OpenSuperAcl): # String
self.add_body_params('OpenSuperAcl', OpenSuperAcl)
def get_ConfigAuthEnabled(self): # Boolean
return self.get_query_params().get('ConfigAuthEnabled')
def set_ConfigAuthEnabled(self, ConfigAuthEnabled): # Boolean
self.add_query_param('ConfigAuthEnabled', ConfigAuthEnabled)
def get_PassWord(self): # String
return self.get_query_params().get('PassWord')
def set_PassWord(self, PassWord): # String
		self.add_query_param('PassWord', PassWord)
def get_MaxClientCnxns(self): # String
return self.get_query_params().get('MaxClientCnxns')
def set_MaxClientCnxns(self, MaxClientCnxns): # String
self.add_query_param('MaxClientCnxns', MaxClientCnxns)
def get_RequestPars(self): # String
return self.get_query_params().get('RequestPars')
def set_RequestPars(self, RequestPars): # String
self.add_query_param('RequestPars', RequestPars)
def get_JuteMaxbuffer(self): # String
return self.get_query_params().get('JuteMaxbuffer')
def set_JuteMaxbuffer(self, JuteMaxbuffer): # String
self.add_query_param('JuteMaxbuffer', JuteMaxbuffer)
def get_ConfigType(self): # String
return self.get_query_params().get('ConfigType')
def set_ConfigType(self, ConfigType): # String
self.add_query_param('ConfigType', ConfigType)
def get_AutopurgeSnapRetainCount(self): # String
return self.get_query_params().get('AutopurgeSnapRetainCount')
def set_AutopurgeSnapRetainCount(self, AutopurgeSnapRetainCount): # String
self.add_query_param('AutopurgeSnapRetainCount', AutopurgeSnapRetainCount)
def get_ConfigSecretEnabled(self): # Boolean
return self.get_query_params().get('ConfigSecretEnabled')
def set_ConfigSecretEnabled(self, ConfigSecretEnabled): # Boolean
self.add_query_param('ConfigSecretEnabled', ConfigSecretEnabled)
def get_MCPEnabled(self): # Boolean
return self.get_query_params().get('MCPEnabled')
def set_MCPEnabled(self, MCPEnabled): # Boolean
self.add_query_param('MCPEnabled', MCPEnabled)
def get_TickTime(self): # String
return self.get_query_params().get('TickTime')
def set_TickTime(self, TickTime): # String
self.add_query_param('TickTime', TickTime)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_SyncLimit(self): # String
return self.get_query_params().get('SyncLimit')
def set_SyncLimit(self, SyncLimit): # String
self.add_query_param('SyncLimit', SyncLimit)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_AutopurgePurgeInterval(self): # String
return self.get_query_params().get('AutopurgePurgeInterval')
def set_AutopurgePurgeInterval(self, AutopurgePurgeInterval): # String
self.add_query_param('AutopurgePurgeInterval', AutopurgePurgeInterval)
def get_InitLimit(self): # String
return self.get_query_params().get('InitLimit')
def set_InitLimit(self, InitLimit): # String
self.add_query_param('InitLimit', InitLimit)
def get_UserName(self): # String
return self.get_query_params().get('UserName')
def set_UserName(self, UserName): # String
self.add_query_param('UserName', UserName)
|
aliyun-python-sdk-mse/aliyunsdkmse/request/v20190531/UpdateConfigRequest.py
|
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmse.endpoint import endpoint_data
class UpdateConfigRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'mse', '2019-05-31', 'UpdateConfig')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_OpenSuperAcl(self): # String
return self.get_body_params().get('OpenSuperAcl')
def set_OpenSuperAcl(self, OpenSuperAcl): # String
self.add_body_params('OpenSuperAcl', OpenSuperAcl)
def get_ConfigAuthEnabled(self): # Boolean
return self.get_query_params().get('ConfigAuthEnabled')
def set_ConfigAuthEnabled(self, ConfigAuthEnabled): # Boolean
self.add_query_param('ConfigAuthEnabled', ConfigAuthEnabled)
def get_PassWord(self): # String
return self.get_query_params().get('PassWord')
def set_PassWord(self, PassWord): # String
		self.add_query_param('PassWord', PassWord)
def get_MaxClientCnxns(self): # String
return self.get_query_params().get('MaxClientCnxns')
def set_MaxClientCnxns(self, MaxClientCnxns): # String
self.add_query_param('MaxClientCnxns', MaxClientCnxns)
def get_RequestPars(self): # String
return self.get_query_params().get('RequestPars')
def set_RequestPars(self, RequestPars): # String
self.add_query_param('RequestPars', RequestPars)
def get_JuteMaxbuffer(self): # String
return self.get_query_params().get('JuteMaxbuffer')
def set_JuteMaxbuffer(self, JuteMaxbuffer): # String
self.add_query_param('JuteMaxbuffer', JuteMaxbuffer)
def get_ConfigType(self): # String
return self.get_query_params().get('ConfigType')
def set_ConfigType(self, ConfigType): # String
self.add_query_param('ConfigType', ConfigType)
def get_AutopurgeSnapRetainCount(self): # String
return self.get_query_params().get('AutopurgeSnapRetainCount')
def set_AutopurgeSnapRetainCount(self, AutopurgeSnapRetainCount): # String
self.add_query_param('AutopurgeSnapRetainCount', AutopurgeSnapRetainCount)
def get_ConfigSecretEnabled(self): # Boolean
return self.get_query_params().get('ConfigSecretEnabled')
def set_ConfigSecretEnabled(self, ConfigSecretEnabled): # Boolean
self.add_query_param('ConfigSecretEnabled', ConfigSecretEnabled)
def get_MCPEnabled(self): # Boolean
return self.get_query_params().get('MCPEnabled')
def set_MCPEnabled(self, MCPEnabled): # Boolean
self.add_query_param('MCPEnabled', MCPEnabled)
def get_TickTime(self): # String
return self.get_query_params().get('TickTime')
def set_TickTime(self, TickTime): # String
self.add_query_param('TickTime', TickTime)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_SyncLimit(self): # String
return self.get_query_params().get('SyncLimit')
def set_SyncLimit(self, SyncLimit): # String
self.add_query_param('SyncLimit', SyncLimit)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_AutopurgePurgeInterval(self): # String
return self.get_query_params().get('AutopurgePurgeInterval')
def set_AutopurgePurgeInterval(self, AutopurgePurgeInterval): # String
self.add_query_param('AutopurgePurgeInterval', AutopurgePurgeInterval)
def get_InitLimit(self): # String
return self.get_query_params().get('InitLimit')
def set_InitLimit(self, InitLimit): # String
self.add_query_param('InitLimit', InitLimit)
def get_UserName(self): # String
return self.get_query_params().get('UserName')
def set_UserName(self, UserName): # String
self.add_query_param('UserName', UserName)
| 0.490724 | 0.050588 |
from pathlib import Path
from pymongo import MongoClient
import os
import csv
client = MongoClient("localhost", 27017)
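# the aggregation below joins customer -> rental -> inventory -> film and groups the rows into distinct (customer, film) pairs, sorted by customer_id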
cursor = client["dvdrental"]["customer"].aggregate(
[
{
u"$project": {
u"_id": 0,
u"customer": u"$$ROOT"
}
},
{
u"$lookup": {
u"localField": u"customer.customer_id",
u"from": u"rental",
u"foreignField": u"customer_id",
u"as": u"rental"
}
},
{
u"$unwind": {
u"path": u"$rental",
u"preserveNullAndEmptyArrays": False
}
},
{
u"$lookup": {
u"localField": u"rental.inventory_id",
u"from": u"inventory",
u"foreignField": u"inventory_id",
u"as": u"inventory"
}
},
{
u"$unwind": {
u"path": u"$inventory",
u"preserveNullAndEmptyArrays": False
}
},
{
u"$lookup": {
u"localField": u"inventory.film_id",
u"from": u"film",
u"foreignField": u"film_id",
u"as": u"film"
}
},
{
u"$unwind": {
u"path": u"$film",
u"preserveNullAndEmptyArrays": False
}
},
{
u"$group": {
u"_id": {
u"film\u1390film_id": u"$film.film_id",
u"film\u1390title": u"$film.title",
u"customer\u1390customer_id": u"$customer.customer_id",
u"customer\u1390last_name": u"$customer.last_name",
u"customer\u1390first_name": u"$customer.first_name"
}
}
},
{
u"$project": {
u"customer_id": u"$_id.customer\u1390customer_id",
u"first_name": u"$_id.customer\u1390first_name",
u"last_name": u"$_id.customer\u1390last_name",
u"film_id": u"$_id.film\u1390film_id",
u"film": u"$_id.film\u1390title",
u"_id": 0
}
},
{
u"$sort": {
"customer_id": 1
}
}
],
allowDiskUse=True
)
rentals = []
for rental in cursor:
rentals.append(rental)
client.close()
curdir = os.path.dirname(os.path.dirname(
os.path.abspath(__file__))) + "\\results"
Path(curdir).mkdir(exist_ok=True)
target = int(input("Target customer's id: "))
customers = {}
for rental in rentals:
id = rental["customer_id"]
if id not in customers:
customers[id] = {
"name": rental["first_name"] + " " + rental["last_name"],
"films": {}
}
customers[id]["films"][rental["film_id"]] = rental["film"]
target_films = customers[target]["films"].keys()
result_films = {}
CD = 1 / len(customers)
KD = 1 / len(target_films)
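# collaborative score: every other customer adds CD * (the fraction of the target's films they also rented) to each film the target has not seen yet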
for _, customer in customers.items():
K = KD * sum(f in target_films for f in customer["films"])
for film in [f for f in customer["films"] if f not in target_films]:
if film not in result_films:
result_films[film] = {
"title": customer["films"][film],
"C": 0
}
result_films[film]["C"] += CD * K
with open(curdir + "\\4.csv", "w") as file:
sheet = csv.writer(file, lineterminator='\n')
sheet.writerow(["Recommendations for", customers[target]['name']])
for id in sorted(result_films, key=lambda f: result_films[f]["C"], reverse=True):
film = result_films[id]
sheet.writerow([film["title"], film["C"]])
|
1/queries/4.py
|
from pathlib import Path
from pymongo import MongoClient
import os
import csv
client = MongoClient("localhost", 27017)
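# the aggregation below joins customer -> rental -> inventory -> film and groups the rows into distinct (customer, film) pairs, sorted by customer_id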
cursor = client["dvdrental"]["customer"].aggregate(
[
{
u"$project": {
u"_id": 0,
u"customer": u"$$ROOT"
}
},
{
u"$lookup": {
u"localField": u"customer.customer_id",
u"from": u"rental",
u"foreignField": u"customer_id",
u"as": u"rental"
}
},
{
u"$unwind": {
u"path": u"$rental",
u"preserveNullAndEmptyArrays": False
}
},
{
u"$lookup": {
u"localField": u"rental.inventory_id",
u"from": u"inventory",
u"foreignField": u"inventory_id",
u"as": u"inventory"
}
},
{
u"$unwind": {
u"path": u"$inventory",
u"preserveNullAndEmptyArrays": False
}
},
{
u"$lookup": {
u"localField": u"inventory.film_id",
u"from": u"film",
u"foreignField": u"film_id",
u"as": u"film"
}
},
{
u"$unwind": {
u"path": u"$film",
u"preserveNullAndEmptyArrays": False
}
},
{
u"$group": {
u"_id": {
u"film\u1390film_id": u"$film.film_id",
u"film\u1390title": u"$film.title",
u"customer\u1390customer_id": u"$customer.customer_id",
u"customer\u1390last_name": u"$customer.last_name",
u"customer\u1390first_name": u"$customer.first_name"
}
}
},
{
u"$project": {
u"customer_id": u"$_id.customer\u1390customer_id",
u"first_name": u"$_id.customer\u1390first_name",
u"last_name": u"$_id.customer\u1390last_name",
u"film_id": u"$_id.film\u1390film_id",
u"film": u"$_id.film\u1390title",
u"_id": 0
}
},
{
u"$sort": {
"customer_id": 1
}
}
],
allowDiskUse=True
)
rentals = []
for rental in cursor:
rentals.append(rental)
client.close()
curdir = os.path.dirname(os.path.dirname(
os.path.abspath(__file__))) + "\\results"
Path(curdir).mkdir(exist_ok=True)
target = int(input("Target customer's id: "))
customers = {}
for rental in rentals:
id = rental["customer_id"]
if id not in customers:
customers[id] = {
"name": rental["first_name"] + " " + rental["last_name"],
"films": {}
}
customers[id]["films"][rental["film_id"]] = rental["film"]
target_films = customers[target]["films"].keys()
result_films = {}
CD = 1 / len(customers)
KD = 1 / len(target_films)
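# collaborative score: every other customer adds CD * (the fraction of the target's films they also rented) to each film the target has not seen yet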
for _, customer in customers.items():
K = KD * sum(f in target_films for f in customer["films"])
for film in [f for f in customer["films"] if f not in target_films]:
if film not in result_films:
result_films[film] = {
"title": customer["films"][film],
"C": 0
}
result_films[film]["C"] += CD * K
with open(curdir + "\\4.csv", "w") as file:
sheet = csv.writer(file, lineterminator='\n')
sheet.writerow(["Recommendations for", customers[target]['name']])
for id in sorted(result_films, key=lambda f: result_films[f]["C"], reverse=True):
film = result_films[id]
sheet.writerow([film["title"], film["C"]])
| 0.282394 | 0.251429 |
import json
import os
import pickle
import warnings
from operator import itemgetter
from pathlib import Path
from timeit import default_timer as timer
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from joblib import Parallel, delayed
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.cluster.hierarchy import dendrogram
from sklearn.metrics import adjusted_rand_score, silhouette_score
from spherecluster import SphericalKMeans
from graspy.cluster import AutoGMMCluster, GaussianCluster
from graspy.embed import AdjacencySpectralEmbed, OmnibusEmbed
from graspy.models import DCSBMEstimator, SBMEstimator
from graspy.plot import heatmap, pairplot
from graspy.utils import binarize, cartprod, get_lcc, pass_to_ranks
from src.cluster import DivisiveCluster
from src.data import load_everything
from src.embed import lse
from src.hierarchy import signal_flow
from src.io import savefig
from src.utils import export_skeleton_json
from src.visualization import clustergram, palplot, sankey, stacked_barplot
warnings.simplefilter("ignore", category=FutureWarning)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
# %% [markdown]
# # Parameters
BRAIN_VERSION = "2019-12-18"
SAVEFIGS = True
SAVESKELS = True
SAVEOBJS = True
PTR = True
if PTR:
ptr_type = "PTR"
else:
ptr_type = "Raw"
ONLY_RIGHT = False
if ONLY_RIGHT:
brain_type = "Right Hemisphere"
else:
brain_type = "Full Brain"
GRAPH_TYPE = "Gad"
if GRAPH_TYPE == "Gad":
graph_type = r"A $\to$ D"
N_INIT = 200
CLUSTER_METHOD = "graspy-gmm"
if CLUSTER_METHOD == "graspy-gmm":
cluster_type = "GraspyGMM"
elif CLUSTER_METHOD == "auto-gmm":
cluster_type = "AutoGMM"
EMBED = "LSE"
if EMBED == "LSE":
embed_type = "LSE"
N_COMPONENTS = None
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=SAVEFIGS, **kws)
def stashskel(name, ids, colors, palette=None, **kws):
if SAVESKELS:
return export_skeleton_json(
name, ids, colors, palette=palette, foldername=FNAME, **kws
)
def stashobj(obj, name, **kws):
foldername = FNAME
subfoldername = "objs"
pathname = "./maggot_models/notebooks/outs"
if SAVEOBJS:
path = Path(pathname)
if foldername is not None:
path = path / foldername
if not os.path.isdir(path):
os.mkdir(path)
if subfoldername is not None:
path = path / subfoldername
if not os.path.isdir(path):
os.mkdir(path)
with open(path / str(name + ".pickle"), "wb") as f:
pickle.dump(obj, f)
def preprocess_graph(adj, class_labels, skeleton_labels):
# sort by number of synapses
degrees = adj.sum(axis=0) + adj.sum(axis=1)
sort_inds = np.argsort(degrees)[::-1]
adj = adj[np.ix_(sort_inds, sort_inds)]
class_labels = class_labels[sort_inds]
skeleton_labels = skeleton_labels[sort_inds]
# remove disconnected nodes
adj, lcc_inds = get_lcc(adj, return_inds=True)
class_labels = class_labels[lcc_inds]
skeleton_labels = skeleton_labels[lcc_inds]
# remove pendants
degrees = np.count_nonzero(adj, axis=0) + np.count_nonzero(adj, axis=1)
not_pendant_mask = degrees != 1
not_pendant_inds = np.array(range(len(degrees)))[not_pendant_mask]
adj = adj[np.ix_(not_pendant_inds, not_pendant_inds)]
class_labels = class_labels[not_pendant_inds]
skeleton_labels = skeleton_labels[not_pendant_inds]
return adj, class_labels, skeleton_labels
def bartreeplot(
dc,
class_labels,
show_props=True,
text_pad=0.01,
inverse_memberships=True,
figsize=(24, 23),
title=None,
):
# gather necessary info from model
linkage, labels = dc.build_linkage(bic_distance=False) # hackily built like scipy's
pred_labels = dc.predict(latent)
uni_class_labels, uni_class_counts = np.unique(class_labels, return_counts=True)
uni_pred_labels, uni_pred_counts = np.unique(pred_labels, return_counts=True)
# set up the figure
fig = plt.figure(figsize=figsize)
r = fig.canvas.get_renderer()
gs0 = plt.GridSpec(1, 2, figure=fig, width_ratios=[0.2, 0.8], wspace=0)
gs1 = plt.GridSpec(1, 1, figure=fig, width_ratios=[0.2], wspace=0.1)
# title the plot
plt.suptitle(title, y=0.92, fontsize=30, x=0.5)
# plot the dendrogram
ax0 = fig.add_subplot(gs0[0])
dendr_data = dendrogram(
linkage,
orientation="left",
labels=labels,
color_threshold=0,
above_threshold_color="k",
ax=ax0,
)
ax0.axis("off")
ax0.set_title("Dendrogram", loc="left")
# get the ticks from the dendrogram to apply to the bar plot
ticks = ax0.get_yticks()
# plot the barplot (and ticks to the right of them)
leaf_names = np.array(dendr_data["ivl"])[::-1]
ax1 = fig.add_subplot(gs0[1], sharey=ax0)
ax1, prop_data, uni_class, subcategory_colors = stacked_barplot(
pred_labels,
class_labels,
label_pos=ticks,
category_order=leaf_names,
ax=ax1,
bar_height=5,
horizontal_pad=0,
palette="tab20",
norm_bar_width=show_props,
return_data=True,
)
ax1.set_frame_on(False)
ax1.yaxis.tick_right()
if show_props:
ax1_title = "Cluster proportion of known cell types"
else:
ax1_title = "Cluster counts by known cell types"
ax1_title = ax1.set_title(ax1_title, loc="left")
transformer = ax1.transData.inverted()
bbox = ax1_title.get_window_extent(renderer=r)
bbox_points = bbox.get_points()
out_points = transformer.transform(bbox_points)
xlim = ax1.get_xlim()
ax1.text(
xlim[1], out_points[0][1], "Cluster name (size)", verticalalignment="bottom"
)
# plot the cluster compositions as text to the right of the bars
gs0.update(right=0.4)
ax2 = fig.add_subplot(gs1[0], sharey=ax0)
ax2.axis("off")
gs1.update(left=0.48)
text_kws = {
"verticalalignment": "center",
"horizontalalignment": "left",
"fontsize": 12,
"alpha": 1,
"weight": "bold",
}
ax2.set_xlim((0, 1))
transformer = ax2.transData.inverted()
for i, y in enumerate(ticks):
x = 0
for j, (colname, color) in enumerate(zip(uni_class, subcategory_colors)):
prop = prop_data[i, j]
if prop > 0:
if inverse_memberships:
if show_props:
# find the size of the cluster, multiply by prop to get count
# get size of known cluster, divide to get proportion
cluster_name = leaf_names[i]
ind = np.where(uni_pred_labels == cluster_name)[0][0]
cluster_size = uni_pred_counts[ind]
prop = cluster_size * prop
prop = prop / uni_class_counts[j]
name = f"{colname} ({prop:3.0%})"
else:
if show_props:
name = f"{colname} ({prop:3.0%})"
else:
name = f"{colname} ({prop})"
text = ax2.text(x, y, name, color=color, **text_kws)
bbox = text.get_window_extent(renderer=r)
bbox_points = bbox.get_points()
out_points = transformer.transform(bbox_points)
width = out_points[1][0] - out_points[0][0]
x += width + text_pad
# deal with title for the last plot column based on options
if inverse_memberships:
ax2_title = "Known cell type (percentage of cell type in cluster)"
else:
if show_props:
ax2_title = "Known cell type (percentage of cluster)"
else:
ax2_title = "Known cell type (count in cluster)"
ax2.set_title(ax2_title, loc="left")
# Set up plotting constants
plt.style.use("seaborn-white")
sns.set_palette("deep")
sns.set_context("talk", font_scale=0.8)
# %% [markdown]
# # Load the data
from graspy.simulations import er_np, sbm
def n_to_labels(n):
"""Converts n vector (sbm input) to an array of labels
Parameters
----------
n : list or array
length K vector indicating num vertices in each block
Returns
-------
np.array
shape (n_verts), indicator of what block each vertex
is in
"""
n = np.array(n)
n_cumsum = n.cumsum()
labels = np.zeros(n.sum(), dtype=np.int64)
for i in range(1, len(n)):
labels[n_cumsum[i - 1] : n_cumsum[i]] = i
return labels
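# Illustrative example (not in the original notebook):
#   n_to_labels([2, 3]) -> array([0, 0, 1, 1, 1])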
B1 = np.array([[0.3, 0.25, 0.25], [0.25, 0.3, 0.25], [0.25, 0.25, 0.7]])
B2 = np.array([[0.4, 0.25, 0.25], [0.25, 0.4, 0.25], [0.25, 0.25, 0.4]])
B3 = np.array([[0.25, 0.2, 0.2], [0.2, 0.8, 0.2], [0.2, 0.2, 0.25]])
n = np.array([300, 600, 600, 600, 700, 600, 300, 400]).astype(float)
n = n.astype(int)
block_labels = n_to_labels(n)
n_verts = np.sum(n)
global_p = 0.01
prop = np.array(
[
[0.4, 0.2, 0.4],
[0.25, 0.5, 0.25],
[0.25, 0.5, 0.25],
[0.4, 0.2, 0.4],
[0.25, 0.5, 0.25],
[0.25, 0.5, 0.25],
[0.25, 0.5, 0.25],
[0.4, 0.2, 0.4],
]
)
n_blocks = len(prop)
subblock_labels = block_labels.copy()
for i, (n_in_block, block_prop) in enumerate(zip(n, prop)):
block_n = []
for p in block_prop:
num = int(p * n_in_block)
block_n.append(num)
temp_labels = n_to_labels(block_n) + n_blocks + i * 3
subblock_labels[block_labels == i] = temp_labels
B_list = [B1, B2, B3, B1, B3, B3, B2, B1]
# B_list = [B1, B2, B1, B1, B3, B3, B1, B2]
graph = er_np(n_verts, global_p)
for i, n_sub_verts in enumerate(n):
p = prop[i, :]
n_vec = n_sub_verts * p
n_vec = n_vec.astype(int)
B = B_list[i]
subgraph = sbm(n_vec, B)
inds = block_labels == i
graph[np.ix_(inds, inds)] = subgraph
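# The planted structure is a two-level SBM: a sparse ER background with density global_p,
# with each of the 8 outer blocks overwritten by its own 3-block SBM drawn from B_list,
# so subblock_labels refine block_labels.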
heatmap(
graph,
figsize=(15, 15),
cbar=False,
inner_hier_labels=subblock_labels,
outer_hier_labels=block_labels,
)
from graspy.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed
from graspy.plot import pairplot
ase = LaplacianSpectralEmbed(form="R-DAD")
latent = ase.fit_transform(graph)
pairplot(latent)
norm_latent = latent.copy()
norm_latent /= np.linalg.norm(latent, axis=1)[:, np.newaxis]
pairplot(norm_latent, labels=block_labels)
# %% [markdown]
# # Embedding
adj = graph
n_verts = adj.shape[0]
class_labels = block_labels
# %% [markdown]
# # Fitting divisive cluster model
start = timer()
dc = DivisiveCluster(n_init=N_INIT, cluster_method=CLUSTER_METHOD)
dc.fit(latent)
end = timer()
print()
print(f"DivisiveCluster took {(end - start)/60.0} minutes to fit")
print()
dc.print_tree(print_val="bic_ratio")
# %% [markdown]
# # Plotting divisive cluster hierarchy results
title = (
f"Divisive hierarchical clustering, {cluster_type}, {embed_type}, {ptr_type},"
+ f" {brain_type}, {graph_type}"
)
class_labels = subblock_labels
name_base = f"-{cluster_type}-{embed_type}-{ptr_type}-{brain_type}-{graph_type}"
bartreeplot(dc, class_labels, show_props=True, inverse_memberships=False, title=title)
stashfig("bartree-props" + name_base)
bartreeplot(dc, class_labels, show_props=False, inverse_memberships=False, title=title)
stashfig("bartree-counts" + name_base)
bartreeplot(dc, class_labels, show_props=True, inverse_memberships=True, title=title)
stashfig("bartree-props-inv" + name_base)
# %% [markdown]
# # Fitting divisive cluster model
CLUSTER_METHOD = "auto-gmm"
cluster_type = "AutoGMM"
start = timer()
dc = DivisiveCluster(n_init=N_INIT, cluster_method=CLUSTER_METHOD)
dc.fit(latent)
end = timer()
print()
print(f"DivisiveCluster took {(end - start)/60.0} minutes to fit")
print()
dc.print_tree(print_val="bic_ratio")
# %% [markdown]
# # Plotting divisive cluster hierarchy results
title = (
f"Divisive hierarchical clustering, {cluster_type}, {embed_type}, {ptr_type},"
+ f" {brain_type}, {graph_type}"
)
name_base = f"-{cluster_type}-{embed_type}-{GRAPH_TYPE}"
bartreeplot(dc, class_labels, show_props=True, inverse_memberships=False, title=title)
stashfig("bartree-props" + name_base)
bartreeplot(dc, class_labels, show_props=False, inverse_memberships=False, title=title)
stashfig("bartree-counts" + name_base)
bartreeplot(dc, class_labels, show_props=True, inverse_memberships=True, title=title)
stashfig("bartree-props-inv" + name_base)
|
notebooks/49.0-BDP-divisive-clust-hsbm.py
| 0.532182 | 0.26944 |
import clr
# Copyright (c) 2011 AlphaSierraPapa for the SharpDevelop Team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from System import *
from System.Collections.Generic import *
from System.Collections.ObjectModel import *
from System.Collections.Specialized import *
from System.ComponentModel import *
from System.Linq import *
from System.Text.RegularExpressions import *
from System.Threading import *
from System.Windows import *
from System.Windows.Controls import *
from System.Windows.Input import *
from System.Windows.Media import *
from System.Windows.Threading import *
from ICSharpCode.ILSpy.TreeNodes import *
from ICSharpCode.NRefactory.CSharp import *
from ICSharpCode.NRefactory.Utils import *
from Mono.Cecil import *
from Mono.Cecil.Cil import *
class SearchPane(UserControl, IPane):
""" <summary>
Search pane
</summary>
"""
def get_Instance(self):
if self._instance == None:
App.Current.VerifyAccess()
self._instance = SearchPane()
return self._instance
Instance = property(fget=get_Instance)
def __init__(self):
self._SearchTermProperty = DependencyProperty.Register("SearchTerm", clr.GetClrType(str), clr.GetClrType(SearchPane), FrameworkPropertyMetadata(str.Empty, OnSearchTermChanged))
self.InitializeComponent()
searchModeComboBox.Items.Add((Image = Images.Library, Name = "Types and Members"))
searchModeComboBox.Items.Add((Image = Images.Class, Name = "Type"))
searchModeComboBox.Items.Add((Image = Images.Property, Name = "Member"))
searchModeComboBox.Items.Add((Image = Images.Method, Name = "Method"))
searchModeComboBox.Items.Add((Image = Images.Field, Name = "Field"))
searchModeComboBox.Items.Add((Image = Images.Property, Name = "Property"))
searchModeComboBox.Items.Add((Image = Images.Event, Name = "Event"))
searchModeComboBox.Items.Add((Image = Images.Literal, Name = "Constant"))
searchModeComboBox.SelectedIndex = SearchMode.TypeAndMember
ContextMenuProvider.Add(listBox)
MainWindow.Instance.CurrentAssemblyListChanged += self.MainWindow_Instance_CurrentAssemblyListChanged
def MainWindow_Instance_CurrentAssemblyListChanged(self, sender, e):
if IsVisible:
self.StartSearch(self._SearchTerm)
else:
self.StartSearch(None)
self._runSearchOnNextShow = True
def Show(self):
if not IsVisible:
MainWindow.Instance.ShowInTopPane("Search", self)
if self._runSearchOnNextShow:
self._runSearchOnNextShow = False
self.StartSearch(self._SearchTerm)
Dispatcher.BeginInvoke(DispatcherPriority.Background, Action())
def get_SearchTerm(self):
return self.GetValue(self._SearchTermProperty)
def set_SearchTerm(self, value):
self.SetValue(self._SearchTermProperty, value)
SearchTerm = property(fget=get_SearchTerm, fset=set_SearchTerm)
def OnSearchTermChanged(o, e):
(o).StartSearch(e.NewValue)
OnSearchTermChanged = staticmethod(OnSearchTermChanged)
def SearchModeComboBox_SelectionChanged(self, sender, e):
self.StartSearch(self.SearchTerm)
def StartSearch(self, searchTerm):
if self._currentSearch != None:
self._currentSearch.Cancel()
if str.IsNullOrEmpty(searchTerm):
self._currentSearch = None
listBox.ItemsSource = None
else:
mainWindow = MainWindow.Instance
self._currentSearch = RunningSearch(mainWindow.CurrentAssemblyList.GetAssemblies(), searchTerm, searchModeComboBox.SelectedIndex, mainWindow.CurrentLanguage)
listBox.ItemsSource = self._currentSearch.Results
Thread(self._currentSearch.Run).Start()
def Closed(self):
self.SearchTerm = str.Empty
def ListBox_MouseDoubleClick(self, sender, e):
self.JumpToSelectedItem()
e.Handled = True
def ListBox_KeyDown(self, sender, e):
if e.Key == Key.Return:
e.Handled = True
self.JumpToSelectedItem()
def JumpToSelectedItem(self):
result = listBox.SelectedItem
if result != None:
MainWindow.Instance.JumpToReference(result.Member)
def OnKeyDown(self, e):
        # call the base implementation (the converted code recursed into itself here)
        super(SearchPane, self).OnKeyDown(e)
if e.Key == Key.T and e.KeyboardDevice.Modifiers == ModifierKeys.Control:
searchModeComboBox.SelectedIndex = SearchMode.Type
e.Handled = True
elif e.Key == Key.M and e.KeyboardDevice.Modifiers == ModifierKeys.Control:
searchModeComboBox.SelectedIndex = SearchMode.Member
e.Handled = True
elif e.Key == Key.S and e.KeyboardDevice.Modifiers == ModifierKeys.Control:
searchModeComboBox.SelectedIndex = SearchMode.Literal
e.Handled = True
def SearchBox_PreviewKeyDown(self, sender, e):
if e.Key == Key.Down and listBox.HasItems:
e.Handled = True
listBox.MoveFocus(TraversalRequest(FocusNavigationDirection.First))
listBox.SelectedIndex = 0
class RunningSearch(object):
def __init__(self, assemblies, searchTerm, searchMode, language):
self._cts = CancellationTokenSource()
self._Results = ObservableCollection[SearchResult]()
self._dispatcher = Dispatcher.CurrentDispatcher
self._assemblies = assemblies
self._searchTerm = searchTerm.Split(Array[Char]((' ')), StringSplitOptions.RemoveEmptyEntries)
self._language = language
self._searchMode = searchMode
        self._resultCount = 0
        self._Results.Add(SearchResult(Name = "Searching..."))
def Cancel(self):
self._cts.Cancel()
def Run(self):
        try:
            searcher = self.GetSearchStrategy(self._searchMode, self._searchTerm)
            assemblyEnumerator = self._assemblies.GetEnumerator()
            while assemblyEnumerator.MoveNext():
                loadedAssembly = assemblyEnumerator.Current
                module = loadedAssembly.ModuleDefinition
                if module == None:
                    continue
                cancellationToken = self._cts.Token
                typeEnumerator = module.Types.GetEnumerator()
                while typeEnumerator.MoveNext():
                    type = typeEnumerator.Current
                    cancellationToken.ThrowIfCancellationRequested()
                    searcher.Search(type, self._language, self.AddResult)
        except OperationCanceledException:
            # ignore cancellation
            pass
        finally:
            # remove the 'Searching...' entry
            self._dispatcher.BeginInvoke(DispatcherPriority.Normal, Action())
def AddResult(self, result):
        self._resultCount += 1
        if self._resultCount == 1000:
            result = SearchResult(Name = "Search aborted, more than 1000 results found.")
            self._cts.Cancel()
        self._dispatcher.BeginInvoke(DispatcherPriority.Normal, Action())
        self._cts.Token.ThrowIfCancellationRequested()
def GetSearchStrategy(self, mode, terms):
if terms.Length == 1:
if terms[0].StartsWith("tm:", StringComparison.Ordinal):
return TypeAndMemberSearchStrategy(terms[0].Substring(3))
if terms[0].StartsWith("t:", StringComparison.Ordinal):
return TypeSearchStrategy(terms[0].Substring(2))
if terms[0].StartsWith("m:", StringComparison.Ordinal):
return MemberSearchStrategy(terms[0].Substring(2))
if terms[0].StartsWith("md:", StringComparison.Ordinal):
return MemberSearchStrategy(terms[0].Substring(3), MemberSearchKind.Method)
if terms[0].StartsWith("f:", StringComparison.Ordinal):
return MemberSearchStrategy(terms[0].Substring(2), MemberSearchKind.Field)
if terms[0].StartsWith("p:", StringComparison.Ordinal):
return MemberSearchStrategy(terms[0].Substring(2), MemberSearchKind.Property)
if terms[0].StartsWith("e:", StringComparison.Ordinal):
return MemberSearchStrategy(terms[0].Substring(2), MemberSearchKind.Event)
if terms[0].StartsWith("c:", StringComparison.Ordinal):
return LiteralSearchStrategy(terms[0].Substring(2))
if mode == SearchMode.TypeAndMember:
return TypeAndMemberSearchStrategy(terms)
elif mode == SearchMode.Type:
return TypeSearchStrategy(terms)
elif mode == SearchMode.Member:
return MemberSearchStrategy(terms)
elif mode == SearchMode.Literal:
return LiteralSearchStrategy(terms)
elif mode == SearchMode.Method:
return MemberSearchStrategy(terms, MemberSearchKind.Method)
elif mode == SearchMode.Field:
return MemberSearchStrategy(terms, MemberSearchKind.Field)
elif mode == SearchMode.Property:
return MemberSearchStrategy(terms, MemberSearchKind.Property)
elif mode == SearchMode.Event:
return MemberSearchStrategy(terms, MemberSearchKind.Event)
return None
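        # Illustrative prefix examples (inferred from the checks above, not from the
        # original source): "t:Dictionary" restricts the search to type names,
        # "m:ToString" to member names, and "c:42" to constants/literals.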
class SearchResult(INotifyPropertyChanged, IMemberTreeNode):
    # the converter dropped the auto-property bodies; back each property with a plain attribute
    def get_Member(self):
        return self._Member
    def set_Member(self, value):
        self._Member = value
    Member = property(fget=get_Member, fset=set_Member)
    def get_Location(self):
        return self._Location
    def set_Location(self, value):
        self._Location = value
    Location = property(fget=get_Location, fset=set_Location)
    def get_Name(self):
        return self._Name
    def set_Name(self, value):
        self._Name = value
    Name = property(fget=get_Name, fset=set_Name)
    def get_Image(self):
        return self._Image
    def set_Image(self, value):
        self._Image = value
    Image = property(fget=get_Image, fset=set_Image)
    def get_LocationImage(self):
        return self._LocationImage
    def set_LocationImage(self, value):
        self._LocationImage = value
    LocationImage = property(fget=get_LocationImage, fset=set_LocationImage)
def ToString(self):
return self.Name
class ShowSearchCommand(CommandWrapper):
def __init__(self):
NavigationCommands.Search.InputGestures.Clear()
NavigationCommands.Search.InputGestures.Add(KeyGesture(Key.F, ModifierKeys.Control | ModifierKeys.Shift))
NavigationCommands.Search.InputGestures.Add(KeyGesture(Key.E, ModifierKeys.Control))
class SearchMode(object):
    # enum-like constants; the converter left this class empty, so the values are
    # reconstructed from the order the modes are added to searchModeComboBox above
    TypeAndMember, Type, Member, Method, Field, Property, Event, Literal = range(8)
|
ILSpy.ConvertedToPython/SearchPane.py
| 0.41052 | 0.058831 |
import pandas as pd
# Load the full dataset. Drop the classifier and other attributes that are
# not needed
spy_full = pd.read_csv("SPYV3.csv", sep=',')
spy_full = spy_full.drop(['FECHA','OPEN', 'MAX', 'MIN', 'CLOSE','CLASIFICADOR',
'FECHA.year', 'FECHA.day-of-month',
'FECHA.day-of-week'], 1)
# Categorical variables that are not numeric are factorized
spy_full['39'], unique = pd.factorize(spy_full['39'])
spy_full['41'], unique = pd.factorize(spy_full['41'])
spy_full['43'], unique = pd.factorize(spy_full['43'])
spy_full['168'], unique = pd.factorize(spy_full['168'])
spy_full['172'], unique = pd.factorize(spy_full['172'])
# Variable scaling with MinMaxScaler()
from sklearn import preprocessing
min_max_scaler = preprocessing.MinMaxScaler()
spy_full_m = min_max_scaler.fit_transform(spy_full)
# Variable standardization with StandardScaler()
from sklearn.preprocessing import StandardScaler
spy_full_s = StandardScaler().fit_transform(spy_full)
# Principal component analysis (PCA) using the MinMax-scaled data
# To use StandardScaler() instead, replace "spy_full_m" with "spy_full_s"
from sklearn.decomposition import PCA
import numpy
n_comp = 4
estimator = PCA (n_components = n_comp)
X_pca = estimator.fit_transform(spy_full_m)
print(estimator.explained_variance_ratio_)
i=0
suma=0
while i < n_comp:
suma= suma + estimator.explained_variance_ratio_[i]
i = i + 1
print("Varianza total: ", suma)
pc1=pd.DataFrame(numpy.matrix.transpose(estimator.components_), columns=['PC-1',
'PC-2', 'PC-3', 'PC-4'], index=spy_full.columns)
print(pc1)
# Filter to keep the attributes with the largest loadings on each PC. The
# thresholds should be adapted to each case
data_filter = pc1[pc1['PC-1'] >= 0.10]
print(data_filter)
data_filter = pc1[pc1['PC-2'] >= 0.15]
print(data_filter)
data_filter = pc1[pc1['PC-3'] >= 0.25]
print(data_filter)
data_filter = pc1[pc1['PC-4'] >= 0.30]
print(data_filter)
# --------------------------------------------------------------------------------
|
PCA_2.py
| 0.325842 | 0.26917 |
import mox
import testtools
from oslo.config import cfg
from rack import exception
from rack import service
from rack import test
from rack.tests import utils
from rack import wsgi
test_service_opts = [
cfg.StrOpt("fake_manager",
default="rack.tests.test_service.FakeManager",
help="Manager for testing"),
cfg.StrOpt("test_service_listen",
default='127.0.0.1',
help="Host to bind test service to"),
cfg.IntOpt("test_service_listen_port",
default=0,
help="Port number to bind test service to"),
]
CONF = cfg.CONF
CONF.register_opts(test_service_opts)
class TestWSGIService(test.TestCase):
def setUp(self):
super(TestWSGIService, self).setUp()
self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
def test_service_random_port(self):
test_service = service.WSGIService("test_service")
test_service.start()
self.assertNotEqual(0, test_service.port)
test_service.stop()
def test_service_start_with_illegal_workers(self):
CONF.set_override("rackapi_workers", -1)
self.assertRaises(exception.InvalidInput,
service.WSGIService, "rackapi")
@testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
def test_service_random_port_with_ipv6(self):
CONF.set_default("test_service_listen", "::1")
test_service = service.WSGIService("test_service")
test_service.start()
self.assertEqual("::1", test_service.host)
self.assertNotEqual(0, test_service.port)
test_service.stop()
class TestLauncher(test.TestCase):
def setUp(self):
super(TestLauncher, self).setUp()
self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
self.service = service.WSGIService("test_service")
def test_launch_app(self):
service.serve(self.service)
self.assertNotEqual(0, self.service.port)
service._launcher.stop()
|
rack/tests/test_service.py
| 0.47098 | 0.332554 |
import email
import os
import shutil
from digestparser.objects import Digest, Image
from provider.article import article
def create_folder(folder):
if not os.path.exists(folder):
os.makedirs(folder)
def delete_folder(folder, recursively=False):
if recursively:
shutil.rmtree(folder)
else:
os.rmdir(folder)
def delete_files_in_folder(folder, filter_out=[]):
file_list = os.listdir(folder)
for file_name in file_list:
if file_name in filter_out:
continue
if os.path.isfile(folder + "/" + file_name):
os.remove(folder + "/" + file_name)
def delete_directories_in_folder(folder):
folder_list = os.listdir(folder)
for dir in folder_list:
dir_path = os.path.join(folder, dir)
if os.path.isdir(dir_path):
delete_folder(dir_path, True)
def delete_everything_in_folder(folder):
    delete_files_in_folder(folder)
def instantiate_article(article_type, doi, is_poa=None, was_ever_poa=None):
"for testing purposes, generate an article object"
article_object = article()
article_object.article_type = article_type
article_object.doi = doi
article_object.doi_id = article_object.get_doi_id(doi)
article_object.is_poa = is_poa
article_object.was_ever_poa = was_ever_poa
return article_object
def create_digest(author=None, doi=None, text=None, title=None, image=None):
"for testing generate a Digest object an populate it"
digest_content = Digest()
digest_content.author = author
digest_content.doi = doi
if text:
digest_content.text = text
if title:
digest_content.title = title
if image:
digest_content.image = image
return digest_content
def create_digest_image(caption=None, file_name=None):
"for testing generate a Digest Image object an populate it"
digest_image = Image()
if caption:
digest_image.caption = caption
if file_name:
digest_image.file = file_name
return digest_image
def body_from_multipart_email_string(email_string):
"""Given a multipart email string, convert to Message and return decoded body"""
body = None
email_message = email.message_from_string(email_string)
if email_message.is_multipart():
for payload in email_message.get_payload():
body = payload.get_payload(decode=True)
return body
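# Illustrative behaviour (based on the loop above): for a multipart message the function
# returns the decoded bytes of the last payload part; for a non-multipart message it
# returns None.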
|
tests/activity/helpers.py
| 0.240329 | 0.099514 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from package_core.losses import VariationLoss, L1Loss, PerceptualLoss
from package_core.image_proc import *
# L2 loss
def MSE(para):
return nn.MSELoss()
# L1 loss
def L1(para):
return nn.L1Loss()
def MaskedL1(para):
return L1Loss()
# Variance loss
def Variation(para):
return VariationLoss(nc=2)
# gradient loss
class L1GradientLoss(_Loss):
def __init__(self, para):
super(L1GradientLoss, self).__init__()
self.get_grad = Gradient()
self.L1 = nn.L1Loss()
def forward(self, x, y):
grad_x = self.get_grad(x)
grad_y = self.get_grad(y)
loss = self.L1(grad_x, grad_y)
return loss
class Gradient(nn.Module):
def __init__(self):
super(Gradient, self).__init__()
kernel_v = [[0, -1, 0],
[0, 0, 0],
[0, 1, 0]]
kernel_h = [[0, 0, 0],
[-1, 0, 1],
[0, 0, 0]]
kernel_h = torch.FloatTensor(kernel_h).unsqueeze(0).unsqueeze(0)
kernel_v = torch.FloatTensor(kernel_v).unsqueeze(0).unsqueeze(0)
self.weight_h = nn.Parameter(data=kernel_h, requires_grad=False).cuda()
self.weight_v = nn.Parameter(data=kernel_v, requires_grad=False).cuda()
def forward(self, x):
x0 = x[:, 0]
x1 = x[:, 1]
x2 = x[:, 2]
x0_v = F.conv2d(x0.unsqueeze(1), self.weight_v, padding=2)
x0_h = F.conv2d(x0.unsqueeze(1), self.weight_h, padding=2)
x1_v = F.conv2d(x1.unsqueeze(1), self.weight_v, padding=2)
x1_h = F.conv2d(x1.unsqueeze(1), self.weight_h, padding=2)
x2_v = F.conv2d(x2.unsqueeze(1), self.weight_v, padding=2)
x2_h = F.conv2d(x2.unsqueeze(1), self.weight_h, padding=2)
x0 = torch.sqrt(torch.pow(x0_v, 2) + torch.pow(x0_h, 2) + 1e-6)
x1 = torch.sqrt(torch.pow(x1_v, 2) + torch.pow(x1_h, 2) + 1e-6)
x2 = torch.sqrt(torch.pow(x2_v, 2) + torch.pow(x2_h, 2) + 1e-6)
x = torch.cat([x0, x1, x2], dim=1)
return x
# Charbonnier loss
class L1_Charbonnier_loss(_Loss):
"""L1 Charbonnierloss."""
def __init__(self, para):
super(L1_Charbonnier_loss, self).__init__()
self.eps = 1e-3
def forward(self, X, Y):
diff = torch.add(X, -Y)
error = torch.sqrt(diff * diff + self.eps * self.eps)
loss = torch.mean(error)
return loss
class L1_Charbonnier_loss_color(_Loss):
"""L1 Charbonnierloss."""
def __init__(self, para):
super(L1_Charbonnier_loss_color, self).__init__()
self.eps = 1e-3
def forward(self, X, Y):
diff = torch.add(X, -Y)
diff_sq = diff * diff
# print(diff_sq.shape)
diff_sq_color = torch.mean(diff_sq, 1, True)
# print(diff_sq_color.shape)
error = torch.sqrt(diff_sq_color + self.eps * self.eps)
loss = torch.mean(error)
return loss
def Perceptual(para):
return PerceptualLoss(loss=nn.L1Loss())
# parse loss parameters
def loss_parse(loss_str):
ratios = []
losses = []
str_temp = loss_str.split('|')
for item in str_temp:
substr_temp = item.split('*')
ratios.append(float(substr_temp[0]))
losses.append(substr_temp[1])
return ratios, losses
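# Illustrative example (format assumed from the parser above):
#   loss_parse('1*L1_Charbonnier_loss_color|0.05*Perceptual')
#   -> ([1.0, 0.05], ['L1_Charbonnier_loss_color', 'Perceptual'])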
# Training loss
class Loss(nn.Module):
def __init__(self, para):
super(Loss, self).__init__()
ratios, losses = loss_parse(para.loss)
self.losses_name = losses
self.ratios = ratios
self.losses = []
self.downsample2 = nn.AvgPool2d(2, stride=2)
for loss in losses:
# module = import_module('train.loss')
# self.losses.append(getattr(module, loss)(para).cuda())
loss_fn = eval('{}(para)'.format(loss))
self.losses.append(loss_fn)
def forward(self, x, y, flow=None, valid_flag=False):
if len(x.shape) == 5:
b, n, c, h, w = x.shape
x = x.reshape(b * n, c, h, w)
y = y.reshape(b * n, c, h, w)
losses = {}
loss_all = None
for i in range(len(self.losses)):
if valid_flag == True and self.losses_name[i] == 'GAN':
loss_sub = self.ratios[i] * self.losses[i](x, y, valid_flag)
elif self.losses_name[i] == 'Variation':
loss_sub = self.ratios[i] * self.losses[i](flow)
else:
loss_sub = self.ratios[i] * self.losses[i](x, y)
losses[self.losses_name[i]] = loss_sub
if loss_all == None:
loss_all = loss_sub
else:
loss_all += loss_sub
losses['all'] = loss_all
return losses
def rscd_forward(self, imgs, labels, masks, flows):
losses = {}
# reshape tensors
if len(labels.shape) == 5:
b, n, c, h, w = labels.shape
labels = labels.reshape(b * n, c, h, w)
gts = [labels, ]
# create multilevel groundtruth
for i in range(1, len(imgs)):
labels = self.downsample2(labels.clone())
gts.append(labels)
# calculate each loss
loss_all = None
for i in range(len(self.losses)):
sub_loss = None
for level in range(len(imgs)):
if self.losses_name[i] == 'Variation':
loss_temp = self.ratios[i] * self.losses[i](flows[0][level], mean=True)
if len(flows) == 2:
loss_temp += self.ratios[i] * self.losses[i](flows[1][level], mean=True)
elif self.losses_name[i] == 'Perceptual':
loss_temp = self.ratios[i] * self.losses[i].get_loss(imgs[level], gts[level])
else:
loss_temp = self.ratios[i] * self.losses[i](imgs[level], gts[level])
if sub_loss == None:
sub_loss = loss_temp
else:
sub_loss += loss_temp
losses[self.losses_name[i]] = sub_loss
if loss_all == None:
loss_all = sub_loss
else:
loss_all += sub_loss
losses['all'] = loss_all
return losses
|
train/loss.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from package_core.losses import VariationLoss, L1Loss, PerceptualLoss
from package_core.image_proc import *
# L2 loss
def MSE(para):
return nn.MSELoss()
# L1 loss
def L1(para):
return nn.L1Loss()
def MaskedL1(para):
return L1Loss()
# Variance loss
def Variation(para):
return VariationLoss(nc=2)
# gradient loss
class L1GradientLoss(_Loss):
def __init__(self, para):
super(L1GradientLoss, self).__init__()
self.get_grad = Gradient()
self.L1 = nn.L1Loss()
def forward(self, x, y):
grad_x = self.get_grad(x)
grad_y = self.get_grad(y)
loss = self.L1(grad_x, grad_y)
return loss
class Gradient(nn.Module):
def __init__(self):
super(Gradient, self).__init__()
kernel_v = [[0, -1, 0],
[0, 0, 0],
[0, 1, 0]]
kernel_h = [[0, 0, 0],
[-1, 0, 1],
[0, 0, 0]]
kernel_h = torch.FloatTensor(kernel_h).unsqueeze(0).unsqueeze(0)
kernel_v = torch.FloatTensor(kernel_v).unsqueeze(0).unsqueeze(0)
self.weight_h = nn.Parameter(data=kernel_h, requires_grad=False).cuda()
self.weight_v = nn.Parameter(data=kernel_v, requires_grad=False).cuda()
def forward(self, x):
x0 = x[:, 0]
x1 = x[:, 1]
x2 = x[:, 2]
x0_v = F.conv2d(x0.unsqueeze(1), self.weight_v, padding=2)
x0_h = F.conv2d(x0.unsqueeze(1), self.weight_h, padding=2)
x1_v = F.conv2d(x1.unsqueeze(1), self.weight_v, padding=2)
x1_h = F.conv2d(x1.unsqueeze(1), self.weight_h, padding=2)
x2_v = F.conv2d(x2.unsqueeze(1), self.weight_v, padding=2)
x2_h = F.conv2d(x2.unsqueeze(1), self.weight_h, padding=2)
x0 = torch.sqrt(torch.pow(x0_v, 2) + torch.pow(x0_h, 2) + 1e-6)
x1 = torch.sqrt(torch.pow(x1_v, 2) + torch.pow(x1_h, 2) + 1e-6)
x2 = torch.sqrt(torch.pow(x2_v, 2) + torch.pow(x2_h, 2) + 1e-6)
x = torch.cat([x0, x1, x2], dim=1)
return x
# Charbonnier loss
class L1_Charbonnier_loss(_Loss):
"""L1 Charbonnierloss."""
def __init__(self, para):
super(L1_Charbonnier_loss, self).__init__()
self.eps = 1e-3
def forward(self, X, Y):
diff = torch.add(X, -Y)
error = torch.sqrt(diff * diff + self.eps * self.eps)
loss = torch.mean(error)
return loss
class L1_Charbonnier_loss_color(_Loss):
"""L1 Charbonnierloss."""
def __init__(self, para):
super(L1_Charbonnier_loss_color, self).__init__()
self.eps = 1e-3
def forward(self, X, Y):
diff = torch.add(X, -Y)
diff_sq = diff * diff
# print(diff_sq.shape)
diff_sq_color = torch.mean(diff_sq, 1, True)
# print(diff_sq_color.shape)
error = torch.sqrt(diff_sq_color + self.eps * self.eps)
loss = torch.mean(error)
return loss
def Perceptual(para):
return PerceptualLoss(loss=nn.L1Loss())
# parse loss parameters
def loss_parse(loss_str):
ratios = []
losses = []
str_temp = loss_str.split('|')
for item in str_temp:
substr_temp = item.split('*')
ratios.append(float(substr_temp[0]))
losses.append(substr_temp[1])
return ratios, losses
# Training loss
class Loss(nn.Module):
def __init__(self, para):
super(Loss, self).__init__()
ratios, losses = loss_parse(para.loss)
self.losses_name = losses
self.ratios = ratios
self.losses = []
self.downsample2 = nn.AvgPool2d(2, stride=2)
for loss in losses:
# module = import_module('train.loss')
# self.losses.append(getattr(module, loss)(para).cuda())
loss_fn = eval('{}(para)'.format(loss))
self.losses.append(loss_fn)
def forward(self, x, y, flow=None, valid_flag=False):
if len(x.shape) == 5:
b, n, c, h, w = x.shape
x = x.reshape(b * n, c, h, w)
y = y.reshape(b * n, c, h, w)
losses = {}
loss_all = None
for i in range(len(self.losses)):
if valid_flag == True and self.losses_name[i] == 'GAN':
loss_sub = self.ratios[i] * self.losses[i](x, y, valid_flag)
elif self.losses_name[i] == 'Variation':
loss_sub = self.ratios[i] * self.losses[i](flow)
else:
loss_sub = self.ratios[i] * self.losses[i](x, y)
losses[self.losses_name[i]] = loss_sub
if loss_all == None:
loss_all = loss_sub
else:
loss_all += loss_sub
losses['all'] = loss_all
return losses
def rscd_forward(self, imgs, labels, masks, flows):
losses = {}
# reshape tensors
if len(labels.shape) == 5:
b, n, c, h, w = labels.shape
labels = labels.reshape(b * n, c, h, w)
gts = [labels, ]
# create multilevel groundtruth
for i in range(1, len(imgs)):
labels = self.downsample2(labels.clone())
gts.append(labels)
# calculate each loss
loss_all = None
for i in range(len(self.losses)):
sub_loss = None
for level in range(len(imgs)):
if self.losses_name[i] == 'Variation':
loss_temp = self.ratios[i] * self.losses[i](flows[0][level], mean=True)
if len(flows) == 2:
loss_temp += self.ratios[i] * self.losses[i](flows[1][level], mean=True)
elif self.losses_name[i] == 'Perceptual':
loss_temp = self.ratios[i] * self.losses[i].get_loss(imgs[level], gts[level])
else:
loss_temp = self.ratios[i] * self.losses[i](imgs[level], gts[level])
if sub_loss == None:
sub_loss = loss_temp
else:
sub_loss += loss_temp
losses[self.losses_name[i]] = sub_loss
if loss_all == None:
loss_all = sub_loss
else:
loss_all += sub_loss
losses['all'] = loss_all
return losses
| 0.873701 | 0.492127 |
from ui.pfdview import *
from data.parameter import *
import math
import numpy as np  # np.array/mean are used below when averaging the two pressure inputs
class AltitudeCalibrationView(Dialog):
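    """Baro-set dialog: the up/down button columns adjust sea-level pressure (SLP)
    in steps of 100, 10, 1 and 0.1 (hPa, per the model's appendAsHPa usage)."""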
newSLP = None
surfaceWidth = 300
surfaceHeight = 200
def __init__(self):
Dialog.__init__(self,self.surfaceWidth,self.surfaceHeight)
size = 40
up = '\u2191' # Unicode up arrow
down = '\u2193' # Unicode down arrow
xstart = 95
xtop = 20
xbottom = 110
xinc = 30
self.button100Up = Button(xstart, xtop, up, "monospace", size)
self.button100Down = Button(xstart, xbottom, down, "monospace", size)
self.button10Up = Button(xstart + xinc, xtop, up, "monospace", size)
self.button10Down = Button(xstart + xinc, xbottom, down, "monospace", size)
self.button1Up = Button(xstart + xinc*2, xtop, up, "monospace", size)
self.button1Down = Button(xstart + xinc*2, xbottom, down, "monospace", size)
self.buttonP1Up = Button(xstart + xinc*4, xtop, up, "monospace", size)
self.buttonP1Down = Button(xstart + xinc*4, xbottom, down, "monospace", size)
self.registerWidget(Widget(self.button100Up,self.getPhysicalXYOffsetOfSurface()),self.objectID)
self.registerWidget(Widget(self.button100Down,self.getPhysicalXYOffsetOfSurface()),self.objectID)
self.registerWidget(Widget(self.button10Up,self.getPhysicalXYOffsetOfSurface()),self.objectID)
self.registerWidget(Widget(self.button10Down,self.getPhysicalXYOffsetOfSurface()),self.objectID)
self.registerWidget(Widget(self.button1Up,self.getPhysicalXYOffsetOfSurface()),self.objectID)
self.registerWidget(Widget(self.button1Down,self.getPhysicalXYOffsetOfSurface()),self.objectID)
self.registerWidget(Widget(self.buttonP1Up,self.getPhysicalXYOffsetOfSurface()),self.objectID)
self.registerWidget(Widget(self.buttonP1Down,self.getPhysicalXYOffsetOfSurface()),self.objectID)
self.slpTextField = TextField(60, 60, "", "monospace", 45)
self.altitudeTextField = TextField(139, 169, "", "monospace", 25)
self.registerWidget(Widget(self.slpTextField,self.getPhysicalXYOffsetOfSurface()),self.objectID)
self.registerWidget(Widget(self.altitudeTextField,self.getPhysicalXYOffsetOfSurface()),self.objectID)
def register100UpButtonRelease(self,method):
self.button100Up.registerButtonRelease(method)
def register100DownButtonRelease(self,method):
self.button100Down.registerButtonRelease(method)
def register10UpButtonRelease(self,method):
self.button10Up.registerButtonRelease(method)
def register10DownButtonRelease(self,method):
self.button10Down.registerButtonRelease(method)
def register1UpButtonRelease(self,method):
self.button1Up.registerButtonRelease(method)
def register1DownButtonRelease(self,method):
self.button1Down.registerButtonRelease(method)
def registerP1UpButtonRelease(self,method):
self.buttonP1Up.registerButtonRelease(method)
def registerP1DownButtonRelease(self,method):
self.buttonP1Down.registerButtonRelease(method)
def draw(self):
super().draw()
class AltitudeCalibrationController:
model = None
view = None
def __init__(self,model,view):
self.model = model
self.view = view
def button100UpCallback(self):
self.model.updateSLP(100)
self.view.slpTextField.updateText(str(round(self.model.currentSLP(),1)))
self.view.altitudeTextField.updateText(str(round(self.model.currentAltitude(),1)))
def button100DownCallback(self):
self.model.updateSLP(-100)
self.view.slpTextField.updateText(str(round(self.model.currentSLP(),1)))
self.view.altitudeTextField.updateText(str(round(self.model.currentAltitude(),1)))
def button10UpCallback(self):
self.model.updateSLP(10)
self.view.slpTextField.updateText(str(round(self.model.currentSLP(),1)))
self.view.altitudeTextField.updateText(str(round(self.model.currentAltitude(),1)))
def button10DownCallback(self):
self.model.updateSLP(-10)
self.view.slpTextField.updateText(str(round(self.model.currentSLP(),1)))
self.view.altitudeTextField.updateText(str(round(self.model.currentAltitude(),1)))
def button1UpCallback(self):
self.model.updateSLP(1)
self.view.slpTextField.updateText(str(round(self.model.currentSLP(),1)))
self.view.altitudeTextField.updateText(str(round(self.model.currentAltitude(),1)))
def button1DownCallback(self):
self.model.updateSLP(-1)
self.view.slpTextField.updateText(str(round(self.model.currentSLP(),1)))
self.view.altitudeTextField.updateText(str(round(self.model.currentAltitude(),1)))
def buttonP1UpCallback(self):
self.model.updateSLP(0.1)
self.view.slpTextField.updateText(str(round(self.model.currentSLP(),1)))
self.view.altitudeTextField.updateText(str(round(self.model.currentAltitude(),1)))
def buttonP1DownCallback(self):
self.model.updateSLP(-0.1)
self.view.slpTextField.updateText(str(round(self.model.currentSLP(),1)))
self.view.altitudeTextField.updateText(str(round(self.model.currentAltitude(),1)))
def registerBarosetClosedCallback(self,method):
self.barosetClosedCallback = method
def okButtonCallback(self):
self.model.oap1.slp = self.model.baroset.slp
self.model.oap2.slp = self.model.baroset.slp
self.view.unregisterDialog(self.view.objectID)
self.barosetClosedCallback()
def cancelButtonCallback(self):
self.view.unregisterDialog(self.view.objectID)
self.barosetClosedCallback()
def launch(self):
self.view.slpTextField.updateText(str(round(self.model.currentSLP(),1)))
self.view.altitudeTextField.updateText(str(round(self.model.currentAltitude(),1)))
self.view.register100UpButtonRelease(self.button100UpCallback)
self.view.register100DownButtonRelease(self.button100DownCallback)
self.view.register10UpButtonRelease(self.button10UpCallback)
self.view.register10DownButtonRelease(self.button10DownCallback)
self.view.register1UpButtonRelease(self.button1UpCallback)
self.view.register1DownButtonRelease(self.button1DownCallback)
self.view.registerP1UpButtonRelease(self.buttonP1UpCallback)
self.view.registerP1DownButtonRelease(self.buttonP1DownCallback)
self.view.registerOKButton(self.okButtonCallback)
self.view.registerCancelButton(self.cancelButtonCallback)
self.view.draw()
def update(self):
values = np.array([self.model.oap1.value,self.model.oap2.value])
self.model.addPressure(values.mean())
self.view.slpTextField.updateText(str(round(self.model.currentSLP(),1)))
self.view.altitudeTextField.updateText(str(round(self.model.currentAltitude(),1)))
self.view.draw()
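# The model wraps a working Pressure object ("baroset") seeded from the mean value and
# mean elevation of the two pressure parameters (oap1/oap2) passed in.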
class AltitudeCalibrationModel:
baroset = None # For baroset dialog
oap1 = None
oap2 = None
def __init__(self,oap1,oap2):
self.baroset = Pressure(10,4.7)
values = np.array([oap1.value,oap2.value])
altitudes = np.array([oap1.elevation,oap2.elevation])
self.baroset.appendAsHPa(values.mean())
self.baroset.calibrateElevation(altitudes.mean())
self.oap1 = oap1
self.oap2 = oap2
def addPressure(self,value):
self.baroset.appendAsHPa(value)
def updateSLP(self,amount):
""" Adjust the current SLP by this amount """
self.baroset.slp = self.baroset.slp + amount
def currentSLP(self):
return self.baroset.slp
def currentAltitude(self):
return self.baroset.elevation
|
ui/dialogs/baroset.py
|
import time
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.datasets import make_moons, make_blobs
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
# Example settings
n_samples = 300
outliers_fraction = 0.15
n_outliers = int(outliers_fraction * n_samples)
n_inliers = n_samples - n_outliers
# define outlier/anomaly detection methods to be compared
anomaly_algorithms = [
("Robust covariance", EllipticEnvelope(contamination=outliers_fraction)),
("One-Class SVM", svm.OneClassSVM(nu=outliers_fraction, kernel="rbf",
gamma=0.1)),
("Isolation Forest", IsolationForest(contamination=outliers_fraction,
random_state=42)),
("Local Outlier Factor", LocalOutlierFactor(
n_neighbors=35, contamination=outliers_fraction))]
# Define datasets
blobs_params = dict(random_state=0, n_samples=n_inliers, n_features=2)
datasets = [
make_blobs(centers=[[0, 0], [0, 0]], cluster_std=0.5,
**blobs_params)[0],
make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[0.5, 0.5],
**blobs_params)[0],
make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[1.5, .3],
**blobs_params)[0],
4. * (make_moons(n_samples=n_samples, noise=.05, random_state=0)[0] -
np.array([0.5, 0.25])),
14. * (np.random.RandomState(42).rand(n_samples, 2) - 0.5)] # pylint: disable=E1101
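# The last two datasets are rescaled two-moons data and uniform noise in [-7, 7]^2,
# i.e. structured and unstructured cases without a single Gaussian blob.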
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 150),
np.linspace(-7, 7, 150))
plt.figure(figsize=(len(anomaly_algorithms) * 2 + 3, 12.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
rng = np.random.RandomState(42) # pylint: disable=E1101
for i_dataset, X in enumerate(datasets):
# Add outliers
X = np.concatenate([X, rng.uniform(low=-6, high=6,
size=(n_outliers, 2))], axis=0)
for name, algorithm in anomaly_algorithms:
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
plt.subplot(len(datasets), len(anomaly_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
# fit the data and tag outliers
if name == "Local Outlier Factor":
y_pred = algorithm.fit_predict(X)
else:
y_pred = algorithm.fit(X).predict(X)
        # plot the level lines and the points
if name != "Local Outlier Factor": # LOF does not implement predict
Z = algorithm.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='black')
colors = np.array(['#377eb8', '#ff7f00'])
plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[(y_pred + 1) // 2])
plt.xlim(-7, 7)
plt.ylim(-7, 7)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
|
_unittests/ut_testing/data/plot_anomaly_comparison.py
|
# Lint as: python3
"""LintReport implementation that outputs to the terminal."""
import logging
import os
import sys
import textwrap
from typing import Optional
import blessings
from gcp_doctor import config, lint, models
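# column width used to left-justify resource paths so the [ OK ]/[FAIL]/[SKIP]
# markers line up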
OUTPUT_WIDTH = 68
def _emoji_wrap(char):
if os.getenv('CLOUD_SHELL'):
# emoji not displayed as double width in Cloud Shell (bug?)
return char + ' '
else:
return char
class _LintReportTerminalLoggingHandler(logging.Handler):
"""logging.Handler implementation used when producing a lint report."""
def __init__(self, report):
super().__init__()
self.report = report
def format(self, record: logging.LogRecord):
return record.getMessage()
def emit(self, record):
if record.levelno == logging.INFO and self.report.log_info_for_progress_only:
msg = ' ... ' + self.format(record)
# make sure we don't go beyond the terminal width
if self.report.term.width:
term_overflow = len(msg) - self.report.term.width
if term_overflow > 0:
msg = msg[:-term_overflow]
self.report.terminal_update_line(msg)
else:
msg = f'[{record.levelname}] ' + self.format(record) + ' '
# workaround for bug:
# https://github.com/googleapis/google-api-python-client/issues/1116
if 'Invalid JSON content from response' in msg:
return
self.report.terminal_print_line(msg)
class LintReportTerminal(lint.LintReport):
"""LintReport implementation that outputs to the terminal."""
def __init__(self,
file=sys.stdout,
log_info_for_progress_only=True,
show_ok=True,
show_skipped=False):
super().__init__()
self.file = file
self.line_unfinished = False
self.rule_has_results = False
self.log_info_for_progress_only = log_info_for_progress_only
self.show_ok = show_ok
self.show_skipped = show_skipped
self.per_rule_data = {}
    # styling and width decisions are delegated to blessings (term.does_styling, term.width)
    self.term = blessings.Terminal()
def _wrap_indent(self, text, prefix):
width = self.term.width or 80
if width > 80:
width = 80
return textwrap.indent(textwrap.fill(text, width - len(prefix)), prefix)
def banner(self):
if self.term.does_styling:
print(
self.term.bold('gcp-doctor ' + _emoji_wrap('🩺') + ' ' +
config.VERSION) + '\n')
else:
print('gcp-doctor ' + config.VERSION + '\n')
def lint_start(self, context):
print(f'Starting lint inspection ({context})...\n')
def terminal_update_line(self, text: str):
"""Update the current line on the terminal."""
if self.term.width:
print(self.term.move_x(0) + self.term.clear_eol() + text,
end='',
flush=True,
file=self.file)
self.line_unfinished = True
else:
# If it's a stream, do not output anything, assuming that the
# interesting output will be passed via terminal_print_line
pass
def terminal_erase_line(self):
"""Remove the current content on the line."""
if self.line_unfinished and self.term.width:
print(self.term.move_x(0) + self.term.clear_eol(),
flush=True,
end='',
file=self.file)
self.line_unfinished = False
def terminal_print_line(self, text: str = ''):
"""Write a line to the terminal, replacing any current line content, and add a line feed."""
if self.line_unfinished and self.term.width:
self.terminal_update_line(text)
print(file=self.file)
else:
print(text, file=self.file)
# flush the output, so that we can more easily grep, tee, etc.
sys.stdout.flush()
self.line_unfinished = False
def get_logging_handler(self):
return _LintReportTerminalLoggingHandler(self)
def rule_start(self, rule: lint.LintRule, context: models.Context):
rule_interface = super().rule_start(rule, context)
bullet = ''
if self.term.does_styling:
bullet = _emoji_wrap('🔎') + ' '
else:
bullet = '* '
self.terminal_print_line(
bullet +
self.term.yellow(f'{rule.product}/{rule.rule_class}/{rule.rule_id}') +
': ' + f'{rule.short_desc}')
self.rule_has_results = False
return rule_interface
def rule_end(self, rule: lint.LintRule, context: models.Context):
super().rule_end(rule, context)
self.terminal_erase_line()
if self.rule_has_results:
self.terminal_print_line()
# If the rule failed, add more information about the rule.
if rule in self.per_rule_data and self.per_rule_data[rule]['failed_count']:
width = self.term.width or 80
if width > 80:
width = 80
self.terminal_print_line(
self.term.italic(self._wrap_indent(rule.long_desc, ' ')))
self.terminal_print_line()
def add_skipped(self, rule: lint.LintRule, context: models.Context,
resource: Optional[models.Resource], reason: str,
short_info: Optional[str]):
super().add_skipped(rule, context, resource, reason, short_info)
if not self.show_skipped:
return
self.rule_has_results = True
if short_info:
short_info = ' ' + short_info
else:
short_info = ''
if resource:
self.terminal_print_line(' - ' +
resource.short_path.ljust(OUTPUT_WIDTH) +
' [SKIP]' + short_info)
self.terminal_print_line(textwrap.indent(reason, ' '))
else:
self.terminal_print_line(' ' +
('(' + reason + ')').ljust(OUTPUT_WIDTH + 2) +
' [SKIP]' + short_info)
def add_ok(self, rule: lint.LintRule, context: models.Context,
resource: models.Resource, short_info: Optional[str]):
super().add_ok(rule, context, resource, short_info)
if not self.show_ok:
return
self.rule_has_results = True
if short_info:
short_info = ' ' + short_info
else:
short_info = ''
self.terminal_print_line(' - ' + resource.short_path.ljust(OUTPUT_WIDTH) +
' [' + self.term.green(' OK ') + ']' + short_info)
def add_failed(self, rule: lint.LintRule, context: models.Context,
resource: models.Resource, reason: Optional[str],
short_info: Optional[str]):
super().add_failed(rule, context, resource, reason, short_info)
self.rule_has_results = True
rule_data = self.per_rule_data.setdefault(rule, {'failed_count': 0})
rule_data['failed_count'] += 1
if short_info:
short_info = ' ' + short_info
else:
short_info = ''
self.terminal_print_line(' - ' + resource.short_path.ljust(OUTPUT_WIDTH) +
' [' + self.term.red('FAIL') + ']' + short_info)
if reason:
self.terminal_print_line(textwrap.indent(reason, ' '))
def finish(self, context: models.Context):
exit_code = super().finish(context)
totals = {
'skipped': 0,
'ok': 0,
'failed': 0,
}
for rule in self.rules_report.values():
totals[rule['overall_status']] += 1
if not self.rule_has_results:
self.terminal_print_line()
print(
f"Rules summary: {totals['skipped']} skipped, {totals['ok']} ok, {totals['failed']} failed"
)
return exit_code
|
gcp_doctor/lint/report_terminal.py
|
import sys
import argparse
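# One-time-pad core: each plaintext/key character pair is combined by adding (encrypt)
# or subtracting (decrypt) their charset indices modulo the charset length.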
def reduce(txt_char, key_char, charset, binop):
letter = binop(charset.index(txt_char), charset.index(key_char))
letter %= len(charset)
return charset[letter]
def opcrypt(txt, key, charset, binop):
return "".join(reduce(txt_char=i, key_char=j, charset=charset, binop=binop) for (i, j) in zip(txt, key))
def encrypt(txt, key, charset):
return opcrypt(txt=txt, key=key, charset=charset, binop=int.__add__)
def decrypt(txt, key, charset):
return opcrypt(txt=txt, key=key, charset=charset, binop=int.__sub__)
def validate(txt, key, charset):
txt_set_diff = set(txt).difference(charset)
key_set_diff = set(key).difference(charset)
if len(txt_set_diff) > 0:
raise Exception("txt contains illegal characters %s" % txt_set_diff)
if len(key_set_diff) > 0:
raise Exception("key contains illegal characters %s" % key_set_diff)
len_txt = len(txt)
len_key = len(key)
if len_key < len_txt:
raise Exception("key too short (%d) for given txt (%d)" % (len_key, len_txt))
return True
def get_charset(file):
with open(file, "r") as charset_file:
return "".join(charset_file.read().splitlines())
def get_txt(file):
if file == "-":
txt = sys.stdin.read()
sys.stderr.write("\n")
return txt
else:
with open(file, "r") as txt_file:
return "".join(txt_file.read().splitlines())
def get_key(file):
with open(file, "r") as key_file:
return "".join(key_file.read().splitlines())
def new_parser():
parser = argparse.ArgumentParser(description="One-time pad")
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-c", "--charsetfile", required=True,
help="path to charset file; pick required minimum")
parser.add_argument("-k", "--keyfile", required=True,
help="path to the key file")
parser.add_argument("-o", "--offset", type=int, default=0,
help="key offset; defaults to 0")
mode_group = parser.add_mutually_exclusive_group(required=True)
mode_group.add_argument("-e", action="store_true", help="encrypt")
mode_group.add_argument("-d", action="store_true", help="decrypt")
parser.add_argument("txtfile", metavar="TXTFILE", action="store",
help="file with text to en/decrypt. use single dash '-' to read from stdin")
return parser
def main():
parser = new_parser()
args = parser.parse_args()
charset = get_charset(args.charsetfile)
txt = get_txt(args.txtfile)
key = get_key(args.keyfile)
key = key[args.offset:args.offset + len(txt)]
if validate(txt=txt, key=key, charset=charset):
if args.e:
if args.verbose:
print("Encrypting '%s' with key '%s'" % (txt, key), file=sys.stderr)
print(encrypt(txt=txt, key=key, charset=charset), file=sys.stdout)
elif args.d:
if args.verbose:
print("Decrypting '%s' with key '%s'" % (txt, key), file=sys.stderr)
print(decrypt(txt=txt, key=key, charset=charset), file=sys.stdout)
else:
parser.print_help()
if __name__ == "__main__":
main()
|
otp.py
|
import rospy
import rospkg
import cv2
import tf
import io
import os
import numpy as np
import json
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class PushestDatasetJsonWriter:
def __init__(self):
self.tf_listener = tf.TransformListener()
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber(
"/digit/digit_alpha/image_raw/", Image, self.callback_digit_image)
# contact episodes related vars
self.contact_episode_idx = 0
self.counter = 0
self.num_incontact = 0
self.min_num_incontact = 5
# to be logged data
self.ee_pose2d_list = []
self.obj_pose2d_list = []
self.contact_episode_list = []
self.contact_flag_list = []
self.init_dataset_params()
self.dstdir = rospy.get_param("dstdir_dataset")
self.bag_name = rospy.get_param("bag_name")
rospy.loginfo(
"[PushestDatasetJsonWriter] Using bag {0}.bag".format(self.bag_name))
rospack = rospkg.RosPack()
self.path_pkg = rospack.get_path("digit_pushing")
self.mean_img = cv2.imread(
"{0}/local/resources/digit/{1}/mean_img.png".format(self.path_pkg, self.bag_name)).astype(np.float32)
self.std_img = cv2.imread(
"{0}/local/resources/digit/{1}/std_img.png".format(self.path_pkg, self.bag_name)).astype(np.float32)
    def make_dir(self, path):
        # create the directory tree if it does not exist (equivalent of `mkdir -p`)
        os.makedirs(path, exist_ok=True)
def in_contact(self, img):
# Compute per-image sum of stddev squared
diff = np.linalg.norm((img - self.mean_img)/self.std_img)**2
diff = diff / self.mean_img.size
# Count the percent of pixels that are significantly different from their mean values
diff_cnt = np.sum(((img - self.mean_img)/self.std_img)**2 > 4**2)
diff_cnt = float(diff_cnt) / float(self.mean_img.size)
# contact_flag = diff_cnt > 0.05
contact_flag = diff_cnt > 0.01
return contact_flag
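    # Decode a ROS image message to a numpy RGB array: compressed messages go through
    # PIL, raw messages are assumed to be 3-channel bgr8 and are channel-reversed to RGB.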
def rosimg_to_numpy(self, imgmsg):
if hasattr(imgmsg, 'format') and 'compressed' in imgmsg.format:
return np.asarray(Image.open(io.BytesIO(imgmsg.data)))
return np.frombuffer(imgmsg.data, dtype=np.uint8).reshape(imgmsg.height, imgmsg.width, 3)[:, :, ::-1]
def remove_contact_episode(self, episode_idx):
        # episode indices are stored as one-element lists, so match on [episode_idx]
        indices = [idx for idx, elem in enumerate(
            self.contact_episode_list) if elem == [episode_idx]]
for i in sorted(indices, reverse=True):
del self.contact_episode_list[i]
del self.obj_pose2d_list[i]
del self.ee_pose2d_list[i]
def init_dataset_params(self):
self.params = {}
self.params['obj_radius'] = 0.088
def save_data2d_json(self):
data = {'params': self.params,
'ee_poses_2d': self.ee_pose2d_list,
'obj_poses_2d': self.obj_pose2d_list,
'contact_flag': self.contact_flag_list,
'contact_episode': self.contact_episode_list}
dstfile = "{0}/{1}_{2}.json".format(self.dstdir,
self.bag_name, self.contact_episode_idx)
with open(dstfile, 'w') as outfile:
json.dump(data, outfile, indent=4)
rospy.loginfo("Wrote json dataset for episodes 0 to {0} at:\n {1} ".format(
self.contact_episode_idx, dstfile))
def callback_digit_image(self, msg):
try:
# img = self.bridge.imgmsg_to_cv2(msg, "bgr8")
img = self.rosimg_to_numpy(msg)
except CvBridgeError as e:
rospy.logwarn(
"[PushestDatasetJsonWriter::callback_digit_image] {0}".format(e))
return
try:
# looks up arg2 frame transform in arg1 frame
(trans_obj, rot_obj) = self.tf_listener.lookupTransform(
"world", "/object/center/", rospy.Time(0))
(trans_ee, rot_ee) = self.tf_listener.lookupTransform(
"world", "/digit/center/", rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.logwarn(
"[PushestDatasetJsonWriter::callback_digit_image] TF lookup failed")
return
if (self.in_contact(img)):
rot_obj_euler = tf.transformations.euler_from_quaternion(rot_obj)
obj_pose2d = [trans_obj[0], trans_obj[1],
rot_obj_euler[2]] # (x, y, yaw)
rot_ee_euler = tf.transformations.euler_from_quaternion(rot_ee)
ee_pose2d = [trans_ee[0], trans_ee[1],
rot_ee_euler[2]] # (x, y, yaw)
# add to data list being logged
self.obj_pose2d_list.append(obj_pose2d)
self.ee_pose2d_list.append(ee_pose2d)
self.contact_flag_list.append([1])
self.contact_episode_list.append([self.contact_episode_idx])
self.num_incontact = self.num_incontact + 1
else:
self.counter = self.counter + 1
# start new contact episode
if ((self.counter > 10) & (self.num_incontact > 1)):
if (self.num_incontact > self.min_num_incontact):
self.save_data2d_json()
self.contact_episode_idx = self.contact_episode_idx + 1
else:
self.remove_contact_episode(self.contact_episode_idx)
self.counter = 0
self.num_incontact = 0
def main():
    # rospy requires init_node() before parameters and subscribers are registered
    rospy.init_node('pushest_dataset_json_writer', anonymous=True)
    img_tf_writer = PushestDatasetJsonWriter()
rospy.loginfo("Initialized pushest_dataset_json_writer node.")
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
if __name__ == '__main__':
main()
|
data_collection/digit_pushing/src/PushestDatasetJsonWriter.py
|
import sys, getopt
import glob, os
import numpy as np
from fastq_reader import Fastq_Reader
help_message = 'usage example: python intermediate_read_clusters.py -r 1 -i /project/home/hashed_reads/ -o /project/home/cluster_vectors/'
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:],'hr:i:o:',["--filerank=","inputdir=","outputdir="])
except:
print help_message
sys.exit(2)
for opt, arg in opts:
if opt in ('-h','--help'):
print help_message
sys.exit()
elif opt in ('-r',"--filerank"):
fr = int(arg)-1
elif opt in ('-i','--inputdir'):
inputdir = arg
if inputdir[-1] != '/':
inputdir += '/'
elif opt in ('-o','--outputdir'):
outputdir = arg
if outputdir[-1] != '/':
outputdir += '/'
hashobject = Fastq_Reader(inputdir,outputdir)
Hashq_Files = glob.glob(os.path.join(hashobject.input_path,'*.hashq.*'))
hashobject.infile = Hashq_Files[fr]
hashobject.outfile = hashobject.output_path + 'intermediate_clusters/' + str(fr)
hashobject.global_weights = np.load(hashobject.output_path + 'global_weights.npy')
global_weight_sum = hashobject.global_weights.sum(dtype=np.float64)
Cluster_Files = glob.glob(os.path.join(hashobject.output_path,'*.cluster.npy'))
Cluster_Files = [(int(cf[cf.rfind('/')+1:cf.index('.')]),cf) for cf in Cluster_Files]
cluster_sizes = np.load(hashobject.output_path+'kmer_cluster_sizes.npy')
total_set_size = 0
cluster_weights = []
cluster_keys = []
outpart = 0
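	# accumulate clusters until their combined k-mer id sets reach ~50M entries, then
	# process the batch with membership_generator() and start a new one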
for ci,cf in Cluster_Files:
# ignore super clusters and super small clusters
if cluster_sizes[ci] < 0.2*2**hashobject.hash_size:
cw = np.load(cf)
cw_sum_prob = hashobject.global_weights[cw].sum(dtype=np.float64)/global_weight_sum
if cw_sum_prob > 0.00002:
cluster_weights.append((set(cw),cw_sum_prob))
cluster_keys.append(cf[cf.rfind('/')+1:cf.rfind('.')])
total_set_size += len(cw)
if total_set_size > 50*10**6:
hashobject.membership_generator(cluster_weights,cluster_keys,outpart)
cluster_weights = []
cluster_keys = []
total_set_size = 0
outpart += 1
if len(cluster_weights) > 0:
hashobject.membership_generator(cluster_weights,cluster_keys,outpart)
|
misc/intermediate_read_clusters.py
|
from enum import Enum
import logging
from ..services import game_service, game_message_service, rcon_service, status_service
from ..exceptions import InvalidOperationException
from ..services.status_service import Status
IDLE_TRACKERS = {}
PREVIOUS_IDLE_STATUSES = {}
async def auto_shutdown_loop(bot):
logging.info('Running auto-shutdown loop')
games = await status_service.list_game_statuses()
for game in games:
status = games[game]
if status != Status.RUNNING:
logging.info(
'%s is not running so will not be monitored for inactivity', game)
_deregister_game(game)
elif game not in IDLE_TRACKERS:
logging.info('%s is now being monitored for inactivity', game)
await _register_game(game)
else:
logging.info('Checking idle status for %s', game)
idle_status = IDLE_TRACKERS[game].check_idle_status()
previous_idle_status = PREVIOUS_IDLE_STATUSES.get(game)
if idle_status == previous_idle_status:
logging.info(
'No change in idle status for %s - still %s', game, idle_status)
                continue  # only this game is unchanged; keep checking the others
# Only react when the status changes - not on every iteration
logging.info('Idle status for %s has changed - was %s, now %s',
game, previous_idle_status, idle_status)
PREVIOUS_IDLE_STATUSES[game] = idle_status
if idle_status == IdleStatus.SHUTDOWN or idle_status == IdleStatus.UNKNOWN_SHUTDOWN:
logging.info('Stopping %s due to inactivity', game)
await game_message_service.send_shutdown_notification(bot, game)
try:
force = previous_idle_status == IdleStatus.SHUTDOWN_FAILED
logging.info('Stopping %s with force=%s', game, force)
await game_service.stop(game, force)
await game_message_service.send_shutdown_finished(bot, game)
except InvalidOperationException:
await game_message_service.send_shutdown_failed(bot, game)
logging.error(
'Failed to stop %s, will use force next time', game)
PREVIOUS_IDLE_STATUSES[game] = IdleStatus.SHUTDOWN_FAILED
if idle_status == IdleStatus.UNKNOWN_WARNING:
await game_message_service.send_unknown_idle_status_message(bot, game)
if idle_status == IdleStatus.WARNING:
await game_message_service.send_shutdown_warning(bot, game)
if idle_status == IdleStatus.IDLE:
await game_message_service.send_idle_message(bot, game)
def reset_idle_counter(game):
if game in IDLE_TRACKERS:
IDLE_TRACKERS[game].reset_count()
PREVIOUS_IDLE_STATUSES[game] = None
async def _register_game(game):
rcon_client = await rcon_service.get_rcon_client(game)
IDLE_TRACKERS[game] = IdleTracker(rcon_client)
def _deregister_game(game):
IDLE_TRACKERS.pop(game, None)
PREVIOUS_IDLE_STATUSES.pop(game, None)
class IdleStatus(Enum):
IN_USE = 1
IDLE = 2
WARNING = 3
SHUTDOWN = 4
UNKNOWN = 5
UNKNOWN_WARNING = 6
UNKNOWN_SHUTDOWN = 7
SHUTDOWN_FAILED = 8
WARNING_COUNT = 2
SHUTDOWN_COUNT = 3
# Only send an unknown warning if we see this twice in a row - it can happen
# naturally if the shutdown loop runs while the server is starting up
UNKNOWN_WARNING_COUNT = 2
# Give the user enough time to respond and cancel the shutdown
UNKNOWN_SHUTDOWN_COUNT = 5
class IdleTracker():
def __init__(self, rcon_client):
self.rcon_client = rcon_client
self.game_time = None
self.idle_count = 0
self.unknown_count = 0
def check_idle_status(self):
try:
latest_game_time = self.rcon_client.game_time()
self.unknown_count = 0
if self.game_time is not None and latest_game_time == self.game_time:
self.idle_count += 1
if self.idle_count >= SHUTDOWN_COUNT:
return IdleStatus.SHUTDOWN
if self.idle_count >= WARNING_COUNT:
return IdleStatus.WARNING
return IdleStatus.IDLE
self.game_time = latest_game_time
self.idle_count = 0
return IdleStatus.IN_USE
except Exception: # pylint: disable=broad-except
self.unknown_count += 1
if self.unknown_count >= UNKNOWN_SHUTDOWN_COUNT:
return IdleStatus.UNKNOWN_SHUTDOWN
if self.unknown_count >= UNKNOWN_WARNING_COUNT:
return IdleStatus.UNKNOWN_WARNING
return IdleStatus.UNKNOWN
def reset_count(self):
self.idle_count = 0
self.unknown_count = 0
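# Hedged usage sketch: with an rcon client whose game_time() never advances, the
# tracker escalates IN_USE -> IDLE -> WARNING -> SHUTDOWN over successive polls
# (the _FrozenClock class below is illustrative, not part of this module):
#
#   class _FrozenClock:
#       def game_time(self):
#           return 1234  # a constant game time makes the server look idle
#
#   tracker = IdleTracker(_FrozenClock())
#   tracker.check_idle_status()  # IN_USE   (first observation is only recorded)
#   tracker.check_idle_status()  # IDLE     (idle_count == 1)
#   tracker.check_idle_status()  # WARNING  (idle_count == WARNING_COUNT)
#   tracker.check_idle_status()  # SHUTDOWN (idle_count == SHUTDOWN_COUNT)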
|
bot/services/inactivity_service.py
|
| 0.387574 | 0.070816 |
from headers.static_values import *
from worker import *
from utils import *
import requests
class Forwarded():
def __init__(self, destination):
self.destination = destination
self.mime_types = MimeTypes.get_mime_list()
self.payloads = Payloads.get_payload_list(Config.revshell_ip, Config.revshell_port)
self.test_array = []
def revshell_tests(self):
for payload in self.payloads:
self.test_array.append(
PreRequest(
req_type = "GET",
destination = self.destination,
payload = payload,
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0",
"Forwarded" : payload
},
body = None,
expected_status_code = Config.initial_response.status_code
)
)
def forwarded_fuzz_tests(self):
forwarded_fuzz = [
'for="_mdn"',
'For="[::1]:80"',
'for=0.0.0.0;proto=http;by=0.0.0.0',
'for=0.0.0.0, for=127.0.0.1, for=localhost'
]
for forwarded in forwarded_fuzz:
self.test_array.append(
PreRequest(
req_type = "GET",
destination = self.destination,
payload = forwarded,
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0",
"Forwarded" : forwarded
},
body = None,
expected_status_code = Config.initial_response.status_code
)
)
def generate_tests(self):
if Config.test_level >= Config.TEST_LEVEL_OPTIMAL:
self.revshell_tests()
self.forwarded_fuzz_tests()
def get_tests(self):
self.generate_tests()
return self.test_array
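# Hedged usage sketch (the target URL and the runner loop are illustrative; only
# Forwarded and PreRequest come from this project):
#
#   forwarded_tests = Forwarded("https://target.example/").get_tests()
#   for pre_request in forwarded_tests:
#       pass  # hand each PreRequest to the worker that performs the HTTP call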
|
headers/forwarded.py
|
| 0.427516 | 0.133049 |
from corpus.event import EventManager, EventSeriesManager, EventStorage
from corpus.config import EventDataSourceConfig
from corpus.quality.rating import RatingManager
from corpus.datasources.download import Download
class EventDataSource(object):
'''
a data source for events
'''
    def __init__(self,eventManager:EventManager,eventSeriesManager:EventSeriesManager,sourceConfig:EventDataSourceConfig):
'''
constructor
Args:
sourceConfig(EventDataSourceConfig): the configuration for the EventDataSource
eventManager(EventManager): manager for the events
eventSeriesManager(EventSeriesManager): manager for the eventSeries
'''
self.sourceConfig=sourceConfig
self.name=self.sourceConfig.name
self.eventManager=eventManager
self.eventManager.dataSource=self
self.eventSeriesManager=eventSeriesManager
self.eventSeriesManager.dataSource=self
def load(self,forceUpdate=False):
'''
load this data source
'''
self.eventSeriesManager.configure()
self.eventManager.configure()
# first events
self.eventManager.fromCache(force=forceUpdate)
# then series
self.eventSeriesManager.fromCache(force=forceUpdate)
# TODO use same foreign key in all dataSources
self.eventManager.linkSeriesAndEvent(self.eventSeriesManager,"inEventSeries")
def rateAll(self,ratingManager:RatingManager):
'''
rate all events and series based on the given rating Manager
'''
self.eventManager.rateAll(ratingManager)
self.eventSeriesManager.rateAll(ratingManager)
class EventCorpus(object):
'''
Towards a gold standard event corpus and observatory ...
'''
def __init__(self,debug=False,verbose=False):
'''
Constructor
Args:
debug(bool): set debugging if True
verbose(bool): set verbose output if True
'''
self.debug=debug
self.verbose=verbose
self.eventDataSources={}
def addDataSource(self, eventDataSource:EventDataSource):
'''
adds the given eventDataSource
Args:
eventDataSource: EventDataSource
'''
self.eventDataSources[eventDataSource.sourceConfig.lookupId]=eventDataSource
def loadAll(self,forceUpdate:bool=False):
'''
load all eventDataSources
Args:
forceUpdate(bool): True if the data should be fetched from the source instead of the cache
'''
for eventDataSource in self.eventDataSources.values():
eventDataSource.load(forceUpdate=forceUpdate)
@staticmethod
def download():
'''
download the EventCorpus.db if needed
'''
fileName="EventCorpus.db"
url = f"https://github.com/WolfgangFahl/ConferenceCorpus/wiki/data/{fileName}.gz"
targetDirectory=EventStorage.getStorageConfig().getCachePath()
Download.downloadBackupFile(url, fileName, targetDirectory)
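# Hedged usage sketch (someEventDataSource stands in for an EventDataSource built elsewhere):
#
#   corpus = EventCorpus(debug=True)
#   corpus.addDataSource(someEventDataSource)  # an EventDataSource wired to its managers
#   EventCorpus.download()                     # fetch EventCorpus.db into the cache if missing
#   corpus.loadAll(forceUpdate=False)          # fill the managers from cache (or source when forced)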
|
corpus/eventcorpus.py
|
| 0.374562 | 0.128908 |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import altair as alt
import pandas as pd
from vega_datasets import data
import dash_bootstrap_components as dbc
app = dash.Dash(__name__, assets_folder='assets')
server = app.server
df = pd.read_csv("data/merged_data_clean.csv")
def create_map(alcohol_type = 'beer', region = "World"):
"""
Create choropleth heatmap based on alcoholic consumption and region.
    Choropleth colour scheme will change depending on the alcohol type selected.
    The zoom of the map will adjust depending on the region selected.
    Parameters
    ----------
    alcohol_type : str {'wine', 'beer', 'spirit'}
Type of alcohol to show on choropleth.
region: str {'World', 'Asia', 'Europe', 'Africa', 'Americas', 'Oceania'}
Returns
-------
altair Chart object
Choropleth of chosen alcohol type
Examples
--------
>>> create_map('spirit', 'Europe')
"""
    # region -> [projection scale, translate-x, translate-y, display name for the bar chart title]
region_dict = {"World":[140, 450, 400, 'the World'],
"Asia":[400, -190, 520, 'Asia'],
"Europe":[800, 300, 1100, 'Europe'],
"Africa":[400, 300, 310, 'Africa'],
"Americas":[200, 900, 360, 'the Americas'],
"Oceania":[500, -800, 50, 'Oceania']}
# set colour scheme of map depending on alcohol type
if alcohol_type == 'wine':
map_color = ['#f9f9f9', '#720b18']
elif alcohol_type == 'beer':
map_color = ['#f9f9f9', '#DAA520']
else:
map_color = ['#f9f9f9', '#67b2e5', '#1f78b5']
# get columns for specific to the alcohol type selected
cols = [x for x in df.columns if alcohol_type in x]
cols.append('country')
    # pick the rank column matching the selected scope (global rank for 'World', continent rank otherwise)
if region == 'World':
col_to_filter = cols[2]
else:
col_to_filter = cols[3]
# Create map plot
map_plot = alt.Chart(alt.topo_feature(data.world_110m.url, 'countries')).mark_geoshape(
stroke='white',
strokeWidth=0.5
).encode(
alt.Color(field = cols[1], #proportion of alcohol type
type = 'quantitative',
scale=alt.Scale(domain=[0, 1], range=map_color),
legend=alt.Legend(orient='top',
title = f'Proportion of total servings per person from {alcohol_type}')
),
tooltip = [
{"field": cols[4], "type": "nominal", 'title': "Country"},
{"field": cols[1], "type": "quantitative", 'title': f'Proportion of total servings from {alcohol_type}', 'format':'.2f'},
{"field": cols[0], "type": "quantitative", 'title': f'Total {alcohol_type} servings'},
{"field": cols[3], "type": "quantitative", 'title': 'Continent rank'},
{"field": cols[2], "type": "quantitative", 'title': 'Global rank'},
]
).transform_lookup(
lookup='id',
from_=alt.LookupData(df, 'id', fields = cols)
).project(
type='mercator', scale = region_dict[region][0], translate = [region_dict[region][1], region_dict[region][2]]
).properties(
width=900,
height=600,
)
bar = alt.Chart(df).mark_bar().encode(
alt.X(
field=cols[1], #proportion of alcohol type
type='quantitative',
title = "Proportion Consumed",
scale=alt.Scale(domain=[0, 1]),
),
alt.Y(
field='country',
type='nominal',
sort=alt.EncodingSortField(field=cols[1], op='max', order='descending'),
title=''
),
alt.Fill(
field = cols[1],
type = 'quantitative',
scale=alt.Scale(domain=[0, 1], range=map_color),
legend=None),
tooltip = [
{"field": cols[4], "type": "nominal", 'title': "Country"},
{"field": cols[1], "type": "quantitative", 'title': f'Proportion of total servings per person from {alcohol_type}', 'format':'.2f'},
{"field": cols[0], "type": "quantitative", 'title': f'Total {alcohol_type} servings'},
{"field": cols[3], "type": "quantitative", 'title': 'Continent rank'},
{"field": cols[2], "type": "quantitative", 'title': 'Global rank'},
]
).transform_filter(alt.datum.region == region if region != 'World' else alt.datum.total_servings >= 0
).transform_window(
sort=[alt.SortField(cols[1], order="descending")],
rank="rank(col_to_filter)"
).transform_filter(
alt.datum.rank <= 20
).properties(
title=f"Top 20 Countries that love {alcohol_type.title()} in {region_dict[region][3]}",
width = 400,
height = 600
)
# concatenate map and bar chart plots
return alt.hconcat(map_plot, bar).configure_legend(
gradientLength=300,
gradientThickness=20,
titleLimit= 0,
labelFontSize=15,
titleFontSize=20
).configure_axis(
labelFontSize=15,
titleFontSize=20
).configure_title(
fontSize=20
)
header = dbc.Jumbotron(
[
dbc.Container(
[
html.H1("Which Countries are Beer-lovers, Wine-lovers, or Spirit-lovers?", className="display-3",
style={'color': 'blue', 'font-family':'Book Antiqua'}),
html.H1(
"The following dashboard provides a visual overview on the proportion of \
global alcohol consumption across beer, wine and spirits in 2010. \
Users can simultaneously adjust the geographic location and specific \
alcohol type of their choice. The horizontal bar chart on the right of the \
map dynamically updates as different geographies and alcohol types are selected.",
className="lead", style={'color': 'black', 'font-weight':'lighter',
'font-family':'Book Antiqua', 'font-size':20}),
html.H1("______", style={'color': 'white', 'font-size':10}),
html.H1(
"Note: Proportions are calculated as a ratio of total servings for a specific type of drink \
divided by the total servings of all drinks in the country. As a result, countries with low total servings \
                    of alcohol may have unusually high ratios, as shown in the case of Saudi Arabia.",
className="lead", style={'color': 'black', 'font-weight':'lighter',
'font-family':'Book Antiqua', 'font-size':20}),
# html.H1("______", style={'color': 'white', 'font-size':10}),
html.A('Data Source: ',style={'color': 'black', 'font-family':'Book Antiqua'}),
html.A("FiveThirtyEight", href='https://github.com/fivethirtyeight/data/tree/master/alcohol-consumption'),
html.H1("______", style={'color': 'white', 'font-size':10}),
html.H1('Adjust the cells below:' ,
style={'color': 'black', 'font-size': 20,'font-family':'Book Antiqua'}),
],
fluid=True,
)
],
fluid=True,
)
# Drop-down and Map Plot
content = dbc.Container([
dbc.Row(
[dbc.Col(
# Drink type dropdown
dcc.Dropdown(
id='dd-chart',
options=[
{'label': 'Beer', 'value': 'beer'},
{'label': 'Wine', 'value': 'wine'},
{'label': 'Spirits', 'value': 'spirit'},
],
value='beer',
style=dict(width='30%',
verticalAlign="middle")
)),
dbc.Col(
# Region dropdown
dcc.Dropdown(
id='dd-chart2',
options=[
{'label': 'World', 'value': 'World'},
{'label': 'Asia', 'value': 'Asia'},
{'label': 'Europe', 'value': 'Europe'},
{'label': 'Africa', 'value': 'Africa'},
{'label': 'Americas', 'value': 'Americas'},
{'label': 'Oceania', 'value': 'Oceania'}
],
value='World',
style=dict(width='30%',
verticalAlign="middle")
)),
dbc.Col(
html.Iframe(
sandbox='allow-scripts',
id='plot',
height='1000',
width='1500',
style={'border-width': '0'},
                    # initial render; the callback below replaces srcDoc when the dropdowns change
srcDoc= create_map().to_html()
)),
]
)
]
)
app.layout = html.Div([header,
content])
# callback to update visualizations based on dropdown selections
@app.callback(
dash.dependencies.Output('plot', 'srcDoc'),
[dash.dependencies.Input('dd-chart', 'value'),
dash.dependencies.Input('dd-chart2', 'value')])
def update_plot(alcohol_type, region):
"""
    Takes an alcohol_type and region and calls create_map to update the Altair figure.
    Parameters
    ----------
    alcohol_type : str {'wine', 'beer', 'spirit'}
Type of alcohol to show on choropleth.
region: str {'World', 'Asia', 'Europe', 'Africa', 'Americas', 'Oceania'}
Returns
-------
altair Chart object
Choropleth of chosen alcohol type
Examples
--------
>>> update_plot('spirit', 'Europe')
"""
updated_plot = create_map(alcohol_type, region).to_html()
return updated_plot
if __name__ == '__main__':
    app.run_server(debug=True)
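# Hedged sketch for inspecting the figure outside Dash (writes a standalone HTML file via Altair):
#
#   create_map('wine', 'Asia').save('chart.html')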
|
app.py
|
| 0.6705 | 0.361362 |
import random
import numpy as np
import cv2
import scipy.ndimage as ndimage
from scipy.interpolate import RegularGridInterpolator
from scipy.ndimage.filters import gaussian_filter
class RandomChoice(object):
"""
    choose a random transform from the list and apply it
    transforms: transforms to apply
p: probability
"""
def __init__(self, transforms=[],
p=0.5):
self.transforms = transforms
self.p = p
def __call__(self, sample):
augment = np.random.random(1) < self.p
if not augment:
return sample
t = random.choice(self.transforms)
return t(sample)
class ComposeTransforms(object):
"""
Composes several transforms together.
"""
def __init__(self, transforms=[],
p=0.9):
self.transforms = transforms
self.p = p
def __call__(self, sample):
augment = np.random.random(1) < self.p
if not augment:
return sample
for t in self.transforms:
sample = t(sample)
return sample
def stack_seg_2_image(sample):
image = sample['image']
seg = sample['segmentation']
channels = [chan for chan in image]
channels.append(seg)
return np.stack(channels, axis=3)
def elastic_transform_3d(sample, alpha=1, sigma=20, c_val=0.0, method="linear"):
"""
:param sample: dict of image and seg
:param alpha: scaling factor of gaussian filter
:param sigma: standard deviation of random gaussian filter
:param c_val: fill value
:param method: interpolation method. supported methods : ("linear", "nearest")
:return: deformed image and/or label
"""
img_numpy = sample['image'].copy()
label = sample['segmentation'] if 'segmentation' in sample else None
shape = img_numpy.shape
# Define 3D coordinate system
coords = np.arange(shape[0]), np.arange(shape[1]), np.arange(shape[2])
# Interpolated images
chan_intrps = [RegularGridInterpolator(coords, img_numpy[:,:,:,chan],
method=method,
bounds_error=False,
fill_value=c_val) for chan in range(shape[3])]
    # Get random elastic deformations
dx = gaussian_filter((np.random.rand(shape[0],shape[1],shape[2]) * 2 - 1), sigma,
mode="constant", cval=0.) * alpha
dy = gaussian_filter((np.random.rand(shape[0],shape[1],shape[2]) * 2 - 1), sigma,
mode="constant", cval=0.) * alpha
dz = gaussian_filter((np.random.rand(shape[0],shape[1],shape[2]) * 2 - 1), sigma,
mode="constant", cval=0.) * alpha
# Define sample points
x, y, z = np.mgrid[0:shape[0], 0:shape[1], 0:shape[2]]
indices = np.reshape(x + dx, (-1, 1)), \
np.reshape(y + dy, (-1, 1)), \
np.reshape(z + dz, (-1, 1))
    # Interpolate the 3D image
img_numpy = np.stack([chan_intrp(indices).reshape((shape[0],shape[1],shape[2]))
for chan_intrp in chan_intrps], axis=3).astype(np.float32)
# Interpolate labels
if label is not None:
lab_intrp = RegularGridInterpolator(coords, label,
method="nearest",
bounds_error=False,
fill_value=0)
label = lab_intrp(indices).reshape(shape[0],shape[1],shape[2]).astype(label.dtype)
sample['segmentation'] = label
sample['image'] = img_numpy
return sample
class ElasticTransform(object):
def __init__(self, p=0.5, alpha=1, sigma=20, c_val=0.0, method="linear"):
self.p = p
self.alpha = alpha
self.sigma = sigma
self.c_val = c_val
self.method = method
def __call__(self, sample):
augment = np.random.random(1) < self.p
if not augment:
return sample
return elastic_transform_3d(sample, self.alpha, self.sigma, self.c_val, self.method)
def random_noise(sample, mean=0, std=0.001, eps=1e-6):
im = sample['image'].copy()
noise = np.random.normal(mean, std, im.shape)
sample['image'] = np.where(im > eps, im + noise, im)
return sample
class GaussianNoise(object):
def __init__(self, p=0.5, mean=0, std=0.001):
self.mean = mean
self.std = std
self.p = p
def __call__(self, sample):
augment = np.random.random(1) < self.p
if not augment:
return sample
return random_noise(sample, self.mean, self.std)
def random_crop_to_size(sample, crop_sz):
im = sample['image'].copy()
shape = im.shape
if 'segmentation' in sample:
seg = sample['segmentation'].copy()
else:
seg = None
# choose randomly but check that at least one tumor pixel is included
width, height, depth = crop_sz
sum_tumor = 0
n_round = 0
d,x,y = 0,0,0
while sum_tumor == 0 and n_round < 1000:
n_round += 1
d = np.random.randint(0, shape[0] - depth - 1)
x = np.random.randint(0, shape[1] - width - 1)
y = np.random.randint(0, shape[2] - height - 1)
if seg is not None:
check = seg[d:d+depth, x:x+width, y:y+height]
sum_tumor = np.sum(check)
else:
sum_tumor = 1
assert n_round < 1000, f'no segmentation found in {sample["BraTSID"]}'
im = im[d:d+depth, x:x+width, y:y+height,:]
sample['image'] = im
if seg is not None:
seg = check
sample['segmentation'] = seg
return sample
class RandomCropToSize(object):
def __init__(self, crop_sz=(200,200,95)):
self.crop_sz = crop_sz
def __call__(self, sample):
return random_crop_to_size(sample, self.crop_sz)
def random_flip_lr(sample):
im = sample['image'].copy()
seg = sample['segmentation'].copy()
im = im[:,:,::-1,:]
seg = seg[:,:,::-1]
sample['image'] = im
sample['segmentation'] = seg
return sample
class RandomFlipLR(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, sample):
augment = np.random.random(1) < self.p
if not augment:
return sample
return random_flip_lr(sample)
def random_channel_drop(sample):
im = sample['image'].copy()
c = im.shape[3]
drop_ch = random.randint(0, c-1)
im[:,:,:,drop_ch] = 0. if random.random() > 0.5 else 1.0
sample['image'] = im
return sample
class RandomChannelDrop(object):
def __init__(self, p=0.05):
self.p = p
def __call__(self, sample):
augment = np.random.random(1) < self.p
if not augment:
return sample
return random_channel_drop(sample)
def random_rotate3D(sample, min_angle, max_angle):
"""
    Returns a randomly rotated image and seg map in the sample dict
:param sample: ds sample dict
:param min_angle: in degrees
:param max_angle: in degrees
:return: sample
"""
im = sample['image'].copy()
seg = sample['segmentation'].copy()
assert min_angle < max_angle, "min should be less than max val"
    assert min_angle > -360 and max_angle < 360, "angles should be within (-360, 360)"
all_axes = [(1, 0), (1, 2), (0, 2)]
angle = np.random.randint(low=min_angle, high=max_angle + 1)
axes_random_id = np.random.randint(low=0, high=len(all_axes))
axes = all_axes[axes_random_id]
im = ndimage.interpolation.rotate(im , angle, axes=axes, reshape=False)
seg = ndimage.rotate(seg.astype(np.float32), angle, axes=axes, reshape=False)
# seg back to binary float values
seg = np.where(seg < 0.5, 0, 1.)
sample['image'] = im
sample['segmentation'] = seg
return sample
class RandomRotation(object):
def __init__(self, min_angle=-10, max_angle=10, p=0.5):
self.min_angle = min_angle
self.max_angle = max_angle
self.p = p
def __call__(self, sample):
augment = np.random.random(1) < self.p
if not augment:
return sample
return random_rotate3D(sample, self.min_angle, self.max_angle)
class DownSampleSegmentation(object):
def __init__(self, ds=4):
self.ds = ds
def __call__(self, sample):
if 'segmentation' in sample:
seg = sample['segmentation']
seg = seg[::self.ds, ::self.ds, ::self.ds]
sample['segmentation'] = seg
return sample
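# Hedged usage sketch of a composed augmentation pipeline (shapes and probabilities are illustrative):
#
#   transform = ComposeTransforms([
#       RandomFlipLR(p=0.5),
#       RandomRotation(min_angle=-10, max_angle=10, p=0.5),
#       GaussianNoise(p=0.5, std=0.001),
#   ], p=0.9)
#   sample = {'image': np.zeros((64, 64, 64, 4), dtype=np.float32),
#             'segmentation': np.zeros((64, 64, 64), dtype=np.float32)}
#   sample = transform(sample)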
|
src/seg_model_utils/augmentations3d.py
|
| 0.763219 | 0.46794 |
import time
from oslo_log import log as logging
from tempest import clients
from oswin_tempest_plugin import config
from oswin_tempest_plugin.tests import test_base
CONF = config.CONF
LOG = logging.getLogger(__name__)
class ClientManager(clients.Manager):
def __init__(self, *args, **kwargs):
super(ClientManager, self).__init__(*args, **kwargs)
self.set_gnocchi_client()
def set_gnocchi_client(self):
self.gnocchi_client = self.metric_v1.GnocchiClient()
class MetricsCollectionTestCase(test_base.TestBase):
"""Adds metrics collection scenario tests.
This test suite verifies that the instance metrics are properly published
and collected and have non-zero values. The verification is done via the
ceilometer API.
setup:
        1. spins up a new instance.
        2. waits until the instance is created successfully (ACTIVE status).
        3. waits an interval of time which represents the polling period of the
ceilometer-polling agent.
Waiting for the ceilometer-polling agent to poll the resources is crucial,
    otherwise the test suite will fail because no samples will have been
    published before the samples are checked.
The test suite's polled_metrics_delay must have a greater value than the
ceilometer agent's polling interval. This can be done in two ways:
        a. Configure tempest's polled_metrics_delay, by adding the
following line in tempest.conf, in the hyperv section:
polled_metrics_delay = <desired value>
b. Set the interval value in polling.yaml on the compute node to
the desired value and restart the ceilometer polling agent. The
interval value is set either for the 'meter_source' or for each
of the following: 'cpu_source', 'disk_source', 'network_source'.
Note: If the polled_metrics_delay value is too low, the tests might not
           find any samples and fail because of this. As a recommendation,
           polled_metrics_delay's value should be:
           polled_metrics_delay = <polling.yaml interval value> + <15-20 seconds>
tests:
1. test_metrics - tests values for the following metrics:
- cpu
- network.outgoing.bytes
- disk.read.bytes
assumptions:
1. Ceilometer agent on the compute node is running.
2. Ceilometer agent on the compute node has the polling interval
defined in polling.yaml lower than the polled_metrics_delay defined
in this test suite.
3. The compute nodes' nova-compute and neutron-hyperv-agent services
have been configured to enable metrics collection.
"""
client_manager = ClientManager
@classmethod
def skip_checks(cls):
super(MetricsCollectionTestCase, cls).skip_checks()
for service in ['ceilometer', 'gnocchi']:
if not getattr(CONF.service_available, service):
raise cls.skipException("%s service is required." % service)
if not CONF.hyperv.collected_metrics:
raise cls.skipException("Collected metrics not configured.")
@classmethod
def setup_clients(cls):
super(MetricsCollectionTestCase, cls).setup_clients()
# Telemetry client
cls.telemetry_client = cls.os_primary.gnocchi_client
def _check_samples(self, resource_id, meter_name):
LOG.info("Checking %(meter_name)s for resource %(resource_id)s" % {
'meter_name': meter_name, 'resource_id': resource_id})
samples = self.telemetry_client.list_samples(resource_id, meter_name)
self.assertNotEmpty(
samples,
'Client returned no samples for the given resource '
'"%(resource_id)s" and meter "%(meter_name)s".' % {
'resource_id': resource_id, 'meter_name': meter_name})
non_zero_valued_samples = [s for s in samples if s[2] > 0]
self.assertNotEmpty(
non_zero_valued_samples,
'All meter %(meter_name)s samples for resource '
'%(resource_id)s are 0.' % {'meter_name': meter_name,
'resource_id': resource_id})
def _get_instance_cpu_resource_id(self, server):
return server['id']
def _get_instance_disk_resource_id(self, server):
return server['id']
def _get_instance_port_resource_id(self, server):
# Note(claudiub): the format for the instance_port_resource_id is:
# %(OS-EXT-SRV-ATTR:instance_name)s-%(instance_id)s-%(port_id)s
# the instance returned by self.servers_client does not contain the
        # OS-EXT-SRV-ATTR:instance_name field, which means that the resource_id
# must be found in gnocchi's resources.
start_res_id = server['id']
resources = self.telemetry_client.list_resources()
res_ids = [r['id'] for r in resources
if r['original_resource_id'].startswith('instance-')
and start_res_id in r['original_resource_id']]
self.assertEqual(1, len(res_ids))
return res_ids[0]
def _check_scenario(self, server_tuple):
server = server_tuple.server
LOG.info("Waiting %s seconds for the ceilometer compute agents to "
"publish the samples.", CONF.hyperv.polled_metrics_delay)
time.sleep(CONF.hyperv.polled_metrics_delay)
# TODO(claudiub): Add more metrics.
if 'cpu' in CONF.hyperv.collected_metrics:
cpu_res_id = self._get_instance_cpu_resource_id(server)
self._check_samples(cpu_res_id, 'cpu')
if 'network.outgoing.bytes' in CONF.hyperv.collected_metrics:
port_res_id = self._get_instance_port_resource_id(server)
self._check_samples(port_res_id, 'network.outgoing.bytes')
if 'disk.read.bytes' in CONF.hyperv.collected_metrics:
disk_resource_id = self._get_instance_disk_resource_id(server)
self._check_samples(disk_resource_id, 'disk.read.bytes')
def test_metrics(self):
server_tuple = self._create_server()
self._check_scenario(server_tuple)
|
oswin_tempest_plugin/tests/scenario/test_metrics_collection.py
|
| 0.634883 | 0.285476 |
import sys
import tempfile
from antlir.compiler.requires_provides import (
ProvidesSymlink,
RequireDirectory,
RequireFile,
)
from antlir.fs_utils import Path
from antlir.subvol_utils import TempSubvolumes
from ..install_file import InstallFileItem
from ..symlink import SymlinkToDirItem, SymlinkToFileItem
from .common import (
DUMMY_LAYER_OPTS,
BaseItemTestCase,
get_dummy_layer_opts_ba,
render_subvol,
)
DUMMY_LAYER_OPTS_BA = get_dummy_layer_opts_ba()
class SymlinkItemsTestCase(BaseItemTestCase):
def test_symlink(self):
self._check_item(
SymlinkToDirItem(from_target="t", source="x", dest="y"),
{ProvidesSymlink(path=Path("y"), target=Path("x"))},
{
RequireDirectory(path=Path("/")),
RequireDirectory(path=Path("/x")),
},
)
self._check_item(
SymlinkToFileItem(
from_target="t", source="source_file", dest="dest_symlink"
),
{
ProvidesSymlink(
path=Path("dest_symlink"), target=Path("source_file")
)
},
{
RequireDirectory(path=Path("/")),
RequireFile(path=Path("/source_file")),
},
)
def test_symlink_idempotent(self):
with TempSubvolumes() as ts:
sv = ts.create("test")
sv.run_as_root(["touch", sv.path("a")])
sv.run_as_root(["mkdir", sv.path("x")])
SymlinkToFileItem(from_target="t", source="a", dest="b").build(
sv, DUMMY_LAYER_OPTS
)
SymlinkToDirItem(from_target="t", source="x", dest="y").build(
sv, DUMMY_LAYER_OPTS
)
sv.set_readonly(True)
SymlinkToFileItem(from_target="t", source="a", dest="b").build(
sv, DUMMY_LAYER_OPTS
)
SymlinkToDirItem(from_target="t", source="x", dest="y").build(
sv, DUMMY_LAYER_OPTS
)
def test_symlink_already_exists(self):
with TempSubvolumes() as ts:
sv = ts.create("test")
sv.run_as_root(["touch", sv.path("a")])
sv.run_as_root(["touch", sv.path("b")])
sv.set_readonly(True)
with self.assertRaises(
RuntimeError, msg="dest='b' source='c': dest already exists"
):
SymlinkToFileItem(from_target="t", source="a", dest="b").build(
sv, DUMMY_LAYER_OPTS
)
def test_symlink_already_matches(self):
with TempSubvolumes() as ts:
sv = ts.create("test")
sv.run_as_root(["touch", sv.path("a")])
sv.run_as_root(["ln", "-ns", "a", sv.path("b")])
sv.set_readonly(True)
SymlinkToFileItem(from_target="t", source="a", dest="b").build(
sv, DUMMY_LAYER_OPTS
)
def test_symlink_already_exists_different_source(self):
with TempSubvolumes() as ts:
sv = ts.create("test")
sv.run_as_root(["touch", sv.path("a")])
SymlinkToFileItem(from_target="t", source="a", dest="b").build(
sv, DUMMY_LAYER_OPTS
)
sv.set_readonly(True)
with self.assertRaises(
RuntimeError, msg="dest='b' source='c': b -> a exists to b'a'"
):
SymlinkToFileItem(from_target="t", source="c", dest="b").build(
sv, DUMMY_LAYER_OPTS
)
def _test_symlink_command(self, layer_opts):
with TempSubvolumes(Path(sys.argv[0])) as temp_subvolumes:
subvol = temp_subvolumes.create("tar-sv")
subvol.run_as_root(["mkdir", subvol.path("dir")])
# We need a source file to validate a SymlinkToFileItem
with tempfile.NamedTemporaryFile() as tf:
InstallFileItem(
from_target="t", source=tf.name, dest="/file"
).build(subvol, layer_opts)
SymlinkToDirItem(
from_target="t", source="/dir", dest="/dir_symlink"
).build(subvol, layer_opts)
SymlinkToFileItem(
from_target="t", source="file", dest="/file_symlink"
).build(subvol, layer_opts)
# Make a couple of absolute symlinks to test our behavior on
# linking to paths that contain those.
subvol.run_as_root(
[
"bash",
"-c",
f"""\
ln -s /file {subvol.path('abs_link_to_file').shell_quote()}
mkdir {subvol.path('my_dir').shell_quote()}
touch {subvol.path('my_dir/inner').shell_quote()}
ln -s /my_dir {subvol.path('my_dir_link').shell_quote()}
""",
]
)
# A simple case: we link to an absolute link.
SymlinkToFileItem(
from_target="t",
source="/abs_link_to_file",
dest="/link_to_abs_link",
).build(subvol, layer_opts)
# This link traverses a directory that is an absolute link. The
                # resulting relative symlink is not traversable from outside the
# container.
SymlinkToFileItem(
from_target="t",
source="my_dir_link/inner",
dest="/dir/inner_link",
).build(subvol, layer_opts)
self.assertEqual(
[
"(Dir)",
{
"dir": [
"(Dir)",
{"inner_link": ["(Symlink ../my_dir_link/inner)"]},
],
"dir_symlink": ["(Symlink dir)"],
"file": ["(File m444)"],
"file_symlink": ["(Symlink file)"],
"abs_link_to_file": ["(Symlink /file)"],
"my_dir": ["(Dir)", {"inner": ["(File)"]}],
"my_dir_link": ["(Symlink /my_dir)"],
"link_to_abs_link": ["(Symlink abs_link_to_file)"],
},
],
render_subvol(subvol),
)
def test_symlink_command_non_ba(self):
self._test_symlink_command(DUMMY_LAYER_OPTS)
def test_symlink_command_ba(self):
self._test_symlink_command(DUMMY_LAYER_OPTS_BA)
|
antlir/compiler/items/tests/test_symlink.py
|
import sys
import tempfile
from antlir.compiler.requires_provides import (
ProvidesSymlink,
RequireDirectory,
RequireFile,
)
from antlir.fs_utils import Path
from antlir.subvol_utils import TempSubvolumes
from ..install_file import InstallFileItem
from ..symlink import SymlinkToDirItem, SymlinkToFileItem
from .common import (
DUMMY_LAYER_OPTS,
BaseItemTestCase,
get_dummy_layer_opts_ba,
render_subvol,
)
DUMMY_LAYER_OPTS_BA = get_dummy_layer_opts_ba()
class SymlinkItemsTestCase(BaseItemTestCase):
def test_symlink(self):
self._check_item(
SymlinkToDirItem(from_target="t", source="x", dest="y"),
{ProvidesSymlink(path=Path("y"), target=Path("x"))},
{
RequireDirectory(path=Path("/")),
RequireDirectory(path=Path("/x")),
},
)
self._check_item(
SymlinkToFileItem(
from_target="t", source="source_file", dest="dest_symlink"
),
{
ProvidesSymlink(
path=Path("dest_symlink"), target=Path("source_file")
)
},
{
RequireDirectory(path=Path("/")),
RequireFile(path=Path("/source_file")),
},
)
def test_symlink_idempotent(self):
with TempSubvolumes() as ts:
sv = ts.create("test")
sv.run_as_root(["touch", sv.path("a")])
sv.run_as_root(["mkdir", sv.path("x")])
SymlinkToFileItem(from_target="t", source="a", dest="b").build(
sv, DUMMY_LAYER_OPTS
)
SymlinkToDirItem(from_target="t", source="x", dest="y").build(
sv, DUMMY_LAYER_OPTS
)
sv.set_readonly(True)
SymlinkToFileItem(from_target="t", source="a", dest="b").build(
sv, DUMMY_LAYER_OPTS
)
SymlinkToDirItem(from_target="t", source="x", dest="y").build(
sv, DUMMY_LAYER_OPTS
)
def test_symlink_already_exists(self):
with TempSubvolumes() as ts:
sv = ts.create("test")
sv.run_as_root(["touch", sv.path("a")])
sv.run_as_root(["touch", sv.path("b")])
sv.set_readonly(True)
with self.assertRaises(
                RuntimeError, msg="dest='b' source='a': dest already exists"
):
SymlinkToFileItem(from_target="t", source="a", dest="b").build(
sv, DUMMY_LAYER_OPTS
)
def test_symlink_already_matches(self):
with TempSubvolumes() as ts:
sv = ts.create("test")
sv.run_as_root(["touch", sv.path("a")])
sv.run_as_root(["ln", "-ns", "a", sv.path("b")])
sv.set_readonly(True)
SymlinkToFileItem(from_target="t", source="a", dest="b").build(
sv, DUMMY_LAYER_OPTS
)
def test_symlink_already_exists_different_source(self):
with TempSubvolumes() as ts:
sv = ts.create("test")
sv.run_as_root(["touch", sv.path("a")])
SymlinkToFileItem(from_target="t", source="a", dest="b").build(
sv, DUMMY_LAYER_OPTS
)
sv.set_readonly(True)
with self.assertRaises(
RuntimeError, msg="dest='b' source='c': b -> a exists to b'a'"
):
SymlinkToFileItem(from_target="t", source="c", dest="b").build(
sv, DUMMY_LAYER_OPTS
)
def _test_symlink_command(self, layer_opts):
with TempSubvolumes(Path(sys.argv[0])) as temp_subvolumes:
subvol = temp_subvolumes.create("tar-sv")
subvol.run_as_root(["mkdir", subvol.path("dir")])
# We need a source file to validate a SymlinkToFileItem
with tempfile.NamedTemporaryFile() as tf:
InstallFileItem(
from_target="t", source=tf.name, dest="/file"
).build(subvol, layer_opts)
SymlinkToDirItem(
from_target="t", source="/dir", dest="/dir_symlink"
).build(subvol, layer_opts)
SymlinkToFileItem(
from_target="t", source="file", dest="/file_symlink"
).build(subvol, layer_opts)
# Make a couple of absolute symlinks to test our behavior on
# linking to paths that contain those.
subvol.run_as_root(
[
"bash",
"-c",
f"""\
ln -s /file {subvol.path('abs_link_to_file').shell_quote()}
mkdir {subvol.path('my_dir').shell_quote()}
touch {subvol.path('my_dir/inner').shell_quote()}
ln -s /my_dir {subvol.path('my_dir_link').shell_quote()}
""",
]
)
# A simple case: we link to an absolute link.
SymlinkToFileItem(
from_target="t",
source="/abs_link_to_file",
dest="/link_to_abs_link",
).build(subvol, layer_opts)
# This link traverses a directory that is an absolute link. The
                # resulting relative symlink is not traversable from outside the
# container.
SymlinkToFileItem(
from_target="t",
source="my_dir_link/inner",
dest="/dir/inner_link",
).build(subvol, layer_opts)
self.assertEqual(
[
"(Dir)",
{
"dir": [
"(Dir)",
{"inner_link": ["(Symlink ../my_dir_link/inner)"]},
],
"dir_symlink": ["(Symlink dir)"],
"file": ["(File m444)"],
"file_symlink": ["(Symlink file)"],
"abs_link_to_file": ["(Symlink /file)"],
"my_dir": ["(Dir)", {"inner": ["(File)"]}],
"my_dir_link": ["(Symlink /my_dir)"],
"link_to_abs_link": ["(Symlink abs_link_to_file)"],
},
],
render_subvol(subvol),
)
def test_symlink_command_non_ba(self):
self._test_symlink_command(DUMMY_LAYER_OPTS)
def test_symlink_command_ba(self):
self._test_symlink_command(DUMMY_LAYER_OPTS_BA)
| 0.379263 | 0.215846 |
import numpy as np
import pytest
from pyrado.environment_wrappers.observation_velfilter import ObsVelFiltWrapper
from pyrado.environments.pysim.quanser_qube import QQubeSwingUpSim
from pyrado.policies.feed_forward.dummy import IdlePolicy
from pyrado.sampling.rollout import rollout
from pyrado.spaces.singular import SingularStateSpace
from pyrado.utils.math import rmse
@pytest.mark.wrapper
@pytest.mark.parametrize("plot", [False, pytest.param(True, marks=pytest.mark.visual)])
def test_velocity_filter(plot: bool):
# Set up environment
env_gt = QQubeSwingUpSim(dt=1 / 500.0, max_steps=350)
env_gt.init_space = SingularStateSpace(np.array([0.1, np.pi / 2, 3.0, 0]))
env_filt = ObsVelFiltWrapper(env_gt, idcs_pos=["theta", "alpha"], idcs_vel=["theta_dot", "alpha_dot"])
# Set up policy
policy = IdlePolicy(env_gt.spec)
# Simulate
ro_gt = rollout(env_gt, policy)
ro_filt = rollout(env_filt, policy)
    # Extract the velocity observations from both rollouts
theta_dot_gt = ro_gt.observations[:, 4]
alpha_dot_gt = ro_gt.observations[:, 5]
theta_dot_filt = ro_filt.observations[:, 4]
alpha_dot_filt = ro_filt.observations[:, 5]
assert theta_dot_filt[0] != pytest.approx(theta_dot_gt[0]) # can't be equal since we set an init vel of 3 rad/s
assert alpha_dot_filt[0] == pytest.approx(alpha_dot_gt[0], abs=1e-4)
# Compute the error
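    # Root-mean-square error between the ground-truth and the filtered
    # velocity signals over the whole rollout (lower is better)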
rmse_theta = rmse(theta_dot_gt, theta_dot_filt)
rmse_alpha = rmse(alpha_dot_gt, alpha_dot_filt)
if plot:
from matplotlib import pyplot as plt
        # Plot the filtered signals versus the original observations
plt.rc("text", usetex=True)
        fig, axs = plt.subplots(2, figsize=(16, 9))
axs[0].plot(theta_dot_gt, label=r"$\dot{\theta}_{true}$")
axs[0].plot(theta_dot_filt, label=r"$\dot{\theta}_{filt}$")
axs[1].plot(alpha_dot_gt, label=r"$\dot{\alpha}_{true}$")
axs[1].plot(alpha_dot_filt, label=r"$\dot{\alpha}_{filt}$")
axs[0].set_title(rf"RMSE($\theta$): {rmse_theta}")
axs[0].set_ylabel(r"$\dot{\theta}$ [rad/s]")
axs[0].legend()
axs[1].set_title(rf"RMSE($\alpha$): {rmse_alpha}")
axs[1].set_xlabel("time steps")
axs[1].set_ylabel(r"$\dot{\alpha}$ [rad/s]")
axs[1].legend()
plt.show()
|
Pyrado/tests/environment_wrappers/test_observation_velfilt.py
|
import numpy as np
import pytest
from pyrado.environment_wrappers.observation_velfilter import ObsVelFiltWrapper
from pyrado.environments.pysim.quanser_qube import QQubeSwingUpSim
from pyrado.policies.feed_forward.dummy import IdlePolicy
from pyrado.sampling.rollout import rollout
from pyrado.spaces.singular import SingularStateSpace
from pyrado.utils.math import rmse
@pytest.mark.wrapper
@pytest.mark.parametrize("plot", [False, pytest.param(True, marks=pytest.mark.visual)])
def test_velocity_filter(plot: bool):
# Set up environment
env_gt = QQubeSwingUpSim(dt=1 / 500.0, max_steps=350)
env_gt.init_space = SingularStateSpace(np.array([0.1, np.pi / 2, 3.0, 0]))
env_filt = ObsVelFiltWrapper(env_gt, idcs_pos=["theta", "alpha"], idcs_vel=["theta_dot", "alpha_dot"])
# Set up policy
policy = IdlePolicy(env_gt.spec)
# Simulate
ro_gt = rollout(env_gt, policy)
ro_filt = rollout(env_filt, policy)
    # Extract the velocity observations from both rollouts
theta_dot_gt = ro_gt.observations[:, 4]
alpha_dot_gt = ro_gt.observations[:, 5]
theta_dot_filt = ro_filt.observations[:, 4]
alpha_dot_filt = ro_filt.observations[:, 5]
assert theta_dot_filt[0] != pytest.approx(theta_dot_gt[0]) # can't be equal since we set an init vel of 3 rad/s
assert alpha_dot_filt[0] == pytest.approx(alpha_dot_gt[0], abs=1e-4)
# Compute the error
rmse_theta = rmse(theta_dot_gt, theta_dot_filt)
rmse_alpha = rmse(alpha_dot_gt, alpha_dot_filt)
if plot:
from matplotlib import pyplot as plt
        # Plot the filtered signals versus the original observations
plt.rc("text", usetex=True)
        fig, axs = plt.subplots(2, figsize=(16, 9))
axs[0].plot(theta_dot_gt, label=r"$\dot{\theta}_{true}$")
axs[0].plot(theta_dot_filt, label=r"$\dot{\theta}_{filt}$")
axs[1].plot(alpha_dot_gt, label=r"$\dot{\alpha}_{true}$")
axs[1].plot(alpha_dot_filt, label=r"$\dot{\alpha}_{filt}$")
axs[0].set_title(rf"RMSE($\theta$): {rmse_theta}")
axs[0].set_ylabel(r"$\dot{\theta}$ [rad/s]")
axs[0].legend()
axs[1].set_title(rf"RMSE($\alpha$): {rmse_alpha}")
axs[1].set_xlabel("time steps")
axs[1].set_ylabel(r"$\dot{\alpha}$ [rad/s]")
axs[1].legend()
plt.show()
| 0.791015 | 0.636805 |
import json
import copy
import pandas as pd
import spacy
from sklearn.model_selection import train_test_split
class DataSet:
'''Representation of a data set for text classification pipeline
Attributes:
input_: Input dataset if not pre-split in train and test set. This can be a
pandas.DataFrame or a string pointing to a csv/tsv file or a json file
containing the input data.
train_input: str or pandas.DataFrame, if splitting in training and test set
should not be done (in case a specific split is pre-defined or training
and test data come in separate files) input for training data. If
provided `input_' must be None and `test_input' must be provided as
well.
test_input: str or pandas.DataFrame, see `train_input'.
name: str, name for the dataset for identification in experiments.
field_mapping: dict, Dictionary containing two fields: `text' and `label'
that identify the column (for tabular data) or field (for json data)
that contain the respective information.
test_size: float, proportion of documents to reserve as held-out test set.
'''
def __init__(self, input_=None, train_input=None, test_input=None,
name='', field_mapping={'text': 'text', 'label': 'label'},
file_subset=None, test_size=0.25):
# Store inputs
self.name = name
self.field_mapping = field_mapping
self.test_size = test_size
self.file_subset = file_subset
self.input_ = input_
self.train_input, self.test_input = train_input, test_input
if input_ is not None:
self.df = self.read_transform(input_)
self.df.dropna(inplace=True)
self.df.reset_index(inplace=True, drop=True)
self.train_idxs, self.test_idxs = train_test_split(
range(len(self)), test_size=test_size
)
elif train_input is not None:
if input_ is not None:
                raise ValueError('Please only specify either `input_` or '
'`train_input`')
if test_input is None:
raise ValueError('Please pass data as `input_` if not '
'specifying both `train_input` and '
'`test_input`')
train_df = self.read_transform(train_input)
test_df = self.read_transform(test_input)
self.df = train_df.append(test_df)
self.df.reset_index(inplace=True, drop=True)
self.df.dropna(inplace=True)
self.train_idxs = list(range(train_df.shape[0]))
self.test_idxs = list(
range(
train_df.shape[0],
train_df.shape[0] + test_df.shape[0]
)
)
else:
raise ValueError('Either `input_` or (`train_input`, `test_input`)'
' have to be specified.')
def __len__(self):
return self.df.shape[0]
@property
def df_train(self):
return self.df.iloc[self.train_idxs]
@property
def df_test(self):
return self.df.iloc[self.test_idxs]
def get_texts(self, set_):
if set_ == "train":
return self.df_train['text']
elif set_ == 'test':
return self.df_test['text']
def get_labels(self, set_):
if set_ == 'train':
return self.df['label'].iloc[self.train_idxs].astype(str)
elif set_ == 'test':
return self.df['label'].iloc[self.test_idxs].astype(str)
else:
raise ValueError("set_ must be one of ['train', 'test']")
def read_transform(self, input_):
'''Read input data and transform to common format
Selects the right method depending on the data type or
        file type and dispatches the appropriate method
'''
if isinstance(input_, str):
inpath = input_
if inpath.endswith('.csv'):
df = self.read_from_delim(inpath, delim=',')
elif inpath.endswith('.tsv'):
df = self.read_from_delim(inpath, delim='\t')
elif inpath.endswith('.json'):
df = self.read_from_json(inpath)
else:
raise ValueError('Unsupported file format')
elif isinstance(input_, pd.core.frame.DataFrame):
df = self.read_from_df(input_)
else:
raise ValueError('input_ has to be str or'
'pd.core.frame.DataFrame')
return df
def read_from_df(self, input_df):
out = copy.copy(input_df[[self.field_mapping['label'],
self.field_mapping['text']]])
out.columns = ['label', 'text']
out.reset_index(inplace=True, drop=True)
return out
def read_from_delim(self, inpath, delim):
df = pd.read_csv(inpath, delimiter=delim)
if self.file_subset is not None:
df = df[df[self.file_subset[0]] == self.file_subset[1]]
out = df[[self.field_mapping['label'],
self.field_mapping['text']]]
out.columns = ['label', 'text']
return out
def read_from_json(self, inpath):
with open(inpath) as infile:
out = {'label': [], 'text': []}
for line in infile:
doc = json.loads(line)
if self.file_subset is not None:
if doc[self.file_subset[0]] == self.file_subset[1]:
out['label'].append(doc[self.field_mapping['label']])
out['text'].append(doc[self.field_mapping['text']])
else:
out['label'].append(doc[self.field_mapping['label']])
out['text'].append(doc[self.field_mapping['text']])
return pd.DataFrame(out)
class SpacyTokenizer:
def __init__(self):
self.nlp = spacy.load('en', disable=['ner', 'parser', 'tagger'])
@staticmethod
def rescue_hashtags(token_list):
tokens = iter(token_list)
return([t + next(tokens, '') if t == '#' else t for t in tokens])
def tokenize(self, text):
return self.rescue_hashtags([x.orth_ for x in self.nlp(text)])
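# A minimal usage sketch (not part of the library): the column names 'body'
# and 'tag' below are made up for illustration; any DataFrame with a text
# column and a label column works the same way via `field_mapping`.
if __name__ == '__main__':
    example_df = pd.DataFrame({
        'body': ['good movie', 'bad movie', 'great plot', 'weak ending'],
        'tag': ['pos', 'neg', 'pos', 'neg'],
    })
    ds = DataSet(input_=example_df, name='toy',
                 field_mapping={'text': 'body', 'label': 'tag'},
                 test_size=0.25)
    print(len(ds), ds.get_texts('train').tolist(), ds.get_labels('test').tolist())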
|
smapp_text_classifier/data.py
|
import json
import copy
import pandas as pd
import spacy
from sklearn.model_selection import train_test_split
class DataSet:
'''Representation of a data set for text classification pipeline
Attributes:
input_: Input dataset if not pre-split in train and test set. This can be a
pandas.DataFrame or a string pointing to a csv/tsv file or a json file
containing the input data.
train_input: str or pandas.DataFrame, if splitting in training and test set
should not be done (in case a specific split is pre-defined or training
and test data come in separate files) input for training data. If
provided `input_' must be None and `test_input' must be provided as
well.
test_input: str or pandas.DataFrame, see `train_input'.
name: str, name for the dataset for identification in experiments.
field_mapping: dict, Dictionary containing two fields: `text' and `label'
that identify the column (for tabular data) or field (for json data)
that contain the respective information.
test_size: float, proportion of documents to reserve as held-out test set.
'''
def __init__(self, input_=None, train_input=None, test_input=None,
name='', field_mapping={'text': 'text', 'label': 'label'},
file_subset=None, test_size=0.25):
# Store inputs
self.name = name
self.field_mapping = field_mapping
self.test_size = test_size
self.file_subset = file_subset
self.input_ = input_
self.train_input, self.test_input = train_input, test_input
if input_ is not None:
self.df = self.read_transform(input_)
self.df.dropna(inplace=True)
self.df.reset_index(inplace=True, drop=True)
self.train_idxs, self.test_idxs = train_test_split(
range(len(self)), test_size=test_size
)
elif train_input is not None:
if input_ is not None:
                raise ValueError('Please only specify either `input_` or '
'`train_input`')
if test_input is None:
raise ValueError('Please pass data as `input_` if not '
'specifying both `train_input` and '
'`test_input`')
train_df = self.read_transform(train_input)
test_df = self.read_transform(test_input)
self.df = train_df.append(test_df)
self.df.reset_index(inplace=True, drop=True)
self.df.dropna(inplace=True)
self.train_idxs = list(range(train_df.shape[0]))
self.test_idxs = list(
range(
train_df.shape[0],
train_df.shape[0] + test_df.shape[0]
)
)
else:
raise ValueError('Either `input_` or (`train_input`, `test_input`)'
' have to be specified.')
def __len__(self):
return self.df.shape[0]
@property
def df_train(self):
return self.df.iloc[self.train_idxs]
@property
def df_test(self):
return self.df.iloc[self.test_idxs]
def get_texts(self, set_):
if set_ == "train":
return self.df_train['text']
elif set_ == 'test':
return self.df_test['text']
def get_labels(self, set_):
if set_ == 'train':
return self.df['label'].iloc[self.train_idxs].astype(str)
elif set_ == 'test':
return self.df['label'].iloc[self.test_idxs].astype(str)
else:
raise ValueError("set_ must be one of ['train', 'test']")
def read_transform(self, input_):
'''Read input data and transform to common format
Selects the right method depending on the data type or
        file type and dispatches the appropriate method
'''
if isinstance(input_, str):
inpath = input_
if inpath.endswith('.csv'):
df = self.read_from_delim(inpath, delim=',')
elif inpath.endswith('.tsv'):
df = self.read_from_delim(inpath, delim='\t')
elif inpath.endswith('.json'):
df = self.read_from_json(inpath)
else:
raise ValueError('Unsupported file format')
elif isinstance(input_, pd.core.frame.DataFrame):
df = self.read_from_df(input_)
else:
raise ValueError('input_ has to be str or'
'pd.core.frame.DataFrame')
return df
def read_from_df(self, input_df):
out = copy.copy(input_df[[self.field_mapping['label'],
self.field_mapping['text']]])
out.columns = ['label', 'text']
out.reset_index(inplace=True, drop=True)
return out
def read_from_delim(self, inpath, delim):
df = pd.read_csv(inpath, delimiter=delim)
if self.file_subset is not None:
df = df[df[self.file_subset[0]] == self.file_subset[1]]
out = df[[self.field_mapping['label'],
self.field_mapping['text']]]
out.columns = ['label', 'text']
return out
def read_from_json(self, inpath):
with open(inpath) as infile:
out = {'label': [], 'text': []}
for line in infile:
doc = json.loads(line)
if self.file_subset is not None:
if doc[self.file_subset[0]] == self.file_subset[1]:
out['label'].append(doc[self.field_mapping['label']])
out['text'].append(doc[self.field_mapping['text']])
else:
out['label'].append(doc[self.field_mapping['label']])
out['text'].append(doc[self.field_mapping['text']])
return pd.DataFrame(out)
class SpacyTokenizer:
def __init__(self):
self.nlp = spacy.load('en', disable=['ner', 'parser', 'tagger'])
@staticmethod
def rescue_hashtags(token_list):
tokens = iter(token_list)
return([t + next(tokens, '') if t == '#' else t for t in tokens])
def tokenize(self, text):
return self.rescue_hashtags([x.orth_ for x in self.nlp(text)])
| 0.698535 | 0.522994 |
import re
from functools import lru_cache
from io import StringIO
from pathlib import Path
from typing import Any, Set, Tuple
import geopandas as gpd
import pandas as pd
from shapely.geometry import MultiPoint
from shapely.ops import unary_union
from ....data import nm_navaids
from .airspaces import NMAirspaceParser
def parse_coordinates(elt: str) -> Tuple[float, float]:
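    # Decodes an NM-style coordinate token such as "N502500E0041500" into
    # decimal degrees, e.g. roughly (50.4167, 4.25) for that token; the
    # optional fractional part accepted by the pattern is ignored.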
pattern = r"([N,S])(\d{4}|\d{6})(.\d*)?([E,W])(\d{5}|\d{7})(.\d*)?$"
x = re.match(pattern, elt)
assert x is not None, elt
lat_, lat_sign = x.group(2), 1 if x.group(1) == "N" else -1
lon_, lon_sign = x.group(5), 1 if x.group(4) == "E" else -1
lat_ = lat_.ljust(6, "0")
lon_ = lon_.ljust(7, "0")
lat = lat_sign * (
int(lat_[:2]) + int(lat_[2:4]) / 60 + int(lat_[4:]) / 3600
)
lon = lon_sign * (
int(lon_[:3]) + int(lon_[3:5]) / 60 + int(lon_[5:]) / 3600
)
return (lat, lon)
class NMFreeRouteParser(NMAirspaceParser):
def init_cache(self) -> None:
msg = f"Edit file {self.config_file} with NM directory"
if self.nm_path is None:
raise RuntimeError(msg)
are_file = next(self.nm_path.glob("Free_Route_*.are"), None)
if are_file is None:
raise RuntimeError(
f"No Free_Route_*.are file found in {self.nm_path}"
)
self.read_are(are_file)
sls_file = next(self.nm_path.glob("Free_Route_*.sls"), None)
if sls_file is None:
raise RuntimeError(
f"No Free_Route_*.sls file found in {self.nm_path}"
)
self.read_sls(sls_file)
self.initialized = True
self.fra = gpd.GeoDataFrame.from_records(
[
{"FRA": k, "geometry": self[k].shape} # type: ignore
for k in self.elements.keys()
]
)
frp_file = next(self.nm_path.glob("Free_Route_*.frp"), None)
if frp_file is None:
raise RuntimeError(
f"No Free_Route_*.frp file found in {self.nm_path}"
)
self.read_frp(frp_file)
def read_frp(self, filename: Path) -> None:
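        # Parses the Free Route points (*.frp) file: decode coordinates given
        # inline, propagate them to other rows sharing the same point name,
        # look the remaining names up among navaids inside the FRA area, and
        # finally attach the airport connection rows.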
area = unary_union(self.fra.geometry)
west, south, east, north = area.bounds
subset = nm_navaids.extent((west, east, south, north))
assert subset is not None
coords = subset.data[["longitude", "latitude"]].values
europoints = subset.data.merge(
pd.DataFrame(
[
list(x.coords[0])
for x in area.intersection(MultiPoint(coords)).geoms
],
columns=["longitude", "latitude"],
)
)
df = pd.read_csv(StringIO(filename.read_text()), header=None)
df_ = (
df[0]
.str.replace(r"\s+", " ", regex=True)
.str.split(" ", expand=True)
.rename(columns={0: "FRA", 1: "type", 2: "name"})
)
a = (
df_.query('type in ["AD", "A", "D"]')
.dropna(axis=1, how="all")
.iloc[:, 3:]
.fillna("")
.sum(axis=1)
.str.replace(r"(\w{4})", r"\1,", regex=True)
.str[:-1]
.str.split(",")
)
tab = (
df_.query('type not in ["AD", "A", "D"]')
.dropna(axis=1, how="all")
.rename(columns={3: "latitude", 4: "longitude"})
)
# Part 1: When coordinates are in the file, decode them
coords = (
tab.query("latitude.notnull()")[["latitude", "longitude"]]
.sum(axis=1)
.apply(parse_coordinates)
)
decode_coords = tab.query("latitude.notnull()").assign(
latitude=coords.str[0], longitude=coords.str[1]
)
# Part 2: Propagate decoded coordinates (avoid slight inconsistencies)
propagate_coords = (
tab.query("latitude.isnull() and name in @decode_coords.name")
.drop(columns=["latitude", "longitude"])
.merge(
decode_coords[
["name", "latitude", "longitude"]
].drop_duplicates(),
on="name",
)
)
# Part 3: Unknown coordinates
unknown_coords = (
tab.query("latitude.isnull() and name not in @decode_coords.name")
.drop(columns=["latitude", "longitude"])
.merge(europoints.drop(columns=["type", "description"]), on="name")
)
# Part 4: Airport connections
airport_coords = pd.concat(
[
df_.query('type in ["AD", "A", "D"]').iloc[:, :3],
a.rename("airport"),
],
axis=1,
)
propagate_airports = airport_coords.merge(
decode_coords[["name", "latitude", "longitude"]].drop_duplicates(),
on=["name"],
).explode("airport")
unknown_airports = (
airport_coords.query("name not in @propagate_airports.name").merge(
europoints.drop(columns=["type", "description"]), on="name"
)
).explode("airport")
self.frp = pd.concat(
[
decode_coords,
propagate_coords,
unknown_coords,
propagate_airports,
unknown_airports,
]
)
def __getattr__(self, attr: str) -> Any:
if attr in ["fra", "frp"]:
self.init_cache()
return getattr(self, attr)
raise AttributeError(attr)
@lru_cache()
def _ipython_key_completions_(self) -> Set[str]:
return {*self.elements.keys()}
|
traffic/data/eurocontrol/ddr/freeroute.py
|
import re
from functools import lru_cache
from io import StringIO
from pathlib import Path
from typing import Any, Set, Tuple
import geopandas as gpd
import pandas as pd
from shapely.geometry import MultiPoint
from shapely.ops import unary_union
from ....data import nm_navaids
from .airspaces import NMAirspaceParser
def parse_coordinates(elt: str) -> Tuple[float, float]:
pattern = r"([N,S])(\d{4}|\d{6})(.\d*)?([E,W])(\d{5}|\d{7})(.\d*)?$"
x = re.match(pattern, elt)
assert x is not None, elt
lat_, lat_sign = x.group(2), 1 if x.group(1) == "N" else -1
lon_, lon_sign = x.group(5), 1 if x.group(4) == "E" else -1
lat_ = lat_.ljust(6, "0")
lon_ = lon_.ljust(7, "0")
lat = lat_sign * (
int(lat_[:2]) + int(lat_[2:4]) / 60 + int(lat_[4:]) / 3600
)
lon = lon_sign * (
int(lon_[:3]) + int(lon_[3:5]) / 60 + int(lon_[5:]) / 3600
)
return (lat, lon)
class NMFreeRouteParser(NMAirspaceParser):
def init_cache(self) -> None:
msg = f"Edit file {self.config_file} with NM directory"
if self.nm_path is None:
raise RuntimeError(msg)
are_file = next(self.nm_path.glob("Free_Route_*.are"), None)
if are_file is None:
raise RuntimeError(
f"No Free_Route_*.are file found in {self.nm_path}"
)
self.read_are(are_file)
sls_file = next(self.nm_path.glob("Free_Route_*.sls"), None)
if sls_file is None:
raise RuntimeError(
f"No Free_Route_*.sls file found in {self.nm_path}"
)
self.read_sls(sls_file)
self.initialized = True
self.fra = gpd.GeoDataFrame.from_records(
[
{"FRA": k, "geometry": self[k].shape} # type: ignore
for k in self.elements.keys()
]
)
frp_file = next(self.nm_path.glob("Free_Route_*.frp"), None)
if frp_file is None:
raise RuntimeError(
f"No Free_Route_*.frp file found in {self.nm_path}"
)
self.read_frp(frp_file)
def read_frp(self, filename: Path) -> None:
area = unary_union(self.fra.geometry)
west, south, east, north = area.bounds
subset = nm_navaids.extent((west, east, south, north))
assert subset is not None
coords = subset.data[["longitude", "latitude"]].values
europoints = subset.data.merge(
pd.DataFrame(
[
list(x.coords[0])
for x in area.intersection(MultiPoint(coords)).geoms
],
columns=["longitude", "latitude"],
)
)
df = pd.read_csv(StringIO(filename.read_text()), header=None)
df_ = (
df[0]
.str.replace(r"\s+", " ", regex=True)
.str.split(" ", expand=True)
.rename(columns={0: "FRA", 1: "type", 2: "name"})
)
a = (
df_.query('type in ["AD", "A", "D"]')
.dropna(axis=1, how="all")
.iloc[:, 3:]
.fillna("")
.sum(axis=1)
.str.replace(r"(\w{4})", r"\1,", regex=True)
.str[:-1]
.str.split(",")
)
tab = (
df_.query('type not in ["AD", "A", "D"]')
.dropna(axis=1, how="all")
.rename(columns={3: "latitude", 4: "longitude"})
)
# Part 1: When coordinates are in the file, decode them
coords = (
tab.query("latitude.notnull()")[["latitude", "longitude"]]
.sum(axis=1)
.apply(parse_coordinates)
)
decode_coords = tab.query("latitude.notnull()").assign(
latitude=coords.str[0], longitude=coords.str[1]
)
# Part 2: Propagate decoded coordinates (avoid slight inconsistencies)
propagate_coords = (
tab.query("latitude.isnull() and name in @decode_coords.name")
.drop(columns=["latitude", "longitude"])
.merge(
decode_coords[
["name", "latitude", "longitude"]
].drop_duplicates(),
on="name",
)
)
# Part 3: Unknown coordinates
unknown_coords = (
tab.query("latitude.isnull() and name not in @decode_coords.name")
.drop(columns=["latitude", "longitude"])
.merge(europoints.drop(columns=["type", "description"]), on="name")
)
# Part 4: Airport connections
airport_coords = pd.concat(
[
df_.query('type in ["AD", "A", "D"]').iloc[:, :3],
a.rename("airport"),
],
axis=1,
)
propagate_airports = airport_coords.merge(
decode_coords[["name", "latitude", "longitude"]].drop_duplicates(),
on=["name"],
).explode("airport")
unknown_airports = (
airport_coords.query("name not in @propagate_airports.name").merge(
europoints.drop(columns=["type", "description"]), on="name"
)
).explode("airport")
self.frp = pd.concat(
[
decode_coords,
propagate_coords,
unknown_coords,
propagate_airports,
unknown_airports,
]
)
def __getattr__(self, attr: str) -> Any:
if attr in ["fra", "frp"]:
self.init_cache()
return getattr(self, attr)
raise AttributeError(attr)
@lru_cache()
def _ipython_key_completions_(self) -> Set[str]:
return {*self.elements.keys()}
| 0.762114 | 0.413004 |
from flask_socketio import SocketIO
from flask import Flask, render_template, request
from random import random
import threading
from threading import Thread, Event
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
app.config['DEBUG'] = True
# Turn the flask app into a SocketIO app
socket_io = SocketIO(app, async_mode='eventlet', logger=False, engineio_logger=False)
# Random number Generator Thread
thread = Thread()
thread_stop_event = Event()
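# The placeholder Thread() above is never started; it only exists so that
# thread.is_alive() can be called safely before the first client connects.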
client_counter = 0
def random_number_generator():
"""
    Generate a random number every 5 seconds and emit it to all clients on the
    /test namespace (broadcast). Meant to run as a SocketIO background task.
"""
    while not thread_stop_event.is_set():
number = round(random() * 10, 3)
print(f'{number}, thread_ident={threading.get_native_id()}')
socket_io.emit('new_number', {'number': number}, namespace='/test')
socket_io.sleep(5)
print('Thread is done')
@app.route('/')
def index():
# Only by sending this page first the client will be connected to the socket_io instance
return render_template('index.html')
@socket_io.on('connect', namespace='/test')
def client_connect():
# need visibility of the global thread object
global thread
global client_counter
client_counter += 1
print(f'Clients connected: {client_counter}, sid = {request.sid}, thread_ident={threading.get_native_id()}')
if thread_stop_event.is_set():
thread_stop_event.clear()
# Start the random number generator thread only if the thread has not been started before.
if not thread.is_alive():
print(f"Starting Thread, thread_ident={threading.get_ident()}")
thread = socket_io.start_background_task(random_number_generator)
elif client_counter == 1:
print('Continue use existing thread that is still alive')
@socket_io.on('disconnect', namespace='/test')
def client_disconnect():
global client_counter
client_counter -= 1
print(f'Client disconnected, left: {client_counter}, sid = {request.sid}, thread_ident={threading.get_ident()}')
if thread.is_alive() and client_counter == 0:
global thread_stop_event
thread_stop_event.set()
print('Set event to stop thread')
if __name__ == '__main__':
socket_io.run(app)
|
application.py
|
from flask_socketio import SocketIO
from flask import Flask, render_template, request
from random import random
import threading
from threading import Thread, Event
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
app.config['DEBUG'] = True
# Turn the flask app into a SocketIO app
socket_io = SocketIO(app, async_mode='eventlet', logger=False, engineio_logger=False)
# Random number Generator Thread
thread = Thread()
thread_stop_event = Event()
client_counter = 0
def random_number_generator():
"""
    Generate a random number every 5 seconds and emit it to all clients on the
    /test namespace (broadcast). Meant to run as a SocketIO background task.
"""
    while not thread_stop_event.is_set():
number = round(random() * 10, 3)
print(f'{number}, thread_ident={threading.get_native_id()}')
socket_io.emit('new_number', {'number': number}, namespace='/test')
socket_io.sleep(5)
print('Thread is done')
@app.route('/')
def index():
# Only by sending this page first the client will be connected to the socket_io instance
return render_template('index.html')
@socket_io.on('connect', namespace='/test')
def client_connect():
# need visibility of the global thread object
global thread
global client_counter
client_counter += 1
print(f'Clients connected: {client_counter}, sid = {request.sid}, thread_ident={threading.get_native_id()}')
if thread_stop_event.is_set():
thread_stop_event.clear()
# Start the random number generator thread only if the thread has not been started before.
if not thread.is_alive():
print(f"Starting Thread, thread_ident={threading.get_ident()}")
thread = socket_io.start_background_task(random_number_generator)
elif client_counter == 1:
print('Continue use existing thread that is still alive')
@socket_io.on('disconnect', namespace='/test')
def client_disconnect():
global client_counter
client_counter -= 1
print(f'Client disconnected, left: {client_counter}, sid = {request.sid}, thread_ident={threading.get_ident()}')
if thread.is_alive() and client_counter == 0:
global thread_stop_event
thread_stop_event.set()
print('Set event to stop thread')
if __name__ == '__main__':
socket_io.run(app)
| 0.499268 | 0.06389 |
import math
from django import template
from django.utils.safestring import mark_safe
from django.contrib.humanize.templatetags.humanize import intcomma
from django.contrib.staticfiles.storage import staticfiles_storage
from django.utils import timezone
register = template.Library()
@register.simple_tag(takes_context=True)
def conditional_js(context, script_name):
suffix = "" if context.get("DEBUG", True) else ".min"
filename = "js/{}{}.js".format(script_name, suffix)
url = staticfiles_storage.url(filename)
tag = '<script src="{}"></script>'.format(url)
return mark_safe(tag)
@register.filter
def wholenum(num):
return int(round(num))
@register.filter
def deltawords(num, arg):
"""An adverb to come after the word 'improved' or 'slipped'"""
delta = abs(num - arg)
# We only pick out changes over 10%; over 30% in 9 months is unheard of.
if delta == 0:
word = "not at all"
elif delta < 10:
word = "slightly"
elif delta < 20:
word = "moderately"
elif delta < 30:
word = "considerably"
else:
word = "massively"
return word
@register.filter
def roundpound(num):
order = 10 ** math.floor(math.log10(num))
if order > 0:
return intcomma(int(round(num / order) * order))
else:
return str(int(round(num)))
@register.filter
def sigfigs(value, figures=3):
"""
Round value to supplied significant figures
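    For example sigfigs(1234) gives "1230" and sigfigs(0.04567, 2) gives "0.046".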
"""
if not value:
# This might happen when testing.
value = 0
if value == 0:
order = 0
else:
order = int(math.floor(math.log10(math.fabs(value))))
places = figures - order - 1
format_string = "{:.%df}" % max(0, places)
return format_string.format(round(value, places))
@register.simple_tag
def url_toggle(request, field):
dict_ = request.GET.copy()
if field in dict_:
del dict_[field]
else:
dict_[field] = 1
return dict_.urlencode()
@register.simple_tag
def current_time(format_string):
return timezone.now().strftime(format_string)
@register.filter
def fancy_join(lst, sep=", ", final_sep=" and "):
"""
Join a list using a different separator for the final element
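    For example fancy_join(["red", "green", "blue"]) gives "red, green and blue".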
"""
if len(lst) > 2:
head, tail = lst[:-1], lst[-1]
lst = [sep.join(head), tail]
return final_sep.join(lst)
@register.filter
def username_from_email(email):
return email.split("@")[0]
|
openprescribing/frontend/templatetags/template_extras.py
|
import math
from django import template
from django.utils.safestring import mark_safe
from django.contrib.humanize.templatetags.humanize import intcomma
from django.contrib.staticfiles.storage import staticfiles_storage
from django.utils import timezone
register = template.Library()
@register.simple_tag(takes_context=True)
def conditional_js(context, script_name):
suffix = "" if context.get("DEBUG", True) else ".min"
filename = "js/{}{}.js".format(script_name, suffix)
url = staticfiles_storage.url(filename)
tag = '<script src="{}"></script>'.format(url)
return mark_safe(tag)
@register.filter
def wholenum(num):
return int(round(num))
@register.filter
def deltawords(num, arg):
"""An adverb to come after the word 'improved' or 'slipped'"""
delta = abs(num - arg)
# We only pick out changes over 10%; over 30% in 9 months is unheard of.
if delta == 0:
word = "not at all"
elif delta < 10:
word = "slightly"
elif delta < 20:
word = "moderately"
elif delta < 30:
word = "considerably"
else:
word = "massively"
return word
@register.filter
def roundpound(num):
order = 10 ** math.floor(math.log10(num))
if order > 0:
return intcomma(int(round(num / order) * order))
else:
return str(int(round(num)))
@register.filter
def sigfigs(value, figures=3):
"""
Round value to supplied significant figures
"""
if not value:
# This might happen when testing.
value = 0
if value == 0:
order = 0
else:
order = int(math.floor(math.log10(math.fabs(value))))
places = figures - order - 1
format_string = "{:.%df}" % max(0, places)
return format_string.format(round(value, places))
@register.simple_tag
def url_toggle(request, field):
dict_ = request.GET.copy()
if field in dict_:
del dict_[field]
else:
dict_[field] = 1
return dict_.urlencode()
@register.simple_tag
def current_time(format_string):
return timezone.now().strftime(format_string)
@register.filter
def fancy_join(lst, sep=", ", final_sep=" and "):
"""
Join a list using a different separator for the final element
"""
if len(lst) > 2:
head, tail = lst[:-1], lst[-1]
lst = [sep.join(head), tail]
return final_sep.join(lst)
@register.filter
def username_from_email(email):
return email.split("@")[0]
| 0.414306 | 0.239572 |
# # QCM Analysis
# ## Imports
import logging
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from bric_analysis_libraries import standard_functions as std
# # Analysis
def sauerbrey( freq, f0, density = 2.648, shear = 2.947e11 ):
"""
The Sauerbrey equation, solved for mass change per unit area.
    The relative change in frequency should be less than 5%,
otherwise use Z-matching.
:param freq: Measured frequency in Hertz.
:param f0: Fundamental frequency in Hertz.
:param density: Density of sensor substrate in g/cm^3. [Default: Quartz (2.648)]
:param shear: Shear modulus of sensor substrate in g/( cm* s ). [Default: Quartz (2.947 e11) ]
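    Returns the mass change per unit area: dm = -sqrt(density*shear)/(2*f0^2) * (freq - f0).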
"""
# check if larger than 5% change
delta = np.abs( ( freq - f0 )/ f0 )
if delta.max() > 0.05:
        logging.warning( 'Frequency change is larger than 5%. Consider using Z-match method instead.' )
coeff = np.sqrt( density* shear )/ ( 2* np.square( f0 ) )
m_delta = -coeff* ( freq - f0 )
return m_delta
def z_match(
freq,
f0,
film_density,
film_shear,
freq_constant = 1.668e13,
sub_density = 2.648,
    sub_shear = 2.947e11
):
"""
The Z-match equation.
Used when relative frequency change is larger than 5%.
:param freq: Frequency of the loaded sensor in Hertz.
:param f0: Frequency of the unloaded sensor in hertz.
:param film_density: Density of the film in g/cm^3.
:param film_shear: Shear modulus of the film in g/( cm* s ).
    :param freq_constant: Frequency constant of the sensor in Hz* Angstrom. [Default: Quartz (1.668 e13)]
:param sub_density: Density of sensor substrate in g/cm^3. [Default: Quartz (2.648)]
:param sub_shear: Shear modulus of sensor substrate in g/( cm* s ). [Default: Quartz (2.947 e11) ]
"""
z = np.sqrt( sub_density* sub_shear/( film_density* film_shear ) )
coeff = freq_constant* sub_density/( np.pi* z* freq )
tan_arg = np.pi*( f0 - freq )/ f0
m = coeff* np.arctan( z* np.tan( tan_arg ) )
return m
def sauerbrey_mass_change( df, f0 = 5e6, density = 2.648, shear = 2.947e11 ):
"""
Creates a DataFrame of mass changes calculated with the Sauerbrey equation.
:param df: DataFrame containing frequencies in Hertz.
    :param f0: The fundamental frequency of the sensor. [Default: 5 MHz]
:param density: Density of sensor substrate in g/cm^3. [Default: Quartz (2.648)]
:param shear: Shear modulus of sensor substrate in g/( cm* s ). [Default: Quartz (2.947 e11) ]
:returns: DataFrame of mass changes in grams.
"""
return df.apply( lambda x: sauerbrey( x, f0, density, shear ) )
def z_match_mass_change(
df,
f0,
film_density,
film_shear,
freq_constant = 1.668e13,
sub_density = 2.648,
    sub_shear = 2.947e11
):
"""
    Creates a DataFrame of mass changes calculated with the Z-match equation.
    Used when the relative frequency change is larger than 5%.
    :param df: DataFrame containing loaded sensor frequencies in Hertz.
:param f0: Frequency of the unloaded sensor in hertz.
:param film_density: Density of the film in g/cm^3.
:param film_shear: Shear modulus of the film in g/( cm* s ).
    :param freq_constant: Frequency constant of the sensor in Hz* Angstrom. [Default: Quartz (1.668 e13)]
:param sub_density: Density of sensor substrate in g/cm^3. [Default: Quartz (2.648)]
:param sub_shear: Shear modulus of sensor substrate in g/( cm* s ). [Default: Quartz (2.947 e11) ]
"""
return df.apply( lambda x:
z_match(
x,
f0,
film_density,
film_shear,
freq_constant = freq_constant,
sub_density = sub_density,
sub_shear = sub_shear
)
)
# # Work
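# A minimal usage sketch (not from the original notebook): the frequency
# values below are made up purely for illustration.
if __name__ == '__main__':
    f0 = 5e6  # 5 MHz fundamental
    freqs = pd.DataFrame( { 'sensor_1': [ 5e6, 4.9999e6, 4.9998e6 ] } )
    dm = sauerbrey_mass_change( freqs, f0 = f0 )  # mass change per unit area
    print( dm )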
|
bric_analysis_libraries/misc/qcm_analysis.py
|
# # QCM Analysis
# ## Imports
import logging
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from bric_analysis_libraries import standard_functions as std
# # Analysis
def sauerbrey( freq, f0, density = 2.648, shear = 2.947e11 ):
"""
The Sauerbrey equation, solved for mass change per unit area.
    The relative change in frequency should be less than 5%,
otherwise use Z-matching.
:param freq: Measured frequency in Hertz.
:param f0: Fundamental frequency in Hertz.
:param density: Density of sensor substrate in g/cm^3. [Default: Quartz (2.648)]
:param shear: Shear modulus of sensor substrate in g/( cm* s ). [Default: Quartz (2.947 e11) ]
"""
# check if larger than 5% change
delta = np.abs( ( freq - f0 )/ f0 )
if delta.max() > 0.05:
        logging.warning( 'Frequency change is larger than 5%. Consider using Z-match method instead.' )
coeff = np.sqrt( density* shear )/ ( 2* np.square( f0 ) )
m_delta = -coeff* ( freq - f0 )
return m_delta
def z_match(
freq,
f0,
film_density,
film_shear,
freq_constant = 1.668e13,
sub_density = 2.648,
    sub_shear = 2.947e11
):
"""
The Z-match equation.
Used when relative frequency change is larger than 5%.
:param freq: Frequency of the loaded sensor in Hertz.
:param f0: Frequency of the unloaded sensor in hertz.
:param film_density: Density of the film in g/cm^3.
:param film_shear: Shear modulus of the film in g/( cm* s ).
    :param freq_constant: Frequency constant of the sensor in Hz* Angstrom. [Default: Quartz (1.668 e13)]
:param sub_density: Density of sensor substrate in g/cm^3. [Default: Quartz (2.648)]
:param sub_shear: Shear modulus of sensor substrate in g/( cm* s ). [Default: Quartz (2.947 e11) ]
"""
z = np.sqrt( sub_density* sub_shear/( film_density* film_shear ) )
coeff = freq_constant* sub_density/( np.pi* z* freq )
tan_arg = np.pi*( f0 - freq )/ f0
m = coeff* np.arctan( z* np.tan( tan_arg ) )
return m
def sauerbrey_mass_change( df, f0 = 5e6, density = 2.648, shear = 2.947e11 ):
"""
Creates a DataFrame of mass changes calculated with the Sauerbrey equation.
:param df: DataFrame containing frequencies in Hertz.
    :param f0: The fundamental frequency of the sensor. [Default: 5 MHz]
:param density: Density of sensor substrate in g/cm^3. [Default: Quartz (2.648)]
:param shear: Shear modulus of sensor substrate in g/( cm* s ). [Default: Quartz (2.947 e11) ]
:returns: DataFrame of mass changes in grams.
"""
return df.apply( lambda x: sauerbrey( x, f0, density, shear ) )
def z_match_mass_change(
df,
f0,
film_density,
film_shear,
freq_constant = 1.668e13,
sub_density = 2.648,
    sub_shear = 2.947e11
):
"""
    Creates a DataFrame of mass changes calculated with the Z-match equation.
    Used when the relative frequency change is larger than 5%.
    :param df: DataFrame containing loaded sensor frequencies in Hertz.
:param f0: Frequency of the unloaded sensor in hertz.
:param film_density: Density of the film in g/cm^3.
:param film_shear: Shear modulus of the film in g/( cm* s ).
    :param freq_constant: Frequency constant of the sensor in Hz* Angstrom. [Default: Quartz (1.668 e13)]
:param sub_density: Density of sensor substrate in g/cm^3. [Default: Quartz (2.648)]
:param sub_shear: Shear modulus of sensor substrate in g/( cm* s ). [Default: Quartz (2.947 e11) ]
"""
return df.apply( lambda x:
z_match(
x,
f0,
film_density,
film_shear,
freq_constant = freq_constant,
sub_density = sub_density,
sub_shear = sub_shear
)
)
# # Work
| 0.785966 | 0.723236 |
from torchvision.datasets import CIFAR100
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import os.path as pt
import torch
import numpy as np
def ceil(x: float):
return int(np.ceil(x))
class MYCIFAR100(CIFAR100):
""" Reimplements get_item to transform tensor input to pil image before applying transformation. """
def __getitem__(self, index):
img, target = self.data[index], self.targets[index]
img = transforms.ToPILImage()(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
class OECifar100(MYCIFAR100):
cifar10_classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def __init__(self, size: torch.Size, root: str = None, train: bool = True, limit_var: int = 20):
"""
Outlier Exposure dataset for Cifar-100.
:param size: size of the samples in n x c x h x w, samples will be resized to h x w. If n is larger than the
number of samples available in Cifar-100, dataset will be enlarged by repetitions to fit n.
This is important as exactly n images are extracted per iteration of the data_loader.
For online supervision n should be set to 1 because only one sample is extracted at a time.
:param root: root directory where data is found or is to be downloaded to.
:param train: whether to use training or test samples.
:param limit_var: limits the number of different samples, i.e. randomly chooses limit_var many samples
from all available ones to be the training data.
"""
assert len(size) == 4 and size[2] == size[3]
assert size[1] in [1, 3]
root = pt.join(root, 'cifar100', )
transform = transforms.Compose([
transforms.Resize((size[2], size[3])),
transforms.Grayscale() if size[1] == 1 else transforms.Lambda(lambda x: x),
transforms.ToTensor()
])
super().__init__(root, train, transform=transform, download=True)
self.size = size
self.targets = torch.from_numpy(np.asarray(self.targets))
self.data = torch.from_numpy(self.data).transpose(1, 3).transpose(2, 3)
self.idx_to_class = {v: k for k, v in self.class_to_idx.items()}
if limit_var is not None and limit_var < len(self):
picks = np.random.choice(np.arange(self.data.size(0)), size=limit_var, replace=False)
self.data = self.data[picks]
self.targets = self.targets[picks]
if limit_var is not None and limit_var > len(self):
print(
'OECifar100 shall be limited to {} samples, but Cifar100 contains only {} samples, thus using all.'
.format(limit_var, len(self))
)
if len(self) < size[0]:
rep = ceil(size[0] / len(self))
old = len(self)
self.data = self.data.repeat(rep, 1, 1, 1)
self.targets = self.targets.repeat(rep)
if rep != size[0] / old:
import warnings
warnings.warn(
'OECifar100 has been limited to {} samples. '
'Due to the requested size of {}, the dataset will be enlarged. '
'But {} repetitions will make some samples appear more often than others in the dataset, '
'because the final size after repetitions is {}, which is cut to {}'
.format(limit_var, size[0], rep, len(self), size[0])
)
def data_loader(self) -> DataLoader:
return DataLoader(dataset=self, batch_size=self.size[0], shuffle=True, num_workers=0)
def __getitem__(self, index: int) -> torch.Tensor:
sample, target = super().__getitem__(index)
sample = sample.mul(255).byte()
return sample
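# A minimal usage sketch (commented out because it would download CIFAR-100;
# the root path 'data' is an assumption):
# oe = OECifar100(torch.Size((128, 3, 32, 32)), root='data', limit_var=20)
# batch = next(iter(oe.data_loader()))  # uint8 tensor of shape (128, 3, 32, 32)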
|
python/fcdd/datasets/outlier_exposure/cifar100.py
|
from torchvision.datasets import CIFAR100
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import os.path as pt
import torch
import numpy as np
def ceil(x: float):
return int(np.ceil(x))
class MYCIFAR100(CIFAR100):
""" Reimplements get_item to transform tensor input to pil image before applying transformation. """
def __getitem__(self, index):
img, target = self.data[index], self.targets[index]
img = transforms.ToPILImage()(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
class OECifar100(MYCIFAR100):
cifar10_classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def __init__(self, size: torch.Size, root: str = None, train: bool = True, limit_var: int = 20):
"""
Outlier Exposure dataset for Cifar-100.
:param size: size of the samples in n x c x h x w, samples will be resized to h x w. If n is larger than the
number of samples available in Cifar-100, dataset will be enlarged by repetitions to fit n.
This is important as exactly n images are extracted per iteration of the data_loader.
For online supervision n should be set to 1 because only one sample is extracted at a time.
:param root: root directory where data is found or is to be downloaded to.
:param train: whether to use training or test samples.
:param limit_var: limits the number of different samples, i.e. randomly chooses limit_var many samples
from all available ones to be the training data.
"""
assert len(size) == 4 and size[2] == size[3]
assert size[1] in [1, 3]
root = pt.join(root, 'cifar100', )
transform = transforms.Compose([
transforms.Resize((size[2], size[3])),
transforms.Grayscale() if size[1] == 1 else transforms.Lambda(lambda x: x),
transforms.ToTensor()
])
super().__init__(root, train, transform=transform, download=True)
self.size = size
self.targets = torch.from_numpy(np.asarray(self.targets))
self.data = torch.from_numpy(self.data).transpose(1, 3).transpose(2, 3)
self.idx_to_class = {v: k for k, v in self.class_to_idx.items()}
if limit_var is not None and limit_var < len(self):
picks = np.random.choice(np.arange(self.data.size(0)), size=limit_var, replace=False)
self.data = self.data[picks]
self.targets = self.targets[picks]
if limit_var is not None and limit_var > len(self):
print(
'OECifar100 shall be limited to {} samples, but Cifar100 contains only {} samples, thus using all.'
.format(limit_var, len(self))
)
if len(self) < size[0]:
rep = ceil(size[0] / len(self))
old = len(self)
self.data = self.data.repeat(rep, 1, 1, 1)
self.targets = self.targets.repeat(rep)
if rep != size[0] / old:
import warnings
warnings.warn(
'OECifar100 has been limited to {} samples. '
'Due to the requested size of {}, the dataset will be enlarged. '
'But {} repetitions will make some samples appear more often than others in the dataset, '
'because the final size after repetitions is {}, which is cut to {}'
.format(limit_var, size[0], rep, len(self), size[0])
)
def data_loader(self) -> DataLoader:
return DataLoader(dataset=self, batch_size=self.size[0], shuffle=True, num_workers=0)
def __getitem__(self, index: int) -> torch.Tensor:
sample, target = super().__getitem__(index)
sample = sample.mul(255).byte()
return sample
| 0.903882 | 0.725089 |
import sys
import math
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib
import numpy as np
from random import randint
from matplotlib.backends.backend_pdf import PdfPages
from palettable.colorbrewer.sequential import YlGnBu_5
# Reading data
df = pd.read_csv('../plot_data/pdp_pipeline.csv')
# Plot type
plt.style.use('ggplot')
fig, ax = plt.subplots(figsize=(32, 8))
# plt.xticks(rotation=90)
# Set limits for X and Y axes
# plt.xlim(-0.5, 10.5)
plt.ylim(0, 1)
N = len(df['layer'])
ind = np.arange(N)
width = 0.6
c = -1
for x in np.arange(N):
if(x%10 == 0):
c = c + 1
ind[x] = c
c = c + 1
# print(ind)
# bar.hatch -> putting patterns on the colors
# opbars = ax.bar(ind, df['ref'].values.tolist(), width, ecolor='k',
# color=YlGnBu_5.hex_colors[0], edgecolor='k', hatch='//');
colour = ['#ffeda0','#feb24c','#f03b20']
opbars = ax.bar(ind, df['avg_cycles'].values.tolist(), width, ecolor='k',
color=colour[0], edgecolor='k');
opbars = ax.bar(ind, df['middle'].values.tolist(), width, ecolor='k',
color=colour[1], edgecolor='k');
opbars = ax.bar(ind, df['late'].values.tolist(), width, ecolor='k',
color=colour[2], edgecolor='k');
opbars = ax.bar(ind, df['total_cycles'].values.tolist(), width, ecolor='k',
color=YlGnBu_5.hex_colors[4], edgecolor='k', hatch='//');
ax.set_ylabel('Latency',fontsize=32)
ax.yaxis.label.set_color('black')
ax.set_xticks(ind);
# Adding extra name to the x labels
# rotation='degree' for rotating the text
ax.set_xticklabels(df['layer'], fontsize=16, rotation=90)
t = 10
ax.text(0, -0.25, 'Early', fontsize=32)
ax.text(3, -0.25, 'Middle', fontsize=32)
ax.text(6.5, -0.25, 'Late', fontsize=32)
ax.text(10, -0.05, '|', fontsize=20)
ax.text(10, -0.08, '|', fontsize=20)
ax.text(10, -0.11, '|', fontsize=20)
ax.text(10, -0.14, '|', fontsize=20)
ax.text(11, -0.25, 'Early', fontsize=32)
ax.text(14, -0.25, 'Middle', fontsize=32)
ax.text(17.5, -0.25, 'Late', fontsize=32)
ax.text(21, -0.05, '|', fontsize=20)
ax.text(21, -0.08, '|', fontsize=20)
ax.text(21, -0.11, '|', fontsize=20)
ax.text(21, -0.14, '|', fontsize=20)
ax.text(22, -0.25, 'Early', fontsize=32)
ax.text(25, -0.25, 'Middle', fontsize=32)
ax.text(28.5, -0.25, 'Late', fontsize=32)
# ax.text(0, -2, 'Early Layers', fontsize=22)
# ax.text(4, -2, 'Middle Layers', fontsize=22)
# ax.text(8, -2, 'Late Layers', fontsize=22)
ax.text(3, 1.05, 'pdp-0', fontsize=32)
ax.text(14, 1.05, 'pdp-1', fontsize=32)
ax.text(24, 1.05, 'pdp-2', fontsize=32)
### Style
# Set the background color
ax.set_facecolor('whitesmoke')
plt.gca().xaxis.grid(False)
plt.gca().yaxis.grid(True, color='black')
plt.tick_params( axis='x', which='both', bottom=False, top=False, colors='black', labelsize=26)
plt.tick_params( axis='y', which='both', right=False, colors='black', labelsize=30 )
plt.tick_params(axis='both', which='major', direction='in',
length=6, width=3,color='black')
plt.grid(linestyle='--')
ax.spines['bottom'].set_color('gray')
ax.spines['top'].set_color('gray')
ax.spines['right'].set_color('gray')
ax.spines['left'].set_color('gray')
# Adding legend and the position
# ax.legend((pbars[0], opbars[0], cbars[0], prec[0]), ('A', 'B', 'C', 'D'), bbox_to_anchor=(1, 0.92), fontsize=22)
fig.savefig('test.pdf',facecolor=fig.get_facecolor(), bbox_inches='tight')
|
TB-scheduler/deprecated/pdp_pipeline.py
|
import sys
import math
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib
import numpy as np
from random import randint
from matplotlib.backends.backend_pdf import PdfPages
from palettable.colorbrewer.sequential import YlGnBu_5
# Reading data
df = pd.read_csv('../plot_data/pdp_pipeline.csv')
# Plot type
plt.style.use('ggplot')
fig, ax = plt.subplots(figsize=(32, 8))
# plt.xticks(rotation=90)
# Set limits for X and Y axes
# plt.xlim(-0.5, 10.5)
plt.ylim(0, 1)
N = len(df['layer'])
ind = np.arange(N)
width = 0.6
c = -1
for x in np.arange(N):
if(x%10 == 0):
c = c + 1
ind[x] = c
c = c + 1
# print(ind)
# bar.hatch -> putting patterns on the colors
# opbars = ax.bar(ind, df['ref'].values.tolist(), width, ecolor='k',
# color=YlGnBu_5.hex_colors[0], edgecolor='k', hatch='//');
colour = ['#ffeda0','#feb24c','#f03b20']
opbars = ax.bar(ind, df['avg_cycles'].values.tolist(), width, ecolor='k',
color=colour[0], edgecolor='k');
opbars = ax.bar(ind, df['middle'].values.tolist(), width, ecolor='k',
color=colour[1], edgecolor='k');
opbars = ax.bar(ind, df['late'].values.tolist(), width, ecolor='k',
color=colour[2], edgecolor='k');
opbars = ax.bar(ind, df['total_cycles'].values.tolist(), width, ecolor='k',
color=YlGnBu_5.hex_colors[4], edgecolor='k', hatch='//');
ax.set_ylabel('Latency',fontsize=32)
ax.yaxis.label.set_color('black')
ax.set_xticks(ind);
# Adding extra name to the x labels
# rotation='degree' for rotating the text
ax.set_xticklabels(df['layer'], fontsize=16, rotation=90)
t = 10
ax.text(0, -0.25, 'Early', fontsize=32)
ax.text(3, -0.25, 'Middle', fontsize=32)
ax.text(6.5, -0.25, 'Late', fontsize=32)
ax.text(10, -0.05, '|', fontsize=20)
ax.text(10, -0.08, '|', fontsize=20)
ax.text(10, -0.11, '|', fontsize=20)
ax.text(10, -0.14, '|', fontsize=20)
ax.text(11, -0.25, 'Early', fontsize=32)
ax.text(14, -0.25, 'Middle', fontsize=32)
ax.text(17.5, -0.25, 'Late', fontsize=32)
ax.text(21, -0.05, '|', fontsize=20)
ax.text(21, -0.08, '|', fontsize=20)
ax.text(21, -0.11, '|', fontsize=20)
ax.text(21, -0.14, '|', fontsize=20)
ax.text(22, -0.25, 'Early', fontsize=32)
ax.text(25, -0.25, 'Middle', fontsize=32)
ax.text(28.5, -0.25, 'Late', fontsize=32)
# ax.text(0, -2, 'Early Layers', fontsize=22)
# ax.text(4, -2, 'Middle Layers', fontsize=22)
# ax.text(8, -2, 'Late Layers', fontsize=22)
ax.text(3, 1.05, 'pdp-0', fontsize=32)
ax.text(14, 1.05, 'pdp-1', fontsize=32)
ax.text(24, 1.05, 'pdp-2', fontsize=32)
### Style
# Set the background color
ax.set_facecolor('whitesmoke')
plt.gca().xaxis.grid(False)
plt.gca().yaxis.grid(True, color='black')
plt.tick_params( axis='x', which='both', bottom=False, top=False, colors='black', labelsize=26)
plt.tick_params( axis='y', which='both', right=False, colors='black', labelsize=30 )
plt.tick_params(axis='both', which='major', direction='in',
length=6, width=3,color='black')
plt.grid(linestyle='--')
ax.spines['bottom'].set_color('gray')
ax.spines['top'].set_color('gray')
ax.spines['right'].set_color('gray')
ax.spines['left'].set_color('gray')
# Adding legend and the position
# ax.legend((pbars[0], opbars[0], cbars[0], prec[0]), ('A', 'B', 'C', 'D'), bbox_to_anchor=(1, 0.92), fontsize=22)
fig.savefig('test.pdf',facecolor=fig.get_facecolor(), bbox_inches='tight')
| 0.400984 | 0.422266 |
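# Layout of the song data below (inferred from the entries themselves, not from an
# external spec): each song is a list of [pitch, octave, duration] triples. The
# pitch is a note letter plus an accidental suffix -- "n" natural, "f" flat,
# "s" sharp -- and "Zz" presumably marks a rest; the octave is a small integer;
# the duration is an integer tick count whose real length depends on the song's
# tempo constant (tempos such as 104 * 8 appear to be pre-scaled so that durations
# stay integral).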
# C major scale
song1_tempo = 220
song1 = [
["Cn", 2, 1],
["Dn", 2, 1],
["En", 2, 1],
["Fn", 2, 1],
["Gn", 2, 1],
["An", 2, 1],
["Bn", 2, 1],
["Cn", 3, 1],
["Bn", 2, 1],
["An", 2, 1],
["Gn", 2, 1],
["Fn", 2, 1],
["En", 2, 1],
["Dn", 2, 1],
["Cn", 2, 1]
]
# Imperial March
song2_tempo = 104 * 8
song2 = [
["Gn", 1, 8 ],
["Gn", 1, 8 ],
["Gn", 1, 8 ],
["Ef", 1, 6 ],
["Bf", 1, 2 ],
["Gn", 1, 8 ],
["Ef", 1, 6 ],
["Bf", 1, 2 ],
["Gn", 1, 16 ],
["Dn", 2, 8 ],
["Dn", 2, 8 ],
["Dn", 2, 8 ],
["Ef", 2, 6 ],
["Bf", 1, 2 ],
["Gf", 1, 8 ],
["Ef", 1, 6 ],
["Bf", 1, 2 ],
["Gn", 1, 16 ],
["Gn", 2, 8 ],
["Gn", 1, 6 ],
["Gn", 1, 2 ],
["Gn", 2, 8 ],
["Gf", 2, 6 ],
["Fn", 2, 2 ],
["En", 2, 2 ],
["Ds", 2, 2 ],
["En", 2, 4 ],
["Zz", 0, 4 ],
["Gs", 1, 4 ],
["Cs", 2, 8 ],
["Bs", 2, 6 ],
["Bn", 1, 2 ],
["Bf", 1, 2 ],
["An", 1, 2 ],
["Bf", 1, 4 ],
["Zz", 0, 4 ],
["Ef", 1, 4 ],
["Gf", 1, 8 ],
["Ef", 1, 6 ],
["Gf", 1, 2 ],
["Bf", 1, 8 ],
["Gn", 1, 6 ],
["Bf", 1, 2 ],
["Dn", 2, 16 ],
["Gn", 2, 8 ],
["Gn", 1, 6 ],
["Gn", 1, 2 ],
["Gn", 2, 8 ],
["Gf", 2, 6 ],
["Fn", 2, 2 ],
["En", 2, 2 ],
["Ds", 2, 2 ],
["En", 2, 4 ],
["Zz", 0, 4 ],
["Gs", 1, 4 ],
["Cs", 2, 8 ],
["Bs", 2, 6 ],
["Bn", 1, 2 ],
["Bf", 1, 2 ],
["An", 1, 2 ],
["Bf", 1, 4 ],
["Zz", 0, 4 ],
["Ef", 1, 4 ],
["Gf", 1, 8 ],
["Ef", 1, 6 ],
["Bf", 1, 2 ],
["Gn", 1, 8 ],
["Ef", 1, 6 ],
["Bf", 1, 2 ],
["Gn", 1, 16 ]
]
# Metal Crusher
song3_tempo = 115 * 4
song3 = [
["Ef", 3, 3 ], # Bar 1
["Ef", 3, 1 ],
["Ef", 3, 2 ],
["Ef", 3, 2 ],
["Bn", 2, 3 ],
["Bn", 2, 1 ],
["Ef", 1, 4 ],
["Ef", 1, 3 ],
["Ef", 1, 1 ],
["Ef", 1, 2 ],
["Ef", 1, 2 ],
["Af", 2, 8 ], # End of intro
["Ef", 2, 1 ],
["En", 2, 1 ],
["Ef", 2, 1 ],
["Dn", 2, 1 ],
["Ef", 2, 2 ],
["Dn", 2, 1 ],
["Bn", 2, 1 ],
["Bf", 2, 2 ],
["Af", 2, 1 ],
["Gn", 1, 1 ],
["Af", 2, 2 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["Gn", 1, 1 ],
["Bn", 2, 1 ],
["Bf", 2, 1 ],
["Af", 2, 2 ],
["Af", 2, 1 ],
["Bf", 2, 1 ],
["Bn", 2, 2 ],
["Bf", 2, 1 ],
["Bn", 2, 1 ],
["Df", 2, 2 ],
["Bn", 2, 1 ],
["Bf", 2, 1 ],
["Gn", 1, 2 ],
["Af", 2, 1 ],
["Bf", 2, 1 ],
["Bn", 2, 2 ],
["Bf", 2, 1 ],
["Af", 2, 1 ],
["Ef", 1, 2 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["Bn", 2, 1 ],
["Bf", 2, 1 ],
["Gn", 1, 1 ],
["Af", 2, 2 ],
["Af", 2, 1 ],
["Gn", 1, 1 ],
["Af", 2, 2 ],
["Af", 3, 2 ],
["Ef", 2, 1 ], # Repeat
["En", 2, 1 ],
["Ef", 2, 1 ],
["Dn", 2, 1 ],
["Ef", 2, 2 ],
["Dn", 2, 1 ],
["Bn", 2, 1 ],
["Bf", 2, 2 ],
["Af", 2, 1 ],
["Gn", 1, 1 ],
["Af", 2, 2 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["Gn", 1, 1 ],
["Bn", 2, 1 ],
["Bf", 2, 1 ],
["Af", 2, 2 ],
["Af", 2, 1 ],
["Bf", 2, 1 ],
["Bn", 2, 2 ],
["Bf", 2, 1 ],
["Bn", 2, 1 ],
["Df", 2, 2 ],
["Bn", 2, 1 ],
["Bf", 2, 1 ],
["Gn", 1, 2 ],
["Af", 2, 1 ],
["Bf", 2, 1 ],
["Bn", 2, 2 ],
["Bf", 2, 1 ],
["Af", 2, 1 ],
["Ef", 1, 2 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["En", 1, 1 ],
["Ef", 1, 1 ],
["Bn", 2, 1 ],
["Bf", 2, 1 ],
["Gn", 1, 1 ],
["Af", 2, 2 ],
["Af", 2, 1 ],
["Gn", 1, 1 ],
["Af", 2, 2 ],
["Af", 3, 2 ]
]
song4_tempo = 135*2
song4 = [
["Gf", 1, 2],
["Zz", 0, 1],
["Gf", 1, 2],
["Gf", 1, 1],
["An", 1, 2],
["Gf", 1, 2],
["Zz", 0, 1],
["Gf", 1, 2],
["An", 1, 1],
["Af", 1, 1],
["Gf", 1, 1],
["Fn", 1, 2],
["Zz", 0, 1],
["Fn", 1, 2],
["Fn", 1, 1],
["Fn", 1, 2],
["Dn", 1, 2],
["Zz", 0, 1],
["Dn", 1, 2],
["Dn", 1, 1],
["En", 1, 1],
["Fn", 1, 1],
["Gf", 1, 2],
["Zz", 0, 1],
["Gf", 1, 2],
["Gf", 1, 1],
["An", 1, 2],
["Gf", 1, 2],
["Zz", 0, 1],
["Gf", 1, 2],
["An", 1, 1],
["Af", 1, 1],
["Gf", 1, 1],
["Fn", 1, 2],
["Zz", 0, 1],
["Fn", 1, 2],
["Fn", 1, 1],
["Fn", 1, 2],
["Dn", 1, 2],
["Zz", 0, 1],
["Dn", 1, 2],
["Dn", 1, 1],
["En", 1, 1],
["Fn", 1, 1],
# Part 2
["Gf", 2, 2],
["An", 2, 2],
["Gf", 2, 2],
["Df", 2, 2],
["Df", 2, 2],
["Df", 2, 1],
["Gf", 1, 3],
["Df", 2, 1],
["Df", 2, 1],
["Cn", 2, 2],
["Cn", 2, 1],
["Cn", 2, 2],
["Cn", 2, 1],
["Cn", 2, 1],
["Cn", 2, 1],
["An", 1, 2],
["An", 1, 2],
["Dn", 2, 2],
["En", 2, 1],
["Fn", 2, 1],
["Gf", 2, 2],
["An", 2, 2],
["Gf", 2, 2],
["Df", 2, 2],
["Df", 2, 2],
["Df", 2, 1],
["Gf", 1, 3],
["Df", 2, 1],
["Df", 2, 1],
["Cn", 2, 2],
["Cn", 2, 1],
["Cn", 2, 2],
["Cn", 2, 1],
["Cn", 2, 1],
["Cn", 2, 1],
["An", 1, 1],
["An", 1, 1],
["An", 1, 1],
["An", 1, 1],
["Gf", 1, 4],
]
song5_tempo = 135*2
song5 = [
["Gf", 1, 2],
["An", 1, 1],
["An", 1, 2],
["Gf", 1, 1],
["An", 1, 2],
["Gf", 1, 2],
["An", 1, 1],
["An", 1, 2],
["Gf", 1, 1],
["An", 1, 1],
["Bn", 1, 1],
["Fn", 1, 2],
["Af", 1, 1],
["Af", 1, 2],
["Fn", 1, 1],
["Af", 1, 2],
["Dn", 1, 2],
["Gf", 1, 1],
["Gf", 1, 2],
["Dn", 1, 1],
["An", 1, 1],
["Af", 1, 1],
]
# song5a_tempo = 160*2
# song5a = [
# ["Cn", 1, 2],
# ["Bn", 1, 1],
# ["Cn", 1, 1],
# ["An", 1, 2],
# ["Cn", 1, 2],
# ["Bn", 1, 1],
# ["Cn", 1, 1],
# ["An", 1, 2],
# ["Cn", 1, 2],
# ["Bn", 1, 1],
# ["Cn", 1, 1],
# ["An", 1, 2],
# ["Cn", 1, 2],
# ["Bn", 1, 1],
# ["Cn", 1, 1],
# ["An", 1, 2],
# ]
cantina_tempo = 132*4
cantina = [
# Part 1
["Fs", 1, 2],
["Bn", 1, 2],
["Fs", 1, 2],
["Bn", 1, 2],
["Fs", 1, 1],
["Bn", 1, 2],
["Fs", 1, 1],
["Zz", 1, 1],
["Fn", 1, 1],
["Fs", 1, 2],
# Part 2
["Fs", 1, 1],
["Fn", 1, 1],
["Fs", 1, 1],
["En", 1, 1],
["Zz", 1, 1],
["Ef", 1, 1],
["En", 1, 1],
["Ef", 1, 1],
["Dn", 1, 3],
["Bn", 0, 5],
# Part 3
["Fs", 1, 2],
["Bn", 1, 2],
["Fs", 1, 2],
["Bn", 1, 2],
["Fs", 1, 1],
["Bn", 1, 2],
["Fs", 1, 1],
["Zz", 1, 1],
["Fn", 1, 1],
["Fs", 1, 2],
["En", 1, 2],
["En", 1, 3],
["Ef", 1, 1],
["En", 1, 2],
["An", 1, 1],
["Gn", 1, 2],
["Fs", 1, 2],
["En", 1, 3],
# Part 4
["Fs", 1, 2],
["Bn", 1, 2],
["Fs", 1, 2],
["Bn", 1, 2],
["Fs", 1, 1],
["Bn", 1, 2],
["Fs", 1, 1],
["Zz", 1, 1],
["Fn", 1, 1],
["Fs", 1, 2],
["An", 1, 2],
["An", 1, 3],
["Fs", 1, 1],
["En", 1, 2],
["Dn", 1, 3],
["Bn", 0, 5],
# Leadup
["Bn", 0, 4],
["Dn", 1, 4],
["Fs", 1, 4],
["An", 1, 4],
["Cn", 2, 2],
["Bn", 1, 2],
["Fn", 1, 1],
["Fs", 1, 2],
["Dn", 1, 6],
["Zz", 1, 4]
]
|
player/python/songs.py
|
| 0.205296 | 0.511534 |
import ipaddress
from django.conf import settings
from django.test import TestCase
from peering.constants import (
BGP_RELATIONSHIP_PRIVATE_PEERING,
COMMUNITY_TYPE_INGRESS,
COMMUNITY_TYPE_EGRESS,
PLATFORM_IOSXR,
PLATFORM_JUNOS,
PLATFORM_NONE,
ROUTING_POLICY_TYPE_EXPORT,
ROUTING_POLICY_TYPE_IMPORT,
ROUTING_POLICY_TYPE_IMPORT_EXPORT,
)
from peering.models import (
AutonomousSystem,
BGPGroup,
Community,
DirectPeeringSession,
InternetExchange,
InternetExchangePeeringSession,
Router,
RoutingPolicy,
Template,
)
class AutonomousSystemTest(TestCase):
def test_does_exist(self):
asn = 201281
# AS should not exist
autonomous_system = AutonomousSystem.does_exist(asn)
self.assertEqual(None, autonomous_system)
# Create the AS
new_as = AutonomousSystem.objects.create(asn=asn, name="<NAME>")
# AS must exist
autonomous_system = AutonomousSystem.does_exist(asn)
self.assertEqual(asn, new_as.asn)
def test_create_from_peeringdb(self):
asn = 201281
# Illegal ASN
self.assertIsNone(AutonomousSystem.create_from_peeringdb(64500))
# Must not exist at first
self.assertIsNone(AutonomousSystem.does_exist(asn))
# Create the AS
autonomous_system1 = AutonomousSystem.create_from_peeringdb(asn)
self.assertEqual(asn, autonomous_system1.asn)
# Must exist now
self.assertEqual(asn, AutonomousSystem.does_exist(asn).asn)
        # Must not raise an error, just return the AS
autonomous_system2 = AutonomousSystem.create_from_peeringdb(asn)
self.assertEqual(asn, autonomous_system2.asn)
# Must exist now also
self.assertEqual(asn, AutonomousSystem.does_exist(asn).asn)
def test_synchronize_with_peeringdb(self):
# Create legal AS to sync with PeeringDB
asn = 201281
autonomous_system = AutonomousSystem.create_from_peeringdb(asn)
self.assertEqual(asn, autonomous_system.asn)
self.assertTrue(autonomous_system.synchronize_with_peeringdb())
# Create illegal AS to fail sync with PeeringDB
asn = 64500
autonomous_system = AutonomousSystem.objects.create(asn=asn, name="Test")
self.assertEqual(asn, autonomous_system.asn)
self.assertFalse(autonomous_system.synchronize_with_peeringdb())
def test_get_irr_as_set_prefixes(self):
autonomous_system = AutonomousSystem.create_from_peeringdb(201281)
prefixes = autonomous_system.get_irr_as_set_prefixes()
self.assertEqual(autonomous_system.ipv6_max_prefixes, len(prefixes["ipv6"]))
self.assertEqual(autonomous_system.ipv4_max_prefixes, len(prefixes["ipv4"]))
def test__str__(self):
asn = 64500
name = "Test"
expected = "AS{} - {}".format(asn, name)
autonomous_system = AutonomousSystem.objects.create(asn=asn, name=name)
self.assertEqual(expected, str(autonomous_system))
class CommunityTest(TestCase):
def test_create(self):
community_list = [
{"name": "Test", "value": "64500:1", "type": None, "str": "Test"},
{
"name": "Test",
"value": "64500:1",
"type": COMMUNITY_TYPE_EGRESS,
"str": "Test",
},
]
for details in community_list:
if details["type"]:
community = Community.objects.create(
name=details["name"], value=details["value"], type=details["type"]
)
else:
community = Community.objects.create(
name=details["name"], value=details["value"]
)
self.assertIsNotNone(community)
self.assertEqual(details["name"], community.name)
self.assertEqual(details["value"], community.value)
self.assertEqual(details["type"] or COMMUNITY_TYPE_INGRESS, community.type)
self.assertEqual(details["str"], str(community))
def test_get_type_html(self):
expected = [
'<span class="badge badge-primary">Egress</span>',
'<span class="badge badge-info">Ingress</span>',
'<span class="badge badge-secondary">Unknown</span>',
]
community_types = [COMMUNITY_TYPE_EGRESS, COMMUNITY_TYPE_INGRESS, "unknown"]
for i in range(len(community_types)):
self.assertEqual(
expected[i],
Community.objects.create(
name="test{}".format(i),
value="64500:{}".format(i),
type=community_types[i],
).get_type_html(),
)
class InternetExchangeTest(TestCase):
def test_is_peeringdb_valid(self):
ix = InternetExchange.objects.create(name="Test", slug="test")
        # Not linked with PeeringDB but still considered valid
self.assertTrue(ix.is_peeringdb_valid())
# Set invalid ID, must result in false
ix.peeringdb_id = 14658
ix.save()
self.assertFalse(ix.is_peeringdb_valid())
# Set valid ID, must result in true
ix.peeringdb_id = 29146
ix.save()
self.assertTrue(ix.is_peeringdb_valid())
def test_get_peeringdb_id(self):
# Expected results
expected = [0, 0, 0, 0, 29146, 29146, 29146]
# Test data
data = [
{
# No IP addresses
},
{"ipv6_address": "2001:db8::1"},
{"ipv4_address": "192.168.168.1"},
{"ipv6_address": "2001:db8::1", "ipv4_address": "192.168.168.1"},
{"ipv6_address": "fc00:e968:6179::de52:7100:9467:1"},
{"ipv4_address": "192.168.127.12"},
{
"ipv6_address": "fc00:e968:6179::de52:7100",
"ipv4_address": "192.168.127.12",
},
]
# Run test cases
for i in range(len(expected)):
ixp = InternetExchange.objects.create(
name="Test {}".format(i), slug="test_{}".format(i), **data[i]
)
self.assertEqual(expected[i], ixp.get_peeringdb_id())
def test_import_peering_sessions(self):
# Expected results
expected = [
# First case
(1, 1, []),
# Second case
(0, 1, []),
# Third case
(0, 1, []),
# Fourth case
(0, 0, []),
]
session_lists = [
# First case, one new session with one new AS
[{"ip_address": ipaddress.ip_address("2001:db8::1"), "remote_asn": 29467}],
# Second case, one new session with one known AS
[{"ip_address": ipaddress.ip_address("192.168.0.1"), "remote_asn": 29467}],
# Third case, new IPv4 session on another IX but with an IP that
# has already been used
[{"ip_address": ipaddress.ip_address("192.168.0.1"), "remote_asn": 29467}],
# Fourth case, new IPv4 session with IPv6 prefix
[{"ip_address": ipaddress.ip_address("192.168.2.1"), "remote_asn": 29467}],
]
prefix_lists = [
# First case
[ipaddress.ip_network("2001:db8::/64")],
# Second case
[ipaddress.ip_network("192.168.0.0/24")],
# Third case
[ipaddress.ip_network("192.168.0.0/24")],
# Fourth case
[ipaddress.ip_network("2001:db8::/64")],
]
# Run test cases
for i in range(len(expected)):
ixp = InternetExchange.objects.create(
name="Test {}".format(i), slug="test_{}".format(i)
)
self.assertEqual(
expected[i],
ixp._import_peering_sessions(session_lists[i], prefix_lists[i]),
)
self.assertEqual(expected[i][1], len(ixp.get_peering_sessions()))
class InternetExchangePeeringSessionTest(TestCase):
def test_does_exist(self):
# No session, must expect None
self.assertIsNone(InternetExchangePeeringSession.does_exist())
# Prepare objects and create a peering session
autonomous_system0 = AutonomousSystem.objects.create(asn=64500, name="Test")
internet_exchange0 = InternetExchange.objects.create(name="Test0", slug="test0")
peering_session0 = InternetExchangePeeringSession.objects.create(
autonomous_system=autonomous_system0,
internet_exchange=internet_exchange0,
ip_address="2001:db8::1",
)
# Make sure that the session has been created
self.assertIsNotNone(peering_session0)
# Make sure that the session is returned by calling does_exist()
# without arguments (only one session in the database)
self.assertIsNotNone(InternetExchangePeeringSession.does_exist())
# Make sure we can retrieve the session with its IP
self.assertEqual(
peering_session0,
InternetExchangePeeringSession.does_exist(ip_address="2001:db8::1"),
)
# Make sure we can retrieve the session with its IX
self.assertEqual(
peering_session0,
InternetExchangePeeringSession.does_exist(
internet_exchange=internet_exchange0
),
)
# Make sure we can retrieve the session with AS
self.assertEqual(
peering_session0,
InternetExchangePeeringSession.does_exist(
autonomous_system=autonomous_system0
),
)
# Create another peering session
peering_session1 = InternetExchangePeeringSession.objects.create(
autonomous_system=autonomous_system0,
internet_exchange=internet_exchange0,
ip_address="192.168.1.1",
)
# Make sure that the session has been created
self.assertIsNotNone(peering_session1)
# More than one session, must expect None
self.assertIsNone(InternetExchangePeeringSession.does_exist())
# Make sure we can retrieve the session with its IP
self.assertEqual(
peering_session1,
InternetExchangePeeringSession.does_exist(ip_address="192.168.1.1"),
)
# Make sure it returns None when using a field that the two sessions
# have in common
self.assertIsNone(
InternetExchangePeeringSession.does_exist(
internet_exchange=internet_exchange0
)
)
# Create a new IX
internet_exchange1 = InternetExchange.objects.create(name="Test1", slug="test1")
# Make sure it returns None when there is no session
self.assertIsNone(
InternetExchangePeeringSession.does_exist(
internet_exchange=internet_exchange1
)
)
        # Create a new session with an already used IP in another IX
peering_session2 = InternetExchangePeeringSession.objects.create(
autonomous_system=autonomous_system0,
internet_exchange=internet_exchange1,
ip_address="2001:db8::1",
)
# Make sure that the session has been created
self.assertIsNotNone(peering_session2)
# Make sure we have None, because two sessions will be found
self.assertIsNone(
InternetExchangePeeringSession.does_exist(ip_address="2001:db8::1")
)
# But if we narrow the search with the IX we must have the proper
# session
self.assertEqual(
peering_session2,
InternetExchangePeeringSession.does_exist(
ip_address="2001:db8::1", internet_exchange=internet_exchange1
),
)
class RouterTest(TestCase):
def setUp(self):
super().setUp()
self.router = Router.objects.create(
name="Test", hostname="test.example.com", platform=PLATFORM_JUNOS
)
def test_get_configuration_context(self):
for i in range(1, 6):
AutonomousSystem.objects.create(asn=i, name="Test {}".format(i))
bgp_group = BGPGroup.objects.create(name="Test Group", slug="testgroup")
for i in range(1, 6):
DirectPeeringSession.objects.create(
local_ip_address="192.0.2.1",
autonomous_system=AutonomousSystem.objects.get(asn=i),
bgp_group=bgp_group,
relationship=BGP_RELATIONSHIP_PRIVATE_PEERING,
ip_address="10.0.0.{}".format(i),
router=self.router,
)
internet_exchange = InternetExchange.objects.create(
name="Test IX", slug="testix", router=self.router
)
for i in range(1, 6):
InternetExchangePeeringSession.objects.create(
autonomous_system=AutonomousSystem.objects.get(asn=i),
internet_exchange=internet_exchange,
ip_address="2001:db8::{}".format(i),
)
InternetExchangePeeringSession.objects.create(
autonomous_system=AutonomousSystem.objects.get(asn=i),
internet_exchange=internet_exchange,
ip_address="192.168.0.{}".format(i),
)
# Convert to dict and merge values
bgp_group_dict = bgp_group.to_dict()
bgp_group_dict.update(
{
"sessions": {
6: [
session.to_dict()
for session in DirectPeeringSession.objects.filter(
ip_address__family=6
)
],
4: [
session.to_dict()
for session in DirectPeeringSession.objects.filter(
ip_address__family=4
)
],
}
}
)
internet_exchange_dict = internet_exchange.to_dict()
internet_exchange_dict.update(
{
"sessions": {
6: [
session.to_dict()
for session in InternetExchangePeeringSession.objects.filter(
ip_address__family=6
)
],
4: [
session.to_dict()
for session in InternetExchangePeeringSession.objects.filter(
ip_address__family=4
)
],
}
}
)
# Generate expected result
expected = {
"autonomous_systems": [
autonomous_system.to_dict()
for autonomous_system in AutonomousSystem.objects.all()
],
"my_asn": settings.MY_ASN,
"bgp_groups": [bgp_group_dict],
"internet_exchanges": [internet_exchange_dict],
"routing_policies": [],
"communities": [],
}
result = self.router.get_configuration_context()
self.assertEqual(result, expected)
def test_decrypt_encrypt_string(self):
string = "<PASSWORD>"
# Generic router (crypto not implemented)
router = Router.objects.create(
name="test", hostname="test.example.com", platform=PLATFORM_NONE
)
self.assertEqual(string, router.decrypt_string(router.encrypt_string(string)))
for platform in [PLATFORM_JUNOS, PLATFORM_IOSXR]:
router = Router.objects.create(
name="test", hostname="test.example.com", platform=platform
)
self.assertEqual(
string, router.decrypt_string(router.encrypt_string(string))
)
# Should detect that it is already encrypted
self.assertEqual(
string,
router.decrypt_string(
router.encrypt_string(router.encrypt_string(string))
),
)
# Should detect that it is not encrypted
self.assertEqual(
string, router.decrypt_string(router.decrypt_string(string))
)
def test_napalm_bgp_neighbors_to_peer_list(self):
# Expected results
expected = [0, 0, 1, 2, 3, 2, 2]
napalm_dicts_list = [
            # If None or an empty dict is passed, the returned value must be an empty list
None,
{},
# List size must match peers number including VRFs
{"global": {"peers": {"192.168.0.1": {"remote_as": 64500}}}},
{
"global": {"peers": {"192.168.0.1": {"remote_as": 64500}}},
"vrf": {"peers": {"192.168.1.1": {"remote_as": 64501}}},
},
{
"global": {"peers": {"192.168.0.1": {"remote_as": 64500}}},
"vrf0": {"peers": {"192.168.1.1": {"remote_as": 64501}}},
"vrf1": {"peers": {"192.168.2.1": {"remote_as": 64502}}},
},
# If peer does not have remote_as field, it must be ignored
{
"global": {"peers": {"192.168.0.1": {"remote_as": 64500}}},
"vrf0": {"peers": {"192.168.1.1": {"remote_as": 64501}}},
"vrf1": {"peers": {"192.168.2.1": {"not_valid": 64502}}},
},
            # If an IP address appears more than once, only the first
            # occurrence must be retained
{
"global": {"peers": {"192.168.0.1": {"remote_as": 64500}}},
"vrf0": {"peers": {"192.168.1.1": {"remote_as": 64501}}},
"vrf1": {"peers": {"192.168.1.1": {"remote_as": 64502}}},
},
]
# Create a router
router = Router.objects.create(
name="test", hostname="test.example.com", platform=PLATFORM_JUNOS
)
# Run test cases
for i in range(len(expected)):
self.assertEqual(
expected[i],
len(router._napalm_bgp_neighbors_to_peer_list(napalm_dicts_list[i])),
)
def test_bgp_neighbors_detail_as_list(self):
expected = [
{
"up": True,
"local_as": 201281,
"remote_as": 29467,
"local_address": "192.168.1.1",
}
]
bgp_neighbors_detail = {
"global": {
29467: [
{
"up": True,
"local_as": 201281,
"remote_as": 29467,
"local_address": "192.168.1.1",
}
]
}
}
router = Router.objects.create(
name="test", hostname="test.example.com", platform=PLATFORM_JUNOS
)
self.assertEqual(
expected, router.bgp_neighbors_detail_as_list(bgp_neighbors_detail)
)
class RoutingPolicyTest(TestCase):
def test_create(self):
routing_policy_list = [
{"name": "Test1", "slug": "test1", "type": None, "weight": 0},
{
"name": "Test2",
"slug": "test2",
"type": ROUTING_POLICY_TYPE_EXPORT,
"weight": 0,
},
]
for details in routing_policy_list:
if details["type"]:
routing_policy = RoutingPolicy.objects.create(
name=details["name"], slug=details["slug"], type=details["type"]
)
else:
routing_policy = RoutingPolicy.objects.create(
name=details["name"], slug=details["slug"]
)
self.assertIsNotNone(routing_policy)
self.assertEqual(details["name"], routing_policy.name)
self.assertEqual(details["slug"], routing_policy.slug)
self.assertEqual(
details["type"] or ROUTING_POLICY_TYPE_IMPORT, routing_policy.type
)
def test_get_type_html(self):
expected = [
'<span class="badge badge-primary">Export</span>',
'<span class="badge badge-info">Import</span>',
'<span class="badge badge-dark">Import+Export</span>',
'<span class="badge badge-secondary">Unknown</span>',
]
routing_policy_types = [
ROUTING_POLICY_TYPE_EXPORT,
ROUTING_POLICY_TYPE_IMPORT,
ROUTING_POLICY_TYPE_IMPORT_EXPORT,
"unknown",
]
for i in range(len(routing_policy_types)):
self.assertEqual(
expected[i],
RoutingPolicy.objects.create(
name="test{}".format(i),
slug="test{}".format(i),
type=routing_policy_types[i],
).get_type_html(),
)
class TemplateTest(TestCase):
def setUp(self):
super().setUp()
self.template = Template(name="Test", template="{{ test }}")
def test_render(self):
self.assertEqual(self.template.render({"test": "test"}), "test")
|
peering/tests/test_models.py
|
| 0.602296 | 0.297508 |
import logging
from collections import defaultdict
import matplotlib.pyplot as plt
from tqdm import tqdm
from social_media_buzz.src.constants import ACCURACY, R2, RANK_SIZE
from social_media_buzz.src.data import (
get_candidate_features, prepare_dataset,
show_rank, write_results,
)
from social_media_buzz.src.linear_regression import LinearRegressionModel
logger = logging.getLogger(__name__)
def rank_features(metric_result, name, top=RANK_SIZE) -> list:
"""Get the top most significative features by averaging out their results.
"""
analysis = defaultdict(lambda: 0)
amount = len(metric_result)
for fold_result in tqdm(metric_result, desc=f"Processing {name} results."):
for attr_result in fold_result:
attr_name = attr_result[0]
analysis[attr_name] += attr_result[1]
averages = map(lambda x: (x[0], x[1] / amount), list(analysis.items()))
ranking = sorted(list(averages), key=lambda x: x[1] * -1)
return ranking[:top]
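# A hypothetical call to illustrate the expected shapes (values made up):
#   rank_features([[("a", 0.9), ("b", 0.5)], [("a", 0.7), ("b", 0.3)]], "R2", top=1)
# averages each attribute over the two folds ({"a": 0.8, "b": 0.4}) and returns
# [("a", 0.8)].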
def get_ranks(fold_results=None) -> list:
"""Print ranks to terminal and write csv files."""
ranks = []
for name in (R2, ACCURACY):
metric_result = fold_results.get(name)
rank = rank_features(metric_result.values(), name)
logger.info(f"{name} ranking:")
show_rank(rank, metric_result, name)
ranks.append(rank)
return ranks
def generate_charts(ranks):
"""Use rankings to generate chart for each fold."""
for name, rank in zip([R2, ACCURACY], ranks):
best_attr = rank[0][0]
for idx, dataset in enumerate(prepare_dataset()):
training_data, testing_data = dataset
model = LinearRegressionModel(training_data)
model.train(best_attr)
model.test(testing_data)
fig, ax = plt.subplots()
ax.set_title(f"Fold {idx:02}")
filename = f"{name.lower()}_{best_attr}_{idx:02}"
model.plot_chart(filename=filename)
def main():
"""Run main logics for comparing features.
For each fold, for each attribute, train model using that attribute and
the target feature. Then, calculate R-squared, accuracy and store them.
Write results in CSV files and rank the best attributes for each metric.
"""
features = get_candidate_features()
results = defaultdict(lambda: defaultdict(list))
fold_results = defaultdict(lambda: defaultdict(list))
for idx, dataset in enumerate(prepare_dataset()):
training_data, testing_data = dataset
model = LinearRegressionModel(training_data)
progress = tqdm(features, position=1)
for attr_name in progress:
progress.set_description(f"Trying feature {attr_name}")
model.train(attr_name)
model.test(testing_data)
results[R2][attr_name].append(model.r_squared)
results[ACCURACY][attr_name].append(model.testing_acc)
fold_results[R2][idx].append((attr_name, model.r_squared))
fold_results[ACCURACY][idx].append((attr_name, model.testing_acc))
write_results(results)
ranks = get_ranks(fold_results)
generate_charts(ranks)
|
social_media_buzz/src/analysis.py
|
| 0.738009 | 0.266462 |
from __future__ import annotations
import logging
from datetime import timedelta
from functools import cached_property
from typing import Any
from homeassistant.components.climate.const import PRESET_BOOST, PRESET_NONE
from homeassistant.components.fan import FanEntityDescription, FanEntity, SUPPORT_SET_SPEED, SUPPORT_PRESET_MODE, \
DIRECTION_FORWARD
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import TionInstance
from .climate import TionClimateEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=30)
config = FanEntityDescription(
key="fan_speed",
entity_category=EntityCategory.CONFIG,
name="fan speed",
entity_registry_enabled_default=True,
icon="mdi:fan",
)
async def async_setup_entry(hass: HomeAssistant, _config: ConfigEntry, async_add_entities):
"""Set up the sensor entry"""
async_add_entities([TionFan(config, hass.data[DOMAIN][_config.unique_id])])
return True
class TionFan(FanEntity, CoordinatorEntity):
_attr_supported_features = SUPPORT_PRESET_MODE | SUPPORT_SET_SPEED
_attr_oscillating = False
_attr_preset_modes = [PRESET_NONE, PRESET_BOOST]
_attr_speed_count = len(TionClimateEntity.attr_fan_modes())
_attr_current_direction = DIRECTION_FORWARD
_mode_percent_mapping = {
0: 0,
1: 17,
2: 33,
3: 50,
4: 67,
5: 83,
6: 100,
}
_percent_mode_mapping = {
0: 0,
16: 1,
33: 2,
50: 3,
66: 4,
83: 5,
100: 6,
}
    # Home Assistant uses a float speed step and ceil to determine the supported speed percentages.
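    # For example, with six speeds the step is 100 / 6 ~= 16.67. The two dicts above
    # hard-code the percent<->mode conversion; note that the forward map rounds some
    # steps up (17, 67) while the reverse map rounds them down (16, 66), so values
    # missing from the reverse map fall through to the loop in percent2mode() below.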
def set_preset_mode(self, preset_mode: str) -> None:
pass
    def set_direction(self, direction: str) -> None:
        raise NotImplementedError
    def turn_on(self, percentage: int | None = None, preset_mode: str | None = None, **kwargs) -> None:
        raise NotImplementedError
    def oscillate(self, oscillating: bool) -> None:
        raise NotImplementedError
    def turn_off(self, **kwargs: Any) -> None:
        pass
    def set_percentage(self, percentage: int) -> None:
        raise NotImplementedError
def __init__(self, description: FanEntityDescription, instance: TionInstance):
"""Initialize the fan."""
CoordinatorEntity.__init__(self=self, coordinator=instance, )
self.entity_description = description
self._attr_name = f"{instance.name} {description.name}"
self._attr_device_info = instance.device_info
self._attr_unique_id = f"{instance.unique_id}-{description.key}"
self._saved_fan_mode = None
_LOGGER.debug(f"Init of fan {self.name} ({instance.unique_id})")
_LOGGER.debug(f"Speed step is {self.percentage_step}")
def percent2mode(self, percentage: int) -> int:
result = 0
try:
return self._percent_mode_mapping[percentage]
except KeyError:
_LOGGER.warning(f"Could not to convert {percentage} to mode with {self._percent_mode_mapping}. "
f"Will use fall back method.")
for i in range(len(TionClimateEntity.attr_fan_modes())):
if percentage < self.percentage_step * i:
break
else:
result = i
else:
result = 6
return result
def mode2percent(self) -> int | None:
return self._mode_percent_mapping[self.fan_mode] if self.fan_mode is not None else None
async def async_set_percentage(self, percentage: int) -> None:
"""Set the speed of the fan, as a percentage."""
await self.coordinator.set(fan_speed=self.percent2mode(percentage), is_on=percentage > 0)
@cached_property
def boost_fan_mode(self) -> int:
return max(TionClimateEntity.attr_fan_modes())
@property
def fan_mode(self):
return self.coordinator.data.get(self.entity_description.key)
async def async_set_preset_mode(self, preset_mode: str) -> None:
if preset_mode == PRESET_BOOST and self.preset_mode != PRESET_BOOST:
if self._saved_fan_mode is None:
self._saved_fan_mode = int(self.fan_mode)
await self.coordinator.set(fan_speed=self.boost_fan_mode)
if preset_mode == PRESET_NONE and self.preset_mode == PRESET_BOOST:
if self._saved_fan_mode is not None:
await self.coordinator.set(fan_speed=self._saved_fan_mode)
self._saved_fan_mode = None
self._attr_preset_mode = preset_mode
async def async_turn_on(self, percentage: int | None = None, preset_mode: str | None = None, **kwargs, ) -> None:
target_speed = 2 if self._saved_fan_mode is None else self._saved_fan_mode
self._saved_fan_mode = None
await self.coordinator.set(fan_speed=target_speed, is_on=True)
async def async_turn_off(self, **kwargs: Any) -> None:
if self._saved_fan_mode is None and self.fan_mode > 0:
self._saved_fan_mode = self.fan_mode
await self.coordinator.set(is_on=False)
def _handle_coordinator_update(self) -> None:
        self._attr_assumed_state = not self.coordinator.last_update_success
self._attr_is_on = self.coordinator.data.get("is_on")
self._attr_percentage = self.mode2percent() if self._attr_is_on else 0 # should check attr to avoid deadlock
self.async_write_ha_state()
|
custom_components/tion/fan.py
|
| 0.821975 | 0.115736 |
import init_file as variables
import cj_function_lib as cj
from datetime import datetime
bsn_table = cj.extract_table_from_mdb(variables.ProjMDB, "bsn", variables.path + "\\bsn.tmp~")
bsn_params = bsn_table[0].split(",")
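# bsn_table holds the single "bsn" record exported from the project .mdb; splitting on
# commas yields one positional field per basin parameter (index 0 is assumed to be the
# record key, so the SWAT parameters start at index 1 below).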
now = datetime.now()
DateAndTime = str(now.month) + "/" + str(now.day) + "/" + \
str(now.year) + " " + str(now.time()).split(".")[0]
SWAT_Vers = "QSWAT Workflow v1.5.2"
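# A minimal defensive sketch (hypothetical helper, not used below): the positional
# lookups are brittle, so something like
#
#     def bsn_field(i, default=""):
#         try:
#             return bsn_params[i].strip('"')
#         except IndexError:
#             return default
#
# would keep a short or reordered record from raising IndexError, assuming the table
# always stores its values as quoted CSV fields.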
# Parameters
SFTMP = bsn_params[1].strip('"')
SMTMP = bsn_params[2].strip('"')
SMFMX = bsn_params[3].strip('"')
SMFMN = bsn_params[4].strip('"')
TIMP = bsn_params[5].strip('"')
SNOCOVMX = bsn_params[6].strip('"')
SNO50COV = bsn_params[7].strip('"')
IPET = bsn_params[8].strip('"')
ESCO = bsn_params[9].strip('"')
EPCO = bsn_params[10].strip('"')
EVLAI = bsn_params[11].strip('"')
FFCB = bsn_params[12].strip('"')
IEVENT = bsn_params[13].strip('"')
ICRK = bsn_params[14].strip('"')
SURLAG = bsn_params[15].strip('"')
ADJ_PKR = bsn_params[16].strip('"')
PRF_BSN = bsn_params[17].strip('"')
SPCON = bsn_params[18].strip('"')
SPEXP = bsn_params[19].strip('"')
RCN = bsn_params[20].strip('"')
CMN = bsn_params[21].strip('"')
N_UPDIS = bsn_params[22].strip('"')
P_UPDIS = bsn_params[23].strip('"')
NPERCO = bsn_params[24].strip('"')
PPERCO = bsn_params[25].strip('"')
PHOSKD = bsn_params[26].strip('"')
PSP = bsn_params[27].strip('"')
RSDCO = bsn_params[28].strip('"')
PERCOP = bsn_params[29].strip('"')
ISUBWQ = bsn_params[30].strip('"')
WDPQ = bsn_params[31].strip('"')
WGPQ = bsn_params[32].strip('"')
WDLPQ = bsn_params[33].strip('"')
WGLPQ = bsn_params[34].strip('"')
WDPS = bsn_params[35].strip('"')
WGPS = bsn_params[36].strip('"')
WDLPS = bsn_params[37].strip('"')
WGLPS = bsn_params[38].strip('"')
BACTKDQ = bsn_params[39].strip('"')
THBACT = bsn_params[40].strip('"')
WOF_P = bsn_params[41].strip('"')
WOF_LP = bsn_params[42].strip('"')
WDPF = bsn_params[43].strip('"')
WGPF = bsn_params[44].strip('"')
WDLPF = bsn_params[45].strip('"')
WGLPF = bsn_params[46].strip('"')
IRTE = bsn_params[47].strip('"')
MSK_CO1 = bsn_params[48].strip('"')
MSK_CO2 = bsn_params[49].strip('"')
MSK_X = bsn_params[50].strip('"')
IDEG = bsn_params[51].strip('"')
IWQ = bsn_params[52].strip('"')
TRNSRCH = bsn_params[53].strip('"')
EVRCH = bsn_params[54].strip('"')
IRTPEST = bsn_params[55].strip('"')
ICN = bsn_params[56].strip('"')
CNCOEF = bsn_params[57].strip('"')
CDN = bsn_params[58].strip('"')
SDNCO = bsn_params[59].strip('"')
BACT_SWF = bsn_params[60].strip('"')
BACTMX = bsn_params[61].strip('"')
BACTMINLP = bsn_params[62].strip('"')
BACTMINP = bsn_params[63].strip('"')
WDLPRCH = bsn_params[64].strip('"')
WDPRCH = bsn_params[65].strip('"')
WDLPRES = bsn_params[66].strip('"')
WDPRES = bsn_params[67].strip('"')
TB_ADJ = bsn_params[68].strip('"')
DEP_IMP = bsn_params[69].strip('"')
DDRAIN_BSN = bsn_params[70].strip('"')
TDRAIN_BSN = bsn_params[71].strip('"')
GDRAIN_BSN = bsn_params[72].strip('"')
CN_FROZ = bsn_params[73].strip('"')
ISED_DET = bsn_params[74].strip('"')
ETFILE = bsn_params[75].strip('"')
DORM_HR = bsn_params[76].strip('"')
SMXCO = bsn_params[77].strip('"')
FIXCO = bsn_params[78].strip('"')
NFIXMX = bsn_params[79].strip('"')
ANION_EXCL_BSN = bsn_params[80].strip('"')
CH_ONCO_BSN = bsn_params[81].strip('"')
CH_OPCO_BSN = bsn_params[82].strip('"')
HLIFE_NGW_BSN = bsn_params[83].strip('"')
RCN_SUB_BSN = bsn_params[84].strip('"')
BC1_BSN = bsn_params[85].strip('"')
BC2_BSN = bsn_params[86].strip('"')
BC3_BSN = bsn_params[87].strip('"')
BC4_BSN = bsn_params[88].strip('"')
DECR_MIN = bsn_params[89].strip('"')
ICFAC = bsn_params[90].strip('"')
RSD_COVCO = bsn_params[91].strip('"')
VCRIT = bsn_params[92].strip('"')
CSWAT = bsn_params[93].strip('"')
RES_STLR_CO = bsn_params[94].strip('"')
BFLO_DIST = bsn_params[95].strip('"')
IUH = bsn_params[96].strip('"')
UHALPHA = bsn_params[97].strip('"')
LU_NODRAIN = bsn_params[98].strip('"')
EROS_SPL = bsn_params[99].strip('"')
RILL_MULT = bsn_params[100].strip('"')
EROS_EXPO = bsn_params[101].strip('"')
SUBD_CHSED = bsn_params[102].strip('"')
C_FACTOR = bsn_params[103].strip('"')
CH_D50 = bsn_params[104].strip('"')
SIG_G = bsn_params[105].strip('"')
RE_BSN = bsn_params[106].strip('"')
SDRAIN_BSN = bsn_params[107].strip('"')
DRAIN_CO_BSN = bsn_params[108].strip('"')
PC_BSN = bsn_params[109].strip('"')
LATKSATF_BSN = bsn_params[110].strip('"')
ITDRN = bsn_params[111].strip('"')
IWTDN = bsn_params[112].strip('"')
SOL_P_MODEL = bsn_params[113].strip('"')
IABSTR = bsn_params[114].strip('"')
IATMODEP = bsn_params[115].strip('"')
RAMMO_SUB = bsn_params[116].strip('"')
RCN_SUB = bsn_params[117].strip('"')
DRYDEP_NH4 = bsn_params[118].strip('"')
DRYDEP_NO3 = bsn_params[119].strip('"')
R2ADJ_BSN = bsn_params[120].strip('"')
SSTMAXD_BSN = bsn_params[121].strip('"')
ISMAX = bsn_params[122].strip('"')
IROUTUNIT = bsn_params[123].strip('"')
# Building String
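# Each output line below is the parameter value formatted by cj.trailing_spaces(16, value, n)
# (presumably a fixed 16-character field with n decimal places), followed by
# " | NAME : description", matching the line layout SWAT expects in basins.bsn.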
bsn_file = "Basin data .bsn file " + DateAndTime + " " + SWAT_Vers + \
"\n" + "Modeling Options: Land Area" + \
"\n" + "Water Balance:" + \
"\n" + cj.trailing_spaces(16, SFTMP, 3) + " | SFTMP : Snowfall temperature [deg C]" + \
"\n" + cj.trailing_spaces(16, SMTMP, 3) + " | SMTMP : Snow melt base temperature [deg C]" + \
"\n" + cj.trailing_spaces(16, SMFMX, 3) + " | SMFMX : Melt factor for snow on June 21 [mm H2O/deg C-day]" + \
"\n" + cj.trailing_spaces(16, SMFMN, 3) + " | SMFMN : Melt factor for snow on December 21 [mm H2O/deg C-day]" + \
"\n" + cj.trailing_spaces(16, TIMP, 3) + " | TIMP : Snow pack temperature lag factor" + \
"\n" + cj.trailing_spaces(16, SNOCOVMX, 3) + " | SNOCOVMX : Minimum snow water content that corresponds to 100% snow cover [mm]" + \
"\n" + cj.trailing_spaces(16, SNO50COV, 3) + " | SNO50COV : Fraction of snow volume represented by SNOCOVMX that corresponds to 50% snow cover" + \
"\n" + cj.trailing_spaces(16, IPET, 0) + " | IPET: PET method: 0=priest-t, 1=pen-m, 2=har, 3=read into model" + \
"\n" + " " + " | PETFILE: name of potential ET input file" + \
"\n" + cj.trailing_spaces(16, ESCO, 3) + " | ESCO: soil evaporation compensation factor" + \
"\n" + cj.trailing_spaces(16, EPCO, 3) + " | EPCO: plant water uptake compensation factor" + \
"\n" + cj.trailing_spaces(16, EVLAI, 3) + " | EVLAI : Leaf area index at which no evaporation occurs from water surface [m2/m2]" + \
"\n" + cj.trailing_spaces(16, FFCB, 3) + " | FFCB : Initial soil water storage expressed as a fraction of field capacity water content" + \
"\n" + "Surface Runoff:" + \
"\n" + cj.trailing_spaces(16, IEVENT, 0) + " | IEVENT: rainfall/runoff code: 0=daily rainfall/CN" + \
"\n" + cj.trailing_spaces(16, ICRK, 0) + " | ICRK: crack flow code: 1=model crack flow in soil" + \
"\n" + cj.trailing_spaces(16, SURLAG, 3) + " | SURLAG : Surface runoff lag time [days]" + \
"\n" + cj.trailing_spaces(16, ADJ_PKR, 3) + " | ADJ_PKR : Peak rate adjustment factor for sediment routing in the subbasin (tributary channels)" + \
"\n" + cj.trailing_spaces(16, PRF_BSN, 3) + " | PRF_BSN : Peak rate adjustment factor for sediment routing in the main channel" + \
"\n" + cj.trailing_spaces(16, SPCON, 4) + " | SPCON : Linear parameter for calculating the maximum amount of sediment that can be reentrained during channel sediment routing" + \
"\n" + cj.trailing_spaces(16, SPEXP, 3) + " | SPEXP : Exponent parameter for calculating sediment reentrained in channel sediment routing" + \
"\n" + "Nutrient Cycling:" + \
"\n" + cj.trailing_spaces(16, RCN, 3) + " | RCN : Concentration of nitrogen in rainfall [mg N/l]" + \
"\n" + cj.trailing_spaces(16, CMN, 5) + " | CMN : Rate factor for humus mineralization of active organic nitrogen" + \
"\n" + cj.trailing_spaces(16, N_UPDIS, 3) + " | N_UPDIS : Nitrogen uptake distribution parameter" + \
"\n" + cj.trailing_spaces(16, P_UPDIS, 3) + " | P_UPDIS : Phosphorus uptake distribution parameter" + \
"\n" + cj.trailing_spaces(16, NPERCO, 3) + " | NPERCO : Nitrogen percolation coefficient" + \
"\n" + cj.trailing_spaces(16, PPERCO, 3) + " | PPERCO : Phosphorus percolation coefficient" + \
"\n" + cj.trailing_spaces(16, PHOSKD, 3) + " | PHOSKD : Phosphorus soil partitioning coefficient" + \
"\n" + cj.trailing_spaces(16, PSP, 3) + " | PSP : Phosphorus sorption coefficient" + \
"\n" + cj.trailing_spaces(16, RSDCO, 3) + " | RSDCO : Residue decomposition coefficient" + \
"\n" + "Pesticide Cycling:" + \
"\n" + cj.trailing_spaces(16, PERCOP, 3) + " | PERCOP : Pesticide percolation coefficient" + \
"\n" + "Algae/CBOD/Dissolved Oxygen:" + \
"\n" + cj.trailing_spaces(16, ISUBWQ, 0) + " | ISUBWQ: subbasin water quality parameter" + \
"\n" + "Bacteria:" + \
"\n" + cj.trailing_spaces(16, WDPQ, 3) + " | WDPQ : Die-off factor for persistent bacteria in soil solution. [1/day]" + \
"\n" + cj.trailing_spaces(16, WGPQ, 3) + " | WGPQ : Growth factor for persistent bacteria in soil solution [1/day]" + \
"\n" + cj.trailing_spaces(16, WDLPQ, 3) + " | WDLPQ : Die-off factor for less persistent bacteria in soil solution [1/day]" + \
"\n" + cj.trailing_spaces(16, WGLPQ, 3) + " | WGLPQ : Growth factor for less persistent bacteria in soil solution. [1/day]" + \
"\n" + cj.trailing_spaces(16, WDPS, 3) + " | WDPS : Die-off factor for persistent bacteria adsorbed to soil particles. [1/day]" + \
"\n" + cj.trailing_spaces(16, WGPS, 3) + " | WGPS : Growth factor for persistent bacteria adsorbed to soil particles. [1/day]" + \
"\n" + cj.trailing_spaces(16, WDLPS, 3) + " | WDLPS : Die-off factor for less persistent bacteria adsorbed to soil particles. [1/day]" + \
"\n" + cj.trailing_spaces(16, WGLPS, 3) + " | WGLPS : Growth factor for less persistent bacteria adsorbed to soil particles. [1/day]" + \
"\n" + cj.trailing_spaces(16, BACTKDQ, 3) + " | BACTKDQ : Bacteria partition coefficient" + \
"\n" + cj.trailing_spaces(16, THBACT, 3) + " | THBACT : Temperature adjustment factor for bacteria die-off/growth" + \
"\n" + cj.trailing_spaces(16, WOF_P, 3) + " | WOF_P: wash-off fraction for persistent bacteria on foliage" + \
"\n" + cj.trailing_spaces(16, WOF_LP, 3) + " | WOF_LP: wash-off fraction for less persistent bacteria on foliage" + \
"\n" + cj.trailing_spaces(16, WDPF, 3) + " | WDPF: persistent bacteria die-off factor on foliage" + \
"\n" + cj.trailing_spaces(16, WGPF, 3) + " | WGPF: persistent bacteria growth factor on foliage" + \
"\n" + cj.trailing_spaces(16, WDLPF, 3) + " | WDLPF: less persistent bacteria die-off factor on foliage" + \
"\n" + cj.trailing_spaces(16, WGLPF, 3) + " | WGLPF: less persistent bacteria growth factor on foliage" + \
"\n" + cj.trailing_spaces(16, ISED_DET, 0) + " | ISED_DET:" + \
"\n" + "Modeling Options: Reaches" + \
"\n" + cj.trailing_spaces(16, IRTE, 0) + " | IRTE: water routing method 0=variable travel-time 1=Muskingum" + \
"\n" + cj.trailing_spaces(16, MSK_CO1, 3) + " | MSK_CO1 : Calibration coefficient used to control impact of the storage time constant (Km) for normal flow" + \
"\n" + cj.trailing_spaces(16, MSK_CO2, 3) + " | MSK_CO2 : Calibration coefficient used to control impact of the storage time constant (Km) for low flow " + \
"\n" + cj.trailing_spaces(16, MSK_X, 3) + " | MSK_X : Weighting factor controlling relative importance of inflow rate and outflow rate in determining water storage in reach segment" + \
"\n" + cj.trailing_spaces(16, IDEG, 0) + " | IDEG: channel degradation code" + \
"\n" + cj.trailing_spaces(16, IWQ, 0) + " | IWQ: in-stream water quality: 1=model in-stream water quality" + \
"\n" + " basins.wwq | WWQFILE: name of watershed water quality file" + \
"\n" + cj.trailing_spaces(16, TRNSRCH, 3) + " | TRNSRCH: reach transmission loss partitioning to deep aquifer" + \
"\n" + cj.trailing_spaces(16, EVRCH, 3) + " | EVRCH : Reach evaporation adjustment factor" + \
"\n" + cj.trailing_spaces(16, IRTPEST, 0) + " | IRTPEST : Number of pesticide to be routed through the watershed channel network" + \
"\n" + cj.trailing_spaces(16, ICN, 0) + " | ICN : Daily curve number calculation method" + \
"\n" + cj.trailing_spaces(16, CNCOEF, 3) + " | CNCOEF : Plant ET curve number coefficient" + \
"\n" + cj.trailing_spaces(16, CDN, 3) + " | CDN : Denitrification exponential rate coefficient" + \
"\n" + cj.trailing_spaces(16, SDNCO, 3) + " | SDNCO : Denitrification threshold water content" + \
"\n" + cj.trailing_spaces(16, BACT_SWF, 3) + " | BACT_SWF : Fraction of manure applied to land areas that has active colony forming units" + \
"\n" + cj.trailing_spaces(16, BACTMX, 3) + " | BACTMX : Bacteria percolation coefficient [10 m3/Mg]." + \
"\n" + cj.trailing_spaces(16, BACTMINLP, 3) + " | BACTMINLP : Minimum daily bacteria loss for less persistent bacteria [# cfu/m2]" + \
"\n" + cj.trailing_spaces(16, BACTMINP, 3) + " | BACTMINP : Minimum daily bacteria loss for persistent bacteria [# cfu/m2]" + \
"\n" + cj.trailing_spaces(16, WDLPRCH, 3) + " | WDLPRCH: Die-off factor for less persistent bacteria in streams (moving water) at 20 C [1/day]" + \
"\n" + cj.trailing_spaces(16, WDPRCH, 3) + " | WDPRCH : Die-off factor for persistent bacteria in streams (moving water) at 20 C [1/day]" + \
"\n" + cj.trailing_spaces(16, WDLPRES, 3) + " | WDLPRES : Die-off factor for less persistent bacteria in water bodies (still water) at 20 C [1/day]" + \
"\n" + cj.trailing_spaces(16, WDPRES, 3) + " | WDPRES : Die-off factor for persistent bacteria in water bodies (still water) at 20 C [1/day]" + \
"\n" + cj.trailing_spaces(16, TB_ADJ, 3) + " | TB_ADJ : New variable in testing ...Adjustment factor for subdaily unit hydrograph basetime" + \
"\n" + cj.trailing_spaces(16, DEP_IMP, 3) + " | DEPIMP_BSN : Depth to impervious layer for modeling perched water tables [mm]" + \
"\n" + cj.trailing_spaces(16, DDRAIN_BSN, 3) + " | DDRAIN_BSN : Depth to the sub-surface drain [mm]" + \
"\n" + cj.trailing_spaces(16, TDRAIN_BSN, 3) + " | TDRAIN_BSN : Time to drain soil to field capacity [hours]" + \
"\n" + cj.trailing_spaces(16, GDRAIN_BSN, 3) + " | GDRAIN_BSN : Drain tile lag time [hours]" + \
"\n" + cj.trailing_spaces(16, CN_FROZ, 6) + " | CN_FROZ : Parameter for frozen soil adjustment on infiltration/runoff" + \
"\n" + cj.trailing_spaces(16, DORM_HR, 3) + " | DORM_HR : Time threshold used to define dormancy [hours]" + \
"\n" + cj.trailing_spaces(16, SMXCO, 3) + " | SMXCO : Adjustment factor for maximum curve number S factor" + \
"\n" + cj.trailing_spaces(16, FIXCO, 3) + " | FIXCO : Nitrogen fixation coefficient" + \
"\n" + cj.trailing_spaces(16, NFIXMX, 3) + " | NFIXMX : Maximum daily-n fixation [kg/ha]" + \
"\n" + cj.trailing_spaces(16, ANION_EXCL_BSN, 3) + " | ANION_EXCL_BSN : Fraction of porosity from which anions are excluded" + \
"\n" + cj.trailing_spaces(16, CH_ONCO_BSN, 3) + " | CH_ONCO_BSN : Channel organic nitrogen concentration in basin [ppm]" + \
"\n" + cj.trailing_spaces(16, CH_OPCO_BSN, 3) + " | CH_OPCO_BSN : Channel organic phosphorus concentration in basin [ppm]" + \
"\n" + cj.trailing_spaces(16, HLIFE_NGW_BSN, 3) + " | HLIFE_NGW_BSN : Half-life of nitrogen in groundwater [days]" + \
"\n" + cj.trailing_spaces(16, RCN_SUB_BSN, 3) + " | RCN_SUB_BSN : Concentration of nitrate in precipitation [ppm]" + \
"\n" + cj.trailing_spaces(16, BC1_BSN, 3) + " | BC1_BSN : Rate constant for biological oxidation of NH3 [1/day]" + \
"\n" + cj.trailing_spaces(16, BC2_BSN, 3) + " | BC2_BSN : Rate constant for biological oxidation NO2 to NO3 [1/day]" + \
"\n" + cj.trailing_spaces(16, BC3_BSN, 3) + " | BC3_BSN : Rate constant for hydrolosis of organic nitrogen to ammonia [1/day]" + \
"\n" + cj.trailing_spaces(16, BC4_BSN, 3) + " | BC4_BSN : Rate constant for decay of organic phosphorus to dissolved phosphorus [1/day]" + \
"\n" + cj.trailing_spaces(16, DECR_MIN, 3) + " | DECR_MIN: Minimum daily residue decay" + \
"\n" + cj.trailing_spaces(16, ICFAC, 3) + " | ICFAC : C-factor calculation method" + \
"\n" + cj.trailing_spaces(16, RSD_COVCO, 3) + " | RSD_COVCO : Residue cover factor for computing fraction of cover" + \
"\n" + cj.trailing_spaces(16, VCRIT, 3) + " | VCRIT : Critical velocity" + \
"\n" + cj.trailing_spaces(16, CSWAT, 0) + " | CSWAT : Code for new carbon routines" + \
"\n" + cj.trailing_spaces(16, RES_STLR_CO, 3) + " | RES_STLR_CO : Reservoir sediment settling coefficient" + \
"\n" + cj.trailing_spaces(16, BFLO_DIST, 3) + " | BFLO_DIST 0-1 (1:profile of baseflow in a day follows rainfall pattern, 0:baseflow evenly distributed to each time step during a day" + \
"\n" + cj.trailing_spaces(16, IUH, 0) + " | IUH : Unit hydrograph method: 1=triangular UH, 2=gamma function UH" + \
"\n" + cj.trailing_spaces(16, UHALPHA, 3) + " | UHALPHA : alpha coefficient for gamma function unit hydrograph. Required if iuh=2 is selected" + \
"\n" + "Land Use types in urban.dat that do not make runoff to urban BMPs:" + \
"\n" + \
"\n" + "Subdaily Erosion:" + \
"\n" + cj.trailing_spaces(16, EROS_SPL, 3) + " | EROS_SPL: The splash erosion coefficient ranges 0.9 - 3.1" + \
"\n" + cj.trailing_spaces(16, RILL_MULT, 3) + " | RILL_MULT: Multiplier to USLE_K for soil susceptible to rill erosion, ranges 0.5 - 2.0" + \
"\n" + cj.trailing_spaces(16, EROS_EXPO, 3) + " | EROS_EXPO: an exponent in the overland flow erosion equation, ranges 1.5 - 3.0" + \
"\n" + cj.trailing_spaces(16, SUBD_CHSED, 3) + " | SUBD_CHSED: 1=Brownlie(1981) model, 2=Yang(1973,1984) model" + \
"\n" + cj.trailing_spaces(16, C_FACTOR, 3) + " | C_FACTOR: Scaling parameter for Cover and management factor in ANSWERS erosion model" + \
"\n" + cj.trailing_spaces(16, CH_D50, 1) + " | CH_D50 : median particle diameter of channel bed [mm]" + \
"\n" + cj.trailing_spaces(16, SIG_G, 3) + " | SIG_G : geometric standard deviation of particle sizes" + \
"\n" + cj.trailing_spaces(16, RE_BSN, 2) + " | RE_BSN: Effective radius of drains" + \
"\n" + cj.trailing_spaces(16, SDRAIN_BSN, 2) + " | SDRAIN_BSN: Distance between two drain or tile tubes" + \
"\n" + cj.trailing_spaces(16, DRAIN_CO_BSN, 2) + " | DRAIN_CO_BSN: Drainage coefficient" + \
"\n" + cj.trailing_spaces(16, PC_BSN, 3) + " | PC_BSN: Pump capacity" + \
"\n" + cj.trailing_spaces(16, LATKSATF_BSN, 2) + " | LATKSATF_BSN: Multiplication factor to determine lateral ksat from SWAT ksat input value for HRU" + \
"\n" + cj.trailing_spaces(16, ITDRN, 0) + " | ITDRN: Tile drainage equations flag" + \
"\n" + cj.trailing_spaces(16, IWTDN, 0) + " | IWTDN: Water table depth algorithms flag" + \
"\n" + cj.trailing_spaces(16, SOL_P_MODEL, 0) + " | SOL_P_MODEL: if = 1, use new soil P model" + \
"\n" + cj.trailing_spaces(16, IABSTR, 2) + " | IABSTR: Initial abstraction on impervious cover (mm)" + \
"\n" + cj.trailing_spaces(16, IATMODEP, 0) + " | IATMODEP: 0 = average annual inputs 1 = monthly inputs" + \
"\n" + cj.trailing_spaces(16, R2ADJ_BSN, 0) + " | R2ADJ_BSN: basinwide retention parm adjustment factor" + \
"\n" + cj.trailing_spaces(16, SSTMAXD_BSN, 0) + " | SSTMAXD_BSN: basinwide retention parm adjustment factor" + \
"\n" + cj.trailing_spaces(16, ISMAX, 0) + " | ISMAX: max depressional storage code" + \
"\n" + cj.trailing_spaces(16, IROUTUNIT, 0) + " | IROUTUNIT:" + \
"\n"
fileName = "basins.bsn"
cj.write_to(variables.DefaultSimDir + "TxtInOut\\" + fileName, bsn_file)
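# The hard-coded "TxtInOut\\" separator assumes a Windows-style path; os.path.join(
# variables.DefaultSimDir, "TxtInOut", fileName) would be the portable equivalent.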
#print fileName
|
workflow_lib/bsn.py
|
| 0.147371 | 0.062075 |
from _pytest.pytester import Testdir as TD, LineMatcher
from contextlib import contextmanager
from textwrap import dedent
import subprocess
import tempfile
import asyncio
import socket
import signal
import pytest
import shutil
import sys
import py
import os
this_dir = os.path.dirname(__file__)
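# listening(): context manager that yields a UNIX-domain socket bound to a fresh
# temporary path (plus that path) and removes the socket file again on exit.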
@contextmanager
def listening():
filename = None
try:
with tempfile.NamedTemporaryFile(delete=False) as fle:
filename = fle.name
fle.close()
os.remove(fle.name)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.settimeout(2)
s.bind(fle.name)
s.listen(1)
yield s, fle.name
s.close()
    finally:
        if filename is not None and os.path.exists(filename):
            os.remove(filename)
def example_dir_factory(tmpdir_factory, name):
path = os.path.join(this_dir, name)
assert os.path.isdir(path)
expected_file = os.path.join(this_dir, name, "expected")
assert os.path.isfile(expected_file)
with open(expected_file, "r") as fle:
expected = fle.read().strip()
directory = tmpdir_factory.mktemp(name)
shutil.rmtree(directory)
shutil.copytree(path, directory)
class Factory:
@property
def expected(s):
return expected
def mktemp(s, p, **kwargs):
if p.startswith("tmp-"):
return tmpdir_factory.mktemp(p)
else:
return directory
return Factory()
@pytest.mark.parametrize(
"name", [name for name in os.listdir(this_dir) if name.startswith("example_")]
)
async it "shows correctly for failing fixtures", name, request, tmpdir_factory:
factory = example_dir_factory(tmpdir_factory, name)
testdir = TD(request, factory)
expected = factory.expected
result = testdir.runpytest("--tb", "short")
assert not result.errlines
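    # Collect output starting at the second "====" banner: `lines` stays an int counter
    # until two banners have been seen, then becomes a list gathering the summary
    # section that is matched against `expected`.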
lines = 0
for line in result.outlines:
if line.startswith("=") and isinstance(lines, int):
if lines < 1:
lines += 1
else:
lines = []
if isinstance(lines, list):
lines.append(line)
matcher = LineMatcher(lines)
matcher.fnmatch_lines(expected.split("\n"))
@pytest.mark.async_timeout(4)
async it "cleans up tests properly on interrupt":
directory = os.path.join(this_dir, "interrupt_test")
expected_file = os.path.join(directory, "expected")
assert os.path.isfile(expected_file)
with open(expected_file, "r") as fle:
expected = fle.read().strip()
p = await asyncio.create_subprocess_exec(
shutil.which("pytest"), cwd=directory, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
)
await asyncio.sleep(2)
p.send_signal(signal.SIGINT)
await p.wait()
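    # Drop everything before the "collected ..." line so the comparison starts at the
    # same point regardless of any environment-specific pytest banner output.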
got = (await p.stdout.read()).decode().strip().split("\n")
while got and not got[0].startswith("collected"):
got.pop(0)
want = expected.strip().split("\n")
if len(got) != len(want):
print("\n".join(got))
        assert False, "got a different number of output lines than expected"
matcher = LineMatcher(got)
matcher.fnmatch_lines(want)
|
tests/test_examples.py
|
| 0.440469 | 0.316581 |
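# Module docstring inferred from the code below (argparse uses __doc__ as the CLI description).
"""Regrid ERA-Interim SST (tos) and sea-ice (sic) analyses onto a UM land-mask grid."""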
from coecms.regrid import esmf_generate_weights, regrid
import argparse
import xarray
import iris
from dask.diagnostics import ProgressBar
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--start-date', help='ISO-formatted start date')
parser.add_argument('--end-date', help='ISO-formatted end date')
parser.add_argument('--output', '-o', help='Output file name', required=True)
parser.add_argument('--target-mask', help='Target UM land mask', required=True)
parser.add_argument('--frequency', choices=[6, 12, 24],
type=int, help='Update frequency (hours)', default=24)
args = parser.parse_args()
# Read in the source mask
tos = xarray.open_mfdataset('/g/data1a/ub4/erai/netcdf/6hr/ocean/'
'oper_an_sfc/v01/tos/'
'tos_6hrs_ERAI_historical_an-sfc_2001*.nc',
coords='all')
src_mask = tos.tos.isel(time=0)
# Read in the target mask
mask_iris = iris.load_cube(args.target_mask, iris.AttributeConstraint(STASH='m01s00i030'))
mask_iris.coord('latitude').var_name = 'lat'
mask_iris.coord('longitude').var_name = 'lon'
tgt_mask = xarray.DataArray.from_iris(mask_iris).load()
tgt_mask = tgt_mask.where(tgt_mask == 0)
tgt_mask.lon.attrs['standard_name'] = 'longitude'
tgt_mask.lat.attrs['standard_name'] = 'latitude'
tgt_mask.lon.attrs['units'] = 'degrees_east'
tgt_mask.lat.attrs['units'] = 'degrees_north'
print(tgt_mask)
weights = esmf_generate_weights(src_mask, tgt_mask, method='patch')
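    # ESMF 'patch' interpolation weights are generated once between the masked source
    # and target grids and then reused for every time step regridded below.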
with ProgressBar():
# Read and slice the source data
tos = xarray.open_mfdataset('/g/data1a/ub4/erai/netcdf/6hr/ocean/'
'oper_an_sfc/v01/tos/'
'tos_6hrs_ERAI_historical_an-sfc_2001*.nc',
coords='all')
sic = xarray.open_mfdataset('/g/data1a/ub4/erai/netcdf/6hr/seaIce/'
'oper_an_sfc/v01/sic/'
'sic_6hrs_ERAI_historical_an-sfc_2001*.nc',
coords='all')
ds = xarray.Dataset({'tos': tos.tos, 'sic': sic.sic})
ds = ds.sel(time=slice(args.start_date, args.end_date))
print(ds)
newds = regrid(ds, weights=weights)
newds['time'] = newds['time'].astype('i4')
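        # Cast the time coordinate to 32-bit integers before writing, presumably for
        # downstream tools that expect integer time values.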
newds.to_netcdf(args.output)
if __name__ == '__main__':
main()
|
scripts/um/era_sst.py
|
| 0.569972 | 0.16944 |
import io
import unittest
from contextlib import redirect_stdout
import solution
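# Each test case builds a solution.SinglyLinkedList from the fixed inputs, calls
# solution.reversePrint on it, and compares the captured stdout with the values in
# reverse insertion order.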
class TestQ(unittest.TestCase):
def test_case_0(self):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
lns = [5, 3, 5]
inputs = [
[16, 12, 4, 2, 5],
[7, 3, 9],
[5, 1, 18, 3, 13],
]
for i, ln in enumerate(lns):
linked_list = solution.SinglyLinkedList()
for j in range(ln):
linked_list.insert_node(inputs[i][j])
solution.reversePrint(linked_list.head)
self.assertEqual(text_trap.getvalue(),
'5\n' +
'2\n' +
'4\n' +
'12\n' +
'16\n' +
'9\n' +
'3\n' +
'7\n' +
'13\n' +
'3\n' +
'18\n' +
'1\n' +
'5\n')
def test_case_1(self):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
lns = [3, 3, 4]
inputs = [
[11, 1, 17],
[12, 11, 15],
[5, 7, 15, 14],
]
for i, ln in enumerate(lns):
linked_list = solution.SinglyLinkedList()
for j in range(ln):
linked_list.insert_node(inputs[i][j])
solution.reversePrint(linked_list.head)
self.assertEqual(text_trap.getvalue(),
'17\n' +
'1\n' +
'11\n' +
'15\n' +
'11\n' +
'12\n' +
'14\n' +
'15\n' +
'7\n' +
'5\n')
if __name__ == '__main__':
unittest.main()
|
hackerrank/Data Structures/Print in Reverse/test.py
|
| 0.327453 | 0.263991 |
import classad
import htcondor
import logging
import os
import shutil
import subprocess
import time
from ornithology import *
from pathlib import Path
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
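# The fixtures below stand up a personal HTCondor pool restricted to TOKEN
# authentication; the tests then exercise pool-signing-key and token handling by
# swapping the password/key files in and out of place.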
@standup
def config_dir(test_dir):
config_dir = test_dir / "condor" / "config.d"
Path(config_dir).mkdir(parents=True, exist_ok=True)
return config_dir
@standup
def user_tokens_dir(test_dir):
user_tokens_dir = test_dir / "condor" / "user-tokens.d"
Path(user_tokens_dir).mkdir(parents=True, exist_ok=True)
return user_tokens_dir
@standup
def system_tokens_dir(test_dir):
system_tokens_dir = test_dir / "condor" / "system-tokens.d"
Path(system_tokens_dir).mkdir(parents=True, exist_ok=True)
return system_tokens_dir
@standup
def passwords_dir(test_dir):
passwords_dir = test_dir / "condor" / "password.d"
Path(passwords_dir).mkdir(parents=True, exist_ok=True)
return passwords_dir
@standup
def pool_signing_key(passwords_dir):
return passwords_dir / "POOL"
@standup
def password_file(test_dir):
return test_dir / "condor" / "password"
@standup
def offline_password_file(test_dir):
return test_dir / "condor" / "password-offline"
@standup
def wrong_password_file(test_dir):
wrong_password_file = open(test_dir / "condor" / "wrong-password", "w")
wrong_password_file.write("wrong password file\n")
wrong_password_file.close()
os.chmod(test_dir / "condor" / "wrong-password", 0o600)
yield test_dir / "condor" / "wrong-password"
@standup
def condor(test_dir, passwords_dir, system_tokens_dir, pool_signing_key, password_file, user_tokens_dir):
with Condor(
local_dir=test_dir / "condor",
clean_local_dir_before=False,
config={
"DAEMON_LIST" : "MASTER COLLECTOR",
"MASTER_DEBUG" : "D_SECURITY",
"TOOL_DEBUG" : "D_SECURITY",
"SHARED_PORT_PORT" : "0",
"LOCAL_CONFIG_DIR" : "$(LOCAL_DIR)/config.d",
"SEC_DEFAULT_AUTHENTICATION" : "REQUIRED",
"SEC_CLIENT_AUTHENTICATION" : "REQUIRED",
# we will enable this config statement *after* condor starts up
#"SEC_DEFAULT_AUTHENTICATION_METHODS" : "TOKEN",
"SEC_PASSWORD_DIRECTORY" : passwords_dir,
"SEC_TOKEN_SYSTEM_DIRECTORY" : system_tokens_dir,
"SEC_TOKEN_POOL_SIGNING_KEY_FILE" : pool_signing_key,
"TOOL.SEC_TOKEN_POOL_SIGNING_KEY_FILE" : password_file,
"SEC_TOKEN_DIRECTORY" : user_tokens_dir,
# FIXME: I want there to be no permissions in the security system
# other than condor_pool@*/* and administrator@domain/*. Get ZKM
# to review/test these settings for that purpose.
"ALLOW_ADMINISTRATOR" : "condor_pool@*/*, administrator@domain/*",
"ALLOW_OWNER" : "condor_pool@*/*, administrator@domain/*",
"ALLOW_CONFIG" : "condor_pool@*/*, administrator@domain/*",
"ALLOW_DAEMON" : "condor_pool@*/*, administrator@domain/*",
"ALLOW_NEGOTIATOR" : "condor_pool@*/*, administrator@domain/*",
"DENY_ALL" : "*",
}
) as condor:
yield condor
# create a local config file that disables all auth methods other than TOKEN
@action
def token_config_file(condor, config_dir):
token_config_file = open(config_dir / "00token-config", "w")
token_config_file.write("SEC_DEFAULT_AUTHENTICATION_METHODS = TOKEN\n")
token_config_file.close()
os.chmod(config_dir / "00token-config", 0o600)
yield config_dir / "00token-config"
# reconfig the daemons so they pick up the changed config and the generated POOL key
# reconfig is a bit async, so we sleep 5 to give it time to take effect
@action
def reconfigure_daemons(condor, token_config_file):
condor.run_command(["condor_reconfig", "-all"], timeout=20)
time.sleep(5)
@action
def token_list(condor):
cmd = condor.run_command(["condor_token_list"], timeout=20)
assert cmd.returncode == 0
return cmd.stdout
# copy the POOL key to the filename that tools use
@action
def copy_pool_key(condor, reconfigure_daemons, pool_signing_key, password_file):
shutil.copyfile(pool_signing_key, password_file)
os.chmod(password_file, 0o600)
class TestAuthProtocolToken:
def test_if_pool_signing_key_generated(self, condor, pool_signing_key):
assert os.path.isfile(pool_signing_key)
def test_generated_token_signing_key(self, condor, copy_pool_key):
cmd = condor.run_command(["condor_ping", "-type", "collector", "-table", "ALL", "-debug"], timeout=20)
assert cmd.returncode == 0
def test_move_password_removes_access(self, condor, password_file, offline_password_file):
os.rename(password_file, offline_password_file)
cmd = condor.run_command(["condor_ping", "-type", "collector", "-table", "ALL", "-debug"], timeout=20)
assert cmd.returncode == 1
def test_wrong_master_password_fails(self, condor, password_file, wrong_password_file):
os.rename(wrong_password_file, password_file)
cmd = condor.run_command(["condor_ping", "-type", "collector", "-table", "ALL", "-debug"], timeout=20)
assert cmd.returncode == 1
def test_correct_master_password_succeeds(self, condor, password_file, wrong_password_file, offline_password_file):
# Switch back to the correct password
os.rename(password_file, wrong_password_file)
os.rename(offline_password_file, password_file)
# Verify condor_ping
cmd = condor.run_command(["condor_ping", "-type", "collector", "-table", "ALL", "-debug"], timeout=20)
assert cmd.returncode == 0
def test_create_valid_token_authorized_user(self, condor):
cmd = condor.run_command(["condor_token_create", "-identity", "administrator@domain", "-token", "tokenfile"], timeout=20)
assert cmd.returncode == 0
def test_command_succeeds_with_token_but_no_common_pool_key(self, condor, password_file, offline_password_file, wrong_password_file):
# Switch back to wrong POOL signing key
os.rename(password_file, offline_password_file)
os.rename(wrong_password_file, password_file)
# Verify condor_ping
cmd = condor.run_command(["condor_ping", "-type", "master", "-table", "ALL"], timeout=20)
assert cmd.returncode == 0
def test_list_tokens(self, token_list):
assert token_list
def test_ping_fails_after_deleting_authorized_token(self, condor, token_list):
        # strip any trailing newline from the listing before taking the token's file path
        token_file = token_list.strip().split(' ')[-1]
os.unlink(token_file)
cmd = condor.run_command(["condor_ping", "-type", "master", "-table", "ALL"], timeout=20)
assert cmd.returncode == 1
def test_create_valid_token_unauthorized_user(self, condor, password_file, offline_password_file, wrong_password_file):
# Switch back to correct POOL signing key
os.rename(password_file, wrong_password_file)
os.rename(offline_password_file, password_file)
# Verify condor_token_create
cmd = condor.run_command(["condor_token_create", "-identity", "test@trust-domain", "-token", "tokenfile"], timeout=20)
assert cmd.returncode == 0
def test_ping_fails_with_unauthorized_identity(self, condor, password_file, offline_password_file, wrong_password_file):
# Switch back to wrong POOL signing key
os.rename(password_file, offline_password_file)
os.rename(wrong_password_file, password_file)
# Verify condor_ping
cmd = condor.run_command(["condor_ping", "-type", "master", "-table", "ALL"], timeout=20)
assert cmd.returncode == 1
def test_condor_fetch(self, condor, password_file, offline_password_file, wrong_password_file):
# Switch back to correct POOL signing key
os.rename(password_file, wrong_password_file)
os.rename(offline_password_file, password_file)
# Verify condor_token_fetch
cmd = condor.run_command(["condor_token_fetch", "-type", "master", "-token", "tokenfile"], timeout=20)
assert cmd.returncode == 0
def test_ping_with_fetched_token(self, condor, password_file, offline_password_file, wrong_password_file):
# Switch back to wrong POOL signing key
os.rename(password_file, offline_password_file)
os.rename(wrong_password_file, password_file)
# Verify condor_ping
cmd = condor.run_command(["condor_ping", "-type", "master", "-table", "ALL"], timeout=20)
assert cmd.returncode == 1
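    # Hypothetical helper (illustrative sketch only; the tests above do not call it):
    # several tests rotate signing-key files with paired os.rename calls, and a
    # small utility like this would make that three-way swap explicit.
    @staticmethod
    def _rotate_keys(active, parked, replacement):
        # park the currently active key file, then promote the replacement into place
        os.rename(active, parked)
        os.rename(replacement, active)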
|
src/condor_tests/test_auth_protocol_token.py
|
| 0.282097 | 0.218878 |
import os
import glob
import sys
import functools
import jsonpickle
from collections import OrderedDict
from Orange.widgets import widget, gui, settings
import Orange.data
from Orange.data.io import FileFormat
from DockerClient import DockerClient
from BwBase import OWBwBWidget, ConnectionDict, BwbGuiElements, getIconName, getJsonName
from PyQt5 import QtWidgets, QtGui
class OWjupyter_sleuth(OWBwBWidget):
name = "jupyter_sleuth"
description = "Base installation of Jupyter"
priority = 103
icon = getIconName(__file__, "jupyter-sleuth.png")
want_main_area = False
docker_image_name = "biodepot/sleuth"
docker_image_tag = (
"0.30.0__ubuntu-16.04__r-3.4.4__jupyter-5.6.0__firefox-61.0.1__082318"
)
inputs = [
("InputDir", str, "handleInputsInputDir"),
("Trigger", str, "handleInputsTrigger"),
("startingNotebook", str, "handleInputsstartingNotebook"),
]
outputs = [("OutputDir", str), ("outputNotebook", str)]
pset = functools.partial(settings.Setting, schema_only=True)
runMode = pset(0)
exportGraphics = pset(False)
runTriggers = pset([])
triggerReady = pset({})
inputConnectionsStore = pset({})
optionsChecked = pset({})
subcommand = pset("notebook")
execute = pset(False)
startingNotebook = pset(None)
type = pset("notebook")
outputNotebook = pset(None)
debug = pset(False)
generateConfig = pset(False)
autoyes = pset(True)
allowRoot = pset(True)
loglevel = pset("30")
ip = pset("0.0.0.0")
port = pset(8888)
config = pset(None)
transport = pset(None)
keyfile = pset(None)
certfile = pset(None)
clientca = pset(None)
nomathjax = pset(False)
browser = pset(None)
def __init__(self):
super().__init__(self.docker_image_name, self.docker_image_tag)
        with open(getJsonName(__file__, "jupyter_sleuth")) as f:
            self.data = jsonpickle.decode(f.read())
self.initVolumes()
self.inputConnections = ConnectionDict(self.inputConnectionsStore)
self.drawGUI()
    def handleInputsInputDir(self, value, *args):
        if args and len(args) > 0:
            self.handleInputs("InputDir", value, args[0][0], test=args[0][3])
        else:
            # no sender metadata supplied: still route under this widget's own connection name
            self.handleInputs("InputDir", value, None)
    def handleInputsTrigger(self, value, *args):
        if args and len(args) > 0:
            self.handleInputs("Trigger", value, args[0][0], test=args[0][3])
        else:
            self.handleInputs("Trigger", value, None)
    def handleInputsstartingNotebook(self, value, *args):
        if args and len(args) > 0:
            self.handleInputs("startingNotebook", value, args[0][0], test=args[0][3])
        else:
            self.handleInputs("startingNotebook", value, None)
def handleOutputs(self):
outputValue = None
if hasattr(self, "OutputDir"):
outputValue = getattr(self, "OutputDir")
self.send("OutputDir", outputValue)
outputValue = None
if hasattr(self, "outputNotebook"):
outputValue = getattr(self, "outputNotebook")
self.send("outputNotebook", outputValue)
|
biodepot/Jupyter/OWjupyter_sleuth.py
|
| 0.158956 | 0.099426 |