code (string, lengths 2k–1.04M) | repo_path (string, lengths 5–517) | parsed_code (string, lengths 0–1.04M) | quality_prob (float64, 0.02–0.95) | learning_prob (float64, 0.02–0.93) |
---|---|---|---|---|
from __future__ import absolute_import
from . import internals
from . import utils
class DataType:
"""Contains the possible data types which can be represented in an Element.
Class attributes:
BOOL Boolean
CHAR Char
BYTE Unsigned 8 bit value
INT32 32 bit Integer
INT64 64 bit Integer
FLOAT32 32 bit Floating point
FLOAT64 64 bit Floating point
STRING ASCIIZ string
BYTEARRAY Opaque binary data
DATE Date
TIME Timestamp
DECIMAL Currently Unsupported
DATETIME Date and time
ENUMERATION An opaque enumeration
SEQUENCE Sequence type
CHOICE Choice type
CORRELATION_ID Used for some internal messages
"""
BOOL = internals.DATATYPE_BOOL
"""Boolean"""
CHAR = internals.DATATYPE_CHAR
"""Char"""
BYTE = internals.DATATYPE_BYTE
"""Unsigned 8 bit value"""
INT32 = internals.DATATYPE_INT32
"""32 bit Integer"""
INT64 = internals.DATATYPE_INT64
"""64 bit Integer"""
FLOAT32 = internals.DATATYPE_FLOAT32
"""32 bit Floating point"""
FLOAT64 = internals.DATATYPE_FLOAT64
"""64 bit Floating point"""
STRING = internals.DATATYPE_STRING
"""ASCIIZ string"""
BYTEARRAY = internals.DATATYPE_BYTEARRAY
"""Opaque binary data"""
DATE = internals.DATATYPE_DATE
"""Date"""
TIME = internals.DATATYPE_TIME
"""Timestamp"""
DECIMAL = internals.DATATYPE_DECIMAL
"""Currently Unsupported"""
DATETIME = internals.DATATYPE_DATETIME
"""Date and time"""
ENUMERATION = internals.DATATYPE_ENUMERATION
"""An opaque enumeration"""
SEQUENCE = internals.DATATYPE_SEQUENCE
"""Sequence type"""
CHOICE = internals.DATATYPE_CHOICE
"""Choice type"""
CORRELATION_ID = internals.DATATYPE_CORRELATION_ID
"""Used for some internal messages"""
# Protect enumeration constant(s) defined in this class and in classes
# derived from this class from changes:
__metaclass__ = utils.MetaClassForClassesWithEnums
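# Illustrative usage sketch (not part of the original module): callers typically
# branch on the type an Element reports, e.g.
#   if element.datatype() == DataType.FLOAT64:
#       value = element.getValueAsFloat()
# where `element` is a hypothetical blpapi Element instance.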
__copyright__ = """
Copyright 2012. Bloomberg Finance L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above
copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
|
Mayank/blpapi_python3.5.5/build/lib.linux-i686-2.7/blpapi/datatype.py
|
from __future__ import absolute_import
from . import internals
from . import utils
class DataType:
"""Contains the possible data types which can be represented in an Element.
Class attributes:
BOOL Boolean
CHAR Char
BYTE Unsigned 8 bit value
INT32 32 bit Integer
INT64 64 bit Integer
FLOAT32 32 bit Floating point
FLOAT64 64 bit Floating point
STRING ASCIIZ string
BYTEARRAY Opaque binary data
DATE Date
TIME Timestamp
DECIMAL Currently Unsupported
DATETIME Date and time
ENUMERATION An opaque enumeration
SEQUENCE Sequence type
CHOICE Choice type
CORRELATION_ID Used for some internal messages
"""
BOOL = internals.DATATYPE_BOOL
"""Boolean"""
CHAR = internals.DATATYPE_CHAR
"""Char"""
BYTE = internals.DATATYPE_BYTE
"""Unsigned 8 bit value"""
INT32 = internals.DATATYPE_INT32
"""32 bit Integer"""
INT64 = internals.DATATYPE_INT64
"""64 bit Integer"""
FLOAT32 = internals.DATATYPE_FLOAT32
"""32 bit Floating point"""
FLOAT64 = internals.DATATYPE_FLOAT64
"""64 bit Floating point"""
STRING = internals.DATATYPE_STRING
"""ASCIIZ string"""
BYTEARRAY = internals.DATATYPE_BYTEARRAY
"""Opaque binary data"""
DATE = internals.DATATYPE_DATE
"""Date"""
TIME = internals.DATATYPE_TIME
"""Timestamp"""
DECIMAL = internals.DATATYPE_DECIMAL
"""Currently Unsupported"""
DATETIME = internals.DATATYPE_DATETIME
"""Date and time"""
ENUMERATION = internals.DATATYPE_ENUMERATION
"""An opaque enumeration"""
SEQUENCE = internals.DATATYPE_SEQUENCE
"""Sequence type"""
CHOICE = internals.DATATYPE_CHOICE
"""Choice type"""
CORRELATION_ID = internals.DATATYPE_CORRELATION_ID
"""Used for some internal messages"""
# Protect enumeration constant(s) defined in this class and in classes
# derived from this class from changes:
__metaclass__ = utils.MetaClassForClassesWithEnums
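# Illustrative usage sketch (not part of the original module): callers typically
# branch on the type an Element reports, e.g.
#   if element.datatype() == DataType.FLOAT64:
#       value = element.getValueAsFloat()
# where `element` is a hypothetical blpapi Element instance.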
__copyright__ = """
Copyright 2012. Bloomberg Finance L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above
copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
| 0.78968 | 0.21211 |
import json
import os
import shutil
import tempfile
import traceback
from io import BytesIO
from typing import List
from zipfile import ZipFile
import nvflare.fuel.hci.file_transfer_defs as ftd
from nvflare.apis.job_def import JobMetaKey
from nvflare.apis.job_def_manager_spec import JobDefManagerSpec
from nvflare.fuel.hci.base64_utils import (
b64str_to_binary_file,
b64str_to_bytes,
b64str_to_text_file,
binary_file_to_b64str,
bytes_to_b64str,
text_file_to_b64str,
)
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.hci.reg import CommandModule, CommandModuleSpec, CommandSpec
from nvflare.fuel.hci.server.constants import ConnProps
from nvflare.fuel.hci.zip_utils import convert_legacy_zip, unzip_all_from_bytes, zip_directory_to_bytes
from nvflare.private.fed.server.cmd_utils import CommandUtil
from nvflare.private.fed.server.job_meta_validator import JobMetaValidator
from nvflare.security.security import Action
META_FILE = "meta.json"
class FileTransferModule(CommandModule, CommandUtil):
def __init__(self, upload_dir: str, download_dir: str, upload_folder_authz_func=None):
"""Command module for file transfers.
Args:
upload_dir: server-side directory into which uploaded files and folders are written
download_dir: server-side directory from which files and folders are served for download
upload_folder_authz_func: optional callback used to authorize an uploaded folder before it is accepted
"""
if not os.path.isdir(upload_dir):
raise ValueError("upload_dir {} is not a valid dir".format(upload_dir))
if not os.path.isdir(download_dir):
raise ValueError("download_dir {} is not a valid dir".format(download_dir))
self.upload_dir = upload_dir
self.download_dir = download_dir
self.upload_folder_authz_func = upload_folder_authz_func
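# Illustrative wiring sketch (assumed, not taken from this file): the module is
# constructed with two existing directories and handed to the HCI server's command
# register, e.g.
#   module = FileTransferModule(upload_dir="/tmp/upload", download_dir="/tmp/download")
#   cmd_register.register_module(module)  # `cmd_register` is a hypothetical CommandRegister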
def get_spec(self):
return CommandModuleSpec(
name=ftd.SERVER_MODULE_NAME,
cmd_specs=[
CommandSpec(
name=ftd.SERVER_CMD_UPLOAD_TEXT,
description="upload one or more text files",
usage="_upload name1 data1 name2 data2 ...",
handler_func=self.upload_text_file,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_DOWNLOAD_TEXT,
description="download one or more text files",
usage="download file_name ...",
handler_func=self.download_text_file,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_UPLOAD_BINARY,
description="upload one or more binary files",
usage="upload name1 data1 name2 data2 ...",
handler_func=self.upload_binary_file,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_DOWNLOAD_BINARY,
description="download one or more binary files",
usage="download file_name ...",
handler_func=self.download_binary_file,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_UPLOAD_FOLDER,
description="upload a folder from client",
usage="upload_folder folder_name",
handler_func=self.upload_folder,
authz_func=self._authorize_upload_folder,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_DOWNLOAD_FOLDER,
description="download a folder to client",
usage="download folder_name",
handler_func=self.download_folder,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_SUBMIT_JOB,
description="Submit a job",
usage="submit_job job_folder",
handler_func=self.submit_job,
authz_func=self._authorize_submission,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_DOWNLOAD_JOB,
description="download a job",
usage="download_job job_id",
handler_func=self.download_job,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_INFO,
description="show info",
usage="info",
handler_func=self.info,
visible=False,
),
],
)
def upload_file(self, conn: Connection, args: List[str], str_to_file_func):
if len(args) < 3:
conn.append_error("syntax error: missing files")
return
if len(args) % 2 != 1:
conn.append_error("syntax error: file name/data not paired")
return
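# args[0] is the command name itself; the remaining arguments arrive as
# (file_name, base64-encoded data) pairs, which is why the loop below advances by two.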
table = conn.append_table(["file", "size"])
i = 1
while i < len(args):
name = args[i]
data = args[i + 1]
i += 2
full_path = os.path.join(self.upload_dir, name)
num_bytes = str_to_file_func(b64str=data, file_name=full_path)
table.add_row([name, str(num_bytes)])
def upload_text_file(self, conn: Connection, args: List[str]):
self.upload_file(conn, args, b64str_to_text_file)
def upload_binary_file(self, conn: Connection, args: List[str]):
self.upload_file(conn, args, b64str_to_binary_file)
def download_file(self, conn: Connection, args: List[str], file_to_str_func):
if len(args) < 2:
conn.append_error("syntax error: missing file names")
return
table = conn.append_table(["name", "data"])
for i in range(1, len(args)):
file_name = args[i]
full_path = os.path.join(self.download_dir, file_name)
if not os.path.exists(full_path):
conn.append_error("no such file: {}".format(file_name))
continue
if not os.path.isfile(full_path):
conn.append_error("not a file: {}".format(file_name))
continue
encoded_str = file_to_str_func(full_path)
table.add_row([file_name, encoded_str])
def download_text_file(self, conn: Connection, args: List[str]):
self.download_file(conn, args, text_file_to_b64str)
def download_binary_file(self, conn: Connection, args: List[str]):
self.download_file(conn, args, binary_file_to_b64str)
def _authorize_upload_folder(self, conn: Connection, args: List[str]):
if len(args) != 3:
conn.append_error("syntax error: require data")
return False, None
folder_name = args[1]
zip_b64str = args[2]
tmp_dir = tempfile.mkdtemp()
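# The folder is first unzipped into a temporary directory so the authorization
# callback can inspect its contents before anything is written to upload_dir.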
try:
data_bytes = b64str_to_bytes(zip_b64str)
unzip_all_from_bytes(data_bytes, tmp_dir)
tmp_folder_path = os.path.join(tmp_dir, folder_name)
if not os.path.isdir(tmp_folder_path):
conn.append_error("logic error: unzip failed to create folder {}".format(tmp_folder_path))
return False, None
if self.upload_folder_authz_func:
err, authz_ctx = self.upload_folder_authz_func(tmp_folder_path)
if err is None:
err = ""
elif not isinstance(err, str):
# the validator did not follow the expected signature
# assume the validation failed
err = "folder validation failed"
if len(err) > 0:
conn.append_error(err)
return False, None
else:
return True, authz_ctx
else:
return True, None
except BaseException:
traceback.print_exc()
conn.append_error("exception occurred")
return False, None
finally:
shutil.rmtree(tmp_dir)
def upload_folder(self, conn: Connection, args: List[str]):
folder_name = args[1]
zip_b64str = args[2]
folder_path = os.path.join(self.upload_dir, folder_name)
if os.path.exists(folder_path):
shutil.rmtree(folder_path)
data_bytes = b64str_to_bytes(zip_b64str)
unzip_all_from_bytes(data_bytes, self.upload_dir)
conn.set_prop("upload_folder_path", folder_path)
conn.append_string("Created folder {}".format(folder_path))
def download_folder(self, conn: Connection, args: List[str]):
if len(args) != 2:
conn.append_error("syntax error: require folder name")
return
folder_name = args[1]
full_path = os.path.join(self.download_dir, folder_name)
if not os.path.exists(full_path):
conn.append_error("no such folder: {}".format(full_path))
return
if not os.path.isdir(full_path):
conn.append_error("'{}' is not a valid folder".format(full_path))
return
try:
data = zip_directory_to_bytes(self.download_dir, folder_name)
b64str = bytes_to_b64str(data)
conn.append_string(b64str)
except BaseException:
traceback.print_exc()
conn.append_error("exception occurred")
def submit_job(self, conn: Connection, args: List[str]):
folder_name = args[1]
data_bytes = conn.get_prop(ConnProps.JOB_DATA)
engine = conn.app_ctx
try:
with engine.new_context() as fl_ctx:
job_validator = JobMetaValidator()
valid, error, meta = job_validator.validate(folder_name, data_bytes)
if not valid:
conn.append_error(error)
return
job_def_manager = engine.job_def_manager
if not isinstance(job_def_manager, JobDefManagerSpec):
raise TypeError(
f"job_def_manager in engine is not of type JobDefManagerSpec, but got {type(job_def_manager)}"
)
meta = job_def_manager.create(meta, data_bytes, fl_ctx)
conn.append_string("Submitted job: {}".format(meta.get(JobMetaKey.JOB_ID)))
except Exception as e:
conn.append_error("Exception occurred trying to submit job: " + str(e))
return
conn.append_success("")
def download_job(self, conn: Connection, args: List[str]):
if len(args) != 2:
conn.append_error("syntax error: job ID required")
return
job_id = args[1]
engine = conn.app_ctx
try:
job_def_manager = engine.job_def_manager
if not isinstance(job_def_manager, JobDefManagerSpec):
raise TypeError(
f"job_def_manager in engine is not of type JobDefManagerSpec, but got {type(job_def_manager)}"
)
with engine.new_context() as fl_ctx:
data_bytes = job_def_manager.get_content(job_id, fl_ctx)
job_id_dir = os.path.join(self.download_dir, job_id)
if os.path.exists(job_id_dir):
shutil.rmtree(job_id_dir)
os.mkdir(job_id_dir)
unzip_all_from_bytes(data_bytes, job_id_dir)
except Exception as e:
conn.append_error("Exception occurred trying to get job from store: " + str(e))
return
try:
data = zip_directory_to_bytes(self.download_dir, job_id)
b64str = bytes_to_b64str(data)
conn.append_string(b64str)
except BaseException:
traceback.print_exc()
conn.append_error("Exception occurred during attempt to zip data to send for job: {}".format(job_id))
def info(self, conn: Connection, args: List[str]):
conn.append_string("Server Upload Destination: {}".format(self.upload_dir))
conn.append_string("Server Download Source: {}".format(self.download_dir))
def _authorize_submission(self, conn: Connection, args: List[str]):
folder_name = args[1]
zip_b64str = args[2]
data_bytes = convert_legacy_zip(b64str_to_bytes(zip_b64str))
conn.set_prop(ConnProps.JOB_DATA, data_bytes)
meta_file = f"{folder_name}/{META_FILE}"
with ZipFile(BytesIO(data_bytes), "r") as zf:
meta_data = zf.read(meta_file)
meta = json.loads(meta_data)
conn.set_prop(ConnProps.JOB_META, meta)
return self.authorize_job_meta(conn, meta, [Action.TRAIN])
|
nvflare/fuel/hci/server/file_transfer.py
|
import json
import os
import shutil
import tempfile
import traceback
from io import BytesIO
from typing import List
from zipfile import ZipFile
import nvflare.fuel.hci.file_transfer_defs as ftd
from nvflare.apis.job_def import JobMetaKey
from nvflare.apis.job_def_manager_spec import JobDefManagerSpec
from nvflare.fuel.hci.base64_utils import (
b64str_to_binary_file,
b64str_to_bytes,
b64str_to_text_file,
binary_file_to_b64str,
bytes_to_b64str,
text_file_to_b64str,
)
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.hci.reg import CommandModule, CommandModuleSpec, CommandSpec
from nvflare.fuel.hci.server.constants import ConnProps
from nvflare.fuel.hci.zip_utils import convert_legacy_zip, unzip_all_from_bytes, zip_directory_to_bytes
from nvflare.private.fed.server.cmd_utils import CommandUtil
from nvflare.private.fed.server.job_meta_validator import JobMetaValidator
from nvflare.security.security import Action
META_FILE = "meta.json"
class FileTransferModule(CommandModule, CommandUtil):
def __init__(self, upload_dir: str, download_dir: str, upload_folder_authz_func=None):
"""Command module for file transfers.
Args:
upload_dir: server-side directory into which uploaded files and folders are written
download_dir: server-side directory from which files and folders are served for download
upload_folder_authz_func: optional callback used to authorize an uploaded folder before it is accepted
"""
if not os.path.isdir(upload_dir):
raise ValueError("upload_dir {} is not a valid dir".format(upload_dir))
if not os.path.isdir(download_dir):
raise ValueError("download_dir {} is not a valid dir".format(download_dir))
self.upload_dir = upload_dir
self.download_dir = download_dir
self.upload_folder_authz_func = upload_folder_authz_func
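# Illustrative wiring sketch (assumed, not taken from this file): the module is
# constructed with two existing directories and handed to the HCI server's command
# register, e.g.
#   module = FileTransferModule(upload_dir="/tmp/upload", download_dir="/tmp/download")
#   cmd_register.register_module(module)  # `cmd_register` is a hypothetical CommandRegister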
def get_spec(self):
return CommandModuleSpec(
name=ftd.SERVER_MODULE_NAME,
cmd_specs=[
CommandSpec(
name=ftd.SERVER_CMD_UPLOAD_TEXT,
description="upload one or more text files",
usage="_upload name1 data1 name2 data2 ...",
handler_func=self.upload_text_file,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_DOWNLOAD_TEXT,
description="download one or more text files",
usage="download file_name ...",
handler_func=self.download_text_file,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_UPLOAD_BINARY,
description="upload one or more binary files",
usage="upload name1 data1 name2 data2 ...",
handler_func=self.upload_binary_file,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_DOWNLOAD_BINARY,
description="download one or more binary files",
usage="download file_name ...",
handler_func=self.download_binary_file,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_UPLOAD_FOLDER,
description="upload a folder from client",
usage="upload_folder folder_name",
handler_func=self.upload_folder,
authz_func=self._authorize_upload_folder,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_DOWNLOAD_FOLDER,
description="download a folder to client",
usage="download folder_name",
handler_func=self.download_folder,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_SUBMIT_JOB,
description="Submit a job",
usage="submit_job job_folder",
handler_func=self.submit_job,
authz_func=self._authorize_submission,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_DOWNLOAD_JOB,
description="download a job",
usage="download_job job_id",
handler_func=self.download_job,
visible=False,
),
CommandSpec(
name=ftd.SERVER_CMD_INFO,
description="show info",
usage="info",
handler_func=self.info,
visible=False,
),
],
)
def upload_file(self, conn: Connection, args: List[str], str_to_file_func):
if len(args) < 3:
conn.append_error("syntax error: missing files")
return
if len(args) % 2 != 1:
conn.append_error("syntax error: file name/data not paired")
return
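# args[0] is the command name itself; the remaining arguments arrive as
# (file_name, base64-encoded data) pairs, which is why the loop below advances by two.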
table = conn.append_table(["file", "size"])
i = 1
while i < len(args):
name = args[i]
data = args[i + 1]
i += 2
full_path = os.path.join(self.upload_dir, name)
num_bytes = str_to_file_func(b64str=data, file_name=full_path)
table.add_row([name, str(num_bytes)])
def upload_text_file(self, conn: Connection, args: List[str]):
self.upload_file(conn, args, b64str_to_text_file)
def upload_binary_file(self, conn: Connection, args: List[str]):
self.upload_file(conn, args, b64str_to_binary_file)
def download_file(self, conn: Connection, args: List[str], file_to_str_func):
if len(args) < 2:
conn.append_error("syntax error: missing file names")
return
table = conn.append_table(["name", "data"])
for i in range(1, len(args)):
file_name = args[i]
full_path = os.path.join(self.download_dir, file_name)
if not os.path.exists(full_path):
conn.append_error("no such file: {}".format(file_name))
continue
if not os.path.isfile(full_path):
conn.append_error("not a file: {}".format(file_name))
continue
encoded_str = file_to_str_func(full_path)
table.add_row([file_name, encoded_str])
def download_text_file(self, conn: Connection, args: List[str]):
self.download_file(conn, args, text_file_to_b64str)
def download_binary_file(self, conn: Connection, args: List[str]):
self.download_file(conn, args, binary_file_to_b64str)
def _authorize_upload_folder(self, conn: Connection, args: List[str]):
if len(args) != 3:
conn.append_error("syntax error: require data")
return False, None
folder_name = args[1]
zip_b64str = args[2]
tmp_dir = tempfile.mkdtemp()
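# The folder is first unzipped into a temporary directory so the authorization
# callback can inspect its contents before anything is written to upload_dir.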
try:
data_bytes = b64str_to_bytes(zip_b64str)
unzip_all_from_bytes(data_bytes, tmp_dir)
tmp_folder_path = os.path.join(tmp_dir, folder_name)
if not os.path.isdir(tmp_folder_path):
conn.append_error("logic error: unzip failed to create folder {}".format(tmp_folder_path))
return False, None
if self.upload_folder_authz_func:
err, authz_ctx = self.upload_folder_authz_func(tmp_folder_path)
if err is None:
err = ""
elif not isinstance(err, str):
# the validator did not follow the expected signature
# assume the validation failed
err = "folder validation failed"
if len(err) > 0:
conn.append_error(err)
return False, None
else:
return True, authz_ctx
else:
return True, None
except BaseException:
traceback.print_exc()
conn.append_error("exception occurred")
return False, None
finally:
shutil.rmtree(tmp_dir)
def upload_folder(self, conn: Connection, args: List[str]):
folder_name = args[1]
zip_b64str = args[2]
folder_path = os.path.join(self.upload_dir, folder_name)
if os.path.exists(folder_path):
shutil.rmtree(folder_path)
data_bytes = b64str_to_bytes(zip_b64str)
unzip_all_from_bytes(data_bytes, self.upload_dir)
conn.set_prop("upload_folder_path", folder_path)
conn.append_string("Created folder {}".format(folder_path))
def download_folder(self, conn: Connection, args: List[str]):
if len(args) != 2:
conn.append_error("syntax error: require folder name")
return
folder_name = args[1]
full_path = os.path.join(self.download_dir, folder_name)
if not os.path.exists(full_path):
conn.append_error("no such folder: {}".format(full_path))
return
if not os.path.isdir(full_path):
conn.append_error("'{}' is not a valid folder".format(full_path))
return
try:
data = zip_directory_to_bytes(self.download_dir, folder_name)
b64str = bytes_to_b64str(data)
conn.append_string(b64str)
except BaseException:
traceback.print_exc()
conn.append_error("exception occurred")
def submit_job(self, conn: Connection, args: List[str]):
folder_name = args[1]
data_bytes = conn.get_prop(ConnProps.JOB_DATA)
engine = conn.app_ctx
try:
with engine.new_context() as fl_ctx:
job_validator = JobMetaValidator()
valid, error, meta = job_validator.validate(folder_name, data_bytes)
if not valid:
conn.append_error(error)
return
job_def_manager = engine.job_def_manager
if not isinstance(job_def_manager, JobDefManagerSpec):
raise TypeError(
f"job_def_manager in engine is not of type JobDefManagerSpec, but got {type(job_def_manager)}"
)
meta = job_def_manager.create(meta, data_bytes, fl_ctx)
conn.append_string("Submitted job: {}".format(meta.get(JobMetaKey.JOB_ID)))
except Exception as e:
conn.append_error("Exception occurred trying to submit job: " + str(e))
return
conn.append_success("")
def download_job(self, conn: Connection, args: List[str]):
if len(args) != 2:
conn.append_error("syntax error: job ID required")
return
job_id = args[1]
engine = conn.app_ctx
try:
job_def_manager = engine.job_def_manager
if not isinstance(job_def_manager, JobDefManagerSpec):
raise TypeError(
f"job_def_manager in engine is not of type JobDefManagerSpec, but got {type(job_def_manager)}"
)
with engine.new_context() as fl_ctx:
data_bytes = job_def_manager.get_content(job_id, fl_ctx)
job_id_dir = os.path.join(self.download_dir, job_id)
if os.path.exists(job_id_dir):
shutil.rmtree(job_id_dir)
os.mkdir(job_id_dir)
unzip_all_from_bytes(data_bytes, job_id_dir)
except Exception as e:
conn.append_error("Exception occurred trying to get job from store: " + str(e))
return
try:
data = zip_directory_to_bytes(self.download_dir, job_id)
b64str = bytes_to_b64str(data)
conn.append_string(b64str)
except BaseException:
traceback.print_exc()
conn.append_error("Exception occurred during attempt to zip data to send for job: {}".format(job_id))
def info(self, conn: Connection, args: List[str]):
conn.append_string("Server Upload Destination: {}".format(self.upload_dir))
conn.append_string("Server Download Source: {}".format(self.download_dir))
def _authorize_submission(self, conn: Connection, args: List[str]):
folder_name = args[1]
zip_b64str = args[2]
data_bytes = convert_legacy_zip(b64str_to_bytes(zip_b64str))
conn.set_prop(ConnProps.JOB_DATA, data_bytes)
meta_file = f"{folder_name}/{META_FILE}"
with ZipFile(BytesIO(data_bytes), "r") as zf:
meta_data = zf.read(meta_file)
meta = json.loads(meta_data)
conn.set_prop(ConnProps.JOB_META, meta)
return self.authorize_job_meta(conn, meta, [Action.TRAIN])
| 0.477554 | 0.118436 |
from kiali_qe.entities import EntityBase
from kiali_qe.components.enums import MeshWideTLSType
class Overview(EntityBase):
def __init__(self, overview_type, namespace, items,
config_status=None,
healthy=0, unhealthy=0, degraded=0, na=0, idle=0,
tls_type=MeshWideTLSType.DISABLED,
labels={}):
self.overview_type = overview_type
self.namespace = namespace
self.items = items
self.config_status = config_status
self.unhealthy = unhealthy
self.healthy = healthy
self.degraded = degraded
self.na = na
self.idle = idle
self.tls_type = tls_type
self.labels = labels
def __str__(self):
return 'overview_type:{}, namespace:{}, items:{}, \
healthy:{}, unhealthy:{}, degraded:{}, N/A:{}, Idle:{}, TLS:{}'.format(
self.overview_type, self.namespace, self.items,
self.healthy, self.unhealthy, self.degraded, self.na, self.idle,
self.tls_type)
def __repr__(self):
return "{}({}, {}, {}, {}, {}, {}, {}, {}, {})".format(
type(self).__name__, repr(self.overview_type), repr(self.namespace), repr(self.items),
repr(self.healthy), repr(self.unhealthy), repr(self.degraded), repr(self.na),
repr(self.idle),
repr(self.tls_type))
def __eq__(self, other):
return self.is_equal(other, advanced_check=True)
def __hash__(self):
return (hash(self.namespace) ^ hash(self.overview_type))
def is_equal(self, other, advanced_check=True):
# basic check
if not isinstance(other, Overview):
return False
if self.overview_type != other.overview_type:
return False
if self.namespace != other.namespace:
return False
if self.items != other.items:
return False
# advanced check
if advanced_check:
# @TODO performance issue between UI and REST
'''if self.healthy != other.healthy:
return False
if self.unhealthy != other.unhealthy:
return False
if self.degraded != other.degraded:
return False
if self.na != other.na:
return False
if self.idle != other.idle:
return False'''
if self.labels != other.labels:
return False
return True
|
kiali_qe/entities/overview.py
|
from kiali_qe.entities import EntityBase
from kiali_qe.components.enums import MeshWideTLSType
class Overview(EntityBase):
def __init__(self, overview_type, namespace, items,
config_status=None,
healthy=0, unhealthy=0, degraded=0, na=0, idle=0,
tls_type=MeshWideTLSType.DISABLED,
labels={}):
self.overview_type = overview_type
self.namespace = namespace
self.items = items
self.config_status = config_status
self.unhealthy = unhealthy
self.healthy = healthy
self.degraded = degraded
self.na = na
self.idle = idle
self.tls_type = tls_type
self.labels = labels
def __str__(self):
return 'overview_type:{}, namespace:{}, items:{}, \
healthy:{}, unhealthy:{}, degraded:{}, N/A:{}, Idle:{}, TLS:{}'.format(
self.overview_type, self.namespace, self.items,
self.healthy, self.unhealthy, self.degraded, self.na, self.idle,
self.tls_type)
def __repr__(self):
return "{}({}, {}, {}, {}, {}, {}, {}, {}, {})".format(
type(self).__name__, repr(self.overview_type), repr(self.namespace), repr(self.items),
repr(self.healthy), repr(self.unhealthy), repr(self.degraded), repr(self.na),
repr(self.idle),
repr(self.tls_type))
def __eq__(self, other):
return self.is_equal(other, advanced_check=True)
def __hash__(self):
return (hash(self.namespace) ^ hash(self.overview_type))
def is_equal(self, other, advanced_check=True):
# basic check
if not isinstance(other, Overview):
return False
if self.overview_type != other.overview_type:
return False
if self.namespace != other.namespace:
return False
if self.items != other.items:
return False
# advanced check
if advanced_check:
# @TODO performance issue between UI and REST
'''if self.healthy != other.healthy:
return False
if self.unhealthy != other.unhealthy:
return False
if self.degraded != other.degraded:
return False
if self.na != other.na:
return False
if self.idle != other.idle:
return False'''
if self.labels != other.labels:
return False
return True
| 0.64232 | 0.124612 |
from typing import Optional, Tuple
import SimpleITK
import numpy as np
import pytest
import sys
from pathlib import Path
import nnunet.inference.predict_simple
from nnunet.inference.predict_simple import main
RESOURCES_DIR = Path(__file__).parent / "resources"
TASK004_HIPPOCAMPUS_PRETRAINED_DIR = RESOURCES_DIR / "pretrained" / "Task004_Hippocampus"
TEST_INPUT_FOLDER = RESOURCES_DIR / "input_data" / "Task004_Hippocampus" / "imagesTs"
TEST_REF_FOLDER = RESOURCES_DIR / "results"
@pytest.mark.parametrize("model", ("2d", "3d_fullres",))
@pytest.mark.parametrize("folds", (None, (0, 1, 2, 3, 4), (0,),))
@pytest.mark.parametrize("disable_tta", (False, True,))
@pytest.mark.parametrize("use_overlap", (False, True,))
def test_nnunet_inference_predict_simple(tmp_path: Path, model: str, folds: Optional[Tuple[int, ...]], disable_tta: bool, use_overlap: bool):
fold_dir = f"folds_{folds[0]}" if folds is not None and len(folds) == 1 else "folds_all"
tta_dir = "notta" if disable_tta else "tta"
ref_dir = TEST_REF_FOLDER / tta_dir / fold_dir / model
# set the output_dir by setting the module's variable (environment variables are circumvented this way)
nnunet.inference.predict_simple.network_training_output_dir = str(TASK004_HIPPOCAMPUS_PRETRAINED_DIR)
# simulate passing arguments to main() using sys.argv
sys.argv = ["", "-i", str(TEST_INPUT_FOLDER), "-o", str(tmp_path), "-t", "Task004_Hippocampus", "-m", model]
if folds is not None:
sys.argv.extend(["-f"] + list(map(str, folds)))
if disable_tta:
sys.argv.append("--disable_tta")
sys.argv.extend(["--step_size", "1" if not use_overlap else "0.5"])
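# A step_size of 1 disables overlap between sliding-window patches; 0.5 keeps nnU-Net's default 50% overlap.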
main()
assert (tmp_path / "plans.pkl").is_file()
assert (tmp_path / "postprocessing.json").is_file()
for expected_predict_file in ref_dir.glob("*.nii.gz"):
produced_output_file = (tmp_path / expected_predict_file.name)
assert produced_output_file.is_file()
produced_output = SimpleITK.ReadImage(str(produced_output_file))
expected_output = SimpleITK.ReadImage(str(expected_predict_file))
assert np.sum(SimpleITK.GetArrayFromImage(produced_output) != SimpleITK.GetArrayFromImage(expected_output)) < 5
|
tests/test_prediction.py
|
from typing import Optional, Tuple
import SimpleITK
import numpy as np
import pytest
import sys
from pathlib import Path
import nnunet.inference.predict_simple
from nnunet.inference.predict_simple import main
RESOURCES_DIR = Path(__file__).parent / "resources"
TASK004_HIPPOCAMPUS_PRETRAINED_DIR = RESOURCES_DIR / "pretrained" / "Task004_Hippocampus"
TEST_INPUT_FOLDER = RESOURCES_DIR / "input_data" / "Task004_Hippocampus" / "imagesTs"
TEST_REF_FOLDER = RESOURCES_DIR / "results"
@pytest.mark.parametrize("model", ("2d", "3d_fullres",))
@pytest.mark.parametrize("folds", (None, (0, 1, 2, 3, 4), (0,),))
@pytest.mark.parametrize("disable_tta", (False, True,))
@pytest.mark.parametrize("use_overlap", (False, True,))
def test_nnunet_inference_predict_simple(tmp_path: Path, model: str, folds: Optional[Tuple[int, ...]], disable_tta: bool, use_overlap: bool):
fold_dir = f"folds_{folds[0]}" if folds is not None and len(folds) == 1 else "folds_all"
tta_dir = "notta" if disable_tta else "tta"
ref_dir = TEST_REF_FOLDER / tta_dir / fold_dir / model
# set the output_dir by setting the module's variable (environment variables are circumvented this way)
nnunet.inference.predict_simple.network_training_output_dir = str(TASK004_HIPPOCAMPUS_PRETRAINED_DIR)
# simulate passing arguments to main() using sys.argv
sys.argv = ["", "-i", str(TEST_INPUT_FOLDER), "-o", str(tmp_path), "-t", "Task004_Hippocampus", "-m", model]
if folds is not None:
sys.argv.extend(["-f"] + list(map(str, folds)))
if disable_tta:
sys.argv.append("--disable_tta")
sys.argv.extend(["--step_size", "1" if not use_overlap else "0.5"])
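# A step_size of 1 disables overlap between sliding-window patches; 0.5 keeps nnU-Net's default 50% overlap.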
main()
assert (tmp_path / "plans.pkl").is_file()
assert (tmp_path / "postprocessing.json").is_file()
for expected_predict_file in ref_dir.glob("*.nii.gz"):
produced_output_file = (tmp_path / expected_predict_file.name)
assert produced_output_file.is_file()
produced_output = SimpleITK.ReadImage(str(produced_output_file))
expected_output = SimpleITK.ReadImage(str(expected_predict_file))
assert np.sum(SimpleITK.GetArrayFromImage(produced_output) != SimpleITK.GetArrayFromImage(expected_output)) < 5
| 0.627837 | 0.25097 |
from functools import wraps
from flask import render_template
from flask_babelex import gettext
import pgadmin.browser.server_groups.servers.databases as database
from config import PG_DEFAULT_DRIVER
from pgadmin.browser.server_groups.servers.databases.schemas.utils \
import SchemaChildModule
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import gone
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response
from pgadmin.utils.driver import get_driver
class CatalogObjectModule(SchemaChildModule):
"""
class CatalogObjectModule(SchemaChildModule)
A module class for Catalog objects node derived from SchemaChildModule.
Methods:
-------
* __init__(*args, **kwargs)
- Method is used to initialize the Catalog objects node and its base module.
* get_nodes(gid, sid, did, scid)
- Method is used to generate the browser collection node.
* script_load()
- Load the module script for Catalog objects when any of the server nodes
is initialized.
"""
NODE_TYPE = 'catalog_object'
COLLECTION_LABEL = gettext("Catalog Objects")
# Flag that controls whether this node is shown under the Schema/Catalog node.
# By default it is set to True, which displays the node in schema/catalog.
# We do not want to display 'Catalog Objects' under Schema/Catalog
# but only in information_schema/sys/dbo
CATALOG_DB_SUPPORTED = False
SUPPORTED_SCHEMAS = ['information_schema', 'sys', 'dbo']
def __init__(self, *args, **kwargs):
"""
Method is used to initialize the CatalogObjectModule and its base
module.
Args:
*args:
**kwargs:
"""
super(CatalogObjectModule, self).__init__(*args, **kwargs)
self.min_ver = None
self.max_ver = None
def get_nodes(self, gid, sid, did, scid):
"""
Generate the collection node
"""
yield self.generate_browser_collection_node(scid)
@property
def script_load(self):
"""
Load the module script for server, when any of the database node is
initialized.
"""
return database.DatabaseModule.NODE_TYPE
blueprint = CatalogObjectModule(__name__)
class CatalogObjectView(PGChildNodeView):
"""
This class is responsible for generating routes for Catalog objects node.
Methods:
-------
* check_precondition()
- This function behaves as a decorator which checks the
database connection before running the view; it also attaches the
manager, conn & template_path properties to self
* list()
- Lists all the Catalog objects nodes within that collection.
* nodes()
- Creates all the nodes of type Catalog objects.
* properties(gid, sid, did, scid, coid)
- Shows the properties of the selected Catalog objects node.
* dependency(gid, sid, did, scid):
- Returns the dependencies list for the given catalog object node.
* dependent(gid, sid, did, scid):
- Returns the dependents list for the given Catalog objects node.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'}
]
ids = [
{'type': 'int', 'id': 'coid'}
]
operations = dict({
'obj': [{'get': 'properties'}, {'get': 'list'}],
'children': [{'get': 'children'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}]
})
def check_precondition(f):
"""
This function behaves as a decorator which checks the
database connection before running the view; it also attaches the
manager, conn & template_path properties to self
"""
@wraps(f)
def wrap(*args, **kwargs):
# Here args[0] will hold self & kwargs will hold gid,sid,did
self = args[0]
self.manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(
kwargs['sid']
)
self.conn = self.manager.connection(did=kwargs['did'])
self.template_path = 'catalog_object/sql/{0}/#{1}#'.format(
'ppas' if self.manager.server_type == 'ppas' else 'pg',
self.manager.version
)
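# The '#<version>#' suffix lets pgAdmin's versioned template loader resolve the
# closest matching version-specific SQL template directory for this server.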
return f(*args, **kwargs)
return wrap
@check_precondition
def list(self, gid, sid, did, scid):
"""
This function is used to list all the catalog objects
nodes within that collection.
Args:
gid: Server group ID
sid: Server ID
did: Database ID
scid: Schema ID
Returns:
JSON of available catalog objects nodes
"""
SQL = render_template("/".join([
self.template_path, 'properties.sql'
]), scid=scid
)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
return ajax_response(
response=res['rows'],
status=200
)
@check_precondition
def nodes(self, gid, sid, did, scid):
"""
This function is used to create all the child nodes within that
collection.
Here it will create all the catalog objects node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
Returns:
JSON of available catalog objects child nodes
"""
res = []
SQL = render_template(
"/".join([self.template_path, 'nodes.sql']), scid=scid
)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
for row in rset['rows']:
res.append(
self.blueprint.generate_browser_node(
row['oid'],
scid,
row['name'],
icon="icon-catalog_object"
))
return make_json_response(
data=res,
status=200
)
@check_precondition
def node(self, gid, sid, did, scid, coid):
"""
This function will fetch properties of catalog objects node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
coid: Catalog object ID
Returns:
JSON of given catalog objects child node
"""
SQL = render_template(
"/".join([self.template_path, 'nodes.sql']), coid=coid
)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
for row in rset['rows']:
return make_json_response(
data=self.blueprint.generate_browser_node(
row['oid'],
scid,
row['name'],
icon="icon-catalog_object"
),
status=200
)
return gone(
errormsg=gettext("Could not find the specified catalog object."))
@check_precondition
def properties(self, gid, sid, did, scid, coid):
"""
This function will show the properties of the selected
catalog objects node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
coid: Catalog object ID
Returns:
JSON of selected catalog objects node
"""
SQL = render_template(
"/".join([self.template_path, 'properties.sql']),
scid=scid, coid=coid
)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
gettext("""Could not find the specified catalog object."""))
return ajax_response(
response=res['rows'][0],
status=200
)
@check_precondition
def dependents(self, gid, sid, did, scid, coid):
"""
This function gets the dependents and returns an ajax response
for the catalog objects node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
coid: catalog objects ID
"""
dependents_result = self.get_dependents(self.conn, coid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, coid):
"""
This function gets the dependencies and returns an ajax response
for the catalog objects node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
coid: catalog objects ID
"""
dependencies_result = self.get_dependencies(self.conn, coid)
return ajax_response(
response=dependencies_result,
status=200
)
CatalogObjectView.register_node_view(blueprint)
|
pgAdmin/browser/server_groups/servers/databases/schemas/catalog_objects/__init__.py
|
from functools import wraps
from flask import render_template
from flask_babelex import gettext
import pgadmin.browser.server_groups.servers.databases as database
from config import PG_DEFAULT_DRIVER
from pgadmin.browser.server_groups.servers.databases.schemas.utils \
import SchemaChildModule
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import gone
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response
from pgadmin.utils.driver import get_driver
class CatalogObjectModule(SchemaChildModule):
"""
class CatalogObjectModule(SchemaChildModule)
A module class for Catalog objects node derived from SchemaChildModule.
Methods:
-------
* __init__(*args, **kwargs)
- Method is used to initialize the Catalog objects node and its base module.
* get_nodes(gid, sid, did, scid)
- Method is used to generate the browser collection node.
* script_load()
- Load the module script for Catalog objects when any of the server nodes
is initialized.
"""
NODE_TYPE = 'catalog_object'
COLLECTION_LABEL = gettext("Catalog Objects")
# Flag that controls whether this node is shown under the Schema/Catalog node.
# By default it is set to True, which displays the node in schema/catalog.
# We do not want to display 'Catalog Objects' under Schema/Catalog
# but only in information_schema/sys/dbo
CATALOG_DB_SUPPORTED = False
SUPPORTED_SCHEMAS = ['information_schema', 'sys', 'dbo']
def __init__(self, *args, **kwargs):
"""
Method is used to initialize the CatalogObjectModule and its base
module.
Args:
*args:
**kwargs:
"""
super(CatalogObjectModule, self).__init__(*args, **kwargs)
self.min_ver = None
self.max_ver = None
def get_nodes(self, gid, sid, did, scid):
"""
Generate the collection node
"""
yield self.generate_browser_collection_node(scid)
@property
def script_load(self):
"""
Load the module script for server, when any of the database node is
initialized.
"""
return database.DatabaseModule.NODE_TYPE
blueprint = CatalogObjectModule(__name__)
class CatalogObjectView(PGChildNodeView):
"""
This class is responsible for generating routes for Catalog objects node.
Methods:
-------
* check_precondition()
- This function behaves as a decorator which checks the
database connection before running the view; it also attaches the
manager, conn & template_path properties to self
* list()
- Lists all the Catalog objects nodes within that collection.
* nodes()
- Creates all the nodes of type Catalog objects.
* properties(gid, sid, did, scid, coid)
- Shows the properties of the selected Catalog objects node.
* dependency(gid, sid, did, scid):
- Returns the dependencies list for the given catalog object node.
* dependent(gid, sid, did, scid):
- Returns the dependents list for the given Catalog objects node.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'}
]
ids = [
{'type': 'int', 'id': 'coid'}
]
operations = dict({
'obj': [{'get': 'properties'}, {'get': 'list'}],
'children': [{'get': 'children'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}]
})
def check_precondition(f):
"""
This function behaves as a decorator which checks the
database connection before running the view; it also attaches the
manager, conn & template_path properties to self
"""
@wraps(f)
def wrap(*args, **kwargs):
# Here args[0] will hold self & kwargs will hold gid,sid,did
self = args[0]
self.manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(
kwargs['sid']
)
self.conn = self.manager.connection(did=kwargs['did'])
self.template_path = 'catalog_object/sql/{0}/#{1}#'.format(
'ppas' if self.manager.server_type == 'ppas' else 'pg',
self.manager.version
)
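# The '#<version>#' suffix lets pgAdmin's versioned template loader resolve the
# closest matching version-specific SQL template directory for this server.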
return f(*args, **kwargs)
return wrap
@check_precondition
def list(self, gid, sid, did, scid):
"""
This function is used to list all the catalog objects
nodes within that collection.
Args:
gid: Server group ID
sid: Server ID
did: Database ID
scid: Schema ID
Returns:
JSON of available catalog objects nodes
"""
SQL = render_template("/".join([
self.template_path, 'properties.sql'
]), scid=scid
)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
return ajax_response(
response=res['rows'],
status=200
)
@check_precondition
def nodes(self, gid, sid, did, scid):
"""
This function is used to create all the child nodes within that
collection.
Here it will create all the catalog objects node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
Returns:
JSON of available catalog objects child nodes
"""
res = []
SQL = render_template(
"/".join([self.template_path, 'nodes.sql']), scid=scid
)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
for row in rset['rows']:
res.append(
self.blueprint.generate_browser_node(
row['oid'],
scid,
row['name'],
icon="icon-catalog_object"
))
return make_json_response(
data=res,
status=200
)
@check_precondition
def node(self, gid, sid, did, scid, coid):
"""
This function will fetch properties of catalog objects node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
coid: Catalog object ID
Returns:
JSON of given catalog objects child node
"""
SQL = render_template(
"/".join([self.template_path, 'nodes.sql']), coid=coid
)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
for row in rset['rows']:
return make_json_response(
data=self.blueprint.generate_browser_node(
row['oid'],
scid,
row['name'],
icon="icon-catalog_object"
),
status=200
)
return gone(
errormsg=gettext("Could not find the specified catalog object."))
@check_precondition
def properties(self, gid, sid, did, scid, coid):
"""
This function will show the properties of the selected
catalog objects node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
coid: Catalog object ID
Returns:
JSON of selected catalog objects node
"""
SQL = render_template(
"/".join([self.template_path, 'properties.sql']),
scid=scid, coid=coid
)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
gettext("""Could not find the specified catalog object."""))
return ajax_response(
response=res['rows'][0],
status=200
)
@check_precondition
def dependents(self, gid, sid, did, scid, coid):
"""
This function gets the dependents and returns an ajax response
for the catalog objects node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
coid: catalog objects ID
"""
dependents_result = self.get_dependents(self.conn, coid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, coid):
"""
This function gets the dependencies and returns an ajax response
for the catalog objects node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
coid: catalog objects ID
"""
dependencies_result = self.get_dependencies(self.conn, coid)
return ajax_response(
response=dependencies_result,
status=200
)
CatalogObjectView.register_node_view(blueprint)
| 0.693888 | 0.129513 |
import random
from datetime import datetime
from textwrap import wrap
from typing import Union
import discord
from discord.ext import commands
from lib.emotes import basic_emoji
class Miscellaneous(commands.Cog):
"""Other interesting commands"""
def __init__(self, bot):
self.bot = bot
@commands.command(name="ping", help="Display bot's ping.")
async def ping(self, ctx):
"""Displays time delta between Discord message and command invocation"""
ms = (datetime.utcnow() - ctx.message.created_at).total_seconds() * 1000
await ctx.send(basic_emoji.get("Pepega") + " 🏓 Pong! `{0}ms`".format(int(ms)))
@commands.command(name="roll", help="Generate a random number between 1 and 100 by default.")
async def roll(self, ctx, num: str = "100"):
"""Roll a dice"""
# Default string for invalid input
result = "No, I don't think so. " + basic_emoji.get("forsenSmug")
# Parse input
if num.isnumeric():
# Roll dice
result = str(random.randint(1, int(num)))
else:
await ctx.message.add_reaction(basic_emoji.get("Si"))
# Display result
await ctx.send(result)
@commands.command(name="decide", aliases=["choose"], help="Decide between options.")
async def decide(self, ctx, *args):
"""Choose one option from a list"""
# No arguments -> exit
if not args:
await ctx.send("Decide between what? " + basic_emoji.get("Pepega") + basic_emoji.get("Clap") + "\nUse `;`, `:`, `,` or ` or `, to separate options.")
await ctx.message.add_reaction(basic_emoji.get("Si"))
return
# Join arguments to one string
raw = " ".join(str(i) for i in args)
# Attempt to split it by any separator
options = raw.split(";")
if len(options) < 2:
options = raw.split(":")
if len(options) < 2:
options = raw.split(",")
if len(options) < 2:
options = raw.split(" or ")
# Splitting failed
if len(options) < 2:
await ctx.send("Separator not recognized, use `;`, `:`, `,` or ` or `, to separate options.")
# Else send a result
else:
await ctx.send(random.choice(options))
@commands.command(name="created", help="Find when an account was created")
async def created(self, ctx, user: Union[discord.Member, discord.User, discord.ClientUser, str, None]):
"""Display account creation date"""
if isinstance(user, str):
await ctx.send("Try a user's tag instead " + basic_emoji.get("Okayga"))
return
if user is None:
user_id = ctx.author.id
msg = "Your account"
else:
user_id = user.id
msg = "That account"
# Decode user's ID
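# A Discord snowflake stores its creation time in the upper bits: dropping the low
# 22 bits leaves milliseconds since the Discord epoch (2015-01-01 UTC); adding
# 1420070400000 ms shifts that to the Unix epoch before converting to seconds.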
binary = str(bin(user_id)[2:])
unix_binary = binary[:len(binary) - 22]
unix = (int(unix_binary, 2) + 1420070400000) // 1000
time = datetime.utcfromtimestamp(unix).strftime("%Y-%m-%d %H:%M:%S")
await ctx.send("{0} was created at {1} UTC".format(msg, time))
@commands.command(name="assemble", aliases=["ass"], help="Tag online/idle users with role")
@commands.guild_only()
async def assemble(self, ctx, role_name: Union[discord.role.Role, str, None]):
"""Attempt to find role, then tag online/idle users with that role"""
if not role_name:
await ctx.send("No role name provided.")
return
if type(role_name) is discord.role.Role:
role = role_name
else:
role = next((role for role in ctx.guild.roles if role.name == role_name), None)
if not role:
await ctx.send("No role with this name exists.")
return
if not role.members:
await ctx.send("No users with this role exist.")
return
online_members = list()
for member in role.members:
if member.status in [discord.Status.online, discord.Status.idle]:
online_members.append(member)
if not online_members:
await ctx.send("No users with this role are currently online.")
return
tag = " ".join(member.mention for member in online_members)
# This split likely mangles any tag on the edge, oh well
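# Discord caps a single message at 2000 characters, hence the chunked sends below.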
for tag_part in wrap(tag, 2000):
await ctx.send(tag_part)
def setup(bot):
bot.add_cog(Miscellaneous(bot))
|
bot/cogs/miscellaneous_cog.py
|
import random
from datetime import datetime
from textwrap import wrap
from typing import Union
import discord
from discord.ext import commands
from lib.emotes import basic_emoji
class Miscellaneous(commands.Cog):
"""Other interesting commands"""
def __init__(self, bot):
self.bot = bot
@commands.command(name="ping", help="Display bot's ping.")
async def ping(self, ctx):
"""Displays time delta between Discord message and command invocation"""
ms = (datetime.utcnow() - ctx.message.created_at).total_seconds() * 1000
await ctx.send(basic_emoji.get("Pepega") + " 🏓 Pong! `{0}ms`".format(int(ms)))
@commands.command(name="roll", help="Generate a random number between 1 and 100 by default.")
async def roll(self, ctx, num: str = "100"):
"""Roll a dice"""
# Default string for invalid input
result = "No, I don't think so. " + basic_emoji.get("forsenSmug")
# Parse input
if num.isnumeric():
# Roll dice
result = str(random.randint(1, int(num)))
else:
await ctx.message.add_reaction(basic_emoji.get("Si"))
# Display result
await ctx.send(result)
@commands.command(name="decide", aliases=["choose"], help="Decide between options.")
async def decide(self, ctx, *args):
"""Choose one option from a list"""
# No arguments -> exit
if not args:
await ctx.send("Decide between what? " + basic_emoji.get("Pepega") + basic_emoji.get("Clap") + "\nUse `;`, `:`, `,` or ` or `, to separate options.")
await ctx.message.add_reaction(basic_emoji.get("Si"))
return
# Join arguments to one string
raw = " ".join(str(i) for i in args)
# Attempt to split it by any separator
options = raw.split(";")
if len(options) < 2:
options = raw.split(":")
if len(options) < 2:
options = raw.split(",")
if len(options) < 2:
options = raw.split(" or ")
# Splitting failed
if len(options) < 2:
await ctx.send("Separator not recognized, use `;`, `:`, `,` or ` or `, to separate options.")
# Else send a result
else:
await ctx.send(random.choice(options))
@commands.command(name="created", help="Find when an account was created")
async def created(self, ctx, user: Union[discord.Member, discord.User, discord.ClientUser, str, None]):
"""Display account creation date"""
if isinstance(user, str):
await ctx.send("Try a user's tag instead " + basic_emoji.get("Okayga"))
return
if user is None:
user_id = ctx.author.id
msg = "Your account"
else:
user_id = user.id
msg = "That account"
# Decode user's ID
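# A Discord snowflake stores its creation time in the upper bits: dropping the low
# 22 bits leaves milliseconds since the Discord epoch (2015-01-01 UTC); adding
# 1420070400000 ms shifts that to the Unix epoch before converting to seconds.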
binary = str(bin(user_id)[2:])
unix_binary = binary[:len(binary) - 22]
unix = (int(unix_binary, 2) + 1420070400000) // 1000
time = datetime.utcfromtimestamp(unix).strftime("%Y-%m-%d %H:%M:%S")
await ctx.send("{0} was created at {1} UTC".format(msg, time))
@commands.command(name="assemble", aliases=["ass"], help="Tag online/idle users with role")
@commands.guild_only()
async def assemble(self, ctx, role_name: Union[discord.role.Role, str, None]):
"""Attempt to find role, then tag online/idle users with that role"""
if not role_name:
await ctx.send("No role name provided.")
return
if type(role_name) is discord.role.Role:
role = role_name
else:
role = next((role for role in ctx.guild.roles if role.name == role_name), None)
if not role:
await ctx.send("No role with this name exists.")
return
if not role.members:
await ctx.send("No users with this role exist.")
return
online_members = list()
for member in role.members:
if member.status in [discord.Status.online, discord.Status.idle]:
online_members.append(member)
if not online_members:
await ctx.send("No users with this role are currently online.")
return
tag = " ".join(member.mention for member in online_members)
# This split likely mangles any tag on the edge, oh well
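# Discord caps a single message at 2000 characters, hence the chunked sends below.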
for tag_part in wrap(tag, 2000):
await ctx.send(tag_part)
def setup(bot):
bot.add_cog(Miscellaneous(bot))
| 0.644673 | 0.107437 |
from __future__ import annotations
import abc
from typing import NamedTuple, Union, Dict
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
class ContainersAndTEUContainerFlowPair(NamedTuple):
"""
This is a pair of container flows, i.e. the number of containers moving from A to B within a given time window.
First, it is reported in containers which is important for reporting the efficiency at the interfaces,
e.g. moves per hour for the ship-to-shore gantry cranes.
Second, it is reported in TEU which is important for the yard capacity.
"""
containers: Dict[ModeOfTransport, Dict[ModeOfTransport, Union[int, float]]]
TEU: Dict[ModeOfTransport, Dict[ModeOfTransport, Union[int, float]]]
class AbstractPosthocAnalysis(abc.ABC):
def __init__(
self,
transportation_buffer: float | None = None
):
"""
Args:
transportation_buffer: The buffer, e.g. 0.2 means that 20% more containers (in TEU) can be put on a vessel
compared to the amount of containers it had on its inbound journey - as long as the total vehicle
capacity would not be exceeded.
"""
self.transportation_buffer: float | None = None
self.update(
transportation_buffer=transportation_buffer
)
def update(
self,
transportation_buffer: float | None
):
"""
As the transportation buffer is not stored in the database, for some analyses it needs to be provided.
Args:
transportation_buffer: The buffer, e.g. 0.2 means that 20% more containers (in TEU) can be put on a vessel
compared to the amount of containers it had on its inbound journey - as long as the total vehicle
capacity would not be exceeded.
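Example (illustrative): with a buffer of 0.2, a vessel that delivered 1,000 TEU
inbound may be loaded with up to 1,200 TEU outbound, capacity permitting.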
"""
if transportation_buffer is not None:
assert transportation_buffer > -1
self.transportation_buffer = transportation_buffer
|
conflowgen/posthoc_analysis/abstract_posthoc_analysis.py
| 0.918745 | 0.4206 |
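The transportation_buffer docstrings above describe the buffer as a relative allowance on top of a vehicle's inbound volume; this tiny arithmetic sketch (not part of the ConFlowGen API) spells out the 0.2 example.
def outbound_allowance_in_teu(inbound_teu: float, transportation_buffer: float) -> float:
    # A buffer of 0.2 lets a vessel that delivered 1000 TEU pick up to 1200 TEU,
    # as long as its total capacity is not exceeded.
    assert transportation_buffer > -1
    return inbound_teu * (1 + transportation_buffer)
print(outbound_allowance_in_teu(1000, 0.2))  # 1200.0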
import json
import xml.etree.ElementTree as ET
from cafe.engine.models.base import AutoMarshallingModel
from cafe.engine.models.base import AutoMarshallingListModel
from cloudcafe.compute.common.constants import Constants
from cloudcafe.compute.common.equality_tools import EqualityTools
class FlavorAccess(AutoMarshallingModel):
def __init__(self, flavor_id=None, tenant_id=None):
super(FlavorAccess, self).__init__()
self.flavor_id = flavor_id
self.tenant_id = tenant_id
@classmethod
def _json_to_obj(cls, json_dict):
access = FlavorAccess(flavor_id=json_dict.get('flavor_id'),
tenant_id=json_dict.get('tenant_id'))
return access
@classmethod
def _xml_to_obj(cls, element):
access_dict = element.attrib
access = FlavorAccess(flavor_id=access_dict.get('flavor_id'),
tenant_id=access_dict.get('tenant_id'))
return access
def __eq__(self, other):
return EqualityTools.are_objects_equal(self, other)
def __ne__(self, other):
return not self == other
class FlavorAccessList(AutoMarshallingListModel):
@classmethod
def _json_to_obj(cls, serialized_str):
json_dict = json.loads(serialized_str)
return cls._list_to_obj(json_dict.get('flavor_access'))
@classmethod
def _list_to_obj(cls, access_dict_list):
access_list = FlavorAccessList()
for flavor_dict in access_dict_list:
access = FlavorAccess._json_to_obj(flavor_dict)
access_list.append(access)
return access_list
@classmethod
def _xml_to_obj(cls, serialized_str):
element = ET.fromstring(serialized_str)
return cls._xml_list_to_obj(element.findall('access'))
@classmethod
def _xml_list_to_obj(cls, xml_list):
flavors = FlavorAccessList()
for ele in xml_list:
flavors.append(FlavorAccess._xml_to_obj(ele))
return flavors
class AddTenantFlavorAccess(AutoMarshallingModel):
def __init__(self, tenant=None):
super(AddTenantFlavorAccess, self).__init__()
self.tenant = tenant
def _obj_to_json(self):
ret = {'addTenantAccess': {'tenant': self.tenant}}
return json.dumps(ret)
def _obj_to_xml(self):
xml = Constants.XML_HEADER
element = ET.Element('addTenantAccess')
element.set('xmlns', Constants.XML_API_NAMESPACE)
element.set('tenant', self.tenant)
xml += ET.tostring(element)
return xml
class RemoveTenantFlavorAccess(AutoMarshallingModel):
def __init__(self, tenant=None):
super(RemoveTenantFlavorAccess, self).__init__()
self.tenant = tenant
def _obj_to_json(self):
ret = {'removeTenantAccess': {'tenant': self.tenant}}
return json.dumps(ret)
def _obj_to_xml(self):
xml = Constants.XML_HEADER
element = ET.Element('removeTenantAccess')
element.set('xmlns', Constants.XML_API_NAMESPACE)
element.set('tenant', self.tenant)
xml += ET.tostring(element)
return xml
|
cloudcafe/compute/flavors_api/models/flavor_access.py
| 0.490236 | 0.052255 |
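For reference, the request bodies built by the two marshalling classes above reduce to a single JSON key each; the tenant id below is a made-up placeholder.
import json
# Mirrors AddTenantFlavorAccess._obj_to_json for a hypothetical tenant id.
print(json.dumps({'addTenantAccess': {'tenant': 'example-tenant-id'}}))
# Mirrors RemoveTenantFlavorAccess._obj_to_json for the same tenant id.
print(json.dumps({'removeTenantAccess': {'tenant': 'example-tenant-id'}}))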
import logging
import types
from etcd import EtcdResult
from mock import Mock
from calico.etcdutils import PathDispatcher
from calico.felix.test.base import BaseTestCase
_log = logging.getLogger(__name__)
SAME_AS_KEY = object()
class _TestPathDispatcherBase(BaseTestCase):
"""
Abstract base class for Dispatcher tests.
"""
# Etcd action that this class tests.
action = None
# Expected handler type, "set" or "delete".
expected_handlers = None
def setUp(self):
super(_TestPathDispatcherBase, self).setUp()
self.dispatcher = PathDispatcher()
self.handlers = {
"delete": {},
"set": {},
}
self.register("/")
self.register("/a")
self.register("/a/<b>")
self.register("/a/<b>/c")
self.register("/a/<b>/d")
self.register("/a/<b>/d/<e>")
def register(self, key):
m_on_set = Mock()
m_on_del = Mock()
self.dispatcher.register(key, on_set=m_on_set, on_del=m_on_del)
self.handlers["set"][key.strip("/")] = m_on_set
self.handlers["delete"][key.strip("/")] = m_on_del
def assert_handled(self, key, exp_handler=SAME_AS_KEY, **exp_captures):
if exp_handler is SAME_AS_KEY:
exp_handler = key
if isinstance(exp_handler, types.StringTypes):
exp_handler = exp_handler.strip("/")
m_response = Mock(spec=EtcdResult)
m_response.key = key
m_response.action = self.action
self.dispatcher.handle_event(m_response)
exp_handlers = self.handlers[self.expected_handlers]
for handler_key, handler in exp_handlers.iteritems():
assert isinstance(handler, Mock)
if handler_key == exp_handler:
continue
self.assertFalse(handler.called,
"Unexpected set handler %s was called for "
"key %s" % (handler_key, key))
unexp_handlers = self.handlers[self.unexpected_handlers]
for handler_key, handler in unexp_handlers.iteritems():
assert isinstance(handler, Mock)
self.assertFalse(handler.called,
"Unexpected del handler %s was called for "
"key %s" % (handler_key, key))
if exp_handler is not None:
exp_handlers[exp_handler].assert_called_once_with(
m_response, **exp_captures)
@property
def unexpected_handlers(self):
if self.expected_handlers == "set":
return "delete"
else:
return "set"
def test_dispatch_root(self):
self.assert_handled("/")
def test_dispatch_no_captures(self):
self.assert_handled("/a")
def test_dispatch_capture(self):
self.assert_handled("/a/bval", exp_handler="/a/<b>", b="bval")
def test_dispatch_after_capture(self):
self.assert_handled("/a/bval/c", exp_handler="/a/<b>/c", b="bval")
def test_dispatch_after_capture_2(self):
self.assert_handled("/a/bval/d", exp_handler="/a/<b>/d", b="bval")
def test_multi_capture(self):
self.assert_handled("/a/bval/d/eval",
exp_handler="/a/<b>/d/<e>",
b="bval", e="eval")
def test_non_match(self):
self.assert_handled("/a/bval/c/eval", exp_handler=None)
self.assert_handled("/foo", exp_handler=None)
def test_cover_no_match(self):
m_result = Mock(spec=EtcdResult)
m_result.key = "/a"
m_result.action = "unknown"
self.dispatcher.handle_event(m_result)
for handlers in self.handlers.itervalues():
for key, handler in handlers.iteritems():
self.assertFalse(handler.called,
msg="Unexpected handler called: %s" % key)
class TestDispatcherSet(_TestPathDispatcherBase):
action = "set"
expected_handlers = "set"
class TestDispatcherCaS(_TestPathDispatcherBase):
action = "compareAndSwap"
expected_handlers = "set"
class TestDispatcherCreate(_TestPathDispatcherBase):
action = "create"
expected_handlers = "set"
class TestDispatcherUpdate(_TestPathDispatcherBase):
action = "update"
expected_handlers = "set"
class TestDispatcherDel(_TestPathDispatcherBase):
action = "delete"
expected_handlers = "delete"
class TestDispatcherCaD(_TestPathDispatcherBase):
action = "compareAndDelete"
expected_handlers = "delete"
class TestDispatcherExpire(_TestPathDispatcherBase):
action = "expire"
expected_handlers = "delete"
|
calico/test/test_etcdutils.py
| 0.570092 | 0.179818 |
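The tests above pin down PathDispatcher's contract: handlers are registered per path template, <name> segments become keyword captures, and the etcd action decides whether the set or the delete handler fires. A hedged usage sketch based only on what the tests exercise (the handler bodies are illustrative):
from calico.etcdutils import PathDispatcher
def on_item_set(response, b, e):
    # Fires for set/create/update/compareAndSwap on keys matching /a/<b>/d/<e>;
    # b and e receive the captured path segments.
    print("set", response.key, b, e)
def on_item_del(response, b, e):
    # Fires for delete/compareAndDelete/expire on the same template.
    print("delete", response.key, b, e)
dispatcher = PathDispatcher()
dispatcher.register("/a/<b>/d/<e>", on_set=on_item_set, on_del=on_item_del)
# dispatcher.handle_event(etcd_result) then routes each EtcdResult by key and action.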
from PlaystationHandler import PlayStationHandler
from geometry_msgs.msg import Twist,TwistStamped
import rospy
class PlayStationDiffDrive(PlayStationHandler):
def __init__(self,message_type):
PlayStationHandler.__init__(self)
self.rate=rospy.Rate(rospy.get_param("~rate",10))
self.active_robot = 0
self.speed_translation = rospy.get_param("~translation",0.1)
self.speed_rotation = rospy.get_param("~rotation",0.2)
self.trans_incr=rospy.get_param("~trans_incr",0.1)
self.rot_incr=rospy.get_param("~rot_incr",0.1)
self.robotnames = rospy.get_param("~robot_names","")
self.cmd_vel_topic_prefix = rospy.get_param("~cmd_vel_topic_prefix","")
self.callbackList=[ self.decreaseTrans,
self.increaseRot,
self.decreaseRot,
self.increaseTrans,
self.dummy,
self.dummy,
self.dummy,
self.changeRobot
]
self.translation = float()
self.rotation = float()
self.initialized = False
self.lower_position_reached = False
self.publishFunction=None
if message_type==Twist:
print("Publishing as Twist")
self.publishFunction=self.publishTwist
elif message_type==TwistStamped:
print("Publishing as TwistStamped")
self.publishFunction=self.publishTwistStamped
self.publisher_stack = []
if self.robotnames == "":
self.publisher_stack.append(rospy.Publisher(self.cmd_vel_topic_prefix + "/cmd_vel",message_type,queue_size= 10))
else:
for i in self.robotnames:
self.publisher_stack.append(rospy.Publisher(i+"/" + self.cmd_vel_topic_prefix + "/cmd_vel",message_type,queue_size= 10))
def dummy(self):
pass
def increaseRot(self):
print("Increasing rot")
self.speed_rotation=self.speed_rotation * (1+self.rot_incr)
def increaseTrans(self):
print("Increasing trans")
self.speed_translation=self.speed_translation * (1+self.trans_incr)
def decreaseRot(self):
print("Decreasing rot")
self.speed_rotation=self.speed_rotation* (1- self.rot_incr)
if self.speed_rotation<0.0:
self.speed_rotation=0.0
def decreaseTrans(self):
print("Decreasing trans")
self.speed_translation=self.speed_translation * (1-self.trans_incr)
if self.speed_translation<0.0:
self.speed_translation=0.0
def changeRobot(self):
self.active_robot = (self.active_robot + 1) % len(self.robotnames)
def publishTwist(self):
msg=Twist()
msg.linear.x=self.translation
msg.angular.z=self.rotation
self.publisher_stack[self.active_robot].publish(msg)
def publishTwistStamped(self):
msg=TwistStamped()
msg.header.stamp=rospy.Time.now()
msg.twist.linear.x=self.translation
msg.twist.angular.z=self.rotation
self.publisher_stack[self.active_robot].publish(msg)
def run(self):
while not rospy.is_shutdown():
for i,edge in enumerate(self._edges):
if edge:
self._edges[i] = 0
try:
self.callbackList[i]()
except Exception as ex:
print(ex)
pass
if self.initialized == True:
self.translation = (abs(self._axes[5] - 1) - abs(self._axes[2] - 1)) *self.speed_translation #data.axes[1] + data.axes[4]
self.rotation = (self._axes[0] + self._axes[3])*self.speed_rotation
self.publishFunction()
else:
rospy.loginfo_throttle(5,"Controller is not initialized. Press and release both shoulder buttons simultaneously")
if self._axes[2] == -1.0 and self._axes[5] == -1.0:
self.lower_position_reached = True
rospy.loginfo_once("lower position reached")
if self.lower_position_reached == True and self._axes[2] == 1.0 and self._axes[5] == 1.0:
self.initialized = True
rospy.loginfo_once("initilization complete")
self.rate.sleep()
|
src/Match_Mobile_Robotics/general_hardware_helper/ps4_controller/src/ps4_controller/PlayStationDiffDrive.py
| 0.344774 | 0.117193 |
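The translation line in run() folds both analogue triggers into one signed command: a released trigger reports +1.0 and a fully pressed one -1.0, so abs(axis - 1) ranges from 0 to 2. A standalone sketch of that mapping with hypothetical axis readings:
def mixed_translation(reverse_trigger: float, forward_trigger: float, speed: float) -> float:
    # Released triggers (+1.0) contribute nothing; fully pressed (-1.0) contribute 2 * speed.
    return (abs(forward_trigger - 1) - abs(reverse_trigger - 1)) * speed
print(mixed_translation(1.0, 1.0, 0.1))   # 0.0  -> both released
print(mixed_translation(1.0, -1.0, 0.1))  # 0.2  -> full forward
print(mixed_translation(-1.0, 1.0, 0.1))  # -0.2 -> full reverse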
import librosa
import argparse
import numpy as np
import warnings
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import scale as mscale
from matplotlib import transforms as mtransforms
from separator import AudioSeparator
import librosa.display
import os
class MelScale(mscale.ScaleBase):
r"""Mel Scale transform for axis in plots
    Here we want to use the Mel scale, with tick values in kHz.
WARNING: There is a bug at the moment and the scale does not adjust to the extremal values of the axis.
See https://matplotlib.org/gallery/scales/custom_scale.html for example of using custom scale.
"""
name = 'mel'
def __init__(self, axis, *, fmin=0.0, fmax=8.0, **kwargs):
mscale.ScaleBase.__init__(self)
self.fmin = fmin
self.fmax = fmax
def get_transform(self):
return self.MelTransform()
def set_default_locators_and_formatters(self, axis):
pass
def limit_range_for_scale(self, vmin, vmax, minpos):
return self.fmin, self.fmax
class MelTransform(mtransforms.Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self):
mtransforms.Transform.__init__(self)
def transform_non_affine(self, a):
return librosa.hz_to_mel(a * 1000.0)
def inverted(self):
return MelScale.InvertedMelTransform()
class InvertedMelTransform(mtransforms.Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self):
mtransforms.Transform.__init__(self)
def transform_non_affine(self, a):
return librosa.mel_to_hz(a) / 1000.0
def inverted(self):
return MelScale.MelTransform()
def main():
r"""This script was used to generate the spectrograms and masks pictures of the master thesis for the section
about the TUT rare sound event 2017 data set.
It does:
- creates an AudioSeparator from a model checkpoint
- use the model to get separation masks for an example of each class
- saves the mask figures to a folder that has the model checkpoint name, next to the model.
- run evaluation of the model on the validation set and prints the results
"""
# Register Mel scale
mscale.register_scale(MelScale)
# Get model checkpoint path and the folder where the audio separated by the model will be saved
parser = argparse.ArgumentParser(allow_abbrev=False,
description="For the model specified by input, computes the separated audio "
"files of the ICASP2018 challenge, then evaluate the separation "
"metrics")
parser.add_argument("--sep_audio_folder", type=str, required=True,
help="Folder to store the separated audio files.")
parser.add_argument("--model_ckpt", type=str, required=True,
help="Path to the checkpoint of the model to evaluate.")
user_args = vars(parser.parse_known_args()[0])
model_ckpt = user_args['model_ckpt']
separated_audio_folder = user_args['sep_audio_folder']
# Load model in separation and evaluation framework
synthetizer = AudioSeparator.from_checkpoint(
{"checkpoint_path": model_ckpt, "separated_audio_folder": separated_audio_folder})
    # Path to the folder where to save the pictures
save_path = os.path.join(os.path.dirname(model_ckpt),
os.path.splitext(os.path.basename(model_ckpt))[0] + '_figures')
if not os.path.exists(save_path):
os.makedirs(save_path)
else:
raise RuntimeError('Figure directory already exists ! ')
# Run Separation on 1 example of each class, and save the figures
baby_cry_example = 17
_, babycrymask = synthetizer.model(synthetizer.data_set.__getitem__(baby_cry_example)[0].unsqueeze(0))
babycrymask = babycrymask.detach().clone().squeeze()[0]
fig1, axs = plt.subplots(1, 1, figsize=(5, 3))
axs.pcolormesh(np.arange(babycrymask.shape[1]) * synthetizer.data_set.config['STFT_frame_shift_ms'] / 1000,
librosa.filters.mel_frequencies(64, fmin=1, fmax=8000) / 1000,
np.squeeze(babycrymask),
cmap='magma', vmin=0.0, vmax=1.0, zorder=0)
axs.set_yscale('mel')
plt.locator_params(axis='y', nbins=4)
plt.tight_layout()
plt.show()
fig1.savefig(os.path.join(save_path, 'babycry_mask.svg'), format='svg', bbox_inches='tight')
fig1.savefig(os.path.join(save_path, 'babycry_mask.eps'), format='eps', bbox_inches='tight')
fig1.savefig(os.path.join(save_path, 'babycry_mask.pdf'), format='pdf', bbox_inches='tight')
gunshot_example = 50
_, gunshotmask = synthetizer.model(synthetizer.data_set.__getitem__(gunshot_example)[0].unsqueeze(0))
gunshotmask = gunshotmask.detach().clone().squeeze()[1]
fig2, axs = plt.subplots(1, 1, figsize=(5, 3))
axs.pcolormesh(np.arange(gunshotmask.shape[1]) * synthetizer.data_set.config['STFT_frame_shift_ms'] / 1000,
librosa.filters.mel_frequencies(64, fmin=1, fmax=8000) / 1000,
np.squeeze(gunshotmask),
cmap='magma', vmin=0.0, vmax=1.0, zorder=0)
axs.set_yscale('mel')
plt.locator_params(axis='y', nbins=4)
plt.tight_layout()
plt.show()
fig2.savefig(os.path.join(save_path, 'gunshot_mask.svg'), format='svg', bbox_inches='tight')
fig2.savefig(os.path.join(save_path, 'gunshot_mask.eps'), format='eps', bbox_inches='tight')
fig2.savefig(os.path.join(save_path, 'gunshot_mask.pdf'), format='pdf', bbox_inches='tight')
glassbreak_example = 131
_, glassbreakmask = synthetizer.model(synthetizer.data_set.__getitem__(glassbreak_example)[0].unsqueeze(0))
glassbreakmask = glassbreakmask.detach().clone().squeeze()[2]
fig3, axs = plt.subplots(1, 1, figsize=(5, 3))
axs.pcolormesh(np.arange(glassbreakmask.shape[1]) * synthetizer.data_set.config['STFT_frame_shift_ms'] / 1000,
librosa.filters.mel_frequencies(64, fmin=1, fmax=8000) / 1000,
np.squeeze(glassbreakmask),
cmap='magma', vmin=0.0, vmax=1.0, zorder=0)
axs.set_yscale('mel')
plt.locator_params(axis='y', nbins=4)
plt.tight_layout()
plt.show()
fig3.savefig(os.path.join(save_path, 'glassbreak_mask.svg'), format='svg', bbox_inches='tight')
fig3.savefig(os.path.join(save_path, 'glassbreak_mask.eps'), format='eps', bbox_inches='tight')
fig3.savefig(os.path.join(save_path, 'glassbreak_mask.pdf'), format='pdf', bbox_inches='tight')
# Run separation for all files in the validation set
synthetizer.separate(separation_method='in_lin')
# Compute the separation metrics for all files in the validation data set.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
sdrs, sirs, sars = synthetizer.evaluate_separation()
# Print the separation results per class and mixture.
# {class} mixes: the mixture file contains an event and background noise
# {class} only: the mixture file only contains the event.
indices_babycry = np.where(synthetizer.data_set.labels.cpu().numpy()[:, 0] == 1)[0]
indices_glassbreak = np.where(synthetizer.data_set.labels.cpu().numpy()[:, 1] == 1)[0]
indices_gunshot = np.where(synthetizer.data_set.labels.cpu().numpy()[:, 2] == 1)[0]
indices_background = np.where(synthetizer.data_set.labels.cpu().numpy()[:, 3] == 1)[0]
indices_babycry_mix = np.intersect1d(indices_babycry, indices_background)
indices_glassbreak_mix = np.intersect1d(indices_glassbreak, indices_background)
indices_gunshot_mix = np.intersect1d(indices_gunshot, indices_background)
indices_babycry_only = np.setdiff1d(indices_babycry, indices_background)
indices_glassbreak_only = np.setdiff1d(indices_glassbreak, indices_background)
indices_gunshot_only = np.setdiff1d(indices_gunshot, indices_background)
format_string = 'mean {:^9.4f}, std {:^9.4f}, median {:^9.4f}\nSIR: mean {:^9.4f}, std {:^9.4f}, ' \
'median {:^9.4f}\nSAR: mean {:^9.4f}, std {:^9.4f}, median {:^9.4f}'
print('Babycry mixes\nSDR: ' + format_string.format(
sdrs[indices_babycry_mix, 0].mean(), sdrs[indices_babycry_mix, 0].std(),
np.median(sdrs[indices_babycry_mix, 0]),
sirs[indices_babycry_mix, 0].mean(), sirs[indices_babycry_mix, 0].std(),
np.median(sirs[indices_babycry_mix, 0]),
sars[indices_babycry_mix, 0].mean(), sars[indices_babycry_mix, 0].std(),
np.median(sars[indices_babycry_mix, 0])))
print('Babycry only\nSDR: ' + format_string.format(
sdrs[indices_babycry_only, 0].mean(), sdrs[indices_babycry_only, 0].std(),
np.median(sdrs[indices_babycry_only, 0]),
sirs[indices_babycry_only, 0].mean(), sirs[indices_babycry_only, 0].std(),
np.median(sirs[indices_babycry_only, 0]),
sars[indices_babycry_only, 0].mean(), sars[indices_babycry_only, 0].std(),
np.median(sars[indices_babycry_only, 0])))
print('Glassbreak mixes\nSDR: ' + format_string.format(
sdrs[indices_glassbreak_mix, 1].mean(), sdrs[indices_glassbreak_mix, 1].std(),
np.median(sdrs[indices_glassbreak_mix, 1]),
sirs[indices_glassbreak_mix, 1].mean(), sirs[indices_glassbreak_mix, 1].std(),
np.median(sirs[indices_glassbreak_mix, 1]),
sars[indices_glassbreak_mix, 1].mean(), sars[indices_glassbreak_mix, 1].std(),
np.median(sars[indices_glassbreak_mix, 1])))
print('Glassbreak only\nSDR: ' + format_string.format(
sdrs[indices_glassbreak_only, 1].mean(), sdrs[indices_glassbreak_only, 1].std(),
np.median(sdrs[indices_glassbreak_only, 1]),
sirs[indices_glassbreak_only, 1].mean(), sirs[indices_glassbreak_only, 1].std(),
np.median(sirs[indices_glassbreak_only, 1]),
sars[indices_glassbreak_only, 1].mean(), sars[indices_glassbreak_only, 1].std(),
np.median(sars[indices_glassbreak_only, 1])))
print('Gunshot mixes\nSDR: ' + format_string.format(
sdrs[indices_gunshot_mix, 2].mean(), sdrs[indices_gunshot_mix, 2].std(),
np.median(sdrs[indices_gunshot_mix, 2]),
sirs[indices_gunshot_mix, 2].mean(), sirs[indices_gunshot_mix, 2].std(),
np.median(sirs[indices_gunshot_mix, 2]),
sars[indices_gunshot_mix, 2].mean(), sars[indices_gunshot_mix, 2].std(),
np.median(sars[indices_gunshot_mix, 2])))
print('Gunshot only\nSDR: ' + format_string.format(
sdrs[indices_gunshot_only, 2].mean(), sdrs[indices_gunshot_only, 2].std(),
np.median(sdrs[indices_gunshot_only, 2]),
sirs[indices_gunshot_only, 2].mean(), sirs[indices_gunshot_only, 2].std(),
np.median(sirs[indices_gunshot_only, 2]),
sars[indices_gunshot_only, 2].mean(), sars[indices_gunshot_only, 2].std(),
np.median(sars[indices_gunshot_only, 2])))
if __name__ == '__main__':
main()
|
separation_examples_and_compute_metrics.py
| 0.778902 | 0.527499 |
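Near the end of main() the per-class "mixes" and "only" subsets are carved out of the label matrix with intersect1d/setdiff1d against the background column; this toy example reproduces the idea with a hand-made label array (the real labels come from the AudioSeparator data set).
import numpy as np
# Toy labels: columns are [babycry, glassbreak, gunshot, background].
labels = np.array([
    [1, 0, 0, 1],  # babycry mixed with background
    [1, 0, 0, 0],  # babycry only
    [0, 0, 1, 1],  # gunshot mixed with background
])
indices_babycry = np.where(labels[:, 0] == 1)[0]
indices_background = np.where(labels[:, 3] == 1)[0]
print(np.intersect1d(indices_babycry, indices_background))  # [0] -> babycry mixes
print(np.setdiff1d(indices_babycry, indices_background))    # [1] -> babycry only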
from robot.utils import normalize, normalize_whitespace, RecommendationFinder
from .tokens import Token
class Settings:
names = ()
aliases = {}
multi_use = (
'Metadata',
'Library',
'Resource',
'Variables'
)
single_value = (
'Resource',
'Test Timeout',
'Test Template',
'Timeout',
'Template'
)
name_and_arguments = (
'Metadata',
'Suite Setup',
'Suite Teardown',
'Test Setup',
'Test Teardown',
'Test Template',
'Setup',
'Teardown',
'Template',
'Resource',
'Variables'
)
name_arguments_and_with_name = (
'Library',
)
def __init__(self):
self.settings = {n: None for n in self.names}
def lex(self, statement):
setting = statement[0]
name = self._format_name(setting.value)
normalized = self._normalize_name(name)
try:
self._validate(name, normalized, statement)
except ValueError as err:
self._lex_error(setting, statement[1:], err.args[0])
else:
self._lex_setting(setting, statement[1:], normalized)
def _format_name(self, name):
return name
def _normalize_name(self, name):
name = normalize_whitespace(name).title()
if name in self.aliases:
return self.aliases[name]
return name
def _validate(self, name, normalized, statement):
if normalized not in self.settings:
message = self._get_non_existing_setting_message(name, normalized)
raise ValueError(message)
if self.settings[normalized] is not None and normalized not in self.multi_use:
raise ValueError("Setting '%s' is allowed only once. "
"Only the first value is used." % name)
if normalized in self.single_value and len(statement) > 2:
raise ValueError("Setting '%s' accepts only one value, got %s."
% (name, len(statement) - 1))
def _get_non_existing_setting_message(self, name, normalized):
if normalized in TestCaseFileSettings.names:
is_resource = isinstance(self, ResourceFileSettings)
return "Setting '%s' is not allowed in %s file." % (
name, 'resource' if is_resource else 'suite initialization'
)
return RecommendationFinder(normalize).find_and_format(
name=normalized,
candidates=tuple(self.settings) + tuple(self.aliases),
message="Non-existing setting '%s'." % name
)
def _lex_error(self, setting, values, error):
setting.set_error(error)
for token in values:
token.type = Token.COMMENT
def _lex_setting(self, setting, values, name):
self.settings[name] = values
setting.type = name.upper()
if name in self.name_and_arguments:
self._lex_name_and_arguments(values)
elif name in self.name_arguments_and_with_name:
self._lex_name_arguments_and_with_name(values)
else:
self._lex_arguments(values)
def _lex_name_and_arguments(self, tokens):
if tokens:
tokens[0].type = Token.NAME
self._lex_arguments(tokens[1:])
def _lex_name_arguments_and_with_name(self, tokens):
self._lex_name_and_arguments(tokens)
if len(tokens) > 1 and \
normalize_whitespace(tokens[-2].value) == 'WITH NAME':
tokens[-2].type = Token.WITH_NAME
tokens[-1].type = Token.NAME
def _lex_arguments(self, tokens):
for token in tokens:
token.type = Token.ARGUMENT
class TestCaseFileSettings(Settings):
names = (
'Documentation',
'Metadata',
'Suite Setup',
'Suite Teardown',
'Test Setup',
'Test Teardown',
'Test Template',
'Test Timeout',
'Force Tags',
'Default Tags',
'Library',
'Resource',
'Variables'
)
aliases = {
'Task Setup': 'Test Setup',
'Task Teardown': 'Test Teardown',
'Task Template': 'Test Template',
'Task Timeout': 'Test Timeout',
}
class InitFileSettings(Settings):
names = (
'Documentation',
'Metadata',
'Suite Setup',
'Suite Teardown',
'Test Setup',
'Test Teardown',
'Test Timeout',
'Force Tags',
'Library',
'Resource',
'Variables'
)
class ResourceFileSettings(Settings):
names = (
'Documentation',
'Library',
'Resource',
'Variables'
)
class TestCaseSettings(Settings):
names = (
'Documentation',
'Tags',
'Setup',
'Teardown',
'Template',
'Timeout'
)
def __init__(self, parent):
Settings.__init__(self)
self.parent = parent
def _format_name(self, name):
return name[1:-1].strip()
@property
def template_set(self):
template = self.settings['Template']
if self._has_disabling_value(template):
return False
parent_template = self.parent.settings['Test Template']
return self._has_value(template) or self._has_value(parent_template)
def _has_disabling_value(self, setting):
if setting is None:
return False
return setting == [] or setting[0].value.upper() == 'NONE'
def _has_value(self, setting):
return setting and setting[0].value
class KeywordSettings(Settings):
names = (
'Documentation',
'Arguments',
'Teardown',
'Timeout',
'Tags',
'Return'
)
def _format_name(self, name):
return name[1:-1].strip()
|
src/robot/parsing/lexer/settings.py
| 0.664758 | 0.240412 |
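_normalize_name collapses whitespace, title-cases the raw setting name and then applies the alias table, which is how "task   setup" in a data file ends up handled as "Test Setup". A minimal sketch of that path (normalize_whitespace is imported from robot.utils exactly as above; the stripped-down alias dict is for illustration only):
from robot.utils import normalize_whitespace
ALIASES = {'Task Setup': 'Test Setup', 'Task Timeout': 'Test Timeout'}
def normalize_setting_name(name: str) -> str:
    # Collapse whitespace runs, title-case, then map task-style aliases.
    name = normalize_whitespace(name).title()
    return ALIASES.get(name, name)
print(normalize_setting_name('task   setup'))   # Test Setup
print(normalize_setting_name('Documentation'))  # Documentation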
import os
import sys
import boto3
import time
import acme
from botocore.exceptions import ClientError, WaiterError
LOGGER = acme.get_logger(__name__)
# AWS SSM Client
SSM_CLIENT = boto3.client('ssm')
def generate_csr(common_name, instance_id, platform, subject_alternative_names, path):
"""
Use AWS SSM Run Commands to generate a private key and CSR
on the system and output the CSR value to generate a new
certificate.
"""
cert_root_path = path
cert_parent_dir = "certs"
commandList = []
# Check if sans are provided
if len(subject_alternative_names) > 0:
# If sans are provided, iterate through them
# and create a formatted string for use with
# the openssl command
sans = []
for san in subject_alternative_names:
entry = "DNS:{san}".format(san=san)
sans.append(entry)
sans_string = ",".join(sans)
if platform == "Ubuntu":
commandList.append(
'mkdir -p {path}'.format(path=os.path.join(cert_root_path, cert_parent_dir)))
commandList.append('( cat /etc/ssl/openssl.cnf ; echo \"\\n[SAN]\\nsubjectAltName={sans}\"; ) > {path}'.format(
sans=sans_string, path=os.path.join(cert_root_path, cert_parent_dir, "config")))
elif platform == "CentOS Linux":
commandList.append(
'mkdir -p {path}'.format(path=os.path.join(cert_root_path, cert_parent_dir)))
commandList.append('( cat /etc/pki/tls/openssl.cnf ; echo -e \"\\n[SAN]\\nsubjectAltName={sans}\"; ) > {path}'.format(
sans=sans_string, path=os.path.join(cert_root_path, cert_parent_dir, "config")))
else:
            LOGGER.error('Platform {platform} is not supported.'.format(
                platform=platform))
sys.exit(1)
commandList.append('openssl req -nodes -newkey rsa:2048 -keyout {private_key_path} -subj "/C={country}/ST={state}/L={locality}/O={organization}/OU={org_unit}/CN={common_name}/emailAddress={email}" -reqexts SAN -config {config_path}'.format(private_key_path=os.path.join(
cert_root_path, cert_parent_dir, "{}.key".format(common_name)), config_path=os.path.join(cert_root_path, cert_parent_dir, "config"), country=os.environ['country'], state=os.environ['state'], locality=os.environ['locality'], organization=os.environ['organization'], org_unit=os.environ['organization_unit'], common_name=common_name, email=os.environ['email']))
commandList.append('rm -rf config')
else:
# Omit sans portion of openssl command
commandList.append('openssl req -nodes -newkey rsa:2048 -keyout {private_key_path} -subj "/C={country}/ST={state}/L={locality}/O={organization}/OU={org_unit}/CN={common_name}/emailAddress={email}"'.format(private_key_path=os.path.join(cert_root_path, cert_parent_dir, "{}.key".format(
common_name)), country=os.environ['country'], state=os.environ['state'], locality=os.environ['locality'], organization=os.environ['organization'], org_unit=os.environ['organization_unit'], common_name=common_name, email=os.environ['email']))
# Generate SSM run command parameters
parameters = {}
parameters["commands"] = commandList
# Send the run command to the target system and
# grab the CSR from the output
invocation = _send_run_command(instance_id, parameters)
local_path = os.environ['HOME']
csr_path = "{path}/csr".format(path=local_path)
# Write the CSR to a file
    with open(csr_path, 'wb') as csr_file:
        csr_file.write(invocation['StandardOutputContent'].encode())
LOGGER.info('Successfully generated new CSR')
def import_certificate(common_name, instance_id, path):
"""
Use AWS SSM Run Commands to import the certificates
to the system.
"""
local_path = os.environ['HOME']
cert_root_path = path
cert_parent_dir = "certs"
# Generate SSM run command parameters
commandList = []
parameters = {}
# Read the certificate contents from the path
certificate_paths = [
"{path}/.acme.sh/{common_name}/fullchain.cer".format(
path=local_path, common_name=common_name),
"{path}/.acme.sh/{common_name}/ca.cer".format(
path=local_path, common_name=common_name),
"{path}/.acme.sh/{common_name}/{common_name}.cer".format(
path=local_path, common_name=common_name),
]
# Iterate through paths to generate command list parameter
for path in certificate_paths:
cert = open(path, 'r')
cert_contents = cert.read()
cert.close()
commandList.append('echo \"{cert}\" > {cert_path}'.format(
cert=cert_contents, cert_path=os.path.join(cert_root_path, cert_parent_dir, os.path.basename(path))))
# Add command list to commands parameter
parameters["commands"] = commandList
# Send the run command to the target system to
# copy the cert contents to a file
_send_run_command(instance_id, parameters)
LOGGER.info("Successfully imported the new certificates")
def get_system_metadata(hostnames):
"""
Get system metadata from AWS SSM managed system inventory
"""
for hostname in hostnames:
system_name = hostname.split(".")[0]
try:
response = SSM_CLIENT.describe_instance_information(
Filters=[
{
'Key': 'tag:Name',
'Values': [
system_name,
]
},
]
)
# Check to see if multiple systems match the given filter
# criteria and return an error if so
if len(response['InstanceInformationList']) == 1:
message = 'System found with a matching name of: `{system_name}`'.format(
system_name=hostname)
LOGGER.info(message)
if response['InstanceInformationList'][0]['PingStatus'] != "Online":
message = 'The system is not online or the AWS SSM Agent is not functioning properly.'
LOGGER.error(message)
sys.exit(1)
elif len(response['InstanceInformationList']) > 1:
message = 'There are multiple systems with a matching name of: `{system_name}`'.format(
system_name=hostname)
LOGGER.error(message)
sys.exit(1)
return response['InstanceInformationList'][0]
except IndexError as error:
message = 'There are no systems with a matching name of: `{system_name}`'.format(
system_name=hostname)
LOGGER.info(message)
continue
    message = 'There are no systems matching any of the provided hostnames: {hostnames}'.format(
        hostnames=hostnames)
LOGGER.error(message)
sys.exit(1)
def run_hooks(instance_id, path):
"""
Run all scripts in path in alphabetical order
"""
LOGGER.info("Running scripts in {}...".format(path))
# Generate SSM run command parameters
commandList = []
commandList.append('mkdir -p {path}'.format(path=path))
commandList.append('touch {path}/template.sh'.format(path=path))
commandList.append(
'for each in {path}/*.sh ; do bash $each || exit ; done'.format(path=path))
parameters = {}
parameters["commands"] = commandList
# Send the run command to the target system to
# run all scripts in alphabetical order in the provided
# path
_send_run_command(instance_id, parameters)
LOGGER.info("Successfully ran all scripts in {}".format(path))
def _wait_for_success(command_id, instance_id):
"""
Wait for AWS SSM Run Command to be executed on system
"""
LOGGER.debug(
'Waiting for run command {} to complete...'.format(command_id))
try:
waiter = SSM_CLIENT.get_waiter('command_executed')
waiter.wait(
CommandId=command_id,
InstanceId=instance_id
)
return _get_command_status(command_id, instance_id)
except WaiterError as error:
invocation = _get_command_status(command_id, instance_id)
message = 'Run Command {command_id} failed with error: {error}'.format(
command_id=command_id, error=invocation['StandardErrorContent'])
LOGGER.error(message)
sys.exit(1)
except Exception as error:
        message = 'Run Command {command_id} failed: {error}'.format(
            command_id=command_id, error=error)
        LOGGER.error(message)
sys.exit(1)
def _send_run_command(instance_id, parameters):
"""
Send run command to target systems
"""
LOGGER.debug('Sending run command to {} system...'.format(instance_id))
try:
response = SSM_CLIENT.send_command(
InstanceIds=[instance_id],
DocumentName='AWS-RunShellScript',
DocumentVersion='$DEFAULT',
TimeoutSeconds=240,
Parameters=parameters,
CloudWatchOutputConfig={
'CloudWatchLogGroupName': "/aws/ssm/AWS-RunShellScript",
'CloudWatchOutputEnabled': True
}
)
LOGGER.debug('Send Command Response: {}'.format(response))
except ClientError as err:
if 'ThrottlingException' in str(err):
LOGGER.warning('RunCommand throttled, automatically retrying...')
return _send_run_command(instance_id, parameters)
else:
LOGGER.error(
'Send Run Command function failed!\n{}'.format(str(err)))
sys.exit(1)
return _wait_for_success(response['Command']['CommandId'], instance_id)
def _get_command_status(command_id, instance_id):
"""
Get SSM run command status
"""
LOGGER.debug('Checking SSM Run Command {0} status for {1}'.format(
command_id, instance_id))
try:
time.sleep(5)
invocation = SSM_CLIENT.get_command_invocation(
CommandId=command_id,
InstanceId=instance_id
)
return invocation
except ClientError as err:
if 'ThrottlingException' in str(err):
LOGGER.warning('RunCommand throttled, automatically retrying...')
return _get_command_status(command_id, instance_id)
else:
LOGGER.error(
'Get SSM Command Status function failed!\n{}'.format(str(err)))
sys.exit(1)
def main():
region_name = os.environ['AWS_REGION']
system_name = os.environ['SYSTEM_NAME']
common_name = os.environ['COMMON_NAME']
dns = os.environ['ACME_DNS']
local_path = os.environ['HOME']
remote_path = "/opt/otter"
hooks_path = os.path.join(remote_path, "hooks")
subject_alternative_names = acme.query_subject_alternative_names(
system_name)
le_client = acme.LetsEncrypt(
hostname=system_name,
common_name=common_name,
subdelegate=dns,
subject_alternative_names=subject_alternative_names,
region=region_name)
hostnames = subject_alternative_names
hostnames.insert(0, system_name)
system_metadata = get_system_metadata(hostnames)
instance_id = system_metadata['InstanceId']
platform = system_metadata['PlatformName']
# Run scripts before new certificates are created
run_hooks(instance_id, os.path.join(hooks_path, "pre"))
generate_csr(common_name, instance_id, platform,
subject_alternative_names, remote_path)
le_client.acme_production(csr=f'{local_path}/csr')
import_certificate(common_name, instance_id, remote_path)
expiration = acme.query_certificate_expiration(system_name, common_name)
acme.update_certificate_expiration(system_name, expiration)
# Run scripts after new certificate is created and uploaded
# to the system
run_hooks(instance_id, os.path.join(hooks_path, "post"))
if __name__ == '__main__':
main()
|
platforms/linux-aws-ssm/src/app.py
|
import os
import sys
import boto3
import time
import acme
from botocore.exceptions import ClientError, WaiterError
LOGGER = acme.get_logger(__name__)
# AWS SSM Client
SSM_CLIENT = boto3.client('ssm')
def generate_csr(common_name, instance_id, platform, subject_alternative_names, path):
"""
Use AWS SSM Run Commands to generate a private key and CSR
on the system and output the CSR value to generate a new
certificate.
"""
cert_root_path = path
cert_parent_dir = "certs"
commandList = []
# Check if sans are provided
if len(subject_alternative_names) > 0:
# If sans are provided, iterate through them
# and create a formatted string for use with
# the openssl command
sans = []
for san in subject_alternative_names:
entry = "DNS:{san}".format(san=san)
sans.append(entry)
sans_string = ",".join(sans)
if platform == "Ubuntu":
commandList.append(
'mkdir -p {path}'.format(path=os.path.join(cert_root_path, cert_parent_dir)))
commandList.append('( cat /etc/ssl/openssl.cnf ; echo \"\\n[SAN]\\nsubjectAltName={sans}\"; ) > {path}'.format(
sans=sans_string, path=os.path.join(cert_root_path, cert_parent_dir, "config")))
elif platform == "CentOS Linux":
commandList.append(
'mkdir -p {path}'.format(path=os.path.join(cert_root_path, cert_parent_dir)))
commandList.append('( cat /etc/pki/tls/openssl.cnf ; echo -e \"\\n[SAN]\\nsubjectAltName={sans}\"; ) > {path}'.format(
sans=sans_string, path=os.path.join(cert_root_path, cert_parent_dir, "config")))
else:
            LOGGER.error('Platform {platform} is not supported.'.format(
                platform=platform))
sys.exit(1)
commandList.append('openssl req -nodes -newkey rsa:2048 -keyout {private_key_path} -subj "/C={country}/ST={state}/L={locality}/O={organization}/OU={org_unit}/CN={common_name}/emailAddress={email}" -reqexts SAN -config {config_path}'.format(private_key_path=os.path.join(
cert_root_path, cert_parent_dir, "{}.key".format(common_name)), config_path=os.path.join(cert_root_path, cert_parent_dir, "config"), country=os.environ['country'], state=os.environ['state'], locality=os.environ['locality'], organization=os.environ['organization'], org_unit=os.environ['organization_unit'], common_name=common_name, email=os.environ['email']))
commandList.append('rm -rf config')
else:
# Omit sans portion of openssl command
commandList.append('openssl req -nodes -newkey rsa:2048 -keyout {private_key_path} -subj "/C={country}/ST={state}/L={locality}/O={organization}/OU={org_unit}/CN={common_name}/emailAddress={email}"'.format(private_key_path=os.path.join(cert_root_path, cert_parent_dir, "{}.key".format(
common_name)), country=os.environ['country'], state=os.environ['state'], locality=os.environ['locality'], organization=os.environ['organization'], org_unit=os.environ['organization_unit'], common_name=common_name, email=os.environ['email']))
# Generate SSM run command parameters
parameters = {}
parameters["commands"] = commandList
# Send the run command to the target system and
# grab the CSR from the output
invocation = _send_run_command(instance_id, parameters)
local_path = os.environ['HOME']
csr_path = "{path}/csr".format(path=local_path)
# Write the CSR to a file
open(csr_path, 'wb').write(invocation['StandardOutputContent'].encode())
LOGGER.info('Successfully generated new CSR')
def import_certificate(common_name, instance_id, path):
"""
Use AWS SSM Run Commands to import the certificates
to the system.
"""
local_path = os.environ['HOME']
cert_root_path = path
cert_parent_dir = "certs"
# Generate SSM run command parameters
commandList = []
parameters = {}
# Read the certificate contents from the path
certificate_paths = [
"{path}/.acme.sh/{common_name}/fullchain.cer".format(
path=local_path, common_name=common_name),
"{path}/.acme.sh/{common_name}/ca.cer".format(
path=local_path, common_name=common_name),
"{path}/.acme.sh/{common_name}/{common_name}.cer".format(
path=local_path, common_name=common_name),
]
# Iterate through paths to generate command list parameter
for path in certificate_paths:
        with open(path, 'r') as cert:
            cert_contents = cert.read()
commandList.append('echo \"{cert}\" > {cert_path}'.format(
cert=cert_contents, cert_path=os.path.join(cert_root_path, cert_parent_dir, os.path.basename(path))))
# Add command list to commands parameter
parameters["commands"] = commandList
# Send the run command to the target system to
# copy the cert contents to a file
_send_run_command(instance_id, parameters)
LOGGER.info("Successfully imported the new certificates")
def get_system_metadata(hostnames):
"""
Get system metadata from AWS SSM managed system inventory
"""
for hostname in hostnames:
system_name = hostname.split(".")[0]
try:
response = SSM_CLIENT.describe_instance_information(
Filters=[
{
'Key': 'tag:Name',
'Values': [
system_name,
]
},
]
)
# Check to see if multiple systems match the given filter
# criteria and return an error if so
if len(response['InstanceInformationList']) == 1:
message = 'System found with a matching name of: `{system_name}`'.format(
system_name=hostname)
LOGGER.info(message)
if response['InstanceInformationList'][0]['PingStatus'] != "Online":
message = 'The system is not online or the AWS SSM Agent is not functioning properly.'
LOGGER.error(message)
sys.exit(1)
elif len(response['InstanceInformationList']) > 1:
message = 'There are multiple systems with a matching name of: `{system_name}`'.format(
system_name=hostname)
LOGGER.error(message)
sys.exit(1)
return response['InstanceInformationList'][0]
except IndexError as error:
message = 'There are no systems with a matching name of: `{system_name}`'.format(
system_name=hostname)
LOGGER.info(message)
continue
    message = 'There are no systems matching any of the provided hostnames: {hostnames}'.format(
        hostnames=hostnames)
LOGGER.error(message)
sys.exit(1)
def run_hooks(instance_id, path):
"""
Run all scripts in path in alphabetical order
"""
LOGGER.info("Running scripts in {}...".format(path))
# Generate SSM run command parameters
commandList = []
commandList.append('mkdir -p {path}'.format(path=path))
commandList.append('touch {path}/template.sh'.format(path=path))
commandList.append(
'for each in {path}/*.sh ; do bash $each || exit ; done'.format(path=path))
parameters = {}
parameters["commands"] = commandList
# Send the run command to the target system to
# run all scripts in alphabetical order in the provided
# path
_send_run_command(instance_id, parameters)
LOGGER.info("Successfully ran all scripts in {}".format(path))
def _wait_for_success(command_id, instance_id):
"""
Wait for AWS SSM Run Command to be executed on system
"""
LOGGER.debug(
'Waiting for run command {} to complete...'.format(command_id))
try:
waiter = SSM_CLIENT.get_waiter('command_executed')
waiter.wait(
CommandId=command_id,
InstanceId=instance_id
)
return _get_command_status(command_id, instance_id)
except WaiterError as error:
invocation = _get_command_status(command_id, instance_id)
message = 'Run Command {command_id} failed with error: {error}'.format(
command_id=command_id, error=invocation['StandardErrorContent'])
LOGGER.error(message)
sys.exit(1)
except Exception as error:
        message = 'Run Command {command_id} failed: {error}'.format(
            command_id=command_id, error=error)
        LOGGER.error(message)
sys.exit(1)
def _send_run_command(instance_id, parameters):
"""
Send run command to target systems
"""
LOGGER.debug('Sending run command to {} system...'.format(instance_id))
try:
response = SSM_CLIENT.send_command(
InstanceIds=[instance_id],
DocumentName='AWS-RunShellScript',
DocumentVersion='$DEFAULT',
TimeoutSeconds=240,
Parameters=parameters,
CloudWatchOutputConfig={
'CloudWatchLogGroupName': "/aws/ssm/AWS-RunShellScript",
'CloudWatchOutputEnabled': True
}
)
LOGGER.debug('Send Command Response: {}'.format(response))
except ClientError as err:
if 'ThrottlingException' in str(err):
LOGGER.warning('RunCommand throttled, automatically retrying...')
return _send_run_command(instance_id, parameters)
else:
LOGGER.error(
'Send Run Command function failed!\n{}'.format(str(err)))
sys.exit(1)
return _wait_for_success(response['Command']['CommandId'], instance_id)
def _get_command_status(command_id, instance_id):
"""
Get SSM run command status
"""
LOGGER.debug('Checking SSM Run Command {0} status for {1}'.format(
command_id, instance_id))
try:
time.sleep(5)
invocation = SSM_CLIENT.get_command_invocation(
CommandId=command_id,
InstanceId=instance_id
)
return invocation
except ClientError as err:
if 'ThrottlingException' in str(err):
LOGGER.warning('RunCommand throttled, automatically retrying...')
return _get_command_status(command_id, instance_id)
else:
LOGGER.error(
'Get SSM Command Status function failed!\n{}'.format(str(err)))
sys.exit(1)
def main():
region_name = os.environ['AWS_REGION']
system_name = os.environ['SYSTEM_NAME']
common_name = os.environ['COMMON_NAME']
dns = os.environ['ACME_DNS']
local_path = os.environ['HOME']
remote_path = "/opt/otter"
hooks_path = os.path.join(remote_path, "hooks")
subject_alternative_names = acme.query_subject_alternative_names(
system_name)
le_client = acme.LetsEncrypt(
hostname=system_name,
common_name=common_name,
subdelegate=dns,
subject_alternative_names=subject_alternative_names,
region=region_name)
hostnames = subject_alternative_names
hostnames.insert(0, system_name)
system_metadata = get_system_metadata(hostnames)
instance_id = system_metadata['InstanceId']
platform = system_metadata['PlatformName']
# Run scripts before new certificates are created
run_hooks(instance_id, os.path.join(hooks_path, "pre"))
generate_csr(common_name, instance_id, platform,
subject_alternative_names, remote_path)
le_client.acme_production(csr=f'{local_path}/csr')
import_certificate(common_name, instance_id, remote_path)
expiration = acme.query_certificate_expiration(system_name, common_name)
acme.update_certificate_expiration(system_name, expiration)
# Run scripts after new certificate is created and uploaded
# to the system
run_hooks(instance_id, os.path.join(hooks_path, "post"))
if __name__ == '__main__':
main()
| 0.412885 | 0.123471 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch.nn import init
__all__ = ["MobileNetV3", "mobilenet_v3_large", "mobilenet_v3_small"]
model_urls = {
"mobilenet_v3_large": "https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth",
"mobilenet_v3_small": "https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth",
}
class hswish(nn.Module):
def forward(self, x):
out = x * F.relu6(x + 3, inplace=True) / 6
return out
class hsigmoid(nn.Module):
def forward(self, x):
out = F.relu6(x + 3, inplace=True) / 6
return out
class SeModule(nn.Module):
def __init__(self, in_size, reduction=4):
super(SeModule, self).__init__()
self.se = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_size, in_size // reduction, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(in_size // reduction),
nn.ReLU(inplace=True),
nn.Conv2d(in_size // reduction, in_size, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(in_size),
hsigmoid()
)
def forward(self, x):
return x * self.se(x)
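# Fill a ConvTranspose2d weight tensor with a bilinear upsampling kernel,
# replicated across all channels (each channel upsamples independently).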
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
def fill_fc_weights(layers):
for m in layers.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
# torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')
# torch.nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Block(nn.Module):
'''expand + depthwise + pointwise'''
def __init__(self, kernel_size, in_size, expand_size, out_size, nolinear, semodule, stride):
super(Block, self).__init__()
self.stride = stride
self.se = semodule
self.conv1 = nn.Conv2d(in_size, expand_size, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(expand_size)
self.nolinear1 = nolinear
self.conv2 = nn.Conv2d(expand_size, expand_size, kernel_size=kernel_size, stride=stride,
padding=kernel_size // 2, groups=expand_size, bias=False)
self.bn2 = nn.BatchNorm2d(expand_size)
self.nolinear2 = nolinear
self.conv3 = nn.Conv2d(expand_size, out_size, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(out_size)
self.shortcut = nn.Sequential()
if stride == 1 and in_size != out_size:
self.shortcut = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_size),
)
def forward(self, x):
out = self.nolinear1(self.bn1(self.conv1(x)))
out = self.nolinear2(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
        if self.se is not None:
out = self.se(out)
out = out + self.shortcut(x) if self.stride == 1 else out
return out
class MobileNetV3_Large(nn.Module):
def __init__(self, heads, head_conv, final_kernel):
super(MobileNetV3_Large, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.hs1 = hswish()
self.bneck = nn.Sequential(
Block(3, 16, 16, 16, nn.ReLU(inplace=True), None, 1),
Block(3, 16, 64, 24, nn.ReLU(inplace=True), None, 2),
Block(3, 24, 72, 24, nn.ReLU(inplace=True), None, 1),
)
self.bneck1 = nn.Sequential(
Block(5, 24, 72, 40, nn.ReLU(inplace=True), SeModule(40), 2),
Block(5, 40, 120, 40, nn.ReLU(inplace=True), SeModule(40), 1),
Block(5, 40, 120, 40, nn.ReLU(inplace=True), SeModule(40), 1),
)
self.bneck2 = nn.Sequential(
Block(3, 40, 240, 80, hswish(), None, 2),
Block(3, 80, 200, 80, hswish(), None, 1),
Block(3, 80, 184, 80, hswish(), None, 1),
Block(3, 80, 184, 80, hswish(), None, 1),
Block(3, 80, 480, 112, hswish(), SeModule(112), 1),
Block(3, 112, 672, 112, hswish(), SeModule(112), 1),
Block(5, 112, 672, 160, hswish(), SeModule(160), 1),
)
self.bneck3 = nn.Sequential(
Block(5, 160, 672, 160, hswish(), SeModule(160), 2),
Block(5, 160, 960, 160, hswish(), SeModule(160), 1),
)
self.conv2 = nn.Conv2d(160, 960, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(960)
self.hs2 = hswish()
self.init_params()
# ??
self.inplanes = 64
self.deconv3 = nn.ConvTranspose2d(960, self.inplanes, 8*2, stride=8, padding= 8//2, output_padding=0,
groups=self.inplanes, bias=False)
self.bn3 = nn.BatchNorm2d(64)
self.hs3 = hswish()
self.heads = heads
for head in self.heads:
classes = self.heads[head]
if head_conv > 0:
fc = nn.Sequential(
nn.Conv2d(64, head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, classes,
kernel_size=1, stride=1,
padding=0, bias=True))
if 'hm' in head:
fc[-1].bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
else:
fc = nn.Conv2d(64, classes,
kernel_size=1, stride=1,
padding=0, bias=True)
if 'hm' in head:
fc.bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
self.__setattr__(head, fc)
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, x):
out = self.hs1(self.bn1(self.conv1(x)))
out = self.bneck(out)
out = self.bneck1(out)
out = self.bneck2(out)
out = self.bneck3(out)
out = self.hs2(self.bn2(self.conv2(out)))
out = self.hs3(self.bn3(self.deconv3(out)))
ret = {}
for head in self.heads:
ret[head] = self.__getattr__(head)(out)
return [ret]
class MobileNetV3_Small(nn.Module):
def __init__(self, heads, head_conv, final_kernel):
super(MobileNetV3_Small, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.hs1 = hswish()
self.bneck = nn.Sequential(
Block(3, 16, 16, 16, nn.ReLU(inplace=True), SeModule(16), 2),
Block(3, 16, 72, 24, nn.ReLU(inplace=True), None, 2),
Block(3, 24, 88, 24, nn.ReLU(inplace=True), None, 1),
Block(5, 24, 96, 40, hswish(), SeModule(40), 2),
Block(5, 40, 240, 40, hswish(), SeModule(40), 1),
Block(5, 40, 240, 40, hswish(), SeModule(40), 1),
Block(5, 40, 120, 48, hswish(), SeModule(48), 1),
Block(5, 48, 144, 48, hswish(), SeModule(48), 1),
Block(5, 48, 288, 96, hswish(), SeModule(96), 2),
Block(5, 96, 576, 96, hswish(), SeModule(96), 1),
Block(5, 96, 576, 96, hswish(), SeModule(96), 1),
)
self.conv2 = nn.Conv2d(96, 576, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(576) # ??
self.hs2 = hswish()
self.init_params()
self.inplanes = 64
self.deconv3 = nn.ConvTranspose2d(576, self.inplanes, 8*2, stride=8, padding= 8//2, output_padding=0,
groups=self.inplanes, bias=False)
self.bn3 = nn.BatchNorm2d(64)
self.hs3 = hswish()
self.heads = heads
for head in self.heads:
classes = self.heads[head]
if head_conv > 0:
fc = nn.Sequential(
nn.Conv2d(64, head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, classes,
kernel_size=1, stride=1,
padding=0, bias=True))
if 'hm' in head:
fc[-1].bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
else:
fc = nn.Conv2d(64, classes,
kernel_size=1, stride=1,
padding=0, bias=True)
if 'hm' in head:
fc.bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
self.__setattr__(head, fc)
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, x):
out = self.hs1(self.bn1(self.conv1(x)))
out = self.bneck(out)
out = self.hs2(self.bn2(self.conv2(out)))
out = self.hs3(self.bn3(self.deconv3(out)))
ret = {}
for head in self.heads:
ret[head] = self.__getattr__(head)(out)
return [ret]
mobilenetv3_spec = {'large': MobileNetV3_Large,
'small': MobileNetV3_Small}
def get_mobilenet_v3(num_layers, heads, head_conv):
select_model = mobilenetv3_spec[num_layers]
model = select_model(heads=heads, head_conv=head_conv, final_kernel=1)
return model
|
src/lib/models/networks/mobilenetv3.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch.nn import init
__all__ = ["MobileNetV3", "mobilenet_v3_large", "mobilenet_v3_small"]
model_urls = {
"mobilenet_v3_large": "https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth",
"mobilenet_v3_small": "https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth",
}
class hswish(nn.Module):
def forward(self, x):
out = x * F.relu6(x + 3, inplace=True) / 6
return out
class hsigmoid(nn.Module):
def forward(self, x):
out = F.relu6(x + 3, inplace=True) / 6
return out
class SeModule(nn.Module):
def __init__(self, in_size, reduction=4):
super(SeModule, self).__init__()
self.se = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_size, in_size // reduction, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(in_size // reduction),
nn.ReLU(inplace=True),
nn.Conv2d(in_size // reduction, in_size, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(in_size),
hsigmoid()
)
def forward(self, x):
return x * self.se(x)
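# Fill a ConvTranspose2d weight tensor with a bilinear upsampling kernel,
# replicated across all channels (each channel upsamples independently).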
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
def fill_fc_weights(layers):
for m in layers.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
# torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')
# torch.nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Block(nn.Module):
'''expand + depthwise + pointwise'''
def __init__(self, kernel_size, in_size, expand_size, out_size, nolinear, semodule, stride):
super(Block, self).__init__()
self.stride = stride
self.se = semodule
self.conv1 = nn.Conv2d(in_size, expand_size, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(expand_size)
self.nolinear1 = nolinear
self.conv2 = nn.Conv2d(expand_size, expand_size, kernel_size=kernel_size, stride=stride,
padding=kernel_size // 2, groups=expand_size, bias=False)
self.bn2 = nn.BatchNorm2d(expand_size)
self.nolinear2 = nolinear
self.conv3 = nn.Conv2d(expand_size, out_size, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(out_size)
self.shortcut = nn.Sequential()
if stride == 1 and in_size != out_size:
self.shortcut = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_size),
)
def forward(self, x):
out = self.nolinear1(self.bn1(self.conv1(x)))
out = self.nolinear2(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
        if self.se is not None:
out = self.se(out)
out = out + self.shortcut(x) if self.stride == 1 else out
return out
class MobileNetV3_Large(nn.Module):
def __init__(self, heads, head_conv, final_kernel):
super(MobileNetV3_Large, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.hs1 = hswish()
self.bneck = nn.Sequential(
Block(3, 16, 16, 16, nn.ReLU(inplace=True), None, 1),
Block(3, 16, 64, 24, nn.ReLU(inplace=True), None, 2),
Block(3, 24, 72, 24, nn.ReLU(inplace=True), None, 1),
)
self.bneck1 = nn.Sequential(
Block(5, 24, 72, 40, nn.ReLU(inplace=True), SeModule(40), 2),
Block(5, 40, 120, 40, nn.ReLU(inplace=True), SeModule(40), 1),
Block(5, 40, 120, 40, nn.ReLU(inplace=True), SeModule(40), 1),
)
self.bneck2 = nn.Sequential(
Block(3, 40, 240, 80, hswish(), None, 2),
Block(3, 80, 200, 80, hswish(), None, 1),
Block(3, 80, 184, 80, hswish(), None, 1),
Block(3, 80, 184, 80, hswish(), None, 1),
Block(3, 80, 480, 112, hswish(), SeModule(112), 1),
Block(3, 112, 672, 112, hswish(), SeModule(112), 1),
Block(5, 112, 672, 160, hswish(), SeModule(160), 1),
)
self.bneck3 = nn.Sequential(
Block(5, 160, 672, 160, hswish(), SeModule(160), 2),
Block(5, 160, 960, 160, hswish(), SeModule(160), 1),
)
self.conv2 = nn.Conv2d(160, 960, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(960)
self.hs2 = hswish()
self.init_params()
# ??
self.inplanes = 64
self.deconv3 = nn.ConvTranspose2d(960, self.inplanes, 8*2, stride=8, padding= 8//2, output_padding=0,
groups=self.inplanes, bias=False)
self.bn3 = nn.BatchNorm2d(64)
self.hs3 = hswish()
self.heads = heads
for head in self.heads:
classes = self.heads[head]
if head_conv > 0:
fc = nn.Sequential(
nn.Conv2d(64, head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, classes,
kernel_size=1, stride=1,
padding=0, bias=True))
if 'hm' in head:
fc[-1].bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
else:
fc = nn.Conv2d(64, classes,
kernel_size=1, stride=1,
padding=0, bias=True)
if 'hm' in head:
fc.bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
self.__setattr__(head, fc)
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, x):
out = self.hs1(self.bn1(self.conv1(x)))
out = self.bneck(out)
out = self.bneck1(out)
out = self.bneck2(out)
out = self.bneck3(out)
out = self.hs2(self.bn2(self.conv2(out)))
out = self.hs3(self.bn3(self.deconv3(out)))
ret = {}
for head in self.heads:
ret[head] = self.__getattr__(head)(out)
return [ret]
class MobileNetV3_Small(nn.Module):
def __init__(self, heads, head_conv, final_kernel):
super(MobileNetV3_Small, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.hs1 = hswish()
self.bneck = nn.Sequential(
Block(3, 16, 16, 16, nn.ReLU(inplace=True), SeModule(16), 2),
Block(3, 16, 72, 24, nn.ReLU(inplace=True), None, 2),
Block(3, 24, 88, 24, nn.ReLU(inplace=True), None, 1),
Block(5, 24, 96, 40, hswish(), SeModule(40), 2),
Block(5, 40, 240, 40, hswish(), SeModule(40), 1),
Block(5, 40, 240, 40, hswish(), SeModule(40), 1),
Block(5, 40, 120, 48, hswish(), SeModule(48), 1),
Block(5, 48, 144, 48, hswish(), SeModule(48), 1),
Block(5, 48, 288, 96, hswish(), SeModule(96), 2),
Block(5, 96, 576, 96, hswish(), SeModule(96), 1),
Block(5, 96, 576, 96, hswish(), SeModule(96), 1),
)
self.conv2 = nn.Conv2d(96, 576, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(576) # ??
self.hs2 = hswish()
self.init_params()
self.inplanes = 64
self.deconv3 = nn.ConvTranspose2d(576, self.inplanes, 8*2, stride=8, padding= 8//2, output_padding=0,
groups=self.inplanes, bias=False)
self.bn3 = nn.BatchNorm2d(64)
self.hs3 = hswish()
self.heads = heads
for head in self.heads:
classes = self.heads[head]
if head_conv > 0:
fc = nn.Sequential(
nn.Conv2d(64, head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, classes,
kernel_size=1, stride=1,
padding=0, bias=True))
if 'hm' in head:
fc[-1].bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
else:
fc = nn.Conv2d(64, classes,
kernel_size=1, stride=1,
padding=0, bias=True)
if 'hm' in head:
fc.bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
self.__setattr__(head, fc)
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, x):
out = self.hs1(self.bn1(self.conv1(x)))
out = self.bneck(out)
out = self.hs2(self.bn2(self.conv2(out)))
out = self.hs3(self.bn3(self.deconv3(out)))
ret = {}
for head in self.heads:
ret[head] = self.__getattr__(head)(out)
return [ret]
mobilenetv3_spec = {'large': MobileNetV3_Large,
'small': MobileNetV3_Small}
def get_mobilenet_v3(num_layers, heads, head_conv):
select_model = mobilenetv3_spec[num_layers]
model = select_model(heads=heads, head_conv=head_conv, final_kernel=1)
return model
| 0.934328 | 0.311034 |
from __future__ import print_function
import argparse
import json
import time
from apollo import accessible_organisms
from apollo.util import CnOrGuess, GuessCn
from arrow.apollo import get_apollo_instance
from webapollo import UserObj, handle_credentials
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Script to export data from Apollo via web services')
CnOrGuess(parser)
parser.add_argument('--gff', type=argparse.FileType('w'))
parser.add_argument('--gff_with_fasta', action='store_true')
parser.add_argument('--fasta_pep', type=argparse.FileType('w'))
parser.add_argument('--fasta_cds', type=argparse.FileType('w'))
parser.add_argument('--fasta_cdna', type=argparse.FileType('w'))
parser.add_argument('--vcf', type=argparse.FileType('w'))
parser.add_argument('--json', type=argparse.FileType('w'))
parser.add_argument('--die', action='store_true')
parser.add_argument('email', help='User Email')
args = parser.parse_args()
wa = get_apollo_instance()
# User must have an apollo account, if not, create it
gx_user = UserObj(**wa.users._assert_or_create_user(args.email))
handle_credentials(gx_user)
org_cns, seqs = GuessCn(args, wa)
if not isinstance(org_cns, list):
org_cns = [org_cns]
all_orgs = wa.organisms.get_organisms()
if 'error' in all_orgs:
all_orgs = []
all_orgs = [org['commonName'] for org in all_orgs]
def error(message):
if args.die:
raise Exception(message)
else:
print(message)
org_data = []
for org_cn in org_cns:
if org_cn not in all_orgs:
raise Exception("Could not find organism %s" % org_cn)
orgs = accessible_organisms(gx_user, [org_cn], 'READ')
if not orgs:
raise Exception("You do not have read permission on organism %s" % org_cn)
org = wa.organisms.show_organism(org_cn)
# Fetch all the refseqs
realSeqs = wa.organisms.get_sequences(org['id'])
# We'll loop over them individually for decreased memory pressure
for sequence in realSeqs['sequences']:
print("Downloading", sequence)
try:
uuid_gff = wa.io.write_downloadable(org['commonName'], 'GFF3', export_gff3_fasta=args.gff_with_fasta, sequences=[sequence['name']])
if 'error' in uuid_gff or 'uuid' not in uuid_gff:
error("Apollo failed to prepare the GFF3 file for download: %s" % uuid_gff)
args.gff.write(wa.io.download(uuid_gff['uuid'], output_format="text"))
time.sleep(1)
except Exception as e:
error(e)
try:
uuid_vcf = wa.io.write_downloadable(org['commonName'], 'VCF', sequences=[sequence['name']])
if 'error' in uuid_vcf or 'uuid' not in uuid_vcf:
error("Apollo failed to prepare the VCF file for download: %s" % uuid_vcf)
args.vcf.write(wa.io.download(uuid_vcf['uuid'], output_format="text"))
time.sleep(1)
except Exception as e:
error(e)
try:
uuid_fa = wa.io.write_downloadable(org['commonName'], 'FASTA', sequences=[sequence['name']], seq_type='cdna')
if 'error' in uuid_fa or 'uuid' not in uuid_fa:
error("Apollo failed to prepare the cdna FASTA file for download: %s" % uuid_fa)
args.fasta_cdna.write(wa.io.download(uuid_fa['uuid'], output_format="text"))
time.sleep(1)
except Exception as e:
error(e)
try:
uuid_fa = wa.io.write_downloadable(org['commonName'], 'FASTA', sequences=[sequence['name']], seq_type='cds')
if 'error' in uuid_fa or 'uuid' not in uuid_fa:
error("Apollo failed to prepare the cds FASTA file for download: %s" % uuid_fa)
args.fasta_cds.write(wa.io.download(uuid_fa['uuid'], output_format="text"))
time.sleep(1)
except Exception as e:
error(e)
try:
uuid_fa = wa.io.write_downloadable(org['commonName'], 'FASTA', sequences=[sequence['name']], seq_type='peptide')
if 'error' in uuid_fa or 'uuid' not in uuid_fa:
error("Apollo failed to prepare the file for download: %s" % uuid_fa)
args.fasta_pep.write(wa.io.download(uuid_fa['uuid'], output_format="text"))
time.sleep(1)
except Exception as e:
error(e)
org_data.append(org)
args.json.write(json.dumps(org_data, indent=2))
|
tools/apollo/export.py
|
from __future__ import print_function
import argparse
import json
import time
from apollo import accessible_organisms
from apollo.util import CnOrGuess, GuessCn
from arrow.apollo import get_apollo_instance
from webapollo import UserObj, handle_credentials
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Script to export data from Apollo via web services')
CnOrGuess(parser)
parser.add_argument('--gff', type=argparse.FileType('w'))
parser.add_argument('--gff_with_fasta', action='store_true')
parser.add_argument('--fasta_pep', type=argparse.FileType('w'))
parser.add_argument('--fasta_cds', type=argparse.FileType('w'))
parser.add_argument('--fasta_cdna', type=argparse.FileType('w'))
parser.add_argument('--vcf', type=argparse.FileType('w'))
parser.add_argument('--json', type=argparse.FileType('w'))
parser.add_argument('--die', action='store_true')
parser.add_argument('email', help='User Email')
args = parser.parse_args()
wa = get_apollo_instance()
# User must have an apollo account, if not, create it
gx_user = UserObj(**wa.users._assert_or_create_user(args.email))
handle_credentials(gx_user)
org_cns, seqs = GuessCn(args, wa)
if not isinstance(org_cns, list):
org_cns = [org_cns]
all_orgs = wa.organisms.get_organisms()
if 'error' in all_orgs:
all_orgs = []
all_orgs = [org['commonName'] for org in all_orgs]
def error(message):
if args.die:
raise Exception(message)
else:
print(message)
org_data = []
for org_cn in org_cns:
if org_cn not in all_orgs:
raise Exception("Could not find organism %s" % org_cn)
orgs = accessible_organisms(gx_user, [org_cn], 'READ')
if not orgs:
raise Exception("You do not have read permission on organism %s" % org_cn)
org = wa.organisms.show_organism(org_cn)
# Fetch all the refseqs
realSeqs = wa.organisms.get_sequences(org['id'])
# We'll loop over them individually for decreased memory pressure
for sequence in realSeqs['sequences']:
print("Downloading", sequence)
try:
uuid_gff = wa.io.write_downloadable(org['commonName'], 'GFF3', export_gff3_fasta=args.gff_with_fasta, sequences=[sequence['name']])
if 'error' in uuid_gff or 'uuid' not in uuid_gff:
error("Apollo failed to prepare the GFF3 file for download: %s" % uuid_gff)
args.gff.write(wa.io.download(uuid_gff['uuid'], output_format="text"))
time.sleep(1)
except Exception as e:
error(e)
try:
uuid_vcf = wa.io.write_downloadable(org['commonName'], 'VCF', sequences=[sequence['name']])
if 'error' in uuid_vcf or 'uuid' not in uuid_vcf:
error("Apollo failed to prepare the VCF file for download: %s" % uuid_vcf)
args.vcf.write(wa.io.download(uuid_vcf['uuid'], output_format="text"))
time.sleep(1)
except Exception as e:
error(e)
try:
uuid_fa = wa.io.write_downloadable(org['commonName'], 'FASTA', sequences=[sequence['name']], seq_type='cdna')
if 'error' in uuid_fa or 'uuid' not in uuid_fa:
error("Apollo failed to prepare the cdna FASTA file for download: %s" % uuid_fa)
args.fasta_cdna.write(wa.io.download(uuid_fa['uuid'], output_format="text"))
time.sleep(1)
except Exception as e:
error(e)
try:
uuid_fa = wa.io.write_downloadable(org['commonName'], 'FASTA', sequences=[sequence['name']], seq_type='cds')
if 'error' in uuid_fa or 'uuid' not in uuid_fa:
error("Apollo failed to prepare the cds FASTA file for download: %s" % uuid_fa)
args.fasta_cds.write(wa.io.download(uuid_fa['uuid'], output_format="text"))
time.sleep(1)
except Exception as e:
error(e)
try:
uuid_fa = wa.io.write_downloadable(org['commonName'], 'FASTA', sequences=[sequence['name']], seq_type='peptide')
if 'error' in uuid_fa or 'uuid' not in uuid_fa:
error("Apollo failed to prepare the file for download: %s" % uuid_fa)
args.fasta_pep.write(wa.io.download(uuid_fa['uuid'], output_format="text"))
time.sleep(1)
except Exception as e:
error(e)
org_data.append(org)
args.json.write(json.dumps(org_data, indent=2))
| 0.317215 | 0.091018 |
import sympy as sy
from .testfunction import TestMinFunction
from .meta import symbolize, substitute
def Rosenbrock(a=1, b=100):
"""
Creates the Rosenbrock function object.
f(x,y) = (a-x)**2 + b*(y-x**2)**2
    a, b are constants; typically a is set to 1 and b is set to 100.
The global minimum
f(1,1) = 0.
"""
str_expr = '(a-x)**2 + b*(y-x**2)**2'
expr = substitute(sy.sympify(str_expr), [a, b], ['a', 'b'])
return TestMinFunction(**symbolize(expr=expr), optimums=[(1.0, 1.0)])
def Himmelblau():
"""
    Creates Himmelblau's function object.
f(x,y) = (x**2 + y - 11)**2 + (x + y**2 - 7)**2
It has four identical local minima with values 0.0 at
(3.0,2.0),
(-2.805,3.131),
(-3.779,-3.283),
(3.584,-1.848).
"""
str_expr = '(x**2 + y - 11)**2 + (x + y**2 - 7)**2'
return TestMinFunction(**symbolize(str_expr=str_expr),
optimums=[(3.0, 2.0), (-2.805118, 3.131312),
(-3.779310, -3.283186),
(3.584428, -1.848126)])
def GoldsteinPrice():
"""
Creates the Goldstein-Price function object.
f(x,y) = (1+(x+y+1)**2 * (19-14*x+3*x**2-14*y+6*x*y+3*y**2))
*(30+(2*x-3*y)**2 * (18-32*x+12*x**2+48*y-36*x*y+27*y**2))
The global minimum
f(0,-1) = 3.
"""
str_expr = '(1+(x+y+1)**2 * (19-14*x+3*x**2-14*y+6*x*y+3*y**2))' \
'*(30+(2*x-3*y)**2 * (18-32*x+12*x**2+48*y-36*x*y+27*y**2))'
return TestMinFunction(**symbolize(str_expr=str_expr),
optimums=[(0.0, -1.0)])
def Beale():
"""
Creates the Beale function object.
f(x,y) = (1.5 - x + x*y)**2 + (2.25 - x + x*y**2)**2
+ (2.625 - x + x*y**3)**2
The global minimum
f(3,0.5) = 0.
"""
str_expr = '(1.5 - x + x*y)**2 + (2.25 - x + x*y**2)**2 ' \
'+ (2.625 - x + x*y**3)**2'
return TestMinFunction(**symbolize(str_expr=str_expr),
optimums=[(3.0, 0.5)])
def Matyas():
"""
Creates the Matyas function object.
f(x, y) = 0.26*(x**2 + y**2) - 0.48*x*y
The global minimum
f(0.,0.) = 0.
"""
str_expr = '0.26*(x**2 + y**2) - 0.48*x*y'
return TestMinFunction(**symbolize(str_expr=str_expr),
optimums=[(0.0, 0.0)])
if __name__ == '__main__':
"""
check:
- Himmelblau with initial = [-1.,-1.]. Problem with the Hessian
at [-0.270845, -0.923039].
"""
|
src/dewloosh/math/function/functions.py
|
import sympy as sy
from .testfunction import TestMinFunction
from .meta import symbolize, substitute
def Rosenbrock(a=1, b=100):
"""
Creates the Rosenbrock function object.
f(x,y) = (a-x)**2 + b*(y-x**2)**2
    a, b are constants; typically a is set to 1 and b is set to 100.
The global minimum
f(1,1) = 0.
"""
str_expr = '(a-x)**2 + b*(y-x**2)**2'
expr = substitute(sy.sympify(str_expr), [a, b], ['a', 'b'])
return TestMinFunction(**symbolize(expr=expr), optimums=[(1.0, 1.0)])
def Himmelblau():
"""
    Creates Himmelblau's function object.
f(x,y) = (x**2 + y - 11)**2 + (x + y**2 - 7)**2
It has four identical local minima with values 0.0 at
(3.0,2.0),
(-2.805,3.131),
(-3.779,-3.283),
(3.584,-1.848).
"""
str_expr = '(x**2 + y - 11)**2 + (x + y**2 - 7)**2'
return TestMinFunction(**symbolize(str_expr=str_expr),
optimums=[(3.0, 2.0), (-2.805118, 3.131312),
(-3.779310, -3.283186),
(3.584428, -1.848126)])
def GoldsteinPrice():
"""
Creates the Goldstein-Price function object.
f(x,y) = (1+(x+y+1)**2 * (19-14*x+3*x**2-14*y+6*x*y+3*y**2))
*(30+(2*x-3*y)**2 * (18-32*x+12*x**2+48*y-36*x*y+27*y**2))
The global minimum
f(0,-1) = 3.
"""
str_expr = '(1+(x+y+1)**2 * (19-14*x+3*x**2-14*y+6*x*y+3*y**2))' \
'*(30+(2*x-3*y)**2 * (18-32*x+12*x**2+48*y-36*x*y+27*y**2))'
return TestMinFunction(**symbolize(str_expr=str_expr),
optimums=[(0.0, -1.0)])
def Beale():
"""
Creates the Beale function object.
f(x,y) = (1.5 - x + x*y)**2 + (2.25 - x + x*y**2)**2
+ (2.625 - x + x*y**3)**2
The global minimum
f(3,0.5) = 0.
"""
str_expr = '(1.5 - x + x*y)**2 + (2.25 - x + x*y**2)**2 ' \
'+ (2.625 - x + x*y**3)**2'
return TestMinFunction(**symbolize(str_expr=str_expr),
optimums=[(3.0, 0.5)])
def Matyas():
"""
Creates the Matyas function object.
f(x, y) = 0.26*(x**2 + y**2) - 0.48*x*y
The global minimum
f(0.,0.) = 0.
"""
str_expr = '0.26*(x**2 + y**2) - 0.48*x*y'
return TestMinFunction(**symbolize(str_expr=str_expr),
optimums=[(0.0, 0.0)])
if __name__ == '__main__':
"""
check:
- Himmelblau with initial = [-1.,-1.]. Problem with the Hessian
at [-0.270845, -0.923039].
"""
| 0.532668 | 0.597461 |
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
import collections
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
from polyfile.kaitai.parsers import dos_datetime
class Lzh(KaitaiStruct):
"""LHA (LHarc, LZH) is a file format used by a popular freeware
eponymous archiver, created in 1988 by <NAME>. Over the
years, many ports and implementations were developed, sporting many
    extensions to the original 1988 LZH.
    The file format is pretty simple and essentially consists of a stream of
records.
"""
SEQ_FIELDS = ["entries"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['entries']['start'] = self._io.pos()
self.entries = []
i = 0
while not self._io.is_eof():
if not 'arr' in self._debug['entries']:
self._debug['entries']['arr'] = []
self._debug['entries']['arr'].append({'start': self._io.pos()})
_t_entries = Lzh.Record(self._io, self, self._root)
_t_entries._read()
self.entries.append(_t_entries)
self._debug['entries']['arr'][len(self.entries) - 1]['end'] = self._io.pos()
i += 1
self._debug['entries']['end'] = self._io.pos()
class Record(KaitaiStruct):
SEQ_FIELDS = ["header_len", "file_record"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['header_len']['start'] = self._io.pos()
self.header_len = self._io.read_u1()
self._debug['header_len']['end'] = self._io.pos()
if self.header_len > 0:
self._debug['file_record']['start'] = self._io.pos()
self.file_record = Lzh.FileRecord(self._io, self, self._root)
self.file_record._read()
self._debug['file_record']['end'] = self._io.pos()
class FileRecord(KaitaiStruct):
SEQ_FIELDS = ["header", "file_uncompr_crc16", "body"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['header']['start'] = self._io.pos()
self._raw_header = self._io.read_bytes((self._parent.header_len - 1))
_io__raw_header = KaitaiStream(BytesIO(self._raw_header))
self.header = Lzh.Header(_io__raw_header, self, self._root)
self.header._read()
self._debug['header']['end'] = self._io.pos()
if self.header.header1.lha_level == 0:
self._debug['file_uncompr_crc16']['start'] = self._io.pos()
self.file_uncompr_crc16 = self._io.read_u2le()
self._debug['file_uncompr_crc16']['end'] = self._io.pos()
self._debug['body']['start'] = self._io.pos()
self.body = self._io.read_bytes(self.header.header1.file_size_compr)
self._debug['body']['end'] = self._io.pos()
class Header(KaitaiStruct):
SEQ_FIELDS = ["header1", "filename_len", "filename", "file_uncompr_crc16", "os", "ext_header_size"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['header1']['start'] = self._io.pos()
self.header1 = Lzh.Header1(self._io, self, self._root)
self.header1._read()
self._debug['header1']['end'] = self._io.pos()
if self.header1.lha_level == 0:
self._debug['filename_len']['start'] = self._io.pos()
self.filename_len = self._io.read_u1()
self._debug['filename_len']['end'] = self._io.pos()
if self.header1.lha_level == 0:
self._debug['filename']['start'] = self._io.pos()
self.filename = (self._io.read_bytes(self.filename_len)).decode(u"ASCII")
self._debug['filename']['end'] = self._io.pos()
if self.header1.lha_level == 2:
self._debug['file_uncompr_crc16']['start'] = self._io.pos()
self.file_uncompr_crc16 = self._io.read_u2le()
self._debug['file_uncompr_crc16']['end'] = self._io.pos()
if self.header1.lha_level == 2:
self._debug['os']['start'] = self._io.pos()
self.os = self._io.read_u1()
self._debug['os']['end'] = self._io.pos()
if self.header1.lha_level == 2:
self._debug['ext_header_size']['start'] = self._io.pos()
self.ext_header_size = self._io.read_u2le()
self._debug['ext_header_size']['end'] = self._io.pos()
class Header1(KaitaiStruct):
SEQ_FIELDS = ["header_checksum", "method_id", "file_size_compr", "file_size_uncompr", "file_timestamp", "attr", "lha_level"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['header_checksum']['start'] = self._io.pos()
self.header_checksum = self._io.read_u1()
self._debug['header_checksum']['end'] = self._io.pos()
self._debug['method_id']['start'] = self._io.pos()
self.method_id = (self._io.read_bytes(5)).decode(u"ASCII")
self._debug['method_id']['end'] = self._io.pos()
self._debug['file_size_compr']['start'] = self._io.pos()
self.file_size_compr = self._io.read_u4le()
self._debug['file_size_compr']['end'] = self._io.pos()
self._debug['file_size_uncompr']['start'] = self._io.pos()
self.file_size_uncompr = self._io.read_u4le()
self._debug['file_size_uncompr']['end'] = self._io.pos()
self._debug['file_timestamp']['start'] = self._io.pos()
self._raw_file_timestamp = self._io.read_bytes(4)
_io__raw_file_timestamp = KaitaiStream(BytesIO(self._raw_file_timestamp))
self.file_timestamp = dos_datetime.DosDatetime(_io__raw_file_timestamp)
self.file_timestamp._read()
self._debug['file_timestamp']['end'] = self._io.pos()
self._debug['attr']['start'] = self._io.pos()
self.attr = self._io.read_u1()
self._debug['attr']['end'] = self._io.pos()
self._debug['lha_level']['start'] = self._io.pos()
self.lha_level = self._io.read_u1()
self._debug['lha_level']['end'] = self._io.pos()
|
polyfile/kaitai/parsers/lzh.py
|
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
import collections
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
from polyfile.kaitai.parsers import dos_datetime
class Lzh(KaitaiStruct):
"""LHA (LHarc, LZH) is a file format used by a popular freeware
eponymous archiver, created in 1988 by <NAME>. Over the
years, many ports and implementations were developed, sporting many
    extensions to the original 1988 LZH.
    The file format is pretty simple and essentially consists of a stream of
records.
"""
SEQ_FIELDS = ["entries"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['entries']['start'] = self._io.pos()
self.entries = []
i = 0
while not self._io.is_eof():
if not 'arr' in self._debug['entries']:
self._debug['entries']['arr'] = []
self._debug['entries']['arr'].append({'start': self._io.pos()})
_t_entries = Lzh.Record(self._io, self, self._root)
_t_entries._read()
self.entries.append(_t_entries)
self._debug['entries']['arr'][len(self.entries) - 1]['end'] = self._io.pos()
i += 1
self._debug['entries']['end'] = self._io.pos()
class Record(KaitaiStruct):
SEQ_FIELDS = ["header_len", "file_record"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['header_len']['start'] = self._io.pos()
self.header_len = self._io.read_u1()
self._debug['header_len']['end'] = self._io.pos()
if self.header_len > 0:
self._debug['file_record']['start'] = self._io.pos()
self.file_record = Lzh.FileRecord(self._io, self, self._root)
self.file_record._read()
self._debug['file_record']['end'] = self._io.pos()
class FileRecord(KaitaiStruct):
SEQ_FIELDS = ["header", "file_uncompr_crc16", "body"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['header']['start'] = self._io.pos()
self._raw_header = self._io.read_bytes((self._parent.header_len - 1))
_io__raw_header = KaitaiStream(BytesIO(self._raw_header))
self.header = Lzh.Header(_io__raw_header, self, self._root)
self.header._read()
self._debug['header']['end'] = self._io.pos()
if self.header.header1.lha_level == 0:
self._debug['file_uncompr_crc16']['start'] = self._io.pos()
self.file_uncompr_crc16 = self._io.read_u2le()
self._debug['file_uncompr_crc16']['end'] = self._io.pos()
self._debug['body']['start'] = self._io.pos()
self.body = self._io.read_bytes(self.header.header1.file_size_compr)
self._debug['body']['end'] = self._io.pos()
class Header(KaitaiStruct):
SEQ_FIELDS = ["header1", "filename_len", "filename", "file_uncompr_crc16", "os", "ext_header_size"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['header1']['start'] = self._io.pos()
self.header1 = Lzh.Header1(self._io, self, self._root)
self.header1._read()
self._debug['header1']['end'] = self._io.pos()
if self.header1.lha_level == 0:
self._debug['filename_len']['start'] = self._io.pos()
self.filename_len = self._io.read_u1()
self._debug['filename_len']['end'] = self._io.pos()
if self.header1.lha_level == 0:
self._debug['filename']['start'] = self._io.pos()
self.filename = (self._io.read_bytes(self.filename_len)).decode(u"ASCII")
self._debug['filename']['end'] = self._io.pos()
if self.header1.lha_level == 2:
self._debug['file_uncompr_crc16']['start'] = self._io.pos()
self.file_uncompr_crc16 = self._io.read_u2le()
self._debug['file_uncompr_crc16']['end'] = self._io.pos()
if self.header1.lha_level == 2:
self._debug['os']['start'] = self._io.pos()
self.os = self._io.read_u1()
self._debug['os']['end'] = self._io.pos()
if self.header1.lha_level == 2:
self._debug['ext_header_size']['start'] = self._io.pos()
self.ext_header_size = self._io.read_u2le()
self._debug['ext_header_size']['end'] = self._io.pos()
class Header1(KaitaiStruct):
SEQ_FIELDS = ["header_checksum", "method_id", "file_size_compr", "file_size_uncompr", "file_timestamp", "attr", "lha_level"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['header_checksum']['start'] = self._io.pos()
self.header_checksum = self._io.read_u1()
self._debug['header_checksum']['end'] = self._io.pos()
self._debug['method_id']['start'] = self._io.pos()
self.method_id = (self._io.read_bytes(5)).decode(u"ASCII")
self._debug['method_id']['end'] = self._io.pos()
self._debug['file_size_compr']['start'] = self._io.pos()
self.file_size_compr = self._io.read_u4le()
self._debug['file_size_compr']['end'] = self._io.pos()
self._debug['file_size_uncompr']['start'] = self._io.pos()
self.file_size_uncompr = self._io.read_u4le()
self._debug['file_size_uncompr']['end'] = self._io.pos()
self._debug['file_timestamp']['start'] = self._io.pos()
self._raw_file_timestamp = self._io.read_bytes(4)
_io__raw_file_timestamp = KaitaiStream(BytesIO(self._raw_file_timestamp))
self.file_timestamp = dos_datetime.DosDatetime(_io__raw_file_timestamp)
self.file_timestamp._read()
self._debug['file_timestamp']['end'] = self._io.pos()
self._debug['attr']['start'] = self._io.pos()
self.attr = self._io.read_u1()
self._debug['attr']['end'] = self._io.pos()
self._debug['lha_level']['start'] = self._io.pos()
self.lha_level = self._io.read_u1()
self._debug['lha_level']['end'] = self._io.pos()
| 0.303835 | 0.13201 |
from datetime import datetime
from uuid import UUID
from byceps.services.shop.order import service as order_service
from byceps.services.shop.order.transfer.number import OrderNumber
from byceps.services.shop.order.transfer.order import (
Order,
Orderer,
PaymentState,
)
from byceps.services.shop.shop.transfer.models import ShopID
from byceps.services.shop.storefront.transfer.models import StorefrontID
from byceps.typing import UserID
def test_is_open():
payment_state = PaymentState.open
order = create_order_with_payment_state(payment_state)
assert order.payment_state == payment_state
assert order.is_open == True
assert order.is_canceled == False
assert order.is_paid == False
def test_is_canceled():
payment_state = PaymentState.canceled_before_paid
order = create_order_with_payment_state(payment_state)
assert order.payment_state == payment_state
assert order.is_open == False
assert order.is_canceled == True
assert order.is_paid == False
def test_is_paid():
payment_state = PaymentState.paid
order = create_order_with_payment_state(payment_state)
assert order.payment_state == payment_state
assert order.is_open == False
assert order.is_canceled == False
assert order.is_paid == True
def test_is_canceled_after_paid():
payment_state = PaymentState.canceled_after_paid
order = create_order_with_payment_state(payment_state)
assert order.payment_state == payment_state
assert order.is_open == False
assert order.is_canceled == True
assert order.is_paid == False
# helpers
def create_order_with_payment_state(payment_state: PaymentState) -> Order:
shop_id = ShopID('shop-123')
storefront_id = StorefrontID('storefront-123')
order_number = OrderNumber('AEC-03-B00074')
orderer = create_orderer()
created_at = datetime.utcnow()
order = order_service._build_order(
created_at, shop_id, storefront_id, order_number, orderer
)
order.payment_state = payment_state
return order_service._order_to_transfer_object(order)
def create_orderer() -> Orderer:
return Orderer(
user_id=UserID(UUID('d8a9c61c-2286-41b3-85ae-7d9f7a2f3357')),
first_names='<NAME>',
last_name='Doe',
country='State of Mind',
zip_code='31337',
city='Atrocity',
street='Elite Street 1337',
)
|
tests/unit/services/shop/order/test_order_payment_state.py
|
| 0.731346 | 0.397558 |
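The four tests above differ only in the expected flag combination. A parametrized variant is sketched below; it assumes the same imports and the create_order_with_payment_state helper from the file above, and that pytest is the test runner.
import pytest

@pytest.mark.parametrize(
    'payment_state, expected_open, expected_canceled, expected_paid',
    [
        (PaymentState.open, True, False, False),
        (PaymentState.canceled_before_paid, False, True, False),
        (PaymentState.paid, False, False, True),
        (PaymentState.canceled_after_paid, False, True, False),
    ],
)
def test_payment_state_flags(
    payment_state, expected_open, expected_canceled, expected_paid
):
    # Build an order in the given payment state and check the derived flags.
    order = create_order_with_payment_state(payment_state)
    assert order.payment_state == payment_state
    assert order.is_open == expected_open
    assert order.is_canceled == expected_canceled
    assert order.is_paid == expected_paid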
import re
from calendar import day_abbr, day_name, month_abbr, month_name
from datetime import datetime as datetime_
from datetime import timedelta, timezone
from time import localtime
tokens = r"H{1,2}|h{1,2}|m{1,2}|s{1,2}|S{1,6}|YYYY|YY|M{1,4}|D{1,4}|Z{1,2}|zz|A|X|x|E|Q|dddd|ddd|d"
pattern = re.compile(r"(?:{0})|\[(?:{0})\]".format(tokens))
class datetime(datetime_):
def __format__(self, spec):
if not spec:
spec = "%Y-%m-%dT%H:%M:%S.%f%z"
if "%" in spec:
return super().__format__(spec)
year, month, day, hour, minute, second, weekday, yearday, _ = self.timetuple()
microsecond = self.microsecond
timestamp = self.timestamp()
tzinfo = self.tzinfo or timezone(timedelta(seconds=0))
offset = tzinfo.utcoffset(self).total_seconds()
sign = ("-", "+")[offset >= 0]
h, m = divmod(abs(offset // 60), 60)
rep = {
"YYYY": "%04d" % year,
"YY": "%02d" % (year % 100),
"Q": "%d" % ((month - 1) // 3 + 1),
"MMMM": month_name[month - 1],
"MMM": month_abbr[month - 1],
"MM": "%02d" % month,
"M": "%d" % month,
"DDDD": "%03d" % yearday,
"DDD": "%d" % yearday,
"DD": "%02d" % day,
"D": "%d" % day,
"dddd": day_name[weekday],
"ddd": day_abbr[weekday],
"d": "%d" % weekday,
"E": "%d" % (weekday + 1),
"HH": "%02d" % hour,
"H": "%d" % hour,
"hh": "%02d" % ((hour - 1) % 12 + 1),
"h": "%d" % ((hour - 1) % 12 + 1),
"mm": "%02d" % minute,
"m": "%d" % minute,
"ss": "%02d" % second,
"s": "%d" % second,
"S": "%d" % (microsecond // 100000),
"SS": "%02d" % (microsecond // 10000),
"SSS": "%03d" % (microsecond // 1000),
"SSSS": "%04d" % (microsecond // 100),
"SSSSS": "%05d" % (microsecond // 10),
"SSSSSS": "%06d" % microsecond,
"A": ("AM", "PM")[hour // 12],
"Z": "%s%02d:%02d" % (sign, h, m),
"ZZ": "%s%02d%02d" % (sign, h, m),
"zz": tzinfo.tzname(self) or "",
"X": "%d" % timestamp,
"x": "%d" % (int(timestamp) * 1000000 + microsecond),
}
def get(m):
try:
return rep[m.group(0)]
except KeyError:
return m.group(0)[1:-1]
return pattern.sub(get, spec)
def now():
now = datetime.now()
local = localtime(now.timestamp())
tzinfo = timezone(timedelta(seconds=local.tm_gmtoff), local.tm_zone)
return now.replace(tzinfo=tzinfo)
|
loguru/_datetime.py
|
| 0.395718 | 0.329109 |
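A minimal usage sketch of the token-based formatter above. It assumes the file is importable as loguru._datetime (its repo path above); the exact output depends on the local date and timezone.
# Illustrative only: exercises the custom __format__ tokens defined above.
from loguru._datetime import now

ts = now()
print("{:YYYY-MM-DD HH:mm:ss.SSS ZZ}".format(ts))  # e.g. 2021-07-14 09:30:05.123 +0200
print("{:dddd, DD MMM YYYY}".format(ts))           # e.g. Wednesday, 14 Jul 2021
print("{:[YYYY] YYYY}".format(ts))                 # bracketed tokens stay literal: "YYYY 2021"
print("{}".format(ts))                             # empty spec falls back to %Y-%m-%dT%H:%M:%S.%f%z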
class Product:
def __init__(self, product_id, product_price):
self.product_id = product_id # which product
self.product_price = product_price # product_price
self.stock = 0 # temporary stock will be zero
def add_stock(self, quantity):
self.stock = self.stock + quantity
def decrease_stock(self, quantity):
self.stock = self.stock - quantity
def set_price(self, price):
self.product_price = price
class Customer:
def __init__(self, customer_id, balance):
self.customer_id = customer_id
self.balance = balance
self.customer_issued = -1
def deposit_money(self, money_deposit):
self.balance = self.balance + money_deposit
def purchase(self, product_price, quantity):
self.balance = self.balance - product_price * quantity
class Shop:
def __init__(self, shop_balance):
self.shop_balance = shop_balance
self.product_id_dict = {}
self.customer_id_dict = {}
def add_product(self, product_id, product_price):
if str(product_id) in self.product_id_dict:
print("Product_Id Already in use!")
else:
self.product_id_dict[str(product_id)] = Product(
product_id=product_id, product_price=product_price
) # add new product.
print("Added Product id ", product_id, product_price)
def add_customer(self, product_id, balance, customer_id):
if str(customer_id) in self.customer_id_dict:
print("Customer_Id Already in use!")
else:
self.customer_id_dict[str(customer_id)] = Customer(
customer_id=customer_id, balance=balance
            ) # add new customer.
print("Added Customer id ", customer_id, balance)
def purchase_stock(self, product_id, quantity):
# add quantity of product id
self.product_id_dict[str(product_id)].stock += quantity
        # the purchase cost is deducted from the shop balance
self.shop_balance -= (
self.product_id_dict[str(product_id)].product_price * quantity
)
def sell_product(self, product_id, customer_id, quantity):
        # check whether the product is available
if str(product_id) in self.product_id_dict:
            # check whether the customer exists
if str(customer_id) in self.customer_id_dict:
# then sell product by multiplying product price into quantity
total_cost = (
self.product_id_dict[str(product_id)].product_price * quantity
)
# reduce stock from shop
self.product_id_dict[str(product_id)].stock = (
self.product_id_dict[str(product_id)].stock - quantity
)
                # record which product was issued to the customer
                self.customer_id_dict[str(customer_id)].customer_issued = product_id
# add balance in shop
self.shop_balance = self.shop_balance + total_cost
# total cost should be deducted from customer balance
self.customer_id_dict[str(customer_id)].balance -= total_cost
else:
print("Please Add Customer .")
else:
print("Product is not avilable , please add the product")
|
Shop/shop.py
|
| 0.343782 | 0.233684 |
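A short, illustrative walk-through of the classes above (the values are made up; the print calls inside add_product and add_customer produce the confirmation lines):
shop = Shop(shop_balance=1000)
shop.add_product(product_id=1, product_price=50)              # register product 1 at price 50
shop.add_customer(product_id=1, balance=500, customer_id=42)  # register customer 42 with balance 500
shop.purchase_stock(product_id=1, quantity=10)                # shop buys 10 units: stock 10, shop_balance 500
shop.sell_product(product_id=1, customer_id=42, quantity=2)   # customer 42 buys 2 units for 100
print(shop.shop_balance)                                      # 600
print(shop.product_id_dict['1'].stock)                        # 8
print(shop.customer_id_dict['42'].balance)                    # 400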
from __future__ import absolute_import, print_function, unicode_literals
from pyfaup.faup import Faup
from modules import AbstractBolt, load_whitelist, text2urls_whitelisted
from modules.attachments import MailAttachments
class Urls(AbstractBolt):
outputs = ['sha256_random', 'results']
def initialize(self, stormconf, context):
super(Urls, self).initialize(stormconf, context)
# Faup
self.faup = Faup()
# Input bolts for Phishing bolt
self.input_bolts = set(context["source->stream->grouping"].keys())
# All mails
self._mails = {}
        # Load whitelists
self._load_lists()
def _load_lists(self):
        # Load whitelisted domains
self.whitelists = load_whitelist(self.conf.get("whitelists", {}))
self.log("Whitelists domains reloaded", "debug")
def _get_urls(self, greedy_data):
# If mail is filtered don't check for urls
is_filtered = greedy_data["tokenizer"][2]
results = {}
# urls body
if not is_filtered:
text = greedy_data["tokenizer"][1]
urls = text2urls_whitelisted(text, self.whitelists, self.faup)
if urls:
results["body"] = urls
        # Two mails can share the same body but have different attachments
attachments = MailAttachments(greedy_data["attachments"][2])
text = attachments.payloadstext()
urls = text2urls_whitelisted(text, self.whitelists, self.faup)
if urls:
results["attachments"] = urls
return results
def process_tick(self, freq):
"""Every freq seconds you reload the keywords. """
super(Urls, self).process_tick(freq)
self._load_lists()
def process(self, tup):
bolt = tup.component
sha256_random = tup.values[0]
sha256 = sha256_random.split("_")[0]
self.log("Processing started: {}".format(sha256))
values = tup.values
self._mails.setdefault(sha256_random, {})[bolt] = values
diff = self.input_bolts - set(self._mails[sha256_random].keys())
if not diff:
results = self._get_urls(self._mails.pop(sha256_random))
self.emit([sha256_random, results])
|
src/bolts/urls.py
|
| 0.61173 | 0.1844 |
import argparse, os, pathlib, logging, json, copy, pickle
import pandas as pd
from datetime import datetime, timezone
from typing import Optional
from distutils.util import strtobool
from importlib.metadata import version
from emhass.retrieve_hass import retrieve_hass
from emhass.forecast import forecast
from emhass.optimization import optimization
from emhass import utils
def set_input_data_dict(config_path: pathlib.Path, base_path: str, costfun: str,
params: str, runtimeparams: str, set_type: str, logger: logging.Logger,
get_data_from_file: Optional[bool] = False) -> dict:
"""
Set up some of the data needed for the different actions.
:param config_path: The absolute path where the config.yaml file is located
    :type config_path: pathlib.Path
:param costfun: The type of cost function to use for optimization problem
:type costfun: str
:param params: Configuration parameters passed from data/options.json
:type params: str
    :param runtimeparams: Runtime optimization parameters passed as a dictionary
    :type runtimeparams: str
    :param set_type: The type of setup to perform, based on the chosen optimization action
:type set_type: str
:param logger: The passed logger object
:type logger: logging object
:param get_data_from_file: Use data from saved CSV file (useful for debug)
:type get_data_from_file: bool, optional
    :return: A dictionary with the data needed by the action functions
:rtype: dict
"""
logger.info("Setting up needed data")
# Parsing yaml
retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(config_path, params=params)
# Treat runtimeparams
params, optim_conf = utils.treat_runtimeparams(runtimeparams, params, retrieve_hass_conf,
optim_conf, plant_conf, set_type, logger)
# Define main objects
rh = retrieve_hass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'],
retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'],
params, base_path, logger, get_data_from_file=get_data_from_file)
fcst = forecast(retrieve_hass_conf, optim_conf, plant_conf,
params, base_path, logger, get_data_from_file=get_data_from_file)
opt = optimization(retrieve_hass_conf, optim_conf, plant_conf,
fcst.var_load_cost, fcst.var_prod_price,
costfun, base_path, logger)
# Perform setup based on type of action
if set_type == "perfect-optim":
# Retrieve data from hass
if get_data_from_file:
with open(pathlib.Path(base_path+'/data/test_df_final.pkl'), 'rb') as inp:
rh.df_final, days_list, var_list = pickle.load(inp)
else:
days_list = utils.get_days_list(retrieve_hass_conf['days_to_retrieve'])
var_list = [retrieve_hass_conf['var_load'], retrieve_hass_conf['var_PV']]
rh.get_data(days_list, var_list,
minimal_response=False, significant_changes_only=False)
rh.prepare_data(retrieve_hass_conf['var_load'], load_negative = retrieve_hass_conf['load_negative'],
set_zero_min = retrieve_hass_conf['set_zero_min'],
var_replace_zero = retrieve_hass_conf['var_replace_zero'],
var_interp = retrieve_hass_conf['var_interp'])
df_input_data = rh.df_final.copy()
# What we don't need for this type of action
P_PV_forecast, P_load_forecast, df_input_data_dayahead = None, None, None
elif set_type == "dayahead-optim":
# Get PV and load forecasts
df_weather = fcst.get_weather_forecast(method=optim_conf['weather_forecast_method'])
P_PV_forecast = fcst.get_power_from_weather(df_weather)
P_load_forecast = fcst.get_load_forecast(method=optim_conf['load_forecast_method'])
df_input_data_dayahead = pd.concat([P_PV_forecast, P_load_forecast], axis=1)
df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
df_input_data_dayahead.columns = ['P_PV_forecast', 'P_load_forecast']
params = json.loads(params)
if 'prediction_horizon' in params['passed_data'] and params['passed_data']['prediction_horizon'] is not None:
prediction_horizon = params['passed_data']['prediction_horizon']
df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[df_input_data_dayahead.index[0]:df_input_data_dayahead.index[prediction_horizon-1]]
# What we don't need for this type of action
df_input_data, days_list = None, None
elif set_type == "naive-mpc-optim":
# Retrieve data from hass
if get_data_from_file:
with open(pathlib.Path(base_path+'/data/test_df_final.pkl'), 'rb') as inp:
rh.df_final, days_list, var_list = pickle.load(inp)
else:
days_list = utils.get_days_list(1)
var_list = [retrieve_hass_conf['var_load'], retrieve_hass_conf['var_PV']]
rh.get_data(days_list, var_list,
minimal_response=False, significant_changes_only=False)
rh.prepare_data(retrieve_hass_conf['var_load'], load_negative = retrieve_hass_conf['load_negative'],
set_zero_min = retrieve_hass_conf['set_zero_min'],
var_replace_zero = retrieve_hass_conf['var_replace_zero'],
var_interp = retrieve_hass_conf['var_interp'])
df_input_data = rh.df_final.copy()
# Get PV and load forecasts
df_weather = fcst.get_weather_forecast(method=optim_conf['weather_forecast_method'])
P_PV_forecast = fcst.get_power_from_weather(df_weather, set_mix_forecast=True, df_now=df_input_data)
P_load_forecast = fcst.get_load_forecast(method=optim_conf['load_forecast_method'], set_mix_forecast=True, df_now=df_input_data)
df_input_data_dayahead = pd.concat([P_PV_forecast, P_load_forecast], axis=1)
df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
df_input_data_dayahead.columns = ['P_PV_forecast', 'P_load_forecast']
params = json.loads(params)
if 'prediction_horizon' in params['passed_data'] and params['passed_data']['prediction_horizon'] is not None:
prediction_horizon = params['passed_data']['prediction_horizon']
df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[df_input_data_dayahead.index[0]:df_input_data_dayahead.index[prediction_horizon-1]]
elif set_type == "publish-data":
df_input_data, df_input_data_dayahead = None, None
P_PV_forecast, P_load_forecast = None, None
days_list = None
else:
logger.error("The passed action argument and hence the set_type parameter for setup is not valid")
    # The input data dictionary to return
input_data_dict = {
'root': base_path,
'retrieve_hass_conf': retrieve_hass_conf,
'rh': rh,
'opt': opt,
'fcst': fcst,
'df_input_data': df_input_data,
'df_input_data_dayahead': df_input_data_dayahead,
'P_PV_forecast': P_PV_forecast,
'P_load_forecast': P_load_forecast,
'costfun': costfun,
'params': params,
'days_list': days_list
}
return input_data_dict
def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,
save_data_to_file: Optional[bool] = True, debug: Optional[bool] = False) -> pd.DataFrame:
"""
Perform a call to the perfect forecast optimization routine.
    :param input_data_dict: A dictionary with the data needed by the action functions
:type input_data_dict: dict
:param logger: The passed logger object
:type logger: logging object
:param save_data_to_file: Save optimization results to CSV file
:type save_data_to_file: bool, optional
:param debug: A debug option useful for unittests
:type debug: bool, optional
:return: The output data of the optimization
:rtype: pd.DataFrame
"""
logger.info("Performing perfect forecast optimization")
# Load cost and prod price forecast
df_input_data = input_data_dict['fcst'].get_load_cost_forecast(
input_data_dict['df_input_data'],
method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'])
df_input_data = input_data_dict['fcst'].get_prod_price_forecast(
df_input_data, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
opt_res = input_data_dict['opt'].perform_perfect_forecast_optim(df_input_data, input_data_dict['days_list'])
# Save CSV file for analysis
if save_data_to_file:
filename = 'opt_res_perfect_optim_'+input_data_dict['costfun']
else: # Just save the latest optimization results
filename = 'opt_res_latest'
if not debug:
opt_res.to_csv(input_data_dict['root'] + '/data/' + filename + '.csv', index_label='timestamp')
return opt_res
def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
save_data_to_file: Optional[bool] = False, debug: Optional[bool] = False) -> pd.DataFrame:
"""
Perform a call to the day-ahead optimization routine.
    :param input_data_dict: A dictionary with the data needed by the action functions
:type input_data_dict: dict
:param logger: The passed logger object
:type logger: logging object
:param save_data_to_file: Save optimization results to CSV file
:type save_data_to_file: bool, optional
:param debug: A debug option useful for unittests
:type debug: bool, optional
:return: The output data of the optimization
:rtype: pd.DataFrame
"""
logger.info("Performing day-ahead forecast optimization")
# Load cost and prod price forecast
df_input_data_dayahead = input_data_dict['fcst'].get_load_cost_forecast(
input_data_dict['df_input_data_dayahead'],
method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'])
df_input_data_dayahead = input_data_dict['fcst'].get_prod_price_forecast(
df_input_data_dayahead, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
opt_res_dayahead = input_data_dict['opt'].perform_dayahead_forecast_optim(
df_input_data_dayahead, input_data_dict['P_PV_forecast'], input_data_dict['P_load_forecast'])
# Save CSV file for publish_data
if save_data_to_file:
today = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
filename = 'opt_res_dayahead_'+today.strftime("%Y_%m_%d")
else: # Just save the latest optimization results
filename = 'opt_res_latest'
if not debug:
opt_res_dayahead.to_csv(input_data_dict['root'] + '/data/' + filename + '.csv', index_label='timestamp')
return opt_res_dayahead
def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
save_data_to_file: Optional[bool] = False, debug: Optional[bool] = False) -> pd.DataFrame:
"""
Perform a call to the naive Model Predictive Controller optimization routine.
    :param input_data_dict: A dictionary with the data needed by the action functions
:type input_data_dict: dict
:param logger: The passed logger object
:type logger: logging object
:param save_data_to_file: Save optimization results to CSV file
:type save_data_to_file: bool, optional
:param debug: A debug option useful for unittests
:type debug: bool, optional
:return: The output data of the optimization
:rtype: pd.DataFrame
"""
logger.info("Performing naive MPC optimization")
# Load cost and prod price forecast
df_input_data_dayahead = input_data_dict['fcst'].get_load_cost_forecast(
input_data_dict['df_input_data_dayahead'],
method=input_data_dict['fcst'].optim_conf['load_cost_forecast_method'])
df_input_data_dayahead = input_data_dict['fcst'].get_prod_price_forecast(
df_input_data_dayahead, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
    # The specific runtime parameters for the MPC
prediction_horizon = input_data_dict['params']['passed_data']['prediction_horizon']
soc_init = input_data_dict['params']['passed_data']['soc_init']
soc_final = input_data_dict['params']['passed_data']['soc_final']
def_total_hours = input_data_dict['params']['passed_data']['def_total_hours']
opt_res_naive_mpc = input_data_dict['opt'].perform_naive_mpc_optim(
df_input_data_dayahead, input_data_dict['P_PV_forecast'], input_data_dict['P_load_forecast'],
prediction_horizon, soc_init, soc_final, def_total_hours)
# Save CSV file for publish_data
if save_data_to_file:
today = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
filename = 'opt_res_naive_mpc_'+today.strftime("%Y_%m_%d")
else: # Just save the latest optimization results
filename = 'opt_res_latest'
if not debug:
opt_res_naive_mpc.to_csv(input_data_dict['root'] + '/data/' + filename + '.csv', index_label='timestamp')
return opt_res_naive_mpc
def publish_data(input_data_dict: dict, logger: logging.Logger,
save_data_to_file: Optional[bool] = False) -> pd.DataFrame:
"""
Publish the data obtained from the optimization results.
    :param input_data_dict: A dictionary with the data needed by the action functions
:type input_data_dict: dict
:param logger: The passed logger object
:type logger: logging object
    :param save_data_to_file: If True, read the optimization results from the day-ahead CSV file
    :type save_data_to_file: bool, optional
    :return: The output data of the optimization, read from a CSV file in the data folder
:rtype: pd.DataFrame
"""
logger.info("Publishing data to HASS instance")
# Check if a day ahead optimization has been performed (read CSV file)
if save_data_to_file:
today = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
filename = 'opt_res_dayahead_'+today.strftime("%Y_%m_%d")
else:
filename = 'opt_res_latest'
if not os.path.isfile(input_data_dict['root'] + '/data/' + filename + '.csv'):
logger.error("File not found error, run an optimization task first.")
else:
opt_res_latest = pd.read_csv(input_data_dict['root'] + '/data/' + filename + '.csv', index_col='timestamp')
opt_res_latest.index = pd.to_datetime(opt_res_latest.index)
opt_res_latest.index.freq = input_data_dict['retrieve_hass_conf']['freq']
# Estimate the current index
now_precise = datetime.now(input_data_dict['retrieve_hass_conf']['time_zone']).replace(second=0, microsecond=0)
if input_data_dict['retrieve_hass_conf']['method_ts_round'] == 'nearest':
idx_closest = opt_res_latest.index.get_indexer([now_precise], method='nearest')[0]
elif input_data_dict['retrieve_hass_conf']['method_ts_round'] == 'first':
idx_closest = opt_res_latest.index.get_indexer([now_precise], method='ffill')[0]
elif input_data_dict['retrieve_hass_conf']['method_ts_round'] == 'last':
            idx_closest = opt_res_latest.index.get_indexer([now_precise], method='bfill')[0]
        else: # Fall back to the nearest timestamp for an unknown method_ts_round value
            idx_closest = opt_res_latest.index.get_indexer([now_precise], method='nearest')[0]
        if idx_closest == -1:
idx_closest = opt_res_latest.index.get_indexer([now_precise], method='nearest')[0]
# Publish PV forecast
input_data_dict['rh'].post_data(opt_res_latest['P_PV'], idx_closest,
'sensor.p_pv_forecast', "W", "PV Power Forecast")
# Publish Load forecast
input_data_dict['rh'].post_data(opt_res_latest['P_Load'], idx_closest,
'sensor.p_load_forecast', "W", "Load Power Forecast")
cols_published = ['P_PV', 'P_Load']
# Publish deferrable loads
for k in range(input_data_dict['opt'].optim_conf['num_def_loads']):
if "P_deferrable{}".format(k) not in opt_res_latest.columns:
logger.error("P_deferrable{}".format(k)+" was not found in results DataFrame. Optimization task may need to be relaunched or it did not converged to a solution.")
else:
input_data_dict['rh'].post_data(opt_res_latest["P_deferrable{}".format(k)], idx_closest,
'sensor.p_deferrable{}'.format(k), "W", "Deferrable Load {}".format(k))
cols_published = cols_published+["P_deferrable{}".format(k)]
# Publish battery power
if input_data_dict['opt'].optim_conf['set_use_battery']:
if 'P_batt' not in opt_res_latest.columns:
logger.error("P_batt was not found in results DataFrame. Optimization task may need to be relaunched or it did not converged to a solution.")
else:
input_data_dict['rh'].post_data(opt_res_latest['P_batt'], idx_closest,
'sensor.p_batt_forecast', "W", "Battery Power Forecast")
cols_published = cols_published+["P_batt"]
input_data_dict['rh'].post_data(opt_res_latest['SOC_opt']*100, idx_closest,
'sensor.soc_batt_forecast', "%", "Battery SOC Forecast")
cols_published = cols_published+["SOC_opt"]
# Publish grid power
input_data_dict['rh'].post_data(opt_res_latest['P_grid'], idx_closest,
'sensor.p_grid_forecast', "W", "Grid Power Forecast")
cols_published = cols_published+["P_grid"]
# Publish total value of cost function
col_cost_fun = [i for i in opt_res_latest.columns if 'cost_fun_' in i]
input_data_dict['rh'].post_data(opt_res_latest[col_cost_fun], idx_closest,
'sensor.total_cost_fun_value', "", "Total cost function value")
        # Create a DF summarizing what has been published
opt_res = opt_res_latest[cols_published].loc[[opt_res_latest.index[idx_closest]]]
return opt_res
def main():
"""Define the main command line entry function."""
# Parsing arguments
parser = argparse.ArgumentParser()
parser.add_argument('--action', type=str, help='Set the desired action, options are: perfect-optim, dayahead-optim, naive-mpc-optim and publish-data')
parser.add_argument('--config', type=str, help='Define path to the config.yaml file')
parser.add_argument('--costfun', type=str, default='profit', help='Define the type of cost function, options are: profit, cost, self-consumption')
parser.add_argument('--log2file', type=strtobool, default='False', help='Define if we should log to a file or not')
parser.add_argument('--params', type=str, default=None, help='Configuration parameters passed from data/options.json')
    parser.add_argument('--runtimeparams', type=str, default=None, help='Pass runtime optimization parameters as a dictionary')
parser.add_argument('--version', action='version', version='%(prog)s '+version('emhass'))
args = parser.parse_args()
# The path to the configuration files
config_path = pathlib.Path(args.config)
base_path = str(config_path.parent)
# create logger
logger, ch = utils.get_logger(__name__, base_path, save_to_file=bool(args.log2file))
# Setup parameters
input_data_dict = set_input_data_dict(config_path, base_path, args.costfun, args.params, args.runtimeparams, args.action, logger)
# Perform selected action
if args.action == 'perfect-optim':
opt_res = perfect_forecast_optim(input_data_dict, logger)
elif args.action == 'dayahead-optim':
opt_res = dayahead_forecast_optim(input_data_dict, logger)
elif args.action == 'naive-mpc-optim':
opt_res = naive_mpc_optim(input_data_dict, logger)
elif args.action == 'publish-data':
opt_res = publish_data(input_data_dict, logger)
else:
logger.error("The passed action argument is not valid")
opt_res = None
logger.info(opt_res)
# Flush the logger
ch.close()
logger.removeHandler(ch)
if __name__ == '__main__':
main()
|
src/emhass/command_line.py
|
| 0.728169 | 0.171408 |
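The actions above can also be driven programmatically instead of through main(). The sketch below mirrors what main() does for the dayahead-optim action; the config path is a placeholder, and it assumes a valid config.yaml, a data/ folder next to it, and a reachable Home Assistant instance.
import pathlib
from emhass import utils
from emhass.command_line import set_input_data_dict, dayahead_forecast_optim

config_path = pathlib.Path('/app/config_emhass.yaml')  # placeholder path
base_path = str(config_path.parent)
logger, ch = utils.get_logger(__name__, base_path, save_to_file=False)

# Same call sequence as main() for --action dayahead-optim
input_data_dict = set_input_data_dict(config_path, base_path, 'profit',
                                      None, None, 'dayahead-optim', logger)
opt_res = dayahead_forecast_optim(input_data_dict, logger, save_data_to_file=False)
print(opt_res.head())

ch.close()
logger.removeHandler(ch)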
import fire
from pyhappn.happn import Relations
from pyhappn.happn import User
from pyhappn.settings import TOKEN
class HappnCli(object):
"""Cli Happn."""
def like_all(self):
"""Like all"""
user_inst = User(TOKEN)
device_list = user_inst.get_device_list()
user_inst.set_device_id(device_list[0]['id'])
limit = 100
for i in range(int(9800 / limit)):
recs = user_inst.get_recommendations(limit, (i * limit))
for rec in recs:
relation = int(rec.get('notifier').get('my_relation'))
if relation == Relations.none:
user_inst.like_user(rec['notifier']['id'])
print('Like {}'.format(rec['notifier']['id']))
def hidden_all(self):
"""Hidden all"""
user_inst = User(TOKEN)
device_list = user_inst.get_device_list()
user_inst.set_device_id(device_list[0]['id'])
while True:
recs = user_inst.get_recommendations(100)
if not recs:
break
for rec in recs:
relation = int(rec.get('notifier').get('my_relation'))
                if relation != Relations.none:
user_inst.reject_user(rec['notifier']['id'])
print('Hidden {}'.format(rec['notifier']['id']))
def send_message_all_new(self, message):
"""Send message for all new crush"""
user_inst = User(TOKEN)
device_list = user_inst.get_device_list()
user_inst.set_device_id(device_list[0]['id'])
limit = 20
idx = 0
while True:
offset = idx * limit
idx += 1
recs = user_inst.get_conversations(offset, limit)
if not recs:
break
for rec in recs:
if not rec.get('messages'):
msg = {'message': message}
user_inst.send_message(rec['id'], msg)
def send_message_all(self, message):
"""Send message for all"""
user_inst = User(TOKEN)
device_list = user_inst.get_device_list()
user_inst.set_device_id(device_list[0]['id'])
limit = 20
idx = 70
messages_sent = {}
while True:
offset = idx * limit
idx += 1
recs = user_inst.get_conversations(offset, limit)
if not recs:
break
for rec in recs:
if not messages_sent.get(rec['id']):
msg = {'message': message}
user_inst.send_message(rec['id'], msg)
messages_sent.update({rec['id']: 1})
if __name__ == '__main__':
fire.Fire(HappnCli)
|
cli.py
|
| 0.29088 | 0.072834 |
import pytest
from ..manager import ZuulManager
from ..exceptions import ZuulManagerConfig
@pytest.mark.unit
@pytest.mark.zuul_manager
class TestZuulConnector:
def test_raise_when_no_user_key_file(self, path_to_test_file):
with pytest.raises(ZuulManagerConfig):
ZuulManager(host="host",
username="user",
user_key_file="there/is/no/file",
host_key_file=path_to_test_file("host_key.pub"),
tenant="tenant",
trigger="trigger",
project="project",
policy="AutoAddPolicy",
gearman_conf="/path/to/file/.conf")
def test_raise_when_no_host_keys_file(self, path_to_test_file):
with pytest.raises(ZuulManagerConfig):
ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file="there/is/no/file",
tenant="tenant",
trigger="trigger",
project="project",
policy="AutoAddPolicy",
gearman_conf="/path/to/file/.conf")
def test_can_set_autoadd_policy(self, path_to_test_file):
ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file=path_to_test_file("host_key.pub"),
tenant="tenant",
trigger="trigger",
project="project",
policy="AutoAddPolicy",
gearman_conf="/path/to/file/.conf")
def test_can_set_reject_policy(self, path_to_test_file):
ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file=path_to_test_file("host_key.pub"),
tenant="tenant",
trigger="trigger",
project="project",
policy="RejectPolicy",
gearman_conf="/path/to/file/.conf")
def test_raise_when_try_to_set_not_exists_policy(self, path_to_test_file):
with pytest.raises(ZuulManagerConfig):
ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file=path_to_test_file("host_key.pub"),
tenant="tenant",
trigger="trigger",
project="project",
policy="no-policy",
gearman_conf="/path/to/file/.conf")
def test_enqueue_generate_correct_command(self, path_to_test_file, mocker):
run_command = mocker.patch.object(ZuulManager, '_run_command')
zuul = ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file=path_to_test_file("host_key.pub"),
tenant="tenant",
trigger="trigger",
project="project",
policy="AutoAddPolicy",
gearman_conf="/path/to/file/.conf")
zuul.enqueue(pipeline="periodic-nightly", branch="master")
run_command.assert_called_with(
'zuul -c /path/to/file/.conf enqueue-ref --tenant tenant --trigger '
'trigger --pipeline periodic-nightly --project project '
'--ref refs/heads/master > /dev/null 2>&1 &')
def test_dequeue_generate_correct_command(self, path_to_test_file, mocker):
run_command = mocker.patch.object(ZuulManager, '_run_command')
zuul = ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file=path_to_test_file("host_key.pub"),
tenant="tenant",
trigger="trigger",
project="project",
policy="AutoAddPolicy",
gearman_conf="/path/to/file/.conf")
zuul.dequeue(pipeline="periodic-nightly", branch="master")
run_command.assert_called_with(
'zuul -c /path/to/file/.conf dequeue --tenant tenant '
'--pipeline periodic-nightly --project project '
'--ref refs/heads/master > /dev/null 2>&1 &')
def test_enqueue_correct_escape_insecure_args(self, path_to_test_file,
mocker):
run_command = mocker.patch.object(ZuulManager, '_run_command')
zuul = ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file=path_to_test_file("host_key.pub"),
tenant="TENANT",
trigger="TRIGGER",
project="PROJECT",
policy="AutoAddPolicy",
gearman_conf="/path/to/file/.conf")
zuul.enqueue(pipeline="periodic`who`", branch="master???*")
run_command.assert_called_with(
'zuul -c /path/to/file/.conf enqueue-ref --tenant TENANT '
'--trigger TRIGGER --pipeline \'periodic`who`\' --project PROJECT '
'--ref \'refs/heads/master???*\' > /dev/null 2>&1 &')
def test_dequeue_correct_escape_insecure_args(self, path_to_test_file,
mocker):
run_command = mocker.patch.object(ZuulManager, '_run_command')
zuul = ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file=path_to_test_file("host_key.pub"),
tenant="TENANT",
trigger="TRIGGER",
project="PROJECT",
policy="AutoAddPolicy",
gearman_conf="/path/to/file/.conf")
zuul.dequeue(pipeline="rm -r /", branch="master*/~")
run_command.assert_called_with(
'zuul -c /path/to/file/.conf dequeue --tenant TENANT '
'--pipeline \'rm -r /\' --project PROJECT '
'--ref \'refs/heads/master*/~\' > /dev/null 2>&1 &')
def test_enqueue_incorrect_gearman_conf(self, path_to_test_file,
mocker):
run_command = mocker.patch.object(ZuulManager, '_run_command')
zuul = ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file=path_to_test_file("host_key.pub"),
tenant="tenant",
trigger="trigger",
project="project",
policy="AutoAddPolicy",
gearman_conf="incorrect/path/to/file/.con")
zuul.enqueue(pipeline="periodic-nightly", branch="master")
run_command.assert_called_with(
'zuul enqueue-ref --tenant tenant --trigger trigger --pipeline '
'periodic-nightly --project project --ref refs/heads/master '
'> /dev/null 2>&1 &')
def test_dequeue_incorrect_gearman_conf(self, path_to_test_file,
mocker):
run_command = mocker.patch.object(ZuulManager, '_run_command')
zuul = ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file=path_to_test_file("host_key.pub"),
tenant="tenant",
trigger="trigger",
project="project",
policy="AutoAddPolicy",
gearman_conf="incorrect/path/to/file/.con")
zuul.dequeue(pipeline="periodic-nightly", branch="master")
run_command.assert_called_with(
'zuul dequeue --tenant tenant --pipeline periodic-nightly '
'--project project --ref refs/heads/master > /dev/null 2>&1 &')
|
acid/features/zuul_manager/tests/test_manager.py
|
import pytest
from ..manager import ZuulManager
from ..exceptions import ZuulManagerConfig
@pytest.mark.unit
@pytest.mark.zuul_manager
class TestZuulConnector:
def test_raise_when_no_user_key_file(self, path_to_test_file):
with pytest.raises(ZuulManagerConfig):
ZuulManager(host="host",
username="user",
user_key_file="there/is/no/file",
host_key_file=path_to_test_file("host_key.pub"),
tenant="tenant",
trigger="trigger",
project="project",
policy="AutoAddPolicy",
gearman_conf="/path/to/file/.conf")
def test_raise_when_no_host_keys_file(self, path_to_test_file):
with pytest.raises(ZuulManagerConfig):
ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file="there/is/no/file",
tenant="tenant",
trigger="trigger",
project="project",
policy="AutoAddPolicy",
gearman_conf="/path/to/file/.conf")
def test_can_set_autoadd_policy(self, path_to_test_file):
ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file=path_to_test_file("host_key.pub"),
tenant="tenant",
trigger="trigger",
project="project",
policy="AutoAddPolicy",
gearman_conf="/path/to/file/.conf")
def test_can_set_reject_policy(self, path_to_test_file):
ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file=path_to_test_file("host_key.pub"),
tenant="tenant",
trigger="trigger",
project="project",
policy="RejectPolicy",
gearman_conf="/path/to/file/.conf")
def test_raise_when_try_to_set_not_exists_policy(self, path_to_test_file):
with pytest.raises(ZuulManagerConfig):
ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file=path_to_test_file("host_key.pub"),
tenant="tenant",
trigger="trigger",
project="project",
policy="no-policy",
gearman_conf="/path/to/file/.conf")
def test_enqueue_generate_correct_command(self, path_to_test_file, mocker):
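# _run_command is patched out, so no SSH session is opened; the test only inspects the CLI string handed to it.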
run_command = mocker.patch.object(ZuulManager, '_run_command')
zuul = ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file=path_to_test_file("host_key.pub"),
tenant="tenant",
trigger="trigger",
project="project",
policy="AutoAddPolicy",
gearman_conf="/path/to/file/.conf")
zuul.enqueue(pipeline="periodic-nightly", branch="master")
run_command.assert_called_with(
'zuul -c /path/to/file/.conf enqueue-ref --tenant tenant --trigger '
'trigger --pipeline periodic-nightly --project project '
'--ref refs/heads/master > /dev/null 2>&1 &')
def test_dequeue_generate_correct_command(self, path_to_test_file, mocker):
run_command = mocker.patch.object(ZuulManager, '_run_command')
zuul = ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file=path_to_test_file("host_key.pub"),
tenant="tenant",
trigger="trigger",
project="project",
policy="AutoAddPolicy",
gearman_conf="/path/to/file/.conf")
zuul.dequeue(pipeline="periodic-nightly", branch="master")
run_command.assert_called_with(
'zuul -c /path/to/file/.conf dequeue --tenant tenant '
'--pipeline periodic-nightly --project project '
'--ref refs/heads/master > /dev/null 2>&1 &')
def test_enqueue_correct_escape_insecure_args(self, path_to_test_file,
mocker):
run_command = mocker.patch.object(ZuulManager, '_run_command')
zuul = ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file=path_to_test_file("host_key.pub"),
tenant="TENANT",
trigger="TRIGGER",
project="PROJECT",
policy="AutoAddPolicy",
gearman_conf="/path/to/file/.conf")
zuul.enqueue(pipeline="periodic`who`", branch="master???*")
run_command.assert_called_with(
'zuul -c /path/to/file/.conf enqueue-ref --tenant TENANT '
'--trigger TRIGGER --pipeline \'periodic`who`\' --project PROJECT '
'--ref \'refs/heads/master???*\' > /dev/null 2>&1 &')
def test_dequeue_correct_escape_insecure_args(self, path_to_test_file,
mocker):
run_command = mocker.patch.object(ZuulManager, '_run_command')
zuul = ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file=path_to_test_file("host_key.pub"),
tenant="TENANT",
trigger="TRIGGER",
project="PROJECT",
policy="AutoAddPolicy",
gearman_conf="/path/to/file/.conf")
zuul.dequeue(pipeline="rm -r /", branch="master*/~")
run_command.assert_called_with(
'zuul -c /path/to/file/.conf dequeue --tenant TENANT '
'--pipeline \'rm -r /\' --project PROJECT '
'--ref \'refs/heads/master*/~\' > /dev/null 2>&1 &')
def test_enqueue_incorrect_gearman_conf(self, path_to_test_file,
mocker):
run_command = mocker.patch.object(ZuulManager, '_run_command')
zuul = ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file=path_to_test_file("host_key.pub"),
tenant="tenant",
trigger="trigger",
project="project",
policy="AutoAddPolicy",
gearman_conf="incorrect/path/to/file/.con")
zuul.enqueue(pipeline="periodic-nightly", branch="master")
run_command.assert_called_with(
'zuul enqueue-ref --tenant tenant --trigger trigger --pipeline '
'periodic-nightly --project project --ref refs/heads/master '
'> /dev/null 2>&1 &')
def test_dequeue_incorrect_gearman_conf(self, path_to_test_file,
mocker):
run_command = mocker.patch.object(ZuulManager, '_run_command')
zuul = ZuulManager(host="host",
username="user",
user_key_file=path_to_test_file("insecure_user_key"),
host_key_file=path_to_test_file("host_key.pub"),
tenant="tenant",
trigger="trigger",
project="project",
policy="AutoAddPolicy",
gearman_conf="incorrect/path/to/file/.con")
zuul.dequeue(pipeline="periodic-nightly", branch="master")
run_command.assert_called_with(
'zuul dequeue --tenant tenant --pipeline periodic-nightly '
'--project project --ref refs/heads/master > /dev/null 2>&1 &')
| 0.309545 | 0.265321 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
def ParserArgs():
"""Args Parser."""
parser = argparse.ArgumentParser()
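# Rough grouping of the flags below: run/debug options, layer-shape options (kernel,
# strides, padding, conv_sample), logic-table options (use_pla, binary, qi/qo),
# espresso/abc optimization options, and random-forest options (run_rf, n_trees, ...).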
parser.add_argument("--debug", default=False, action="store_true",
help="set debug mode")
parser.add_argument("--print_debug", default=False, action="store_true",
help="print debug information")
parser.add_argument("--model", default="",
help="which model to run (dmnist, cmnist)")
parser.add_argument("-o", "--logic_optimize", default=False,
action="store_true",
help="optimize network.")
parser.add_argument("-l", "--load_weight", default=False,
action="store_true",
help="load weights directly from file.")
parser.add_argument("-w", "--weight_file", default=None,
help="name of weights file")
parser.add_argument("--output_group", type=int, default=1,
help="number of outputs to group together")
parser.add_argument("--kernel", default=None, type=int,
help="kernel if more complex layer")
parser.add_argument("--strides", default=None, type=int,
help="stride if more complex layer")
parser.add_argument("--padding", default=None,
help="padding if more complex layer")
parser.add_argument("--conv_sample", default=None, type=int,
help="number of samples within image for conv layer")
parser.add_argument("--sample", default=None,
help="number of training samples")
parser.add_argument("--use_pla", default=False,
action="store_true",
help="use pla table format")
parser.add_argument("--binary", default=False,
action="store_true",
help="use binary inputs")
parser.add_argument("--i_name", default=None,
help="input layer name")
parser.add_argument("--o_name", default=None,
help="output layer name")
parser.add_argument("--qi", default="2,0,0",
help="quantized input type")
parser.add_argument("--qo", default="2,0,0",
help="quantized output type")
parser.add_argument("--run_abc", default=False, action="store_true",
help="use abc to optimize logic")
parser.add_argument("--espresso_flags", default="-Dexpand",
help="flags to be passed to espresso")
parser.add_argument("--abc_flags", default="",
help="flags to be passed to abc")
parser.add_argument("--run_rf", default=False, action="store_true",
help="use ranform forest to optimize logic")
parser.add_argument("--n_trees", default=3, type=int,
help="number of trees to optimize")
parser.add_argument("--max_bits", default=1, type=int,
help="maximum number of bits for random forest")
parser.add_argument("--is_regressor", default=False, action="store_true",
help="use regressor instead of classifier")
parser.add_argument("--n_features", default=None,
help="number of features for random forest")
parser.add_argument("--max_depth", default=None,
help="maximum depth of random tree")
parser.add_argument("--sample_size", default=None,
help="sample size of table for random tree")
return parser.parse_args()
|
experimental/args.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
def ParserArgs():
"""Args Parser."""
parser = argparse.ArgumentParser()
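# Rough grouping of the flags below: run/debug options, layer-shape options (kernel,
# strides, padding, conv_sample), logic-table options (use_pla, binary, qi/qo),
# espresso/abc optimization options, and random-forest options (run_rf, n_trees, ...).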
parser.add_argument("--debug", default=False, action="store_true",
help="set debug mode")
parser.add_argument("--print_debug", default=False, action="store_true",
help="print debug information")
parser.add_argument("--model", default="",
help="which model to run (dmnist, cmnist)")
parser.add_argument("-o", "--logic_optimize", default=False,
action="store_true",
help="optimize network.")
parser.add_argument("-l", "--load_weight", default=False,
action="store_true",
help="load weights directly from file.")
parser.add_argument("-w", "--weight_file", default=None,
help="name of weights file")
parser.add_argument("--output_group", type=int, default=1,
help="number of outputs to group together")
parser.add_argument("--kernel", default=None, type=int,
help="kernel if more complex layer")
parser.add_argument("--strides", default=None, type=int,
help="stride if more complex layer")
parser.add_argument("--padding", default=None,
help="padding if more complex layer")
parser.add_argument("--conv_sample", default=None, type=int,
help="number of samples within image for conv layer")
parser.add_argument("--sample", default=None,
help="number of training samples")
parser.add_argument("--use_pla", default=False,
action="store_true",
help="use pla table format")
parser.add_argument("--binary", default=False,
action="store_true",
help="use binary inputs")
parser.add_argument("--i_name", default=None,
help="input layer name")
parser.add_argument("--o_name", default=None,
help="output layer name")
parser.add_argument("--qi", default="2,0,0",
help="quantized input type")
parser.add_argument("--qo", default="2,0,0",
help="quantized output type")
parser.add_argument("--run_abc", default=False, action="store_true",
help="use abc to optimize logic")
parser.add_argument("--espresso_flags", default="-Dexpand",
help="flags to be passed to espresso")
parser.add_argument("--abc_flags", default="",
help="flags to be passed to abc")
parser.add_argument("--run_rf", default=False, action="store_true",
help="use ranform forest to optimize logic")
parser.add_argument("--n_trees", default=3, type=int,
help="number of trees to optimize")
parser.add_argument("--max_bits", default=1, type=int,
help="maximum number of bits for random forest")
parser.add_argument("--is_regressor", default=False, action="store_true",
help="use regressor instead of classifier")
parser.add_argument("--n_features", default=None,
help="number of features for random forest")
parser.add_argument("--max_depth", default=None,
help="maximum depth of random tree")
parser.add_argument("--sample_size", default=None,
help="sample size of table for random tree")
return parser.parse_args()
| 0.800224 | 0.05455 |
import os
from datetime import date
import shutil
import pandas as pd
import urllib.request
import glob
# Name of the dataset
ds_name = 'WQP'
# Download directory
proj_dir = '/gpfs/space/home/holgerv/gis_holgerv/river_quality'
dl_dir = os.path.join(proj_dir, 'data', ds_name, 'raw', f'download_{date.today()}')
if os.path.exists(dl_dir):
shutil.rmtree(dl_dir)
os.mkdir(dl_dir)
# Import the code map
cmap_dtypes = {
'source_param_code': object
}
cmap_file = os.path.join(os.path.dirname(dl_dir), 'meta', ds_name + '_code_map.txt')
cmap_df = pd.read_csv(cmap_file, sep='\t', usecols=cmap_dtypes.keys(), dtype=cmap_dtypes, encoding='utf8')
param_codes = cmap_df['source_param_code'].to_list()
# Import US state codes
state_code_dtypes = {
'fips_code': object
}
state_code_file = os.path.join(os.path.dirname(dl_dir), 'meta', 'fips_state.csv')
state_code_df = pd.read_csv(state_code_file, sep=',', usecols=state_code_dtypes.keys(), dtype=state_code_dtypes)
state_codes = state_code_df['fips_code'].to_list()
# Download observation data
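# One narrowResult CSV per parameter code from the WQP Result endpoint, restricted to US stream sites.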
for param_code in param_codes:
# Temperature (00010) data has to be downloaded per state to avoid the 504 Gateway Timeout error
if param_code == '00010':
for state_code in state_codes:
url = 'https://www.waterqualitydata.us/data/Result/search?statecode=US%3A' + state_code \
+ '&siteType=Stream&pCode=' + param_code + '&mimeType=csv&zip=no&dataProfile=narrowResult'
fname = '_'.join([ds_name, param_code, state_code, 'obs.csv'])
file_path = os.path.join(dl_dir, fname)
if os.path.exists(file_path):
os.remove(file_path)
urllib.request.urlretrieve(url, file_path)
else:
url = 'https://www.waterqualitydata.us/data/Result/search?countrycode=US&siteType=Stream' \
'&pCode=' + param_code + '&mimeType=csv&zip=no&dataProfile=narrowResult'
fname = '_'.join([ds_name, param_code, 'obs.csv'])
file_path = os.path.join(dl_dir, fname)
if os.path.exists(file_path):
os.remove(file_path)
urllib.request.urlretrieve(url, file_path)
# Concatenate temperature observation data
temp_obs_files = glob.glob(os.path.join(dl_dir, ds_name + '_00010_*_obs.csv'))
temp_obs_df = pd.concat([pd.read_csv(file, dtype=object, low_memory=False) for file in temp_obs_files])
temp_obs_df.to_csv(os.path.join(dl_dir, ds_name + '_00010_obs.csv'), sep=',', index=False)
# Remove state temperature observation files
for file in temp_obs_files:
os.remove(file)
# Download site data
for param_code in param_codes:
url = 'https://www.waterqualitydata.us/data/Station/search?countrycode=US&siteType=Stream' \
'&pCode=' + param_code + '&mimeType=csv&zip=no'
fname = '_'.join([ds_name, param_code, 'sites.csv'])
file_path = os.path.join(dl_dir, fname)
if os.path.exists(file_path):
os.remove(file_path)
urllib.request.urlretrieve(url, file_path)
|
preprocessing/WQP/wqp_download.py
|
import os
from datetime import date
import shutil
import pandas as pd
import urllib.request
import glob
# Name of the dataset
ds_name = 'WQP'
# Download directory
proj_dir = '/gpfs/space/home/holgerv/gis_holgerv/river_quality'
dl_dir = os.path.join(proj_dir, 'data', ds_name, 'raw', f'download_{date.today()}')
if os.path.exists(dl_dir):
shutil.rmtree(dl_dir)
os.mkdir(dl_dir)
# Import the code map
cmap_dtypes = {
'source_param_code': object
}
cmap_file = os.path.join(os.path.dirname(dl_dir), 'meta', ds_name + '_code_map.txt')
cmap_df = pd.read_csv(cmap_file, sep='\t', usecols=cmap_dtypes.keys(), dtype=cmap_dtypes, encoding='utf8')
param_codes = cmap_df['source_param_code'].to_list()
# Import US state codes
state_code_dtypes = {
'fips_code': object
}
state_code_file = os.path.join(os.path.dirname(dl_dir), 'meta', 'fips_state.csv')
state_code_df = pd.read_csv(state_code_file, sep=',', usecols=state_code_dtypes.keys(), dtype=state_code_dtypes)
state_codes = state_code_df['fips_code'].to_list()
# Download observation data
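# One narrowResult CSV per parameter code from the WQP Result endpoint, restricted to US stream sites.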
for param_code in param_codes:
# Temperature (00010) data has to be downloaded per state to avoid the 504 Gateway Timeout error
if param_code == '00010':
for state_code in state_codes:
url = 'https://www.waterqualitydata.us/data/Result/search?statecode=US%3A' + state_code \
+ '&siteType=Stream&pCode=' + param_code + '&mimeType=csv&zip=no&dataProfile=narrowResult'
fname = '_'.join([ds_name, param_code, state_code, 'obs.csv'])
file_path = os.path.join(dl_dir, fname)
if os.path.exists(file_path):
os.remove(file_path)
urllib.request.urlretrieve(url, file_path)
else:
url = 'https://www.waterqualitydata.us/data/Result/search?countrycode=US&siteType=Stream' \
'&pCode=' + param_code + '&mimeType=csv&zip=no&dataProfile=narrowResult'
fname = '_'.join([ds_name, param_code, 'obs.csv'])
file_path = os.path.join(dl_dir, fname)
if os.path.exists(file_path):
os.remove(file_path)
urllib.request.urlretrieve(url, file_path)
# Concatenate temperature observation data
temp_obs_files = glob.glob(os.path.join(dl_dir, ds_name + '_00010_*_obs.csv'))
temp_obs_df = pd.concat([pd.read_csv(file, dtype=object, low_memory=False) for file in temp_obs_files])
temp_obs_df.to_csv(os.path.join(dl_dir, ds_name + '_00010_obs.csv'), sep=',', index=False)
# Remove state temperature observation files
for file in temp_obs_files:
os.remove(file)
# Download site data
for param_code in param_codes:
url = 'https://www.waterqualitydata.us/data/Station/search?countrycode=US&siteType=Stream' \
'&pCode=' + param_code + '&mimeType=csv&zip=no'
fname = '_'.join([ds_name, param_code, 'sites.csv'])
file_path = os.path.join(dl_dir, fname)
if os.path.exists(file_path):
os.remove(file_path)
urllib.request.urlretrieve(url, file_path)
| 0.273283 | 0.110856 |
from msrest.serialization import Model
class NotificationSubscriptionUpdateParameters(Model):
"""NotificationSubscriptionUpdateParameters.
:param admin_settings: Admin-managed settings for the subscription. Only applies to subscriptions where the subscriber is a group.
:type admin_settings: :class:`SubscriptionAdminSettings <notification.v4_0.models.SubscriptionAdminSettings>`
:param channel: Channel for delivering notifications triggered by the subscription.
:type channel: :class:`ISubscriptionChannel <notification.v4_0.models.ISubscriptionChannel>`
:param description: Updated description for the subscription. Typically describes filter criteria which help identify the subscription.
:type description: str
:param filter: Matching criteria for the subscription. ExpressionFilter
:type filter: :class:`ISubscriptionFilter <notification.v4_0.models.ISubscriptionFilter>`
:param scope: The container in which events must be published from in order to be matched by the new subscription. If not specified, defaults to the current host (typically the current account or project collection). For example, a subscription scoped to project A will not produce notifications for events published from project B.
:type scope: :class:`SubscriptionScope <notification.v4_0.models.SubscriptionScope>`
:param status: Updated status for the subscription. Typically used to enable or disable a subscription.
:type status: object
:param status_message: Optional message that provides more details about the updated status.
:type status_message: str
:param user_settings: User-managed settings for the subscription. Only applies to subscriptions where the subscriber is a group. Typically used to opt-in or opt-out a user from a group subscription.
:type user_settings: :class:`SubscriptionUserSettings <notification.v4_0.models.SubscriptionUserSettings>`
"""
_attribute_map = {
'admin_settings': {'key': 'adminSettings', 'type': 'SubscriptionAdminSettings'},
'channel': {'key': 'channel', 'type': 'ISubscriptionChannel'},
'description': {'key': 'description', 'type': 'str'},
'filter': {'key': 'filter', 'type': 'ISubscriptionFilter'},
'scope': {'key': 'scope', 'type': 'SubscriptionScope'},
'status': {'key': 'status', 'type': 'object'},
'status_message': {'key': 'statusMessage', 'type': 'str'},
'user_settings': {'key': 'userSettings', 'type': 'SubscriptionUserSettings'}
}
def __init__(self, admin_settings=None, channel=None, description=None, filter=None, scope=None, status=None, status_message=None, user_settings=None):
super(NotificationSubscriptionUpdateParameters, self).__init__()
self.admin_settings = admin_settings
self.channel = channel
self.description = description
self.filter = filter
self.scope = scope
self.status = status
self.status_message = status_message
self.user_settings = user_settings
|
venv/lib/python3.8/site-packages/vsts/notification/v4_0/models/notification_subscription_update_parameters.py
|
from msrest.serialization import Model
class NotificationSubscriptionUpdateParameters(Model):
"""NotificationSubscriptionUpdateParameters.
:param admin_settings: Admin-managed settings for the subscription. Only applies to subscriptions where the subscriber is a group.
:type admin_settings: :class:`SubscriptionAdminSettings <notification.v4_0.models.SubscriptionAdminSettings>`
:param channel: Channel for delivering notifications triggered by the subscription.
:type channel: :class:`ISubscriptionChannel <notification.v4_0.models.ISubscriptionChannel>`
:param description: Updated description for the subscription. Typically describes filter criteria which help identify the subscription.
:type description: str
:param filter: Matching criteria for the subscription. ExpressionFilter
:type filter: :class:`ISubscriptionFilter <notification.v4_0.models.ISubscriptionFilter>`
:param scope: The container in which events must be published from in order to be matched by the new subscription. If not specified, defaults to the current host (typically the current account or project collection). For example, a subscription scoped to project A will not produce notifications for events published from project B.
:type scope: :class:`SubscriptionScope <notification.v4_0.models.SubscriptionScope>`
:param status: Updated status for the subscription. Typically used to enable or disable a subscription.
:type status: object
:param status_message: Optional message that provides more details about the updated status.
:type status_message: str
:param user_settings: User-managed settings for the subscription. Only applies to subscriptions where the subscriber is a group. Typically used to opt-in or opt-out a user from a group subscription.
:type user_settings: :class:`SubscriptionUserSettings <notification.v4_0.models.SubscriptionUserSettings>`
"""
_attribute_map = {
'admin_settings': {'key': 'adminSettings', 'type': 'SubscriptionAdminSettings'},
'channel': {'key': 'channel', 'type': 'ISubscriptionChannel'},
'description': {'key': 'description', 'type': 'str'},
'filter': {'key': 'filter', 'type': 'ISubscriptionFilter'},
'scope': {'key': 'scope', 'type': 'SubscriptionScope'},
'status': {'key': 'status', 'type': 'object'},
'status_message': {'key': 'statusMessage', 'type': 'str'},
'user_settings': {'key': 'userSettings', 'type': 'SubscriptionUserSettings'}
}
def __init__(self, admin_settings=None, channel=None, description=None, filter=None, scope=None, status=None, status_message=None, user_settings=None):
super(NotificationSubscriptionUpdateParameters, self).__init__()
self.admin_settings = admin_settings
self.channel = channel
self.description = description
self.filter = filter
self.scope = scope
self.status = status
self.status_message = status_message
self.user_settings = user_settings
| 0.877562 | 0.17901 |
try:
from html.parser import HTMLParser
except ImportError:
from HTMLParser import HTMLParser
import re
class AngularGettextHTMLParser(HTMLParser):
"""Parse HTML to find translate directives.
Note: This will not cope with nested tags (which I don't think make any
sense)
"""
def __init__(self):
try:
super(self.__class__, self).__init__()
except TypeError:
HTMLParser.__init__(self)
self.in_translate = False
self.data = ''
self.strings = []
def handle_starttag(self, tag, attrs):
if tag == 'translate' or \
(attrs and 'translate' in [attr[0] for attr in attrs]):
self.in_translate = True
self.line = self.getpos()[0]
def handle_data(self, data):
if self.in_translate:
self.data += data
def handle_endtag(self, tag):
if self.in_translate:
self.strings.append(
(self.line, u'gettext', self.interpolate(), [])
)
self.in_translate = False
self.data = ''
def interpolate(self):
interpolation_regex = r"""{\$([\w\."'\]\[\(\)]+)\$}"""
return re.sub(interpolation_regex, r'%(\1)', self.data)
def extract_angular(fileobj, keywords, comment_tags, options):
"""Extract messages from angular template (HTML) files that use the
angular-gettext translate directive as per
https://angular-gettext.rocketeer.be/ .
:param fileobj: the file-like object the messages should be extracted
from
:param keywords: This is a standard parameter so it is accepted but ignored.
:param comment_tags: This is a standard parameter so it is accepted but
ignored.
:param options: Another standard parameter that is accepted but ignored.
:return: an iterator over ``(lineno, funcname, message, comments)``
tuples
:rtype: ``iterator``
This particular extractor is quite simple because it is intended to only
deal with angular templates which do not need comments, or the more
complicated forms of translations.
A later version will address pluralization.
"""
parser = AngularGettextHTMLParser()
for line in fileobj:
parser.feed(line)
for string in parser.strings:
yield string
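# Wiring sketch (assumption, not defined in this module): Babel can discover this
# extractor through the 'babel.extractors' entry point, e.g.
#   babel.extractors =
#       angular = horizon.utils.babel_extract_angular:extract_angular
# and a mapping file can then route templates to it with a section such as
#   [angular: **/templates/**.html]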
|
horizon/utils/babel_extract_angular.py
|
try:
from html.parser import HTMLParser
except ImportError:
from HTMLParser import HTMLParser
import re
class AngularGettextHTMLParser(HTMLParser):
"""Parse HTML to find translate directives.
Note: This will not cope with nested tags (which I don't think make any
sense)
"""
def __init__(self):
try:
super(self.__class__, self).__init__()
except TypeError:
HTMLParser.__init__(self)
self.in_translate = False
self.data = ''
self.strings = []
def handle_starttag(self, tag, attrs):
if tag == 'translate' or \
(attrs and 'translate' in [attr[0] for attr in attrs]):
self.in_translate = True
self.line = self.getpos()[0]
def handle_data(self, data):
if self.in_translate:
self.data += data
def handle_endtag(self, tag):
if self.in_translate:
self.strings.append(
(self.line, u'gettext', self.interpolate(), [])
)
self.in_translate = False
self.data = ''
def interpolate(self):
interpolation_regex = r"""{\$([\w\."'\]\[\(\)]+)\$}"""
return re.sub(interpolation_regex, r'%(\1)', self.data)
def extract_angular(fileobj, keywords, comment_tags, options):
"""Extract messages from angular template (HTML) files that use the
angular-gettext translate directive as per
https://angular-gettext.rocketeer.be/ .
:param fileobj: the file-like object the messages should be extracted
from
:param keywords: This is a standard parameter so it is accepted but ignored.
:param comment_tags: This is a standard parameter so it is accepted but
ignored.
:param options: Another standard parameter that is accepted but ignored.
:return: an iterator over ``(lineno, funcname, message, comments)``
tuples
:rtype: ``iterator``
This particular extractor is quite simple because it is intended to only
deal with angular templates which do not need comments, or the more
complicated forms of translations.
A later version will address pluralization.
"""
parser = AngularGettextHTMLParser()
for line in fileobj:
parser.feed(line)
for string in parser.strings:
yield string
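# Wiring sketch (assumption, not defined in this module): Babel can discover this
# extractor through the 'babel.extractors' entry point, e.g.
#   babel.extractors =
#       angular = horizon.utils.babel_extract_angular:extract_angular
# and a mapping file can then route templates to it with a section such as
#   [angular: **/templates/**.html]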
| 0.713032 | 0.175998 |
import discord, sqlite3, asyncio
from discord.ext import commands
from discord_slash import cog_ext, ButtonStyle
from discord_slash.utils.manage_commands import create_option
from discord_slash.utils.manage_components import *
class Slash(commands.Cog):
def __init__(self, bot):
self.bot = bot
@cog_ext.cog_slash(guild_ids=[736689848626446396], name="card", description="Afficher ta carte", options=[
create_option(
name="membre",
description="Membre de discord",
option_type=6,
required=False
)])
async def _card(self, ctx, membre: discord.Member = None):
connection = sqlite3.connect("iso_card.db")
cursor = connection.cursor()
if membre == None:
membre = ctx.author
if membre.bot == True:
await ctx.send(f"{ctx.author.mention} Les bots n'ont pas de carte... :wink:")
if membre.bot == False:
member_id = (f"{membre.id}",)
cursor.execute('SELECT * FROM tt_iso_card WHERE user_id = ?', member_id)
member_values = cursor.fetchone()
if member_values == None:
if membre == ctx.author:
await ctx.send("Tu ne peux pas afficher ta carte car tu n'as pas commencé l'aventure ISO land ! (Pour débuter, fait : **/start**)")
else:
await ctx.send("Tu ne peux pas afficher la carte de cette personne car elle ne s'est pas inscrite à l'aventure ISO land...")
else:
about_para = member_values[1]
embed = discord.Embed(title=f"Carte de {membre.name}", description=membre.mention)
embed.add_field(name="À propos", value=about_para, inline=False)
if membre != ctx.author:
await ctx.send(embed=embed)
else:
buttons = [
create_button(
style = ButtonStyle.blue,
label = "Éditer À propos",
custom_id = "edit_apropos"
)
]
action_row = create_actionrow(*buttons)
choice_made = await ctx.send(embed=embed, components=[action_row])
def check(m):
return m.author_id == ctx.author_id and m.origin_message.id == choice_made.id
def check2(msg):
return msg.author == ctx.author and msg.channel == ctx.channel
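# Block until the card owner clicks the edit button on this particular message (no timeout),
# then prompt them for the replacement 'about' text.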
button_ctx = await wait_for_component(self.bot, components=action_row, check=check)
if button_ctx.custom_id == "edit_apropos":
await button_ctx.send(content="**Tu as 30 secondes pour envoyer le nouveau message de la section __à propos__.**", hidden=True)
try:
msg = await self.bot.wait_for("message", check=check2, timeout=30)
except asyncio.TimeoutError:
await button_ctx.send(content=f"{ctx.author.mention}, Le temps est écoulé. Réentre la commande pour éditer la section **à propos**.", hidden=True)
if len(list(msg.content)) >= 1024:
await button_ctx.send(content=f"{ctx.author.mention}, le message envoyé est trop long pour votre section **à propos**, la limite étant de 1024 caractères.", hidden=True)
else:
updated_user = (f"{msg.content}", f"{ctx.author.id}",)
cursor.execute('UPDATE tt_iso_card SET about = ? WHERE user_id = ?', updated_user)
connection.commit()
await msg.delete()
await button_ctx.send(content=f"{ctx.author.mention}, édition confirmée de la section **à propos** :\n\n> {msg.content}", hidden=True)
def setup(bot):
bot.add_cog(Slash(bot))
def teardown(bot):
bot.remove_cog("card")
|
cogs/card.py
|
import discord, sqlite3, asyncio
from discord.ext import commands
from discord_slash import cog_ext, ButtonStyle
from discord_slash.utils.manage_commands import create_option
from discord_slash.utils.manage_components import *
class Slash(commands.Cog):
def __init__(self, bot):
self.bot = bot
@cog_ext.cog_slash(guild_ids=[736689848626446396], name="card", description="Afficher ta carte", options=[
create_option(
name="membre",
description="Membre de discord",
option_type=6,
required=False
)])
async def _card(self, ctx, membre: discord.Member = None):
connection = sqlite3.connect("iso_card.db")
cursor = connection.cursor()
if membre == None:
membre = ctx.author
if membre.bot == True:
await ctx.send(f"{ctx.author.mention} Les bots n'ont pas de carte... :wink:")
if membre.bot == False:
member_id = (f"{membre.id}",)
cursor.execute('SELECT * FROM tt_iso_card WHERE user_id = ?', member_id)
member_values = cursor.fetchone()
if member_values == None:
if membre == ctx.author:
await ctx.send("Tu ne peux pas afficher ta carte car tu n'as pas commencé l'aventure ISO land ! (Pour débuter, fait : **/start**)")
else:
await ctx.send("Tu ne peux pas afficher la carte de cette personne car elle ne s'est pas inscrite à l'aventure ISO land...")
else:
about_para = member_values[1]
embed = discord.Embed(title=f"Carte de {membre.name}", description=membre.mention)
embed.add_field(name="À propos", value=about_para, inline=False)
if membre != ctx.author:
await ctx.send(embed=embed)
else:
buttons = [
create_button(
style = ButtonStyle.blue,
label = "Éditer À propos",
custom_id = "edit_apropos"
)
]
action_row = create_actionrow(*buttons)
choice_made = await ctx.send(embed=embed, components=[action_row])
def check(m):
return m.author_id == ctx.author_id and m.origin_message.id == choice_made.id
def check2(msg):
return msg.author == ctx.author and msg.channel == ctx.channel
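# Block until the card owner clicks the edit button on this particular message (no timeout),
# then prompt them for the replacement 'about' text.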
button_ctx = await wait_for_component(self.bot, components=action_row, check=check)
if button_ctx.custom_id == "edit_apropos":
await button_ctx.send(content="**Tu as 30 secondes pour envoyer le nouveau message de la section __à propos__.**", hidden=True)
try:
msg = await self.bot.wait_for("message", check=check2, timeout=30)
except asyncio.TimeoutError:
await button_ctx.send(content=f"{ctx.author.mention}, Le temps est écoulé. Réentre la commande pour éditer la section **à propos**.", hidden=True)
if len(list(msg.content)) >= 1024:
await button_ctx.send(content=f"{ctx.author.mention}, le message envoyé est trop long pour votre section **à propos**, la limite étant de 1024 caractères.", hidden=True)
else:
updated_user = (f"{msg.content}", f"{ctx.author.id}",)
cursor.execute('UPDATE tt_iso_card SET about = ? WHERE user_id = ?', updated_user)
connection.commit()
await msg.delete()
await button_ctx.send(content=f"{ctx.author.mention}, édition confirmée de la section **à propos** :\n\n> {msg.content}", hidden=True)
def setup(bot):
bot.add_cog(Slash(bot))
def teardown(bot):
bot.remove_cog("card")
| 0.304042 | 0.150746 |
import pygame
import random
# Define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
class Block(pygame.sprite.Sprite):
"""
This class represents a block
It derives from the "Sprite" class in Pygame
"""
def __init__(self, color, width, height):
""" Constructor. Pass in the color of the block,
and its x and y position. """
# Call the parent class (Sprite) constructor
super().__init__()
# Create an image of the block, and fill it with a color.
# This could also be an image loaded from the disk.
self.image = pygame.Surface([width, height])
self.image.fill(color)
# Fetch the rectangle object that has the dimensions of the image.
# Update the position of this object by setting the values
# of rect.x and rect.y
self.rect = self.image.get_rect()
# Instance variables that control the edges of where we bounce
self.left_boundary = 0
self.right_boundary = 0
self.top_boundary = 0
self.bottom_boundary = 0
# Instance variables for our current speed and direction
self.change_x = 0
self.change_y = 0
def update(self):
""" Called each frame. """
self.rect.x += self.change_x
self.rect.y += self.change_y
if self.rect.right >= self.right_boundary or self.rect.left <= self.left_boundary:
self.change_x *= -1
if self.rect.bottom >= self.bottom_boundary or self.rect.top <= self.top_boundary:
self.change_y *= -1
class Player(Block):
""" The player class derives from Block, but overrides the 'update'
functionality with a new movement function that will move the block
with the mouse. """
def update(self):
# Get the current mouse position. This returns the position
# as a list of two numbers.
pos = pygame.mouse.get_pos()
# Fetch the x and y out of the list,
# just like we'd fetch letters out of a string.
# Set the player object to the mouse location
self.rect.x = pos[0]
self.rect.y = pos[1]
# Initialize Pygame
pygame.init()
# Set the height and width of the screen
screen_width = 700
screen_height = 400
screen = pygame.display.set_mode([screen_width, screen_height])
# This is a list of 'sprites.' Each block in the program is
# added to this list. The list is managed by a class called 'Group.'
block_list = pygame.sprite.Group()
# This is a list of every sprite. All blocks and the player block as well.
all_sprites_list = pygame.sprite.Group()
for i in range(50):
# This represents a block
block = Block(BLACK, 20, 15)
# Set a random location for the block
block.rect.x = random.randrange(screen_width)
block.rect.y = random.randrange(screen_height)
block.change_x = random.randrange(-3, 4)
block.change_y = random.randrange(-3, 4)
block.left_boundary = 0
block.top_boundary = 0
block.right_boundary = screen_width
block.bottom_boundary = screen_height
# Add the block to the list of objects
block_list.add(block)
all_sprites_list.add(block)
# Create a red player block
player = Player(RED, 20, 15)
all_sprites_list.add(player)
# Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
score = 0
# -------- Main Program Loop -----------
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
# Clear the screen
screen.fill(WHITE)
# Calls update() method on every sprite in the list
all_sprites_list.update()
# See if the player block has collided with anything.
blocks_hit_list = pygame.sprite.spritecollide(player, block_list, True)
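# The third argument (dokill=True) calls kill() on every block that was hit,
# removing it from all sprite groups so it disappears from the screen.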
# Check the list of collisions.
for block in blocks_hit_list:
score += 1
print(score)
# Draw all the sprites
all_sprites_list.draw(screen)
# Limit to 60 frames per second
clock.tick(60)
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
pygame.quit()
|
pygame/hungry lion/hungry.py
|
import pygame
import random
# Define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
class Block(pygame.sprite.Sprite):
"""
This class represents a block
It derives from the "Sprite" class in Pygame
"""
def __init__(self, color, width, height):
""" Constructor. Pass in the color of the block,
and its x and y position. """
# Call the parent class (Sprite) constructor
super().__init__()
# Create an image of the block, and fill it with a color.
# This could also be an image loaded from the disk.
self.image = pygame.Surface([width, height])
self.image.fill(color)
# Fetch the rectangle object that has the dimensions of the image.
# Update the position of this object by setting the values
# of rect.x and rect.y
self.rect = self.image.get_rect()
# Instance variables that control the edges of where we bounce
self.left_boundary = 0
self.right_boundary = 0
self.top_boundary = 0
self.bottom_boundary = 0
# Instance variables for our current speed and direction
self.change_x = 0
self.change_y = 0
def update(self):
""" Called each frame. """
self.rect.x += self.change_x
self.rect.y += self.change_y
if self.rect.right >= self.right_boundary or self.rect.left <= self.left_boundary:
self.change_x *= -1
if self.rect.bottom >= self.bottom_boundary or self.rect.top <= self.top_boundary:
self.change_y *= -1
class Player(Block):
""" The player class derives from Block, but overrides the 'update'
functionality with a new movement function that will move the block
with the mouse. """
def update(self):
# Get the current mouse position. This returns the position
# as a list of two numbers.
pos = pygame.mouse.get_pos()
# Fetch the x and y out of the list,
# just like we'd fetch letters out of a string.
# Set the player object to the mouse location
self.rect.x = pos[0]
self.rect.y = pos[1]
# Initialize Pygame
pygame.init()
# Set the height and width of the screen
screen_width = 700
screen_height = 400
screen = pygame.display.set_mode([screen_width, screen_height])
# This is a list of 'sprites.' Each block in the program is
# added to this list. The list is managed by a class called 'Group.'
block_list = pygame.sprite.Group()
# This is a list of every sprite. All blocks and the player block as well.
all_sprites_list = pygame.sprite.Group()
for i in range(50):
# This represents a block
block = Block(BLACK, 20, 15)
# Set a random location for the block
block.rect.x = random.randrange(screen_width)
block.rect.y = random.randrange(screen_height)
block.change_x = random.randrange(-3, 4)
block.change_y = random.randrange(-3, 4)
block.left_boundary = 0
block.top_boundary = 0
block.right_boundary = screen_width
block.bottom_boundary = screen_height
# Add the block to the list of objects
block_list.add(block)
all_sprites_list.add(block)
# Create a red player block
player = Player(RED, 20, 15)
all_sprites_list.add(player)
# Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
score = 0
# -------- Main Program Loop -----------
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
# Clear the screen
screen.fill(WHITE)
# Calls update() method on every sprite in the list
all_sprites_list.update()
# See if the player block has collided with anything.
blocks_hit_list = pygame.sprite.spritecollide(player, block_list, True)
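# The third argument (dokill=True) calls kill() on every block that was hit,
# removing it from all sprite groups so it disappears from the screen.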
# Check the list of collisions.
for block in blocks_hit_list:
score += 1
print(score)
# Draw all the sprites
all_sprites_list.draw(screen)
# Limit to 60 frames per second
clock.tick(60)
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
pygame.quit()
| 0.453988 | 0.356755 |
from builtins import hasattr
import logging
import re
from urllib.parse import urljoin, urlencode
import requests
from django.conf import settings
from django.contrib.auth.models import User
from django.utils import timezone
from applications.constants import INTEGRATION_PREFIX
from applications.models import BootcampApplication
from hubspot.decorators import try_again
from hubspot.serializers import (
HubspotProductSerializer,
HubspotDealSerializer,
HubspotLineSerializer,
)
from klasses.models import BootcampRun
HUBSPOT_API_BASE_URL = "https://api.hubapi.com"
log = logging.getLogger()
def hubspot_timestamp(dt):
"""
Convert a datetime to a Hubspot timestamp
Args:
dt (DateTime): the DateTime to convert
Returns:
int: The timestamp in milliseconds
"""
return int(dt.timestamp() * 1000)
def format_hubspot_id(object_id):
"""
Return a formatted Hubspot ID for an object
Args:
object_id(int): The object id
Returns:
str: The hubspot id
"""
return "{}-{}".format(settings.HUBSPOT_ID_PREFIX, object_id)
def parse_hubspot_deal_id(hubspot_id):
"""
Return an object ID parsed from a hubspot ID
Args:
hubspot_id(str): The formatted hubspot ID
Returns:
int: The object ID or None
"""
match = re.compile(
fr"{settings.HUBSPOT_ID_PREFIX}-{INTEGRATION_PREFIX}(\d+)"
).match(hubspot_id)
return int(match.group(1)) if match else None
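# Example with hypothetical prefixes: if HUBSPOT_ID_PREFIX were 'BOOTCAMP' and
# INTEGRATION_PREFIX were 'APP-', then 'BOOTCAMP-APP-17' would parse to 17 and any
# non-matching id would return None.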
@try_again
def send_hubspot_request(
endpoint, api_url, method, body=None, query_params=None, **kwargs
):
"""
Send a request to Hubspot using the given params, body and api key specified in settings
Args:
endpoint (String): Specific endpoint to hit. Can be the empty string
api_url (String): The url path to append endpoint to
method (String): GET, POST, PUT, or DELETE
body (serializable data): Data to be JSON serialized and sent with a PUT or POST request
query_params (Dict): Params to be added to the query string
kwargs: keyword arguments to add to the request method
Returns:
Response: HTTP response to the constructed url
"""
base_url = urljoin(f"{HUBSPOT_API_BASE_URL}/", api_url)
if endpoint:
base_url = urljoin(f"{base_url}/", endpoint)
if query_params is None:
query_params = {}
if "hapikey" not in query_params:
query_params["hapikey"] = settings.HUBSPOT_API_KEY
params = urlencode(query_params)
url = f"{base_url}?{params}"
if method == "GET":
return requests.get(url=url, **kwargs)
if method == "PUT":
return requests.put(url=url, json=body, **kwargs)
if method == "POST":
return requests.post(url=url, json=body, **kwargs)
if method == "DELETE":
return requests.delete(url=url, **kwargs)
def sanitize_properties(properties):
"""
Ensures we don't pass any invalid values (e.g. nulls) to Hubspot
Args:
properties (dict):
the dict of properties to be sanitized
Returns:
dict:
the sanitized dict
"""
return {
key: value if value is not None else "" for key, value in properties.items()
}
def make_sync_message(object_id, properties):
"""
Create data for sync message
Args:
object_id (ObjectID): Internal ID to match with Hubspot object
properties (dict): dict of properties to be synced
Returns:
dict: serialized sync-message
"""
properties = sanitize_properties(properties)
return {
"integratorObjectId": format_hubspot_id(object_id),
"action": "UPSERT",
"changeOccurredTimestamp": hubspot_timestamp(timezone.now()),
"propertyNameToValues": properties,
}
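# Shape of the result (sketch derived from the code above): make_sync_message(42, {"price": None})
# yields {"integratorObjectId": format_hubspot_id(42), "action": "UPSERT",
# "changeOccurredTimestamp": <now in ms>, "propertyNameToValues": {"price": ""}}.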
def paged_sync_errors(limit=200, offset=0):
"""
Query the Hubspot API for errors that have occurred during sync
Args:
limit (Int): The number of errors to be returned
offset (Int): The index of the first error to be returned
Returns:
list: errors in JSON format
"""
response = send_hubspot_request(
"sync-errors",
"/extensions/ecomm/v1",
"GET",
query_params={"limit": limit, "offset": offset},
)
response.raise_for_status()
return response.json().get("results", [])
def get_sync_errors(limit=200, offset=0):
"""
Yield hubspot errors
Args:
limit (int): The number of errors to be returned
offset (int): The index of the first error to be returned
Yields:
dict : error in JSON format
"""
errors = paged_sync_errors(limit, offset)
while len(errors) > 0:
yield from errors
offset += limit
errors = paged_sync_errors(limit, offset)
def get_sync_status(object_type, object_id):
"""
Get errors that have occurred during sync
Args:
object_type (STRING): "CONTACT", "DEAL", "PRODUCT", "LINE_ITEM"
object_id (Int): The internal django ID of the object to check
Returns:
HTTP response including sync status
"""
response = send_hubspot_request(
format_hubspot_id(object_id),
f"/extensions/ecomm/v1/sync-status/{object_type.upper()}",
"GET",
)
response.raise_for_status()
return response.json()
def exists_in_hubspot(object_type, object_id):
"""
Check if object exists in hubspot by looking for the presence of a hubspot ID
Args:
object_type (str): The hubspot object_type
object_id (ID): The ID of the object to check
Return:
boolean: True if the object exists
"""
try:
sync_status = get_sync_status(object_type, object_id)
except requests.HTTPError as sync_status_error:
if sync_status_error.response.status_code != 400:
log.error(sync_status_error)
return False
else:
return sync_status["hubspotId"] is not None
def make_contact_sync_message(user_id):
"""
Create the body of a sync message for a contact.
Args:
user_id (int): User id
Returns:
list: dict containing serializable sync-message data
"""
from profiles.serializers import UserSerializer
user = User.objects.get(id=user_id)
if not hasattr(user, "profile"):
return [{}]
properties = UserSerializer(user).data
properties.update(properties.pop("legal_address") or {})
properties.update(properties.pop("profile") or {})
properties["work_experience"] = properties.pop("years_experience", None)
if "street_address" in properties:
properties["street_address"] = "\n".join(properties.pop("street_address"))
# Use profile id to maintain consistency with existing hubspot contacts
return [make_sync_message(user.profile.id, properties)]
def make_product_sync_message(bootcamp_run_id):
"""
Create the body of a sync message for a product.
Args:
bootcamp_run_id (int): Bootcamp run id
Returns:
list: dict containing serializable sync-message data
"""
bootcamp_run = BootcampRun.objects.get(id=bootcamp_run_id)
properties = HubspotProductSerializer(instance=bootcamp_run).data
return [make_sync_message(bootcamp_run.integration_id, properties)]
def make_deal_sync_message(application_id):
"""
Create the body of a sync message for a deal.
Args:
application_id (int): BootcampApplication id
Returns:
list: dict containing serializable sync-message data
"""
application = BootcampApplication.objects.get(id=application_id)
properties = HubspotDealSerializer(instance=application).data
return [make_sync_message(application.integration_id, properties)]
def make_line_sync_message(application_id):
"""
Create the body of a sync message for a Line Item.
Args:
application_id (int):BootcampApplication id
Returns:
list: dict containing serializable sync-message data
"""
application = BootcampApplication.objects.get(id=application_id)
properties = HubspotLineSerializer(instance=application).data
properties["quantity"] = 1
return [make_sync_message(application.integration_id, properties)]
def sync_object_property(object_type, property_dict):
"""
Create or update a new object property
Args:
object_type (str): The object type of the property (ie "deals")
property_dict (dict): The attributes of the property
Returns:
dict: The new/updated property attributes
"""
required_fields = {"name", "label", "groupName"}
missing_fields = required_fields.difference(property_dict.keys())
if missing_fields:
raise KeyError(
"The following property attributes are required: {}".format(
",".join(missing_fields)
)
)
for key in property_dict.keys():
if property_dict[key] is None:
property_dict[key] = ""
exists = object_property_exists(object_type, property_dict["name"])
if exists:
method = "PUT"
endpoint = f"named/{property_dict['name']}"
else:
method = "POST"
endpoint = ""
response = send_hubspot_request(
endpoint, f"/properties/v1/{object_type}/properties", method, body=property_dict
)
response.raise_for_status()
return response.json()
def get_object_property(object_type, property_name):
"""
Get a Hubspot object property.
Args:
object_type (str): The object type of the property (ie "deals")
property_name (str): The property name
Returns:
dict: the property attributes
"""
response = send_hubspot_request(
property_name, f"/properties/v1/{object_type}/properties/named", "GET"
)
response.raise_for_status()
return response.json()
def object_property_exists(object_type, property_name):
"""
Return True if the specified property exists, False otherwise
Args:
object_type (str): The object type of the property (ie "deals")
property_name (str): The property name
Returns:
boolean: True if the property exists otherwise False
"""
try:
get_object_property(object_type, property_name)
return True
except requests.HTTPError:
return False
def delete_object_property(object_type, property_name):
"""
Delete a property from Hubspot
Args:
object_type (str): The object type of the property (ie "deals")
property_name (str): The property name
Returns:
dict: the result of the delete request in JSON format
"""
response = send_hubspot_request(
"",
"/properties/v1/{}/properties/named/{}".format(
object_type.lower(), property_name
),
"DELETE",
)
response.raise_for_status()
return response.json()
def get_property_group(object_type, group_name):
"""
Get a Hubspot property group.
Args:
object_type (str): The object type of the group (ie "deals")
group_name (str): The group name
Returns:
dict: The group attributes
"""
response = send_hubspot_request(
group_name, f"/properties/v1/{object_type}/groups/named", "GET"
)
response.raise_for_status()
return response.json()
def property_group_exists(object_type, group_name):
"""
Return True if the specified group exists (status=200), False otherwise
Args:
object_type (str): The object type of the group (ie "deals")
group_name (str): The group name
Returns:
boolean: True if the group exists otherwise False
"""
try:
get_property_group(object_type, group_name)
return True
except requests.HTTPError:
return False
def sync_property_group(object_type, name, label):
"""
Create or update a property group for an object type
Args:
object_type (str): The object type of the group (ie "deals")
name (str): The group name
label (str): The group label
Returns:
dict: the new/updated group attributes
"""
body = {"name": name, "displayName": label}
exists = property_group_exists(object_type, name)
if exists:
method = "PUT"
endpoint = f"named/{name}"
else:
method = "POST"
endpoint = ""
response = send_hubspot_request(
endpoint, f"/properties/v1/{object_type}/groups", method, body=body
)
response.raise_for_status()
return response.json()
def delete_property_group(object_type, group_name):
"""
Delete a group from Hubspot
Args:
object_type (str): The object type of the group (ie "deals")
group_name (str): The group name
Returns:
dict: The result of the delete command in JSON format
"""
response = send_hubspot_request(
"",
"/properties/v1/{}/groups/named/{}".format(object_type.lower(), group_name),
"DELETE",
)
response.raise_for_status()
return response.json()
|
hubspot/api.py
|
from builtins import hasattr
import logging
import re
from urllib.parse import urljoin, urlencode
import requests
from django.conf import settings
from django.contrib.auth.models import User
from django.utils import timezone
from applications.constants import INTEGRATION_PREFIX
from applications.models import BootcampApplication
from hubspot.decorators import try_again
from hubspot.serializers import (
HubspotProductSerializer,
HubspotDealSerializer,
HubspotLineSerializer,
)
from klasses.models import BootcampRun
HUBSPOT_API_BASE_URL = "https://api.hubapi.com"
log = logging.getLogger()
def hubspot_timestamp(dt):
"""
Convert a datetime to a Hubspot timestamp
Args:
dt (DateTime): the DateTime to convert
Returns:
int: The timestamp in milliseconds
"""
return int(dt.timestamp() * 1000)
def format_hubspot_id(object_id):
"""
Return a formatted Hubspot ID for an object
Args:
object_id(int): The object id
Returns:
str: The hubspot id
"""
return "{}-{}".format(settings.HUBSPOT_ID_PREFIX, object_id)
def parse_hubspot_deal_id(hubspot_id):
"""
Return an object ID parsed from a hubspot ID
Args:
hubspot_id(str): The formatted hubspot ID
Returns:
int: The object ID or None
"""
match = re.compile(
fr"{settings.HUBSPOT_ID_PREFIX}-{INTEGRATION_PREFIX}(\d+)"
).match(hubspot_id)
return int(match.group(1)) if match else None
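# Example with hypothetical prefixes: if HUBSPOT_ID_PREFIX were 'BOOTCAMP' and
# INTEGRATION_PREFIX were 'APP-', then 'BOOTCAMP-APP-17' would parse to 17 and any
# non-matching id would return None.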
@try_again
def send_hubspot_request(
endpoint, api_url, method, body=None, query_params=None, **kwargs
):
"""
Send a request to Hubspot using the given params, body and api key specified in settings
Args:
endpoint (String): Specific endpoint to hit. Can be the empty string
api_url (String): The url path to append endpoint to
method (String): GET, POST, PUT, or DELETE
body (serializable data): Data to be JSON serialized and sent with a PUT or POST request
query_params (Dict): Params to be added to the query string
kwargs: keyword arguments to add to the request method
Returns:
Response: HTTP response to the constructed url
"""
base_url = urljoin(f"{HUBSPOT_API_BASE_URL}/", api_url)
if endpoint:
base_url = urljoin(f"{base_url}/", endpoint)
if query_params is None:
query_params = {}
if "hapikey" not in query_params:
query_params["hapikey"] = settings.HUBSPOT_API_KEY
params = urlencode(query_params)
url = f"{base_url}?{params}"
if method == "GET":
return requests.get(url=url, **kwargs)
if method == "PUT":
return requests.put(url=url, json=body, **kwargs)
if method == "POST":
return requests.post(url=url, json=body, **kwargs)
if method == "DELETE":
return requests.delete(url=url, **kwargs)
def sanitize_properties(properties):
"""
Ensures we don't pass any invalid values (e.g. nulls) to Hubspot
Args:
properties (dict):
the dict of properties to be sanitized
Returns:
dict:
the sanitized dict
"""
return {
key: value if value is not None else "" for key, value in properties.items()
}
def make_sync_message(object_id, properties):
"""
Create data for sync message
Args:
object_id (ObjectID): Internal ID to match with Hubspot object
properties (dict): dict of properties to be synced
Returns:
dict: serialized sync-message
"""
properties = sanitize_properties(properties)
return {
"integratorObjectId": format_hubspot_id(object_id),
"action": "UPSERT",
"changeOccurredTimestamp": hubspot_timestamp(timezone.now()),
"propertyNameToValues": properties,
}
def paged_sync_errors(limit=200, offset=0):
"""
    Query the Hubspot API for errors that have occurred during sync
Args:
limit (Int): The number of errors to be returned
offset (Int): The index of the first error to be returned
Returns:
list: errors in JSON format
"""
response = send_hubspot_request(
"sync-errors",
"/extensions/ecomm/v1",
"GET",
query_params={"limit": limit, "offset": offset},
)
response.raise_for_status()
return response.json().get("results", [])
def get_sync_errors(limit=200, offset=0):
"""
Yield hubspot errors
Args:
limit (int): The number of errors to be returned
offset (int): The index of the first error to be returned
Yields:
dict : error in JSON format
"""
errors = paged_sync_errors(limit, offset)
while len(errors) > 0:
yield from errors
offset += limit
errors = paged_sync_errors(limit, offset)
def get_sync_status(object_type, object_id):
"""
Get errors that have occurred during sync
Args:
object_type (STRING): "CONTACT", "DEAL", "PRODUCT", "LINE_ITEM"
object_id (Int): The internal django ID of the object to check
Returns:
        dict: The sync status in JSON format
"""
response = send_hubspot_request(
format_hubspot_id(object_id),
f"/extensions/ecomm/v1/sync-status/{object_type.upper()}",
"GET",
)
response.raise_for_status()
return response.json()
def exists_in_hubspot(object_type, object_id):
"""
Check if object exists in hubspot by looking for the presence of a hubspot ID
Args:
object_type (str): The hubspot object_type
object_id (ID): The ID of the object to check
Return:
boolean: True if the object exists
"""
try:
sync_status = get_sync_status(object_type, object_id)
except requests.HTTPError as sync_status_error:
if sync_status_error.response.status_code != 400:
log.error(sync_status_error)
return False
else:
return sync_status["hubspotId"] is not None
def make_contact_sync_message(user_id):
"""
Create the body of a sync message for a contact.
Args:
user_id (int): User id
Returns:
list: dict containing serializable sync-message data
"""
from profiles.serializers import UserSerializer
user = User.objects.get(id=user_id)
if not hasattr(user, "profile"):
return [{}]
properties = UserSerializer(user).data
properties.update(properties.pop("legal_address") or {})
properties.update(properties.pop("profile") or {})
properties["work_experience"] = properties.pop("years_experience", None)
if "street_address" in properties:
properties["street_address"] = "\n".join(properties.pop("street_address"))
# Use profile id to maintain consistency with existing hubspot contacts
return [make_sync_message(user.profile.id, properties)]
def make_product_sync_message(bootcamp_run_id):
"""
Create the body of a sync message for a product.
Args:
bootcamp_run_id (int): Bootcamp run id
Returns:
list: dict containing serializable sync-message data
"""
bootcamp_run = BootcampRun.objects.get(id=bootcamp_run_id)
properties = HubspotProductSerializer(instance=bootcamp_run).data
return [make_sync_message(bootcamp_run.integration_id, properties)]
def make_deal_sync_message(application_id):
"""
Create the body of a sync message for a deal.
Args:
application_id (int): BootcampApplication id
Returns:
list: dict containing serializable sync-message data
"""
application = BootcampApplication.objects.get(id=application_id)
properties = HubspotDealSerializer(instance=application).data
return [make_sync_message(application.integration_id, properties)]
def make_line_sync_message(application_id):
"""
Create the body of a sync message for a Line Item.
Args:
application_id (int):BootcampApplication id
Returns:
list: dict containing serializable sync-message data
"""
application = BootcampApplication.objects.get(id=application_id)
properties = HubspotLineSerializer(instance=application).data
properties["quantity"] = 1
return [make_sync_message(application.integration_id, properties)]
def sync_object_property(object_type, property_dict):
"""
Create or update a new object property
Args:
object_type (str): The object type of the property (ie "deals")
property_dict (dict): The attributes of the property
Returns:
dict: The new/updated property attributes
"""
required_fields = {"name", "label", "groupName"}
missing_fields = required_fields.difference(property_dict.keys())
if missing_fields:
raise KeyError(
"The following property attributes are required: {}".format(
",".join(missing_fields)
)
)
for key in property_dict.keys():
if property_dict[key] is None:
property_dict[key] = ""
exists = object_property_exists(object_type, property_dict["name"])
if exists:
method = "PUT"
endpoint = f"named/{property_dict['name']}"
else:
method = "POST"
endpoint = ""
response = send_hubspot_request(
endpoint, f"/properties/v1/{object_type}/properties", method, body=property_dict
)
response.raise_for_status()
return response.json()
def get_object_property(object_type, property_name):
"""
Get a Hubspot object property.
Args:
object_type (str): The object type of the property (ie "deals")
property_name (str): The property name
Returns:
dict: the property attributes
"""
response = send_hubspot_request(
property_name, f"/properties/v1/{object_type}/properties/named", "GET"
)
response.raise_for_status()
return response.json()
def object_property_exists(object_type, property_name):
"""
Return True if the specified property exists, False otherwise
Args:
object_type (str): The object type of the property (ie "deals")
property_name (str): The property name
Returns:
boolean: True if the property exists otherwise False
"""
try:
get_object_property(object_type, property_name)
return True
except requests.HTTPError:
return False
def delete_object_property(object_type, property_name):
"""
Delete a property from Hubspot
Args:
object_type (str): The object type of the property (ie "deals")
property_name (str): The property name
Returns:
dict: the result of the delete request in JSON format
"""
response = send_hubspot_request(
"",
"/properties/v1/{}/properties/named/{}".format(
object_type.lower(), property_name
),
"DELETE",
)
response.raise_for_status()
return response.json()
def get_property_group(object_type, group_name):
"""
Get a Hubspot property group.
Args:
object_type (str): The object type of the group (ie "deals")
group_name (str): The group name
Returns:
dict: The group attributes
"""
response = send_hubspot_request(
group_name, f"/properties/v1/{object_type}/groups/named", "GET"
)
response.raise_for_status()
return response.json()
def property_group_exists(object_type, group_name):
"""
Return True if the specified group exists (status=200), False otherwise
Args:
object_type (str): The object type of the group (ie "deals")
group_name (str): The group name
Returns:
boolean: True if the group exists otherwise False
"""
try:
get_property_group(object_type, group_name)
return True
except requests.HTTPError:
return False
def sync_property_group(object_type, name, label):
"""
Create or update a property group for an object type
Args:
object_type (str): The object type of the group (ie "deals")
name (str): The group name
label (str): The group label
Returns:
dict: the new/updated group attributes
"""
body = {"name": name, "displayName": label}
exists = property_group_exists(object_type, name)
if exists:
method = "PUT"
endpoint = f"named/{name}"
else:
method = "POST"
endpoint = ""
response = send_hubspot_request(
endpoint, f"/properties/v1/{object_type}/groups", method, body=body
)
response.raise_for_status()
return response.json()
def delete_property_group(object_type, group_name):
"""
Delete a group from Hubspot
Args:
object_type (str): The object type of the group (ie "deals")
group_name (str): The group name
Returns:
dict: The result of the delete command in JSON format
"""
response = send_hubspot_request(
"",
"/properties/v1/{}/groups/named/{}".format(object_type.lower(), group_name),
"DELETE",
)
response.raise_for_status()
return response.json()
| 0.776792 | 0.211946 |
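The hubspot/api.py entry above builds ecommerce-bridge sync messages for Hubspot. Below is a hypothetical, standalone sketch (not part of the repository) showing the payload shape that make_sync_message() produces; the prefix and property names are invented, and the real module reads HUBSPOT_ID_PREFIX and HUBSPOT_API_KEY from Django settings.
import json
from datetime import datetime, timezone

HUBSPOT_ID_PREFIX = "demo"   # stand-in for settings.HUBSPOT_ID_PREFIX (assumption)

def hubspot_timestamp(dt):
    # Same conversion as above: seconds -> milliseconds
    return int(dt.timestamp() * 1000)

def format_hubspot_id(object_id):
    return "{}-{}".format(HUBSPOT_ID_PREFIX, object_id)

properties = {"name": "Example deal", "price": None}
# sanitize_properties(): None values are replaced with empty strings
properties = {k: v if v is not None else "" for k, v in properties.items()}

message = {
    "integratorObjectId": format_hubspot_id(42),
    "action": "UPSERT",
    "changeOccurredTimestamp": hubspot_timestamp(datetime.now(timezone.utc)),
    "propertyNameToValues": properties,
}
print(json.dumps(message, indent=2))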
import shutil
from pathlib import Path
from typing import Dict, List, Union
import lmdb
from docarray import Document, DocumentArray
LMDB_MAP_SIZE = 100 * 1024 * 1024 * 1024
class DocStorage:
"""The backend storage engine of Documents"""
def __init__(
self, path: Union[str, Path], serialize_config: Dict = {}, lock: bool = True
):
self._path = path
self._env = self._open(path, lock=lock)
self._serialize_config = serialize_config
def _open(self, db_path: Union[str, Path], lock: bool = True):
return lmdb.Environment(
str(self._path),
map_size=LMDB_MAP_SIZE,
subdir=True,
readonly=False,
metasync=True,
sync=True,
map_async=False,
mode=493,
create=True,
readahead=True,
writemap=False,
meminit=True,
max_readers=126,
max_dbs=0, # means only one db
max_spare_txns=1,
lock=lock,
)
def insert(self, docs: 'DocumentArray'):
with self._env.begin(write=True) as txn:
for doc in docs:
success = txn.put(
doc.id.encode(),
doc.to_bytes(**self._serialize_config),
overwrite=True,
)
if not success:
txn.abort()
raise ValueError(
f'The Doc ({doc.id}) has already been added into database!'
)
def update(self, docs: 'DocumentArray'):
with self._env.begin(write=True) as txn:
for doc in docs:
old_value = txn.replace(
doc.id.encode(), doc.to_bytes(**self._serialize_config)
)
if not old_value:
txn.abort()
raise ValueError(f'The Doc ({doc.id}) does not exist in database!')
def delete(self, doc_ids: List[str]):
with self._env.begin(write=True) as txn:
for doc_id in doc_ids:
txn.delete(doc_id.encode())
def get(self, doc_ids: Union[str, list]) -> DocumentArray:
docs = DocumentArray()
if isinstance(doc_ids, str):
doc_ids = [doc_ids]
with self._env.begin(write=False) as txn:
for doc_id in doc_ids:
buffer = txn.get(doc_id.encode())
if buffer:
doc = Document.from_bytes(buffer, **self._serialize_config)
docs.append(doc)
return docs
def clear(self):
self._env.close()
shutil.rmtree(self._path)
self._env = self._open(self._path)
def close(self):
self._env.close()
@property
def stat(self):
with self._env.begin(write=False) as txn:
return txn.stat()
@property
def size(self):
return self.stat['entries']
def batched_iterator(self, batch_size: int = 1, **kwargs):
with self._env.begin(write=False) as txn:
count = 0
docs = DocumentArray()
cursor = txn.cursor()
cursor.iternext()
iterator = cursor.iternext(keys=False, values=True)
for value in iterator:
doc = Document.from_bytes(value, **self._serialize_config)
docs.append(doc)
count += 1
if count == batch_size:
yield docs
count = 0
docs = DocumentArray()
if count > 0:
yield docs
|
annlite/storage/kv.py
|
import shutil
from pathlib import Path
from typing import Dict, List, Union
import lmdb
from docarray import Document, DocumentArray
LMDB_MAP_SIZE = 100 * 1024 * 1024 * 1024
class DocStorage:
"""The backend storage engine of Documents"""
def __init__(
self, path: Union[str, Path], serialize_config: Dict = {}, lock: bool = True
):
self._path = path
self._env = self._open(path, lock=lock)
self._serialize_config = serialize_config
def _open(self, db_path: Union[str, Path], lock: bool = True):
return lmdb.Environment(
str(self._path),
map_size=LMDB_MAP_SIZE,
subdir=True,
readonly=False,
metasync=True,
sync=True,
map_async=False,
mode=493,
create=True,
readahead=True,
writemap=False,
meminit=True,
max_readers=126,
max_dbs=0, # means only one db
max_spare_txns=1,
lock=lock,
)
def insert(self, docs: 'DocumentArray'):
with self._env.begin(write=True) as txn:
for doc in docs:
success = txn.put(
doc.id.encode(),
doc.to_bytes(**self._serialize_config),
overwrite=True,
)
if not success:
txn.abort()
raise ValueError(
f'The Doc ({doc.id}) has already been added into database!'
)
def update(self, docs: 'DocumentArray'):
with self._env.begin(write=True) as txn:
for doc in docs:
old_value = txn.replace(
doc.id.encode(), doc.to_bytes(**self._serialize_config)
)
if not old_value:
txn.abort()
raise ValueError(f'The Doc ({doc.id}) does not exist in database!')
def delete(self, doc_ids: List[str]):
with self._env.begin(write=True) as txn:
for doc_id in doc_ids:
txn.delete(doc_id.encode())
def get(self, doc_ids: Union[str, list]) -> DocumentArray:
docs = DocumentArray()
if isinstance(doc_ids, str):
doc_ids = [doc_ids]
with self._env.begin(write=False) as txn:
for doc_id in doc_ids:
buffer = txn.get(doc_id.encode())
if buffer:
doc = Document.from_bytes(buffer, **self._serialize_config)
docs.append(doc)
return docs
def clear(self):
self._env.close()
shutil.rmtree(self._path)
self._env = self._open(self._path)
def close(self):
self._env.close()
@property
def stat(self):
with self._env.begin(write=False) as txn:
return txn.stat()
@property
def size(self):
return self.stat['entries']
def batched_iterator(self, batch_size: int = 1, **kwargs):
with self._env.begin(write=False) as txn:
count = 0
docs = DocumentArray()
cursor = txn.cursor()
cursor.iternext()
iterator = cursor.iternext(keys=False, values=True)
for value in iterator:
doc = Document.from_bytes(value, **self._serialize_config)
docs.append(doc)
count += 1
if count == batch_size:
yield docs
count = 0
docs = DocumentArray()
if count > 0:
yield docs
| 0.75985 | 0.214897 |
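A hypothetical round-trip with the DocStorage class above, assuming the lmdb and docarray packages are installed and the class is importable from annlite.storage.kv; the database path is only an example.
from docarray import Document, DocumentArray
from annlite.storage.kv import DocStorage

storage = DocStorage('./example_docs_db')  # creates/opens the LMDB environment on disk
storage.insert(DocumentArray([Document(id='a', text='hello'),
                              Document(id='b', text='world')]))

print(storage.size)               # -> 2
print(storage.get('a')[0].text)   # -> 'hello'

storage.update(DocumentArray([Document(id='a', text='hello again')]))
storage.delete(['b'])

for batch in storage.batched_iterator(batch_size=1):
    print([doc.id for doc in batch])

storage.close()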
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from starlette.exceptions import HTTPException as StarletteHTTPException
from app.router import KaKao, Record, Images, Profile, Objective, Apple
from app.database import Tables
from app.database.conn import engine
import time
import logging
def init_app(description):
"""
    Main function - runs the app
:return:
app
"""
app = FastAPI(
title="Wright Backend",
description=description,
version="1.0.1",
terms_of_service="https://blog.naver.com/PostView.naver?blogId=sujinju0311&logNo=222583009802&proxyReferer=",
contact={
"name": "Github",
"url": "https://github.com/PodiumDreaming/Dreaming_Podium_Backend",
"email": "<EMAIL>",
},
license_info={
"name": "MIT License",
},
)
app.include_router(KaKao.router)
app.include_router(Apple.router)
app.include_router(Profile.router)
app.include_router(Objective.router)
app.include_router(Record.router)
app.include_router(Images.router)
origins = [
"http://localhost:3000",
"localhost:3000",
"http://localhost:3000"
"localhost:8080",
"http://localhost",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"]
)
Tables.Base.metadata.create_all(bind=engine)
return app
def init_logger():
"""
initialize logger for Request logging.
    :return: logger for logging requests and logger for logging errors.
"""
req_logger = logging.getLogger("Request")
req_logger.setLevel(logging.INFO)
err_logger = logging.getLogger("Error")
err_logger.setLevel(logging.WARNING)
req_handler = logging.FileHandler(filename="log/request.log")
err_handler = logging.FileHandler(filename="log/error.log")
formatter = logging.Formatter(fmt="[%(asctime)s] %(name)s:%(levelname)s - %(message)s")
req_handler.setLevel(logging.INFO)
req_handler.setFormatter(formatter)
err_handler.setLevel(logging.ERROR)
err_handler.setFormatter(formatter)
req_logger.addHandler(req_handler)
err_logger.addHandler(err_handler)
return req_logger, err_logger
description = """
Wright API Server. 🚀
## KaKao
You can **Test Login with KaKao Account**.
## Apple
You can **Test Login with Apple Account**.
## Profile
You will be able to:
* **Create Profile**.
* **Read Profile**.
* **Update Profile**.
## Objective
You will be able to:
* **Create Objective**.
* **Read Objective**.
* **Update Objective**.
## Image Upload
You will be able to:
* **Upload Image to S3**.
* **Delete Image Url**.
## Record
You will be able to:
* **Create Record**.
* **Read Record**.
* **Read your API_Token**.
* **Test if your API_Token is valid**.
"""
app = init_app(description=description)
req_log, err_log = init_logger()
@app.middleware("http")
async def log_req(request: Request, call_next):
"""
Middleware that executes before and after request gets handled.
:param request:
:param call_next: Called API.
:return:
"""
# set log information
start_time = time.time()
method = request.method
user = request.client.host
port = request.client.port
path = request.url.path
scheme = request.url.scheme
response = await call_next(request)
    process_time = time.time() - start_time
process_time_f = f"{process_time:.3f}"
status_code = response.status_code
msg = f"{user}:{port} - [{method} {path} {scheme}] [{status_code}]: {process_time_f}"
if 200 <= status_code <= 300:
# Record log.
req_log.info(msg)
elif status_code >= 400:
# error is handled by exception handler
pass
else:
req_log.info(msg)
return response
@app.exception_handler(StarletteHTTPException)
async def leave_log(request: Request, exception):
"""
Overriding exception handler to leave log.
:param request:
:param exception:
:return:
"""
# set log information
method = request.method
user = request.client.host
port = request.client.port
path = request.url.path
scheme = request.url.scheme
msg = f"{user}:{port} - [{method} {path} {scheme}] [{exception.status_code}]"
# Record log.
err_log.error(msg)
return JSONResponse(status_code=exception.status_code,
content=exception.detail)
@app.get("/")
def read_root():
return {"hello": "world"}
|
main.py
|
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from starlette.exceptions import HTTPException as StarletteHTTPException
from app.router import KaKao, Record, Images, Profile, Objective, Apple
from app.database import Tables
from app.database.conn import engine
import time
import logging
def init_app(description):
"""
    Main function - runs the app
:return:
app
"""
app = FastAPI(
title="Wright Backend",
description=description,
version="1.0.1",
terms_of_service="https://blog.naver.com/PostView.naver?blogId=sujinju0311&logNo=222583009802&proxyReferer=",
contact={
"name": "Github",
"url": "https://github.com/PodiumDreaming/Dreaming_Podium_Backend",
"email": "<EMAIL>",
},
license_info={
"name": "MIT License",
},
)
app.include_router(KaKao.router)
app.include_router(Apple.router)
app.include_router(Profile.router)
app.include_router(Objective.router)
app.include_router(Record.router)
app.include_router(Images.router)
origins = [
"http://localhost:3000",
"localhost:3000",
"http://localhost:3000"
"localhost:8080",
"http://localhost",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"]
)
Tables.Base.metadata.create_all(bind=engine)
return app
def init_logger():
"""
initialize logger for Request logging.
    :return: logger for logging requests and logger for logging errors.
"""
req_logger = logging.getLogger("Request")
req_logger.setLevel(logging.INFO)
err_logger = logging.getLogger("Error")
err_logger.setLevel(logging.WARNING)
req_handler = logging.FileHandler(filename="log/request.log")
err_handler = logging.FileHandler(filename="log/error.log")
formatter = logging.Formatter(fmt="[%(asctime)s] %(name)s:%(levelname)s - %(message)s")
req_handler.setLevel(logging.INFO)
req_handler.setFormatter(formatter)
err_handler.setLevel(logging.ERROR)
err_handler.setFormatter(formatter)
req_logger.addHandler(req_handler)
err_logger.addHandler(err_handler)
return req_logger, err_logger
description = """
Wright API Server. 🚀
## KaKao
You can **Test Login with KaKao Account**.
## Apple
You can **Test Login with Apple Account**.
## Profile
You will be able to:
* **Create Profile**.
* **Read Profile**.
* **Update Profile**.
## Objective
You will be able to:
* **Create Objective**.
* **Read Objective**.
* **Update Objective**.
## Image Upload
You will be able to:
* **Upload Image to S3**.
* **Delete Image Url**.
## Record
You will be able to:
* **Create Record**.
* **Read Record**.
* **Read your API_Token**.
* **Test if your API_Token is valid**.
"""
app = init_app(description=description)
req_log, err_log = init_logger()
@app.middleware("http")
async def log_req(request: Request, call_next):
"""
Middleware that executes before and after request gets handled.
:param request:
:param call_next: Called API.
:return:
"""
# set log information
start_time = time.time()
method = request.method
user = request.client.host
port = request.client.port
path = request.url.path
scheme = request.url.scheme
response = await call_next(request)
    process_time = time.time() - start_time
process_time_f = f"{process_time:.3f}"
status_code = response.status_code
msg = f"{user}:{port} - [{method} {path} {scheme}] [{status_code}]: {process_time_f}"
if 200 <= status_code <= 300:
# Record log.
req_log.info(msg)
elif status_code >= 400:
# error is handled by exception handler
pass
else:
req_log.info(msg)
return response
@app.exception_handler(StarletteHTTPException)
async def leave_log(request: Request, exception):
"""
Overriding exception handler to leave log.
:param request:
:param exception:
:return:
"""
# set log information
method = request.method
user = request.client.host
port = request.client.port
path = request.url.path
scheme = request.url.scheme
msg = f"{user}:{port} - [{method} {path} {scheme}] [{exception.status_code}]"
# Record log.
err_log.error(msg)
return JSONResponse(status_code=exception.status_code,
content=exception.detail)
@app.get("/")
def read_root():
return {"hello": "world"}
| 0.464173 | 0.09782 |
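A stripped-down, standalone sketch of the request-logging middleware and exception-handler pattern used in main.py above; the real app also wires in routers, CORS and the database, so the names here are illustrative only.
import time
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
from fastapi.testclient import TestClient
from starlette.exceptions import HTTPException as StarletteHTTPException

demo = FastAPI()

@demo.middleware("http")
async def log_requests(request: Request, call_next):
    start = time.time()
    response = await call_next(request)
    elapsed = time.time() - start  # elapsed seconds for this request
    print(f"{request.client.host} - [{request.method} {request.url.path}] "
          f"[{response.status_code}]: {elapsed:.3f}")
    return response

@demo.exception_handler(StarletteHTTPException)
async def on_http_error(request: Request, exc):
    return JSONResponse(status_code=exc.status_code, content={"detail": exc.detail})

@demo.get("/")
def root():
    return {"hello": "world"}

client = TestClient(demo)
assert client.get("/").json() == {"hello": "world"}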
import os
import unittest
from lime.lime_text import LimeTextExplainer as OriginalLimeTextExplainer
from xai.explainer.explainer_exceptions import ExplainerUninitializedError
from xai.explainer.text.lime_text_explainer import LimeTextExplainer
class TestLimeTextExplainer(unittest.TestCase):
def setUp(self) -> None:
self.save_path = 'lime_text_explainer.pkl'
def test_build_explainer_uninitialized_explainer(self):
"""
Test exception check when explain_instance is called for un-built explainer
"""
        with self.assertRaises(ExplainerUninitializedError,
                               msg='Algorithm should raise an uninitialized error '
                                   'if the explainer\'s build method is not called'):
explainer = LimeTextExplainer()
explainer.explain_instance(None, None)
def test_build_explainer(self):
"""
Test building the explainer
"""
explainer = LimeTextExplainer()
explainer.build_explainer(predict_fn=None)
self.assertIsInstance(explainer.explainer_object, OriginalLimeTextExplainer)
def test_save_explainer(self):
"""
Test the saving of the explainer
"""
explainer = LimeTextExplainer()
explainer.build_explainer(predict_fn=None)
explainer.save_explainer(self.save_path)
self.assertTrue(os.path.exists(self.save_path))
def test_load_explainer(self):
"""
Test loading the explainer
"""
explainer = LimeTextExplainer()
explainer.build_explainer(predict_fn=None)
explainer.save_explainer(self.save_path)
new_explainer = LimeTextExplainer()
new_explainer.load_explainer(self.save_path)
self.assertIsNotNone(new_explainer.explainer_object)
def tearDown(self) -> None:
if os.path.exists(self.save_path):
os.remove(self.save_path)
if __name__ == '__main__':
unittest.main()
|
tests/explainer/test_lime_text_explainer.py
|
import os
import unittest
from lime.lime_text import LimeTextExplainer as OriginalLimeTextExplainer
from xai.explainer.explainer_exceptions import ExplainerUninitializedError
from xai.explainer.text.lime_text_explainer import LimeTextExplainer
class TestLimeTextExplainer(unittest.TestCase):
def setUp(self) -> None:
self.save_path = 'lime_text_explainer.pkl'
def test_build_explainer_uninitialized_explainer(self):
"""
Test exception check when explain_instance is called for un-built explainer
"""
        with self.assertRaises(ExplainerUninitializedError,
                               msg='Algorithm should raise an uninitialized error '
                                   'if the explainer\'s build method is not called'):
explainer = LimeTextExplainer()
explainer.explain_instance(None, None)
def test_build_explainer(self):
"""
Test building the explainer
"""
explainer = LimeTextExplainer()
explainer.build_explainer(predict_fn=None)
self.assertIsInstance(explainer.explainer_object, OriginalLimeTextExplainer)
def test_save_explainer(self):
"""
Test the saving of the explainer
"""
explainer = LimeTextExplainer()
explainer.build_explainer(predict_fn=None)
explainer.save_explainer(self.save_path)
self.assertTrue(os.path.exists(self.save_path))
def test_load_explainer(self):
"""
Test loading the explainer
"""
explainer = LimeTextExplainer()
explainer.build_explainer(predict_fn=None)
explainer.save_explainer(self.save_path)
new_explainer = LimeTextExplainer()
new_explainer.load_explainer(self.save_path)
self.assertIsNotNone(new_explainer.explainer_object)
def tearDown(self) -> None:
if os.path.exists(self.save_path):
os.remove(self.save_path)
if __name__ == '__main__':
unittest.main()
| 0.502197 | 0.304869 |
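The suite above can be run with python -m unittest tests.explainer.test_lime_text_explainer or via pytest. As a hypothetical sketch of the same save/load round-trip outside the tests (assuming the xai package and LIME are installed, and that building with predict_fn=None is acceptable for serialization, as the test cases suggest):
from xai.explainer.text.lime_text_explainer import LimeTextExplainer

explainer = LimeTextExplainer()
explainer.build_explainer(predict_fn=None)   # a real predict_fn is required before explain_instance
explainer.save_explainer('lime_text_explainer.pkl')

restored = LimeTextExplainer()
restored.load_explainer('lime_text_explainer.pkl')
print(type(restored.explainer_object))       # the wrapped lime.lime_text.LimeTextExplainer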
import json
from aiozk import ZKClient
from aiozk.exc import NoNode, NodeExists
from aiozk.protocol import AuthRequest
from vmshepherd.runtime import AbstractRuntimeData
class ZookeeperDriver(AbstractRuntimeData):
def __init__(self, instance_id, config):
super().__init__(instance_id)
self.reconfigure(config)
def set_auth(self, addauth):
if addauth is not None:
self._auth = {
'scheme': addauth.get('scheme', 'digest'),
'auth': addauth.get('auth', 'vmshepherd:vmshepherd'),
}
else:
self._auth = None
async def _assure_connected(self):
if self._zk is None:
self._zk = ZKClient(servers=self._servers, chroot=self._working_path)
await self._zk.start()
if self._auth is not None:
auth_req = AuthRequest(type=0, **self._auth)
await self._zk.send(auth_req)
def reconfigure(self, config):
if isinstance(config['servers'], list):
self._servers = ','.join(config['servers'])
else:
self._servers = config['servers']
self._working_path = config.get('working_path', '/vmshepherd')
self.set_auth(config.get('addauth'))
self._zk = None
async def _set_preset_data(self, preset_name, data):
await self._assure_connected()
prepared_data = json.dumps(data)
try:
await self._zk.set_data(preset_name, prepared_data)
except NoNode:
await self._zk.create(preset_name)
await self._zk.set_data(preset_name, prepared_data)
async def _get_preset_data(self, preset_name):
await self._assure_connected()
try:
res = await self._zk.get_data(preset_name)
except NoNode:
return {}
return json.loads(res.decode('utf-8'))
async def _acquire_lock(self, name, timeout=1):
try:
await self._zk.create(f'{name}.lock')
return True
except NodeExists:
return False
async def _release_lock(self, name):
try:
await self._zk.delete(f'{name}.lock')
return True
except NoNode:
return False
|
src/vmshepherd_zookeeper_driver/__init__.py
|
import json
from aiozk import ZKClient
from aiozk.exc import NoNode, NodeExists
from aiozk.protocol import AuthRequest
from vmshepherd.runtime import AbstractRuntimeData
class ZookeeperDriver(AbstractRuntimeData):
def __init__(self, instance_id, config):
super().__init__(instance_id)
self.reconfigure(config)
def set_auth(self, addauth):
if addauth is not None:
self._auth = {
'scheme': addauth.get('scheme', 'digest'),
'auth': addauth.get('auth', 'vmshepherd:vmshepherd'),
}
else:
self._auth = None
async def _assure_connected(self):
if self._zk is None:
self._zk = ZKClient(servers=self._servers, chroot=self._working_path)
await self._zk.start()
if self._auth is not None:
auth_req = AuthRequest(type=0, **self._auth)
await self._zk.send(auth_req)
def reconfigure(self, config):
if isinstance(config['servers'], list):
self._servers = ','.join(config['servers'])
else:
self._servers = config['servers']
self._working_path = config.get('working_path', '/vmshepherd')
self.set_auth(config.get('addauth'))
self._zk = None
async def _set_preset_data(self, preset_name, data):
await self._assure_connected()
prepared_data = json.dumps(data)
try:
await self._zk.set_data(preset_name, prepared_data)
except NoNode:
await self._zk.create(preset_name)
await self._zk.set_data(preset_name, prepared_data)
async def _get_preset_data(self, preset_name):
await self._assure_connected()
try:
res = await self._zk.get_data(preset_name)
except NoNode:
return {}
return json.loads(res.decode('utf-8'))
async def _acquire_lock(self, name, timeout=1):
try:
await self._zk.create(f'{name}.lock')
return True
except NodeExists:
return False
async def _release_lock(self, name):
try:
await self._zk.delete(f'{name}.lock')
return True
except NoNode:
return False
| 0.403802 | 0.048971 |
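A hypothetical usage sketch for the ZookeeperDriver above; it assumes a reachable ZooKeeper ensemble at the (invented) addresses shown and exercises the internal _set/_get/_lock helpers directly, whereas the real project calls them through the AbstractRuntimeData interface.
import asyncio
from vmshepherd_zookeeper_driver import ZookeeperDriver

async def main():
    driver = ZookeeperDriver(
        instance_id='vmshepherd-1',
        config={
            'servers': ['zk1.example:2181', 'zk2.example:2181'],  # example addresses
            'working_path': '/vmshepherd',
            # 'addauth': {'scheme': 'digest', 'auth': 'user:password'},
        },
    )
    await driver._set_preset_data('web-preset', {'count': 3})
    print(await driver._get_preset_data('web-preset'))   # -> {'count': 3}
    if await driver._acquire_lock('web-preset'):
        try:
            pass  # do work while holding the lock
        finally:
            await driver._release_lock('web-preset')

asyncio.run(main())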
from nate.svonet.graph_svo import generate_ticks, find_max_burst
import networkx as nx
import stop_words as sw
import copy
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.ticker import MaxNLocator
import numpy as np
from multiprocessing import Process, Queue
from os import cpu_count
def get_degree_for_slice(
q: Queue,
G,
edge_burst_dict,
time_slice_start,
time_slice_end,
minimum_burst_level,
stops,
overlap_threshold,
return_edge_overlaps,
list_top,
time_label):
graphCopy = copy.deepcopy(G)
for key in edge_burst_dict:
burst_level = find_max_burst(edge_burst_dict[key], time_slice_start, time_slice_end)
if burst_level > minimum_burst_level:
for node in graphCopy.nodes():
for j in [0, -1]:
for k in [0, -1]:
if key[j] == node[k] and key[j] not in stops:
overlap = len(set(key).intersection(set(node)))
if overlap >= overlap_threshold:
graphCopy.add_edge(key, node, overlap=overlap)
graphCopy.remove_edges_from(nx.selfloop_edges(graphCopy))
degree_list = list(graphCopy.degree)
degree_list.sort(key=lambda x: x[1], reverse=True)
degree_list = degree_list[0:list_top]
overlap_list = []
if return_edge_overlaps:
for entry in degree_list[0:list_top]:
overlap_sum = []
for edge in graphCopy.edges(entry[0]):
overlap_sum.append(graphCopy.edges[edge]['overlap'])
if len(overlap_sum) > 0:
avg = round(sum(overlap_sum) / len(overlap_sum), 2)
else:
avg = 0
overlap_list.append((entry[0], avg))
if return_edge_overlaps:
q.put((time_label, time_slice_end, degree_list, overlap_list))
else:
q.put((time_label, time_slice_end, degree_list))
class SVODegreeOverTimeMixin():
def __init__(self):
self.offset_dict:dict
self.edge_burst_dict:dict
self.s: int
self.gamma: int
self.from_svo: bool
self.lookup: dict
def top_svo_degree(
self,
number_of_slices: int = 8,
list_top: int = 10,
minimum_burst_level: int = 0,
return_edge_overlaps: bool = True,
overlap_threshold: int = 1):
"""[summary]
Args:
            number_of_slices (int, optional): [description]. Defaults to 8.
list_top (int, optional): [description]. Defaults to 10.
minimum_burst_level (int, optional): [description]. Defaults to 0.
return_edge_overlaps (bool, optional): [description]. Defaults to True.
overlap_threshold (int, optional): [description]. Defaults to 1.
Raises:
Exception: [description]
Returns:
[type]: [description]
"""
if overlap_threshold > 2 or overlap_threshold < 1:
raise Exception("Overlap Filter must be 1 or 2.")
stops = sw.get_stop_words("english")
# Create list of time slices:
offset_set = set()
for key in self.offset_dict:
for offset in self.offset_dict[key]:
offset_set.add(offset)
time_slices, time_labels = generate_ticks(offset_set, number_of_ticks=(number_of_slices))
# Create network consisting of all Subjects and Objects:
G = nx.Graph()
for entry in self.edge_burst_dict:
G.add_node(entry)
        if list_top is None:
list_top = len(self.edge_burst_dict)
# Iterate over time slices
q = Queue()
processes = []
for i in range(1, len(time_slices)):
time_slice_start = time_slices[i-1]
time_slice_end = time_slices[i]
time_label = time_labels[i]
t = Process(
target = get_degree_for_slice,
args= (
q,
G,
self.edge_burst_dict,
time_slice_start,
time_slice_end,
minimum_burst_level,
stops,
overlap_threshold,
return_edge_overlaps,
list_top,
time_label
)
)
processes.append(t)
t.start()
result_list = []
for i in range(1, len(time_slices)):
result_list.append(q.get())
top_degree_by_slice = {}
edge_overlap = {}
result_list = sorted(result_list, key = lambda x: x[1])
for result in result_list:
time_label = result[0]
degree_list = result[2]
top_degree_by_slice[time_label] = degree_list
if return_edge_overlaps:
edge_overlap[time_label] = result[3]
if return_edge_overlaps:
return top_degree_by_slice, edge_overlap
else:
return top_degree_by_slice
def specific_svo_degree(self,
tokens: list,
number_of_slices: int = 15,
minimum_burst_level: int = 0,
overlap_threshold: int = 1):
"""[summary]
Args:
tokens (list): [description]
            number_of_slices (int, optional): [description]. Defaults to 15.
minimum_burst_level (int, optional): [description]. Defaults to 0.
overlap_threshold (int, optional): [description]. Defaults to 1.
Returns:
[type]: [description]
"""
if isinstance(tokens, list) == False:
tokens = [tokens]
full_lists = self.top_svo_degree(number_of_slices=number_of_slices,
list_top=None,
minimum_burst_level=minimum_burst_level,
return_edge_overlaps=False,
overlap_threshold=overlap_threshold,
)
token_rank_dict = {}
for day in full_lists:
v = [item for item in full_lists[day] if item[0] in tokens]
token_rank_dict[day] = v
return token_rank_dict
def plot_top_svo_degree(
self,
number_of_slices: int = 8,
list_top: int = 10,
minimum_burst_level: int = 0,
overlap_threshold: int = 1,
filename: str = False,):
"""[summary]
Args:
            number_of_slices (int, optional): [description]. Defaults to 8.
list_top (int, optional): [description]. Defaults to 10.
minimum_burst_level (int, optional): [description]. Defaults to 0.
overlap_threshold (int, optional): [description]. Defaults to 1.
"""
data = self.top_svo_degree(
number_of_slices = number_of_slices,
list_top = list_top,
minimum_burst_level = minimum_burst_level,
return_edge_overlaps = False,
overlap_threshold=overlap_threshold,)
date_names = []
time_slices = []
for k, v in data.items():
date_names.append(k)
time_slices.append(v)
for i in range(1, len(date_names)):
x = np.arange(list_top)
values = []
names = []
for top_degrees in time_slices[i]:
values.append(top_degrees[1])
names.append(top_degrees[0])
values.reverse()
names.reverse()
fig, ax = plt.subplots()
fig.set_figwidth(6)
fig.set_figheight(10)
fig.suptitle('{} to {}'.format(date_names[i-1], date_names[i]), fontsize=12, ha="center")
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.barh(x, values, color='#32363A')
plt.yticks(x, names)
if filename:
plt.savefig(str(filename) + str(i) + ".pdf")
else:
plt.show()
def plot_specific_svo_degree(self,
tokens: list,
number_of_slices: int = 15,
minimum_burst_level: int = 0,
overlap_threshold: int = 1,
plot_type="line",
filename: str = False,):
if isinstance(tokens, list) == False:
tokens = [tokens]
if plot_type != "line" and plot_type != "bar":
raise Exception("`plot_type` must be one of 'line' or 'bar'")
data = self.specific_svo_degree(tokens=tokens,
number_of_slices=number_of_slices,
minimum_burst_level=minimum_burst_level,
overlap_threshold=overlap_threshold,
)
inverted_dict = {}
for token in tokens:
full_list = []
for date, degree_list in data.items():
degree = [item[1] for item in degree_list if item[0] == token]
full_list.append((date, degree[0]))
inverted_dict[token] = full_list
x = np.arange(number_of_slices)
for k, v in inverted_dict.items():
values = [item[1] for item in v]
dates = [item[0].replace(", ", "\n") for item in v]
fig, ax = plt.subplots()
fig.set_figwidth(10)
fig.set_figheight(6)
fig.suptitle("'{}'".format(k), fontsize=12, ha="center")
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
if plot_type == "bar":
plt.bar(x, values, color='#32363A')
elif plot_type == "line":
plt.plot(x, values, color='#32363A')
plt.xticks(x, dates)
if filename:
plt.savefig(str(filename) + str(k) + ".pdf")
else:
plt.show()
|
nate/svonet/svo_degree_over_time.py
|
from nate.svonet.graph_svo import generate_ticks, find_max_burst
import networkx as nx
import stop_words as sw
import copy
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.ticker import MaxNLocator
import numpy as np
from multiprocessing import Process, Queue
from os import cpu_count
def get_degree_for_slice(
q: Queue,
G,
edge_burst_dict,
time_slice_start,
time_slice_end,
minimum_burst_level,
stops,
overlap_threshold,
return_edge_overlaps,
list_top,
time_label):
graphCopy = copy.deepcopy(G)
for key in edge_burst_dict:
burst_level = find_max_burst(edge_burst_dict[key], time_slice_start, time_slice_end)
if burst_level > minimum_burst_level:
for node in graphCopy.nodes():
for j in [0, -1]:
for k in [0, -1]:
if key[j] == node[k] and key[j] not in stops:
overlap = len(set(key).intersection(set(node)))
if overlap >= overlap_threshold:
graphCopy.add_edge(key, node, overlap=overlap)
graphCopy.remove_edges_from(nx.selfloop_edges(graphCopy))
degree_list = list(graphCopy.degree)
degree_list.sort(key=lambda x: x[1], reverse=True)
degree_list = degree_list[0:list_top]
overlap_list = []
if return_edge_overlaps:
for entry in degree_list[0:list_top]:
overlap_sum = []
for edge in graphCopy.edges(entry[0]):
overlap_sum.append(graphCopy.edges[edge]['overlap'])
if len(overlap_sum) > 0:
avg = round(sum(overlap_sum) / len(overlap_sum), 2)
else:
avg = 0
overlap_list.append((entry[0], avg))
if return_edge_overlaps:
q.put((time_label, time_slice_end, degree_list, overlap_list))
else:
q.put((time_label, time_slice_end, degree_list))
class SVODegreeOverTimeMixin():
def __init__(self):
self.offset_dict:dict
self.edge_burst_dict:dict
self.s: int
self.gamma: int
self.from_svo: bool
self.lookup: dict
def top_svo_degree(
self,
number_of_slices: int = 8,
list_top: int = 10,
minimum_burst_level: int = 0,
return_edge_overlaps: bool = True,
overlap_threshold: int = 1):
"""[summary]
Args:
            number_of_slices (int, optional): [description]. Defaults to 8.
list_top (int, optional): [description]. Defaults to 10.
minimum_burst_level (int, optional): [description]. Defaults to 0.
return_edge_overlaps (bool, optional): [description]. Defaults to True.
overlap_threshold (int, optional): [description]. Defaults to 1.
Raises:
Exception: [description]
Returns:
[type]: [description]
"""
if overlap_threshold > 2 or overlap_threshold < 1:
raise Exception("Overlap Filter must be 1 or 2.")
stops = sw.get_stop_words("english")
# Create list of time slices:
offset_set = set()
for key in self.offset_dict:
for offset in self.offset_dict[key]:
offset_set.add(offset)
time_slices, time_labels = generate_ticks(offset_set, number_of_ticks=(number_of_slices))
# Create network consisting of all Subjects and Objects:
G = nx.Graph()
for entry in self.edge_burst_dict:
G.add_node(entry)
        if list_top is None:
list_top = len(self.edge_burst_dict)
# Iterate over time slices
q = Queue()
processes = []
for i in range(1, len(time_slices)):
time_slice_start = time_slices[i-1]
time_slice_end = time_slices[i]
time_label = time_labels[i]
t = Process(
target = get_degree_for_slice,
args= (
q,
G,
self.edge_burst_dict,
time_slice_start,
time_slice_end,
minimum_burst_level,
stops,
overlap_threshold,
return_edge_overlaps,
list_top,
time_label
)
)
processes.append(t)
t.start()
result_list = []
for i in range(1, len(time_slices)):
result_list.append(q.get())
top_degree_by_slice = {}
edge_overlap = {}
result_list = sorted(result_list, key = lambda x: x[1])
for result in result_list:
time_label = result[0]
degree_list = result[2]
top_degree_by_slice[time_label] = degree_list
if return_edge_overlaps:
edge_overlap[time_label] = result[3]
if return_edge_overlaps:
return top_degree_by_slice, edge_overlap
else:
return top_degree_by_slice
def specific_svo_degree(self,
tokens: list,
number_of_slices: int = 15,
minimum_burst_level: int = 0,
overlap_threshold: int = 1):
"""[summary]
Args:
tokens (list): [description]
            number_of_slices (int, optional): [description]. Defaults to 15.
minimum_burst_level (int, optional): [description]. Defaults to 0.
overlap_threshold (int, optional): [description]. Defaults to 1.
Returns:
[type]: [description]
"""
if isinstance(tokens, list) == False:
tokens = [tokens]
full_lists = self.top_svo_degree(number_of_slices=number_of_slices,
list_top=None,
minimum_burst_level=minimum_burst_level,
return_edge_overlaps=False,
overlap_threshold=overlap_threshold,
)
token_rank_dict = {}
for day in full_lists:
v = [item for item in full_lists[day] if item[0] in tokens]
token_rank_dict[day] = v
return token_rank_dict
def plot_top_svo_degree(
self,
number_of_slices: int = 8,
list_top: int = 10,
minimum_burst_level: int = 0,
overlap_threshold: int = 1,
filename: str = False,):
"""[summary]
Args:
            number_of_slices (int, optional): [description]. Defaults to 8.
list_top (int, optional): [description]. Defaults to 10.
minimum_burst_level (int, optional): [description]. Defaults to 0.
overlap_threshold (int, optional): [description]. Defaults to 1.
"""
data = self.top_svo_degree(
number_of_slices = number_of_slices,
list_top = list_top,
minimum_burst_level = minimum_burst_level,
return_edge_overlaps = False,
overlap_threshold=overlap_threshold,)
date_names = []
time_slices = []
for k, v in data.items():
date_names.append(k)
time_slices.append(v)
for i in range(1, len(date_names)):
x = np.arange(list_top)
values = []
names = []
for top_degrees in time_slices[i]:
values.append(top_degrees[1])
names.append(top_degrees[0])
values.reverse()
names.reverse()
fig, ax = plt.subplots()
fig.set_figwidth(6)
fig.set_figheight(10)
fig.suptitle('{} to {}'.format(date_names[i-1], date_names[i]), fontsize=12, ha="center")
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.barh(x, values, color='#32363A')
plt.yticks(x, names)
if filename:
plt.savefig(str(filename) + str(i) + ".pdf")
else:
plt.show()
def plot_specific_svo_degree(self,
tokens: list,
number_of_slices: int = 15,
minimum_burst_level: int = 0,
overlap_threshold: int = 1,
plot_type="line",
filename: str = False,):
if isinstance(tokens, list) == False:
tokens = [tokens]
if plot_type != "line" and plot_type != "bar":
raise Exception("`plot_type` must be one of 'line' or 'bar'")
data = self.specific_svo_degree(tokens=tokens,
number_of_slices=number_of_slices,
minimum_burst_level=minimum_burst_level,
overlap_threshold=overlap_threshold,
)
inverted_dict = {}
for token in tokens:
full_list = []
for date, degree_list in data.items():
degree = [item[1] for item in degree_list if item[0] == token]
full_list.append((date, degree[0]))
inverted_dict[token] = full_list
x = np.arange(number_of_slices)
for k, v in inverted_dict.items():
values = [item[1] for item in v]
dates = [item[0].replace(", ", "\n") for item in v]
fig, ax = plt.subplots()
fig.set_figwidth(10)
fig.set_figheight(6)
fig.suptitle("'{}'".format(k), fontsize=12, ha="center")
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
if plot_type == "bar":
plt.bar(x, values, color='#32363A')
elif plot_type == "line":
plt.plot(x, values, color='#32363A')
plt.xticks(x, dates)
if filename:
plt.savefig(str(filename) + str(k) + ".pdf")
else:
plt.show()
| 0.456894 | 0.368974 |
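The degree computation above hinges on linking SVO tuples whose subject or object tokens match. This standalone sketch (with invented triples) reproduces that overlap-linking step from get_degree_for_slice() on a tiny graph, leaving out the burst filtering and multiprocessing.
import networkx as nx

svo_tuples = [
    ("government", "announce", "policy"),
    ("policy", "affect", "economy"),
    ("economy", "shrink", "output"),
]

G = nx.Graph()
G.add_nodes_from(svo_tuples)

overlap_threshold = 1
for key in svo_tuples:
    for node in G.nodes():
        if key == node:
            continue
        for j in [0, -1]:
            for k in [0, -1]:
                # Link triples whose subject/object slots share a token
                if key[j] == node[k]:
                    overlap = len(set(key).intersection(set(node)))
                    if overlap >= overlap_threshold:
                        G.add_edge(key, node, overlap=overlap)

G.remove_edges_from(nx.selfloop_edges(G))
degree_list = sorted(G.degree, key=lambda x: x[1], reverse=True)
print(degree_list)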
import requests
from bs4 import BeautifulSoup, Tag
class LinkPreview:
"""
A teaser of a website, containing a title, description, and a preview image.
The website is fetched from 'url' and the preview is generated from html.
Uses the algorithm described here:
https://andrejgajdos.com/how-to-create-a-link-preview/
"""
url: str
title: str
description: str
image_url: str
def __init__(self, url, html):
self.url = url
soup = BeautifulSoup(html, 'html.parser')
self.title = self._extract_title(soup)
self.description = self._extract_description(soup)
self.image_url = self._extract_image_url(soup)
@staticmethod
def fetch_from_url(url):
if not (url.startswith('http://') or url.startswith('https://')):
url = f'http://{url}'
r = requests.get(url)
if not (200 <= r.status_code < 300):
raise Exception(f'link preview url {url} returned non-2xx status code {r.status_code}')
return LinkPreview(url, r.text)
def to_dict(self):
return {
'url': self.url,
'title': self.title,
'description': self.description,
'image_url': self.image_url
}
def _extract_title(self, soup: BeautifulSoup):
og_title = soup.find('meta', {'property': 'og:title'})
        twitter_title = soup.find('meta', {'name': 'twitter:title'})
h1 = soup.h1
h2 = soup.h2
if og_title is not None:
return og_title['content']
elif twitter_title is not None:
return twitter_title['content']
elif h1 is not None:
return ' '.join(h1.stripped_strings)
elif h2 is not None:
return ' '.join(h2.stripped_strings)
else:
return None
def _extract_description(self, soup: BeautifulSoup):
og_desc = soup.find('meta', {'property': 'og:description'})
        twitter_desc = soup.find('meta', {'name': 'twitter:description'})
        meta_desc = soup.find('meta', {'name': 'description'})
def non_empty_paragraph(p):
return p.name == 'p' and len(p.contents) > 0
first_paragraph = soup.find(non_empty_paragraph)
if og_desc is not None:
return og_desc['content']
elif twitter_desc is not None:
return twitter_desc['content']
elif meta_desc is not None:
return meta_desc['content']
elif first_paragraph is not None:
return ' '.join(first_paragraph.stripped_strings)
else:
return None
def _extract_image_url(self, soup: BeautifulSoup):
og_image = soup.find('meta', {'property': 'og:image'})
link_image = soup.find('link', {'rel': 'image_src'})
        twitter_image = soup.find('meta', {'name': 'twitter:image'})
if og_image is not None:
return og_image['content']
elif link_image is not None:
return link_image['href']
elif twitter_image is not None:
return twitter_image['content']
imgs = [img for img in soup.find_all('img') if self._keep_image(img)]
if len(imgs) > 0:
            return self._absolute_url(imgs[0].get('src'))
return None
def _keep_image(self, img: Tag):
        # BeautifulSoup exposes tag attributes via .get() as strings, not as
        # .width/.height properties, so parse them before comparing.
        try:
            width = int(img.get('width'))
            height = int(img.get('height'))
        except (TypeError, ValueError):
            return False
        if width <= 50 or height <= 50:
            return False
        if width > height:
            if width / height > 3:
                return False
        else:
            if height / width > 3:
                return False
return True
def _absolute_url(self, relative_url):
"""
        Transform a relative url on the website to an absolute one,
e.g. /img/img1.jpg -> example.com/img/img1.jpg
"""
if not relative_url.startswith('/'):
return relative_url
        # Keep scheme and host (e.g. "http://example.com") before appending the path.
        origin = '/'.join(self.url.split('/')[:3])
        return f'{origin}{relative_url}'
|
backend/service/linkpreview.py
|
import requests
from bs4 import BeautifulSoup, Tag
class LinkPreview:
"""
A teaser of a website, containing a title, description, and a preview image.
The website is fetched from 'url' and the preview is generated from html.
Uses the algorithm described here:
https://andrejgajdos.com/how-to-create-a-link-preview/
"""
url: str
title: str
description: str
image_url: str
def __init__(self, url, html):
self.url = url
soup = BeautifulSoup(html, 'html.parser')
self.title = self._extract_title(soup)
self.description = self._extract_description(soup)
self.image_url = self._extract_image_url(soup)
@staticmethod
def fetch_from_url(url):
if not (url.startswith('http://') or url.startswith('https://')):
url = f'http://{url}'
r = requests.get(url)
if not (200 <= r.status_code < 300):
raise Exception(f'link preview url {url} returned non-2xx status code {r.status_code}')
return LinkPreview(url, r.text)
def to_dict(self):
return {
'url': self.url,
'title': self.title,
'description': self.description,
'image_url': self.image_url
}
def _extract_title(self, soup: BeautifulSoup):
og_title = soup.find('meta', {'property': 'og:title'})
        twitter_title = soup.find('meta', {'name': 'twitter:title'})
h1 = soup.h1
h2 = soup.h2
if og_title is not None:
return og_title['content']
elif twitter_title is not None:
return twitter_title['content']
elif h1 is not None:
return ' '.join(h1.stripped_strings)
elif h2 is not None:
return ' '.join(h2.stripped_strings)
else:
return None
def _extract_description(self, soup: BeautifulSoup):
og_desc = soup.find('meta', {'property': 'og:description'})
        twitter_desc = soup.find('meta', {'name': 'twitter:description'})
        meta_desc = soup.find('meta', {'name': 'description'})
def non_empty_paragraph(p):
return p.name == 'p' and len(p.contents) > 0
first_paragraph = soup.find(non_empty_paragraph)
if og_desc is not None:
return og_desc['content']
elif twitter_desc is not None:
return twitter_desc['content']
elif meta_desc is not None:
return meta_desc['content']
elif first_paragraph is not None:
return ' '.join(first_paragraph.stripped_strings)
else:
return None
def _extract_image_url(self, soup: BeautifulSoup):
og_image = soup.find('meta', {'property': 'og:image'})
link_image = soup.find('link', {'rel': 'image_src'})
        twitter_image = soup.find('meta', {'name': 'twitter:image'})
if og_image is not None:
return og_image['content']
elif link_image is not None:
return link_image['href']
elif twitter_image is not None:
return twitter_image['content']
imgs = [img for img in soup.find_all('img') if self._keep_image(img)]
if len(imgs) > 0:
            return self._absolute_url(imgs[0].get('src'))
return None
def _keep_image(self, img: Tag):
        # BeautifulSoup exposes tag attributes via .get() as strings, not as
        # .width/.height properties, so parse them before comparing.
        try:
            width = int(img.get('width'))
            height = int(img.get('height'))
        except (TypeError, ValueError):
            return False
        if width <= 50 or height <= 50:
            return False
        if width > height:
            if width / height > 3:
                return False
        else:
            if height / width > 3:
                return False
return True
def _absolute_url(self, relative_url):
"""
        Transform a relative url on the website to an absolute one,
e.g. /img/img1.jpg -> example.com/img/img1.jpg
"""
if not relative_url.startswith('/'):
return relative_url
        # Keep scheme and host (e.g. "http://example.com") before appending the path.
        origin = '/'.join(self.url.split('/')[:3])
        return f'{origin}{relative_url}'
| 0.502686 | 0.155174 |
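An offline usage sketch for the LinkPreview class above; the HTML snippet is invented so no network request is needed (LinkPreview.fetch_from_url would perform the GET instead), and the import path follows the repo path shown.
from backend.service.linkpreview import LinkPreview

html = """
<html>
  <head>
    <meta property="og:title" content="Example article">
    <meta property="og:description" content="A short description.">
    <meta property="og:image" content="https://example.com/cover.jpg">
  </head>
  <body><h1>Example article</h1><p>Body text.</p></body>
</html>
"""

preview = LinkPreview("https://example.com/article", html)
print(preview.to_dict())
# {'url': 'https://example.com/article', 'title': 'Example article',
#  'description': 'A short description.', 'image_url': 'https://example.com/cover.jpg'}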
import pigpio
class Servo:
def __init__(self, gpio, min_value=0, max_value=180, min_pulse=0.5, max_pulse=2.4, frequency=50):
if min_pulse < 0:
raise ValueError("The value of the argument min_pulse is out of range.")
if max_pulse < 0:
raise ValueError("The value of the argument max_pulse is out of range.")
if max_pulse * 1000 >= 1000000 / frequency:
raise ValueError("The value of the argument frequency is too large.")
self.__gpio = gpio
self.__min_pulse = min_pulse
self.__max_pulse = max_pulse
self.__frequency = frequency
self.__min_value = min_value
self.__max_value = max_value
self.__value = None
self.start()
try:
self.__servo.hardware_PWM(self.__gpio, self.__frequency, 0)
except Exception:
raise ValueError("The value of the argument gpio is out of range.")
def write(self, value):
if self.__servo is None:
raise Exception("The function start is not being executed.")
if value < self.__min_value or value > self.__max_value:
raise ValueError("The value of the argument value is out of range.")
self.__value = value
write_value = (value - self.__min_value) / (self.__max_value - self.__min_value) * (self.__max_pulse - self.__min_pulse) + self.__min_pulse
self.__servo.hardware_PWM(self.__gpio, self.__frequency, int(write_value * self.__frequency * 1000))
def read(self):
return self.__value
def stop(self):
self.__value = None
self.__servo.set_mode(self.__gpio, pigpio.INPUT)
self.__servo.stop()
self.__servo = None
def start(self):
self.__servo = pigpio.pi()
self.__servo.set_mode(self.__gpio, pigpio.OUTPUT)
class Drive:
def __init__(self, left_gpio, right_gpio, min_value=-100, max_value=100, min_pulse=0.5, max_pulse=2.4, frequency=50):
self.__left = Servo(left_gpio, min_value=min_value, max_value=max_value, min_pulse=min_pulse, max_pulse=max_pulse, frequency=frequency)
self.__right = Servo(right_gpio, min_value=min_value, max_value=max_value, min_pulse=min_pulse, max_pulse=max_pulse, frequency=frequency)
self.__min_value = min_value
self.__max_value = max_value
self.__speed = None
self.__direction = None
def steering(self, speed, direction=0):
if speed < self.__min_value or speed > self.__max_value:
raise ValueError("The value of the argument speed is out of range.")
if direction < -180 or direction > 180:
raise ValueError("The value of the argument direction is out of range.")
self.__speed = speed
self.__direction = direction
if direction >= 0:
self.__left.write(speed)
rightValue = (90 - direction) / 90 * speed
self.__right.write(-rightValue)
else:
self.__right.write(-speed)
leftValue = (90 + direction) / 90 * speed
self.__left.write(leftValue)
def set_speed(self, speed):
if self.__direction is None:
raise Exception("The function steering or start is not being executed.")
self.steering(speed=speed, direction=self.__direction)
def set_direction(self, direction):
if self.__speed is None:
raise Exception("The function steering or start is not being executed.")
self.steering(speed=self.__speed, direction=direction)
def get_speed(self):
return self.__speed
def get_direction(self):
return self.__direction
def stop(self):
self.__speed = None
self.__direction = None
self.__right.stop()
self.__left.stop()
def start(self):
self.__right.start()
self.__left.start()
|
src/piservo/servo.py
|
import pigpio
class Servo:
def __init__(self, gpio, min_value=0, max_value=180, min_pulse=0.5, max_pulse=2.4, frequency=50):
if min_pulse < 0:
raise ValueError("The value of the argument min_pulse is out of range.")
if max_pulse < 0:
raise ValueError("The value of the argument max_pulse is out of range.")
if max_pulse * 1000 >= 1000000 / frequency:
raise ValueError("The value of the argument frequency is too large.")
self.__gpio = gpio
self.__min_pulse = min_pulse
self.__max_pulse = max_pulse
self.__frequency = frequency
self.__min_value = min_value
self.__max_value = max_value
self.__value = None
self.start()
try:
self.__servo.hardware_PWM(self.__gpio, self.__frequency, 0)
except Exception:
raise ValueError("The value of the argument gpio is out of range.")
def write(self, value):
if self.__servo is None:
raise Exception("The function start is not being executed.")
if value < self.__min_value or value > self.__max_value:
raise ValueError("The value of the argument value is out of range.")
self.__value = value
write_value = (value - self.__min_value) / (self.__max_value - self.__min_value) * (self.__max_pulse - self.__min_pulse) + self.__min_pulse
self.__servo.hardware_PWM(self.__gpio, self.__frequency, int(write_value * self.__frequency * 1000))
def read(self):
return self.__value
def stop(self):
self.__value = None
self.__servo.set_mode(self.__gpio, pigpio.INPUT)
self.__servo.stop()
self.__servo = None
def start(self):
self.__servo = pigpio.pi()
self.__servo.set_mode(self.__gpio, pigpio.OUTPUT)
class Drive:
def __init__(self, left_gpio, right_gpio, min_value=-100, max_value=100, min_pulse=0.5, max_pulse=2.4, frequency=50):
self.__left = Servo(left_gpio, min_value=min_value, max_value=max_value, min_pulse=min_pulse, max_pulse=max_pulse, frequency=frequency)
self.__right = Servo(right_gpio, min_value=min_value, max_value=max_value, min_pulse=min_pulse, max_pulse=max_pulse, frequency=frequency)
self.__min_value = min_value
self.__max_value = max_value
self.__speed = None
self.__direction = None
def steering(self, speed, direction=0):
if speed < self.__min_value or speed > self.__max_value:
raise ValueError("The value of the argument speed is out of range.")
if direction < -180 or direction > 180:
raise ValueError("The value of the argument direction is out of range.")
self.__speed = speed
self.__direction = direction
if direction >= 0:
self.__left.write(speed)
rightValue = (90 - direction) / 90 * speed
self.__right.write(-rightValue)
else:
self.__right.write(-speed)
leftValue = (90 + direction) / 90 * speed
self.__left.write(leftValue)
def set_speed(self, speed):
if self.__direction is None:
raise Exception("The function steering or start is not being executed.")
self.steering(speed=speed, direction=self.__direction)
def set_direction(self, direction):
if self.__speed is None:
raise Exception("The function steering or start is not being executed.")
self.steering(speed=self.__speed, direction=direction)
def get_speed(self):
return self.__speed
def get_direction(self):
return self.__direction
def stop(self):
self.__speed = None
self.__direction = None
self.__right.stop()
self.__left.stop()
def start(self):
self.__right.start()
self.__left.start()
| 0.639961 | 0.189184 |
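A hypothetical hardware sketch for the Servo and Drive classes above; it assumes a Raspberry Pi with the pigpio daemon running (sudo pigpiod), that the module is importable as piservo.servo, and that servos are actually wired to the GPIO numbers shown.
import time
from piservo.servo import Servo, Drive

servo = Servo(gpio=18)        # a hardware-PWM-capable pin
servo.write(90)               # move to the mid position
time.sleep(1)
print(servo.read())           # -> 90
servo.stop()

drive = Drive(left_gpio=12, right_gpio=13)
drive.steering(speed=50, direction=0)   # straight ahead at half speed
time.sleep(2)
drive.set_direction(90)                 # turn toward the right
time.sleep(2)
drive.stop()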
import pytest
from sanic.blueprints import Blueprint
from sanic.exceptions import HeaderExpectationFailed
from sanic.request import StreamBuffer
from sanic.response import stream, text
from sanic.views import CompositionView, HTTPMethodView
from sanic.views import stream as stream_decorator
data = "abc" * 10000000
def test_request_stream_method_view(app):
"""for self.is_request_stream = True"""
class SimpleView(HTTPMethodView):
def get(self, request):
assert request.stream is None
return text("OK")
@stream_decorator
async def post(self, request):
assert isinstance(request.stream, StreamBuffer)
result = ""
while True:
body = await request.stream.read()
if body is None:
break
result += body.decode("utf-8")
return text(result)
app.add_route(SimpleView.as_view(), "/method_view")
assert app.is_request_stream is True
request, response = app.test_client.get("/method_view")
assert response.status == 200
assert response.text == "OK"
request, response = app.test_client.post("/method_view", data=data)
assert response.status == 200
assert response.text == data
@pytest.mark.parametrize(
"headers, expect_raise_exception",
[
({"EXPECT": "100-continue"}, False),
({"EXPECT": "100-continue-extra"}, True),
],
)
def test_request_stream_100_continue(app, headers, expect_raise_exception):
class SimpleView(HTTPMethodView):
@stream_decorator
async def post(self, request):
assert isinstance(request.stream, StreamBuffer)
result = ""
while True:
body = await request.stream.read()
if body is None:
break
result += body.decode("utf-8")
return text(result)
app.add_route(SimpleView.as_view(), "/method_view")
assert app.is_request_stream is True
    if not expect_raise_exception:
        request, response = app.test_client.post(
            "/method_view", data=data, headers=headers
        )
        assert response.status == 200
        assert response.text == data
    else:
        with pytest.raises(ValueError) as e:
            app.test_client.post("/method_view", data=data, headers=headers)
        assert "Unknown Expect: 100-continue-extra" in str(e)
def test_request_stream_app(app):
"""for self.is_request_stream = True and decorators"""
@app.get("/get")
async def get(request):
assert request.stream is None
return text("GET")
@app.head("/head")
async def head(request):
assert request.stream is None
return text("HEAD")
@app.delete("/delete")
async def delete(request):
assert request.stream is None
return text("DELETE")
@app.options("/options")
async def options(request):
assert request.stream is None
return text("OPTIONS")
@app.post("/_post/<id>")
async def _post(request, id):
assert request.stream is None
return text("_POST")
@app.post("/post/<id>", stream=True)
async def post(request, id):
assert isinstance(request.stream, StreamBuffer)
result = ""
while True:
body = await request.stream.read()
if body is None:
break
result += body.decode("utf-8")
return text(result)
@app.put("/_put")
async def _put(request):
assert request.stream is None
return text("_PUT")
@app.put("/put", stream=True)
async def put(request):
assert isinstance(request.stream, StreamBuffer)
result = ""
while True:
body = await request.stream.read()
if body is None:
break
result += body.decode("utf-8")
return text(result)
@app.patch("/_patch")
async def _patch(request):
assert request.stream is None
return text("_PATCH")
@app.patch("/patch", stream=True)
async def patch(request):
assert isinstance(request.stream, StreamBuffer)
result = ""
while True:
body = await request.stream.read()
if body is None:
break
result += body.decode("utf-8")
return text(result)
assert app.is_request_stream is True
request, response = app.test_client.get("/get")
assert response.status == 200
assert response.text == "GET"
request, response = app.test_client.head("/head")
assert response.status == 200
assert response.text == ""
request, response = app.test_client.delete("/delete")
assert response.status == 200
assert response.text == "DELETE"
request, response = app.test_client.options("/options")
assert response.status == 200
assert response.text == "OPTIONS"
request, response = app.test_client.post("/_post/1", data=data)
assert response.status == 200
assert response.text == "_POST"
request, response = app.test_client.post("/post/1", data=data)
assert response.status == 200
assert response.text == data
request, response = app.test_client.put("/_put", data=data)
assert response.status == 200
assert response.text == "_PUT"
request, response = app.test_client.put("/put", data=data)
assert response.status == 200
assert response.text == data
request, response = app.test_client.patch("/_patch", data=data)
assert response.status == 200
assert response.text == "_PATCH"
request, response = app.test_client.patch("/patch", data=data)
assert response.status == 200
assert response.text == data
@pytest.mark.asyncio
async def test_request_stream_app_asgi(app):
"""for self.is_request_stream = True and decorators"""
@app.get("/get")
async def get(request):
assert request.stream is None
return text("GET")
@app.head("/head")
async def head(request):
assert request.stream is None
return text("HEAD")
@app.delete("/delete")
async def delete(request):
assert request.stream is None
return text("DELETE")
@app.options("/options")
async def options(request):
assert request.stream is None
return text("OPTIONS")
@app.post("/_post/<id>")
async def _post(request, id):
assert request.stream is None
return text("_POST")
@app.post("/post/<id>", stream=True)
async def post(request, id):
assert isinstance(request.stream, StreamBuffer)
result = ""
while True:
body = await request.stream.read()
if body is None:
break
result += body.decode("utf-8")
return text(result)
@app.put("/_put")
async def _put(request):
assert request.stream is None
return text("_PUT")
@app.put("/put", stream=True)
async def put(request):
assert isinstance(request.stream, StreamBuffer)
result = ""
while True:
body = await request.stream.read()
if body is None:
break
result += body.decode("utf-8")
return text(result)
@app.patch("/_patch")
async def _patch(request):
assert request.stream is None
return text("_PATCH")
@app.patch("/patch", stream=True)
async def patch(request):
assert isinstance(request.stream, StreamBuffer)
result = ""
while True:
body = await request.stream.read()
if body is None:
break
result += body.decode("utf-8")
return text(result)
assert app.is_request_stream is True
request, response = await app.asgi_client.get("/get")
assert response.status == 200
assert response.text == "GET"
request, response = await app.asgi_client.head("/head")
assert response.status == 200
assert response.text == ""
request, response = await app.asgi_client.delete("/delete")
assert response.status == 200
assert response.text == "DELETE"
request, response = await app.asgi_client.options("/options")
assert response.status == 200
assert response.text == "OPTIONS"
request, response = await app.asgi_client.post("/_post/1", data=data)
assert response.status == 200
assert response.text == "_POST"
request, response = await app.asgi_client.post("/post/1", data=data)
assert response.status == 200
assert response.text == data
request, response = await app.asgi_client.put("/_put", data=data)
assert response.status == 200
assert response.text == "_PUT"
request, response = await app.asgi_client.put("/put", data=data)
assert response.status == 200
assert response.text == data
request, response = await app.asgi_client.patch("/_patch", data=data)
assert response.status == 200
assert response.text == "_PATCH"
request, response = await app.asgi_client.patch("/patch", data=data)
assert response.status == 200
assert response.text == data
def test_request_stream_handle_exception(app):
"""for handling exceptions properly"""
@app.post("/post/<id>", stream=True)
async def post(request, id):
assert isinstance(request.stream, StreamBuffer)
result = ""
while True:
body = await request.stream.read()
if body is None:
break
result += body.decode("utf-8")
return text(result)
# 404
request, response = app.test_client.post("/in_valid_post", data=data)
assert response.status == 404
assert response.text == "Error: Requested URL /in_valid_post not found"
# 405
request, response = app.test_client.get("/post/random_id")
assert response.status == 405
assert (
response.text == "Error: Method GET not allowed for URL"
" /post/random_id"
)
def test_request_stream_blueprint(app):
"""for self.is_request_stream = True"""
bp = Blueprint("test_blueprint_request_stream_blueprint")
@app.get("/get")
async def get(request):
assert request.stream is None
return text("GET")
@bp.head("/head")
async def head(request):
assert request.stream is None
return text("HEAD")
@bp.delete("/delete")
async def delete(request):
assert request.stream is None
return text("DELETE")
@bp.options("/options")
async def options(request):
assert request.stream is None
return text("OPTIONS")
@bp.post("/_post/<id>")
async def _post(request, id):
assert request.stream is None
return text("_POST")
@bp.post("/post/<id>", stream=True)
async def post(request, id):
assert isinstance(request.stream, StreamBuffer)
result = ""
while True:
body = await request.stream.read()
if body is None:
break
result += body.decode("utf-8")
return text(result)
@bp.put("/_put")
async def _put(request):
assert request.stream is None
return text("_PUT")
@bp.put("/put", stream=True)
async def put(request):
assert isinstance(request.stream, StreamBuffer)
result = ""
while True:
body = await request.stream.read()
if body is None:
break
result += body.decode("utf-8")
return text(result)
@bp.patch("/_patch")
async def _patch(request):
assert request.stream is None
return text("_PATCH")
@bp.patch("/patch", stream=True)
async def patch(request):
assert isinstance(request.stream, StreamBuffer)
result = ""
while True:
body = await request.stream.read()
if body is None:
break
result += body.decode("utf-8")
return text(result)
async def post_add_route(request):
assert isinstance(request.stream, StreamBuffer)
result = ""
while True:
body = await request.stream.read()
if body is None:
break
result += body.decode("utf-8")
return text(result)
bp.add_route(
post_add_route, "/post/add_route", methods=["POST"], stream=True
)
app.blueprint(bp)
assert app.is_request_stream is True
request, response = app.test_client.get("/get")
assert response.status == 200
assert response.text == "GET"
request, response = app.test_client.head("/head")
assert response.status == 200
assert response.text == ""
request, response = app.test_client.delete("/delete")
assert response.status == 200
assert response.text == "DELETE"
request, response = app.test_client.options("/options")
assert response.status == 200
assert response.text == "OPTIONS"
request, response = app.test_client.post("/_post/1", data=data)
assert response.status == 200
assert response.text == "_POST"
request, response = app.test_client.post("/post/1", data=data)
assert response.status == 200
assert response.text == data
request, response = app.test_client.put("/_put", data=data)
assert response.status == 200
assert response.text == "_PUT"
request, response = app.test_client.put("/put", data=data)
assert response.status == 200
assert response.text == data
request, response = app.test_client.patch("/_patch", data=data)
assert response.status == 200
assert response.text == "_PATCH"
request, response = app.test_client.patch("/patch", data=data)
assert response.status == 200
assert response.text == data
request, response = app.test_client.post("/post/add_route", data=data)
assert response.status == 200
assert response.text == data
def test_request_stream_composition_view(app):
"""for self.is_request_stream = True"""
def get_handler(request):
assert request.stream is None
return text("OK")
async def post_handler(request):
assert isinstance(request.stream, StreamBuffer)
result = ""
while True:
body = await request.stream.read()
if body is None:
break
result += body.decode("utf-8")
return text(result)
view = CompositionView()
view.add(["GET"], get_handler)
view.add(["POST"], post_handler, stream=True)
app.add_route(view, "/composition_view")
assert app.is_request_stream is True
request, response = app.test_client.get("/composition_view")
assert response.status == 200
assert response.text == "OK"
request, response = app.test_client.post("/composition_view", data=data)
assert response.status == 200
assert response.text == data
def test_request_stream(app):
"""test for complex application"""
bp = Blueprint("test_blueprint_request_stream")
class SimpleView(HTTPMethodView):
def get(self, request):
assert request.stream is None
return text("OK")
@stream_decorator
async def post(self, request):
assert isinstance(request.stream, StreamBuffer)
result = ""
while True:
body = await request.stream.read()
if body is None:
break
result += body.decode("utf-8")
return text(result)
@app.post("/stream", stream=True)
async def handler(request):
assert isinstance(request.stream, StreamBuffer)
result = ""
while True:
body = await request.stream.read()
if body is None:
break
result += body.decode("utf-8")
return text(result)
@app.get("/get")
async def get(request):
assert request.stream is None
return text("OK")
@bp.post("/bp_stream", stream=True)
async def bp_stream(request):
assert isinstance(request.stream, StreamBuffer)
result = ""
while True:
body = await request.stream.read()
if body is None:
break
result += body.decode("utf-8")
return text(result)
@bp.get("/bp_get")
async def bp_get(request):
assert request.stream is None
return text("OK")
def get_handler(request):
assert request.stream is None
return text("OK")
async def post_handler(request):
assert isinstance(request.stream, StreamBuffer)
result = ""
while True:
body = await request.stream.read()
if body is None:
break
result += body.decode("utf-8")
return text(result)
app.add_route(SimpleView.as_view(), "/method_view")
view = CompositionView()
view.add(["GET"], get_handler)
view.add(["POST"], post_handler, stream=True)
app.blueprint(bp)
app.add_route(view, "/composition_view")
assert app.is_request_stream is True
request, response = app.test_client.get("/method_view")
assert response.status == 200
assert response.text == "OK"
request, response = app.test_client.post("/method_view", data=data)
assert response.status == 200
assert response.text == data
request, response = app.test_client.get("/composition_view")
assert response.status == 200
assert response.text == "OK"
request, response = app.test_client.post("/composition_view", data=data)
assert response.status == 200
assert response.text == data
request, response = app.test_client.get("/get")
assert response.status == 200
assert response.text == "OK"
request, response = app.test_client.post("/stream", data=data)
assert response.status == 200
assert response.text == data
request, response = app.test_client.get("/bp_get")
assert response.status == 200
assert response.text == "OK"
request, response = app.test_client.post("/bp_stream", data=data)
assert response.status == 200
assert response.text == data
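# Note (editorial sketch, not part of the original test suite): every streaming
# handler above repeats the same read loop. If sanic's StreamBuffer behaves as
# these tests assume, the pattern could be factored out as:
#
#   async def read_full_body(request):
#       result = ""
#       while True:
#           body = await request.stream.read()
#           if body is None:
#               break
#           result += body.decode("utf-8")
#       return result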
tests/test_request_stream.py
from papermill.iorw import load_notebook_node
import papermill as pm
import yaml
import os
import shutil
import copy
import numpy as np
import json
import plotly
import IPython
from IPython.display import display as idisplay, Video, Image
from base64 import b64decode, b64encode
from .input_types import parse
class DB(object):
def __init__(self, outputs):
self.out = parse(outputs)
def save(self, name, value, display=False):
        if name not in self.out:
            raise ValueError('"%s" not in output schema!' % name)
otype = self.out[name]['type']
if otype == 'Image':
if type(value) is str:
# filename
value = Image(value)
if type(value) is Image:
if display:
idisplay(value)
data, _metadata = IPython.core.formatters.format_display_data(
value)
pm.record(name, data)
return
if display:
idisplay(value)
if otype == 'Array' and type(value) is np.ndarray:
sval = json.dumps(value, cls=plotly.utils.PlotlyJSONEncoder)
pm.record(name, sval)
return
pm.record(name, value)
# deprecated
def save(name, value, display=False):
if display:
idisplay(value)
if type(value) is np.ndarray:
sval = json.dumps(value, cls=plotly.utils.PlotlyJSONEncoder)
pm.record(name, sval)
return
if type(value) is Video or type(value) is Image:
data, _metadata = IPython.core.formatters.format_display_data(value)
pm.record(name, data)
return
pm.record(name, value)
def read(nb, name):
data = nb.data[name]
if type(data) is str:
try:
data = json.loads(data)
except:
pass
try:
if 'image/jpeg' in data:
return b64decode(data['image/jpeg'])
if 'image/png' in data:
return b64decode(data['image/png'])
except:
pass
return data
def rdisplay(nb, name):
data = nb.data[name]
if type(data) is str:
try:
data = json.loads(data)
except:
pass
try:
if 'image/jpeg' in data \
or 'image/png' in data \
or 'image/gif' in data \
or 'text/html' in data:
idisplay(data, raw=True)
return
except:
pass
idisplay(data)
def _get_dict(inputs):
if type(inputs) == dict:
return inputs
d = {}
for i in inputs:
try:
d[i] = inputs[i].value
except:
pass
return d
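# _get_dict accepts either a plain dict or a mapping whose values are widget-like
# objects exposing a .value attribute (presumably hublib/ipywidgets inputs - an
# assumption), e.g. _get_dict({'temperature': 300}) simply returns {'temperature': 300}.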
def run_simtool(nb, outname, inputs, outdir=None, append=True, parallel=False):
inputs = _get_dict(inputs)
if outdir is not None:
if append:
if not os.path.exists(outdir):
os.makedirs(outdir)
else:
if os.path.exists(outdir):
shutil.rmtree(outdir)
os.makedirs(outdir)
pm.execute_notebook(nb, os.path.join(outdir, outname), parameters=inputs)
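# Minimal usage sketch (the notebook and output names below are hypothetical, and
# papermill's record()/data API is assumed to match the version this module targets):
#
#   run_simtool('simtool.ipynb', 'run.ipynb', {'param': 1.0}, outdir='out')
#   nb = load_notebook_node('out/run.ipynb')
#   value = read(nb, 'some_output')     # decodes JSON / base64 images recorded via DB.save
#   rdisplay(nb, 'some_output')         # render the recorded output in a notebook cell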
hublib/tool/rw.py
import os
import re
from time import time
import numpy as np
import pypinyin
import tensorflow as tf
from pypinyin import lazy_pinyin
from datasets import audio
from hparams import hparams
from tacotron.synthesizerp import Synthesizer
def pinyin_sentence(sentences):
pinyin = lazy_pinyin(sentences, style=pypinyin.TONE3)
return " ".join(pinyin)
# logs-Tacotron/taco_pretrained/tacotron_model.ckpt-65000
def exp(checkpoint):
checkpoint_path = "logs-Tacotron/taco_pretrained/tacotron_model.ckpt-" + str(checkpoint)
synth = Synthesizer()
synth.load(checkpoint_path, hparams)
test_times = 1
wav_time = []
generate_time = []
# sentences = [
# '你好',
# '您现在这个手机对您打过来',
# '我们套餐从几块钱到几百的都有,您要看您自己一个月',
# '您一个月能打多少分钟电话用多少兆流量',
# '有一个大屏神卡,流量通话不限量使用,但达到一定的值,它会限制',
# '对,在网站上可以那个要套餐要办的话',
# '您这边是二百九十六的,再往上就到您这个,噢,您的呢',
# '您要看您自己一个月能打多少分钟电话,用多少兆流量去选套餐',
# '嗯,那您还要改那个一百九十九元的吗',
# '嗯,那您考虑好,有需要再办',
# '您要咨询您这个套餐怎么扣费呢?还是说是一号之间的消费在哪些方面呢',
# '这个查某一个时段呢,我查不了,我这边只能看到一号之间总的消费情况',
# '对是不是买了什么游戏道具之类的',
# '嗯,那这边我们看到的是叫沃商店,我看一下能不能检测出来',
# '感谢您的耐心等待上上面写的是粒子搜索',
# '那您这边应该是游戏吧,您是不是购买了什么五百七十的金币',
# '上面分析的话是叫粒子搜索',
# '就是您不要去点,不要去订购就不会收费好的,那请问还有其他的问题吗',
# '这个我看到,嗯,您还有办理过国防科技四块钱,唉,奇艺十五元,那就是其他对',
# '积分您是说您用积分兑换兑换过话费还是怎么说没听清',
# '好,您稍等一下,感谢耐心等待,您这张卡当前可用积分是七百零二分',
# '用了七百分是什么时候消费的',
# '您之前用七百分是兑换的什么呢兑换的话费是吧',
# '观察查询,八月五号是有一个兑换记录',
# '那您自己通过您当时兑换是通过我们这边网厅手厅兑换的吗?还是积分商城',
# '不客气,那那您先查一下,还有其它的问题吗',
# '好的,我的宝贝用心,噢,您好,还在吗',
# '嗯就是不知道这本机号码是吗',
# '那您提供一下身份证号码和机主姓名',
# '嗯好的炫铃需要什么呢',
# '嗯,好的资料核对正确,那您请记录一下电话号码好吧',
# '月租费是十九块钱一个月',
# '大概六毛六毛零一点的费用',
# '您要查本机余额是吗?那您不要挂机,我给您转到自动台,请稍等',
# '我帮您看了一下,您有话费的呀,您现在这里还有一百八十块一毛五的',
# '但是我帮您看了一下,您手机卡是开通状态的功能之前,也是正常的',
# '您在欠费停机状态下,它也能拨打幺零零幺零的,嗯最低',
# '嗯,那非常抱歉了,嗯,如果之后有出现的话,就及时给我们打电话进来好吧',
# '您好,电话已接通,请问什么可以帮您',
# '余额是吧,我帮您查了一下,您的这边余额的话,显示到还有这个二十七块二毛五',
# '您好,您现在的话就是说没有收到停机提醒那个短信提醒对吗',
# '您现在话费的话有四十八块零九毛的费用了,您今天缴了五十块钱',
# '您缴的费用已经到账了呀',
# '说起吴总,大家都认识'
# ]
sentences= [
'你好',
'我的电话号码是一八五八三九五五五七零',
'我的电话号码是一五二零一七三五八零八',
'嗯,那非常抱歉了,嗯,如果之后有出现的话,就及时给我们打电话进来好吧',
]
    dic = list("①①②③④")  # dic[1]-dic[4] map TONE3 tone digits to circled numerals for the re.sub below; dic[0] appears to be an unused placeholder
    for t in range(test_times):
        print("test run {}".format(t))
        for i, sentence in enumerate(sentences):
start_time = time()
sentence = pinyin_sentence(sentence)
sentence = re.sub(r'[a-z]\d', lambda x: x.group(0)[0] + dic[int(x.group(0)[1])], sentence)
wav = synth.synthesize([sentence])
output_file = "test-{}-{}.wav".format(str(i),str(checkpoint))
audio.save_wav(wav, os.path.join('wav_out/{}'.format(output_file)), sr=hparams.sample_rate)
stop_time = time()
if i == 0:
pass
else:
                one_wav_time = len(wav)/hparams.sample_rate
                one_generate_time = stop_time - start_time
                print("wav {}: length {} s, generation time {} s, ratio {}".format(str(i), str(one_wav_time), str(one_generate_time), str(one_wav_time/one_generate_time)))
                wav_time.append(one_wav_time)
                generate_time.append(one_generate_time)
wav_time_mean = np.mean(wav_time)
generate_time_mean = np.mean(generate_time)
print("It will comsume average time about : {}".format(str(generate_time_mean)))
print("The wav average length is {}".format(str(wav_time_mean)))
print("The ratio is {}".format(str(wav_time_mean/generate_time_mean)))
save_dir = "logs-Tacotron/taco_pretrained"
for checkpoint in list(os.walk(save_dir))[0][2]:
checkpoint = checkpoint.split(".")
if checkpoint[-1] == "index":
checkpoint = checkpoint[1].split("-")[-1]
tf.reset_default_graph()
print(checkpoint)
exp(checkpoint)
# taco_checkpoint = os.path.join('logs-Tacotron', 'taco_pretrained/')
# checkpoint_path = tf.train.get_checkpoint_state(taco_checkpoint).model_checkpoint_path
# checkpoint = ""
# exp(checkpoint)
test.py
from django.test import TestCase
from mockldap import MockLdap
from ..ldap.actions import LDAPAccountAdder
from ..ldap.actions import LDAPAccountUpdater
from ..ldap import utils
from ..models import PendingUser
class LDAPFunctionsTestCase(TestCase):
"""
    Warning: you may need to update your project settings for these tests to
    pass; the LDAP configuration described in this class may not match your
    own config.
"""
def setUp(self):
root = ('dc=org', {'dc': ['org']})
top = ('dc=atilla,dc=org', {'dc': ['atilla']})
users = ('ou=users,dc=atilla,dc=org', {'ou': ['users']})
manager = (
'cn=admin,dc=atilla,dc=org', {
'cn': ['admin'],
'userPassword': ['<PASSWORD>']})
user1 = (
'cn=Test USER,ou=users,dc=atilla,dc=org', {
'cn': ['Test USER'],
'uid': ['usertest'],
'userPassword': ['<PASSWORD> !']})
self.directory = dict([root, top, users, manager, user1])
self.mockldap = MockLdap(self.directory)
self.mockldap.start()
self.ldap = self.mockldap['ldap://127.0.0.1']
def tearDown(self):
self.mockldap.stop()
del self.ldap
# Tests for high-level functions
def test_user_bind(self):
self.assertTrue(
utils.test_user_bind(
'cn=Test User,ou=users,dc=atilla,dc=org',
'We love HDM !',
self.ldap))
def test_user_migration(self):
# First, create a pending user
user = PendingUser(
username='randomuser',
first_name='Random',
last_name='User',
email='<EMAIL>',
validation_token='42')
password = '<PASSWORD> !'
# Migrate this user to the LDAP
result = LDAPAccountAdder(self.ldap).add(user, password)
self.assertTrue(result)
        self.assertEqual(
self.ldap.methods_called(),
['search', 'result', 'search_s', 'add_s'])
def test_duplicate_user_cn_migration(self):
# Create a duplicate user from Test User
user = PendingUser(
username='anothertestuser',
first_name='Test',
last_name='User',
email='<EMAIL>',
validation_token='42')
password = '<PASSWORD> !'
# Migrate this user to the LDAP
result = LDAPAccountAdder(self.ldap).add(user, password)
self.assertFalse(result)
def test_duplicate_user_uid_migration(self):
# Create a duplicate user from Test User
user = PendingUser(
username='usertest',
first_name='Another',
last_name='User',
email='<EMAIL>',
validation_token='42')
password = '<PASSWORD> !'
# Migrate this user to the LDAP
result = LDAPAccountAdder(self.ldap).add(user, password)
self.assertFalse(result)
def test_user_password_update(self):
account_updater = LDAPAccountUpdater('cn=Test User,ou=users,dc=atilla,dc=org')
account_updater.change_password('<PASSWORD> !', 'Can<PASSWORD>', self.ldap)
        self.assertEqual(
self.ldap.methods_called(),
['simple_bind_s', 'modify_s', 'unbind_s'])
accounts/tests/test_ldap_functions.py
import maya.cmds
import maya.OpenMaya
import pymel.core as pm
import imath
import math
import IECore
import IECoreScene
import IECoreMaya
class FromMayaInstancerConverter( IECoreMaya.TestCase ) :
def setUp( self ) :
super( FromMayaInstancerConverter, self ).setUp()
def makeScene( self ):
maya.cmds.polyCube()
maya.cmds.polySphere()
maya.cmds.particle( p = [[4, 0, 0], [4, 4, 0], [0, 4, 0], [0, 0, 0]], c = 1 )
maya.cmds.addAttr( "particleShape1", ln = "rotationPP", dt = "vectorArray" )
maya.cmds.addAttr( "particleShape1", ln = "instancePP", dt = "doubleArray" )
maya.cmds.select( ["particle1", "pCube1", "pSphere1"], r = True )
maya.cmds.particleInstancer( addObject = True, object = ["pCube1","pSphere1"] )
maya.cmds.particleInstancer( "particleShape1", e = True, name = "instancer1", rotation = "rotationPP" )
maya.cmds.particleInstancer( "particleShape1", e = True, name = "instancer1", objectIndex = "instancePP" )
n = pm.PyNode( "particleShape1" )
n.attr( "rotationPP" ).set( [pm.dt.Vector( 45, 0, 0 ), pm.dt.Vector( 0, 45, 0 ), pm.dt.Vector( 0, 0, 45 ), pm.dt.Vector( 45, 45, 0 )] )
n.attr( "instancePP" ).set( [0, 1, 0, 1] )
def makeRotationOrderOrUnitScene( self, rotationOrder, useRadians ) :
maya.cmds.polyCube()
maya.cmds.particle( p = [[0, 0, 0]], c = 1 )
maya.cmds.addAttr( "particleShape1", ln = "rotationPP", dt = "vectorArray" )
maya.cmds.addAttr( "particleShape1", ln = "instancePP", dt = "doubleArray" )
maya.cmds.select( ["particle1", "pCube1" ], r = True )
maya.cmds.particleInstancer( addObject = True, object = ["pCube1"] )
maya.cmds.particleInstancer( "particleShape1", e = True, name = "instancer1", rotation = "rotationPP" )
maya.cmds.particleInstancer( "particleShape1", e = True, name = "instancer1", objectIndex = "instancePP" )
maya.cmds.setAttr( "instancer1.rotationOrder", rotationOrder ) # ZYX
if useRadians :
maya.cmds.setAttr( "instancer1.rotationAngleUnits", 1 )
n = pm.PyNode( "particleShape1" )
n.attr( "rotationPP" ).set( [pm.dt.Vector( 90, 90, 0 )] )
n.attr( "instancePP" ).set( [0 ] )
def testCanCreateConverterOfCorrectType( self ) :
self.makeScene()
converter = IECoreMaya.FromMayaDagNodeConverter.create( "instancer1" )
assert (converter.isInstanceOf( IECore.TypeId( IECoreMaya.TypeId.FromMayaInstancerConverter ) ))
def assertUnorderedEqual( self, a, b ) :
self.assertEqual( len( a ), len( b ) )
self.assertEqual( set( a ), set( b ) )
def testConvertsToPointsPrimitive( self ) :
self.makeScene()
converter = IECoreMaya.FromMayaDagNodeConverter.create( "instancer1" )
convertedPoints = converter.convert()
self.assertTrue( convertedPoints.isInstanceOf( IECoreScene.TypeId.PointsPrimitive ) )
self.assertEqual( convertedPoints.numPoints, 4 )
self.assertUnorderedEqual( convertedPoints.keys(), ['P', 'age', 'id', 'instances', 'instanceType', 'orient'] )
self.assertEqual( convertedPoints["P"].data[0], imath.V3f( 4, 0, 0 ) )
self.assertEqual( convertedPoints["P"].data[1], imath.V3f( 4, 4, 0 ) )
self.assertEqual( convertedPoints["P"].data[2], imath.V3f( 0, 4, 0 ) )
self.assertEqual( convertedPoints["P"].data[3], imath.V3f( 0, 0, 0 ) )
self.assertEqual( convertedPoints["id"].data[0], 0 )
self.assertEqual( convertedPoints["id"].data[1], 1 )
self.assertEqual( convertedPoints["id"].data[2], 2 )
self.assertEqual( convertedPoints["id"].data[3], 3 )
self.assertEqual( convertedPoints["age"].data[0], 0.0 )
self.assertEqual( convertedPoints["age"].data[1], 0.0 )
self.assertEqual( convertedPoints["age"].data[2], 0.0 )
self.assertEqual( convertedPoints["age"].data[3], 0.0 )
# instance indices to ensure we can instance the correct object
self.assertEqual( convertedPoints["instanceType"].data[0], 0 )
self.assertEqual( convertedPoints["instanceType"].data[1], 1 )
self.assertEqual( convertedPoints["instanceType"].data[2], 0 )
self.assertEqual( convertedPoints["instanceType"].data[3], 1 )
# rotation is converted to orient
self.assertEqual( convertedPoints["orient"].data[0], imath.Eulerf( math.pi / 4.0, 0, 0 ).toQuat() )
self.assertEqual( convertedPoints["orient"].data[1], imath.Eulerf( 0, math.pi / 4.0, 0 ).toQuat() )
self.assertEqual( convertedPoints["orient"].data[2], imath.Eulerf( 0, 0, math.pi / 4.0 ).toQuat() )
self.assertEqual( convertedPoints["orient"].data[3], imath.Eulerf( math.pi / 4.0, math.pi / 4.0, 0 ).toQuat() )
# check we're capturing the locations in maya we're instancing
self.assertEqual( convertedPoints["instances"].data, IECore.StringVectorData( ['/pCube1', '/pSphere1'] ) )
def testCanConvertEmptyInstancer( self ) :
self.makeScene()
# disconnect the particles from the instancer
maya.cmds.disconnectAttr( "particleShape1.instanceData[0].instancePointData", "instancer1.inputPoints" )
converter = IECoreMaya.FromMayaDagNodeConverter.create( "instancer1" )
convertedPoints = converter.convert()
self.assertTrue( convertedPoints.isInstanceOf( IECoreScene.TypeId.PointsPrimitive ) )
self.assertTrue( "P" in convertedPoints.keys() )
self.assertEqual( convertedPoints["P"].data, IECore.V3fVectorData( [], IECore.GeometricData.Interpretation.Point ) )
def testCanChangeInstancerRotationOrder( self ):
self.makeRotationOrderOrUnitScene( 5, False )
converter = IECoreMaya.FromMayaDagNodeConverter.create( "instancer1" )
convertedPoints = converter.convert()
self.assertTrue( convertedPoints.isInstanceOf( IECoreScene.TypeId.PointsPrimitive ) )
self.assertEqual( convertedPoints.numPoints, 1 )
self.assertUnorderedEqual( convertedPoints.keys(), ['P', 'age', 'id', 'instances', 'instanceType', 'orient'] )
self.assertEqual( convertedPoints["orient"].data[0], imath.Eulerf( math.pi / 2.0, math.pi / 2.0, 0, imath.Eulerf.ZYX ).toQuat() )
def testCanChangeInstancerRotationUnits( self ) :
self.makeRotationOrderOrUnitScene( 0, True )
converter = IECoreMaya.FromMayaDagNodeConverter.create( "instancer1" )
convertedPoints = converter.convert()
self.assertTrue( convertedPoints.isInstanceOf( IECoreScene.TypeId.PointsPrimitive ) )
self.assertEqual( convertedPoints.numPoints, 1 )
self.assertUnorderedEqual( convertedPoints.keys(), ['P', 'age', 'id', 'instances', 'instanceType', 'orient'] )
self.assertEqual( convertedPoints["orient"].data[0], imath.Eulerf( 90.0, 90.0, 0, imath.Eulerf.XYZ ).toQuat() )
if __name__ == "__main__" :
IECoreMaya.TestProgram( plugins = ["ieCore"] )
test/IECoreMaya/FromMayaInstancerConverterTest.py
import logging
import voluptuous as vol
from bluepy.btle import BTLEDisconnectError
from bluepy.btle import BTLEManagementError
from homeassistant import config_entries
from homeassistant.const import CONF_MAC
from homeassistant.const import CONF_NAME
from homeassistant.helpers import device_registry
from radiacode.transports.bluetooth import Bluetooth as BTPeriph
from .const import CONF_METHOD
from .const import CONF_METHOD_MANUAL
from .const import CONF_METHOD_SCAN
from .const import DOMAIN
from .helper import discover_devices
_logger = logging.getLogger(__name__)
class RadiacodeBtFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Config flow for radiacode_bt."""
VERSION = 2
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize."""
self.devices = []
@property
def data_schema(self):
return vol.Schema(
{
vol.Required(CONF_NAME): str,
vol.Required(CONF_MAC): str,
}
)
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
if user_input is None:
schema = vol.Schema(
{
vol.Required(CONF_METHOD): vol.In(
(CONF_METHOD_SCAN, CONF_METHOD_MANUAL)
)
}
)
return self.async_show_form(step_id="user", data_schema=schema)
method = user_input[CONF_METHOD]
_logger.debug(f"selected method: {method}")
if method == CONF_METHOD_SCAN:
return await self.async_step_scan()
else:
self.devices = []
return await self.async_step_device()
async def async_step_scan(self, user_input=None):
"""Handle discovery by scanning."""
errors = {}
if user_input is None:
return self.async_show_form(step_id="scan")
_logger.debug("Starting a scan for RadiaCode devices...")
try:
devices = await self.hass.async_add_executor_job(discover_devices)
except BTLEDisconnectError:
_logger.exception("BLE Connection error")
errors["base"] = "btle_disconnection"
return self.async_show_form(step_id="scan", errors=errors)
except BTLEManagementError:
_logger.exception("BLE Management error")
errors["base"] = "btle_management"
return self.async_show_form(step_id="scan", errors=errors)
if not devices:
return self.async_abort(reason="not_found")
self.devices = devices
return await self.async_step_device()
async def async_step_device(self, user_input=None):
"""Handle setting up a device"""
if not user_input:
schema_mac = str
if self.devices:
schema_mac = vol.In(self.devices)
schema = vol.Schema(
{
vol.Required(CONF_NAME): str,
vol.Required(CONF_MAC): schema_mac,
}
)
return self.async_show_form(step_id="device", data_schema=schema)
mac = user_input[CONF_MAC] = user_input[CONF_MAC].strip()
unique_id = device_registry.format_mac(mac)
_logger.info(f"Setting up RadiaCode MAC: {mac}, unique_id: {unique_id}")
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
try:
BTPeriph(mac)
except Exception:
_logger.exception("Failed to connect to the device.")
return self.async_show_form(step_id="device", errors={"base": "exception"})
return self.async_create_entry(title=user_input[CONF_NAME], data=user_input)
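# For context, a minimal sketch of what the imported `discover_devices` helper could
# look like (an assumption -- the real helper.py is not shown here). It uses bluepy's
# Scanner to collect nearby devices whose advertised name mentions "RadiaCode" and
# returns a {mac: label} mapping suitable for the vol.In() selector above.
#
# from bluepy.btle import Scanner
#
# def discover_devices(timeout=10.0):
#     devices = {}
#     for entry in Scanner().scan(timeout):
#         # AD type 9 is the "Complete Local Name" advertisement field.
#         name = entry.getValueText(9) or ""
#         if "radiacode" in name.lower():
#             devices[entry.addr] = f"{name} ({entry.addr})"
#     return devices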
|
custom_components/radiacode_bt/config_flow.py
|
| 0.689724 | 0.112065 |
from binascii import hexlify
from typing import TYPE_CHECKING
from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.backends import default_backend
import six
from .._internal.client_credential_base import ClientCredentialBase
if TYPE_CHECKING:
from typing import Any
class CertificateCredential(ClientCredentialBase):
"""Authenticates as a service principal using a certificate.
:param str tenant_id: ID of the service principal's tenant. Also called its 'directory' ID.
:param str client_id: the service principal's client ID
:param str certificate_path: path to a PEM-encoded certificate file including the private key.
:keyword str authority: Authority of an Azure Active Directory endpoint, for example 'login.microsoftonline.com',
the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts`
defines authorities for other clouds.
:keyword password: The certificate's password. If a unicode string, it will be encoded as UTF-8. If the certificate
requires a different encoding, pass appropriately encoded bytes instead.
:paramtype password: str or bytes
:keyword bool enable_persistent_cache: if True, the credential will store tokens in a persistent cache. Defaults to
False.
:keyword bool allow_unencrypted_cache: if True, the credential will fall back to a plaintext cache when encryption
    is unavailable. Defaults to False. Has no effect when `enable_persistent_cache` is False.
"""
def __init__(self, tenant_id, client_id, certificate_path, **kwargs):
# type: (str, str, str, **Any) -> None
if not certificate_path:
raise ValueError(
"'certificate_path' must be the path to a PEM file containing an x509 certificate and its private key"
)
password = kwargs.pop("password", None)
if isinstance(password, six.text_type):
password = password.encode(encoding="utf-8")
with open(certificate_path, "rb") as f:
pem_bytes = f.read()
cert = x509.load_pem_x509_certificate(pem_bytes, default_backend())
fingerprint = cert.fingerprint(hashes.SHA1()) # nosec
# TODO: msal doesn't formally support passwords (but soon will); the below depends on an implementation detail
private_key = serialization.load_pem_private_key(pem_bytes, password=password, backend=default_backend())
super(CertificateCredential, self).__init__(
client_id=client_id,
client_credential={"private_key": private_key, "thumbprint": hexlify(fingerprint).decode("utf-8")},
tenant_id=tenant_id,
**kwargs
)
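# A minimal usage sketch (hypothetical values; ./cert.pem is an assumed path):
#
# credential = CertificateCredential(
#     tenant_id="<tenant-id>",
#     client_id="<client-id>",
#     certificate_path="./cert.pem",
# )
# token = credential.get_token("https://vault.azure.net/.default")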
|
sdk/identity/azure-identity/azure/identity/_credentials/certificate.py
|
| 0.800848 | 0.229654 |
import abc
import re
import six
@six.add_metaclass(abc.ABCMeta)
class retry_base(object):
"""Abstract base class for retry strategies."""
@abc.abstractmethod
def __call__(self, retry_state):
pass
def __and__(self, other):
return retry_all(self, other)
def __or__(self, other):
return retry_any(self, other)
class _retry_never(retry_base):
"""Retry strategy that never rejects any result."""
def __call__(self, retry_state):
return False
retry_never = _retry_never()
class _retry_always(retry_base):
"""Retry strategy that always rejects any result."""
def __call__(self, retry_state):
return True
retry_always = _retry_always()
class retry_if_exception(retry_base):
"""Retry strategy that retries if an exception verifies a predicate."""
def __init__(self, predicate):
self.predicate = predicate
def __call__(self, retry_state):
if retry_state.outcome.failed:
return self.predicate(retry_state.outcome.exception())
else:
return False
class retry_if_exception_type(retry_if_exception):
"""Retries if an exception has been raised of one or more types."""
def __init__(self, exception_types=Exception):
self.exception_types = exception_types
super(retry_if_exception_type, self).__init__(
lambda e: isinstance(e, exception_types)
)
class retry_if_not_exception_type(retry_if_exception):
"""Retries except an exception has been raised of one or more types."""
def __init__(self, exception_types=Exception):
self.exception_types = exception_types
super(retry_if_not_exception_type, self).__init__(
lambda e: not isinstance(e, exception_types)
)
class retry_unless_exception_type(retry_if_exception):
"""Retries until an exception is raised of one or more types."""
def __init__(self, exception_types=Exception):
self.exception_types = exception_types
super(retry_unless_exception_type, self).__init__(
lambda e: not isinstance(e, exception_types)
)
def __call__(self, retry_state):
# always retry if no exception was raised
if not retry_state.outcome.failed:
return True
return self.predicate(retry_state.outcome.exception())
class retry_if_result(retry_base):
"""Retries if the result verifies a predicate."""
def __init__(self, predicate):
self.predicate = predicate
def __call__(self, retry_state):
if not retry_state.outcome.failed:
return self.predicate(retry_state.outcome.result())
else:
return False
class retry_if_not_result(retry_base):
"""Retries if the result refutes a predicate."""
def __init__(self, predicate):
self.predicate = predicate
def __call__(self, retry_state):
if not retry_state.outcome.failed:
return not self.predicate(retry_state.outcome.result())
else:
return False
class retry_if_exception_message(retry_if_exception):
"""Retries if an exception message equals or matches."""
def __init__(self, message=None, match=None):
if message and match:
raise TypeError(
"{}() takes either 'message' or 'match', not both".format(
self.__class__.__name__
)
)
# set predicate
if message:
def message_fnc(exception):
return message == str(exception)
predicate = message_fnc
elif match:
prog = re.compile(match)
def match_fnc(exception):
return prog.match(str(exception))
predicate = match_fnc
else:
raise TypeError(
"{}() missing 1 required argument 'message' or 'match'".format(
self.__class__.__name__
)
)
super(retry_if_exception_message, self).__init__(predicate)
class retry_if_not_exception_message(retry_if_exception_message):
"""Retries until an exception message equals or matches."""
def __init__(self, *args, **kwargs):
super(retry_if_not_exception_message, self).__init__(*args, **kwargs)
# invert predicate
if_predicate = self.predicate
self.predicate = lambda *args_, **kwargs_: not if_predicate(*args_, **kwargs_)
def __call__(self, retry_state):
if not retry_state.outcome.failed:
return True
return self.predicate(retry_state.outcome.exception())
class retry_any(retry_base):
"""Retries if any of the retries condition is valid."""
def __init__(self, *retries):
self.retries = retries
def __call__(self, retry_state):
return any(r(retry_state) for r in self.retries)
class retry_all(retry_base):
"""Retries if all the retries condition are valid."""
def __init__(self, *retries):
self.retries = retries
def __call__(self, retry_state):
return all(r(retry_state) for r in self.retries)
|
tenacity/retry.py
|
| 0.829388 | 0.086671 |
from os.path import join, dirname
from jig.tools import NumberedDirectoriesToGit, slugify, indent
from jig.tests.testcase import JigTestCase
class TestSlugify(JigTestCase):
"""
Converting strings into slugs.
"""
def test_nothing(self):
"""
An empty string is given.
"""
self.assertEqual(u'', slugify(u''))
def test_normal_ascii_string(self):
"""
ASCII string.
"""
self.assertEqual(u'abc-def-ghi', slugify(u'Abc & Def Ghi'))
def test_special_characters(self):
"""
Special characters in the string.
"""
self.assertEqual(u'abc-def-ghi', slugify(u'Abç \u0000 Def Ghi'))
class TestNumberedDirectoriesToGit(JigTestCase):
"""
Utility for converting snapshots into Git repos.
"""
def get_nd2g(self, name):
"""
Gets a NumberedDirectoriesToGit object.
Where ``name`` is the basename of a directory in
:file:`src/jig/tests/fixtures/numbereddirs`.
"""
nd = join(dirname(__file__), 'fixtures', 'numbereddirs', name)
return NumberedDirectoriesToGit(nd)
def get_group(self, name):
"""
Gets a ``git.Repo`` from the group ``name``.
Where ``name`` is the basename of a directory in
:file:`src/jig/tests/fixtures/numbereddirs`.
"""
return self.get_nd2g(name).repo
def test_bad_directory(self):
"""
Detects bad directory.
"""
with self.assertRaises(ValueError):
self.get_group('bad-directory')
def test_add_one_file(self):
"""
Adding one file.
"""
repo = self.get_group('group-a')
self.assertEqual(['a.txt'],
[i.path for i in repo.commit('HEAD^1').tree])
# We added a second file
self.assertEqual(['a.txt', 'b.txt'],
[i.path for i in repo.commit('HEAD').tree])
def test_modifying_one_file(self):
"""
Modifying one file.
"""
repo = self.get_group('group-b')
# Start with one file
self.assertEqual(['a.txt'],
[i.path for i in repo.commit('HEAD^1').tree])
# Same filename since it was modified
self.assertEqual(['a.txt'],
[i.path for i in repo.commit('HEAD').tree])
# Should be a diff between them
diff = repo.commit('HEAD^1').diff('HEAD')
self.assertEqual('111\n', diff[0].a_blob.data_stream.read())
self.assertEqual('222\n', diff[0].b_blob.data_stream.read())
def test_remove_one_file(self):
"""
Removing one file.
"""
repo = self.get_group('group-c')
diff = repo.commit('HEAD^1').diff('HEAD')
# It's been removed
self.assertEqual('b.txt', diff[0].a_blob.path)
self.assertEqual(None, diff[0].b_blob)
def test_adding_two_removing_two(self):
"""
Adding two, removing two.
"""
repo = self.get_group('group-d')
diff = repo.commit('HEAD^1').diff('HEAD')
self.assertEqual('b.txt', diff[0].a_blob.path)
self.assertEqual(None, diff[0].b_blob)
self.assertEqual('c.txt', diff[1].a_blob.path)
self.assertEqual(None, diff[1].b_blob)
self.assertEqual(None, diff[2].a_blob)
self.assertEqual('d.txt', diff[2].b_blob.path)
self.assertEqual(None, diff[3].a_blob)
self.assertEqual('e.txt', diff[3].b_blob.path)
def test_add_one_modify_one_delete_one(self):
"""
Add one, modify one, remove one.
"""
repo = self.get_group('group-e')
diff = repo.commit('HEAD^1').diff('HEAD')
# We modified a.txt
self.assertEqual('a.txt', diff[0].a_blob.path)
self.assertEqual('a\n', diff[0].a_blob.data_stream.read())
self.assertEqual('aa\n', diff[0].b_blob.data_stream.read())
# We removed b.txt
self.assertEqual('b.txt', diff[1].a_blob.path)
self.assertEqual(None, diff[1].b_blob)
# And we added c.txt
self.assertEqual(None, diff[2].a_blob)
self.assertEqual('c.txt', diff[2].b_blob.path)
def test_move_one_file(self):
"""
Move one file.
"""
repo = self.get_group('group-f')
diff = repo.commit('HEAD^1').diff('HEAD')
self.assertEqual('a/b.txt', diff[0].a_blob.path)
self.assertEqual(None, diff[0].b_blob)
self.assertEqual(None, diff[1].a_blob)
self.assertEqual('b/b.txt', diff[1].b_blob.path)
def test_caches_repo(self):
"""
Calling repo twice will return the same object.
"""
nd2g = self.get_nd2g('group-a')
self.assertEqual(id(nd2g.repo), id(nd2g.repo))
def test_lots_of_changes(self):
"""
Numerous changesets.
"""
nd2g = self.get_nd2g('group-g')
# Make sure we have the expected 5 commits
self.assertEqual(5, len(list(nd2g.repo.iter_commits())))
# And 4 diffs
self.assertEqual(4, len(nd2g.diffs()))
class TestIndent(JigTestCase):
"""
The indent method will indent a sequence of strings.
"""
def test_indent_string(self):
"""
If the payload is a string it indents and returns a string.
"""
self.assertEqual(' a', indent('a'))
def test_indents_list(self):
"""
List payload indents each item and returns a list.
"""
self.assertEqual(
[u' a', u' b', u' c'],
indent(['a', 'b', 'c']))
def test_indents_different_by(self):
"""
Can change the default indent of 4 to a different integer.
"""
self.assertEqual(
[u' a', u' b', u' c'],
indent(['a', 'b', 'c'], by=1))
def test_indents_different_character(self):
"""
Can change the character used to indent to something else.
"""
self.assertEqual(
[u'?a', u'?b', u'?c'],
indent(['a', 'b', 'c'], by=1, character='?'))
|
src/jig/tests/test_tools.py
|
from application.models import STATUS_CHOICES
from django import forms
from django.forms import widgets
from .models import Step1Rating, Step2Rating
from .models import RATING_METADATA_1_CHOICES, RATING_METADATA_2_CHOICES
import ast
class Step1RateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
if kwargs.get('instance'):
kwargs['initial'] = {
'rating_metadata_1': ast.literal_eval(kwargs.get('instance').rating_metadata_1 or '[]'),
}
super().__init__(*args, **kwargs)
class Meta:
model = Step1Rating
fields = [
'application',
'rating',
'acceptance',
'acceptance_reason',
'rating_metadata_1',
]
widgets = {
'application': widgets.HiddenInput,
'rating_metadata_1': widgets.CheckboxSelectMultiple(choices=RATING_METADATA_1_CHOICES),
}
def clean_acceptance_reason(self):
reason = self.cleaned_data['acceptance_reason']
if self.cleaned_data['acceptance'] == 'yes' and not reason:
raise forms.ValidationError('This field is required.')
return reason
class Step2RateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
if kwargs.get('instance'):
kwargs['initial'] = {
'rating_metadata_2': ast.literal_eval(kwargs.get('instance').rating_metadata_2 or '[]'),
}
super().__init__(*args, **kwargs)
class Meta:
model = Step2Rating
fields = [
'application',
'rating',
'acceptance',
'acceptance_reason',
'rating_metadata_2',
]
widgets = {
'application': widgets.HiddenInput,
'rating_metadata_2': widgets.CheckboxSelectMultiple(choices=RATING_METADATA_2_CHOICES),
}
def clean_acceptance_reason(self):
reason = self.cleaned_data['acceptance_reason']
if self.cleaned_data['acceptance'] == 'yes' and not reason:
raise forms.ValidationError('This field is required.')
return reason
class ChangeStatusForm(forms.Form):
STATUS_CHOICES_NO_DELETED = STATUS_CHOICES[:-1]
choice = forms.ChoiceField(choices=STATUS_CHOICES_NO_DELETED, label='Current status')
reason = forms.CharField(widget=widgets.Textarea, required=False)
application = forms.CharField(widget=forms.HiddenInput)
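# A minimal usage sketch (hypothetical view code; `application` is an assumed model
# instance with `status` and `pk` attributes):
#
# form = ChangeStatusForm(initial={
#     'choice': application.status,
#     'application': application.pk,
# })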
|
web/src/rating/forms.py
|
| 0.705582 | 0.087994 |
import urllib2
from config import SHOW_AVATARS
try:
from PIL import Image
except ImportError:
SHOW_AVATARS = False
class Payload(object):
def __init__(self, data):
self.data = data
def user_link(self):
name = self.data['sender']['login']
url = self.data['sender']['html_url']
avatar = self.data['sender']['avatar_url'] + "&s=18"
return self.create_user_link(name, url, avatar)
def check_avatar_size(self, url):
f = urllib2.urlopen(url)
img = Image.open(f)
f.close()
if img.size[0] <= 20 and img.size[1] <= 20:
return True
return False
def create_user_link(self, name, url, avatar):
if SHOW_AVATARS and self.check_avatar_size(avatar):
return " [%s](%s)" % (avatar, name, url)
return "[%s](%s)" % (name, url)
def repo_link(self):
name = self.data['repository']['full_name']
url = self.data['repository']['html_url']
return "[%s](%s)" % (name, url)
def preview(self, text):
if not text:
return text
l = text.split("\n")
result = l[0]
if result[-1] in "[\n, \r]":
result = result[:-1]
if result != text:
result += " [...]"
return result
class PullRequest(Payload):
def __init__(self, data):
Payload.__init__(self, data)
self.number = self.data['pull_request']['number']
self.title = self.data['pull_request']['title']
self.body = self.data['pull_request']['body']
self.url = self.data['pull_request']['html_url']
def opened(self):
body = self.preview(self.body)
msg = """%s opened new pull request [#%s %s](%s) in %s:
> %s""" % (self.user_link(), self.number, self.title,
self.url, self.repo_link(), body)
return msg
def assigned(self):
to_name = self.data['assignee']['login']
to_url = self.data['assignee']['html_url']
to_avatar = self.data['assignee']['avatar_url'] + "&s=18"
to = self.create_user_link(to_name, to_url, to_avatar)
msg = """%s assigned %s to pull request [#%s %s](%s).""" % (self.user_link(),
to, self.number, self.title, self.url)
return msg
def closed(self):
merged = self.data['pull_request']['merged']
action = "merged" if merged else "closed"
msg = """%s %s pull request [#%s %s](%s).""" % (self.user_link(),
action, self.number, self.title, self.url)
return msg
class PullRequestComment(Payload):
def __init__(self, data):
Payload.__init__(self, data)
self.number = self.data['pull_request']['number']
self.title = self.data['pull_request']['title']
self.body = self.data['comment']['body']
self.url = self.data['comment']['html_url']
def created(self):
body = self.preview(self.body)
msg = """%s commented on pull request [#%s %s](%s):
> %s""" % (self.user_link(), self.number, self.title, self.url, body)
return msg
class Issue(Payload):
def __init__(self, data):
Payload.__init__(self, data)
self.number = self.data['issue']['number']
self.title = self.data['issue']['title']
self.url = self.data['issue']['html_url']
self.body = self.data['issue']['body']
def opened(self):
body = self.preview(self.body)
msg = """%s opened new issue [#%s %s](%s) in %s:
> %s""" % (self.user_link(), self.number, self.title, self.url, self.repo_link(), body)
return msg
def labeled(self):
label = self.data['label']['name']
msg = """%s added label `%s` to issue [#%s %s](%s) in %s.""" % (
self.user_link(), label, self.number, self.title, self.url, self.repo_link())
return msg
def closed(self):
msg = """%s closed issue [#%s %s](%s) in %s.""" % (
self.user_link(), self.number, self.title, self.url, self.repo_link())
return msg
def assigned(self):
name = self.data['assignee']['login']
url = self.data['assignee']['html_url']
avatar = self.data['assignee']['avatar_url'] + "&s=18"
assignee = self.create_user_link(name, url, avatar)
msg = """%s assigned %s to issue [#%s %s](%s) in %s.""" % (
self.user_link(), assignee, self.number, self.title, self.url, self.repo_link())
return msg
class IssueComment(Payload):
def __init__(self, data):
Payload.__init__(self, data)
self.number = self.data['issue']['number']
self.title = self.data['issue']['title']
self.url = self.data['comment']['html_url']
self.body = self.data['comment']['body']
def created(self):
body = self.preview(self.body)
msg = """%s commented on [#%s %s](%s):
> %s""" % (self.user_link(), self.number, self.title, self.url, body)
return msg
class CommitComment(Payload):
def __init__(self, data):
Payload.__init__(self, data)
self.cid = self.data['comment']['commit_id'][:7]
self.url = self.data['comment']['html_url']
self.body = self.data['comment']['body']
def created(self):
body = self.preview(self.body)
msg = """%s commented on [%s](%s):
> %s""" % (self.user_link(), self.cid, self.url, body)
return msg
class Repository(Payload):
def __init__(self, data):
Payload.__init__(self, data)
def created(self):
descr = self.data['repository']['description']
msg = """%s created new repository %s:
> %s""" % (self.user_link(), self.repo_link(), descr)
return msg
class Branch(Payload):
def __init__(self, data):
Payload.__init__(self, data)
self.name = self.data['ref']
def created(self):
msg = """%s added branch `%s` to %s.""" % (self.user_link(),
self.name, self.repo_link())
return msg
def deleted(self):
msg = """%s deleted branch `%s` in %s.""" % (self.user_link(),
self.name, self.repo_link())
return msg
class Tag(Payload):
def __init__(self, data):
Payload.__init__(self, data)
self.name = self.data['ref']
def created(self):
msg = """%s added tag `%s` to %s.""" % (self.user_link(),
self.name, self.repo_link())
return msg
class Push(Payload):
def __init__(self, data):
Payload.__init__(self, data)
def commits(self):
commits = self.data['commits']
branch = self.data['ref'].replace("refs/heads/", "")
branch_url = self.data['repository']['html_url'] + "/tree/" + branch
if not commits:
commits = [self.data['head_commit']]
changeset = "changesets" if len(commits) > 1 else "changeset"
msg = []
msg.append("%s pushed %s %s to [%s](%s) at %s:" % (
self.user_link(), len(commits), changeset, branch, branch_url, self.repo_link()))
for commit in commits:
cid = commit['id'][:7]
curl = commit['url']
cmsg = self.preview(commit['message'])
ctext = "- [`%s`](%s): %s" % (cid, curl, cmsg)
msg.append("\n")
msg.append(ctext)
return "".join(msg)
class Wiki(Payload):
def __init__(self, data):
Payload.__init__(self, data)
def updated(self):
pages = self.data['pages']
msg = []
msg.append("%s changes %s pages in Wiki at %s:" % (self.user_link(), len(pages), self.repo_link()))
for page in pages:
page_name = page['page_name']
title = page['title']
summary = page['summary']
url = "%s/_compare/%s" % (page['html_url'], page['sha'])
action = page['action']
if summary:
ctext = "- %s [%s](%s)\n>%s" % (action, page_name, url, summary)
else:
ctext = "- %s [%s](%s)\n" % (action, page_name, url)
msg.append("\n")
msg.append(ctext)
return "".join(msg)
|
payload.py
|
| 0.474631 | 0.141252 |
import torch
import torch.nn as nn
from src.utils.custom_typing import EncoderOutput
class BaseEncoder(nn.Module):
def __init__(
self,
img_size: int,
in_channels: int,
num_filters: int,
kernel_size: int,
repr_dim: int,
):
"""Encoder to extract the representations
Args:
img_size (int): Image size (images are assumed to be square)
in_channels (int): Number of input channels
num_filters (int): Intermediate number of filters
kernel_size (int): Convolution kernel size
repr_dim (int): Dimension of the desired representation
"""
super().__init__()
self.conv1 = nn.Conv2d(
in_channels=in_channels,
out_channels=num_filters * 2 ** 0,
kernel_size=kernel_size,
stride=1,
)
self.conv2 = nn.Conv2d(
in_channels=num_filters * 2 ** 0,
out_channels=num_filters * 2 ** 1,
kernel_size=kernel_size,
stride=2,
)
self.bn2 = nn.BatchNorm2d(num_features=num_filters * 2 ** 1)
self.conv3 = nn.Conv2d(
in_channels=num_filters * 2 ** 1,
out_channels=num_filters * 2 ** 2,
kernel_size=kernel_size,
stride=2,
)
self.bn3 = nn.BatchNorm2d(num_features=num_filters * 2 ** 2)
self.leaky_relu = nn.LeakyReLU()
self.flatten = nn.Flatten()
self.dense = nn.Linear(
in_features=(4 ** 2) * (num_filters * 2 ** 2),
out_features=repr_dim,
)
def forward(self, x: torch.Tensor) -> EncoderOutput:
"""Forward encoder
Args:
x (torch.Tensor): Image from a given domain
Returns:
EncoderOutput: Representation and feature map
"""
x = self.conv1(x)
x = self.leaky_relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.conv3(x)
x = self.bn3(x)
feature = self.leaky_relu(x)
x = self.leaky_relu(x)
flatten_x = self.flatten(x)
representation = self.dense(flatten_x)
return EncoderOutput(representation=representation, feature=feature)
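# A small smoke-test sketch for the encoder (illustrative hyper-parameters, not from
# the original module): 28x28 inputs with kernel_size=4 reduce to the 4x4 feature map
# that self.dense expects.
if __name__ == "__main__":
    encoder = BaseEncoder(img_size=28, in_channels=1, num_filters=16, kernel_size=4, repr_dim=64)
    out = encoder(torch.randn(8, 1, 28, 28))
    print(out.representation.shape)  # expected: torch.Size([8, 64])
    print(out.feature.shape)         # expected: torch.Size([8, 64, 4, 4])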
|
src/neural_networks/encoder.py
|
| 0.960231 | 0.504944 |
from dataclasses import dataclass, field
from .components import OpenApiComponents
from .info import OpenApiInfo
from .path import OpenApiPath
from .server import OpenApiServer
from .tag import OpenApiTag
@dataclass
class OpenApiSpec:
"""
This is the root document object of the OpenAPI document.
"""
openapi: str
"""
This string MUST be the semantic version number of the OpenAPI Specification version that the
OpenAPI document uses. The openapi field SHOULD be used by tooling specifications and clients to
interpret the OpenAPI document. This is not related to the API info.version string.
"""
info: OpenApiInfo
"""
Provides metadata about the API. The metadata MAY be used by tooling as required.
"""
paths: dict[str, OpenApiPath]
"""
The available paths and operations for the API.
"""
servers: list[OpenApiServer] = field(default_factory=list)
"""
An array of Server Objects, which provide connectivity information to a target server. If the
servers property is not provided, or is an empty array, the default value would be a Server
Object with a url value of /.
"""
components: OpenApiComponents = field(default_factory=OpenApiComponents)
"""
An element to hold various schemas for the specification.
"""
security: list[dict[str, list[str]]] = field(default_factory=list)
"""
A declaration of which security mechanisms can be used across the API. The list of values
includes alternative security requirement objects that can be used. Only one of the security
requirement objects need to be satisfied to authorize a request. Individual operations can
override this definition. To make security optional, an empty security requirement ({}) can be
included in the array.
Each name MUST correspond to a security scheme which is declared in the Security Schemes under
the Components Object. If the security scheme is of type "oauth2" or "openIdConnect", then the
value is a list of scope names required for the execution, and the list MAY be empty if
authorization does not require a specified scope. For other security scheme types, the array
MUST be empty.
"""
tags: list[OpenApiTag] = field(default_factory=list)
|
src/openapi_dataclasses/types/openapi/spec.py
|
| 0.755997 | 0.208199 |
"""Base functions for checkpointing."""
import functools
import itertools
import multiprocessing.pool
import os
import re
from typing import Any, Callable, Iterable, Iterator, Sequence, Mapping, Optional
from tensorflow.io import gfile
from vmoe.checkpoints import serialization
AsyncResult = multiprocessing.pool.AsyncResult
ThreadPool = multiprocessing.pool.ThreadPool
# Allows checkpoints patterns such as:
# ckpt, ckpt.foo, ckpt-0-of-3, ckpt_1, ckpt_1.idx, ckpt_1.data-00-of-10.
CHECKPOINT_REGEX = re.compile(
r'^(.*?)(_[0-9]+)?(\.[a-zA-Z]+)?(-[0-9]+-of-[0-9]+)?$')
def add_shard_suffix(filepath: str, shard: int, shard_count: int) -> str:
return f'{filepath}-{shard:05d}-of-{shard_count:05d}'
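# e.g. add_shard_suffix('/tmp/ckpt_100', shard=3, shard_count=16)
#      -> '/tmp/ckpt_100-00003-of-00016'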
def find_latest_complete_checkpoint_for_prefix(
prefix: str, suffixes: Optional[Sequence[str]] = None) -> Optional[str]:
"""Returns the latest complete checkpoint matching a given prefix.
Args:
prefix: Prefix of the checkpoint file (e.g. '/tmp/ckpt').
suffixes: Collection of required suffixes for the checkpoints.
Returns:
Latest available checkpoint (if any). E.g. '/tmp/ckpt_2500'.
"""
for step in iterate_complete_steps_for_prefix(
prefix, suffixes=suffixes, decreasing=True):
return prefix + f'_{step}'
return None
def iterate_complete_steps_for_prefix(
prefix: str,
suffixes: Optional[Sequence[str]] = None,
decreasing: bool = False) -> Iterator[int]:
"""Iterates over steps with complete checkpoints from a given prefix.
  Complete steps are those for which there are no incomplete (temp) checkpoint
  shards and for which all suffixes are present.
E.g. If the prefix is '/dir/ckpt', the suffixes are ('.index', '.data') and
the files in '/dir/' are: '/dir/ckpt_1.index', '/dir/ckpt_1.data',
'/dir/ckpt_2.index', '/dir/.tmp.ckpt_2.data', '/dir/ckpt_3.data'. Then, the
only completed step is 1, since there is one incomplete shard for step 2
(i.e. '/dir/.tmp.ckpt_2.data') and there is one suffix missing for step 3
(i.e. '/dir/ckpt_3.index').
Args:
prefix: Prefix of the checkpoint file (e.g. '/tmp/ckpt').
suffixes: Collection of required suffixes for the checkpoints.
decreasing: If True, iterates the step numbers in decreasing order.
Yields:
Integers corresponding to the completed step numbers for the given prefix.
"""
if not suffixes:
suffixes = (None,)
suffixes = set(suffixes)
def _parse_step_and_suffix_or_error(filepath):
m = CHECKPOINT_REGEX.fullmatch(filepath)
assert m is not None, (
f'Filepath {filepath!r} does not match CHECKPOINT_REGEX. '
f'This should not happen.')
if m.group(2) is None:
raise ValueError(f'Filepath {filepath!r} does not contain a step number.')
step = int(m.group(2)[1:])
suffix = m.group(3)
return step, suffix
# Find set of (step, suffix) from the given prefix.
steps_and_suffixes = set(
map(_parse_step_and_suffix_or_error, gfile.glob(prefix + '*')))
# Remove any steps where there is an associated temp file.
workdir = os.path.dirname(prefix)
pattern_tmp = os.path.join(workdir, f'.tmp.{os.path.basename(prefix)}') + '*'
incomplete_steps_and_suffixes = set(
map(_parse_step_and_suffix_or_error, gfile.glob(pattern_tmp)))
for step, group in itertools.groupby(
sorted(steps_and_suffixes - incomplete_steps_and_suffixes,
reverse=decreasing),
lambda x: x[0]):
if set(x[1] for x in group) == suffixes:
yield step
def remove_checkpoints(filepaths: Iterable[str],
match_fn: Callable[[str], bool],
*,
thread_pool: Optional[ThreadPool] = None):
"""Removes checkpoints for which `match_fn` returns True."""
def remove(filepath):
if match_fn(filepath):
gfile.remove(filepath)
thread_pool = ThreadPool() if thread_pool is None else thread_pool
thread_pool.map(remove, filepaths)
def remove_shard_suffix(filepath: str) -> str:
return CHECKPOINT_REGEX.sub(r'\1\2\3', filepath)
def restore_checkpoint(filepath: str, tree: Optional[Any] = None) -> Any:
with gfile.GFile(filepath, 'rb') as fp:
checkpoint_contents = fp.read()
if tree is None:
return serialization.msgpack_restore(checkpoint_contents)
else:
return serialization.from_bytes(tree, checkpoint_contents)
def restore_multiple_checkpoints(
filepath_tree_map: Mapping[str, Any],
*,
thread_pool: Optional[ThreadPool] = None) -> Mapping[str, Any]:
thread_pool = thread_pool or ThreadPool()
restored_trees = thread_pool.map(
lambda item: restore_checkpoint(item[0], item[1]),
filepath_tree_map.items())
return dict(zip(filepath_tree_map, restored_trees))
def save_checkpoint(filepath: str,
tree: Any,
*,
overwrite: bool = True,
makedirs: bool = True) -> str:
"""Saves the given PyTree in the given location."""
wdir = os.path.dirname(filepath)
if makedirs and not gfile.exists(wdir):
gfile.makedirs(wdir)
temp_filepath = os.path.join(wdir, '.tmp.' + os.path.basename(filepath))
with gfile.GFile(temp_filepath, 'wb') as fp:
fp.write(serialization.to_bytes(tree))
gfile.rename(temp_filepath, filepath, overwrite=overwrite)
return filepath
def save_checkpoint_async(
filepath: str,
tree: Any,
*,
overwrite: bool = True,
makedirs: bool = True,
thread_pool: Optional[ThreadPool] = None) -> AsyncResult:
"""Saves the given PyTree in the given location, asynchronously."""
thread_pool = thread_pool or ThreadPool()
return thread_pool.apply_async(
save_checkpoint,
args=(filepath, tree),
kwds=dict(overwrite=overwrite, makedirs=makedirs))
def save_multiple_checkpoints_async(
filepath_tree_map: Mapping[str, Any],
*,
overwrite: bool = True,
makedirs: bool = True,
thread_pool: Optional[ThreadPool] = None) -> AsyncResult:
thread_pool = thread_pool or ThreadPool()
fn = functools.partial(
save_checkpoint, overwrite=overwrite, makedirs=makedirs)
return thread_pool.map_async(
lambda args: fn(*args), filepath_tree_map.items())
|
vmoe/checkpoints/base.py
|
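
A small sketch of what CHECKPOINT_REGEX from the module above captures, using a filename taken from its own comment; the pattern is copied verbatim, so this runs with the standard library alone:

import re

# Copied verbatim from vmoe/checkpoints/base.py above.
CHECKPOINT_REGEX = re.compile(
    r'^(.*?)(_[0-9]+)?(\.[a-zA-Z]+)?(-[0-9]+-of-[0-9]+)?$')

m = CHECKPOINT_REGEX.fullmatch('ckpt_1.data-00-of-10')
# Groups are (prefix, step, suffix, shard).
assert m.groups() == ('ckpt', '_1', '.data', '-00-of-10')

# remove_shard_suffix() keeps only groups 1-3, i.e. drops the shard part.
assert CHECKPOINT_REGEX.sub(r'\1\2\3', 'ckpt_1.data-00-of-10') == 'ckpt_1.data'
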
"""Base functions for checkpointing."""
import functools
import itertools
import multiprocessing.pool
import os
import re
from typing import Any, Callable, Iterable, Iterator, Sequence, Mapping, Optional
from tensorflow.io import gfile
from vmoe.checkpoints import serialization
AsyncResult = multiprocessing.pool.AsyncResult
ThreadPool = multiprocessing.pool.ThreadPool
# Allows checkpoints patterns such as:
# ckpt, ckpt.foo, ckpt-0-of-3, ckpt_1, ckpt_1.idx, ckpt_1.data-00-of-10.
CHECKPOINT_REGEX = re.compile(
r'^(.*?)(_[0-9]+)?(\.[a-zA-Z]+)?(-[0-9]+-of-[0-9]+)?$')
def add_shard_suffix(filepath: str, shard: int, shard_count: int) -> str:
return f'{filepath}-{shard:05d}-of-{shard_count:05d}'
def find_latest_complete_checkpoint_for_prefix(
prefix: str, suffixes: Optional[Sequence[str]] = None) -> Optional[str]:
"""Returns the latest complete checkpoint matching a given prefix.
Args:
prefix: Prefix of the checkpoint file (e.g. '/tmp/ckpt').
suffixes: Collection of required suffixes for the checkpoints.
Returns:
Latest available checkpoint (if any). E.g. '/tmp/ckpt_2500'.
"""
for step in iterate_complete_steps_for_prefix(
prefix, suffixes=suffixes, decreasing=True):
return prefix + f'_{step}'
return None
def iterate_complete_steps_for_prefix(
prefix: str,
suffixes: Optional[Sequence[str]] = None,
decreasing: bool = False) -> Iterator[int]:
"""Iterates over steps with complete checkpoints from a given prefix.
Complete steps are those for which there are not incomplete (temp) checkpoint
shards and for which all suffixes are present.
E.g. If the prefix is '/dir/ckpt', the suffixes are ('.index', '.data') and
the files in '/dir/' are: '/dir/ckpt_1.index', '/dir/ckpt_1.data',
'/dir/ckpt_2.index', '/dir/.tmp.ckpt_2.data', '/dir/ckpt_3.data'. Then, the
only completed step is 1, since there is one incomplete shard for step 2
(i.e. '/dir/.tmp.ckpt_2.data') and there is one suffix missing for step 3
(i.e. '/dir/ckpt_3.index').
Args:
prefix: Prefix of the checkpoint file (e.g. '/tmp/ckpt').
suffixes: Collection of required suffixes for the checkpoints.
decreasing: If True, iterates the step numbers in decreasing order.
Yields:
Integers corresponding to the completed step numbers for the given prefix.
"""
if not suffixes:
suffixes = (None,)
suffixes = set(suffixes)
def _parse_step_and_suffix_or_error(filepath):
m = CHECKPOINT_REGEX.fullmatch(filepath)
assert m is not None, (
f'Filepath {filepath!r} does not match CHECKPOINT_REGEX. '
f'This should not happen.')
if m.group(2) is None:
raise ValueError(f'Filepath {filepath!r} does not contain a step number.')
step = int(m.group(2)[1:])
suffix = m.group(3)
return step, suffix
# Find set of (step, suffix) from the given prefix.
steps_and_suffixes = set(
map(_parse_step_and_suffix_or_error, gfile.glob(prefix + '*')))
# Remove any steps where there is an associated temp file.
workdir = os.path.dirname(prefix)
pattern_tmp = os.path.join(workdir, f'.tmp.{os.path.basename(prefix)}') + '*'
incomplete_steps_and_suffixes = set(
map(_parse_step_and_suffix_or_error, gfile.glob(pattern_tmp)))
for step, group in itertools.groupby(
sorted(steps_and_suffixes - incomplete_steps_and_suffixes,
reverse=decreasing),
lambda x: x[0]):
if set(x[1] for x in group) == suffixes:
yield step
def remove_checkpoints(filepaths: Iterable[str],
match_fn: Callable[[str], bool],
*,
thread_pool: Optional[ThreadPool] = None):
"""Removes checkpoints for which `match_fn` returns True."""
def remove(filepath):
if match_fn(filepath):
gfile.remove(filepath)
thread_pool = ThreadPool() if thread_pool is None else thread_pool
thread_pool.map(remove, filepaths)
def remove_shard_suffix(filepath: str) -> str:
return CHECKPOINT_REGEX.sub(r'\1\2\3', filepath)
def restore_checkpoint(filepath: str, tree: Optional[Any] = None) -> Any:
with gfile.GFile(filepath, 'rb') as fp:
checkpoint_contents = fp.read()
if tree is None:
return serialization.msgpack_restore(checkpoint_contents)
else:
return serialization.from_bytes(tree, checkpoint_contents)
def restore_multiple_checkpoints(
filepath_tree_map: Mapping[str, Any],
*,
thread_pool: Optional[ThreadPool] = None) -> Mapping[str, Any]:
thread_pool = thread_pool or ThreadPool()
restored_trees = thread_pool.map(
lambda item: restore_checkpoint(item[0], item[1]),
filepath_tree_map.items())
return dict(zip(filepath_tree_map, restored_trees))
def save_checkpoint(filepath: str,
tree: Any,
*,
overwrite: bool = True,
makedirs: bool = True) -> str:
"""Saves the given PyTree in the given location."""
wdir = os.path.dirname(filepath)
if makedirs and not gfile.exists(wdir):
gfile.makedirs(wdir)
temp_filepath = os.path.join(wdir, '.tmp.' + os.path.basename(filepath))
with gfile.GFile(temp_filepath, 'wb') as fp:
fp.write(serialization.to_bytes(tree))
gfile.rename(temp_filepath, filepath, overwrite=overwrite)
return filepath
def save_checkpoint_async(
filepath: str,
tree: Any,
*,
overwrite: bool = True,
makedirs: bool = True,
thread_pool: Optional[ThreadPool] = None) -> AsyncResult:
"""Saves the given PyTree in the given location, asynchronously."""
thread_pool = thread_pool or ThreadPool()
return thread_pool.apply_async(
save_checkpoint,
args=(filepath, tree),
kwds=dict(overwrite=overwrite, makedirs=makedirs))
def save_multiple_checkpoints_async(
filepath_tree_map: Mapping[str, Any],
*,
overwrite: bool = True,
makedirs: bool = True,
thread_pool: Optional[ThreadPool] = None) -> AsyncResult:
thread_pool = thread_pool or ThreadPool()
fn = functools.partial(
save_checkpoint, overwrite=overwrite, makedirs=makedirs)
return thread_pool.map_async(
lambda args: fn(*args), filepath_tree_map.items())
| 0.877674 | 0.302597 |
from math import ceil
def eta_hms(seconds, always_show_hours=False, always_show_minutes=False, hours_leading_zero=False):
"""Converts seconds remaining into a human readable timestamp (e.g. hh:mm:ss, h:mm:ss, mm:ss, or ss).
Positional arguments:
seconds -- integer/float indicating seconds remaining.
Keyword arguments:
always_show_hours -- don't hide the 0 hours.
always_show_minutes -- don't hide the 0 minutes.
hours_leading_zero -- show 01:00:00 instead of 1:00:00.
Returns:
Human readable string.
"""
# Convert seconds to other units.
final_hours, final_minutes, final_seconds = 0, 0, seconds
if final_seconds >= 3600:
final_hours = int(final_seconds / 3600.0)
final_seconds -= final_hours * 3600
if final_seconds >= 60:
final_minutes = int(final_seconds / 60.0)
final_seconds -= final_minutes * 60
final_seconds = int(ceil(final_seconds))
# Determine which string template to use.
if final_hours or always_show_hours:
if hours_leading_zero:
template = '{hour:02.0f}:{minute:02.0f}:{second:02.0f}'
else:
template = '{hour}:{minute:02.0f}:{second:02.0f}'
elif final_minutes or always_show_minutes:
template = '{minute:02.0f}:{second:02.0f}'
else:
template = '{second:02.0f}'
return template.format(hour=final_hours, minute=final_minutes, second=final_seconds)
def eta_letters(seconds, shortest=False, leading_zero=False):
"""Converts seconds remaining into human readable strings (e.g. '1s' or '5h 22m 2s').
Positional arguments:
seconds -- integer/float indicating seconds remaining.
Keyword arguments:
shortest -- show the shortest possible string length by only showing the biggest unit.
leading_zero -- always show a leading zero for the minutes and seconds.
Returns:
Human readable string.
"""
if not seconds:
return '00s' if leading_zero else '0s'
# Convert seconds to other units.
final_weeks, final_days, final_hours, final_minutes, final_seconds = 0, 0, 0, 0, seconds
if final_seconds >= 604800:
final_weeks = int(final_seconds / 604800.0)
final_seconds -= final_weeks * 604800
if final_seconds >= 86400:
final_days = int(final_seconds / 86400.0)
final_seconds -= final_days * 86400
if final_seconds >= 3600:
final_hours = int(final_seconds / 3600.0)
final_seconds -= final_hours * 3600
if final_seconds >= 60:
final_minutes = int(final_seconds / 60.0)
final_seconds -= final_minutes * 60
final_seconds = int(ceil(final_seconds))
# Handle shortest:
if shortest:
if final_weeks:
formatted = str(final_weeks) + 'w'
elif final_days:
formatted = str(final_days) + 'd'
elif final_hours:
formatted = str(final_hours) + 'h'
elif final_minutes:
formatted = '{0:0{1}d}m'.format(final_minutes, 2 if leading_zero else 1)
else:
formatted = '{0:0{1}d}s'.format(final_seconds, 2 if leading_zero else 1)
return formatted
# Determine which string template to use.
if final_weeks:
template = '{0:d}w {1:d}d {2:d}h {3:02d}m {4:02d}s' if leading_zero else '{0}w {1}d {2}h {3}m {4}s'
elif final_days:
template = '{1:d}d {2:d}h {3:02d}m {4:02d}s' if leading_zero else '{1}d {2}h {3}m {4}s'
elif final_hours:
template = '{2:d}h {3:02d}m {4:02d}s' if leading_zero else '{2}h {3}m {4}s'
elif final_minutes:
template = '{3:02d}m {4:02d}s' if leading_zero else '{3}m {4}s'
else:
template = '{4:02d}s' if leading_zero else '{4}s'
return template.format(final_weeks, final_days, final_hours, final_minutes, final_seconds)
|
etaprogress/components/eta_conversions.py
|
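
A usage sketch for the two converters above, assuming the etaprogress package is importable; the expected outputs in the comments were traced by hand from the code, not taken from the package's own tests:

from etaprogress.components.eta_conversions import eta_hms, eta_letters

print(eta_hms(3661))                           # 1:01:01
print(eta_hms(3661, hours_leading_zero=True))  # 01:01:01
print(eta_hms(59))                             # 59
print(eta_hms(59, always_show_minutes=True))   # 00:59

print(eta_letters(3725))                 # 1h 2m 5s
print(eta_letters(3725, shortest=True))  # 1h
print(eta_letters(0))                    # 0s
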
import albumentations as A
from torch.utils.data import Dataset
import cv2
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from .commons import config, target_cols
def get_train_data():
train = pd.read_csv('/workspace/train.csv')
train_annotated = pd.read_csv('/workspace/train_annotations.csv')
train_annotated.data = train_annotated.data.apply(eval)
train_annotated = train_annotated.groupby(['StudyInstanceUID']).agg({
'label': list,
'data': list
})
train = train.merge(train_annotated, how='left', left_on='StudyInstanceUID', right_index=True)
train['file_path'] = train.StudyInstanceUID.apply(lambda x: f'/workspace/train/{x}.jpg')
train['is_annotated'] = (~train['label'].isnull()).astype(int)
return train
def get_train_folds(train):
targets = np.dot(train[target_cols + ['is_annotated']], [2**i for i in range(12)])
folds = list(StratifiedKFold(n_splits=5, random_state=config.seed, shuffle=True).split(X=targets, y=targets))
return folds
def filter_train_annotated_folds(train, folds):
ignored_index = train[train.is_annotated == 0].index.values
folds_annotated = [(np.setdiff1d(fold[0], ignored_index), np.setdiff1d(fold[1], ignored_index)) for fold in folds]
return folds_annotated
transforms_soft = [
A.RandomResizedCrop(config.image_size, config.image_size, scale=(0.85, 1.0)),
A.HorizontalFlip(p=0.5),
A.RandomBrightnessContrast(p=0.2, brightness_limit=(-0.2, 0.2), contrast_limit=(-0.2, 0.2)),
A.ShiftScaleRotate(p=0.2, shift_limit=0.0625, scale_limit=0.2, rotate_limit=20),
A.CoarseDropout(p=0.2),
A.Cutout(p=0.2, max_h_size=16, max_w_size=16, num_holes=16, fill_value=(0.)),
A.Normalize(
mean=[0.485],
std=[0.229],
),
]
transforms_hard = [
A.RandomResizedCrop(config.image_size, config.image_size, scale=(0.85, 1), p=1),
A.HorizontalFlip(p=0.5),
A.RandomBrightnessContrast(p=0.2, brightness_limit=(-0.2, 0.2), contrast_limit=(-0.2, 0.2)),
A.ShiftScaleRotate(p=0.2, shift_limit=0.0625, scale_limit=0.2, rotate_limit=20),
A.CLAHE(clip_limit=(1, 4), p=0.5),
A.OneOf([
A.OpticalDistortion(distort_limit=1.0),
A.GridDistortion(num_steps=5, distort_limit=1.),
A.ElasticTransform(alpha=3),
], p=0.2),
A.OneOf([
A.GaussNoise(var_limit=[10, 50]),
A.GaussianBlur(),
A.MotionBlur(),
A.MedianBlur(),
], p=0.2),
A.OneOf([
A.JpegCompression(),
A.Downscale(scale_min=0.1, scale_max=0.15),
], p=0.2),
A.IAAPiecewiseAffine(p=0.2),
A.IAASharpen(p=0.2),
A.CoarseDropout(p=0.2),
A.Cutout(p=0.2, max_h_size=16, max_w_size=16, num_holes=16, fill_value=(0.)),
A.Normalize(
mean=[0.485],
std=[0.229],
),
]
class RANZCRDataset(Dataset):
COLOR_MAP = {
'ETT - Abnormal': (255, 0, 0),
'ETT - Borderline': (0, 255, 0),
'ETT - Normal': (0, 0, 255),
'NGT - Abnormal': (255, 255, 0),
'NGT - Borderline': (255, 0, 255),
'NGT - Incompletely Imaged': (0, 255, 255),
'NGT - Normal': (128, 0, 0),
'CVC - Abnormal': (0, 128, 0),
'CVC - Borderline': (0, 0, 128),
'CVC - Normal': (128, 128, 0),
'Swan Ganz Catheter Present': (128, 0, 128),
}
COLOR_MAP = {k: cv2.cvtColor(np.uint8(v)[None, None], cv2.COLOR_BGR2GRAY)[0] for k, v in COLOR_MAP.items()}
def __init__(self, df, ret_mode, transform=None):
self.df = df
self.ret_mode = ret_mode
self.transform = transform
self.labels = df[target_cols].values.astype(np.float32)
def __len__(self):
return len(self.df)
def __getitem__(self, index):
row = self.df.loc[index]
labels = self.labels[index]
img = cv2.imread(row.file_path, cv2.IMREAD_GRAYSCALE)
img = np.expand_dims(img, -1)
orig_img = img.copy() if self.ret_mode != 'orig' else img
if self.ret_mode != 'orig' and row.is_annotated:
for color_label, coord in zip(row.label, row.data):
for d in coord:
img[d[1]-config.annot_size//2:d[1]+config.annot_size//2,
d[0]-config.annot_size//2:d[0]+config.annot_size//2,
:] = self.COLOR_MAP[color_label]
if self.ret_mode == 'both':
res = self.transform(image=img, orig=orig_img)
orig_img = res['orig']
orig_img = orig_img.transpose(2, 0, 1)
elif self.ret_mode == 'orig':
res = self.transform(image=orig_img)
elif self.ret_mode == 'annotated':
res = self.transform(image=img)
img = res['image']
img = img.transpose(2, 0, 1)
if self.ret_mode == 'both':
return (orig_img, img), labels
else:
return img, labels
|
src/dataset.py
|
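
A hedged sketch of how the pieces above could be wired together for training. It is not runnable without the competition CSVs under /workspace and the src package on the path, and it assumes (this is not shown in dataset.py) that the transform lists are wrapped in A.Compose with an extra 'orig' image target so the ret_mode='both' call signature works:

import albumentations as A
from torch.utils.data import DataLoader
from src.dataset import (RANZCRDataset, get_train_data, get_train_folds,
                         transforms_soft)

train = get_train_data()                          # needs /workspace/train*.csv
train_idx, valid_idx = get_train_folds(train)[0]

# Assumption: RANZCRDataset calls transform(image=..., orig=...) in 'both' mode,
# so the composition presumably needs an additional 'orig' image target.
transform = A.Compose(transforms_soft, additional_targets={'orig': 'image'})

train_ds = RANZCRDataset(
    train.iloc[train_idx].reset_index(drop=True),
    ret_mode='annotated',
    transform=transform,
)
loader = DataLoader(train_ds, batch_size=16, shuffle=True)
imgs, labels = next(iter(loader))  # imgs: (batch, 1, H, W), labels: (batch, n_targets)
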
import datetime
import logging
import logging.handlers
import os
import socket
import sys
from typing import List
__all__ = [
'get_logger',
'get_log_file_handler',
'get_log_file_path',
'get_output_loggers',
'get_screen_handler',
'launch_config',
'reset',
]
def with_per_logger_formatting(cls):
"""Add per logger formatting capabilities to the given logging.Handler."""
class _trait(cls):
"""A logging.Handler subclass to enable per logger formatting."""
def __init__(self, *args, **kwargs):
super(_trait, self).__init__(*args, **kwargs)
self._formatters = {}
def setFormatterFor(self, logger, formatter):
"""Set formatter for a given logger instance or logger name."""
logger_name = logger if isinstance(logger, str) else logger.name
self._formatters[logger_name] = formatter
def unsetFormatterFor(self, logger):
"""Unset formatter for a given logger instance or logger name, if any."""
logger_name = logger if isinstance(logger, str) else logger.name
if logger_name in self._formatters:
del self._formatters[logger_name]
def format(self, record): # noqa
if record.name in self._formatters:
formatter = self._formatters[record.name]
return formatter.format(record)
return super(_trait, self).format(record)
return _trait
def attributes(**attr):
"""Inject attributes into a function (a singleton by definition)."""
def _decorator(f):
for name, value in attr.items():
setattr(f, name, value)
return f
return _decorator
@attributes(screen_handler=None, file_handlers={})
def launch_config(
*,
level=None,
log_dir=None,
screen_format=None,
screen_style=None,
log_format=None,
log_style=None
):
"""
Set up launch logging.
This function allows you to:
- Set the default verbosity level for all loggers.
- Configure the location of log files on disk.
- Configure screen and log file formats.
Setup only has side effects for the arguments provided.
The setup process is idempotent.
For the ``screen_format`` argument there are a few aliases:
- 'default' to log verbosity level, logger name and logged message
- 'default_with_timestamp' to add timestamps to the 'default' format
:param level: the default log level used for all loggers.
:param log_dir: used as base path for all log file collections.
:param screen_format: format specification used when logging to the screen,
as expected by the `logging.Formatter` constructor.
Alternatively, aliases for common formats are available, see above.
:param screen_style: the screen style used if no alias is used for
screen_format.
No style can be provided if a format alias is given.
:param log_format: the format used when logging to the main launch log file,
as expected by the `logging.Formatter` constructor.
Alternatively, the 'default' alias can be given to log verbosity level,
logger name and logged message.
:param log_style: the log style used if no alias is given for log_format.
No style can be provided if a format alias is given.
"""
if level is not None:
logging.root.setLevel(level)
if screen_format is not None:
if screen_format == 'default':
screen_format = '[{levelname}] [{name}]: {msg}'
if screen_style is not None:
raise ValueError(
'Cannot set a custom format style for the "default" screen format.'
)
if screen_format == 'default_with_timestamp':
screen_format = '{created:.7f} [{levelname}] [{name}]: {msg}'
if screen_style is not None:
raise ValueError(
'Cannot set a custom format style for the '
'"default_with_timestamp" screen format.'
)
if screen_style is None:
screen_style = '{'
launch_config.screen_formatter = logging.Formatter(
screen_format, style=screen_style
)
if launch_config.screen_handler is not None:
launch_config.screen_handler.setFormatter(launch_config.screen_formatter)
if log_format is not None:
if log_format == 'default':
log_format = '{created:.7f} [{levelname}] [{name}]: {msg}'
if log_style is not None:
raise ValueError(
'Cannot set a custom format style for the "default" log format.'
)
if log_style is None:
log_style = '{'
launch_config.file_formatter = logging.Formatter(
log_format, style=log_style
)
for handler in launch_config.file_handlers.values():
handler.setFormatter(launch_config.file_formatter)
if log_dir is not None:
if any(launch_config.file_handlers):
import warnings
warnings.warn((
'Loggers have been already configured to output to log files below {}. '
'Proceed at your own risk.'
).format(launch_config.log_dir))
if not os.path.isdir(log_dir):
raise ValueError('{} is not a directory'.format(log_dir))
launch_config.log_dir = log_dir
def log_launch_config(*, logger=logging.root):
"""Log logging configuration details relevant for a user with the given logger."""
if any(launch_config.file_handlers):
logger.info('All log files can be found below {}'.format(launch_config.log_dir))
logger.info('Default logging verbosity is set to {}'.format(logging.getLevelName(
logging.root.getEffectiveLevel()
)))
def get_logger(name=None):
"""Get named logger, configured to output to screen and launch main log file."""
logger = logging.getLogger(name)
screen_handler = get_screen_handler()
if screen_handler not in logger.handlers:
logger.addHandler(screen_handler)
launch_log_file_handler = get_log_file_handler()
if launch_log_file_handler not in logger.handlers:
logger.addHandler(launch_log_file_handler)
return logger
def _normalize_output_configuration(config):
"""
Normalize output configuration to a dict representation.
See `get_output_loggers()` documentation for further reference.
"""
normalized_config = {
'both': set(), 'stdout': set(), 'stderr': set()
}
if isinstance(config, str):
if config == 'screen':
normalized_config.update({
'both': {'screen'}
})
elif config == 'log':
normalized_config.update({
'both': {'log'},
'stderr': {'screen'}
})
elif config == 'both':
normalized_config.update({
'both': {'log', 'screen'},
})
elif config == 'own_log':
normalized_config.update({
'both': {'own_log'},
'stdout': {'own_log'},
'stderr': {'own_log'}
})
elif config == 'full':
normalized_config.update({
'both': {'screen', 'log', 'own_log'},
'stdout': {'own_log'},
'stderr': {'own_log'}
})
else:
raise ValueError((
'{} is not a valid standard output config '
'i.e. "screen", "log" or "both"'
).format(config))
elif isinstance(config, dict):
for source, destinations in config.items():
if source not in ('stdout', 'stderr', 'both'):
raise ValueError((
'{} is not a valid output source '
'i.e. "stdout", "stderr" or "both"'
).format(source))
if isinstance(destinations, str):
destinations = {destinations}
for destination in destinations:
if destination not in ('screen', 'log', 'own_log'):
raise ValueError((
'{} is not a valid output destination '
'i.e. "screen", "log" or "own_log"'
).format(destination))
normalized_config[source] = set(destinations)
else:
raise ValueError(
'{} is not a valid output configuration'.format(config)
)
return normalized_config
def get_output_loggers(process_name, output_config):
"""
Get the stdout and stderr output loggers for the given process name.
The output_config may be a dictionary with one or more of the optional keys
'stdout', 'stderr', or 'both' (stdout and stderr combined) which represent
the various process output sources, and values for those keys to assign one
or more logging destinations to the source.
The logging destination values may be:
- 'screen': log it to the screen,
- 'log': log it to launch log file, or
- 'own_log': log it to a separate log file.
When logging the stdout and stderr separately, the log file names follow
the ``<process_name>-<source>.log`` pattern where ``<source>`` is either
'stdout' or 'stderr'
When the 'both' logging destination is used the log file name follows the
``<process_name>.log`` pattern.
The "launch log file" is a log file which is create for each run of
the launch.LaunchService, and at least captures the log output from launch
itself, but may also include output from subprocess's if configured so.
Alternatively, the output_config parameter may be a string which represents
one of a couple available aliases for common logging configurations.
The available aliases are:
- 'screen': stdout and stderr are logged to the screen,
- 'log': stdout and stderr are logged to launch log file and stderr to
the screen,
- 'both': both stdout and stderr are logged to the screen and to launch
main log file,
- 'own_log' for stdout, stderr and their combination to be logged to
their own log files, and
- 'full' to have stdout and stderr sent to the screen, to the main launch
log file, and their own separate and combined log files.
    :param process_name: the process-like action whose outputs are to be logged.
:param output_config: configuration for the output loggers,
see above for details.
:returns: a tuple with the stdout and stderr output loggers.
"""
output_config = _normalize_output_configuration(output_config)
for source in ('stdout', 'stderr'):
logger = logging.getLogger('{}-{}'.format(process_name, source))
# If a 'screen' output is configured for this source or for
# 'both' sources, this logger should output to screen.
if 'screen' in (output_config['both'] | output_config[source]):
screen_handler = get_screen_handler()
# Add screen handler if necessary.
if screen_handler not in logger.handlers:
screen_handler.setFormatterFor(
logger, logging.Formatter('{msg}', style='{')
)
logger.addHandler(screen_handler)
# If a 'log' output is configured for this source or for
# 'both' sources, this logger should output to launch main log file.
if 'log' in (output_config['both'] | output_config[source]):
launch_log_file_handler = get_log_file_handler()
# Add launch main log file handler if necessary.
if launch_log_file_handler not in logger.handlers:
launch_log_file_handler.setFormatterFor(
logger, logging.Formatter('{created:.7f} {msg}', style='{')
)
logger.addHandler(launch_log_file_handler)
# If an 'own_log' output is configured for this source, this logger
# should output to its own log file.
if 'own_log' in output_config[source]:
own_log_file_handler = get_log_file_handler(
'{}-{}.log'.format(process_name, source)
)
own_log_file_handler.setFormatter(logging.Formatter(fmt=None))
# Add own log file handler if necessary.
if own_log_file_handler not in logger.handlers:
logger.addHandler(own_log_file_handler)
# If an 'own_log' output is configured for 'both' sources,
# this logger should output to a combined log file.
if 'own_log' in output_config['both']:
combined_log_file_handler = get_log_file_handler(process_name + '.log')
combined_log_file_handler.setFormatter(logging.Formatter('{msg}', style='{'))
# Add combined log file handler if necessary.
if combined_log_file_handler not in logger.handlers:
logger.addHandler(combined_log_file_handler)
# Retrieve both loggers.
return (
logging.getLogger(process_name + '-stdout'),
logging.getLogger(process_name + '-stderr')
)
def get_screen_handler():
"""
Get the one and only screen logging handler.
See launch_config() documentation for screen logging configuration.
"""
if launch_config.screen_handler is None:
handler_cls = with_per_logger_formatting(logging.StreamHandler)
launch_config.screen_handler = handler_cls(sys.stdout)
launch_config.screen_handler.setFormatter(launch_config.screen_formatter)
return launch_config.screen_handler
def get_log_file_path(file_name='launch.log'):
return os.path.join(launch_config.log_dir, file_name)
def get_log_file_handler(file_name='launch.log'):
"""
Get the logging handler to a log file.
See launch_config() documentation for application wide log file
logging configuration.
:param: file_name of the log file whose handler is to be retrieved.
:return: the logging handler associated to the file (always the same
once constructed).
"""
if file_name not in launch_config.file_handlers:
file_path = get_log_file_path(file_name)
if os.name != 'nt':
handler_cls = with_per_logger_formatting(
logging.handlers.WatchedFileHandler
)
else:
handler_cls = with_per_logger_formatting(logging.FileHandler)
file_handler = handler_cls(file_path)
file_handler.setFormatter(launch_config.file_formatter)
launch_config.file_handlers[file_name] = file_handler
return launch_config.file_handlers[file_name]
def _make_unique_log_dir(*, base_path):
"""
Make a unique directory for logging.
:param: base_path for directory creation
:return: the path to the created directory
"""
while True:
now = datetime.datetime.now()
datetime_str = now.strftime('%Y-%m-%d-%H-%M-%S-%f')
log_dirname = '{0}-{1}-{2}'.format(
datetime_str, socket.gethostname(), os.getpid()
)
log_dir = os.path.join(base_path, log_dirname)
        # Check that the log directory does not already exist
# TODO(hidmic): fix (unlikely) TOCTTOU race
if not os.path.isdir(log_dir):
os.makedirs(log_dir, exist_ok=True)
return log_dir
# Track all loggers to support module resets
class LaunchLogger(logging.getLoggerClass()):
all_loggers: List[logging.Logger] = []
def __new__(cls, *args, **kwargs):
instance = super(LaunchLogger, cls).__new__(cls)
LaunchLogger.all_loggers.append(instance)
return instance
default_log_dir = _make_unique_log_dir(
base_path=os.path.join(os.path.expanduser('~'), '.ros/log')
)
def reset():
"""Reset logging."""
# Reset existing logging infrastructure
for logger in LaunchLogger.all_loggers:
logger.setLevel(logging.NOTSET)
del logger.handlers[:]
# Back to default logging setup
launch_config.log_dir = None
launch_config.file_handlers = {}
launch_config.screen_handler = None
launch_config(
level=logging.INFO, log_dir=default_log_dir,
log_format='default', screen_format='default'
)
logging.setLoggerClass(LaunchLogger)
# Initial module reset
reset()
|
launch/launch/logging.py
|
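
A short sketch of the public surface of the module above as a caller might use it; it assumes the launch package is installed (the file path suggests the module is importable as launch.logging), and the process name is illustrative:

import logging
import launch.logging

# One-time setup: default verbosity plus screen/file formats.
launch.logging.launch_config(
    level=logging.DEBUG,
    screen_format='default_with_timestamp',
    log_format='default',
)

# A named logger wired to both the screen and the main launch.log file.
logger = launch.logging.get_logger('my_launch')
logger.info('log files live under %s', launch.logging.launch_config.log_dir)

# Per-process loggers; the 'both' alias sends stdout and stderr to the screen
# and to the main launch log file.
out_logger, err_logger = launch.logging.get_output_loggers('talker-1', 'both')
out_logger.info('a captured stdout line')
err_logger.info('a captured stderr line')
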
__author__ = 'kensuke-mi'
import sys
import unittest
from JapaneseTokenizer.mecab_wrapper.mecab_wrapper import MecabWrapper
from JapaneseTokenizer.datamodels import TokenizedSenetence
from six import string_types
import os
python_version = sys.version_info
class TestMecabWrapperPython2(unittest.TestCase):
def setUp(self):
self.test_senetence = u'紗倉 まな(さくらまな、1993年3月23日 - )は、日本のAV女優。'
self.test_sentence2 = u'午前零時。午前3時。3時。'
self.path_user_dict = os.path.join(os.path.dirname(__file__), 'resources/test/userdict.csv')
def test_neologd_parse(self):
"""* Test case
        - Check that the sentence is tokenized correctly with the neologd dictionary
"""
mecab_obj = MecabWrapper(dictType='neologd')
parsed_obj = mecab_obj.tokenize(sentence=self.test_senetence)
        self.assertIsInstance(parsed_obj, TokenizedSenetence)
self.assertTrue(isinstance(parsed_obj.convert_list_object(), list))
self.assertTrue(all(isinstance(mrph, string_types) for mrph in parsed_obj.convert_list_object()))
parsed_obj = mecab_obj.tokenize(sentence=self.test_sentence2)
        self.assertIsInstance(parsed_obj, TokenizedSenetence)
self.assertTrue(isinstance(parsed_obj.convert_list_object(), list))
self.assertTrue(all(isinstance(mrph, string_types) for mrph in parsed_obj.convert_list_object()))
def test_default_parse(self):
"""* Test case
        - Check the behavior in the default configuration
"""
dictType = "ipadic"
mecab_obj = MecabWrapper(dictType=dictType)
assert isinstance(mecab_obj, MecabWrapper)
parsed_obj = mecab_obj.tokenize(sentence=self.test_senetence, return_list=True)
assert isinstance(parsed_obj, list)
if python_version >= (3, 0, 0):
for morph in parsed_obj: assert isinstance(morph, str)
print(parsed_obj)
else:
for morph in parsed_obj: assert isinstance(morph, string_types)
def test_init_userdict(self):
"""* Test case
        - Check the behavior when all dictionaries are used
"""
mecab_obj = MecabWrapper(dictType='all', pathUserDictCsv=self.path_user_dict)
assert isinstance(mecab_obj, MecabWrapper)
res = mecab_obj.tokenize(sentence=self.test_senetence, return_list=True)
assert isinstance(res, list)
assert u'さくらまな' in res
if __name__ == '__main__':
unittest.main()
|
test/test_mecab_wrapper_python2.py
|
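
The unittest above doubles as usage documentation; a condensed sketch of the same calls outside the test harness, assuming MeCab, the ipadic dictionary and the JapaneseTokenizer package are installed:

from JapaneseTokenizer.mecab_wrapper.mecab_wrapper import MecabWrapper

mecab_obj = MecabWrapper(dictType='ipadic')

# return_list=True yields a plain list of surface forms.
tokens = mecab_obj.tokenize(sentence=u'午前零時。午前3時。3時。', return_list=True)
print(tokens)

# Without return_list, a TokenizedSenetence object (the library's own spelling)
# is returned and can be converted to a list afterwards.
parsed = mecab_obj.tokenize(sentence=u'午前零時。午前3時。3時。')
print(parsed.convert_list_object())
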
import copy
from importlib import import_module
from autumn.tools.project import get_project, Project
class Opti:
"""
This class is used to define and solve an optimisation problem based on one of the existing AuTuMN apps.
"""
project: Project
def __init__(
self,
app_name, # e.g. 'covid_19' or 'tuberculosis'
region_name, # e.g. 'victoria'
scenario_func=None, # a function that returns a scenario dictionary based on decision variables
objective_func=None, # a function that calculates the objective(s) based on a run model. Should return a list
        root_model_params=None,  # a params dictionary to update the baseline params
):
self.app_name = app_name
self.region_name = region_name
self.scenario_func = scenario_func
self.objective_func = objective_func
        self.root_model_params = root_model_params if root_model_params is not None else {}
self.root_model = None
self.project = get_project(app_name, region_name)
def run_root_model(self) -> dict:
# Update params using root_model_params
root_params = self.project.param_set.baseline.update(self.root_model_params)
self.root_model = self.project.run_baseline_model(root_params)
return root_params.to_dict()
def evaluate_objective(self, decision_vars):
"""
Evaluate the objective function(s) for the given decision variables
:return: a list of objective values
"""
assert self.scenario_func is not None, "A non-null scenario function is required."
assert self.objective_func is not None, "A non-null objective function is required."
sc_dict = self.scenario_func(decision_vars)
sc_model = self.run_scenario(sc_dict)
objective = self.objective_func(sc_model, decision_vars)
return objective
def run_scenario(self, sc_dict):
"""
Run a model scenario defined from the scenario dictionary
:return: a model object
"""
sc_params = self.project.param_set.baseline.update(self.root_model_params).update(sc_dict)
start_time = sc_params.to_dict()["time"]["start"]
sc_models = self.project.run_scenario_models(
baseline_model=self.root_model, scenario_params=[sc_params], start_time=start_time
)
return sc_models[0]
|
autumn/tools/optimisation/opti.py
| 0.721743 | 0.351422 |
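The Opti class above is only a thin wrapper: it runs a baseline model once and then re-runs scenarios for candidate decision variables. A minimal, hedged usage sketch follows; the scenario/objective functions, the 'contact_rate' parameter key and the derived output name are illustrative assumptions, not taken from opti.py.

from autumn.tools.optimisation.opti import Opti

def example_scenario_func(decision_vars):
    # Map decision variables onto scenario parameter overrides (assumed key name).
    return {"contact_rate": decision_vars[0]}

def example_objective_func(model, decision_vars):
    # Return a list of objective values computed from the scenario model that was run
    # (the derived output name is an assumption for illustration).
    return [model.derived_outputs["infection_deaths"][-1]]

opti = Opti(
    app_name="covid_19",
    region_name="victoria",
    scenario_func=example_scenario_func,
    objective_func=example_objective_func,
)
opti.run_root_model()                        # run the baseline once
objectives = opti.evaluate_objective([0.5])  # evaluate one candidate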
from openpeerpower.components.cover import ATTR_CURRENT_POSITION
from openpeerpower.components.ozw.cover import VALUE_SELECTED_ID
from .common import setup_ozw
VALUE_ID = "Value"
async def test_cover(opp, cover_data, sent_messages, cover_msg):
"""Test setting up config entry."""
receive_message = await setup_ozw(opp, fixture=cover_data)
# Test loaded
state = opp.states.get("cover.roller_shutter_3_instance_1_level")
assert state is not None
assert state.state == "closed"
assert state.attributes[ATTR_CURRENT_POSITION] == 0
# Test setting position
await opp.services.async_call(
"cover",
"set_cover_position",
{"entity_id": "cover.roller_shutter_3_instance_1_level", "position": 50},
blocking=True,
)
assert len(sent_messages) == 1
msg = sent_messages[0]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 50, "ValueIDKey": 625573905}
# Feedback on state
cover_msg.decode()
cover_msg.payload["Value"] = 50
cover_msg.encode()
receive_message(cover_msg)
await opp.async_block_till_done()
# Test opening
await opp.services.async_call(
"cover",
"open_cover",
{"entity_id": "cover.roller_shutter_3_instance_1_level"},
blocking=True,
)
assert len(sent_messages) == 2
msg = sent_messages[1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": True, "ValueIDKey": 281475602284568}
# Test stopping after opening
await opp.services.async_call(
"cover",
"stop_cover",
{"entity_id": "cover.roller_shutter_3_instance_1_level"},
blocking=True,
)
assert len(sent_messages) == 4
msg = sent_messages[2]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": False, "ValueIDKey": 281475602284568}
msg = sent_messages[3]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": False, "ValueIDKey": 562950578995224}
# Test closing
await opp.services.async_call(
"cover",
"close_cover",
{"entity_id": "cover.roller_shutter_3_instance_1_level"},
blocking=True,
)
assert len(sent_messages) == 5
msg = sent_messages[4]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": True, "ValueIDKey": 562950578995224}
# Test stopping after closing
await opp.services.async_call(
"cover",
"stop_cover",
{"entity_id": "cover.roller_shutter_3_instance_1_level"},
blocking=True,
)
assert len(sent_messages) == 7
msg = sent_messages[5]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": False, "ValueIDKey": 281475602284568}
msg = sent_messages[6]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": False, "ValueIDKey": 562950578995224}
# Test stopping after no open/close
await opp.services.async_call(
"cover",
"stop_cover",
{"entity_id": "cover.roller_shutter_3_instance_1_level"},
blocking=True,
)
# both stop open/close messages sent
assert len(sent_messages) == 9
msg = sent_messages[7]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": False, "ValueIDKey": 281475602284568}
msg = sent_messages[8]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": False, "ValueIDKey": 562950578995224}
# Test converting position to zwave range for position > 0
await opp.services.async_call(
"cover",
"set_cover_position",
{"entity_id": "cover.roller_shutter_3_instance_1_level", "position": 100},
blocking=True,
)
assert len(sent_messages) == 10
msg = sent_messages[9]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 99, "ValueIDKey": 625573905}
# Test converting position to zwave range for position = 0
await opp.services.async_call(
"cover",
"set_cover_position",
{"entity_id": "cover.roller_shutter_3_instance_1_level", "position": 0},
blocking=True,
)
assert len(sent_messages) == 11
msg = sent_messages[10]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 0, "ValueIDKey": 625573905}
async def test_barrier(opp, cover_gdo_data, sent_messages, cover_gdo_msg):
"""Test setting up config entry."""
receive_message = await setup_ozw(opp, fixture=cover_gdo_data)
# Test loaded
state = opp.states.get("cover.gd00z_4_barrier_state")
assert state is not None
assert state.state == "closed"
# Test opening
await opp.services.async_call(
"cover",
"open_cover",
{"entity_id": "cover.gd00z_4_barrier_state"},
blocking=True,
)
assert len(sent_messages) == 1
msg = sent_messages[0]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 4, "ValueIDKey": 281475083239444}
# Feedback on state
cover_gdo_msg.decode()
cover_gdo_msg.payload[VALUE_ID][VALUE_SELECTED_ID] = 4
cover_gdo_msg.encode()
receive_message(cover_gdo_msg)
await opp.async_block_till_done()
state = opp.states.get("cover.gd00z_4_barrier_state")
assert state is not None
assert state.state == "open"
# Test closing
await opp.services.async_call(
"cover",
"close_cover",
{"entity_id": "cover.gd00z_4_barrier_state"},
blocking=True,
)
assert len(sent_messages) == 2
msg = sent_messages[1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 0, "ValueIDKey": 281475083239444}
|
tests/components/ozw/test_cover.py
| 0.616474 | 0.396594 |
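The last two service calls in test_cover assert how Home Assistant positions are scaled to Z-Wave levels (100 -> 99, 0 -> 0, and 50 -> 50 earlier in the test). A small sketch of a scaling function that satisfies those assertions is shown below; it is an illustration only, not the actual ozw cover implementation.

def ha_position_to_zwave(position: int) -> int:
    # Scale a Home Assistant position (0-100) onto a Z-Wave level (0-99).
    # Chosen only to satisfy the expectations asserted above; the real
    # integration may compute this differently.
    return min(99, round(position * 99 / 100))

assert ha_position_to_zwave(0) == 0
assert ha_position_to_zwave(50) == 50
assert ha_position_to_zwave(100) == 99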
from flask import Response,current_app,request,session
from sqlalchemy import text
from decimal import Decimal
import json,time,datetime
# Custom response class for cross-origin (CORS) API replies
class ApiCorsResponse():
@staticmethod
    def response(data, success=True, status_code=200):
        if success:
re_data = {'msg':'ok','data':data}
else:
re_data = {'msg': 'fail', 'error_info': data}
rep = Response(
response=json.dumps(re_data) + "\n" ,
status=status_code,
mimetype= current_app.config["JSONIFY_MIMETYPE"],
headers={
'Access-Control-Allow-Origin':'*',
'Access-Control-Allow-Method':'GET,POST,OPTIONS,PUT,DELETE',
'Access-Control-Allow-Headers':'*',
}
)
return rep
# Custom helper function class
class Func():
split_save = ['day', 'week', 'month', 'year']
@classmethod
def getTableName(cls,conf,data_engine):
table_suffix = ''
try:
if data_engine == 'mysql':
table = conf['table']
if data_engine == 'mongodb':
table = conf['collection']
except KeyError as e:
            raise Exception('Configuration error: %s does not exist' % e.args)
if 'split_save' in conf:
if conf['split_save'] not in cls.split_save:
                raise Exception('webserver option split_save only supports: %s' % ','.join(cls.split_save))
if conf['split_save'] == 'day':
table_suffix = time.strftime('%Y_%m_%d', time.localtime())
elif conf['split_save'] == 'week':
now = datetime.datetime.now()
this_week_start = now - datetime.timedelta(days=now.weekday())
this_week_end = now + datetime.timedelta(days=6 - now.weekday())
table_suffix = datetime.datetime.strftime(this_week_start, '%Y_%m_%d') + datetime.datetime.strftime(
this_week_end, '_%d')
elif conf['split_save'] == 'month':
table_suffix = time.strftime('%Y_%m', time.localtime())
elif conf['split_save'] == 'year':
table_suffix = time.strftime('%Y', time.localtime())
if len(table_suffix):
table = table + '_' + table_suffix
return table
@classmethod
def fetchone(cls,resultObj):
return cls.fetchall(resultObj)[0]
@classmethod
def fetchall(cls, resultObj):
_list = []
for i in resultObj:
_dict = {}
item = i.items()
for j in item:
if isinstance(j[1],Decimal):
vl = float(Decimal(j[1]).quantize(Decimal('.001')))
_dict[j[0]] = vl
else:
_dict[j[0]] = j[1]
_list.append(_dict)
return _list
|
webServer/customer.py
| 0.252845 | 0.072243 |
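Func.getTableName appends a date-based suffix to the configured table (or collection) name whenever split_save is set. A hedged example of the resulting names, assuming a hypothetical config and a call made on 2021-06-15 (a Tuesday):

conf = {'table': 'access_log', 'split_save': 'day'}
Func.getTableName(conf, 'mysql')   # -> 'access_log_2021_06_15'
# split_save 'week'  -> 'access_log_2021_06_14_20' (Monday of that week, then '_' + Sunday's day)
# split_save 'month' -> 'access_log_2021_06'
# split_save 'year'  -> 'access_log_2021'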
import asyncio
import logging
import time
import typing
import ydb
from ydb import issues, settings, table
from ydb.table import (
BaseSession,
BaseTableClient,
_scan_query_request_factory,
_wrap_scan_query_response,
BaseTxContext,
)
from . import _utilities
from ydb import _apis, _session_impl
logger = logging.getLogger(__name__)
class Session(BaseSession):
async def read_table(
self,
path,
key_range=None,
columns=(),
ordered=False,
row_limit=None,
settings=None,
use_snapshot=None,
):
request = _session_impl.read_table_request_factory(
self._state,
path,
key_range,
columns,
ordered,
row_limit,
use_snapshot=use_snapshot,
)
stream_it = await self._driver(
request,
_apis.TableService.Stub,
_apis.TableService.StreamReadTable,
settings=settings,
)
return _utilities.AsyncResponseIterator(
stream_it, _session_impl.wrap_read_table_response
)
async def keep_alive(self, settings=None):
return await super(Session, self).keep_alive(settings)
async def create(self, settings=None):
return await super(Session, self).create(settings)
async def delete(self, settings=None):
return await super(Session, self).delete(settings)
async def execute_scheme(self, yql_text, settings=None):
return await super(Session, self).execute_scheme(yql_text, settings)
async def prepare(self, query, settings=None):
res = super(Session, self).prepare(query, settings)
if asyncio.iscoroutine(res):
res = await res
return res
async def explain(self, yql_text, settings=None):
return await super(Session, self).explain(yql_text, settings)
async def create_table(self, path, table_description, settings=None):
return await super(Session, self).create_table(
path, table_description, settings
)
async def drop_table(self, path, settings=None):
return await super(Session, self).drop_table(path, settings)
async def alter_table(
self,
path,
add_columns=None,
drop_columns=None,
settings=None,
alter_attributes=None,
add_indexes=None,
drop_indexes=None,
set_ttl_settings=None,
drop_ttl_settings=None,
add_column_families=None,
alter_column_families=None,
alter_storage_settings=None,
set_compaction_policy=None,
alter_partitioning_settings=None,
set_key_bloom_filter=None,
set_read_replicas_settings=None,
):
return await super(Session, self).alter_table(
path,
add_columns,
drop_columns,
settings,
alter_attributes,
add_indexes,
drop_indexes,
set_ttl_settings,
drop_ttl_settings,
add_column_families,
alter_column_families,
alter_storage_settings,
set_compaction_policy,
alter_partitioning_settings,
set_key_bloom_filter,
set_read_replicas_settings,
)
def transaction(self, tx_mode=None):
return TxContext(self._driver, self._state, self, tx_mode)
async def describe_table(self, path, settings=None):
return await super(Session, self).describe_table(path, settings)
async def copy_table(self, source_path, destination_path, settings=None):
return await super(Session, self).copy_table(
source_path, destination_path, settings
)
async def copy_tables(self, source_destination_pairs, settings=None):
return await super(Session, self).copy_tables(
source_destination_pairs, settings
)
async def rename_tables(self, rename_items, settings=None):
return await super(Session, self).rename_tables(rename_items, settings)
class TableClient(BaseTableClient):
def session(self):
return Session(self._driver, self._table_client_settings)
async def bulk_upsert(self, *args, **kwargs):
return await super(TableClient, self).bulk_upsert(*args, **kwargs)
async def scan_query(self, query, parameters=None, settings=None):
request = _scan_query_request_factory(query, parameters, settings)
response = await self._driver(
request,
_apis.TableService.Stub,
_apis.TableService.StreamExecuteScanQuery,
settings=settings,
)
return _utilities.AsyncResponseIterator(
response,
lambda resp: _wrap_scan_query_response(resp, self._table_client_settings),
)
class TxContext(BaseTxContext):
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if self._tx_state.tx_id is not None:
            # It's strictly recommended to close transactions explicitly,
            # either with the commit_tx=True flag when executing a statement or
            # by calling .commit() or .rollback(); here we make a best effort
            # to avoid leaving a transaction open.
logger.warning("Potentially leaked tx: %s", self._tx_state.tx_id)
try:
await self.rollback()
except issues.Error:
logger.warning("Failed to rollback leaked tx: %s", self._tx_state.tx_id)
self._tx_state.tx_id = None
async def execute(self, query, parameters=None, commit_tx=False, settings=None):
return await super(TxContext, self).execute(
query, parameters, commit_tx, settings
)
async def commit(self, settings=None):
return await super(TxContext, self).commit(settings)
async def rollback(self, settings=None):
return await super(TxContext, self).rollback(settings)
async def begin(self, settings=None):
return await super(TxContext, self).begin(settings)
async def retry_operation(callee, retry_settings=None, *args, **kwargs):
"""
The retry operation helper can be used to retry a coroutine that raises YDB specific
exceptions.
:param callee: A coroutine to retry.
:param retry_settings: An instance of ydb.RetrySettings that describes how the coroutine
should be retried. If None, default instance of retry settings will be used.
:param args: A tuple with positional arguments to be passed into the coroutine.
:param kwargs: A dictionary with keyword arguments to be passed into the coroutine.
    Returns the awaitable result of the coroutine. If the retries are not successful, the exception is raised.
"""
opt_generator = ydb.retry_operation_impl(callee, retry_settings, *args, **kwargs)
for next_opt in opt_generator:
if isinstance(next_opt, ydb.YdbRetryOperationSleepOpt):
await asyncio.sleep(next_opt.timeout)
else:
try:
return await next_opt.result
except Exception as e:
next_opt.set_exception(e)
class SessionCheckout:
__slots__ = ("_acquired", "_pool", "_blocking", "_timeout", "_retry_timeout")
def __init__(self, pool, timeout, retry_timeout):
"""
A context manager that checkouts a session from the specified pool and
returns it on manager exit.
:param pool: A SessionPool instance
:param blocking: A flag that specifies that session acquire method should blocks
:param timeout: A timeout in seconds for session acquire
"""
self._pool = pool
self._acquired = None
self._timeout = timeout
self._retry_timeout = retry_timeout
async def __aenter__(self):
self._acquired = await self._pool.acquire(self._timeout, self._retry_timeout)
return self._acquired
async def __aexit__(self, exc_type, exc_val, exc_tb):
if self._acquired is not None:
await self._pool.release(self._acquired)
class SessionPool:
def __init__(
self, driver: ydb.pool.IConnectionPool, size: int, min_pool_size: int = 0
):
self._driver_await_timeout = 3
self._should_stop = asyncio.Event()
self._waiters = 0
self._driver = driver
self._active_queue = asyncio.PriorityQueue()
self._active_count = 0
self._size = size
self._req_settings = settings.BaseRequestSettings().with_timeout(3)
self._logger = logger.getChild(self.__class__.__name__)
self._min_pool_size = min_pool_size
self._keep_alive_threshold = 4 * 60
self._terminating = False
self._init_session_timeout = 20
self._keep_alive_task = asyncio.ensure_future(self._keep_alive_loop())
self._min_pool_tasks = []
for _ in range(self._min_pool_size):
self._min_pool_tasks.append(
asyncio.ensure_future(self._init_and_put(self._init_session_timeout))
)
async def retry_operation(
self,
callee: typing.Callable,
*args,
retry_settings: table.RetrySettings = None,
**kwargs
):
if retry_settings is None:
retry_settings = table.RetrySettings()
async def wrapper_callee():
async with self.checkout(
timeout=retry_settings.get_session_client_timeout
) as session:
return await callee(session, *args, **kwargs)
return await retry_operation(wrapper_callee, retry_settings)
def _create(self) -> ydb.ISession:
self._active_count += 1
session = self._driver.table_client.session()
self._logger.debug("Created session %s", session)
return session
async def _init_session_logic(
self, session: ydb.ISession
) -> typing.Optional[ydb.ISession]:
try:
await self._driver.wait(self._driver_await_timeout)
session = await session.create(self._req_settings)
return session
except issues.Error as e:
self._logger.error("Failed to create session. Reason: %s", str(e))
except Exception as e:
self._logger.exception("Failed to create session. Reason: %s", str(e))
return None
async def _init_session(
self, session: ydb.ISession, retry_num: int = None
) -> typing.Optional[ydb.ISession]:
"""
:param retry_num: Number of retries. If None - retries until success.
:return:
"""
i = 0
while retry_num is None or i < retry_num:
curr_sess = await self._init_session_logic(session)
if curr_sess:
return curr_sess
i += 1
return None
async def _prepare_session(self, timeout, retry_num) -> ydb.ISession:
session = self._create()
try:
new_sess = await asyncio.wait_for(
self._init_session(session, retry_num=retry_num), timeout=timeout
)
if not new_sess:
self._destroy(session)
return new_sess
except Exception as e:
self._destroy(session)
raise e
async def _get_session_from_queue(self, timeout: float):
task_wait = asyncio.ensure_future(
asyncio.wait_for(self._active_queue.get(), timeout=timeout)
)
task_should_stop = asyncio.ensure_future(self._should_stop.wait())
done, pending = await asyncio.wait(
(task_wait, task_should_stop), return_when=asyncio.FIRST_COMPLETED
)
if task_should_stop in done:
task_wait.cancel()
return self._create()
_, session = task_wait.result()
return session
async def acquire(
self, timeout: float = None, retry_timeout: float = None, retry_num: int = None
) -> ydb.ISession:
if self._should_stop.is_set():
self._logger.debug("Acquired not inited session")
return self._create()
if retry_timeout is None:
retry_timeout = timeout
try:
_, session = self._active_queue.get_nowait()
self._logger.debug(
"Acquired active session from queue: %s" % session.session_id
)
return session
except asyncio.QueueEmpty:
pass
if self._active_count < self._size:
self._logger.debug(
"Session pool is not large enough (active_count < size: %d < %d). "
"will create a new session.",
self._active_count,
self._size,
)
try:
session = await self._prepare_session(
timeout=retry_timeout, retry_num=retry_num
)
except asyncio.TimeoutError:
raise issues.SessionPoolEmpty("Timeout when creating session")
if session is not None:
self._logger.debug(
"Acquired new created session: %s" % session.session_id
)
return session
try:
self._waiters += 1
session = await self._get_session_from_queue(timeout)
return session
except asyncio.TimeoutError:
raise issues.SessionPoolEmpty("Timeout when wait")
finally:
self._waiters -= 1
def _is_min_pool_size_satisfied(self, delta=0):
if self._terminating:
return True
return self._active_count + delta >= self._min_pool_size
async def _init_and_put(self, timeout=10):
sess = await self._prepare_session(timeout=timeout, retry_num=None)
await self.release(session=sess)
def _destroy(self, session: ydb.ISession, wait_for_del: bool = False):
self._logger.debug("Requested session destroy: %s.", session)
self._active_count -= 1
self._logger.debug(
"Session %s is no longer active. Current active count %d.",
session,
self._active_count,
)
if self._waiters > 0 or not self._is_min_pool_size_satisfied():
asyncio.ensure_future(self._init_and_put(self._init_session_timeout))
if session.initialized():
coro = session.delete(self._req_settings)
if wait_for_del:
self._logger.debug("Sent delete on session %s", session)
return coro
else:
asyncio.ensure_future(coro)
async def release(self, session: ydb.ISession):
self._logger.debug("Put on session %s", session.session_id)
if session.closing():
self._destroy(session)
return False
if session.pending_query():
self._destroy(session)
return False
if not session.initialized() or self._should_stop.is_set():
self._destroy(session)
return False
await self._active_queue.put((time.time() + 10 * 60, session))
self._logger.debug("Session returned to queue: %s", session.session_id)
async def _pick_for_keepalive(self):
try:
priority, session = self._active_queue.get_nowait()
except asyncio.QueueEmpty:
return None
till_expire = priority - time.time()
if till_expire < self._keep_alive_threshold:
return session
await self._active_queue.put((priority, session))
return None
async def _send_keep_alive(self, session: ydb.ISession):
if session is None:
return False
if self._should_stop.is_set():
self._destroy(session)
return False
await session.keep_alive(self._req_settings)
try:
await self.release(session)
except Exception:
self._destroy(session)
async def _keep_alive_loop(self):
while True:
try:
await asyncio.wait_for(
self._should_stop.wait(), timeout=self._keep_alive_threshold // 4
)
break
except asyncio.TimeoutError:
while True:
session = await self._pick_for_keepalive()
if not session:
break
asyncio.ensure_future(self._send_keep_alive(session))
async def stop(self, timeout=None):
self._logger.debug("Requested session pool stop.")
self._should_stop.set()
self._terminating = True
for task in self._min_pool_tasks:
task.cancel()
self._logger.debug("Destroying sessions in active queue")
tasks = []
while True:
try:
_, session = self._active_queue.get_nowait()
tasks.append(self._destroy(session, wait_for_del=True))
except asyncio.QueueEmpty:
break
await asyncio.gather(*tasks)
self._logger.debug("Destroyed active sessions")
await asyncio.wait_for(self._keep_alive_task, timeout=timeout)
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.stop()
async def wait_until_min_size(self):
await asyncio.gather(*self._min_pool_tasks)
def checkout(self, timeout: float = None, retry_timeout: float = None):
return SessionCheckout(self, timeout, retry_timeout=retry_timeout)
|
ydb/aio/table.py
| 0.581778 | 0.095645 |
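SessionPool.retry_operation checks a session out of the pool and passes it to the supplied coroutine as its first argument. A hedged end-to-end sketch is below; the endpoint, database and YQL text are placeholders, and the ydb.aio.Driver constructor is assumed from the SDK rather than defined in this file.

import asyncio
import ydb
import ydb.aio

async def main():
    driver = ydb.aio.Driver(endpoint="grpc://localhost:2136", database="/local")  # placeholder connection
    await driver.wait(timeout=5)
    pool = ydb.aio.SessionPool(driver, size=10)

    async def callee(session):
        # retry_operation hands the checked-out session over as the first argument.
        return await session.execute_scheme(
            "CREATE TABLE demo (id Uint64, PRIMARY KEY (id))"  # placeholder schema
        )

    await pool.retry_operation(callee)
    await pool.stop()
    await driver.stop()

asyncio.run(main())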
from __future__ import annotations
from typing import Callable
from xknx import XKNX
from xknx.devices import DateTime, ExposeSensor
from xknx.dpt import DPTNumeric
from xknx.remote_value import RemoteValueSensor
from homeassistant.const import (
CONF_ENTITY_ID,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import Event, HomeAssistant, State, callback
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.typing import ConfigType, StateType
from .const import KNX_ADDRESS
from .schema import ExposeSchema
@callback
def create_knx_exposure(
hass: HomeAssistant, xknx: XKNX, config: ConfigType
) -> KNXExposeSensor | KNXExposeTime:
"""Create exposures from config."""
address = config[KNX_ADDRESS]
expose_type = config[ExposeSchema.CONF_KNX_EXPOSE_TYPE]
attribute = config.get(ExposeSchema.CONF_KNX_EXPOSE_ATTRIBUTE)
default = config.get(ExposeSchema.CONF_KNX_EXPOSE_DEFAULT)
exposure: KNXExposeSensor | KNXExposeTime
if (
isinstance(expose_type, str)
and expose_type.lower() in ExposeSchema.EXPOSE_TIME_TYPES
):
exposure = KNXExposeTime(xknx, expose_type, address)
else:
entity_id = config[CONF_ENTITY_ID]
exposure = KNXExposeSensor(
hass,
xknx,
expose_type,
entity_id,
attribute,
default,
address,
)
return exposure
class KNXExposeSensor:
"""Object to Expose Home Assistant entity to KNX bus."""
def __init__(
self,
hass: HomeAssistant,
xknx: XKNX,
expose_type: int | str,
entity_id: str,
attribute: str | None,
default: StateType,
address: str,
) -> None:
"""Initialize of Expose class."""
self.hass = hass
self.xknx = xknx
self.type = expose_type
self.entity_id = entity_id
self.expose_attribute = attribute
self.expose_default = default
self.address = address
self._remove_listener: Callable[[], None] | None = None
self.device: ExposeSensor = self.async_register()
self._init_expose_state()
@callback
def async_register(self) -> ExposeSensor:
"""Register listener."""
if self.expose_attribute is not None:
_name = self.entity_id + "__" + self.expose_attribute
else:
_name = self.entity_id
device = ExposeSensor(
self.xknx,
name=_name,
group_address=self.address,
value_type=self.type,
)
self._remove_listener = async_track_state_change_event(
self.hass, [self.entity_id], self._async_entity_changed
)
return device
@callback
def _init_expose_state(self) -> None:
"""Initialize state of the exposure."""
init_state = self.hass.states.get(self.entity_id)
state_value = self._get_expose_value(init_state)
self.device.sensor_value.value = state_value
@callback
def shutdown(self) -> None:
"""Prepare for deletion."""
if self._remove_listener is not None:
self._remove_listener()
self._remove_listener = None
self.device.shutdown()
def _get_expose_value(self, state: State | None) -> StateType:
"""Extract value from state."""
if state is None or state.state in (STATE_UNKNOWN, STATE_UNAVAILABLE):
value = self.expose_default
else:
value = (
state.state
if self.expose_attribute is None
else state.attributes.get(self.expose_attribute, self.expose_default)
)
if self.type == "binary":
if value in (1, STATE_ON, "True"):
return True
if value in (0, STATE_OFF, "False"):
return False
if (
value is not None
and isinstance(self.device.sensor_value, RemoteValueSensor)
and issubclass(self.device.sensor_value.dpt_class, DPTNumeric)
):
return float(value)
return value
async def _async_entity_changed(self, event: Event) -> None:
"""Handle entity change."""
new_state = event.data.get("new_state")
new_value = self._get_expose_value(new_state)
if new_value is None:
return
old_state = event.data.get("old_state")
# don't use default value for comparison on first state change (old_state is None)
old_value = self._get_expose_value(old_state) if old_state is not None else None
# don't send same value sequentially
if new_value != old_value:
await self._async_set_knx_value(new_value)
async def _async_set_knx_value(self, value: StateType) -> None:
"""Set new value on xknx ExposeSensor."""
if value is None:
return
await self.device.set(value)
class KNXExposeTime:
"""Object to Expose Time/Date object to KNX bus."""
def __init__(self, xknx: XKNX, expose_type: str, address: str) -> None:
"""Initialize of Expose class."""
self.xknx = xknx
self.expose_type = expose_type
self.address = address
self.device: DateTime = self.async_register()
@callback
def async_register(self) -> DateTime:
"""Register listener."""
return DateTime(
self.xknx,
name=self.expose_type.capitalize(),
broadcast_type=self.expose_type.upper(),
localtime=True,
group_address=self.address,
)
@callback
def shutdown(self) -> None:
"""Prepare for deletion."""
self.device.shutdown()
|
homeassistant/components/knx/expose.py
| 0.880861 | 0.221424 |
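create_knx_exposure only needs hass, an XKNX instance, and a config mapping using the schema keys read above. A hedged sketch of wiring up a sensor exposure follows; the entity id, group address and value type are illustrative, and a real integration would obtain xknx from the KNX config entry rather than constructing it here.

from xknx import XKNX

def setup_example_exposure(hass):
    # Illustrative values only; the keys are the same constants the module reads.
    xknx = XKNX()
    config = {
        KNX_ADDRESS: "1/2/3",
        ExposeSchema.CONF_KNX_EXPOSE_TYPE: "percentU8",
        CONF_ENTITY_ID: "sensor.example_value",
    }
    exposure = create_knx_exposure(hass, xknx, config)
    return exposure  # call exposure.shutdown() when tearing down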
import logging
import os
from anykeystore import create_store_from_settings
from pyramid.config import Configurator
from pyramid.exceptions import ConfigurationError
from pyramid.response import Response
from velruse.app.utils import generate_token
from velruse.app.utils import redirect_form
log = logging.getLogger(__name__)
def auth_complete_view(context, request):
endpoint = request.registry.settings.get('endpoint')
token = generate_token()
storage = request.registry.velruse_store
if 'birthday' in context.profile:
context.profile['birthday'] = \
context.profile['birthday'].strftime('%Y-%m-%d')
result_data = {
'profile': context.profile,
'credentials': context.credentials,
}
storage.store(token, result_data, expires=300)
form = redirect_form(endpoint, token)
return Response(body=form)
def auth_denied_view(context, request):
endpoint = request.registry.settings.get('endpoint')
token = generate_token()
storage = request.registry.velruse_store
error_dict = {
'code': getattr(context, 'code', None),
'description': context.message,
}
storage.store(token, error_dict, expires=300)
form = redirect_form(endpoint, token)
return Response(body=form)
def auth_info_view(request):
# TODO: insecure URL, must be protected behind a firewall
storage = request.registry.velruse_store
token = request.get('token')
try:
return storage.retrieve(token)
except KeyError:
        log.info('auth_info requested invalid token "%s"', token)
request.response.status = 400
return None
def default_setup(config):
from pyramid.session import UnencryptedCookieSessionFactoryConfig
log.info('Using an unencrypted cookie-based session. This can be '
'changed by pointing the "velruse.setup" setting at a different '
'function for configuring the session factory.')
settings = config.registry.settings
secret = settings.get('session.secret')
cookie_name = settings.get('session.cookie_name', 'velruse.session')
if secret is None:
log.warn('Configuring unencrypted cookie-based session with a '
'random secret which will invalidate old cookies when '
'restarting the app.')
secret = ''.join('%02x' % ord(x) for x in os.urandom(16))
log.info('autogenerated session secret: %s', secret)
factory = UnencryptedCookieSessionFactoryConfig(
secret, cookie_name=cookie_name)
config.set_session_factory(factory)
# setup backing storage
storage_string = settings.get('store', 'memory')
settings['store.store'] = storage_string
store = create_store_from_settings(settings, prefix='store.')
config.register_velruse_store(store)
def register_velruse_store(config, storage):
"""Add key/value store for velruse to the pyramid application."""
config.registry.velruse_store = storage
settings_adapter = {
'bitbucket': 'add_bitbucket_login_from_settings',
'douban': 'add_douban_login_from_settings',
'facebook': 'add_facebook_login_from_settings',
'github': 'add_github_login_from_settings',
'lastfm': 'add_lastfm_login_from_settings',
'linkedin': 'add_linkedin_login_from_settings',
'live': 'add_live_login_from_settings',
'qq': 'add_qq_login_from_settings',
'renren': 'add_renren_login_from_settings',
'taobao': 'add_taobao_login_from_settings',
'twitter': 'add_twitter_login_from_settings',
'weibo': 'add_weibo_login_from_settings',
}
def find_providers(settings):
providers = set()
for k in settings:
if k.startswith('provider.'):
k = k[9:].split('.', 1)[0]
providers.add(k)
return providers
def load_provider(config, provider):
settings = config.registry.settings
impl = settings.get('provider.%s.impl' % provider) or provider
login_cfg = settings_adapter.get(impl)
if login_cfg is None:
raise ConfigurationError(
'could not find configuration method for provider %s'
'' % provider)
loader = getattr(config, login_cfg)
loader(prefix='provider.%s.' % provider)
def includeme(config):
"""Add the velruse standalone app configuration to a pyramid app."""
settings = config.registry.settings
config.add_directive('register_velruse_store', register_velruse_store)
# setup application
setup = settings.get('setup') or default_setup
if setup:
config.include(setup)
# include supported providers
for provider in settings_adapter:
config.include('velruse.providers.%s' % provider)
# configure requested providers
for provider in find_providers(settings):
load_provider(config, provider)
# check for required settings
if not settings.get('endpoint'):
raise ConfigurationError(
'missing required setting "endpoint"')
# add views
config.add_view(
auth_complete_view,
context='velruse.AuthenticationComplete')
config.add_view(
auth_denied_view,
context='velruse.AuthenticationDenied')
config.add_view(
auth_info_view,
name='auth_info',
request_param='format=json',
renderer='json')
def make_app(**settings):
config = Configurator(settings=settings)
config.include(includeme)
return config.make_wsgi_app()
def make_velruse_app(global_conf, **settings):
"""Construct a complete WSGI app ready to serve by Paste
Example INI file:
.. code-block:: ini
[server:main]
use = egg:Paste#http
host = 0.0.0.0
port = 80
[composite:main]
use = egg:Paste#urlmap
/ = YOURAPP
/velruse = velruse
[app:velruse]
use = egg:velruse
setup = myapp.setup_velruse
endpoint = http://example.com/logged_in
store = redis
store.host = localhost
store.port = 6379
store.db = 0
store.key_prefix = velruse_ustore
provider.facebook.consumer_key = KMfXjzsA2qVUcnnRn3vpnwWZ2pwPRFZdb
provider.facebook.consumer_secret =
<KEY>
provider.facebook.scope = email
provider.tw.impl = twitter
provider.tw.consumer_key = ULZ6PkJbsqw2GxZWCIbOEBZdkrb9XwgXNjRy
provider.tw.consumer_secret = <KEY>
[app:YOURAPP]
use = egg:YOURAPP
full_stack = true
static_files = true
"""
return make_app(**settings)
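A hedged sketch of assembling the standalone app programmatically with the same kind of settings the INI example above uses; the endpoint and provider credentials are placeholders.
# Illustrative settings dict mirroring the INI example; values are placeholders.
settings = {
    'endpoint': 'http://example.com/logged_in',
    'store': 'memory',
    'provider.github.consumer_key': 'PLACEHOLDER_KEY',
    'provider.github.consumer_secret': 'PLACEHOLDER_SECRET',
}
application = make_app(**settings)  # WSGI callable, servable by waitress, gunicorn, etc.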
|
velruse/app/__init__.py
|
| 0.332961 | 0.057388 |
import logging
import uuid
from diskimage_builder.block_device.exception \
import BlockDeviceSetupException
from diskimage_builder.block_device.plugin import NodeBase
from diskimage_builder.block_device.plugin import PluginBase
from diskimage_builder.block_device.utils import exec_sudo
logger = logging.getLogger(__name__)
# There is the need to check the length of the label of
# the filesystem. The maximum length depends on the used filesystem.
# This map provides information about the maximum label length.
file_system_max_label_length = {
"ext2": 16,
"ext3": 16,
"ext4": 16,
"xfs": 12,
"vfat": 11
}
class FilesystemNode(NodeBase):
def __init__(self, config, state):
logger.debug("Create filesystem object; config [%s]", config)
super(FilesystemNode, self).__init__(config['name'], state)
# Parameter check (mandatory)
for pname in ['base', 'type']:
if pname not in config:
raise BlockDeviceSetupException(
"Mkfs config needs [%s]" % pname)
setattr(self, pname, config[pname])
# Parameter check (optional)
for pname in ['label', 'opts', 'uuid']:
setattr(self, pname,
config[pname] if pname in config else None)
if self.label is None:
self.label = self.name
# Historic reasons - this will hopefully vanish in one of
# the next major releases
if self.label == "cloudimg-rootfs" and self.type == "xfs":
logger.warning("Default label [cloudimg-rootfs] too long for xfs "
"file system - using [img-rootfs] instead")
self.label = "img-rootfs"
# ensure we don't already have a fs with this label ... they
# all must be unique.
if 'fs_labels' in self.state:
if self.label in self.state['fs_labels']:
raise BlockDeviceSetupException(
"File system label [%s] used more than once" % self.label)
self.state['fs_labels'].append(self.label)
else:
self.state['fs_labels'] = [self.label]
if self.type in file_system_max_label_length:
if file_system_max_label_length[self.type] < len(self.label):
raise BlockDeviceSetupException(
"Label [{label}] too long for filesystem [{type}]: "
"{len} > {max_len}".format(**{
'label': self.label,
'type': self.type,
'len': len(self.label),
'max_len': file_system_max_label_length[self.type]}))
else:
logger.warning("Length of label [%s] cannot be checked for "
"filesystem [%s]: unknown max length",
self.label, self.type)
logger.warning("Continue - but this might lead to an error")
if self.opts is not None:
self.opts = self.opts.strip().split(' ')
if self.uuid is None:
self.uuid = str(uuid.uuid4())
logger.debug("Filesystem created [%s]", self)
def get_edges(self):
edge_from = [self.base]
edge_to = []
return (edge_from, edge_to)
def create(self):
cmd = ["mkfs"]
cmd.extend(['-t', self.type])
if self.opts:
cmd.extend(self.opts)
if self.type == "vfat":
cmd.extend(["-n", self.label])
else:
cmd.extend(["-L", self.label])
if self.type in ('ext2', 'ext3', 'ext4'):
cmd.extend(['-U', self.uuid])
elif self.type == 'xfs':
cmd.extend(['-m', "uuid=%s" % self.uuid])
else:
logger.warning("UUID will not be written for fs type [%s]",
self.type)
if self.type in ('ext2', 'ext3', 'ext4', 'xfs'):
cmd.append('-q')
if 'blockdev' not in self.state:
self.state['blockdev'] = {}
device = self.state['blockdev'][self.base]['device']
cmd.append(device)
logger.debug("Creating fs command [%s]", cmd)
exec_sudo(cmd)
if 'filesys' not in self.state:
self.state['filesys'] = {}
self.state['filesys'][self.name] \
= {'uuid': self.uuid, 'label': self.label,
'fstype': self.type, 'opts': self.opts,
'device': device}
class Mkfs(PluginBase):
"""Create a file system
This block device module handles creating different file
systems.
"""
def __init__(self, config, defaults, state):
super(Mkfs, self).__init__()
self.filesystems = {}
fs = FilesystemNode(config, state)
self.filesystems[fs.get_name()] = fs
def get_nodes(self):
nodes = []
for _, fs in self.filesystems.items():
nodes.append(fs)
return nodes
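A small sketch of the configuration this plugin consumes, assuming a partition node named root already exists in the block-device state; the device path and mkfs options below are illustrative.
# Illustrative Mkfs/FilesystemNode configuration; 'base' must name an existing
# block-device node and the device path below is a placeholder.
config = {
    'name': 'mkfs_root',
    'base': 'root',
    'type': 'ext4',
    'label': 'img-root',
    'opts': '-i 4096',
}
state = {'blockdev': {'root': {'device': '/dev/loop0p1'}}}
fs_node = FilesystemNode(config, state)
# fs_node.create() would then invoke:
#   mkfs -t ext4 -i 4096 -L img-root -U <generated uuid> -q /dev/loop0p1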
|
diskimage_builder/block_device/level2/mkfs.py
|
| 0.497803 | 0.164852 |
import unittest
from ctci.structs.stack import Stack
class TestStackPush(unittest.TestCase):
def setUp(self):
self.a = Stack()
def test_one(self):
self.a.push(1)
self.assertEqual(self.a.peek(), 1)
def test_two(self):
self.a.push(1)
self.a.push(2)
self.assertEqual(self.a.peek(), 2)
class TestStackLen(unittest.TestCase):
def setUp(self):
self.a = Stack()
def test_empty(self):
self.assertEqual(len(self.a), 0)
def test_one(self):
self.a.push(1)
self.assertEqual(len(self.a), 1)
def test_multi(self):
self.a.push(1)
self.a.push(1)
self.a.push(1)
self.a.push(1)
self.assertEqual(len(self.a), 4)
def test_with_pop(self):
self.a.push(1)
self.a.pop()
self.a.push(1)
self.a.push(1)
self.assertEqual(len(self.a), 2)
def test_with_pop_multi(self):
self.a.push(1)
self.a.pop()
self.a.push(1)
self.a.push(1)
self.a.pop()
self.assertEqual(len(self.a), 1)
class TestStackPop(unittest.TestCase):
def setUp(self):
self.a = Stack()
def test_one(self):
self.a.push(1)
self.assertEqual(self.a.pop(), 1)
def test_two(self):
self.a.push(1)
self.a.push(4)
self.assertEqual(self.a.pop(), 4)
class TestStackPeek(unittest.TestCase):
def setUp(self):
self.a = Stack()
def test_one(self):
self.a.push(1)
self.assertEqual(self.a.peek(), 1)
def test_two(self):
self.a.push(1)
self.a.push(4)
self.assertEqual(self.a.peek(), 4)
class TestStackIsEmpty(unittest.TestCase):
def setUp(self):
self.a = Stack()
def test_empty(self):
self.assertTrue(self.a.is_empty())
def test_not_empty(self):
self.a.push(1)
self.assertFalse(self.a.is_empty())
if __name__ == '__main__':
unittest.main()
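For reference, a minimal list-backed implementation that would satisfy the interface these tests exercise (push, pop, peek, is_empty, __len__); the real ctci.structs.stack.Stack may differ internally.
# Minimal sketch of the interface under test; not the actual ctci implementation.
class Stack:
    def __init__(self):
        self._items = []
    def push(self, item):
        self._items.append(item)
    def pop(self):
        return self._items.pop()
    def peek(self):
        return self._items[-1]
    def is_empty(self):
        return len(self._items) == 0
    def __len__(self):
        return len(self._items)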
|
tests/test_stack.py
|
| 0.442877 | 0.578984 |
import inspect
from collections import OrderedDict
from typing import Callable, Any, Union, Iterable, Dict, Tuple
from typish._types import Empty
from typish.classes._cls_dict import ClsDict
class ClsFunction:
"""
    ClsFunction is a callable that takes a ClsDict or a dict. When called, it uses
the first argument to check for the right function in its body, executes it
and returns the result.
"""
def __init__(self, body: Union[ClsDict,
Dict[type, Callable],
Iterable[Tuple[type, Callable]],
Iterable[Callable]]):
from typish.functions._instance_of import instance_of
if isinstance(body, ClsDict):
self.body = body
elif isinstance(body, dict):
self.body = ClsDict(body)
elif instance_of(body, Iterable[Callable]):
list_of_tuples = []
for func in body:
signature = inspect.signature(func)
params = list(signature.parameters.keys())
if not params:
raise TypeError('ClsFunction expects callables that take '
'at least one parameter, {} does not.'
.format(func.__name__))
first_param = signature.parameters[params[0]]
hint = first_param.annotation
key = Any if hint == Empty else hint
list_of_tuples.append((key, func))
self.body = ClsDict(OrderedDict(list_of_tuples))
elif instance_of(body, Iterable[Tuple[type, Callable]]):
self.body = ClsDict(OrderedDict(body))
else:
raise TypeError('ClsFunction expects a ClsDict or a dict that can '
'be turned to a ClsDict or an iterable of '
'callables.')
if not all(isinstance(value, Callable) for value in self.body.values()):
raise TypeError('ClsFunction expects a dict or ClsDict with only '
'callables as values.')
def understands(self, item: Any) -> bool:
"""
Check to see if this ClsFunction can take item.
:param item: the item that is checked.
:return: True if this ClsFunction can take item.
"""
try:
self.body[item]
return True
except KeyError:
return False
def __call__(self, *args, **kwargs):
if not args:
raise TypeError('ClsFunction must be called with at least 1 '
'positional argument.')
callable_ = self.body[args[0]]
try:
return callable_(*args, **kwargs)
except TypeError as err:
raise TypeError('Unable to call function for \'{}\': {}'
.format(args[0], err.args[0]))
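A short usage sketch: dispatching on the type of the first argument with the class defined above; the handlers are toy examples.
# Illustrative dispatch on the type of the first argument.
describe = ClsFunction({
    int: lambda n: 'int: {}'.format(n),
    str: lambda s: 'str: {}'.format(s),
})
print(describe(42))                 # -> int: 42
print(describe('hello'))            # -> str: hello
print(describe.understands(3.14))   # -> False, no handler registered for float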
|
typish/classes/_cls_function.py
|
| 0.718792 | 0.305361 |
def get_config():
return {
'aws': {
'profile_name': 'mgap'
},
'clarifai': {
'api_key': ''
},
'elucidate': {
'host': 'http://localhost',
'port': 8080,
'base_path': '/annotation',
'annotation_model': 'w3c',
'request_headers_seed': {
'Accept': 'application/ld+json;profile="http://www.w3.org/ns/anno.jsonld"',
'Content-Type': 'application/ld+json'
}
},
'google_vision': {
'api_key': ''
},
'iiif': {
'image_api_default_params': {
'region': 'full',
'size': 'full',
'rotation': '0',
'quality': 'default',
'format': 'jpg'
}
},
'rabbitmq': {
'username': 'guest',
'password': '',
'host': 'localhost',
'port': 5672,
'vhost': ''
},
'redis': {
'host': 'redis',
'port': 6379,
'db': {
'computer_vision_results': '0',
'celery_task_results': '1'
}
},
'solr': {
'indexes': {
'amazon_rekognition': 'http://localhost:8983/solr/amazon_rekognition',
'clarifai': 'http://localhost:8983/solr/clarifai',
'google_vision': 'http://localhost:8983/solr/google_vision',
'combined': 'http://localhost:8983/solr/combined'
},
'tags_field': 'tags_ssim',
'copy_fields': [
{
'src': 'subject_tesim',
'dst': 'subject_sim'
},
{
'src': 'human_readable_type_tesim',
'dst': 'human_readable_type_sim'
},
{
'src': 'human_readable_resource_type_tesim',
'dst': 'human_readable_resource_type_sim'
},
{
'src': 'genre_tesim',
'dst': 'genre_sim'
},
{
'src': 'named_subject_tesim',
'dst': 'named_subject_sim'
},
{
'src': 'location_tesim',
'dst': 'location_sim'
},
{
'src': 'language_tesim',
'dst': 'language_sim'
}
]
},
'web_annotation': {
'annotation_seed': {
'@context': 'http://www.w3.org/ns/anno.jsonld',
'type': 'Annotation',
'motivation': 'tagging',
'target': {
'type': 'SpecificResource',
'selector': {
'type': 'ImageApiSelector'
}
},
'creator': {
'type': 'Organization',
'name': 'UCLA Library',
'homepage': 'https://library.ucla.edu'
},
'generator': {
'type': 'Software',
'name': 'Machine Generated Annotations Pipeline',
'homepage': 'https://github.com/UCLALibrary/mgap'
}
},
'annotation_body_seed': {
'type': 'TextualBody',
'format': 'text/json',
'language': 'en',
'purpose': 'tagging',
'creator': {
'type': 'Software'
},
'generator': {
'type': 'Software'
}
},
'annotation_container_seed': {
'@context': 'http://www.w3.org/ns/anno.jsonld',
'type': 'AnnotationCollection'
}
}
}
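A small hedged sketch of how a consumer might read a few of these nested settings; the keys used are taken from the dict above.
# Illustrative reads of nested configuration values.
config = get_config()
combined_index = config['solr']['indexes']['combined']
tags_field = config['solr']['tags_field']
iiif_size = config['iiif']['image_api_default_params']['size']
print(combined_index, tags_field, iiif_size)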
|
mgap/util.py
|
| 0.561936 | 0.131507 |
from multiprocessing.dummy import Pool
from source.request import Request
import datetime
class Batch:
"""
Concurrently retrieves a set of reviews
"""
_DEFAULT_THREADS = 8
_REVIEWS_PER_BATCH = 20
_TIME_FORMAT = "[%Y-%m-%d_%H:%M:%S]"
def __init__(self, app_id: int, start: int, concurrent_requests: int = None):
assert isinstance(start, int)
assert isinstance(app_id, int)
if concurrent_requests is not None:
assert isinstance(concurrent_requests, int)
self._threads = concurrent_requests
else:
self._threads = self._DEFAULT_THREADS
self._start = start
self._pool = Pool(self._threads)
self._requests = list()
stop = start + (self._threads * self._REVIEWS_PER_BATCH)
for offset in list(range(start, stop, self._REVIEWS_PER_BATCH)):
self._requests.append(Request(app_id, offset))
assert len(self._requests) == self._threads
_ = self._pool.map(self._execute_request, self._requests)
self._pool.close()
self._pool.join()
return
def _execute_request(self, request: Request) -> None:
"""
Execute a request
"""
request.execute()
return
def print_reviews(self) -> None:
for request in self._requests:
reviews = request.reviews()
for review in reviews:
print(review.describe())
def number_of_reviews_retrieved(self) -> int:
"""
Return the integer number of reviews retrieved
in this batch
"""
if hasattr(self, '_number_reviews_retrieved'):
return self._number_reviews_retrieved
count = 0
for request in self._requests:
count += request.number_of_reviews_retrieved()
assert isinstance(count, int)
self._number_reviews_retrieved = count
return count
def _total_retrieved(self) -> int:
"""
Return the total number of reviews retrieved so far
"""
if hasattr(self, '_total_retrieved_so_far'):
return self._total_retrieved_so_far
self._total_retrieved_so_far = self._start + self.number_of_reviews_retrieved()
assert isinstance(self._total_retrieved_so_far, int)
return self._total_retrieved_so_far
def next_batch_start(self) -> int:
"""
Return the integer offset at which a following Batch
should start.
"""
start = self._total_retrieved() + 1
return start
def estimated_total_available(self) -> int:
"""
Return an integer number of reviews estimated to be available
on Steam. May return None, as Valve is inconsistent about returning
total available reviews in query summaries.
"""
for request in self._requests:
if request.total_reviews_available() is not None:
return request.total_reviews_available()
return None
def _completion_proportion(self, total_available: int) -> float:
"""
Return float describing the proportion of total reviews
that have been retrieved
"""
total_retrieved = self.number_of_reviews_retrieved() + self._start
proportion = total_retrieved / total_available
assert isinstance(proportion, float)
return proportion
def _completion_percentage(self, total_available: int) -> str:
"""
Return as string percentage estimated completion
"""
return str(int(self._completion_proportion(total_available) * 100)) + '%'
def estimate_progress(self, operation_start: datetime.datetime, total_available: int) -> str:
"""
Return a string describing present progress
"""
estimate = datetime.datetime.strftime(datetime.datetime.now(), self._TIME_FORMAT)
total_retrieved = self.number_of_reviews_retrieved() + self._start
estimate += " Retrieved " + "{:,}".format(total_retrieved)
estimate += " of ~" + "{:,}".format(total_available)
estimate += " available reviews (" + self._completion_percentage(total_available) + ')'
passed_time = datetime.datetime.now() - operation_start
seconds_remaining = passed_time.total_seconds() / self._completion_proportion(total_available)
time_remaining = datetime.timedelta(seconds=seconds_remaining)
time_remaining_str = str(time_remaining).split('.')[0]
estimate += '. ~' + time_remaining_str + ' remaining.'
return estimate
def csv_lines(self) -> [[str]]:
"""
        Return a list of lists of strings describing this batch's reviews
"""
strings = list()
for request in self._requests:
strings += request.csv_lines()
return strings
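A hedged sketch of a retrieval loop built on this class; the app id is a placeholder, and note that each Batch construction performs live requests via source.request.Request.
# Illustrative retrieval loop; 620 is a placeholder Steam app id.
import datetime
operation_start = datetime.datetime.now()
batch = Batch(app_id=620, start=0)
total_available = batch.estimated_total_available()
while total_available and batch.number_of_reviews_retrieved() > 0:
    print(batch.estimate_progress(operation_start, total_available))
    batch = Batch(app_id=620, start=batch.next_batch_start())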
|
source/batch.py
|
| 0.788502 | 0.318644 |
"""Binary class head for Estimator that allow integration with TF Privacy."""
import tensorflow as tf
from tensorflow import estimator as tf_estimator
from tensorflow.python.keras.utils import losses_utils # pylint: disable=g-direct-tensorflow-import
from tensorflow_estimator.python.estimator import model_fn
from tensorflow_estimator.python.estimator.canned import prediction_keys
from tensorflow_estimator.python.estimator.export import export_output
from tensorflow_estimator.python.estimator.head import base_head
from tensorflow_estimator.python.estimator.mode_keys import ModeKeys
class DPBinaryClassHead(tf_estimator.BinaryClassHead):
"""Creates a TF Privacy-enabled version of BinaryClassHead."""
def __init__(self,
weight_column=None,
thresholds=None,
label_vocabulary=None,
loss_reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
loss_fn=None,
name=None):
super().__init__(
weight_column=weight_column,
thresholds=thresholds,
label_vocabulary=label_vocabulary,
loss_reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
loss_fn=loss_fn,
name=name)
def loss(self,
labels,
logits,
features=None,
mode=None,
regularization_losses=None):
"""Returns regularized training loss. See `base_head.Head` for details."""
del mode # Unused for this head.
with tf.compat.v1.name_scope(
'losses', values=(logits, labels, regularization_losses, features)):
logits = base_head.check_logits_final_dim(logits, self.logits_dimension)
labels = self._processed_labels(logits, labels)
unweighted_loss, weights = self._unweighted_loss_and_weights(
logits, labels, features)
vector_training_loss = losses_utils.compute_weighted_loss(
unweighted_loss,
sample_weight=weights,
reduction=tf.keras.losses.Reduction.NONE)
regularization_loss = tf.math.add_n(
regularization_losses) if regularization_losses is not None else None
vector_regularized_training_loss = (
tf.add(vector_training_loss, regularization_loss)
if regularization_loss is not None else vector_training_loss)
return vector_regularized_training_loss
def _create_tpu_estimator_spec(self,
features,
mode,
logits,
labels=None,
optimizer=None,
trainable_variables=None,
train_op_fn=None,
update_ops=None,
regularization_losses=None):
"""See superclass for description."""
with tf.compat.v1.name_scope(self._name, 'head'):
# Predict.
pred_keys = prediction_keys.PredictionKeys
predictions = self.predictions(logits)
if mode == ModeKeys.PREDICT:
probabilities = predictions[pred_keys.PROBABILITIES]
logistic = predictions[pred_keys.LOGISTIC]
classifier_output = base_head.classification_output(
scores=probabilities,
n_classes=2,
label_vocabulary=self._label_vocabulary)
return model_fn._TPUEstimatorSpec( # pylint: disable=protected-access
mode=ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
base_head.DEFAULT_SERVING_KEY: classifier_output,
base_head.CLASSIFY_SERVING_KEY: classifier_output,
base_head.REGRESS_SERVING_KEY:
export_output.RegressionOutput(value=logistic),
base_head.PREDICT_SERVING_KEY:
export_output.PredictOutput(predictions)
})
regularized_training_loss = self.loss(
logits=logits,
labels=labels,
features=features,
mode=mode,
regularization_losses=regularization_losses)
scalar_loss = tf.reduce_mean(regularized_training_loss)
# Eval.
if mode == ModeKeys.EVAL:
eval_metrics = self.metrics(regularization_losses=regularization_losses)
return model_fn._TPUEstimatorSpec( # pylint: disable=protected-access
mode=ModeKeys.EVAL,
predictions=predictions,
loss=scalar_loss,
eval_metrics=base_head.create_eval_metrics_tuple(
self.update_metrics, {
'eval_metrics': eval_metrics,
'features': features,
'logits': logits,
'labels': labels,
'regularization_losses': regularization_losses
}))
# Train.
train_op = base_head.create_estimator_spec_train_op(
head_name=self._name,
optimizer=optimizer,
train_op_fn=train_op_fn,
update_ops=update_ops,
trainable_variables=trainable_variables,
regularized_training_loss=regularized_training_loss,
loss_reduction=self._loss_reduction)
# Create summary.
base_head.create_estimator_spec_summary(
regularized_training_loss=scalar_loss,
regularization_losses=regularization_losses,
summary_key_fn=self._summary_key)
return model_fn._TPUEstimatorSpec( # pylint: disable=protected-access
mode=ModeKeys.TRAIN,
predictions=predictions,
loss=scalar_loss,
train_op=train_op)
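A hedged sketch of plugging the head into a canned estimator; the feature column, hidden units, and optimizer below are placeholders. In a real DP-SGD setup the optimizer would come from TF Privacy so that the unreduced, per-example loss returned by loss() can be clipped and noised.
# Hypothetical wiring; feature spec, hidden units, and optimizer are placeholders.
head = DPBinaryClassHead()
feature_columns = [tf.feature_column.numeric_column('x', shape=(4,))]
classifier = tf_estimator.DNNEstimator(
    head=head,
    hidden_units=[16, 8],
    feature_columns=feature_columns,
    optimizer='SGD')  # swap in a tensorflow_privacy DP optimizer for actual DP training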
|
tensorflow_privacy/privacy/estimators/binary_class_head.py
|
"""Binary class head for Estimator that allow integration with TF Privacy."""
import tensorflow as tf
from tensorflow import estimator as tf_estimator
from tensorflow.python.keras.utils import losses_utils # pylint: disable=g-direct-tensorflow-import
from tensorflow_estimator.python.estimator import model_fn
from tensorflow_estimator.python.estimator.canned import prediction_keys
from tensorflow_estimator.python.estimator.export import export_output
from tensorflow_estimator.python.estimator.head import base_head
from tensorflow_estimator.python.estimator.mode_keys import ModeKeys
class DPBinaryClassHead(tf_estimator.BinaryClassHead):
"""Creates a TF Privacy-enabled version of BinaryClassHead."""
def __init__(self,
weight_column=None,
thresholds=None,
label_vocabulary=None,
loss_reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
loss_fn=None,
name=None):
super().__init__(
weight_column=weight_column,
thresholds=thresholds,
label_vocabulary=label_vocabulary,
loss_reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
loss_fn=loss_fn,
name=name)
def loss(self,
labels,
logits,
features=None,
mode=None,
regularization_losses=None):
"""Returns regularized training loss. See `base_head.Head` for details."""
del mode # Unused for this head.
with tf.compat.v1.name_scope(
'losses', values=(logits, labels, regularization_losses, features)):
logits = base_head.check_logits_final_dim(logits, self.logits_dimension)
labels = self._processed_labels(logits, labels)
unweighted_loss, weights = self._unweighted_loss_and_weights(
logits, labels, features)
vector_training_loss = losses_utils.compute_weighted_loss(
unweighted_loss,
sample_weight=weights,
reduction=tf.keras.losses.Reduction.NONE)
regularization_loss = tf.math.add_n(
regularization_losses) if regularization_losses is not None else None
vector_regularized_training_loss = (
tf.add(vector_training_loss, regularization_loss)
if regularization_loss is not None else vector_training_loss)
return vector_regularized_training_loss
def _create_tpu_estimator_spec(self,
features,
mode,
logits,
labels=None,
optimizer=None,
trainable_variables=None,
train_op_fn=None,
update_ops=None,
regularization_losses=None):
"""See superclass for description."""
with tf.compat.v1.name_scope(self._name, 'head'):
# Predict.
pred_keys = prediction_keys.PredictionKeys
predictions = self.predictions(logits)
if mode == ModeKeys.PREDICT:
probabilities = predictions[pred_keys.PROBABILITIES]
logistic = predictions[pred_keys.LOGISTIC]
classifier_output = base_head.classification_output(
scores=probabilities,
n_classes=2,
label_vocabulary=self._label_vocabulary)
return model_fn._TPUEstimatorSpec( # pylint: disable=protected-access
mode=ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
base_head.DEFAULT_SERVING_KEY: classifier_output,
base_head.CLASSIFY_SERVING_KEY: classifier_output,
base_head.REGRESS_SERVING_KEY:
export_output.RegressionOutput(value=logistic),
base_head.PREDICT_SERVING_KEY:
export_output.PredictOutput(predictions)
})
regularized_training_loss = self.loss(
logits=logits,
labels=labels,
features=features,
mode=mode,
regularization_losses=regularization_losses)
scalar_loss = tf.reduce_mean(regularized_training_loss)
# Eval.
if mode == ModeKeys.EVAL:
eval_metrics = self.metrics(regularization_losses=regularization_losses)
return model_fn._TPUEstimatorSpec( # pylint: disable=protected-access
mode=ModeKeys.EVAL,
predictions=predictions,
loss=scalar_loss,
eval_metrics=base_head.create_eval_metrics_tuple(
self.update_metrics, {
'eval_metrics': eval_metrics,
'features': features,
'logits': logits,
'labels': labels,
'regularization_losses': regularization_losses
}))
# Train.
train_op = base_head.create_estimator_spec_train_op(
head_name=self._name,
optimizer=optimizer,
train_op_fn=train_op_fn,
update_ops=update_ops,
trainable_variables=trainable_variables,
regularized_training_loss=regularized_training_loss,
loss_reduction=self._loss_reduction)
# Create summary.
base_head.create_estimator_spec_summary(
regularized_training_loss=scalar_loss,
regularization_losses=regularization_losses,
summary_key_fn=self._summary_key)
return model_fn._TPUEstimatorSpec( # pylint: disable=protected-access
mode=ModeKeys.TRAIN,
predictions=predictions,
loss=scalar_loss,
train_op=train_op)
| 0.951605 | 0.296521 |
import sys
import json
import copy
class TreeNode(object):
    'Binary tree node class'
    def __init__(self, data=None):
        self.data = data
        self.left = None
        self.right = None
class BinTree(object):
    'Binary tree class'
    # Initialize the binary tree
def __init__(self, data = None):
self.queue = []
if data is None:
self.root = None
else:
node = TreeNode(copy.deepcopy(data))
self.root = node
self.queue.append(node)
while self.queue:
tmpNode = self.queue[0]
if tmpNode.data['left'] != '':
node = TreeNode(tmpNode.data['left'])
tmpNode.left = node
self.queue.append(node)
tmpNode.data['left'] = ''
elif tmpNode.data['right'] != '':
node = TreeNode(tmpNode.data['right'])
tmpNode.right = node
self.queue.append(node)
tmpNode.data['right'] = ''
else:
self.queue.pop(0)
    # Pre-order traversal
    def preOrderTraversal(self, root):
        # Termination condition for the traversal
if root is None:
return
if root.data['name'] == 'join_table':
print(root.data['name'])
self.preOrderTraversal(root.left)
self.preOrderTraversal(root.right)
class GenerateStmt(object):
    'Generate a new SQL statement'
def __init__(self,tree=None, comment=None):
self.root = copy.deepcopy(tree.root)
self.commentTree = copy.deepcopy(comment.root)
self.stmt = ''
self.pre_node = None
    # Generate the statement from the tree
def generate(self, root):
if root is None:
return
if root.data['type'] != 'NONTERMINAL':
self.stmt = self.stmt + self.str_concat(self.pre_node, root)
self.pre_node = root
self.generate(root.left)
self.generate(root.right)
    # Concatenate the string for one node
    def str_concat(self, pre_node, root):
        if pre_node is None:
            pre_line = 1  # line of the previous node
            pre_column = 0  # last column of the previous node
        else:
            pre_line = int(pre_node.data['last_line'])  # line of the previous node
            pre_column = int(pre_node.data['last_column'])  # last column of the previous node
        curr_line = int(root.data['first_line'])  # line of the current node
        curr_column = int(root.data['first_column'])  # first column of the current node
        # Does the output need a line break?
if pre_line == curr_line:
blank_cnt = curr_column - pre_column - 1
curr_str = ' ' * blank_cnt + root.data['value']
return curr_str
else:
line_cnt = curr_line - pre_line
curr_str = ''
while line_cnt > 0:
curr_str = curr_str + self.get_comment(self.commentTree ,curr_line - line_cnt) + '\n'
line_cnt = line_cnt - 1
curr_str = curr_str + ' ' * curr_column + root.data['value']
return curr_str
    # Check the given line for a comment
def get_comment(self, root, pre_line):
queue = [root]
while queue:
node = queue.pop(0)
if ( node.data['name'] == 'COMMENT'
and int(node.data['first_line']) == pre_line
):
return node.data['value']
if node.left is not None:
queue.append(node.left)
if node.right is not None:
queue.append(node.right)
return ''
if __name__ == '__main__':
arg1 = sys.argv[1]
    # Open the input files
    sqlFo = open(arg1+".json","r")
    commentFo = open(arg1+".cmt","r")
    # Read the files
    sqlStr = sqlFo.read()
    commentStr = commentFo.read()
    # Close the opened files
    sqlFo.close()
    commentFo.close()
    # Parse the JSON
    sqlJson = json.loads(sqlStr)
    commentJson = json.loads(commentStr)
    # Build the binary trees
    sql_tree = BinTree(sqlJson)
    comment_tree = BinTree(commentJson)
    # Associate fields and generate the statement
generatestmt = GenerateStmt(sql_tree,comment_tree)
generatestmt.generate(generatestmt.root)
print(generatestmt.stmt)
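For clarity, a toy sketch of the node shape BinTree and GenerateStmt expect from the parser output; all field values below are made up for a one-token statement.
# Illustrative parsed-node dict; positions are strings because the code int()-converts them.
example_node = {
    'name': 'SELECT_SYM', 'type': 'TERMINAL', 'value': 'SELECT',
    'first_line': '1', 'first_column': '1',
    'last_line': '1', 'last_column': '6',
    'left': '', 'right': '',
}
sql_tree_demo = BinTree(example_node)
comment_tree_demo = BinTree(example_node)  # no COMMENT nodes in this toy input
demo = GenerateStmt(sql_tree_demo, comment_tree_demo)
demo.generate(demo.root)
print(demo.stmt)  # -> SELECT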
|
optimizer/generate_stmt.py
|
| 0.182936 | 0.193604 |
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
class Analytics:
def __init__(self, states):
self.states = states
def show_heatmap(self):
'Plot a heatmap of the state values obtained via the Monte Carlo policy evaluation'
# convert to numpy array
arr = np.array(self.states)
arr_v = [[arr[j,i].v_pi_mean for i in range(9)] for j in range(9)]
# plot heatmap
plt.rcParams['font.size'] = 6
sns.heatmap(arr_v, annot=True, fmt='.1f')
plt.savefig('./2_reinforcement_learning/figs/heatmap.png')
plt.show()
print('Done')
def temp_diff_lineplot(self, values, algorithm):
'Show line plot with actions per step'
indexes = [i for i in range(len(values))]
sns.lineplot(x=indexes, y=values)
plt.xlabel('episodes')
plt.ylabel('actions')
plt.savefig('./2_reinforcement_learning/figs/'+algorithm+'_actions_per_episode.png')
plt.show()
plt.clf()
print(f"{algorithm}: actions after learning: {values[-1]}")
def temp_diff_lineplot_actions_rewards(self, actions, rewards, algorithm):
'Show line plot with actions/rewards per step'
indexes = [i for i in range(len(actions))]
# df = pd.DataFrame({'index': indexes, 'actions': actions, 'rewards': rewards})
# grouped = df.groupby({x: x // 100 for x in range(len(df))})
sns.lineplot(x=indexes, y=actions)
sns.lineplot(x=indexes, y=rewards)
plt.xlabel('episodes')
plt.legend(['n actions', 'rewards'])
plt.savefig('./2_reinforcement_learning/figs/'+algorithm+'_actions_rewards_per_episode.png')
plt.show()
plt.clf()
print(f"actions after learning: {actions[-1]}\nMax total reward yield: {rewards[-1]}")
def compare_algorithms_lineplot(self, sarsa_actions, qlearning_actions, value_type):
'Show line plot with actions/rewards per step'
        index = [i for i in range(max(len(sarsa_actions), len(qlearning_actions)))]
df = pd.DataFrame({'episodes': index, 'sarsa': sarsa_actions, 'qlearning': qlearning_actions})
_df = df.melt(id_vars='episodes', value_vars=['sarsa', 'qlearning'])
sns.lineplot(data=_df, x='episodes', y='value', hue='variable')
plt.xlabel('episodes')
plt.ylabel('num of actions')
plt.legend(['SARSA', 'Q-learning'])
plt.savefig('./2_reinforcement_learning/figs/'+value_type+'_actions_rewards_per_episode.png')
plt.show()
plt.clf()
print(f"SARSA: actions after learning - {sarsa_actions[-1]}")
print(f"Q-Learning: actions after learning - {qlearning_actions[-1]}")
|
2_reinforcement_learning/Analytics.py
|
| 0.395718 | 0.730626 |
import numpy as np
from sklearn.utils.validation import check_is_fitted
class PrefitVotingClassifier(object):
"""Stripped-down version of VotingClassifier that uses prefit estimators"""
def __init__(self, estimators, feats_per_estimator, voting='hard', weights=None):
self.estimators = [e[1] for e in estimators]
self.feats_per_estimator = feats_per_estimator
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y, sample_weight=None):
raise NotImplementedError
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
check_is_fitted(self, 'estimators')
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions.astype('int'))
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
check_is_fitted(self, 'estimators')
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_samples, n_classifiers]
Class labels predicted by each classifier.
"""
check_is_fitted(self, 'estimators')
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([self.estimators[i].predict(self._select_features(X, i)) for i in range(len(self.estimators))]).T
def _select_features(self, X, estimator_index):
X_selected = X[:, self.feats_per_estimator[estimator_index]]
return X_selected
|
code/prefit_voting_classifier.py
|
import numpy as np
from sklearn.utils.validation import check_is_fitted
class PrefitVotingClassifier(object):
"""Stripped-down version of VotingClassifier that uses prefit estimators"""
def __init__(self, estimators, feats_per_estimator, voting='hard', weights=None):
self.estimators = [e[1] for e in estimators]
self.feats_per_estimator = feats_per_estimator
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y, sample_weight=None):
raise NotImplementedError
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
check_is_fitted(self, 'estimators')
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions.astype('int'))
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
check_is_fitted(self, 'estimators')
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_samples, n_classifiers]
Class labels predicted by each classifier.
"""
check_is_fitted(self, 'estimators')
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([self.estimators[i].predict(self._select_features(X, i)) for i in range(len(self.estimators))]).T
def _select_features(self, X, estimator_index):
X_selected = X[:, self.feats_per_estimator[estimator_index]]
return X_selected
| 0.947805 | 0.602734 |
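A hedged usage sketch for PrefitVotingClassifier: two estimators are fitted on disjoint feature subsets and combined by hard voting. The estimator choices and the feature split are illustrative, not taken from the original project.

# Illustrative only: constructing PrefitVotingClassifier from already-fitted estimators.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=200, n_features=6, random_state=0)
feats = [np.array([0, 1, 2]), np.array([3, 4, 5])]  # assumed per-estimator feature columns

lr = LogisticRegression().fit(X[:, feats[0]], y)
tree = DecisionTreeClassifier(random_state=0).fit(X[:, feats[1]], y)

voter = PrefitVotingClassifier(estimators=[('lr', lr), ('tree', tree)],
                               feats_per_estimator=feats, voting='hard')
print(voter.predict(X[:5]))  # majority vote over the two prefit estimators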
from functools import wraps
from typing import Any, Callable, Optional, Union, no_type_check
from pydantic import errors
from pydantic_yaml.compat.types import YamlStr
from semver import VersionInfo
__all__ = ["SemVer"]
Comparator = Callable[["SemVer", Any], bool]
def _comparator(operator: Comparator) -> Comparator:
"""Wrap a Version binary op method in a type-check."""
@wraps(operator)
def wrapper(self: "SemVer", other: Any) -> bool:
if not isinstance(other, SemVer):
try:
other = SemVer(other)
except Exception:
return NotImplemented
return operator(self, other)
return wrapper
class SemVer(YamlStr): # want to inherit from VersionInfo, but metaclass conflict
"""Semantic Version string for Pydantic.
Depends on `semver>=2,<3`, see:
https://python-semver.readthedocs.io/en/3.0.0-dev.2/install.html#release-policy
Waiting to be implemented here:
https://github.com/samuelcolvin/pydantic/discussions/2506
"""
allow_build: bool = True
allow_prerelease: bool = True
__slots__ = ["_info"]
@no_type_check
def __new__(cls, version: Optional[str], **kwargs) -> object:
return YamlStr.__new__(cls, cls.parse(**kwargs) if version is None else version)
def __init__(self, version: str):
str.__init__(version)
self._info = VersionInfo.parse(version)
@classmethod
def parse(
cls,
major: int,
minor: int = 0,
patch: int = 0,
prerelease: Optional[str] = None,
build: Optional[str] = None,
) -> str:
return str(VersionInfo(major, minor, patch, prerelease, build))
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, value: Union[str, "SemVer"]) -> "SemVer":
vi = VersionInfo.parse(value)
if not cls.allow_build and (vi.build is not None):
raise errors.NotNoneError()
if not cls.allow_prerelease and (vi.prerelease is not None):
raise errors.NotNoneError()
return cls(value)
@property
def info(self) -> VersionInfo:
return self._info
@info.setter
def info(self, value):
raise AttributeError("attribute 'info' is readonly")
@property
def major(self) -> int:
"""The major part of a version (read-only)."""
return self._info.major
@major.setter
def major(self, value):
raise AttributeError("attribute 'major' is readonly")
@property
def minor(self) -> int:
"""The minor part of a version (read-only)."""
return self._info.minor
@minor.setter
def minor(self, value):
raise AttributeError("attribute 'minor' is readonly")
@property
def patch(self) -> int:
"""The patch part of a version (read-only)."""
return self._info.patch
@patch.setter
def patch(self, value):
raise AttributeError("attribute 'patch' is readonly")
@property
def prerelease(self) -> Optional[str]:
"""The prerelease part of a version (read-only)."""
return self._info.prerelease
@prerelease.setter
def prerelease(self, value):
raise AttributeError("attribute 'prerelease' is readonly")
@property
def build(self) -> Optional[str]:
"""The build part of a version (read-only)."""
return self._info.build
@build.setter
def build(self, value):
raise AttributeError("attribute 'build' is readonly")
def __hash__(self) -> int:
return super().__hash__() # use string hashing
@_comparator
def __eq__(self, other: "SemVer"):
return self._info == other._info
@_comparator
def __ne__(self, other: "SemVer"):
return self._info != other._info
@_comparator
def __lt__(self, other: "SemVer"):
return self._info < other._info
@_comparator
def __le__(self, other: "SemVer"):
return self._info <= other._info
@_comparator
def __gt__(self, other: "SemVer"):
return self._info > other._info
@_comparator
def __ge__(self, other: "SemVer"):
return self._info >= other._info
|
pydantic_yaml/ext/semver.py
|
from functools import wraps
from typing import Any, Callable, Optional, Union, no_type_check
from pydantic import errors
from pydantic_yaml.compat.types import YamlStr
from semver import VersionInfo
__all__ = ["SemVer"]
Comparator = Callable[["SemVer", Any], bool]
def _comparator(operator: Comparator) -> Comparator:
"""Wrap a Version binary op method in a type-check."""
@wraps(operator)
def wrapper(self: "SemVer", other: Any) -> bool:
if not isinstance(other, SemVer):
try:
other = SemVer(other)
except Exception:
return NotImplemented
return operator(self, other)
return wrapper
class SemVer(YamlStr): # want to inherit from VersionInfo, but metaclass conflict
"""Semantic Version string for Pydantic.
Depends on `semver>=2,<3`, see:
https://python-semver.readthedocs.io/en/3.0.0-dev.2/install.html#release-policy
Waiting to be implemented here:
https://github.com/samuelcolvin/pydantic/discussions/2506
"""
allow_build: bool = True
allow_prerelease: bool = True
__slots__ = ["_info"]
@no_type_check
def __new__(cls, version: Optional[str], **kwargs) -> object:
return YamlStr.__new__(cls, cls.parse(**kwargs) if version is None else version)
def __init__(self, version: str):
str.__init__(version)
self._info = VersionInfo.parse(version)
@classmethod
def parse(
cls,
major: int,
minor: int = 0,
patch: int = 0,
prerelease: Optional[str] = None,
build: Optional[str] = None,
) -> str:
return str(VersionInfo(major, minor, patch, prerelease, build))
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, value: Union[str, "SemVer"]) -> "SemVer":
vi = VersionInfo.parse(value)
if not cls.allow_build and (vi.build is not None):
raise errors.NotNoneError()
if not cls.allow_prerelease and (vi.prerelease is not None):
raise errors.NotNoneError()
return cls(value)
@property
def info(self) -> VersionInfo:
return self._info
@info.setter
def info(self, value):
raise AttributeError("attribute 'info' is readonly")
@property
def major(self) -> int:
"""The major part of a version (read-only)."""
return self._info.major
@major.setter
def major(self, value):
raise AttributeError("attribute 'major' is readonly")
@property
def minor(self) -> int:
"""The minor part of a version (read-only)."""
return self._info.minor
@minor.setter
def minor(self, value):
raise AttributeError("attribute 'minor' is readonly")
@property
def patch(self) -> int:
"""The patch part of a version (read-only)."""
return self._info.patch
@patch.setter
def patch(self, value):
raise AttributeError("attribute 'patch' is readonly")
@property
def prerelease(self) -> Optional[str]:
"""The prerelease part of a version (read-only)."""
return self._info.prerelease
@prerelease.setter
def prerelease(self, value):
raise AttributeError("attribute 'prerelease' is readonly")
@property
def build(self) -> Optional[str]:
"""The build part of a version (read-only)."""
return self._info.build
@build.setter
def build(self, value):
raise AttributeError("attribute 'build' is readonly")
def __hash__(self) -> int:
return super().__hash__() # use string hashing
@_comparator
def __eq__(self, other: "SemVer"):
return self._info == other._info
@_comparator
def __ne__(self, other: "SemVer"):
return self._info != other._info
@_comparator
def __lt__(self, other: "SemVer"):
return self._info < other._info
@_comparator
def __le__(self, other: "SemVer"):
return self._info <= other._info
@_comparator
def __gt__(self, other: "SemVer"):
return self._info > other._info
@_comparator
def __ge__(self, other: "SemVer"):
return self._info >= other._info
| 0.930466 | 0.205077 |
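A short usage sketch for the SemVer type above, assuming pydantic v1 and semver 2.x as the class docstring states; the Release model is invented for illustration.

# Illustrative only: SemVer (pydantic_yaml/ext/semver.py, shown above) as a pydantic v1 field type.
from pydantic import BaseModel

class Release(BaseModel):
    version: SemVer

r = Release(version="1.2.3-rc.1+build.5")
print(r.version.major, r.version.minor, r.version.patch)  # 1 2 3
print(r.version.prerelease, r.version.build)              # rc.1 build.5
print(SemVer("1.2.3") < SemVer("1.10.0"))                 # True: semantic, not lexical, ordering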
from .code_generator_info import CodeGeneratorInfoMutable
from .composition_parts import Identifier
from .composition_parts import WithCodeGeneratorInfo
from .composition_parts import WithComponent
from .composition_parts import WithDebugInfo
from .composition_parts import WithIdentifier
class Union(WithIdentifier, WithCodeGeneratorInfo, WithComponent,
WithDebugInfo):
"""
Union class makes a group of union types with the same flattened member
types and the same result whether it includes a nullable type or not.
For example, the following union types will be grouped into one Union
instance.
(A? or B or C), (A or B? or C), ((A or B) or C?), (A or (B or C?)), ...
All these unions have the same set of flattened member types (A, B, C) and
include a nullable type.
However, all following union types will be grouped into separate Union
instances.
(A or B), ([X] A or B), ([Y] A or B)
IdlType(A), IdlType([X] A), and IdlType([Y] A) are all distinguished from
each other as they behave differently. Bindings code generators are
expected to define an implementation class for each Union instance.
"""
_null_token = "Null"
@classmethod
def unique_token(cls, union_type):
"""
Returns a unique token per set of union types that are considered
as the same group.
"""
assert union_type.is_union
token_pieces = []
def collect_token_pieces(idl_type):
idl_type = idl_type.unwrap()
if idl_type.is_union:
for member_type in idl_type.member_types:
collect_token_pieces(member_type)
else:
# Typename is not guaranteed to be unique, however it's
# unlikely that a conflict happens.
token_pieces.append(
idl_type.type_name_with_extended_attribute_key_values)
collect_token_pieces(union_type)
token_pieces.sort()
if union_type.does_include_nullable_type:
token_pieces.append(cls._null_token)
return tuple(token_pieces)
class IR(object):
# Note that Union.IR is, despite its name, very different from other
# IDL definitions' IR classes. This class is not meant to be stored in
# IRMap nor managed with 'compilation phase'.
def __init__(self, token, union_types):
assert all(union_type.is_union for union_type in union_types)
self.token = token
self._member_set = set(token)
self.union_types = union_types
self.typedefs = []
self.sub_union_irs = []
self.public_object = None
def __lt__(self, other):
if len(self.token) == len(other.token):
return self.token < other.token
else:
return len(self.token) < len(other.token)
def contains(self, other):
assert isinstance(other, Union.IR)
return (self.token != other.token
and self._member_set.issuperset(other._member_set))
def __init__(self, ir):
assert isinstance(ir, Union.IR)
assert ir.public_object is None
identifier = Identifier('Union_{}'.format('_'.join(ir.token)))
union_type = ir.union_types[0]
flattened_member_types = union_type.flattened_member_types
does_include_nullable_type = union_type.does_include_nullable_type
does_include_nullable_or_dict = (
union_type.does_include_nullable_or_dict)
typedef_members = set()
union_members = set()
for union_type in ir.union_types:
assert union_type.flattened_member_types == flattened_member_types
assert (union_type.does_include_nullable_type ==
does_include_nullable_type)
for member_type in union_type.member_types:
if member_type.is_typedef:
typedef_members.add(member_type.typedef_object)
for sub_union_ir in ir.sub_union_irs:
assert isinstance(sub_union_ir.public_object, Union)
typedef_members.update(sub_union_ir.typedefs)
union_members.add(sub_union_ir.public_object)
components = set()
for_testing = [False]
def collect_primary_component(idl_type):
type_definition_object = idl_type.type_definition_object
if type_definition_object and type_definition_object.components:
components.add(type_definition_object.components[0])
if (type_definition_object and
type_definition_object.code_generator_info.for_testing):
for_testing[0] = True
for idl_type in flattened_member_types:
idl_type.apply_to_all_composing_elements(collect_primary_component)
code_generator_info = CodeGeneratorInfoMutable()
code_generator_info.set_for_testing(for_testing[0])
WithIdentifier.__init__(self, identifier)
WithCodeGeneratorInfo.__init__(self,
code_generator_info,
readonly=True)
WithComponent.__init__(self, sorted(components), readonly=True)
WithDebugInfo.__init__(self)
sort_key_typename = lambda idl_type: (
idl_type.type_name_with_extended_attribute_key_values)
sort_key_identifier = lambda x: x.identifier
self._idl_types = tuple(ir.union_types)
self._member_tokens = ir.token
self._flattened_member_types = tuple(
sorted(flattened_member_types, key=sort_key_typename))
self._does_include_nullable_type = does_include_nullable_type
self._does_include_nullable_or_dict = does_include_nullable_or_dict
self._typedef_members = tuple(
sorted(typedef_members, key=sort_key_identifier))
self._union_members = tuple(
sorted(union_members, key=sort_key_identifier))
self._aliasing_typedefs = tuple(
sorted(ir.typedefs, key=sort_key_identifier))
ir.public_object = self
@property
def idl_types(self):
"""Returns a list of IdlTypes which this object represents."""
return self._idl_types
@property
def member_tokens(self):
"""Returns a list of unique names of union member types."""
return self._member_tokens
@property
def flattened_member_types(self):
"""
Returns the same list of flattened member types as
IdlType.flattened_member_types.
"""
return self._flattened_member_types
@property
def does_include_nullable_type(self):
"""
Returns True if any member type is nullable or a member union
includes a nullable type.
"""
return self._does_include_nullable_type
@property
def does_include_nullable_or_dict(self):
"""
Returns True if this type includes a nullable type or a dictionary
type.
"""
return self._does_include_nullable_or_dict
@property
def typedef_members(self):
"""
Returns a list of typedef types which are direct members of union types
which this object represents.
Given the following union types,
(AT or B), (A or BT) where typedef A AT, and typedef B BT
typedef_members returns a list of IdlType(AT) and IdlType(BT).
"""
return self._typedef_members
@property
def union_members(self):
"""
Returns a list of union types which are direct members of union types
which this object represents.
Given the following union types,
((A or B) or C), (A or (B or C))
union_members returns a list of IdlType(A or B) and IdlType(B or C).
"""
return self._union_members
@property
def aliasing_typedefs(self):
"""
Returns a list of typedef types which are aliases to union types which
this object represents.
Given the following typedef definitions,
typedef ((A or B) or C) T1;
typedef (A or (B or C)) T2;
aliasing_typedefs returns a list of IdlType(T1) and IdlType(T2).
"""
return self._aliasing_typedefs
|
third_party/blink/renderer/bindings/scripts/web_idl/union.py
|
from .code_generator_info import CodeGeneratorInfoMutable
from .composition_parts import Identifier
from .composition_parts import WithCodeGeneratorInfo
from .composition_parts import WithComponent
from .composition_parts import WithDebugInfo
from .composition_parts import WithIdentifier
class Union(WithIdentifier, WithCodeGeneratorInfo, WithComponent,
WithDebugInfo):
"""
Union class makes a group of union types with the same flattened member
types and the same result whether it includes a nullable type or not.
For example, the following union types will be grouped into one Union
instance.
(A? or B or C), (A or B? or C), ((A or B) or C?), (A or (B or C?)), ...
All these unions have the same set of flattened member types (A, B, C) and
include a nullable type.
However, all following union types will be grouped into separate Union
instances.
(A or B), ([X] A or B), ([Y] A or B)
IdlType(A), IdlType([X] A), and IdlType([Y] A) are all distinguished from
each other as they behave differently. Bindings code generators are
expected to define an implementation class for each Union instance.
"""
_null_token = "Null"
@classmethod
def unique_token(cls, union_type):
"""
Returns a unique token per set of union types that are considered
as the same group.
"""
assert union_type.is_union
token_pieces = []
def collect_token_pieces(idl_type):
idl_type = idl_type.unwrap()
if idl_type.is_union:
for member_type in idl_type.member_types:
collect_token_pieces(member_type)
else:
# Typename is not guaranteed to be unique, however it's
# unlikely that a conflict happens.
token_pieces.append(
idl_type.type_name_with_extended_attribute_key_values)
collect_token_pieces(union_type)
token_pieces.sort()
if union_type.does_include_nullable_type:
token_pieces.append(cls._null_token)
return tuple(token_pieces)
class IR(object):
# Note that Union.IR is, despite its name, very different from other
# IDL definitions' IR classes. This class is not meant to be stored in
# IRMap nor managed with 'compilation phase'.
def __init__(self, token, union_types):
assert all(union_type.is_union for union_type in union_types)
self.token = token
self._member_set = set(token)
self.union_types = union_types
self.typedefs = []
self.sub_union_irs = []
self.public_object = None
def __lt__(self, other):
if len(self.token) == len(other.token):
return self.token < other.token
else:
return len(self.token) < len(other.token)
def contains(self, other):
assert isinstance(other, Union.IR)
return (self.token != other.token
and self._member_set.issuperset(other._member_set))
def __init__(self, ir):
assert isinstance(ir, Union.IR)
assert ir.public_object is None
identifier = Identifier('Union_{}'.format('_'.join(ir.token)))
union_type = ir.union_types[0]
flattened_member_types = union_type.flattened_member_types
does_include_nullable_type = union_type.does_include_nullable_type
does_include_nullable_or_dict = (
union_type.does_include_nullable_or_dict)
typedef_members = set()
union_members = set()
for union_type in ir.union_types:
assert union_type.flattened_member_types == flattened_member_types
assert (union_type.does_include_nullable_type ==
does_include_nullable_type)
for member_type in union_type.member_types:
if member_type.is_typedef:
typedef_members.add(member_type.typedef_object)
for sub_union_ir in ir.sub_union_irs:
assert isinstance(sub_union_ir.public_object, Union)
typedef_members.update(sub_union_ir.typedefs)
union_members.add(sub_union_ir.public_object)
components = set()
for_testing = [False]
def collect_primary_component(idl_type):
type_definition_object = idl_type.type_definition_object
if type_definition_object and type_definition_object.components:
components.add(type_definition_object.components[0])
if (type_definition_object and
type_definition_object.code_generator_info.for_testing):
for_testing[0] = True
for idl_type in flattened_member_types:
idl_type.apply_to_all_composing_elements(collect_primary_component)
code_generator_info = CodeGeneratorInfoMutable()
code_generator_info.set_for_testing(for_testing[0])
WithIdentifier.__init__(self, identifier)
WithCodeGeneratorInfo.__init__(self,
code_generator_info,
readonly=True)
WithComponent.__init__(self, sorted(components), readonly=True)
WithDebugInfo.__init__(self)
sort_key_typename = lambda idl_type: (
idl_type.type_name_with_extended_attribute_key_values)
sort_key_identifier = lambda x: x.identifier
self._idl_types = tuple(ir.union_types)
self._member_tokens = ir.token
self._flattened_member_types = tuple(
sorted(flattened_member_types, key=sort_key_typename))
self._does_include_nullable_type = does_include_nullable_type
self._does_include_nullable_or_dict = does_include_nullable_or_dict
self._typedef_members = tuple(
sorted(typedef_members, key=sort_key_identifier))
self._union_members = tuple(
sorted(union_members, key=sort_key_identifier))
self._aliasing_typedefs = tuple(
sorted(ir.typedefs, key=sort_key_identifier))
ir.public_object = self
@property
def idl_types(self):
"""Returns a list of IdlTypes which this object represents."""
return self._idl_types
@property
def member_tokens(self):
"""Returns a list of unique names of union member types."""
return self._member_tokens
@property
def flattened_member_types(self):
"""
Returns the same list of flattened member types as
IdlType.flattened_member_types.
"""
return self._flattened_member_types
@property
def does_include_nullable_type(self):
"""
Returns True if any member type is nullable or a member union
includes a nullable type.
"""
return self._does_include_nullable_type
@property
def does_include_nullable_or_dict(self):
"""
Returns True if this type includes a nullable type or a dictionary
type.
"""
return self._does_include_nullable_or_dict
@property
def typedef_members(self):
"""
Returns a list of typedef types which are direct members of union types
which this object represents.
Given the following union types,
(AT or B), (A or BT) where typedef A AT, and typedef B BT
typedef_members returns a list of IdlType(AT) and IdlType(BT).
"""
return self._typedef_members
@property
def union_members(self):
"""
Returns a list of union types which are direct members of union types
which this object represents.
Given the following union types,
((A or B) or C), (A or (B or C))
union_members returns a list of IdlType(A or B) and IdlType(B or C).
"""
return self._union_members
@property
def aliasing_typedefs(self):
"""
Returns a list of typedef types which are aliases to union types which
this object represents.
Given the following typedef definitions,
typedef ((A or B) or C) T1;
typedef (A or (B or C)) T2;
aliasing_typedefs returns a list of IdlType(T1) and IdlType(T2).
"""
return self._aliasing_typedefs
| 0.890306 | 0.349838 |
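A self-contained sketch of how Union.unique_token groups differently-ordered unions. FakeType is a stand-in exposing only the attributes unique_token reads; it is not the real web_idl IdlType.

# Stand-in demonstration only; FakeType mimics just enough of IdlType for unique_token.
class FakeType:
    def __init__(self, name=None, members=None, nullable=False):
        self.member_types = members or []
        self.is_union = bool(members)
        self.type_name_with_extended_attribute_key_values = name
        self.does_include_nullable_type = nullable

    def unwrap(self):
        return self

A, B, C = FakeType('A'), FakeType('B'), FakeType('C')
u1 = FakeType(members=[FakeType(members=[A, B]), C], nullable=True)  # ((A or B) or C), nullable
u2 = FakeType(members=[A, FakeType(members=[B, C])], nullable=True)  # (A or (B or C)), nullable
print(Union.unique_token(u1))  # ('A', 'B', 'C', 'Null')
print(Union.unique_token(u2))  # same token, so both unions fall into one Union group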
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
from azure.core.exceptions import HttpResponseError, ResourceExistsError
from azure.storage.blob import BlobServiceClient
try:
import settings_real as settings
except ImportError:
import blob_settings_fake as settings
from testcase import (
StorageTestCase,
TestMode,
record
)
SOURCE_FILE = 'SampleSource.txt'
class TestCommonBlobSamples(StorageTestCase):
connection_string = settings.CONNECTION_STRING
def setUp(self):
data = b"Lorem ipsum dolor sit amet, consectetur adipiscing elit"
with open(SOURCE_FILE, 'wb') as stream:
stream.write(data)
super(TestCommonBlobSamples, self).setUp()
def tearDown(self):
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
for container in ['containerformyblobs', 'containerfordeletedblobs', 'leasemyblobscontainer']:
try:
blob_service_client.delete_container(container)
except HttpResponseError:
pass
if os.path.isfile(SOURCE_FILE):
try:
os.remove(SOURCE_FILE)
except:
pass
return super(TestCommonBlobSamples, self).tearDown()
#--Begin Blob Samples-----------------------------------------------------------------
@record
def test_blob_snapshots(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
# Instantiate a ContainerClient
container_client = blob_service_client.get_container_client("containerformyblobs")
# Create new Container
container_client.create_container()
# Upload a blob to the container
with open(SOURCE_FILE, "rb") as data:
container_client.upload_blob(name="my_blob", data=data)
# Get a BlobClient for a specific blob
blob_client = blob_service_client.get_blob_client(container="containerformyblobs", blob="my_blob")
# [START create_blob_snapshot]
# Create a read-only snapshot of the blob at this point in time
snapshot_blob = blob_client.create_snapshot()
# Get the snapshot ID
print(snapshot_blob.get('snapshot'))
# [END create_blob_snapshot]
# Delete only the snapshot (blob itself is retained)
blob_client.delete_blob(delete_snapshots="only")
# Delete container
blob_service_client.delete_container("containerformyblobs")
@record
def test_soft_delete_and_undelete_blob(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
# Create a retention policy to retain deleted blobs
from azure.storage.blob import RetentionPolicy
delete_retention_policy = RetentionPolicy(enabled=True, days=1)
# Set the retention policy on the service
blob_service_client.set_service_properties(delete_retention_policy=delete_retention_policy)
# Instantiate a ContainerClient
container_client = blob_service_client.get_container_client("containerfordeletedblobs")
# Create new Container
try:
container_client.create_container()
except ResourceExistsError:
# Container already created
pass
# Upload a blob to the container
with open(SOURCE_FILE, "rb") as data:
blob_client = container_client.upload_blob(name="my_blob", data=data)
# Soft delete blob in the container (blob can be recovered with undelete)
blob_client.delete_blob()
# [START undelete_blob]
# Undelete the blob before the retention policy expires
blob_client.undelete_blob()
# [END undelete_blob]
# [START get_blob_properties]
properties = blob_client.get_blob_properties()
# [END get_blob_properties]
assert properties is not None
# Delete container
blob_service_client.delete_container("containerfordeletedblobs")
@record
def test_acquire_lease_on_blob(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
# Instantiate a ContainerClient
container_client = blob_service_client.get_container_client("leasemyblobscontainer")
# Create new Container
container_client.create_container()
# Upload a blob to the container
with open(SOURCE_FILE, "rb") as data:
container_client.upload_blob(name="my_blob", data=data)
# Get the blob client
blob_client = blob_service_client.get_blob_client("leasemyblobscontainer", "my_blob")
# [START acquire_lease_on_blob]
# Acquire a lease on the blob
lease = blob_client.acquire_lease()
# Delete blob by passing in the lease
blob_client.delete_blob(lease=lease)
# [END acquire_lease_on_blob]
# Delete container
blob_service_client.delete_container("leasemyblobscontainer")
@record
def test_start_copy_blob_from_url_and_abort_copy(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
# Instantiate a ContainerClient
container_client = blob_service_client.get_container_client("copyblobcontainer")
# Create new Container
container_client.create_container()
try:
# [START copy_blob_from_url]
# Get the blob client with the source blob
source_blob = "http://www.gutenberg.org/files/59466/59466-0.txt"
copied_blob = blob_service_client.get_blob_client("copyblobcontainer", '59466-0.txt')
# start copy and check copy status
copy = copied_blob.start_copy_from_url(source_blob)
props = copied_blob.get_blob_properties()
print(props.copy.status)
# [END copy_blob_from_url]
copy_id = props.copy.id
# [START abort_copy_blob_from_url]
# Passing in copy id to abort copy operation
copied_blob.abort_copy(copy_id)
# check copy status
props = copied_blob.get_blob_properties()
print(props.copy.status)
# [END abort_copy_blob_from_url]
finally:
blob_service_client.delete_container("copyblobcontainer")
|
sdk/storage/azure-storage-blob/tests/test_blob_samples_common.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
from azure.core.exceptions import HttpResponseError, ResourceExistsError
from azure.storage.blob import BlobServiceClient
try:
import settings_real as settings
except ImportError:
import blob_settings_fake as settings
from testcase import (
StorageTestCase,
TestMode,
record
)
SOURCE_FILE = 'SampleSource.txt'
class TestCommonBlobSamples(StorageTestCase):
connection_string = settings.CONNECTION_STRING
def setUp(self):
data = b"Lorem ipsum dolor sit amet, consectetur adipiscing elit"
with open(SOURCE_FILE, 'wb') as stream:
stream.write(data)
super(TestCommonBlobSamples, self).setUp()
def tearDown(self):
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
for container in ['containerformyblobs', 'containerfordeletedblobs', 'leasemyblobscontainer']:
try:
blob_service_client.delete_container(container)
except HttpResponseError:
pass
if os.path.isfile(SOURCE_FILE):
try:
os.remove(SOURCE_FILE)
except:
pass
return super(TestCommonBlobSamples, self).tearDown()
#--Begin Blob Samples-----------------------------------------------------------------
@record
def test_blob_snapshots(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
# Instantiate a ContainerClient
container_client = blob_service_client.get_container_client("containerformyblobs")
# Create new Container
container_client.create_container()
# Upload a blob to the container
with open(SOURCE_FILE, "rb") as data:
container_client.upload_blob(name="my_blob", data=data)
# Get a BlobClient for a specific blob
blob_client = blob_service_client.get_blob_client(container="containerformyblobs", blob="my_blob")
# [START create_blob_snapshot]
# Create a read-only snapshot of the blob at this point in time
snapshot_blob = blob_client.create_snapshot()
# Get the snapshot ID
print(snapshot_blob.get('snapshot'))
# [END create_blob_snapshot]
# Delete only the snapshot (blob itself is retained)
blob_client.delete_blob(delete_snapshots="only")
# Delete container
blob_service_client.delete_container("containerformyblobs")
@record
def test_soft_delete_and_undelete_blob(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
# Create a retention policy to retain deleted blobs
from azure.storage.blob import RetentionPolicy
delete_retention_policy = RetentionPolicy(enabled=True, days=1)
# Set the retention policy on the service
blob_service_client.set_service_properties(delete_retention_policy=delete_retention_policy)
# Instantiate a ContainerClient
container_client = blob_service_client.get_container_client("containerfordeletedblobs")
# Create new Container
try:
container_client.create_container()
except ResourceExistsError:
# Container already created
pass
# Upload a blob to the container
with open(SOURCE_FILE, "rb") as data:
blob_client = container_client.upload_blob(name="my_blob", data=data)
# Soft delete blob in the container (blob can be recovered with undelete)
blob_client.delete_blob()
# [START undelete_blob]
# Undelete the blob before the retention policy expires
blob_client.undelete_blob()
# [END undelete_blob]
# [START get_blob_properties]
properties = blob_client.get_blob_properties()
# [END get_blob_properties]
assert properties is not None
# Delete container
blob_service_client.delete_container("containerfordeletedblobs")
@record
def test_acquire_lease_on_blob(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
# Instantiate a ContainerClient
container_client = blob_service_client.get_container_client("leasemyblobscontainer")
# Create new Container
container_client.create_container()
# Upload a blob to the container
with open(SOURCE_FILE, "rb") as data:
container_client.upload_blob(name="my_blob", data=data)
# Get the blob client
blob_client = blob_service_client.get_blob_client("leasemyblobscontainer", "my_blob")
# [START acquire_lease_on_blob]
# Acquire a lease on the blob
lease = blob_client.acquire_lease()
# Delete blob by passing in the lease
blob_client.delete_blob(lease=lease)
# [END acquire_lease_on_blob]
# Delete container
blob_service_client.delete_container("leasemyblobscontainer")
@record
def test_start_copy_blob_from_url_and_abort_copy(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
# Instantiate a ContainerClient
container_client = blob_service_client.get_container_client("copyblobcontainer")
# Create new Container
container_client.create_container()
try:
# [START copy_blob_from_url]
# Get the blob client with the source blob
source_blob = "http://www.gutenberg.org/files/59466/59466-0.txt"
copied_blob = blob_service_client.get_blob_client("copyblobcontainer", '59466-0.txt')
# start copy and check copy status
copy = copied_blob.start_copy_from_url(source_blob)
props = copied_blob.get_blob_properties()
print(props.copy.status)
# [END copy_blob_from_url]
copy_id = props.copy.id
# [START abort_copy_blob_from_url]
# Passing in copy id to abort copy operation
copied_blob.abort_copy(copy_id)
# check copy status
props = copied_blob.get_blob_properties()
print(props.copy.status)
# [END abort_copy_blob_from_url]
finally:
blob_service_client.delete_container("copyblobcontainer")
| 0.669205 | 0.188063 |
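The tests above import settings_real and fall back to blob_settings_fake when it is absent. The real contents of that fallback module are not shown here; a minimal stub would only need the one attribute the tests read.

# Hypothetical blob_settings_fake.py (assumed contents; only CONNECTION_STRING is imported above).
CONNECTION_STRING = (
    "DefaultEndpointsProtocol=https;"
    "AccountName=fakestorageaccount;"
    "AccountKey=ZmFrZWtleQ==;"
    "EndpointSuffix=core.windows.net"
)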
import sys
sys.path.append("./lambda/helper/python")
import boto3
import unittest
from moto import mock_s3
from moto import mock_dynamodb2
import datastore
DOCUMENTS_TABLE_NAME="DocumentsTestTable"
OUTPUT_TABLE_NAME="OutputTestTable"
current_session = boto3.session.Session()
REGION = current_session.region_name
print(f"Test region is {REGION}")
@mock_dynamodb2
class TestDocumentStore(unittest.TestCase):
def setUp(self):
self.conn = boto3.client('dynamodb',region_name=REGION)
self.conn.create_table(
TableName = DOCUMENTS_TABLE_NAME,
KeySchema = [{"AttributeName": "documentId","KeyType":"HASH"}],
AttributeDefinitions=[{"AttributeName": "documentId", "AttributeType": "S"}],
ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
)
self.conn.put_item(
TableName = DOCUMENTS_TABLE_NAME,
Item={
"documentId": {"S" : "b1a54fda-1809-49d7-8f19-0d1688eb65b9"},
"objectName": {"S": "public/samples/Misc/expense.png"},
"bucketName": {"S": "dusstack-sample-s3-bucket"},
"documentStatus": {"S": "IN_PROGRESS"}
}
)
self.conn.put_item(
TableName = DOCUMENTS_TABLE_NAME,
Item={
"documentId": {"S" : "b1a99fda-1809-49d7-8f19-0d1688eb65b9"},
"objectName": {"S": "public/samples/Misc/expense.png"},
"bucketName": {"S": "dusstack-sample-s3-bucket"},
"documentStatus": {"S": "IN_PROGRESS"}
}
)
self.ds = datastore.DocumentStore(DOCUMENTS_TABLE_NAME,OUTPUT_TABLE_NAME)
def test_create_document_success(self):
bucketName = "dusstack-sample-s3-bucket"
objectName = "public/samples/Finance/report.pdf"
documentId = "b1a66fda-1809-49d7-8f19-0d1688eb65b9"
response = self.ds.createDocument(documentId, bucketName, objectName)
self.assertEqual(response, None)
def test_create_duplicate_document_id_throws_error(self):
bucketName = "dusstack-sample-s3-bucket"
objectName = "public/samples/Finance/report.pdf"
documentId = "b1a54fda-1809-49d7-8f19-0d1688eb65b9"
response = self.ds.createDocument(documentId, bucketName, objectName)
self.assertEqual(response, {'Error': 'Document already exist.'})
def test_update_document_status_success(self):
documentId = "b1a54fda-1809-49d7-8f19-0d1688eb65b9"
response = self.ds.updateDocumentStatus(documentId, "FAILED")
self.assertEqual(response, None)
def test_update_document_status_throws_error_when_document_does_not_exist(self):
documentId = "b1333fda-1809-49d7-8f19-0d1688eb65b9"
response = self.ds.updateDocumentStatus(documentId, "FAILED")
self.assertEqual(response, {'Error': 'Document does not exist.'})
def test_mark_document_complete_success(self):
documentId = "b1a54fda-1809-49d7-8f19-0d1688eb65b9"
response = self.ds.markDocumentComplete(documentId)
documentStatus = self.conn.get_item(
Key={'documentId': {'S': documentId}},
TableName=DOCUMENTS_TABLE_NAME
)['Item']['documentStatus']['S']
self.assertEqual(documentStatus, "SUCCEEDED")
self.assertEqual(response, None)
def test_delete_document_success(self):
documentId = "b1a54fda-1809-49d7-8f19-0d1688eb65b9"
self.ds.deleteDocument(documentId)
response = self.conn.get_item(
Key={'documentId': {'S': documentId}},
TableName=DOCUMENTS_TABLE_NAME
)
self.assertTrue('Item' not in response)
def test_get_documents(self):
response = self.ds.getDocuments()
self.assertEqual(len(response['documents']),2)
document_ids = []
for document in response['documents']:
document_ids.append(document['documentId'])
self.assertTrue('b1a54fda-1809-49d7-8f19-0d1688eb65b9' in document_ids)
self.assertTrue('b1a99fda-1809-49d7-8f19-0d1688eb65b9' in document_ids)
def test_get_document_count(self):
response = self.ds.getDocumentCount()
self.assertEqual(response, 2)
def test_get_table(self):
response = self.ds.getTable()
self.assertEqual(response.name,DOCUMENTS_TABLE_NAME)
self.assertTrue("dynamodb.Table" in response.__class__.__name__)
def test_get_document(self):
documentId = 'b1a99fda-1809-49d7-8f19-0d1688eb65b9'
response = self.ds.getDocument(documentId)
self.assertEqual(response['documentStatus'], 'IN_PROGRESS')
self.assertEqual(response['documentId'], documentId)
self.assertEqual(response['bucketName'], "dusstack-sample-s3-bucket")
def tearDown(self):
self.conn.delete_table(TableName=DOCUMENTS_TABLE_NAME)
if __name__=='__main__':
unittest.main()
|
source/test/test_datastore.py
|
import sys
sys.path.append("./lambda/helper/python")
import boto3
import unittest
from moto import mock_s3
from moto import mock_dynamodb2
import datastore
DOCUMENTS_TABLE_NAME="DocumentsTestTable"
OUTPUT_TABLE_NAME="OutputTestTable"
current_session = boto3.session.Session()
REGION = current_session.region_name
print(f"Test region is {REGION}")
@mock_dynamodb2
class TestDocumentStore(unittest.TestCase):
def setUp(self):
self.conn = boto3.client('dynamodb',region_name=REGION)
self.conn.create_table(
TableName = DOCUMENTS_TABLE_NAME,
KeySchema = [{"AttributeName": "documentId","KeyType":"HASH"}],
AttributeDefinitions=[{"AttributeName": "documentId", "AttributeType": "S"}],
ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
)
self.conn.put_item(
TableName = DOCUMENTS_TABLE_NAME,
Item={
"documentId": {"S" : "b1a54fda-1809-49d7-8f19-0d1688eb65b9"},
"objectName": {"S": "public/samples/Misc/expense.png"},
"bucketName": {"S": "dusstack-sample-s3-bucket"},
"documentStatus": {"S": "IN_PROGRESS"}
}
)
self.conn.put_item(
TableName = DOCUMENTS_TABLE_NAME,
Item={
"documentId": {"S" : "b1a99fda-1809-49d7-8f19-0d1688eb65b9"},
"objectName": {"S": "public/samples/Misc/expense.png"},
"bucketName": {"S": "dusstack-sample-s3-bucket"},
"documentStatus": {"S": "IN_PROGRESS"}
}
)
self.ds = datastore.DocumentStore(DOCUMENTS_TABLE_NAME,OUTPUT_TABLE_NAME)
def test_create_document_success(self):
bucketName = "dusstack-sample-s3-bucket"
objectName = "public/samples/Finance/report.pdf"
documentId = "b1a66fda-1809-49d7-8f19-0d1688eb65b9"
response = self.ds.createDocument(documentId, bucketName, objectName)
self.assertEqual(response, None)
def test_create_duplicate_document_id_throws_error(self):
bucketName = "dusstack-sample-s3-bucket"
objectName = "public/samples/Finance/report.pdf"
documentId = "b1a54fda-1809-49d7-8f19-0d1688eb65b9"
response = self.ds.createDocument(documentId, bucketName, objectName)
self.assertEqual(response, {'Error': 'Document already exist.'})
def test_update_document_status_success(self):
documentId = "b1a54fda-1809-49d7-8f19-0d1688eb65b9"
response = self.ds.updateDocumentStatus(documentId, "FAILED")
self.assertEqual(response, None)
def test_update_document_status_throws_error_when_document_does_not_exist(self):
documentId = "b1333fda-1809-49d7-8f19-0d1688eb65b9"
response = self.ds.updateDocumentStatus(documentId, "FAILED")
self.assertEqual(response, {'Error': 'Document does not exist.'})
def test_mark_document_complete_success(self):
documentId = "b1a54fda-1809-49d7-8f19-0d1688eb65b9"
response = self.ds.markDocumentComplete(documentId)
documentStatus = self.conn.get_item(
Key={'documentId': {'S': documentId}},
TableName=DOCUMENTS_TABLE_NAME
)['Item']['documentStatus']['S']
self.assertEqual(documentStatus, "SUCCEEDED")
self.assertEqual(response, None)
def test_delete_document_success(self):
documentId = "b1a54fda-1809-49d7-8f19-0d1688eb65b9"
self.ds.deleteDocument(documentId)
response = self.conn.get_item(
Key={'documentId': {'S': documentId}},
TableName=DOCUMENTS_TABLE_NAME
)
self.assertTrue('Item' not in response)
def test_get_documents(self):
response = self.ds.getDocuments()
self.assertEqual(len(response['documents']),2)
document_ids = []
for document in response['documents']:
document_ids.append(document['documentId'])
self.assertTrue('b1a54fda-1809-49d7-8f19-0d1688eb65b9' in document_ids)
self.assertTrue('b1a99fda-1809-49d7-8f19-0d1688eb65b9' in document_ids)
def test_get_document_count(self):
response = self.ds.getDocumentCount()
self.assertEqual(response, 2)
def test_get_table(self):
response = self.ds.getTable()
self.assertEqual(response.name,DOCUMENTS_TABLE_NAME)
self.assertTrue("dynamodb.Table" in response.__class__.__name__)
def test_get_document(self):
documentId = 'b1a99fda-1809-49d7-8f19-0d1688eb65b9'
response = self.ds.getDocument(documentId)
self.assertEqual(response['documentStatus'], 'IN_PROGRESS')
self.assertEqual(response['documentId'], documentId)
self.assertEqual(response['bucketName'], "dusstack-sample-s3-bucket")
def tearDown(self):
self.conn.delete_table(TableName=DOCUMENTS_TABLE_NAME)
if __name__=='__main__':
unittest.main()
| 0.207455 | 0.169715 |
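The datastore module exercised above is not included in this section. From the calls the tests make, its DocumentStore interface can be sketched roughly as follows; the names come from the tests, but signatures and return values are inferred, not copied from the real implementation.

# Inferred interface sketch only; the real lambda/helper/python/datastore.py is not shown here.
class DocumentStore:
    def __init__(self, documents_table_name, output_table_name): ...
    def createDocument(self, documentId, bucketName, objectName):
        """None on success; {'Error': 'Document already exist.'} for a duplicate id."""
    def updateDocumentStatus(self, documentId, status):
        """None on success; {'Error': 'Document does not exist.'} for an unknown id."""
    def markDocumentComplete(self, documentId): ...
    def deleteDocument(self, documentId): ...
    def getDocuments(self): ...          # returns {'documents': [...]} per the assertions above
    def getDocumentCount(self): ...
    def getDocument(self, documentId): ...
    def getTable(self): ...              # returns the boto3 dynamodb.Table resource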
from numba.core import registry, serialize, dispatcher
from numba import types
from numba.core.errors import UnsupportedError
import dpctl
from numba.core.compiler_lock import global_compiler_lock
class TargetDispatcher(serialize.ReduceMixin, metaclass=dispatcher.DispatcherMeta):
__numba__ = 'py_func'
target_offload_gpu = '__dppl_offload_gpu__'
target_offload_cpu = '__dppl_offload_cpu__'
target_dppl = 'dppy'
def __init__(self, py_func, wrapper, target, parallel_options, compiled=None):
self.__py_func = py_func
self.__target = target
self.__wrapper = wrapper
self.__compiled = compiled if compiled is not None else {}
self.__parallel = parallel_options
self.__doc__ = py_func.__doc__
self.__name__ = py_func.__name__
self.__module__ = py_func.__module__
def __call__(self, *args, **kwargs):
return self.get_compiled()(*args, **kwargs)
def __getattr__(self, name):
return getattr(self.get_compiled(), name)
def __get__(self, obj, objtype=None):
return self.get_compiled().__get__(obj, objtype)
def __repr__(self):
return self.get_compiled().__repr__()
@classmethod
def _rebuild(cls, py_func, wrapper, target, parallel, compiled):
self = cls(py_func, wrapper, target, parallel, compiled)
return self
def get_compiled(self, target=None):
if target is None:
target = self.__target
disp = self.get_current_disp()
if disp not in self.__compiled:
with global_compiler_lock:
if disp not in self.__compiled:
self.__compiled[disp] = self.__wrapper(self.__py_func, disp)
return self.__compiled[disp]
def __is_with_context_target(self, target):
return target is None or target == TargetDispatcher.target_dppl
def get_current_disp(self):
target = self.__target
parallel = self.__parallel
offload = isinstance(parallel, dict) and parallel.get('offload') is True
if (dpctl.is_in_device_context() or offload):
if not self.__is_with_context_target(target):
raise UnsupportedError(f"Can't use 'with' context with explicitly specified target '{target}'")
if parallel is False or (isinstance(parallel, dict) and parallel.get('offload') is False):
raise UnsupportedError(f"Can't use 'with' context with parallel option '{parallel}'")
from numba_dppy import dppl_offload_dispatcher
if target is None:
if dpctl.get_current_device_type() == dpctl.device_type.gpu:
return registry.dispatcher_registry[TargetDispatcher.target_offload_gpu]
elif dpctl.get_current_device_type() == dpctl.device_type.cpu:
return registry.dispatcher_registry[TargetDispatcher.target_offload_cpu]
else:
if dpctl.is_in_device_context():
raise UnsupportedError('Unknown dppl device type')
if offload:
if dpctl.has_gpu_queues():
return registry.dispatcher_registry[TargetDispatcher.target_offload_gpu]
elif dpctl.has_cpu_queues():
return registry.dispatcher_registry[TargetDispatcher.target_offload_cpu]
if target is None:
target = 'cpu'
return registry.dispatcher_registry[target]
def _reduce_states(self):
return dict(
py_func=self.__py_func,
wrapper=self.__wrapper,
target=self.__target,
parallel=self.__parallel,
compiled=self.__compiled
)
|
numba-dppy/numba_dppy/target_dispatcher.py
|
from numba.core import registry, serialize, dispatcher
from numba import types
from numba.core.errors import UnsupportedError
import dpctl
from numba.core.compiler_lock import global_compiler_lock
class TargetDispatcher(serialize.ReduceMixin, metaclass=dispatcher.DispatcherMeta):
__numba__ = 'py_func'
target_offload_gpu = '__dppl_offload_gpu__'
target_offload_cpu = '__dppl_offload_cpu__'
target_dppl = 'dppy'
def __init__(self, py_func, wrapper, target, parallel_options, compiled=None):
self.__py_func = py_func
self.__target = target
self.__wrapper = wrapper
self.__compiled = compiled if compiled is not None else {}
self.__parallel = parallel_options
self.__doc__ = py_func.__doc__
self.__name__ = py_func.__name__
self.__module__ = py_func.__module__
def __call__(self, *args, **kwargs):
return self.get_compiled()(*args, **kwargs)
def __getattr__(self, name):
return getattr(self.get_compiled(), name)
def __get__(self, obj, objtype=None):
return self.get_compiled().__get__(obj, objtype)
def __repr__(self):
return self.get_compiled().__repr__()
@classmethod
def _rebuild(cls, py_func, wrapper, target, parallel, compiled):
self = cls(py_func, wrapper, target, parallel, compiled)
return self
def get_compiled(self, target=None):
if target is None:
target = self.__target
disp = self.get_current_disp()
if disp not in self.__compiled:
with global_compiler_lock:
if disp not in self.__compiled:
self.__compiled[disp] = self.__wrapper(self.__py_func, disp)
return self.__compiled[disp]
def __is_with_context_target(self, target):
return target is None or target == TargetDispatcher.target_dppl
def get_current_disp(self):
target = self.__target
parallel = self.__parallel
offload = isinstance(parallel, dict) and parallel.get('offload') is True
if (dpctl.is_in_device_context() or offload):
if not self.__is_with_context_target(target):
raise UnsupportedError(f"Can't use 'with' context with explicitly specified target '{target}'")
if parallel is False or (isinstance(parallel, dict) and parallel.get('offload') is False):
raise UnsupportedError(f"Can't use 'with' context with parallel option '{parallel}'")
from numba_dppy import dppl_offload_dispatcher
if target is None:
if dpctl.get_current_device_type() == dpctl.device_type.gpu:
return registry.dispatcher_registry[TargetDispatcher.target_offload_gpu]
elif dpctl.get_current_device_type() == dpctl.device_type.cpu:
return registry.dispatcher_registry[TargetDispatcher.target_offload_cpu]
else:
if dpctl.is_in_device_context():
raise UnsupportedError('Unknown dppl device type')
if offload:
if dpctl.has_gpu_queues():
return registry.dispatcher_registry[TargetDispatcher.target_offload_gpu]
elif dpctl.has_cpu_queues():
return registry.dispatcher_registry[TargetDispatcher.target_offload_cpu]
if target is None:
target = 'cpu'
return registry.dispatcher_registry[target]
def _reduce_states(self):
return dict(
py_func=self.__py_func,
wrapper=self.__wrapper,
target=self.__target,
parallel=self.__parallel,
compiled=self.__compiled
)
| 0.72662 | 0.087759 |
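A sketch of how TargetDispatcher might be constructed and called. The trivial wrapper below stands in for numba_dppy's real compilation wrapper, so nothing is actually compiled; it only shows the dispatch path through get_current_disp(). It assumes numba and dpctl are importable and that no device context is active.

# Illustrative stand-in wrapper; the real one comes from numba_dppy's jit machinery.
def fake_wrapper(py_func, dispatcher_class):
    return py_func  # skip compilation for the sketch

def add(a, b):
    return a + b

dispatched = TargetDispatcher(add, fake_wrapper, target=None,
                              parallel_options={'offload': False})
print(dispatched(2, 3))  # 5; get_current_disp() falls back to the 'cpu' registry entry here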
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from utils import utils
from models.BaseModel import GeneralModel
from helpers.KGReader import KGReader
class CFKG(GeneralModel):
reader = 'KGReader'
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--margin', type=float, default=0,
help='Margin in hinge loss.')
return GeneralModel.parse_model_args(parser)
def __init__(self, args, corpus: KGReader):
self.emb_size = args.emb_size
self.margin = args.margin
self.relation_num = corpus.n_relations
self.entity_num = corpus.n_entities
super().__init__(args, corpus)
def _define_params(self):
self.e_embeddings = nn.Embedding(self.user_num + self.entity_num, self.emb_size)
# ↑ user and entity embeddings, user first
self.r_embeddings = nn.Embedding(self.relation_num, self.emb_size)
# ↑ relation embedding: 0 is used for "buy" between users and items
self.loss_function = nn.MarginRankingLoss(margin=self.margin)
def forward(self, feed_dict):
self.check_list = []
head_ids = feed_dict['head_id'] # [batch_size, -1]
tail_ids = feed_dict['tail_id'] # [batch_size, -1]
relation_ids = feed_dict['relation_id'] # [batch_size, -1]
head_vectors = self.e_embeddings(head_ids)
tail_vectors = self.e_embeddings(tail_ids)
relation_vectors = self.r_embeddings(relation_ids)
prediction = -((head_vectors + relation_vectors - tail_vectors)**2).sum(-1)
return {'prediction': prediction.view(feed_dict['batch_size'], -1)}
def loss(self, out_dict):
predictions = out_dict['prediction']
batch_size = predictions.shape[0]
pos_pred, neg_pred = predictions[:, :2].flatten(), predictions[:, 2:].flatten()
target = torch.from_numpy(np.ones(batch_size * 2, dtype=np.float32)).to(self.device)
loss = self.loss_function(pos_pred, neg_pred, target)
return loss
class Dataset(GeneralModel.Dataset):
def _prepare(self):
if self.phase == 'train':
interaction_df = pd.DataFrame({
'head': self.data['user_id'],
'tail': self.data['item_id'],
'relation': np.zeros_like(self.data['user_id']) # "buy" relation
})
self.data = utils.df_to_dict(pd.concat((self.corpus.relation_df, interaction_df), axis=0))
self.neg_heads = np.zeros(len(self), dtype=int)
self.neg_tails = np.zeros(len(self), dtype=int)
super()._prepare()
def _get_feed_dict(self, index):
if self.phase == 'train':
head, tail = self.data['head'][index], self.data['tail'][index]
relation = self.data['relation'][index]
head_id = np.array([head, head, head, self.neg_heads[index]])
tail_id = np.array([tail, tail, self.neg_tails[index], tail])
relation_id = np.array([relation] * 4)
if relation > 0: # head is not a user
head_id = head_id + self.corpus.n_users
else:
target_item = self.data['item_id'][index]
neg_items = self.neg_items[index]
tail_id = np.concatenate([[target_item], neg_items])
head_id = self.data['user_id'][index] * np.ones_like(tail_id)
relation_id = np.zeros_like(tail_id)
tail_id += self.corpus.n_users # tail must not be a user
feed_dict = {'head_id': head_id, 'tail_id': tail_id, 'relation_id': relation_id}
return feed_dict
def actions_before_epoch(self):
for i in range(len(self)):
head, tail, relation = self.data['head'][i], self.data['tail'][i], self.data['relation'][i]
self.neg_tails[i] = np.random.randint(1, self.corpus.n_items)
if relation == 0:
self.neg_heads[i] = np.random.randint(1, self.corpus.n_users)
while self.neg_tails[i] in self.corpus.user_clicked_set[head]:
self.neg_tails[i] = np.random.randint(1, self.corpus.n_items)
while tail in self.corpus.user_clicked_set[self.neg_heads[i]]:
self.neg_heads[i] = np.random.randint(1, self.corpus.n_users)
else:
self.neg_heads[i] = np.random.randint(1, self.corpus.n_items)
while (head, relation, self.neg_tails[i]) in self.corpus.triplet_set:
self.neg_tails[i] = np.random.randint(1, self.corpus.n_items)
while (self.neg_heads[i], relation, tail) in self.corpus.triplet_set:
self.neg_heads[i] = np.random.randint(1, self.corpus.n_items)
|
src/models/general/CFKG.py
|
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from utils import utils
from models.BaseModel import GeneralModel
from helpers.KGReader import KGReader
class CFKG(GeneralModel):
reader = 'KGReader'
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--margin', type=float, default=0,
help='Margin in hinge loss.')
return GeneralModel.parse_model_args(parser)
def __init__(self, args, corpus: KGReader):
self.emb_size = args.emb_size
self.margin = args.margin
self.relation_num = corpus.n_relations
self.entity_num = corpus.n_entities
super().__init__(args, corpus)
def _define_params(self):
self.e_embeddings = nn.Embedding(self.user_num + self.entity_num, self.emb_size)
# ↑ user and entity embeddings, user first
self.r_embeddings = nn.Embedding(self.relation_num, self.emb_size)
# ↑ relation embedding: 0 is used for "buy" between users and items
self.loss_function = nn.MarginRankingLoss(margin=self.margin)
def forward(self, feed_dict):
self.check_list = []
head_ids = feed_dict['head_id'] # [batch_size, -1]
tail_ids = feed_dict['tail_id'] # [batch_size, -1]
relation_ids = feed_dict['relation_id'] # [batch_size, -1]
head_vectors = self.e_embeddings(head_ids)
tail_vectors = self.e_embeddings(tail_ids)
relation_vectors = self.r_embeddings(relation_ids)
prediction = -((head_vectors + relation_vectors - tail_vectors)**2).sum(-1)
return {'prediction': prediction.view(feed_dict['batch_size'], -1)}
def loss(self, out_dict):
predictions = out_dict['prediction']
batch_size = predictions.shape[0]
pos_pred, neg_pred = predictions[:, :2].flatten(), predictions[:, 2:].flatten()
target = torch.from_numpy(np.ones(batch_size * 2, dtype=np.float32)).to(self.device)
loss = self.loss_function(pos_pred, neg_pred, target)
return loss
class Dataset(GeneralModel.Dataset):
def _prepare(self):
if self.phase == 'train':
interaction_df = pd.DataFrame({
'head': self.data['user_id'],
'tail': self.data['item_id'],
'relation': np.zeros_like(self.data['user_id']) # "buy" relation
})
self.data = utils.df_to_dict(pd.concat((self.corpus.relation_df, interaction_df), axis=0))
self.neg_heads = np.zeros(len(self), dtype=int)
self.neg_tails = np.zeros(len(self), dtype=int)
super()._prepare()
def _get_feed_dict(self, index):
if self.phase == 'train':
head, tail = self.data['head'][index], self.data['tail'][index]
relation = self.data['relation'][index]
head_id = np.array([head, head, head, self.neg_heads[index]])
tail_id = np.array([tail, tail, self.neg_tails[index], tail])
relation_id = np.array([relation] * 4)
if relation > 0: # head is not a user
head_id = head_id + self.corpus.n_users
else:
target_item = self.data['item_id'][index]
neg_items = self.neg_items[index]
tail_id = np.concatenate([[target_item], neg_items])
head_id = self.data['user_id'][index] * np.ones_like(tail_id)
relation_id = np.zeros_like(tail_id)
tail_id += self.corpus.n_users # tail must not be a user
feed_dict = {'head_id': head_id, 'tail_id': tail_id, 'relation_id': relation_id}
return feed_dict
def actions_before_epoch(self):
for i in range(len(self)):
head, tail, relation = self.data['head'][i], self.data['tail'][i], self.data['relation'][i]
self.neg_tails[i] = np.random.randint(1, self.corpus.n_items)
if relation == 0:
self.neg_heads[i] = np.random.randint(1, self.corpus.n_users)
while self.neg_tails[i] in self.corpus.user_clicked_set[head]:
self.neg_tails[i] = np.random.randint(1, self.corpus.n_items)
while tail in self.corpus.user_clicked_set[self.neg_heads[i]]:
self.neg_heads[i] = np.random.randint(1, self.corpus.n_users)
else:
self.neg_heads[i] = np.random.randint(1, self.corpus.n_items)
while (head, relation, self.neg_tails[i]) in self.corpus.triplet_set:
self.neg_tails[i] = np.random.randint(1, self.corpus.n_items)
while (self.neg_heads[i], relation, tail) in self.corpus.triplet_set:
self.neg_heads[i] = np.random.randint(1, self.corpus.n_items)
| 0.801315 | 0.183155 |
from __future__ import print_function
import json
import traceback
import pandas as pd
import flask
from six import reraise
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import MALFORMED_REQUEST, BAD_REQUEST
from mlflow.utils.rest_utils import NumpyEncoder
from mlflow.utils.logging_utils import eprint
from mlflow.server.handlers import catch_mlflow_exception
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from mlflow.utils import get_jsonable_obj
CONTENT_TYPE_CSV = "text/csv"
CONTENT_TYPE_JSON = "application/json"
CONTENT_TYPE_JSON_RECORDS_ORIENTED = "application/json; format=pandas-records"
CONTENT_TYPE_JSON_SPLIT_ORIENTED = "application/json; format=pandas-split"
CONTENT_TYPES = [
CONTENT_TYPE_CSV,
CONTENT_TYPE_JSON,
CONTENT_TYPE_JSON_RECORDS_ORIENTED,
CONTENT_TYPE_JSON_SPLIT_ORIENTED
]
def parse_json_input(json_input, orientation="split"):
"""
:param json_input: A JSON-formatted string representation of a Pandas DataFrame, or a stream
containing such a string representation.
:param orientation: The Pandas DataFrame orientation of the JSON input. This is either 'split'
or 'records'.
"""
# pylint: disable=broad-except
try:
return pd.read_json(json_input, orient=orientation)
except Exception:
_handle_serving_error(
error_message=(
"Failed to parse input as a Pandas DataFrame. Ensure that the input is"
" a valid JSON-formatted Pandas DataFrame with the `{orientation}` orientation"
" produced using the `pandas.DataFrame.to_json(..., orient='{orientation}')`"
" method.".format(orientation=orientation)),
error_code=MALFORMED_REQUEST)
def parse_csv_input(csv_input):
"""
:param csv_input: A CSV-formatted string representation of a Pandas DataFrame, or a stream
containing such a string representation.
"""
# pylint: disable=broad-except
try:
return pd.read_csv(csv_input)
except Exception:
_handle_serving_error(
error_message=(
"Failed to parse input as a Pandas DataFrame. Ensure that the input is"
" a valid CSV-formatted Pandas DataFrame produced using the"
" `pandas.DataFrame.to_csv()` method."),
error_code=MALFORMED_REQUEST)
def _handle_serving_error(error_message, error_code):
"""
Logs information about an exception thrown by model inference code that is currently being
handled and reraises it with the specified error message. The exception stack trace
is also included in the reraised error message.
:param error_message: A message for the reraised exception.
:param error_code: An appropriate error code for the reraised exception. This should be one of
the codes listed in the `mlflow.protos.databricks_pb2` proto.
"""
traceback_buf = StringIO()
traceback.print_exc(file=traceback_buf)
reraise(MlflowException,
MlflowException(
message=error_message,
error_code=error_code,
stack_trace=traceback_buf.getvalue()))
logged_pandas_records_format_warning = False
def init(model):
"""
Initialize the server with an already-loaded pyfunc model and return the Flask app.
"""
app = flask.Flask(__name__)
@app.route('/ping', methods=['GET'])
def ping(): # pylint: disable=unused-variable
"""
Determine if the container is working and healthy.
We declare it healthy if we can load the model successfully.
"""
health = model is not None
status = 200 if health else 404
return flask.Response(response='\n', status=status, mimetype='application/json')
@app.route('/invocations', methods=['POST'])
@catch_mlflow_exception
def transformation(): # pylint: disable=unused-variable
"""
Do an inference on a single batch of data. In this sample server,
we take data as CSV or JSON, convert it to a Pandas DataFrame,
generate predictions and convert them back to JSON.
"""
# Convert from CSV to pandas
if flask.request.content_type == CONTENT_TYPE_CSV:
data = flask.request.data.decode('utf-8')
csv_input = StringIO(data)
data = parse_csv_input(csv_input=csv_input)
elif flask.request.content_type == CONTENT_TYPE_JSON:
global logged_pandas_records_format_warning
if not logged_pandas_records_format_warning:
eprint(
"**IMPORTANT UPDATE**: Starting in MLflow 0.9.0, requests received with a"
" `Content-Type` header value of `{json_content_type}` will be interpreted"
" as JSON-serialized Pandas DataFrames with the `split` orientation, instead"
" of the `records` orientation. The `records` orientation is unsafe because"
" it may not preserve column ordering. Client code should be updated to"
" either send serialized DataFrames with the `split` orientation and the"
" `{split_json_content_type}` content type (recommended) or use the"
" `{records_json_content_type}` content type with the `records` orientation."
" For more information, see"
" https://www.mlflow.org/docs/latest/models.html#pyfunc-deployment.\n".format(
json_content_type=CONTENT_TYPE_JSON,
split_json_content_type=CONTENT_TYPE_JSON_SPLIT_ORIENTED,
records_json_content_type=CONTENT_TYPE_JSON_RECORDS_ORIENTED))
logged_pandas_records_format_warning = True
data = parse_json_input(json_input=flask.request.data.decode('utf-8'),
orientation="records")
elif flask.request.content_type == CONTENT_TYPE_JSON_RECORDS_ORIENTED:
data = parse_json_input(json_input=flask.request.data.decode('utf-8'),
orientation="records")
elif flask.request.content_type == CONTENT_TYPE_JSON_SPLIT_ORIENTED:
data = parse_json_input(json_input=flask.request.data.decode('utf-8'),
orientation="split")
else:
return flask.Response(
response=("This predictor only supports the following content types,"
" {supported_content_types}. Got '{received_content_type}'.".format(
supported_content_types=CONTENT_TYPES,
received_content_type=flask.request.content_type)),
status=415,
mimetype='text/plain')
# Do the prediction
# pylint: disable=broad-except
try:
raw_predictions = model.predict(data)
except Exception:
_handle_serving_error(
error_message=(
"Encountered an unexpected error while evaluating the model. Verify"
" that the serialized input Dataframe is compatible with the model for"
" inference."),
error_code=BAD_REQUEST)
predictions = get_jsonable_obj(raw_predictions, pandas_orientation="records")
result = json.dumps(predictions, cls=NumpyEncoder)
return flask.Response(response=result, status=200, mimetype='application/json')
return app
|
mlflow/pyfunc/scoring_server.py
|
from __future__ import print_function
import json
import traceback
import pandas as pd
import flask
from six import reraise
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import MALFORMED_REQUEST, BAD_REQUEST
from mlflow.utils.rest_utils import NumpyEncoder
from mlflow.utils.logging_utils import eprint
from mlflow.server.handlers import catch_mlflow_exception
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from mlflow.utils import get_jsonable_obj
CONTENT_TYPE_CSV = "text/csv"
CONTENT_TYPE_JSON = "application/json"
CONTENT_TYPE_JSON_RECORDS_ORIENTED = "application/json; format=pandas-records"
CONTENT_TYPE_JSON_SPLIT_ORIENTED = "application/json; format=pandas-split"
CONTENT_TYPES = [
CONTENT_TYPE_CSV,
CONTENT_TYPE_JSON,
CONTENT_TYPE_JSON_RECORDS_ORIENTED,
CONTENT_TYPE_JSON_SPLIT_ORIENTED
]
def parse_json_input(json_input, orientation="split"):
"""
:param json_input: A JSON-formatted string representation of a Pandas DataFrame, or a stream
containing such a string representation.
:param orientation: The Pandas DataFrame orientation of the JSON input. This is either 'split'
or 'records'.
"""
# pylint: disable=broad-except
try:
return pd.read_json(json_input, orient=orientation)
except Exception:
_handle_serving_error(
error_message=(
"Failed to parse input as a Pandas DataFrame. Ensure that the input is"
" a valid JSON-formatted Pandas DataFrame with the `{orientation}` orientation"
" produced using the `pandas.DataFrame.to_json(..., orient='{orientation}')`"
" method.".format(orientation=orientation)),
error_code=MALFORMED_REQUEST)
def parse_csv_input(csv_input):
"""
:param csv_input: A CSV-formatted string representation of a Pandas DataFrame, or a stream
containing such a string representation.
"""
# pylint: disable=broad-except
try:
return pd.read_csv(csv_input)
except Exception:
_handle_serving_error(
error_message=(
"Failed to parse input as a Pandas DataFrame. Ensure that the input is"
" a valid CSV-formatted Pandas DataFrame produced using the"
" `pandas.DataFrame.to_csv()` method."),
error_code=MALFORMED_REQUEST)
def _handle_serving_error(error_message, error_code):
"""
Logs information about an exception thrown by model inference code that is currently being
handled and reraises it with the specified error message. The exception stack trace
is also included in the reraised error message.
:param error_message: A message for the reraised exception.
:param error_code: An appropriate error code for the reraised exception. This should be one of
the codes listed in the `mlflow.protos.databricks_pb2` proto.
"""
traceback_buf = StringIO()
traceback.print_exc(file=traceback_buf)
reraise(MlflowException,
MlflowException(
message=error_message,
error_code=error_code,
stack_trace=traceback_buf.getvalue()))
logged_pandas_records_format_warning = False
def init(model):
"""
Initialize the server with an already-loaded pyfunc model and return the Flask app.
"""
app = flask.Flask(__name__)
@app.route('/ping', methods=['GET'])
def ping(): # pylint: disable=unused-variable
"""
Determine if the container is working and healthy.
We declare it healthy if we can load the model successfully.
"""
health = model is not None
status = 200 if health else 404
return flask.Response(response='\n', status=status, mimetype='application/json')
@app.route('/invocations', methods=['POST'])
@catch_mlflow_exception
def transformation(): # pylint: disable=unused-variable
"""
Do an inference on a single batch of data. In this sample server,
we take data as CSV or JSON, convert it to a Pandas DataFrame,
generate predictions and convert them back to JSON.
"""
# Convert from CSV to pandas
if flask.request.content_type == CONTENT_TYPE_CSV:
data = flask.request.data.decode('utf-8')
csv_input = StringIO(data)
data = parse_csv_input(csv_input=csv_input)
elif flask.request.content_type == CONTENT_TYPE_JSON:
global logged_pandas_records_format_warning
if not logged_pandas_records_format_warning:
eprint(
"**IMPORTANT UPDATE**: Starting in MLflow 0.9.0, requests received with a"
" `Content-Type` header value of `{json_content_type}` will be interpreted"
" as JSON-serialized Pandas DataFrames with the `split` orientation, instead"
" of the `records` orientation. The `records` orientation is unsafe because"
" it may not preserve column ordering. Client code should be updated to"
" either send serialized DataFrames with the `split` orientation and the"
" `{split_json_content_type}` content type (recommended) or use the"
" `{records_json_content_type}` content type with the `records` orientation."
" For more information, see"
" https://www.mlflow.org/docs/latest/models.html#pyfunc-deployment.\n".format(
json_content_type=CONTENT_TYPE_JSON,
split_json_content_type=CONTENT_TYPE_JSON_SPLIT_ORIENTED,
records_json_content_type=CONTENT_TYPE_JSON_RECORDS_ORIENTED))
logged_pandas_records_format_warning = True
data = parse_json_input(json_input=flask.request.data.decode('utf-8'),
orientation="records")
elif flask.request.content_type == CONTENT_TYPE_JSON_RECORDS_ORIENTED:
data = parse_json_input(json_input=flask.request.data.decode('utf-8'),
orientation="records")
elif flask.request.content_type == CONTENT_TYPE_JSON_SPLIT_ORIENTED:
data = parse_json_input(json_input=flask.request.data.decode('utf-8'),
orientation="split")
else:
return flask.Response(
response=("This predictor only supports the following content types,"
" {supported_content_types}. Got '{received_content_type}'.".format(
supported_content_types=CONTENT_TYPES,
received_content_type=flask.request.content_type)),
status=415,
mimetype='text/plain')
# Do the prediction
# pylint: disable=broad-except
try:
raw_predictions = model.predict(data)
except Exception:
_handle_serving_error(
error_message=(
"Encountered an unexpected error while evaluating the model. Verify"
" that the serialized input Dataframe is compatible with the model for"
" inference."),
error_code=BAD_REQUEST)
predictions = get_jsonable_obj(raw_predictions, pandas_orientation="records")
result = json.dumps(predictions, cls=NumpyEncoder)
return flask.Response(response=result, status=200, mimetype='application/json')
return app
| 0.783988 | 0.204223 |
import pprint
import re # noqa: F401
import six
from ory_hydra_client.configuration import Configuration
class WellKnown(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'authorization_endpoint': 'str',
'backchannel_logout_session_supported': 'bool',
'backchannel_logout_supported': 'bool',
'claims_parameter_supported': 'bool',
'claims_supported': 'list[str]',
'end_session_endpoint': 'str',
'frontchannel_logout_session_supported': 'bool',
'frontchannel_logout_supported': 'bool',
'grant_types_supported': 'list[str]',
'id_token_signing_alg_values_supported': 'list[str]',
'issuer': 'str',
'jwks_uri': 'str',
'registration_endpoint': 'str',
'request_parameter_supported': 'bool',
'request_uri_parameter_supported': 'bool',
'require_request_uri_registration': 'bool',
'response_modes_supported': 'list[str]',
'response_types_supported': 'list[str]',
'revocation_endpoint': 'str',
'scopes_supported': 'list[str]',
'subject_types_supported': 'list[str]',
'token_endpoint': 'str',
'token_endpoint_auth_methods_supported': 'list[str]',
'userinfo_endpoint': 'str',
'userinfo_signing_alg_values_supported': 'list[str]'
}
attribute_map = {
'authorization_endpoint': 'authorization_endpoint',
'backchannel_logout_session_supported': 'backchannel_logout_session_supported',
'backchannel_logout_supported': 'backchannel_logout_supported',
'claims_parameter_supported': 'claims_parameter_supported',
'claims_supported': 'claims_supported',
'end_session_endpoint': 'end_session_endpoint',
'frontchannel_logout_session_supported': 'frontchannel_logout_session_supported',
'frontchannel_logout_supported': 'frontchannel_logout_supported',
'grant_types_supported': 'grant_types_supported',
'id_token_signing_alg_values_supported': 'id_token_signing_alg_values_supported',
'issuer': 'issuer',
'jwks_uri': 'jwks_uri',
'registration_endpoint': 'registration_endpoint',
'request_parameter_supported': 'request_parameter_supported',
'request_uri_parameter_supported': 'request_uri_parameter_supported',
'require_request_uri_registration': 'require_request_uri_registration',
'response_modes_supported': 'response_modes_supported',
'response_types_supported': 'response_types_supported',
'revocation_endpoint': 'revocation_endpoint',
'scopes_supported': 'scopes_supported',
'subject_types_supported': 'subject_types_supported',
'token_endpoint': 'token_endpoint',
'token_endpoint_auth_methods_supported': 'token_endpoint_auth_methods_supported',
'userinfo_endpoint': 'userinfo_endpoint',
'userinfo_signing_alg_values_supported': 'userinfo_signing_alg_values_supported'
}
def __init__(self, authorization_endpoint=None, backchannel_logout_session_supported=None, backchannel_logout_supported=None, claims_parameter_supported=None, claims_supported=None, end_session_endpoint=None, frontchannel_logout_session_supported=None, frontchannel_logout_supported=None, grant_types_supported=None, id_token_signing_alg_values_supported=None, issuer=None, jwks_uri=None, registration_endpoint=None, request_parameter_supported=None, request_uri_parameter_supported=None, require_request_uri_registration=None, response_modes_supported=None, response_types_supported=None, revocation_endpoint=None, scopes_supported=None, subject_types_supported=None, token_endpoint=None, token_endpoint_auth_methods_supported=None, userinfo_endpoint=None, userinfo_signing_alg_values_supported=None, local_vars_configuration=None): # noqa: E501
"""WellKnown - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._authorization_endpoint = None
self._backchannel_logout_session_supported = None
self._backchannel_logout_supported = None
self._claims_parameter_supported = None
self._claims_supported = None
self._end_session_endpoint = None
self._frontchannel_logout_session_supported = None
self._frontchannel_logout_supported = None
self._grant_types_supported = None
self._id_token_signing_alg_values_supported = None
self._issuer = None
self._jwks_uri = None
self._registration_endpoint = None
self._request_parameter_supported = None
self._request_uri_parameter_supported = None
self._require_request_uri_registration = None
self._response_modes_supported = None
self._response_types_supported = None
self._revocation_endpoint = None
self._scopes_supported = None
self._subject_types_supported = None
self._token_endpoint = None
self._token_endpoint_auth_methods_supported = None
self._userinfo_endpoint = None
self._userinfo_signing_alg_values_supported = None
self.discriminator = None
self.authorization_endpoint = authorization_endpoint
if backchannel_logout_session_supported is not None:
self.backchannel_logout_session_supported = backchannel_logout_session_supported
if backchannel_logout_supported is not None:
self.backchannel_logout_supported = backchannel_logout_supported
if claims_parameter_supported is not None:
self.claims_parameter_supported = claims_parameter_supported
if claims_supported is not None:
self.claims_supported = claims_supported
if end_session_endpoint is not None:
self.end_session_endpoint = end_session_endpoint
if frontchannel_logout_session_supported is not None:
self.frontchannel_logout_session_supported = frontchannel_logout_session_supported
if frontchannel_logout_supported is not None:
self.frontchannel_logout_supported = frontchannel_logout_supported
if grant_types_supported is not None:
self.grant_types_supported = grant_types_supported
self.id_token_signing_alg_values_supported = id_token_signing_alg_values_supported
self.issuer = issuer
self.jwks_uri = jwks_uri
if registration_endpoint is not None:
self.registration_endpoint = registration_endpoint
if request_parameter_supported is not None:
self.request_parameter_supported = request_parameter_supported
if request_uri_parameter_supported is not None:
self.request_uri_parameter_supported = request_uri_parameter_supported
if require_request_uri_registration is not None:
self.require_request_uri_registration = require_request_uri_registration
if response_modes_supported is not None:
self.response_modes_supported = response_modes_supported
self.response_types_supported = response_types_supported
if revocation_endpoint is not None:
self.revocation_endpoint = revocation_endpoint
if scopes_supported is not None:
self.scopes_supported = scopes_supported
self.subject_types_supported = subject_types_supported
self.token_endpoint = token_endpoint
if token_endpoint_auth_methods_supported is not None:
self.token_endpoint_auth_methods_supported = token_endpoint_auth_methods_supported
if userinfo_endpoint is not None:
self.userinfo_endpoint = userinfo_endpoint
if userinfo_signing_alg_values_supported is not None:
self.userinfo_signing_alg_values_supported = userinfo_signing_alg_values_supported
@property
def authorization_endpoint(self):
"""Gets the authorization_endpoint of this WellKnown. # noqa: E501
URL of the OP's OAuth 2.0 Authorization Endpoint. # noqa: E501
:return: The authorization_endpoint of this WellKnown. # noqa: E501
:rtype: str
"""
return self._authorization_endpoint
@authorization_endpoint.setter
def authorization_endpoint(self, authorization_endpoint):
"""Sets the authorization_endpoint of this WellKnown.
URL of the OP's OAuth 2.0 Authorization Endpoint. # noqa: E501
:param authorization_endpoint: The authorization_endpoint of this WellKnown. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and authorization_endpoint is None: # noqa: E501
raise ValueError("Invalid value for `authorization_endpoint`, must not be `None`") # noqa: E501
self._authorization_endpoint = authorization_endpoint
@property
def backchannel_logout_session_supported(self):
"""Gets the backchannel_logout_session_supported of this WellKnown. # noqa: E501
Boolean value specifying whether the OP can pass a sid (session ID) Claim in the Logout Token to identify the RP session with the OP. If supported, the sid Claim is also included in ID Tokens issued by the OP # noqa: E501
:return: The backchannel_logout_session_supported of this WellKnown. # noqa: E501
:rtype: bool
"""
return self._backchannel_logout_session_supported
@backchannel_logout_session_supported.setter
def backchannel_logout_session_supported(self, backchannel_logout_session_supported):
"""Sets the backchannel_logout_session_supported of this WellKnown.
Boolean value specifying whether the OP can pass a sid (session ID) Claim in the Logout Token to identify the RP session with the OP. If supported, the sid Claim is also included in ID Tokens issued by the OP # noqa: E501
:param backchannel_logout_session_supported: The backchannel_logout_session_supported of this WellKnown. # noqa: E501
:type: bool
"""
self._backchannel_logout_session_supported = backchannel_logout_session_supported
@property
def backchannel_logout_supported(self):
"""Gets the backchannel_logout_supported of this WellKnown. # noqa: E501
Boolean value specifying whether the OP supports back-channel logout, with true indicating support. # noqa: E501
:return: The backchannel_logout_supported of this WellKnown. # noqa: E501
:rtype: bool
"""
return self._backchannel_logout_supported
@backchannel_logout_supported.setter
def backchannel_logout_supported(self, backchannel_logout_supported):
"""Sets the backchannel_logout_supported of this WellKnown.
Boolean value specifying whether the OP supports back-channel logout, with true indicating support. # noqa: E501
:param backchannel_logout_supported: The backchannel_logout_supported of this WellKnown. # noqa: E501
:type: bool
"""
self._backchannel_logout_supported = backchannel_logout_supported
@property
def claims_parameter_supported(self):
"""Gets the claims_parameter_supported of this WellKnown. # noqa: E501
Boolean value specifying whether the OP supports use of the claims parameter, with true indicating support. # noqa: E501
:return: The claims_parameter_supported of this WellKnown. # noqa: E501
:rtype: bool
"""
return self._claims_parameter_supported
@claims_parameter_supported.setter
def claims_parameter_supported(self, claims_parameter_supported):
"""Sets the claims_parameter_supported of this WellKnown.
Boolean value specifying whether the OP supports use of the claims parameter, with true indicating support. # noqa: E501
:param claims_parameter_supported: The claims_parameter_supported of this WellKnown. # noqa: E501
:type: bool
"""
self._claims_parameter_supported = claims_parameter_supported
@property
def claims_supported(self):
"""Gets the claims_supported of this WellKnown. # noqa: E501
JSON array containing a list of the Claim Names of the Claims that the OpenID Provider MAY be able to supply values for. Note that for privacy or other reasons, this might not be an exhaustive list. # noqa: E501
:return: The claims_supported of this WellKnown. # noqa: E501
:rtype: list[str]
"""
return self._claims_supported
@claims_supported.setter
def claims_supported(self, claims_supported):
"""Sets the claims_supported of this WellKnown.
JSON array containing a list of the Claim Names of the Claims that the OpenID Provider MAY be able to supply values for. Note that for privacy or other reasons, this might not be an exhaustive list. # noqa: E501
:param claims_supported: The claims_supported of this WellKnown. # noqa: E501
:type: list[str]
"""
self._claims_supported = claims_supported
@property
def end_session_endpoint(self):
"""Gets the end_session_endpoint of this WellKnown. # noqa: E501
URL at the OP to which an RP can perform a redirect to request that the End-User be logged out at the OP. # noqa: E501
:return: The end_session_endpoint of this WellKnown. # noqa: E501
:rtype: str
"""
return self._end_session_endpoint
@end_session_endpoint.setter
def end_session_endpoint(self, end_session_endpoint):
"""Sets the end_session_endpoint of this WellKnown.
URL at the OP to which an RP can perform a redirect to request that the End-User be logged out at the OP. # noqa: E501
:param end_session_endpoint: The end_session_endpoint of this WellKnown. # noqa: E501
:type: str
"""
self._end_session_endpoint = end_session_endpoint
@property
def frontchannel_logout_session_supported(self):
"""Gets the frontchannel_logout_session_supported of this WellKnown. # noqa: E501
Boolean value specifying whether the OP can pass iss (issuer) and sid (session ID) query parameters to identify the RP session with the OP when the frontchannel_logout_uri is used. If supported, the sid Claim is also included in ID Tokens issued by the OP. # noqa: E501
:return: The frontchannel_logout_session_supported of this WellKnown. # noqa: E501
:rtype: bool
"""
return self._frontchannel_logout_session_supported
@frontchannel_logout_session_supported.setter
def frontchannel_logout_session_supported(self, frontchannel_logout_session_supported):
"""Sets the frontchannel_logout_session_supported of this WellKnown.
Boolean value specifying whether the OP can pass iss (issuer) and sid (session ID) query parameters to identify the RP session with the OP when the frontchannel_logout_uri is used. If supported, the sid Claim is also included in ID Tokens issued by the OP. # noqa: E501
:param frontchannel_logout_session_supported: The frontchannel_logout_session_supported of this WellKnown. # noqa: E501
:type: bool
"""
self._frontchannel_logout_session_supported = frontchannel_logout_session_supported
@property
def frontchannel_logout_supported(self):
"""Gets the frontchannel_logout_supported of this WellKnown. # noqa: E501
Boolean value specifying whether the OP supports HTTP-based logout, with true indicating support. # noqa: E501
:return: The frontchannel_logout_supported of this WellKnown. # noqa: E501
:rtype: bool
"""
return self._frontchannel_logout_supported
@frontchannel_logout_supported.setter
def frontchannel_logout_supported(self, frontchannel_logout_supported):
"""Sets the frontchannel_logout_supported of this WellKnown.
Boolean value specifying whether the OP supports HTTP-based logout, with true indicating support. # noqa: E501
:param frontchannel_logout_supported: The frontchannel_logout_supported of this WellKnown. # noqa: E501
:type: bool
"""
self._frontchannel_logout_supported = frontchannel_logout_supported
@property
def grant_types_supported(self):
"""Gets the grant_types_supported of this WellKnown. # noqa: E501
JSON array containing a list of the OAuth 2.0 Grant Type values that this OP supports. # noqa: E501
:return: The grant_types_supported of this WellKnown. # noqa: E501
:rtype: list[str]
"""
return self._grant_types_supported
@grant_types_supported.setter
def grant_types_supported(self, grant_types_supported):
"""Sets the grant_types_supported of this WellKnown.
JSON array containing a list of the OAuth 2.0 Grant Type values that this OP supports. # noqa: E501
:param grant_types_supported: The grant_types_supported of this WellKnown. # noqa: E501
:type: list[str]
"""
self._grant_types_supported = grant_types_supported
@property
def id_token_signing_alg_values_supported(self):
"""Gets the id_token_signing_alg_values_supported of this WellKnown. # noqa: E501
JSON array containing a list of the JWS signing algorithms (alg values) supported by the OP for the ID Token to encode the Claims in a JWT. # noqa: E501
:return: The id_token_signing_alg_values_supported of this WellKnown. # noqa: E501
:rtype: list[str]
"""
return self._id_token_signing_alg_values_supported
@id_token_signing_alg_values_supported.setter
def id_token_signing_alg_values_supported(self, id_token_signing_alg_values_supported):
"""Sets the id_token_signing_alg_values_supported of this WellKnown.
JSON array containing a list of the JWS signing algorithms (alg values) supported by the OP for the ID Token to encode the Claims in a JWT. # noqa: E501
:param id_token_signing_alg_values_supported: The id_token_signing_alg_values_supported of this WellKnown. # noqa: E501
:type: list[str]
"""
if self.local_vars_configuration.client_side_validation and id_token_signing_alg_values_supported is None: # noqa: E501
raise ValueError("Invalid value for `id_token_signing_alg_values_supported`, must not be `None`") # noqa: E501
self._id_token_signing_alg_values_supported = id_token_signing_alg_values_supported
@property
def issuer(self):
"""Gets the issuer of this WellKnown. # noqa: E501
URL using the https scheme with no query or fragment component that the OP asserts as its IssuerURL Identifier. If IssuerURL discovery is supported, this value MUST be identical to the issuer value returned by WebFinger. This also MUST be identical to the iss Claim value in ID Tokens issued from this IssuerURL. # noqa: E501
:return: The issuer of this WellKnown. # noqa: E501
:rtype: str
"""
return self._issuer
@issuer.setter
def issuer(self, issuer):
"""Sets the issuer of this WellKnown.
URL using the https scheme with no query or fragment component that the OP asserts as its IssuerURL Identifier. If IssuerURL discovery is supported, this value MUST be identical to the issuer value returned by WebFinger. This also MUST be identical to the iss Claim value in ID Tokens issued from this IssuerURL. # noqa: E501
:param issuer: The issuer of this WellKnown. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and issuer is None: # noqa: E501
raise ValueError("Invalid value for `issuer`, must not be `None`") # noqa: E501
self._issuer = issuer
@property
def jwks_uri(self):
"""Gets the jwks_uri of this WellKnown. # noqa: E501
URL of the OP's JSON Web Key Set [JWK] document. This contains the signing key(s) the RP uses to validate signatures from the OP. The JWK Set MAY also contain the Server's encryption key(s), which are used by RPs to encrypt requests to the Server. When both signing and encryption keys are made available, a use (Key Use) parameter value is REQUIRED for all keys in the referenced JWK Set to indicate each key's intended usage. Although some algorithms allow the same key to be used for both signatures and encryption, doing so is NOT RECOMMENDED, as it is less secure. The JWK x5c parameter MAY be used to provide X.509 representations of keys provided. When used, the bare key values MUST still be present and MUST match those in the certificate. # noqa: E501
:return: The jwks_uri of this WellKnown. # noqa: E501
:rtype: str
"""
return self._jwks_uri
@jwks_uri.setter
def jwks_uri(self, jwks_uri):
"""Sets the jwks_uri of this WellKnown.
URL of the OP's JSON Web Key Set [JWK] document. This contains the signing key(s) the RP uses to validate signatures from the OP. The JWK Set MAY also contain the Server's encryption key(s), which are used by RPs to encrypt requests to the Server. When both signing and encryption keys are made available, a use (Key Use) parameter value is REQUIRED for all keys in the referenced JWK Set to indicate each key's intended usage. Although some algorithms allow the same key to be used for both signatures and encryption, doing so is NOT RECOMMENDED, as it is less secure. The JWK x5c parameter MAY be used to provide X.509 representations of keys provided. When used, the bare key values MUST still be present and MUST match those in the certificate. # noqa: E501
:param jwks_uri: The jwks_uri of this WellKnown. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and jwks_uri is None: # noqa: E501
raise ValueError("Invalid value for `jwks_uri`, must not be `None`") # noqa: E501
self._jwks_uri = jwks_uri
@property
def registration_endpoint(self):
"""Gets the registration_endpoint of this WellKnown. # noqa: E501
URL of the OP's Dynamic Client Registration Endpoint. # noqa: E501
:return: The registration_endpoint of this WellKnown. # noqa: E501
:rtype: str
"""
return self._registration_endpoint
@registration_endpoint.setter
def registration_endpoint(self, registration_endpoint):
"""Sets the registration_endpoint of this WellKnown.
URL of the OP's Dynamic Client Registration Endpoint. # noqa: E501
:param registration_endpoint: The registration_endpoint of this WellKnown. # noqa: E501
:type: str
"""
self._registration_endpoint = registration_endpoint
@property
def request_parameter_supported(self):
"""Gets the request_parameter_supported of this WellKnown. # noqa: E501
Boolean value specifying whether the OP supports use of the request parameter, with true indicating support. # noqa: E501
:return: The request_parameter_supported of this WellKnown. # noqa: E501
:rtype: bool
"""
return self._request_parameter_supported
@request_parameter_supported.setter
def request_parameter_supported(self, request_parameter_supported):
"""Sets the request_parameter_supported of this WellKnown.
Boolean value specifying whether the OP supports use of the request parameter, with true indicating support. # noqa: E501
:param request_parameter_supported: The request_parameter_supported of this WellKnown. # noqa: E501
:type: bool
"""
self._request_parameter_supported = request_parameter_supported
@property
def request_uri_parameter_supported(self):
"""Gets the request_uri_parameter_supported of this WellKnown. # noqa: E501
Boolean value specifying whether the OP supports use of the request_uri parameter, with true indicating support. # noqa: E501
:return: The request_uri_parameter_supported of this WellKnown. # noqa: E501
:rtype: bool
"""
return self._request_uri_parameter_supported
@request_uri_parameter_supported.setter
def request_uri_parameter_supported(self, request_uri_parameter_supported):
"""Sets the request_uri_parameter_supported of this WellKnown.
Boolean value specifying whether the OP supports use of the request_uri parameter, with true indicating support. # noqa: E501
:param request_uri_parameter_supported: The request_uri_parameter_supported of this WellKnown. # noqa: E501
:type: bool
"""
self._request_uri_parameter_supported = request_uri_parameter_supported
@property
def require_request_uri_registration(self):
"""Gets the require_request_uri_registration of this WellKnown. # noqa: E501
Boolean value specifying whether the OP requires any request_uri values used to be pre-registered using the request_uris registration parameter. # noqa: E501
:return: The require_request_uri_registration of this WellKnown. # noqa: E501
:rtype: bool
"""
return self._require_request_uri_registration
@require_request_uri_registration.setter
def require_request_uri_registration(self, require_request_uri_registration):
"""Sets the require_request_uri_registration of this WellKnown.
Boolean value specifying whether the OP requires any request_uri values used to be pre-registered using the request_uris registration parameter. # noqa: E501
:param require_request_uri_registration: The require_request_uri_registration of this WellKnown. # noqa: E501
:type: bool
"""
self._require_request_uri_registration = require_request_uri_registration
@property
def response_modes_supported(self):
"""Gets the response_modes_supported of this WellKnown. # noqa: E501
JSON array containing a list of the OAuth 2.0 response_mode values that this OP supports. # noqa: E501
:return: The response_modes_supported of this WellKnown. # noqa: E501
:rtype: list[str]
"""
return self._response_modes_supported
@response_modes_supported.setter
def response_modes_supported(self, response_modes_supported):
"""Sets the response_modes_supported of this WellKnown.
JSON array containing a list of the OAuth 2.0 response_mode values that this OP supports. # noqa: E501
:param response_modes_supported: The response_modes_supported of this WellKnown. # noqa: E501
:type: list[str]
"""
self._response_modes_supported = response_modes_supported
@property
def response_types_supported(self):
"""Gets the response_types_supported of this WellKnown. # noqa: E501
JSON array containing a list of the OAuth 2.0 response_type values that this OP supports. Dynamic OpenID Providers MUST support the code, id_token, and the token id_token Response Type values. # noqa: E501
:return: The response_types_supported of this WellKnown. # noqa: E501
:rtype: list[str]
"""
return self._response_types_supported
@response_types_supported.setter
def response_types_supported(self, response_types_supported):
"""Sets the response_types_supported of this WellKnown.
JSON array containing a list of the OAuth 2.0 response_type values that this OP supports. Dynamic OpenID Providers MUST support the code, id_token, and the token id_token Response Type values. # noqa: E501
:param response_types_supported: The response_types_supported of this WellKnown. # noqa: E501
:type: list[str]
"""
if self.local_vars_configuration.client_side_validation and response_types_supported is None: # noqa: E501
raise ValueError("Invalid value for `response_types_supported`, must not be `None`") # noqa: E501
self._response_types_supported = response_types_supported
@property
def revocation_endpoint(self):
"""Gets the revocation_endpoint of this WellKnown. # noqa: E501
URL of the authorization server's OAuth 2.0 revocation endpoint. # noqa: E501
:return: The revocation_endpoint of this WellKnown. # noqa: E501
:rtype: str
"""
return self._revocation_endpoint
@revocation_endpoint.setter
def revocation_endpoint(self, revocation_endpoint):
"""Sets the revocation_endpoint of this WellKnown.
URL of the authorization server's OAuth 2.0 revocation endpoint. # noqa: E501
:param revocation_endpoint: The revocation_endpoint of this WellKnown. # noqa: E501
:type: str
"""
self._revocation_endpoint = revocation_endpoint
@property
def scopes_supported(self):
"""Gets the scopes_supported of this WellKnown. # noqa: E501
JSON array containing a list of the OAuth 2.0 [RFC6749] scope values that this server supports. The server MUST support the openid scope value. Servers MAY choose not to advertise some supported scope values even when this parameter is used # noqa: E501
:return: The scopes_supported of this WellKnown. # noqa: E501
:rtype: list[str]
"""
return self._scopes_supported
@scopes_supported.setter
def scopes_supported(self, scopes_supported):
"""Sets the scopes_supported of this WellKnown.
JSON array containing a list of the OAuth 2.0 [RFC6749] scope values that this server supports. The server MUST support the openid scope value. Servers MAY choose not to advertise some supported scope values even when this parameter is used # noqa: E501
:param scopes_supported: The scopes_supported of this WellKnown. # noqa: E501
:type: list[str]
"""
self._scopes_supported = scopes_supported
@property
def subject_types_supported(self):
"""Gets the subject_types_supported of this WellKnown. # noqa: E501
JSON array containing a list of the Subject Identifier types that this OP supports. Valid types include pairwise and public. # noqa: E501
:return: The subject_types_supported of this WellKnown. # noqa: E501
:rtype: list[str]
"""
return self._subject_types_supported
@subject_types_supported.setter
def subject_types_supported(self, subject_types_supported):
"""Sets the subject_types_supported of this WellKnown.
JSON array containing a list of the Subject Identifier types that this OP supports. Valid types include pairwise and public. # noqa: E501
:param subject_types_supported: The subject_types_supported of this WellKnown. # noqa: E501
:type: list[str]
"""
if self.local_vars_configuration.client_side_validation and subject_types_supported is None: # noqa: E501
raise ValueError("Invalid value for `subject_types_supported`, must not be `None`") # noqa: E501
self._subject_types_supported = subject_types_supported
@property
def token_endpoint(self):
"""Gets the token_endpoint of this WellKnown. # noqa: E501
URL of the OP's OAuth 2.0 Token Endpoint # noqa: E501
:return: The token_endpoint of this WellKnown. # noqa: E501
:rtype: str
"""
return self._token_endpoint
@token_endpoint.setter
def token_endpoint(self, token_endpoint):
"""Sets the token_endpoint of this WellKnown.
URL of the OP's OAuth 2.0 Token Endpoint # noqa: E501
:param token_endpoint: The token_endpoint of this WellKnown. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and token_endpoint is None: # noqa: E501
raise ValueError("Invalid value for `token_endpoint`, must not be `None`") # noqa: E501
self._token_endpoint = token_endpoint
@property
def token_endpoint_auth_methods_supported(self):
"""Gets the token_endpoint_auth_methods_supported of this WellKnown. # noqa: E501
JSON array containing a list of Client Authentication methods supported by this Token Endpoint. The options are client_secret_post, client_secret_basic, client_secret_jwt, and private_key_jwt, as described in Section 9 of OpenID Connect Core 1.0 # noqa: E501
:return: The token_endpoint_auth_methods_supported of this WellKnown. # noqa: E501
:rtype: list[str]
"""
return self._token_endpoint_auth_methods_supported
@token_endpoint_auth_methods_supported.setter
def token_endpoint_auth_methods_supported(self, token_endpoint_auth_methods_supported):
"""Sets the token_endpoint_auth_methods_supported of this WellKnown.
JSON array containing a list of Client Authentication methods supported by this Token Endpoint. The options are client_secret_post, client_secret_basic, client_secret_jwt, and private_key_jwt, as described in Section 9 of OpenID Connect Core 1.0 # noqa: E501
:param token_endpoint_auth_methods_supported: The token_endpoint_auth_methods_supported of this WellKnown. # noqa: E501
:type: list[str]
"""
self._token_endpoint_auth_methods_supported = token_endpoint_auth_methods_supported
@property
def userinfo_endpoint(self):
"""Gets the userinfo_endpoint of this WellKnown. # noqa: E501
URL of the OP's UserInfo Endpoint. # noqa: E501
:return: The userinfo_endpoint of this WellKnown. # noqa: E501
:rtype: str
"""
return self._userinfo_endpoint
@userinfo_endpoint.setter
def userinfo_endpoint(self, userinfo_endpoint):
"""Sets the userinfo_endpoint of this WellKnown.
URL of the OP's UserInfo Endpoint. # noqa: E501
:param userinfo_endpoint: The userinfo_endpoint of this WellKnown. # noqa: E501
:type: str
"""
self._userinfo_endpoint = userinfo_endpoint
@property
def userinfo_signing_alg_values_supported(self):
"""Gets the userinfo_signing_alg_values_supported of this WellKnown. # noqa: E501
JSON array containing a list of the JWS [JWS] signing algorithms (alg values) [JWA] supported by the UserInfo Endpoint to encode the Claims in a JWT [JWT]. # noqa: E501
:return: The userinfo_signing_alg_values_supported of this WellKnown. # noqa: E501
:rtype: list[str]
"""
return self._userinfo_signing_alg_values_supported
@userinfo_signing_alg_values_supported.setter
def userinfo_signing_alg_values_supported(self, userinfo_signing_alg_values_supported):
"""Sets the userinfo_signing_alg_values_supported of this WellKnown.
JSON array containing a list of the JWS [JWS] signing algorithms (alg values) [JWA] supported by the UserInfo Endpoint to encode the Claims in a JWT [JWT]. # noqa: E501
:param userinfo_signing_alg_values_supported: The userinfo_signing_alg_values_supported of this WellKnown. # noqa: E501
:type: list[str]
"""
self._userinfo_signing_alg_values_supported = userinfo_signing_alg_values_supported
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WellKnown):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, WellKnown):
return True
return self.to_dict() != other.to_dict()
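# --- Usage sketch (not part of the generated client) ---------------------------
# A minimal, hypothetical example of constructing the WellKnown model above and
# serializing it; every URL below is a placeholder, not a real endpoint. The
# fields passed here are exactly the ones whose setters reject None when
# client_side_validation is enabled.
from ory_hydra_client.models.well_known import WellKnown

wk = WellKnown(
    issuer="https://idp.example.com/",
    authorization_endpoint="https://idp.example.com/oauth2/auth",
    token_endpoint="https://idp.example.com/oauth2/token",
    jwks_uri="https://idp.example.com/.well-known/jwks.json",
    id_token_signing_alg_values_supported=["RS256"],
    response_types_supported=["code", "id_token", "token id_token"],
    subject_types_supported=["public"],
)
print(wk.to_dict()["token_endpoint"])  # model round-trips through to_dict()/to_str()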
|
clients/hydra/python/ory_hydra_client/models/well_known.py
|
import pprint
import re # noqa: F401
import six
from ory_hydra_client.configuration import Configuration
class WellKnown(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'authorization_endpoint': 'str',
'backchannel_logout_session_supported': 'bool',
'backchannel_logout_supported': 'bool',
'claims_parameter_supported': 'bool',
'claims_supported': 'list[str]',
'end_session_endpoint': 'str',
'frontchannel_logout_session_supported': 'bool',
'frontchannel_logout_supported': 'bool',
'grant_types_supported': 'list[str]',
'id_token_signing_alg_values_supported': 'list[str]',
'issuer': 'str',
'jwks_uri': 'str',
'registration_endpoint': 'str',
'request_parameter_supported': 'bool',
'request_uri_parameter_supported': 'bool',
'require_request_uri_registration': 'bool',
'response_modes_supported': 'list[str]',
'response_types_supported': 'list[str]',
'revocation_endpoint': 'str',
'scopes_supported': 'list[str]',
'subject_types_supported': 'list[str]',
'token_endpoint': 'str',
'token_endpoint_auth_methods_supported': 'list[str]',
'userinfo_endpoint': 'str',
'userinfo_signing_alg_values_supported': 'list[str]'
}
attribute_map = {
'authorization_endpoint': 'authorization_endpoint',
'backchannel_logout_session_supported': 'backchannel_logout_session_supported',
'backchannel_logout_supported': 'backchannel_logout_supported',
'claims_parameter_supported': 'claims_parameter_supported',
'claims_supported': 'claims_supported',
'end_session_endpoint': 'end_session_endpoint',
'frontchannel_logout_session_supported': 'frontchannel_logout_session_supported',
'frontchannel_logout_supported': 'frontchannel_logout_supported',
'grant_types_supported': 'grant_types_supported',
'id_token_signing_alg_values_supported': 'id_token_signing_alg_values_supported',
'issuer': 'issuer',
'jwks_uri': 'jwks_uri',
'registration_endpoint': 'registration_endpoint',
'request_parameter_supported': 'request_parameter_supported',
'request_uri_parameter_supported': 'request_uri_parameter_supported',
'require_request_uri_registration': 'require_request_uri_registration',
'response_modes_supported': 'response_modes_supported',
'response_types_supported': 'response_types_supported',
'revocation_endpoint': 'revocation_endpoint',
'scopes_supported': 'scopes_supported',
'subject_types_supported': 'subject_types_supported',
'token_endpoint': 'token_endpoint',
'token_endpoint_auth_methods_supported': 'token_endpoint_auth_methods_supported',
'userinfo_endpoint': 'userinfo_endpoint',
'userinfo_signing_alg_values_supported': 'userinfo_signing_alg_values_supported'
}
def __init__(self, authorization_endpoint=None, backchannel_logout_session_supported=None, backchannel_logout_supported=None, claims_parameter_supported=None, claims_supported=None, end_session_endpoint=None, frontchannel_logout_session_supported=None, frontchannel_logout_supported=None, grant_types_supported=None, id_token_signing_alg_values_supported=None, issuer=None, jwks_uri=None, registration_endpoint=None, request_parameter_supported=None, request_uri_parameter_supported=None, require_request_uri_registration=None, response_modes_supported=None, response_types_supported=None, revocation_endpoint=None, scopes_supported=None, subject_types_supported=None, token_endpoint=None, token_endpoint_auth_methods_supported=None, userinfo_endpoint=None, userinfo_signing_alg_values_supported=None, local_vars_configuration=None): # noqa: E501
"""WellKnown - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._authorization_endpoint = None
self._backchannel_logout_session_supported = None
self._backchannel_logout_supported = None
self._claims_parameter_supported = None
self._claims_supported = None
self._end_session_endpoint = None
self._frontchannel_logout_session_supported = None
self._frontchannel_logout_supported = None
self._grant_types_supported = None
self._id_token_signing_alg_values_supported = None
self._issuer = None
self._jwks_uri = None
self._registration_endpoint = None
self._request_parameter_supported = None
self._request_uri_parameter_supported = None
self._require_request_uri_registration = None
self._response_modes_supported = None
self._response_types_supported = None
self._revocation_endpoint = None
self._scopes_supported = None
self._subject_types_supported = None
self._token_endpoint = None
self._token_endpoint_auth_methods_supported = None
self._userinfo_endpoint = None
self._userinfo_signing_alg_values_supported = None
self.discriminator = None
self.authorization_endpoint = authorization_endpoint
if backchannel_logout_session_supported is not None:
self.backchannel_logout_session_supported = backchannel_logout_session_supported
if backchannel_logout_supported is not None:
self.backchannel_logout_supported = backchannel_logout_supported
if claims_parameter_supported is not None:
self.claims_parameter_supported = claims_parameter_supported
if claims_supported is not None:
self.claims_supported = claims_supported
if end_session_endpoint is not None:
self.end_session_endpoint = end_session_endpoint
if frontchannel_logout_session_supported is not None:
self.frontchannel_logout_session_supported = frontchannel_logout_session_supported
if frontchannel_logout_supported is not None:
self.frontchannel_logout_supported = frontchannel_logout_supported
if grant_types_supported is not None:
self.grant_types_supported = grant_types_supported
self.id_token_signing_alg_values_supported = id_token_signing_alg_values_supported
self.issuer = issuer
self.jwks_uri = jwks_uri
if registration_endpoint is not None:
self.registration_endpoint = registration_endpoint
if request_parameter_supported is not None:
self.request_parameter_supported = request_parameter_supported
if request_uri_parameter_supported is not None:
self.request_uri_parameter_supported = request_uri_parameter_supported
if require_request_uri_registration is not None:
self.require_request_uri_registration = require_request_uri_registration
if response_modes_supported is not None:
self.response_modes_supported = response_modes_supported
self.response_types_supported = response_types_supported
if revocation_endpoint is not None:
self.revocation_endpoint = revocation_endpoint
if scopes_supported is not None:
self.scopes_supported = scopes_supported
self.subject_types_supported = subject_types_supported
self.token_endpoint = token_endpoint
if token_endpoint_auth_methods_supported is not None:
self.token_endpoint_auth_methods_supported = token_endpoint_auth_methods_supported
if userinfo_endpoint is not None:
self.userinfo_endpoint = userinfo_endpoint
if userinfo_signing_alg_values_supported is not None:
self.userinfo_signing_alg_values_supported = userinfo_signing_alg_values_supported
@property
def authorization_endpoint(self):
"""Gets the authorization_endpoint of this WellKnown. # noqa: E501
URL of the OP's OAuth 2.0 Authorization Endpoint. # noqa: E501
:return: The authorization_endpoint of this WellKnown. # noqa: E501
:rtype: str
"""
return self._authorization_endpoint
@authorization_endpoint.setter
def authorization_endpoint(self, authorization_endpoint):
"""Sets the authorization_endpoint of this WellKnown.
URL of the OP's OAuth 2.0 Authorization Endpoint. # noqa: E501
:param authorization_endpoint: The authorization_endpoint of this WellKnown. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and authorization_endpoint is None: # noqa: E501
raise ValueError("Invalid value for `authorization_endpoint`, must not be `None`") # noqa: E501
self._authorization_endpoint = authorization_endpoint
@property
def backchannel_logout_session_supported(self):
"""Gets the backchannel_logout_session_supported of this WellKnown. # noqa: E501
Boolean value specifying whether the OP can pass a sid (session ID) Claim in the Logout Token to identify the RP session with the OP. If supported, the sid Claim is also included in ID Tokens issued by the OP # noqa: E501
:return: The backchannel_logout_session_supported of this WellKnown. # noqa: E501
:rtype: bool
"""
return self._backchannel_logout_session_supported
@backchannel_logout_session_supported.setter
def backchannel_logout_session_supported(self, backchannel_logout_session_supported):
"""Sets the backchannel_logout_session_supported of this WellKnown.
Boolean value specifying whether the OP can pass a sid (session ID) Claim in the Logout Token to identify the RP session with the OP. If supported, the sid Claim is also included in ID Tokens issued by the OP # noqa: E501
:param backchannel_logout_session_supported: The backchannel_logout_session_supported of this WellKnown. # noqa: E501
:type: bool
"""
self._backchannel_logout_session_supported = backchannel_logout_session_supported
@property
def backchannel_logout_supported(self):
"""Gets the backchannel_logout_supported of this WellKnown. # noqa: E501
Boolean value specifying whether the OP supports back-channel logout, with true indicating support. # noqa: E501
:return: The backchannel_logout_supported of this WellKnown. # noqa: E501
:rtype: bool
"""
return self._backchannel_logout_supported
@backchannel_logout_supported.setter
def backchannel_logout_supported(self, backchannel_logout_supported):
"""Sets the backchannel_logout_supported of this WellKnown.
Boolean value specifying whether the OP supports back-channel logout, with true indicating support. # noqa: E501
:param backchannel_logout_supported: The backchannel_logout_supported of this WellKnown. # noqa: E501
:type: bool
"""
self._backchannel_logout_supported = backchannel_logout_supported
@property
def claims_parameter_supported(self):
"""Gets the claims_parameter_supported of this WellKnown. # noqa: E501
Boolean value specifying whether the OP supports use of the claims parameter, with true indicating support. # noqa: E501
:return: The claims_parameter_supported of this WellKnown. # noqa: E501
:rtype: bool
"""
return self._claims_parameter_supported
@claims_parameter_supported.setter
def claims_parameter_supported(self, claims_parameter_supported):
"""Sets the claims_parameter_supported of this WellKnown.
Boolean value specifying whether the OP supports use of the claims parameter, with true indicating support. # noqa: E501
:param claims_parameter_supported: The claims_parameter_supported of this WellKnown. # noqa: E501
:type: bool
"""
self._claims_parameter_supported = claims_parameter_supported
@property
def claims_supported(self):
"""Gets the claims_supported of this WellKnown. # noqa: E501
JSON array containing a list of the Claim Names of the Claims that the OpenID Provider MAY be able to supply values for. Note that for privacy or other reasons, this might not be an exhaustive list. # noqa: E501
:return: The claims_supported of this WellKnown. # noqa: E501
:rtype: list[str]
"""
return self._claims_supported
@claims_supported.setter
def claims_supported(self, claims_supported):
"""Sets the claims_supported of this WellKnown.
JSON array containing a list of the Claim Names of the Claims that the OpenID Provider MAY be able to supply values for. Note that for privacy or other reasons, this might not be an exhaustive list. # noqa: E501
:param claims_supported: The claims_supported of this WellKnown. # noqa: E501
:type: list[str]
"""
self._claims_supported = claims_supported
@property
def end_session_endpoint(self):
"""Gets the end_session_endpoint of this WellKnown. # noqa: E501
URL at the OP to which an RP can perform a redirect to request that the End-User be logged out at the OP. # noqa: E501
:return: The end_session_endpoint of this WellKnown. # noqa: E501
:rtype: str
"""
return self._end_session_endpoint
@end_session_endpoint.setter
def end_session_endpoint(self, end_session_endpoint):
"""Sets the end_session_endpoint of this WellKnown.
URL at the OP to which an RP can perform a redirect to request that the End-User be logged out at the OP. # noqa: E501
:param end_session_endpoint: The end_session_endpoint of this WellKnown. # noqa: E501
:type: str
"""
self._end_session_endpoint = end_session_endpoint
@property
def frontchannel_logout_session_supported(self):
"""Gets the frontchannel_logout_session_supported of this WellKnown. # noqa: E501
Boolean value specifying whether the OP can pass iss (issuer) and sid (session ID) query parameters to identify the RP session with the OP when the frontchannel_logout_uri is used. If supported, the sid Claim is also included in ID Tokens issued by the OP. # noqa: E501
:return: The frontchannel_logout_session_supported of this WellKnown. # noqa: E501
:rtype: bool
"""
return self._frontchannel_logout_session_supported
@frontchannel_logout_session_supported.setter
def frontchannel_logout_session_supported(self, frontchannel_logout_session_supported):
"""Sets the frontchannel_logout_session_supported of this WellKnown.
Boolean value specifying whether the OP can pass iss (issuer) and sid (session ID) query parameters to identify the RP session with the OP when the frontchannel_logout_uri is used. If supported, the sid Claim is also included in ID Tokens issued by the OP. # noqa: E501
:param frontchannel_logout_session_supported: The frontchannel_logout_session_supported of this WellKnown. # noqa: E501
:type: bool
"""
self._frontchannel_logout_session_supported = frontchannel_logout_session_supported
@property
def frontchannel_logout_supported(self):
"""Gets the frontchannel_logout_supported of this WellKnown. # noqa: E501
Boolean value specifying whether the OP supports HTTP-based logout, with true indicating support. # noqa: E501
:return: The frontchannel_logout_supported of this WellKnown. # noqa: E501
:rtype: bool
"""
return self._frontchannel_logout_supported
@frontchannel_logout_supported.setter
def frontchannel_logout_supported(self, frontchannel_logout_supported):
"""Sets the frontchannel_logout_supported of this WellKnown.
Boolean value specifying whether the OP supports HTTP-based logout, with true indicating support. # noqa: E501
:param frontchannel_logout_supported: The frontchannel_logout_supported of this WellKnown. # noqa: E501
:type: bool
"""
self._frontchannel_logout_supported = frontchannel_logout_supported
@property
def grant_types_supported(self):
"""Gets the grant_types_supported of this WellKnown. # noqa: E501
JSON array containing a list of the OAuth 2.0 Grant Type values that this OP supports. # noqa: E501
:return: The grant_types_supported of this WellKnown. # noqa: E501
:rtype: list[str]
"""
return self._grant_types_supported
@grant_types_supported.setter
def grant_types_supported(self, grant_types_supported):
"""Sets the grant_types_supported of this WellKnown.
JSON array containing a list of the OAuth 2.0 Grant Type values that this OP supports. # noqa: E501
:param grant_types_supported: The grant_types_supported of this WellKnown. # noqa: E501
:type: list[str]
"""
self._grant_types_supported = grant_types_supported
@property
def id_token_signing_alg_values_supported(self):
"""Gets the id_token_signing_alg_values_supported of this WellKnown. # noqa: E501
JSON array containing a list of the JWS signing algorithms (alg values) supported by the OP for the ID Token to encode the Claims in a JWT. # noqa: E501
:return: The id_token_signing_alg_values_supported of this WellKnown. # noqa: E501
:rtype: list[str]
"""
return self._id_token_signing_alg_values_supported
@id_token_signing_alg_values_supported.setter
def id_token_signing_alg_values_supported(self, id_token_signing_alg_values_supported):
"""Sets the id_token_signing_alg_values_supported of this WellKnown.
JSON array containing a list of the JWS signing algorithms (alg values) supported by the OP for the ID Token to encode the Claims in a JWT. # noqa: E501
:param id_token_signing_alg_values_supported: The id_token_signing_alg_values_supported of this WellKnown. # noqa: E501
:type: list[str]
"""
if self.local_vars_configuration.client_side_validation and id_token_signing_alg_values_supported is None: # noqa: E501
raise ValueError("Invalid value for `id_token_signing_alg_values_supported`, must not be `None`") # noqa: E501
self._id_token_signing_alg_values_supported = id_token_signing_alg_values_supported
@property
def issuer(self):
"""Gets the issuer of this WellKnown. # noqa: E501
        URL using the https scheme with no query or fragment component that the OP asserts as its IssuerURL Identifier. If IssuerURL discovery is supported, this value MUST be identical to the issuer value returned by WebFinger. This also MUST be identical to the iss Claim value in ID Tokens issued from this IssuerURL. # noqa: E501
:return: The issuer of this WellKnown. # noqa: E501
:rtype: str
"""
return self._issuer
@issuer.setter
def issuer(self, issuer):
"""Sets the issuer of this WellKnown.
        URL using the https scheme with no query or fragment component that the OP asserts as its IssuerURL Identifier. If IssuerURL discovery is supported, this value MUST be identical to the issuer value returned by WebFinger. This also MUST be identical to the iss Claim value in ID Tokens issued from this IssuerURL. # noqa: E501
:param issuer: The issuer of this WellKnown. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and issuer is None: # noqa: E501
raise ValueError("Invalid value for `issuer`, must not be `None`") # noqa: E501
self._issuer = issuer
@property
def jwks_uri(self):
"""Gets the jwks_uri of this WellKnown. # noqa: E501
URL of the OP's JSON Web Key Set [JWK] document. This contains the signing key(s) the RP uses to validate signatures from the OP. The JWK Set MAY also contain the Server's encryption key(s), which are used by RPs to encrypt requests to the Server. When both signing and encryption keys are made available, a use (Key Use) parameter value is REQUIRED for all keys in the referenced JWK Set to indicate each key's intended usage. Although some algorithms allow the same key to be used for both signatures and encryption, doing so is NOT RECOMMENDED, as it is less secure. The JWK x5c parameter MAY be used to provide X.509 representations of keys provided. When used, the bare key values MUST still be present and MUST match those in the certificate. # noqa: E501
:return: The jwks_uri of this WellKnown. # noqa: E501
:rtype: str
"""
return self._jwks_uri
@jwks_uri.setter
def jwks_uri(self, jwks_uri):
"""Sets the jwks_uri of this WellKnown.
URL of the OP's JSON Web Key Set [JWK] document. This contains the signing key(s) the RP uses to validate signatures from the OP. The JWK Set MAY also contain the Server's encryption key(s), which are used by RPs to encrypt requests to the Server. When both signing and encryption keys are made available, a use (Key Use) parameter value is REQUIRED for all keys in the referenced JWK Set to indicate each key's intended usage. Although some algorithms allow the same key to be used for both signatures and encryption, doing so is NOT RECOMMENDED, as it is less secure. The JWK x5c parameter MAY be used to provide X.509 representations of keys provided. When used, the bare key values MUST still be present and MUST match those in the certificate. # noqa: E501
:param jwks_uri: The jwks_uri of this WellKnown. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and jwks_uri is None: # noqa: E501
raise ValueError("Invalid value for `jwks_uri`, must not be `None`") # noqa: E501
self._jwks_uri = jwks_uri
@property
def registration_endpoint(self):
"""Gets the registration_endpoint of this WellKnown. # noqa: E501
URL of the OP's Dynamic Client Registration Endpoint. # noqa: E501
:return: The registration_endpoint of this WellKnown. # noqa: E501
:rtype: str
"""
return self._registration_endpoint
@registration_endpoint.setter
def registration_endpoint(self, registration_endpoint):
"""Sets the registration_endpoint of this WellKnown.
URL of the OP's Dynamic Client Registration Endpoint. # noqa: E501
:param registration_endpoint: The registration_endpoint of this WellKnown. # noqa: E501
:type: str
"""
self._registration_endpoint = registration_endpoint
@property
def request_parameter_supported(self):
"""Gets the request_parameter_supported of this WellKnown. # noqa: E501
Boolean value specifying whether the OP supports use of the request parameter, with true indicating support. # noqa: E501
:return: The request_parameter_supported of this WellKnown. # noqa: E501
:rtype: bool
"""
return self._request_parameter_supported
@request_parameter_supported.setter
def request_parameter_supported(self, request_parameter_supported):
"""Sets the request_parameter_supported of this WellKnown.
Boolean value specifying whether the OP supports use of the request parameter, with true indicating support. # noqa: E501
:param request_parameter_supported: The request_parameter_supported of this WellKnown. # noqa: E501
:type: bool
"""
self._request_parameter_supported = request_parameter_supported
@property
def request_uri_parameter_supported(self):
"""Gets the request_uri_parameter_supported of this WellKnown. # noqa: E501
Boolean value specifying whether the OP supports use of the request_uri parameter, with true indicating support. # noqa: E501
:return: The request_uri_parameter_supported of this WellKnown. # noqa: E501
:rtype: bool
"""
return self._request_uri_parameter_supported
@request_uri_parameter_supported.setter
def request_uri_parameter_supported(self, request_uri_parameter_supported):
"""Sets the request_uri_parameter_supported of this WellKnown.
Boolean value specifying whether the OP supports use of the request_uri parameter, with true indicating support. # noqa: E501
:param request_uri_parameter_supported: The request_uri_parameter_supported of this WellKnown. # noqa: E501
:type: bool
"""
self._request_uri_parameter_supported = request_uri_parameter_supported
@property
def require_request_uri_registration(self):
"""Gets the require_request_uri_registration of this WellKnown. # noqa: E501
Boolean value specifying whether the OP requires any request_uri values used to be pre-registered using the request_uris registration parameter. # noqa: E501
:return: The require_request_uri_registration of this WellKnown. # noqa: E501
:rtype: bool
"""
return self._require_request_uri_registration
@require_request_uri_registration.setter
def require_request_uri_registration(self, require_request_uri_registration):
"""Sets the require_request_uri_registration of this WellKnown.
Boolean value specifying whether the OP requires any request_uri values used to be pre-registered using the request_uris registration parameter. # noqa: E501
:param require_request_uri_registration: The require_request_uri_registration of this WellKnown. # noqa: E501
:type: bool
"""
self._require_request_uri_registration = require_request_uri_registration
@property
def response_modes_supported(self):
"""Gets the response_modes_supported of this WellKnown. # noqa: E501
JSON array containing a list of the OAuth 2.0 response_mode values that this OP supports. # noqa: E501
:return: The response_modes_supported of this WellKnown. # noqa: E501
:rtype: list[str]
"""
return self._response_modes_supported
@response_modes_supported.setter
def response_modes_supported(self, response_modes_supported):
"""Sets the response_modes_supported of this WellKnown.
JSON array containing a list of the OAuth 2.0 response_mode values that this OP supports. # noqa: E501
:param response_modes_supported: The response_modes_supported of this WellKnown. # noqa: E501
:type: list[str]
"""
self._response_modes_supported = response_modes_supported
@property
def response_types_supported(self):
"""Gets the response_types_supported of this WellKnown. # noqa: E501
JSON array containing a list of the OAuth 2.0 response_type values that this OP supports. Dynamic OpenID Providers MUST support the code, id_token, and the token id_token Response Type values. # noqa: E501
:return: The response_types_supported of this WellKnown. # noqa: E501
:rtype: list[str]
"""
return self._response_types_supported
@response_types_supported.setter
def response_types_supported(self, response_types_supported):
"""Sets the response_types_supported of this WellKnown.
JSON array containing a list of the OAuth 2.0 response_type values that this OP supports. Dynamic OpenID Providers MUST support the code, id_token, and the token id_token Response Type values. # noqa: E501
:param response_types_supported: The response_types_supported of this WellKnown. # noqa: E501
:type: list[str]
"""
if self.local_vars_configuration.client_side_validation and response_types_supported is None: # noqa: E501
raise ValueError("Invalid value for `response_types_supported`, must not be `None`") # noqa: E501
self._response_types_supported = response_types_supported
@property
def revocation_endpoint(self):
"""Gets the revocation_endpoint of this WellKnown. # noqa: E501
URL of the authorization server's OAuth 2.0 revocation endpoint. # noqa: E501
:return: The revocation_endpoint of this WellKnown. # noqa: E501
:rtype: str
"""
return self._revocation_endpoint
@revocation_endpoint.setter
def revocation_endpoint(self, revocation_endpoint):
"""Sets the revocation_endpoint of this WellKnown.
URL of the authorization server's OAuth 2.0 revocation endpoint. # noqa: E501
:param revocation_endpoint: The revocation_endpoint of this WellKnown. # noqa: E501
:type: str
"""
self._revocation_endpoint = revocation_endpoint
@property
def scopes_supported(self):
"""Gets the scopes_supported of this WellKnown. # noqa: E501
        JSON array containing a list of the OAuth 2.0 [RFC6749] scope values that this server supports. The server MUST support the openid scope value. Servers MAY choose not to advertise some supported scope values even when this parameter is used. # noqa: E501
:return: The scopes_supported of this WellKnown. # noqa: E501
:rtype: list[str]
"""
return self._scopes_supported
@scopes_supported.setter
def scopes_supported(self, scopes_supported):
"""Sets the scopes_supported of this WellKnown.
        JSON array containing a list of the OAuth 2.0 [RFC6749] scope values that this server supports. The server MUST support the openid scope value. Servers MAY choose not to advertise some supported scope values even when this parameter is used. # noqa: E501
:param scopes_supported: The scopes_supported of this WellKnown. # noqa: E501
:type: list[str]
"""
self._scopes_supported = scopes_supported
@property
def subject_types_supported(self):
"""Gets the subject_types_supported of this WellKnown. # noqa: E501
JSON array containing a list of the Subject Identifier types that this OP supports. Valid types include pairwise and public. # noqa: E501
:return: The subject_types_supported of this WellKnown. # noqa: E501
:rtype: list[str]
"""
return self._subject_types_supported
@subject_types_supported.setter
def subject_types_supported(self, subject_types_supported):
"""Sets the subject_types_supported of this WellKnown.
JSON array containing a list of the Subject Identifier types that this OP supports. Valid types include pairwise and public. # noqa: E501
:param subject_types_supported: The subject_types_supported of this WellKnown. # noqa: E501
:type: list[str]
"""
if self.local_vars_configuration.client_side_validation and subject_types_supported is None: # noqa: E501
raise ValueError("Invalid value for `subject_types_supported`, must not be `None`") # noqa: E501
self._subject_types_supported = subject_types_supported
@property
def token_endpoint(self):
"""Gets the token_endpoint of this WellKnown. # noqa: E501
URL of the OP's OAuth 2.0 Token Endpoint # noqa: E501
:return: The token_endpoint of this WellKnown. # noqa: E501
:rtype: str
"""
return self._token_endpoint
@token_endpoint.setter
def token_endpoint(self, token_endpoint):
"""Sets the token_endpoint of this WellKnown.
URL of the OP's OAuth 2.0 Token Endpoint # noqa: E501
:param token_endpoint: The token_endpoint of this WellKnown. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and token_endpoint is None: # noqa: E501
raise ValueError("Invalid value for `token_endpoint`, must not be `None`") # noqa: E501
self._token_endpoint = token_endpoint
@property
def token_endpoint_auth_methods_supported(self):
"""Gets the token_endpoint_auth_methods_supported of this WellKnown. # noqa: E501
JSON array containing a list of Client Authentication methods supported by this Token Endpoint. The options are client_secret_post, client_secret_basic, client_secret_jwt, and private_key_jwt, as described in Section 9 of OpenID Connect Core 1.0 # noqa: E501
:return: The token_endpoint_auth_methods_supported of this WellKnown. # noqa: E501
:rtype: list[str]
"""
return self._token_endpoint_auth_methods_supported
@token_endpoint_auth_methods_supported.setter
def token_endpoint_auth_methods_supported(self, token_endpoint_auth_methods_supported):
"""Sets the token_endpoint_auth_methods_supported of this WellKnown.
JSON array containing a list of Client Authentication methods supported by this Token Endpoint. The options are client_secret_post, client_secret_basic, client_secret_jwt, and private_key_jwt, as described in Section 9 of OpenID Connect Core 1.0 # noqa: E501
:param token_endpoint_auth_methods_supported: The token_endpoint_auth_methods_supported of this WellKnown. # noqa: E501
:type: list[str]
"""
self._token_endpoint_auth_methods_supported = token_endpoint_auth_methods_supported
@property
def userinfo_endpoint(self):
"""Gets the userinfo_endpoint of this WellKnown. # noqa: E501
URL of the OP's UserInfo Endpoint. # noqa: E501
:return: The userinfo_endpoint of this WellKnown. # noqa: E501
:rtype: str
"""
return self._userinfo_endpoint
@userinfo_endpoint.setter
def userinfo_endpoint(self, userinfo_endpoint):
"""Sets the userinfo_endpoint of this WellKnown.
URL of the OP's UserInfo Endpoint. # noqa: E501
:param userinfo_endpoint: The userinfo_endpoint of this WellKnown. # noqa: E501
:type: str
"""
self._userinfo_endpoint = userinfo_endpoint
@property
def userinfo_signing_alg_values_supported(self):
"""Gets the userinfo_signing_alg_values_supported of this WellKnown. # noqa: E501
JSON array containing a list of the JWS [JWS] signing algorithms (alg values) [JWA] supported by the UserInfo Endpoint to encode the Claims in a JWT [JWT]. # noqa: E501
:return: The userinfo_signing_alg_values_supported of this WellKnown. # noqa: E501
:rtype: list[str]
"""
return self._userinfo_signing_alg_values_supported
@userinfo_signing_alg_values_supported.setter
def userinfo_signing_alg_values_supported(self, userinfo_signing_alg_values_supported):
"""Sets the userinfo_signing_alg_values_supported of this WellKnown.
JSON array containing a list of the JWS [JWS] signing algorithms (alg values) [JWA] supported by the UserInfo Endpoint to encode the Claims in a JWT [JWT]. # noqa: E501
:param userinfo_signing_alg_values_supported: The userinfo_signing_alg_values_supported of this WellKnown. # noqa: E501
:type: list[str]
"""
self._userinfo_signing_alg_values_supported = userinfo_signing_alg_values_supported
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WellKnown):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, WellKnown):
return True
return self.to_dict() != other.to_dict()
| 0.604983 | 0.067948 |
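The model above is a standard openapi-generator style class for the OIDC discovery document. A minimal usage sketch follows; the import path is hypothetical (it depends on how the client package was generated), and it assumes the generated __init__ accepts the documented fields as keyword arguments and pre-initialises the optional attributes, which is the usual openapi-generator convention rather than something shown in this file.

# Hypothetical usage sketch for the generated WellKnown model.
# The import path below is an assumption; adjust it to the generated package layout.
from openapi_client.models.well_known import WellKnown  # hypothetical path

wk = WellKnown(
    issuer="https://auth.example.com/",
    authorization_endpoint="https://auth.example.com/oauth2/auth",
    token_endpoint="https://auth.example.com/oauth2/token",
    jwks_uri="https://auth.example.com/.well-known/jwks.json",
    response_types_supported=["code", "id_token", "token id_token"],
    subject_types_supported=["public"],
    id_token_signing_alg_values_supported=["RS256"],
)

# to_dict() recurses into nested models and lists, producing plain JSON-serialisable data;
# __repr__/to_str() pretty-print that dict, and __eq__ compares two instances via to_dict().
discovery = wk.to_dict()
print(discovery["issuer"])
print(wk)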
import models
from aiogram.types import Message, ChatType, InlineKeyboardMarkup, InlineKeyboardButton, CallbackQuery
from auth import dp
from utils.general import save_message_to_storage_channel
from utils import logger
@dp.message_handler(commands=['add', 'addr'], chat_type=[ChatType.PRIVATE,
ChatType.GROUP,
ChatType.SUPERGROUP,
ChatType.CHANNEL])
@logger.log_msg
async def add_command(message: Message):
target_message = message.reply_to_message
if not target_message:
await message.answer('Команда должна быть ответом на сообщение')
return
trigger = message.get_args()
if not trigger:
        await message.answer('Триггер после команды не указан')
return
    if len(trigger.split(' ')) > 1:
        await message.answer('Триггер должен состоять из одного слова')
        return
text = target_message.text or target_message.caption or None
is_reply = message.get_command(pure=True) == 'addr'
is_command_inline = False
    if message.chat.type == ChatType.PRIVATE: # check the restrictions for inline commands
is_command_inline = True
if '@' in trigger:
            await message.answer('Нельзя сохранять символ @ в команду')
return
        if target_message.sticker or target_message.video_note:
await message.answer('Данный тип медиа не поддерживается')
return
media = None
if target_message.photo \
or target_message.document \
or target_message.audio \
or target_message.voice \
or target_message.video \
or target_message.video_note \
or target_message.animation \
or target_message.sticker:
media_message = await save_message_to_storage_channel(message.reply_to_message)
media = media_message.audio \
or media_message.voice \
or media_message.document \
or media_message.photo \
or media_message.video \
or media_message.video_note \
or media_message.animation \
or media_message.sticker
models.save_command(trigger=trigger,
text=text,
media=media,
created_by=message.from_user,
to_chat=message.chat,
is_inline=is_command_inline,
is_reply=is_reply)
await message.reply(text='Команда добавлена.')
@dp.message_handler(commands='del', chat_type=[ChatType.PRIVATE,
ChatType.GROUP,
ChatType.SUPERGROUP,
ChatType.CHANNEL])
@logger.log_msg
async def delete_command(message: Message):
trigger = message.get_args()
if not trigger:
        await message.answer('Триггер после команды не указан')
return
try:
models.delete_command(trigger=trigger, to_chat=message.chat)
await message.reply(text='Команда удалена.')
    except Exception:
        await message.reply(text='Такой команды не существует')
@dp.message_handler(commands='mycom', chat_type=ChatType.PRIVATE)
async def my_commands(message: Message):
result = models.get_mycommands(created_by=message.from_user, to_chat=message.chat)
mycom = ''
for command in result:
mycom += '/' + command.trigger + ' '
await message.answer(text='Вот список твоих команд:\n' + str(mycom))
@dp.message_handler(commands='mycom', chat_type=[ChatType.CHANNEL,
ChatType.GROUP,
ChatType.SUPERGROUP])
async def chat_commands(message: Message):
result = models.get_chatcommands(to_chat=message.chat)
mycom = ''
for command in result:
mycom += '/' + command.trigger + ' '
await message.answer(text='Вот список команд чата:\n' + str(mycom))
@dp.message_handler(commands='get_id', chat_type=ChatType.GROUP)
async def get_id(message: Message):
await message.answer(text=message.chat.id)
@dp.message_handler(commands='premium', chat_type=[ChatType.PRIVATE])
async def premium(message: Message):
text = 'Вы можете купить премиум подписку по цене 50 руб. в месяц и снять ограничения для своего аккаунта \n' \
           'или чата на время действия подписки. Премиум покупается отдельно для каждого чата, в котором есть бот.'
keyboard = InlineKeyboardMarkup()
buy_for_chat_button = InlineKeyboardButton('Премиум для чата', callback_data='premium_chat')
    buy_for_account_button = InlineKeyboardButton('Премиум для аккаунта', callback_data='premium_account')
    keyboard.add(buy_for_account_button, buy_for_chat_button)
await message.answer(text, reply_markup=keyboard)
@dp.callback_query_handler(lambda query: query.data and query.data == 'premium_chat')
async def premium_chat(callback_query: CallbackQuery):
# todo
    await callback_query.answer() # prevents the button from keeping its loading indicator
@dp.callback_query_handler(lambda query: query.data and query.data == 'premium_account')
async def premium_account(callback_query: CallbackQuery):
# todo
await callback_query.answer()
|
handlers/command_handlers/commands.py
|
import models
from aiogram.types import Message, ChatType, InlineKeyboardMarkup, InlineKeyboardButton, CallbackQuery
from auth import dp
from utils.general import save_message_to_storage_channel
from utils import logger
@dp.message_handler(commands=['add', 'addr'], chat_type=[ChatType.PRIVATE,
ChatType.GROUP,
ChatType.SUPERGROUP,
ChatType.CHANNEL])
@logger.log_msg
async def add_command(message: Message):
target_message = message.reply_to_message
if not target_message:
await message.answer('Команда должна быть ответом на сообщение')
return
trigger = message.get_args()
if not trigger:
        await message.answer('Триггер после команды не указан')
return
    if len(trigger.split(' ')) > 1:
        await message.answer('Триггер должен состоять из одного слова')
        return
text = target_message.text or target_message.caption or None
is_reply = message.get_command(pure=True) == 'addr'
is_command_inline = False
    if message.chat.type == ChatType.PRIVATE: # check the restrictions for inline commands
is_command_inline = True
if '@' in trigger:
            await message.answer('Нельзя сохранять символ @ в команду')
return
        if target_message.sticker or target_message.video_note:
await message.answer('Данный тип медиа не поддерживается')
return
media = None
if target_message.photo \
or target_message.document \
or target_message.audio \
or target_message.voice \
or target_message.video \
or target_message.video_note \
or target_message.animation \
or target_message.sticker:
media_message = await save_message_to_storage_channel(message.reply_to_message)
media = media_message.audio \
or media_message.voice \
or media_message.document \
or media_message.photo \
or media_message.video \
or media_message.video_note \
or media_message.animation \
or media_message.sticker
models.save_command(trigger=trigger,
text=text,
media=media,
created_by=message.from_user,
to_chat=message.chat,
is_inline=is_command_inline,
is_reply=is_reply)
await message.reply(text='Команда добавлена.')
@dp.message_handler(commands='del', chat_type=[ChatType.PRIVATE,
ChatType.GROUP,
ChatType.SUPERGROUP,
ChatType.CHANNEL])
@logger.log_msg
async def delete_command(message: Message):
trigger = message.get_args()
if not trigger:
        await message.answer('Триггер после команды не указан')
return
try:
models.delete_command(trigger=trigger, to_chat=message.chat)
await message.reply(text='Команда удалена.')
    except Exception:
        await message.reply(text='Такой команды не существует')
@dp.message_handler(commands='mycom', chat_type=ChatType.PRIVATE)
async def my_commands(message: Message):
result = models.get_mycommands(created_by=message.from_user, to_chat=message.chat)
mycom = ''
for command in result:
mycom += '/' + command.trigger + ' '
await message.answer(text='Вот список твоих команд:\n' + str(mycom))
@dp.message_handler(commands='mycom', chat_type=[ChatType.CHANNEL,
ChatType.GROUP,
ChatType.SUPERGROUP])
async def chat_commands(message: Message):
result = models.get_chatcommands(to_chat=message.chat)
mycom = ''
for command in result:
mycom += '/' + command.trigger + ' '
await message.answer(text='Вот список команд чата:\n' + str(mycom))
@dp.message_handler(commands='get_id', chat_type=ChatType.GROUP)
async def get_id(message: Message):
await message.answer(text=message.chat.id)
@dp.message_handler(commands='premium', chat_type=[ChatType.PRIVATE])
async def premium(message: Message):
text = 'Вы можете купить премиум подписку по цене 50 руб. в месяц и снять ограничения для своего аккаунта \n' \
           'или чата на время действия подписки. Премиум покупается отдельно для каждого чата, в котором есть бот.'
keyboard = InlineKeyboardMarkup()
buy_for_chat_button = InlineKeyboardButton('Премиум для чата', callback_data='premium_chat')
    buy_for_account_button = InlineKeyboardButton('Премиум для аккаунта', callback_data='premium_account')
    keyboard.add(buy_for_account_button, buy_for_chat_button)
await message.answer(text, reply_markup=keyboard)
@dp.callback_query_handler(lambda query: query.data and query.data == 'premium_chat')
async def premium_chat(callback_query: CallbackQuery):
# todo
    await callback_query.answer() # prevents the button from keeping its loading indicator
@dp.callback_query_handler(lambda query: query.data and query.data == 'premium_account')
async def premium_account(callback_query: CallbackQuery):
# todo
await callback_query.answer()
| 0.130175 | 0.12075 |
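These handlers only attach themselves to the dispatcher imported from auth; nothing in the file starts the bot. A minimal polling entry point, sketched under the assumption that auth builds a standard aiogram 2.x Bot/Dispatcher pair (that module is not shown here), could look like this:

# Hypothetical entry point; assumes auth.py creates the aiogram Bot and the
# Dispatcher (dp) that the handlers above register against.
from aiogram.utils import executor

from auth import dp
import handlers.command_handlers.commands  # noqa: F401  importing the module registers the handlers

if __name__ == '__main__':
    # start_polling blocks and routes incoming updates to the registered handlers
    executor.start_polling(dp, skip_updates=True)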
import torch
from torch import nn
from pytorch_widedeep.wdtypes import * # noqa: F403
from pytorch_widedeep.models.tab_mlp import MLP
from pytorch_widedeep.models.transformers._encoders import SaintEncoder
from pytorch_widedeep.models.transformers._embeddings_layers import (
CatAndContEmbeddings,
)
class SAINT(nn.Module):
r"""Defines a ``SAINT`` model
(`arXiv:2106.01342 <https://arxiv.org/abs/2106.01342>`_) that can be used
as the ``deeptabular`` component of a Wide & Deep model.
Parameters
----------
column_idx: Dict
Dict containing the index of the columns that will be passed through
the model. Required to slice the tensors. e.g.
{'education': 0, 'relationship': 1, 'workclass': 2, ...}
embed_input: List
List of Tuples with the column name and number of unique values
e.g. [('education', 11), ...]
embed_dropout: float, default = 0.1
Dropout to be applied to the embeddings matrix
full_embed_dropout: bool, default = False
Boolean indicating if an entire embedding (i.e. the representation of
one column) will be dropped in the batch. See:
:obj:`pytorch_widedeep.models.transformers._layers.FullEmbeddingDropout`.
If ``full_embed_dropout = True``, ``embed_dropout`` is ignored.
shared_embed: bool, default = False
The idea behind ``shared_embed`` is described in the Appendix A in the
`TabTransformer paper <https://arxiv.org/abs/2012.06678>`_: `'The
goal of having column embedding is to enable the model to distinguish
the classes in one column from those in the other columns'`. In other
words, the idea is to let the model learn which column is embedded
at the time.
add_shared_embed: bool, default = False
The two embedding sharing strategies are: 1) add the shared embeddings
to the column embeddings or 2) to replace the first
``frac_shared_embed`` with the shared embeddings.
See :obj:`pytorch_widedeep.models.transformers._layers.SharedEmbeddings`
frac_shared_embed: float, default = 0.25
The fraction of embeddings that will be shared (if ``add_shared_embed
= False``) by all the different categories for one particular
column.
continuous_cols: List, Optional, default = None
List with the name of the numeric (aka continuous) columns
embed_continuous_activation: str, default = None
String indicating the activation function to be applied to the
continuous embeddings, if any. ``tanh``, ``relu``, ``leaky_relu`` and
``gelu`` are supported.
cont_norm_layer: str, default = None,
Type of normalization layer applied to the continuous features before
they are embedded. Options are: ``layernorm``, ``batchnorm`` or
``None``.
input_dim: int, default = 32
The so-called *dimension of the model*. In general is the number of
embeddings used to encode the categorical and/or continuous columns
n_heads: int, default = 8
Number of attention heads per Transformer block
use_bias: bool, default = False
Boolean indicating whether or not to use bias in the Q, K, and V
projection layers
n_blocks: int, default = 2
Number of SAINT-Transformer blocks. 1 in the paper.
    attn_dropout: float, default = 0.1
Dropout that will be applied to the Multi-Head Attention column and
row layers
    ff_dropout: float, default = 0.2
Dropout that will be applied to the FeedForward network
transformer_activation: str, default = "gelu"
Transformer Encoder activation function. ``tanh``, ``relu``,
``leaky_relu``, ``gelu``, ``geglu`` and ``reglu`` are supported
mlp_hidden_dims: List, Optional, default = None
MLP hidden dimensions. If not provided it will default to ``[l, 4*l,
2*l]`` where ``l`` is the MLP input dimension
mlp_activation: str, default = "relu"
MLP activation function. ``tanh``, ``relu``, ``leaky_relu`` and
``gelu`` are supported
mlp_dropout: float, default = 0.1
Dropout that will be applied to the final MLP
mlp_batchnorm: bool, default = False
Boolean indicating whether or not to apply batch normalization to the
dense layers
mlp_batchnorm_last: bool, default = False
Boolean indicating whether or not to apply batch normalization to the
last of the dense layers
    mlp_linear_first: bool, default = True
        Boolean indicating the order of the operations in the dense layer. If
        ``True: [LIN -> ACT -> BN -> DP]``. If ``False: [BN -> DP -> LIN ->
        ACT]``
Attributes
----------
cat_and_cont_embed: ``nn.Module``
This is the module that processes the categorical and continuous columns
transformer_blks: ``nn.Sequential``
Sequence of SAINT-Transformer blocks
transformer_mlp: ``nn.Module``
MLP component in the model
output_dim: int
The output dimension of the model. This is a required attribute
        necessary to build the WideDeep class
Example
--------
>>> import torch
>>> from pytorch_widedeep.models import SAINT
>>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
>>> colnames = ['a', 'b', 'c', 'd', 'e']
>>> embed_input = [(u,i) for u,i in zip(colnames[:4], [4]*4)]
>>> continuous_cols = ['e']
>>> column_idx = {k:v for v,k in enumerate(colnames)}
>>> model = SAINT(column_idx=column_idx, embed_input=embed_input, continuous_cols=continuous_cols)
>>> out = model(X_tab)
"""
def __init__(
self,
column_idx: Dict[str, int],
embed_input: List[Tuple[str, int]],
embed_dropout: float = 0.1,
full_embed_dropout: bool = False,
shared_embed: bool = False,
add_shared_embed: bool = False,
frac_shared_embed: float = 0.25,
continuous_cols: Optional[List[str]] = None,
embed_continuous_activation: str = None,
cont_norm_layer: str = None,
input_dim: int = 32,
use_bias: bool = False,
n_heads: int = 8,
n_blocks: int = 2,
attn_dropout: float = 0.1,
ff_dropout: float = 0.2,
transformer_activation: str = "gelu",
mlp_hidden_dims: Optional[List[int]] = None,
mlp_activation: str = "relu",
mlp_dropout: float = 0.1,
mlp_batchnorm: bool = False,
mlp_batchnorm_last: bool = False,
mlp_linear_first: bool = True,
):
super(SAINT, self).__init__()
self.column_idx = column_idx
self.embed_input = embed_input
self.embed_dropout = embed_dropout
self.full_embed_dropout = full_embed_dropout
self.shared_embed = shared_embed
self.add_shared_embed = add_shared_embed
self.frac_shared_embed = frac_shared_embed
self.continuous_cols = continuous_cols
self.embed_continuous_activation = embed_continuous_activation
self.cont_norm_layer = cont_norm_layer
self.input_dim = input_dim
self.use_bias = use_bias
self.n_heads = n_heads
self.n_blocks = n_blocks
self.attn_dropout = attn_dropout
self.ff_dropout = ff_dropout
self.transformer_activation = transformer_activation
self.mlp_hidden_dims = mlp_hidden_dims
self.mlp_activation = mlp_activation
self.mlp_batchnorm = mlp_batchnorm
self.mlp_batchnorm_last = mlp_batchnorm_last
self.mlp_linear_first = mlp_linear_first
self.with_cls_token = "cls_token" in column_idx
self.n_cat = len(embed_input) if embed_input is not None else 0
self.n_cont = len(continuous_cols) if continuous_cols is not None else 0
self.n_feats = self.n_cat + self.n_cont
if self.n_cont and not self.n_cat and not self.embed_continuous:
raise ValueError(
"If only continuous features are used 'embed_continuous' must be set to 'True'"
)
self.cat_and_cont_embed = CatAndContEmbeddings(
input_dim,
column_idx,
embed_input,
embed_dropout,
full_embed_dropout,
shared_embed,
add_shared_embed,
frac_shared_embed,
False, # use_embed_bias
continuous_cols,
True, # embed_continuous,
embed_continuous_activation,
True, # use_cont_bias
cont_norm_layer,
)
self.transformer_blks = nn.Sequential()
for i in range(n_blocks):
self.transformer_blks.add_module(
"saint_block" + str(i),
SaintEncoder(
input_dim,
n_heads,
use_bias,
attn_dropout,
ff_dropout,
transformer_activation,
self.n_feats,
),
)
attn_output_dim = (
self.input_dim if self.with_cls_token else self.n_feats * self.input_dim
)
if not mlp_hidden_dims:
mlp_hidden_dims = [
attn_output_dim,
attn_output_dim * 4,
attn_output_dim * 2,
]
else:
assert mlp_hidden_dims[0] == attn_output_dim, (
f"The input dim of the MLP must be {attn_output_dim}. "
f"Got {mlp_hidden_dims[0]} instead"
)
self.transformer_mlp = MLP(
mlp_hidden_dims,
mlp_activation,
mlp_dropout,
mlp_batchnorm,
mlp_batchnorm_last,
mlp_linear_first,
)
# the output_dim attribute will be used as input_dim when "merging" the models
self.output_dim = mlp_hidden_dims[-1]
def forward(self, X: Tensor) -> Tensor:
x_cat, x_cont = self.cat_and_cont_embed(X)
if x_cat is not None:
x = x_cat
if x_cont is not None:
x = torch.cat([x, x_cont], 1) if x_cat is not None else x_cont
x = self.transformer_blks(x)
if self.with_cls_token:
x = x[:, 0, :]
else:
x = x.flatten(1)
return self.transformer_mlp(x)
@property
def attention_weights(self) -> List:
r"""List with the attention weights. Each element of the list is a tuple
where the first and the second elements are the column and row
attention weights respectively
The shape of the attention weights is:
- column attention: :math:`(N, H, F, F)`
- row attention: :math:`(1, H, N, N)`
where *N* is the batch size, *H* is the number of heads and *F* is the
number of features/columns in the dataset
"""
attention_weights = []
for blk in self.transformer_blks:
attention_weights.append(
(blk.col_attn.attn_weights, blk.row_attn.attn_weights)
)
return attention_weights
|
pytorch_widedeep/models/transformers/saint.py
|
import torch
from torch import nn
from pytorch_widedeep.wdtypes import * # noqa: F403
from pytorch_widedeep.models.tab_mlp import MLP
from pytorch_widedeep.models.transformers._encoders import SaintEncoder
from pytorch_widedeep.models.transformers._embeddings_layers import (
CatAndContEmbeddings,
)
class SAINT(nn.Module):
r"""Defines a ``SAINT`` model
(`arXiv:2106.01342 <https://arxiv.org/abs/2106.01342>`_) that can be used
as the ``deeptabular`` component of a Wide & Deep model.
Parameters
----------
column_idx: Dict
Dict containing the index of the columns that will be passed through
the model. Required to slice the tensors. e.g.
{'education': 0, 'relationship': 1, 'workclass': 2, ...}
embed_input: List
List of Tuples with the column name and number of unique values
e.g. [('education', 11), ...]
embed_dropout: float, default = 0.1
Dropout to be applied to the embeddings matrix
full_embed_dropout: bool, default = False
Boolean indicating if an entire embedding (i.e. the representation of
one column) will be dropped in the batch. See:
:obj:`pytorch_widedeep.models.transformers._layers.FullEmbeddingDropout`.
If ``full_embed_dropout = True``, ``embed_dropout`` is ignored.
shared_embed: bool, default = False
The idea behind ``shared_embed`` is described in the Appendix A in the
`TabTransformer paper <https://arxiv.org/abs/2012.06678>`_: `'The
goal of having column embedding is to enable the model to distinguish
the classes in one column from those in the other columns'`. In other
words, the idea is to let the model learn which column is embedded
at the time.
add_shared_embed: bool, default = False
The two embedding sharing strategies are: 1) add the shared embeddings
to the column embeddings or 2) to replace the first
``frac_shared_embed`` with the shared embeddings.
See :obj:`pytorch_widedeep.models.transformers._layers.SharedEmbeddings`
frac_shared_embed: float, default = 0.25
The fraction of embeddings that will be shared (if ``add_shared_embed
= False``) by all the different categories for one particular
column.
continuous_cols: List, Optional, default = None
List with the name of the numeric (aka continuous) columns
embed_continuous_activation: str, default = None
String indicating the activation function to be applied to the
continuous embeddings, if any. ``tanh``, ``relu``, ``leaky_relu`` and
``gelu`` are supported.
cont_norm_layer: str, default = None,
Type of normalization layer applied to the continuous features before
they are embedded. Options are: ``layernorm``, ``batchnorm`` or
``None``.
input_dim: int, default = 32
The so-called *dimension of the model*. In general is the number of
embeddings used to encode the categorical and/or continuous columns
n_heads: int, default = 8
Number of attention heads per Transformer block
use_bias: bool, default = False
Boolean indicating whether or not to use bias in the Q, K, and V
projection layers
n_blocks: int, default = 2
Number of SAINT-Transformer blocks. 1 in the paper.
    attn_dropout: float, default = 0.1
Dropout that will be applied to the Multi-Head Attention column and
row layers
    ff_dropout: float, default = 0.2
Dropout that will be applied to the FeedForward network
transformer_activation: str, default = "gelu"
Transformer Encoder activation function. ``tanh``, ``relu``,
``leaky_relu``, ``gelu``, ``geglu`` and ``reglu`` are supported
mlp_hidden_dims: List, Optional, default = None
MLP hidden dimensions. If not provided it will default to ``[l, 4*l,
2*l]`` where ``l`` is the MLP input dimension
mlp_activation: str, default = "relu"
MLP activation function. ``tanh``, ``relu``, ``leaky_relu`` and
``gelu`` are supported
mlp_dropout: float, default = 0.1
Dropout that will be applied to the final MLP
mlp_batchnorm: bool, default = False
Boolean indicating whether or not to apply batch normalization to the
dense layers
mlp_batchnorm_last: bool, default = False
Boolean indicating whether or not to apply batch normalization to the
last of the dense layers
    mlp_linear_first: bool, default = True
        Boolean indicating the order of the operations in the dense layer. If
        ``True: [LIN -> ACT -> BN -> DP]``. If ``False: [BN -> DP -> LIN ->
        ACT]``
Attributes
----------
cat_and_cont_embed: ``nn.Module``
This is the module that processes the categorical and continuous columns
transformer_blks: ``nn.Sequential``
Sequence of SAINT-Transformer blocks
transformer_mlp: ``nn.Module``
MLP component in the model
output_dim: int
The output dimension of the model. This is a required attribute
        necessary to build the WideDeep class
Example
--------
>>> import torch
>>> from pytorch_widedeep.models import SAINT
>>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
>>> colnames = ['a', 'b', 'c', 'd', 'e']
>>> embed_input = [(u,i) for u,i in zip(colnames[:4], [4]*4)]
>>> continuous_cols = ['e']
>>> column_idx = {k:v for v,k in enumerate(colnames)}
>>> model = SAINT(column_idx=column_idx, embed_input=embed_input, continuous_cols=continuous_cols)
>>> out = model(X_tab)
"""
def __init__(
self,
column_idx: Dict[str, int],
embed_input: List[Tuple[str, int]],
embed_dropout: float = 0.1,
full_embed_dropout: bool = False,
shared_embed: bool = False,
add_shared_embed: bool = False,
frac_shared_embed: float = 0.25,
continuous_cols: Optional[List[str]] = None,
embed_continuous_activation: str = None,
cont_norm_layer: str = None,
input_dim: int = 32,
use_bias: bool = False,
n_heads: int = 8,
n_blocks: int = 2,
attn_dropout: float = 0.1,
ff_dropout: float = 0.2,
transformer_activation: str = "gelu",
mlp_hidden_dims: Optional[List[int]] = None,
mlp_activation: str = "relu",
mlp_dropout: float = 0.1,
mlp_batchnorm: bool = False,
mlp_batchnorm_last: bool = False,
mlp_linear_first: bool = True,
):
super(SAINT, self).__init__()
self.column_idx = column_idx
self.embed_input = embed_input
self.embed_dropout = embed_dropout
self.full_embed_dropout = full_embed_dropout
self.shared_embed = shared_embed
self.add_shared_embed = add_shared_embed
self.frac_shared_embed = frac_shared_embed
self.continuous_cols = continuous_cols
self.embed_continuous_activation = embed_continuous_activation
self.cont_norm_layer = cont_norm_layer
self.input_dim = input_dim
self.use_bias = use_bias
self.n_heads = n_heads
self.n_blocks = n_blocks
self.attn_dropout = attn_dropout
self.ff_dropout = ff_dropout
self.transformer_activation = transformer_activation
self.mlp_hidden_dims = mlp_hidden_dims
self.mlp_activation = mlp_activation
self.mlp_batchnorm = mlp_batchnorm
self.mlp_batchnorm_last = mlp_batchnorm_last
self.mlp_linear_first = mlp_linear_first
self.with_cls_token = "cls_token" in column_idx
self.n_cat = len(embed_input) if embed_input is not None else 0
self.n_cont = len(continuous_cols) if continuous_cols is not None else 0
self.n_feats = self.n_cat + self.n_cont
if self.n_cont and not self.n_cat and not self.embed_continuous:
raise ValueError(
"If only continuous features are used 'embed_continuous' must be set to 'True'"
)
self.cat_and_cont_embed = CatAndContEmbeddings(
input_dim,
column_idx,
embed_input,
embed_dropout,
full_embed_dropout,
shared_embed,
add_shared_embed,
frac_shared_embed,
False, # use_embed_bias
continuous_cols,
True, # embed_continuous,
embed_continuous_activation,
True, # use_cont_bias
cont_norm_layer,
)
self.transformer_blks = nn.Sequential()
for i in range(n_blocks):
self.transformer_blks.add_module(
"saint_block" + str(i),
SaintEncoder(
input_dim,
n_heads,
use_bias,
attn_dropout,
ff_dropout,
transformer_activation,
self.n_feats,
),
)
attn_output_dim = (
self.input_dim if self.with_cls_token else self.n_feats * self.input_dim
)
if not mlp_hidden_dims:
mlp_hidden_dims = [
attn_output_dim,
attn_output_dim * 4,
attn_output_dim * 2,
]
else:
assert mlp_hidden_dims[0] == attn_output_dim, (
f"The input dim of the MLP must be {attn_output_dim}. "
f"Got {mlp_hidden_dims[0]} instead"
)
self.transformer_mlp = MLP(
mlp_hidden_dims,
mlp_activation,
mlp_dropout,
mlp_batchnorm,
mlp_batchnorm_last,
mlp_linear_first,
)
# the output_dim attribute will be used as input_dim when "merging" the models
self.output_dim = mlp_hidden_dims[-1]
def forward(self, X: Tensor) -> Tensor:
x_cat, x_cont = self.cat_and_cont_embed(X)
if x_cat is not None:
x = x_cat
if x_cont is not None:
x = torch.cat([x, x_cont], 1) if x_cat is not None else x_cont
x = self.transformer_blks(x)
if self.with_cls_token:
x = x[:, 0, :]
else:
x = x.flatten(1)
return self.transformer_mlp(x)
@property
def attention_weights(self) -> List:
r"""List with the attention weights. Each element of the list is a tuple
where the first and the second elements are the column and row
attention weights respectively
The shape of the attention weights is:
- column attention: :math:`(N, H, F, F)`
- row attention: :math:`(1, H, N, N)`
where *N* is the batch size, *H* is the number of heads and *F* is the
number of features/columns in the dataset
"""
attention_weights = []
for blk in self.transformer_blks:
attention_weights.append(
(blk.col_attn.attn_weights, blk.row_attn.attn_weights)
)
return attention_weights
| 0.967763 | 0.761317 |
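A short sketch of how this component is typically consumed, extending the docstring example: the SAINT encoder becomes the deeptabular part of a WideDeep model, and the per-block attention matrices are available afterwards through the attention_weights property. The toy data below is invented for illustration.

import torch
from pytorch_widedeep.models import SAINT, WideDeep

# toy batch: 4 categorical columns with 4 levels each plus one continuous column
X_tab = torch.cat((torch.empty(32, 4).random_(4), torch.rand(32, 1)), axis=1)
colnames = ["a", "b", "c", "d", "e"]
embed_input = [(c, 4) for c in colnames[:4]]
column_idx = {k: v for v, k in enumerate(colnames)}

saint = SAINT(
    column_idx=column_idx,
    embed_input=embed_input,
    continuous_cols=["e"],
    n_blocks=2,
    n_heads=8,
)

# WideDeep adds the final prediction head on top of saint.output_dim
model = WideDeep(deeptabular=saint)
out = model({"deeptabular": X_tab})  # (32, 1) with the default single-output head

# after a forward pass each SAINT block exposes its (column, row) attention weights
col_attn, row_attn = saint.attention_weights[0]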
import indigo
import json
from Shelly_Addon import Shelly_Addon
class Shelly_Addon_DHT22(Shelly_Addon):
"""
    The Shelly Temperature Add-on is a sensor that attaches to a host device.
    The host device can be a Shelly 1 or Shelly 1PM.
"""
def __init__(self, device):
Shelly_Addon.__init__(self, device)
def getSubscriptions(self):
"""
Default method to return a list of topics that the device subscribes to.
:return: A list.
"""
address = self.getAddress()
if address is None:
return []
else:
return [
"{}/online".format(address),
"{}/ext_temperature/{}".format(address, self.getProbeNumber()),
"{}/ext_temperatures".format(address),
"{}/ext_humidity/{}".format(address, self.getProbeNumber()),
"{}/ext_humidities".format(address)
]
def handleMessage(self, topic, payload):
"""
        This method is called when a message comes in and matches one of this device's subscriptions.
        :param topic: The topic of the message.
        :param payload: The payload of the message.
:return: None
"""
if topic == "{}/ext_temperature/{}".format(self.getAddress(), self.getProbeNumber()):
# For some reason, the shelly reports the temperature with a preceding colon...
temperature = payload
try:
self.setTemperature(float(temperature))
except ValueError:
self.logger.error(u"Unable to convert value of \"{}\" into a float!".format(payload))
elif topic == "{}/ext_humidity/{}".format(self.getAddress(), self.getProbeNumber()):
decimals = int(self.device.pluginProps.get('humidity-decimals', 1))
offset = 0
try:
offset = float(self.device.pluginProps.get('humidity-offset', 0))
except ValueError:
self.logger.error(u"Unable to convert offset of \"{}\" into a float!".format(self.device.pluginProps.get('humidity-offset', 0)))
try:
humidity = float(payload) + offset
self.device.updateStateOnServer(key="humidity", value=humidity, uiValue='{:.{}f}%'.format(humidity, decimals), decimalPlaces=decimals)
except ValueError:
self.logger.error(u"Unable to convert value of \"{}\" into a float!".format(payload))
elif topic == "{}/ext_temperatures".format(self.getAddress()) and len(self.getProbeNumber()) > 1:
try:
data = json.loads(payload)
for sensor in data.values():
if sensor['hwID'] == self.getProbeNumber():
value = sensor['tC']
self.handleMessage("{}/ext_temperature/{}".format(self.getAddress(), self.getProbeNumber()), value)
break
except ValueError:
self.logger.warn("Unable to convert payload to json: {}".format(payload))
elif topic == "{}/ext_humidities".format(self.getAddress()) and len(self.getProbeNumber()) > 1:
try:
data = json.loads(payload)
for sensor in data.values():
if sensor['hwID'] == self.getProbeNumber():
value = sensor['hum']
self.handleMessage("{}/ext_humidity/{}".format(self.getAddress(), self.getProbeNumber()), value)
break
except ValueError:
self.logger.warn("Unable to convert payload to json: {}".format(payload))
elif topic == "{}/online".format(self.getAddress()):
Shelly_Addon.handleMessage(self, topic, payload)
# Set the display state after data changed
temp = self.device.states['temperature']
temp_decimals = int(self.device.pluginProps.get('temp-decimals', 1))
temp_units = self.device.pluginProps.get('temp-units', 'F')[-1]
humidity = self.device.states['humidity']
humidity_decimals = int(self.device.pluginProps.get('humidity-decimals', 1))
self.device.updateStateOnServer(key="status", value='{:.{}f}°{} / {:.{}f}%'.format(temp, temp_decimals, temp_units, humidity, humidity_decimals))
self.updateStateImage()
def handleAction(self, action):
"""
The method that gets called when an Indigo action takes place.
:param action: The Indigo action.
:return: None
"""
Shelly_Addon.handleAction(self, action)
def getProbeNumber(self):
"""
Getter for the identifier of the probe. For now, a single DHT22 will be on a host device.
:return: The probe number to be used in the topic.
"""
return self.device.pluginProps.get('probe-number', None)
def updateStateImage(self):
"""
Sets the state image based on device states.
:return:
"""
if self.device.states.get('online', True):
self.device.updateStateImageOnServer(indigo.kStateImageSel.TemperatureSensorOn)
else:
self.device.updateStateImageOnServer(indigo.kStateImageSel.TemperatureSensor)
@staticmethod
def validateConfigUI(valuesDict, typeId, devId):
"""
Validates a device config.
:param valuesDict: The values in the Config UI.
:param typeId: the device type as specified in the type attribute.
:param devId: The id of the device (0 if a new device).
:return: Tuple of the form (valid, valuesDict, errors)
"""
isValid, valuesDict, errors = Shelly_Addon.validateConfigUI(valuesDict, typeId, devId)
# Validate that the temperature offset is a valid number
temperature_offset = valuesDict.get("temp-offset", None)
        if temperature_offset:
try:
float(temperature_offset)
except ValueError:
isValid = False
errors["temp-offset"] = u"Unable to convert to a float."
# Validate that the humidity offset is a valid number
humidity_offset = valuesDict.get("humidity-offset", None)
        if humidity_offset:
try:
float(humidity_offset)
except ValueError:
isValid = False
errors["humidity-offset"] = u"Unable to convert to a float."
return isValid, valuesDict, errors
|
ShellyMQTT.indigoPlugin/Contents/Server Plugin/Devices/Addons/Shelly_Addon_DHT22.py
|
import indigo
import json
from Shelly_Addon import Shelly_Addon
class Shelly_Addon_DHT22(Shelly_Addon):
"""
    The Shelly Temperature Add-on is a sensor that attaches to a host device.
    The host device can be a Shelly 1 or Shelly 1PM.
"""
def __init__(self, device):
Shelly_Addon.__init__(self, device)
def getSubscriptions(self):
"""
Default method to return a list of topics that the device subscribes to.
:return: A list.
"""
address = self.getAddress()
if address is None:
return []
else:
return [
"{}/online".format(address),
"{}/ext_temperature/{}".format(address, self.getProbeNumber()),
"{}/ext_temperatures".format(address),
"{}/ext_humidity/{}".format(address, self.getProbeNumber()),
"{}/ext_humidities".format(address)
]
def handleMessage(self, topic, payload):
"""
        This method is called when a message comes in and matches one of this device's subscriptions.
        :param topic: The topic of the message.
        :param payload: The payload of the message.
:return: None
"""
if topic == "{}/ext_temperature/{}".format(self.getAddress(), self.getProbeNumber()):
# For some reason, the shelly reports the temperature with a preceding colon...
temperature = payload
try:
self.setTemperature(float(temperature))
except ValueError:
self.logger.error(u"Unable to convert value of \"{}\" into a float!".format(payload))
elif topic == "{}/ext_humidity/{}".format(self.getAddress(), self.getProbeNumber()):
decimals = int(self.device.pluginProps.get('humidity-decimals', 1))
offset = 0
try:
offset = float(self.device.pluginProps.get('humidity-offset', 0))
except ValueError:
self.logger.error(u"Unable to convert offset of \"{}\" into a float!".format(self.device.pluginProps.get('humidity-offset', 0)))
try:
humidity = float(payload) + offset
self.device.updateStateOnServer(key="humidity", value=humidity, uiValue='{:.{}f}%'.format(humidity, decimals), decimalPlaces=decimals)
except ValueError:
self.logger.error(u"Unable to convert value of \"{}\" into a float!".format(payload))
elif topic == "{}/ext_temperatures".format(self.getAddress()) and len(self.getProbeNumber()) > 1:
try:
data = json.loads(payload)
for sensor in data.values():
if sensor['hwID'] == self.getProbeNumber():
value = sensor['tC']
self.handleMessage("{}/ext_temperature/{}".format(self.getAddress(), self.getProbeNumber()), value)
break
except ValueError:
self.logger.warn("Unable to convert payload to json: {}".format(payload))
elif topic == "{}/ext_humidities".format(self.getAddress()) and len(self.getProbeNumber()) > 1:
try:
data = json.loads(payload)
for sensor in data.values():
if sensor['hwID'] == self.getProbeNumber():
value = sensor['hum']
self.handleMessage("{}/ext_humidity/{}".format(self.getAddress(), self.getProbeNumber()), value)
break
except ValueError:
self.logger.warn("Unable to convert payload to json: {}".format(payload))
elif topic == "{}/online".format(self.getAddress()):
Shelly_Addon.handleMessage(self, topic, payload)
# Set the display state after data changed
temp = self.device.states['temperature']
temp_decimals = int(self.device.pluginProps.get('temp-decimals', 1))
temp_units = self.device.pluginProps.get('temp-units', 'F')[-1]
humidity = self.device.states['humidity']
humidity_decimals = int(self.device.pluginProps.get('humidity-decimals', 1))
self.device.updateStateOnServer(key="status", value='{:.{}f}°{} / {:.{}f}%'.format(temp, temp_decimals, temp_units, humidity, humidity_decimals))
self.updateStateImage()
def handleAction(self, action):
"""
The method that gets called when an Indigo action takes place.
:param action: The Indigo action.
:return: None
"""
Shelly_Addon.handleAction(self, action)
def getProbeNumber(self):
"""
Getter for the identifier of the probe. For now, a single DHT22 will be on a host device.
:return: The probe number to be used in the topic.
"""
return self.device.pluginProps.get('probe-number', None)
def updateStateImage(self):
"""
Sets the state image based on device states.
:return:
"""
if self.device.states.get('online', True):
self.device.updateStateImageOnServer(indigo.kStateImageSel.TemperatureSensorOn)
else:
self.device.updateStateImageOnServer(indigo.kStateImageSel.TemperatureSensor)
@staticmethod
def validateConfigUI(valuesDict, typeId, devId):
"""
Validates a device config.
:param valuesDict: The values in the Config UI.
:param typeId: the device type as specified in the type attribute.
:param devId: The id of the device (0 if a new device).
:return: Tuple of the form (valid, valuesDict, errors)
"""
isValid, valuesDict, errors = Shelly_Addon.validateConfigUI(valuesDict, typeId, devId)
# Validate that the temperature offset is a valid number
temperature_offset = valuesDict.get("temp-offset", "")
if temperature_offset != "":
try:
float(temperature_offset)
except ValueError:
isValid = False
errors["temp-offset"] = u"Unable to convert to a float."
# Validate that the humidity offset is a valid number
humidity_offset = valuesDict.get("humidity-offset", "")
if humidity_offset != "":
try:
float(humidity_offset)
except ValueError:
isValid = False
errors["humidity-offset"] = u"Unable to convert to a float."
return isValid, valuesDict, errors
| 0.747155 | 0.364664 |
import unreal
BL_ColorRamp3 = unreal.AssetToolsHelpers.get_asset_tools().create_asset('BL_ColorRamp3', '/Engine/Functions/BLUI/', unreal.MaterialFunction, unreal.MaterialFunctionFactoryNew())
BL_ColorRamp3.set_editor_property("expose_to_library", True)
BL_ColorRamp3.set_editor_property("library_categories_text", ("BLUI", "Custom", "Utility"))
create_expression = unreal.MaterialEditingLibrary.create_material_expression_in_function
create_connection = unreal.MaterialEditingLibrary.connect_material_expressions
connect_property = unreal.MaterialEditingLibrary.connect_material_property
update_function = unreal.MaterialEditingLibrary.update_material_function
mat_func_separate = unreal.load_asset('/Engine/Functions/Engine_MaterialFunctions02/Utility/BreakOutFloat3Components')
mat_func_combine = unreal.load_asset('/Engine/Functions/Engine_MaterialFunctions02/Utility/MakeFloat3')
### Creating Nodes
Mix = create_expression(BL_ColorRamp3,unreal.MaterialExpressionLinearInterpolate,-340.0, 3620.0)
Reroute01 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionReroute,-1840.0, 3360.0)
Math20 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionDivide,-640.0, 4415.648193359375)
Math19 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionSubtract,-800.0, 4415.648193359375)
Math18 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionSubtract,-800.0, 4235.648193359375)
Math21 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionDivide,-640.0, 4235.648193359375)
Mix01 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionLinearInterpolate,-20.0, 4480.0)
Math22 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionMultiply,-480.0, 4260.0)
Reroute10 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionReroute,-1120.0, 4360.0)
Reroute09 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionReroute,-1120.0, 4360.0)
Reroute08 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionReroute,-1120.0, 4360.0)
Math23 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionAdd,-320.0, 4320.0)
Reroute06 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionReroute,-1840.0, 4400.0)
Reroute07 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionReroute,-1840.0, 4400.0)
Math12 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionSubtract,-1080.0, 3460.0)
Math15 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionDivide,-920.0, 3460.0)
Math16 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionMultiply,-760.0, 3480.0)
Math17 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionAdd,-600.0, 3540.0)
Math14 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionDivide,-900.0, 3640.0)
Math13 = create_expression(BL_ColorRamp3, unreal.MaterialExpressionSubtract, -1080.0, 3640.0)
Position0 = create_expression(BL_ColorRamp3, unreal.MaterialExpressionFunctionInput, -1580.0, 3540.0)
Color0 = create_expression(BL_ColorRamp3, unreal.MaterialExpressionFunctionInput, -1580.0, 3620.0)
Position1 = create_expression(BL_ColorRamp3, unreal.MaterialExpressionFunctionInput, -1580.0, 3800.0)
Color1 = create_expression(BL_ColorRamp3, unreal.MaterialExpressionFunctionInput, -1580.0, 3880.0)
Position2 = create_expression(BL_ColorRamp3, unreal.MaterialExpressionFunctionInput, -1560.0, 4540.0)
Color2 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionFunctionInput,-1560.0, 4620.0)
Factor = create_expression(BL_ColorRamp3, unreal.MaterialExpressionFunctionInput, -2200.0, 3320.0)
OutputResult = create_expression(BL_ColorRamp3, unreal.MaterialExpressionFunctionOutput,400, 4480)
### Loading Material Functions and Textures
### Setting Values
Color0.input_name = 'Color0'
Color0.sort_priority = 0
Color0.preview_value = (0.0, 0.0, 0.0, 1.0)
Color0.use_preview_value_as_default = True
Position0.input_name = 'Position0'
Position0.input_type = unreal.FunctionInputType.FUNCTION_INPUT_SCALAR
Position0.sort_priority = 1
Position0.preview_value = (0.0, 0.0, 0.0, 1.0)
Position0.use_preview_value_as_default = True
Color1.input_name = 'Color1'
Color1.sort_priority = 2
Color1.preview_value = (1.0, 0.0, 0.0, 1.0)
Color1.use_preview_value_as_default = True
Position1.input_name = "Position1"
Position1.input_type = unreal.FunctionInputType.FUNCTION_INPUT_SCALAR
Position1.sort_priority = 3
Position1.preview_value = (0.125, 0, 0, 1)
Position1.use_preview_value_as_default = True
Color2.input_name = 'Color2'
Color2.sort_priority = 4
Color2.preview_value = (1.0, 0.5, 0.0, 1)
Color2.use_preview_value_as_default = True
Position2.input_name = "Position2"
Position2.input_type = unreal.FunctionInputType.FUNCTION_INPUT_SCALAR
Position2.sort_priority = 5
Position2.preview_value = (0.250, 0, 0, 1)
Position2.use_preview_value_as_default = True
Factor.input_name = 'Factor'
Factor.input_type = unreal.FunctionInputType.FUNCTION_INPUT_SCALAR
Factor.sort_priority = 6
Factor.preview_value = (0.0, 0.0, 0.0, 1.0)
Factor.use_preview_value_as_default = True
### Creating Connections
Color1_connection = create_connection(Color1, '', Mix, 'B')
Position1_connection = create_connection(Position1, '', Math12, 'A')
Position1_connection = create_connection(Position1, '', Math13, 'B')
Position1_connection = create_connection(Position1, '', Reroute09, '')
Position1_connection = create_connection(Position1, '', Reroute10, '')
Position1_connection = create_connection(Position1, '', Reroute08, '')
Mix_connection = create_connection(Mix, '', Mix01, 'A')
Position0_connection = create_connection(Position0, '', Math12, 'B')
Position0_connection = create_connection(Position0, '', Math14, 'A')
Position0_connection = create_connection(Position0, '', Math13, 'A')
Color0_connection = create_connection(Color0, '', Mix, 'A')
Reroute01_connection = create_connection(Reroute01, '', Reroute06, '')
Reroute01_connection = create_connection(Reroute01, '', Math16, 'B')
Reroute01_connection = create_connection(Reroute01, '', Reroute07, '')
Math20_connection = create_connection(Math20, '', Math23, 'B')
Math19_connection = create_connection(Math19, '', Math20, 'B')
Math18_connection = create_connection(Math18, '', Math21, 'B')
Math21_connection = create_connection(Math21, '', Math22, 'A')
Color2_connection = create_connection(Color2, '', Mix01, 'B')
Position2_connection = create_connection(Position2, '', Math18, 'A')
Position2_connection = create_connection(Position2, '', Math19, 'B')
Math22_connection = create_connection(Math22, '', Math23, 'A')
Reroute10_connection = create_connection(Reroute10, '', Math20, 'A')
Reroute09_connection = create_connection(Reroute09, '', Math18, 'B')
Reroute08_connection = create_connection(Reroute08, '', Math19, 'A')
Math23_connection = create_connection(Math23, '', Mix01, 'Alpha')
Reroute06_connection = create_connection(Reroute06, '', Math22, 'B')
Factor_connection = create_connection(Factor, '', Reroute01, '')
Math12_connection = create_connection(Math12, '', Math15, 'B')
Math15_connection = create_connection(Math15, '', Math16, 'A')
Math16_connection = create_connection(Math16, '', Math17, 'A')
Math17_connection = create_connection(Math17, '', Mix, 'Alpha')
Math14_connection = create_connection(Math14, '', Math17, 'B')
Math13_connection = create_connection(Math13, '', Math14, 'B')
Mix01_connection = create_connection(Mix01, '', OutputResult, '')
update_function()
|
BL_ColorRamp3_MF.py
|
import unreal
BL_ColorRamp3 = unreal.AssetToolsHelpers.get_asset_tools().create_asset('BL_ColorRamp3', '/Engine/Functions/BLUI/', unreal.MaterialFunction, unreal.MaterialFunctionFactoryNew())
BL_ColorRamp3.set_editor_property("expose_to_library", True)
BL_ColorRamp3.set_editor_property("library_categories_text", ("BLUI", "Custom", "Utility"))
create_expression = unreal.MaterialEditingLibrary.create_material_expression_in_function
create_connection = unreal.MaterialEditingLibrary.connect_material_expressions
connect_property = unreal.MaterialEditingLibrary.connect_material_property
update_function = unreal.MaterialEditingLibrary.update_material_function
mat_func_separate = unreal.load_asset('/Engine/Functions/Engine_MaterialFunctions02/Utility/BreakOutFloat3Components')
mat_func_combine = unreal.load_asset('/Engine/Functions/Engine_MaterialFunctions02/Utility/MakeFloat3')
### Creating Nodes
Mix = create_expression(BL_ColorRamp3,unreal.MaterialExpressionLinearInterpolate,-340.0, 3620.0)
Reroute01 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionReroute,-1840.0, 3360.0)
Math20 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionDivide,-640.0, 4415.648193359375)
Math19 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionSubtract,-800.0, 4415.648193359375)
Math18 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionSubtract,-800.0, 4235.648193359375)
Math21 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionDivide,-640.0, 4235.648193359375)
Mix01 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionLinearInterpolate,-20.0, 4480.0)
Math22 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionMultiply,-480.0, 4260.0)
Reroute10 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionReroute,-1120.0, 4360.0)
Reroute09 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionReroute,-1120.0, 4360.0)
Reroute08 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionReroute,-1120.0, 4360.0)
Math23 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionAdd,-320.0, 4320.0)
Reroute06 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionReroute,-1840.0, 4400.0)
Reroute07 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionReroute,-1840.0, 4400.0)
Math12 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionSubtract,-1080.0, 3460.0)
Math15 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionDivide,-920.0, 3460.0)
Math16 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionMultiply,-760.0, 3480.0)
Math17 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionAdd,-600.0, 3540.0)
Math14 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionDivide,-900.0, 3640.0)
Math13 = create_expression(BL_ColorRamp3, unreal.MaterialExpressionSubtract, -1080.0, 3640.0)
Position0 = create_expression(BL_ColorRamp3, unreal.MaterialExpressionFunctionInput, -1580.0, 3540.0)
Color0 = create_expression(BL_ColorRamp3, unreal.MaterialExpressionFunctionInput, -1580.0, 3620.0)
Position1 = create_expression(BL_ColorRamp3, unreal.MaterialExpressionFunctionInput, -1580.0, 3800.0)
Color1 = create_expression(BL_ColorRamp3, unreal.MaterialExpressionFunctionInput, -1580.0, 3880.0)
Position2 = create_expression(BL_ColorRamp3, unreal.MaterialExpressionFunctionInput, -1560.0, 4540.0)
Color2 = create_expression(BL_ColorRamp3,unreal.MaterialExpressionFunctionInput,-1560.0, 4620.0)
Factor = create_expression(BL_ColorRamp3, unreal.MaterialExpressionFunctionInput, -2200.0, 3320.0)
OutputResult = create_expression(BL_ColorRamp3, unreal.MaterialExpressionFunctionOutput,400, 4480)
### Loading Material Functions and Textures
### Setting Values
Color0.input_name = 'Color0'
Color0.sort_priority = 0
Color0.preview_value = (0.0, 0.0, 0.0, 1.0)
Color0.use_preview_value_as_default = True
Position0.input_name = 'Position0'
Position0.input_type = unreal.FunctionInputType.FUNCTION_INPUT_SCALAR
Position0.sort_priority = 1
Position0.preview_value = (0.0, 0.0, 0.0, 1.0)
Position0.use_preview_value_as_default = True
Color1.input_name = 'Color1'
Color1.sort_priority = 2
Color1.preview_value = (1.0, 0.0, 0.0, 1.0)
Color1.use_preview_value_as_default = True
Position1.input_name = "Position1"
Position1.input_type = unreal.FunctionInputType.FUNCTION_INPUT_SCALAR
Position1.sort_priority = 3
Position1.preview_value = (0.125, 0, 0, 1)
Position1.use_preview_value_as_default = True
Color2.input_name = 'Color2'
Color2.sort_priority = 4
Color2.preview_value = (1.0, 0.5, 0.0, 1)
Color2.use_preview_value_as_default = True
Position2.input_name = "Position2"
Position2.input_type = unreal.FunctionInputType.FUNCTION_INPUT_SCALAR
Position2.sort_priority = 5
Position2.preview_value = (0.250, 0, 0, 1)
Position2.use_preview_value_as_default = True
Factor.input_name = 'Factor'
Factor.input_type = unreal.FunctionInputType.FUNCTION_INPUT_SCALAR
Factor.sort_priority = 6
Factor.preview_value = (0.0, 0.0, 0.0, 1.0)
Factor.use_preview_value_as_default = True
### Creating Connections
Color1_connection = create_connection(Color1, '', Mix, 'B')
Position1_connection = create_connection(Position1, '', Math12, 'A')
Position1_connection = create_connection(Position1, '', Math13, 'B')
Position1_connection = create_connection(Position1, '', Reroute09, '')
Position1_connection = create_connection(Position1, '', Reroute10, '')
Position1_connection = create_connection(Position1, '', Reroute08, '')
Mix_connection = create_connection(Mix, '', Mix01, 'A')
Position0_connection = create_connection(Position0, '', Math12, 'B')
Position0_connection = create_connection(Position0, '', Math14, 'A')
Position0_connection = create_connection(Position0, '', Math13, 'A')
Color0_connection = create_connection(Color0, '', Mix, 'A')
Reroute01_connection = create_connection(Reroute01, '', Reroute06, '')
Reroute01_connection = create_connection(Reroute01, '', Math16, 'B')
Reroute01_connection = create_connection(Reroute01, '', Reroute07, '')
Math20_connection = create_connection(Math20, '', Math23, 'B')
Math19_connection = create_connection(Math19, '', Math20, 'B')
Math18_connection = create_connection(Math18, '', Math21, 'B')
Math21_connection = create_connection(Math21, '', Math22, 'A')
Color2_connection = create_connection(Color2, '', Mix01, 'B')
Position2_connection = create_connection(Position2, '', Math18, 'A')
Position2_connection = create_connection(Position2, '', Math19, 'B')
Math22_connection = create_connection(Math22, '', Math23, 'A')
Reroute10_connection = create_connection(Reroute10, '', Math20, 'A')
Reroute09_connection = create_connection(Reroute09, '', Math18, 'B')
Reroute08_connection = create_connection(Reroute08, '', Math19, 'A')
Math23_connection = create_connection(Math23, '', Mix01, 'Alpha')
Reroute06_connection = create_connection(Reroute06, '', Math22, 'B')
Factor_connection = create_connection(Factor, '', Reroute01, '')
Math12_connection = create_connection(Math12, '', Math15, 'B')
Math15_connection = create_connection(Math15, '', Math16, 'A')
Math16_connection = create_connection(Math16, '', Math17, 'A')
Math17_connection = create_connection(Math17, '', Mix, 'Alpha')
Math14_connection = create_connection(Math14, '', Math17, 'B')
Math13_connection = create_connection(Math13, '', Math14, 'B')
Mix01_connection = create_connection(Mix01, '', OutputResult, '')
update_function()
| 0.332635 | 0.126353 |
import pmdarima
import pandas as pd
import numpy as np
import sklearn
import copy
from training import TrainHelper, ModelsBaseClass
from preparation import PreparationHelper
class ARIMA(ModelsBaseClass.BaseModel):
"""
Class containing (S)ARIMA(X) model and methods
"""
def __init__(self, target_column: str, order: tuple, seasonal_order: tuple, method: str = 'lbfgs',
use_exog: bool = False, with_intercept: bool = True, trend: str = None, log: bool = False,
power_transf: bool = False, one_step_ahead: bool = False):
"""
:param target_column: target_column for prediction
:param order: (p, d, q) of (S)ARIMA(X) model
:param seasonal_order: (P, D, Q, m) of (S)ARIMA(X) model
:param method: method to use for optimization
:param use_exog: use exogenous input
:param with_intercept: use intercept
:param trend: trend component
:param log: use log transform
:param power_transf: use power transform
:param one_step_ahead: perform one step ahead prediction
"""
super().__init__(target_column=target_column, seasonal_periods=seasonal_order[3], name='(S)ARIMA(X)',
one_step_ahead=one_step_ahead)
self.model = pmdarima.ARIMA(order=order, seasonal_order=seasonal_order, maxiter=50, disp=1, method=method,
with_intercept=with_intercept, enforce_stationarity=False,
suppress_warnings=True)
self.use_exog = use_exog
self.exog_cols_dropped = None
self.trend = trend
self.log = log
self.power_transformer = sklearn.preprocessing.PowerTransformer() if power_transf else None
self.contains_zeros = False
def train(self, train: pd.DataFrame, cross_val_call: bool = False) -> dict:
"""
Train (S)ARIMA(X) model
:param train: train set
:param cross_val_call: called to perform cross validation
:return dictionary with cross validated scores (if specified)
"""
cross_val_score_dict = {}
if cross_val_call:
cross_val_score_dict, self.model = self.get_cross_val_score(train=train)
train_exog = None
if (self.power_transformer is not None) or self.log:
train = TrainHelper.get_transformed_set(dataset=train, target_column=self.target_column,
power_transformer=self.power_transformer, log=self.log)
if self.use_exog:
train_exog = train.drop(labels=[self.target_column], axis=1)
self.exog_cols_dropped = train_exog.columns[train_exog.isna().any()].tolist()
PreparationHelper.drop_columns(train_exog, self.exog_cols_dropped)
train_exog = train_exog.to_numpy(dtype=float)
self.model.fit(y=train[self.target_column], exogenous=train_exog, trend=self.trend)
return cross_val_score_dict
def insample(self, train: pd.DataFrame) -> pd.DataFrame:
"""
Deliver (back-transformed) insample predictions
:param train: train set
:return: DataFrame with insample predictions
"""
train_exog = None
if self.use_exog:
train_exog = train.drop(labels=[self.target_column], axis=1)
PreparationHelper.drop_columns(train_exog, self.exog_cols_dropped)
train_exog = train_exog.to_numpy(dtype=float)
insample = pd.DataFrame(data=self.model.predict_in_sample(exogenous=train_exog), index=train.index,
columns=['Insample'])
if self.power_transformer is not None:
insample = pd.DataFrame(data=self.power_transformer.inverse_transform(insample['Insample']
.values.reshape(-1, 1)),
index=insample.index, columns=['Insample'])
if self.log:
if 0 in train[self.target_column].values:
self.contains_zeros = True
insample = np.exp(insample) - 1
else:
insample = np.exp(insample)
return insample
def predict(self, test: pd.DataFrame, train: pd.DataFrame) -> pd.DataFrame:
"""
Deliver (back-transformed), if specified one step ahead, out-of-sample predictions
:param test: test set
:param train: train set
:return: DataFrame with predictions, upper and lower confidence level
"""
test_exog = None
if (self.power_transformer is not None) or self.log:
test = TrainHelper.get_transformed_set(dataset=test, target_column=self.target_column,
power_transformer=self.power_transformer, log=self.log,
only_transform=True)
if self.use_exog:
test_exog = test.drop(labels=[self.target_column], axis=1)
PreparationHelper.drop_columns(test_exog, self.exog_cols_dropped)
test_exog = test_exog.to_numpy(dtype=float)
if self.one_step_ahead:
predict = []
conf_low = []
conf_up = []
# deep copy model as predict function should not change class model
model = copy.deepcopy(self.model)
for i in range(0, test.shape[0]):
if self.use_exog:
fc, conf = model.predict(n_periods=1, exogenous=pd.DataFrame(test_exog[i].reshape(1, -1)),
return_conf_int=True, alpha=0.05)
model.update(test[self.target_column][i],
exogenous=pd.DataFrame(test_exog[i].reshape(1, -1)))
else:
fc, conf = model.predict(n_periods=1, return_conf_int=True, alpha=0.05)
model.update(test[self.target_column][i])
predict.append(fc[0])
conf_low.append(conf[0][0])
conf_up.append(conf[0][1])
else:
predict, conf = self.model.predict(n_periods=test.shape[0], exogenous=test_exog,
return_conf_int=True, alpha=0.05)
conf_low = conf[:, 0]
conf_up = conf[:, 1]
predictions = pd.DataFrame({'Prediction': predict, 'LowerConf': conf_low, 'UpperConf': conf_up},
index=test.index)
if self.power_transformer is not None:
predictions = pd.DataFrame({'Prediction': self.power_transformer.inverse_transform(
predictions['Prediction'].values.reshape(-1, 1)).flatten(),
'LowerConf': self.power_transformer.inverse_transform(
predictions['LowerConf'].values.reshape(-1, 1)).flatten(),
'UpperConf': self.power_transformer.inverse_transform(
predictions['UpperConf'].values.reshape(-1, 1)).flatten()},
index=predictions.index)
if self.log:
predict_backtr = np.exp(predictions['Prediction'])
if self.contains_zeros:
predict_backtr += 1
lower_dist = ((predictions['Prediction'] - predictions['LowerConf'])
/ predictions['Prediction']) * predict_backtr
upper_dist = ((predictions['UpperConf'] - predictions['Prediction'])
/ predictions['Prediction']) * predict_backtr
predictions = pd.DataFrame({'Prediction': predict_backtr,
'LowerConf': predict_backtr - lower_dist,
'UpperConf': predict_backtr + upper_dist},
index=predictions.index)
return predictions
|
training/ModelsARIMA.py
|
import pmdarima
import pandas as pd
import numpy as np
import sklearn
import copy
from training import TrainHelper, ModelsBaseClass
from preparation import PreparationHelper
class ARIMA(ModelsBaseClass.BaseModel):
"""
Class containing (S)ARIMA(X) model and methods
"""
def __init__(self, target_column: str, order: tuple, seasonal_order: tuple, method: str = 'lbfgs',
use_exog: bool = False, with_intercept: bool = True, trend: str = None, log: bool = False,
power_transf: bool = False, one_step_ahead: bool = False):
"""
:param target_column: target_column for prediction
:param order: (p, d, q) of (S)ARIMA(X) model
:param seasonal_order: (P, D, Q, m) of (S)ARIMA(X) model
:param method: method to use for optimization
:param use_exog: use exogenous input
:param with_intercept: use intercept
:param trend: trend component
:param log: use log transform
:param power_transf: use power transform
:param one_step_ahead: perform one step ahead prediction
"""
super().__init__(target_column=target_column, seasonal_periods=seasonal_order[3], name='(S)ARIMA(X)',
one_step_ahead=one_step_ahead)
self.model = pmdarima.ARIMA(order=order, seasonal_order=seasonal_order, maxiter=50, disp=1, method=method,
with_intercept=with_intercept, enforce_stationarity=False,
suppress_warnings=True)
self.use_exog = use_exog
self.exog_cols_dropped = None
self.trend = trend
self.log = log
self.power_transformer = sklearn.preprocessing.PowerTransformer() if power_transf else None
self.contains_zeros = False
def train(self, train: pd.DataFrame, cross_val_call: bool = False) -> dict:
"""
Train (S)ARIMA(X) model
:param train: train set
:param cross_val_call: called to perform cross validation
:return dictionary with cross validated scores (if specified)
"""
cross_val_score_dict = {}
if cross_val_call:
cross_val_score_dict, self.model = self.get_cross_val_score(train=train)
train_exog = None
if (self.power_transformer is not None) or self.log:
train = TrainHelper.get_transformed_set(dataset=train, target_column=self.target_column,
power_transformer=self.power_transformer, log=self.log)
if self.use_exog:
train_exog = train.drop(labels=[self.target_column], axis=1)
self.exog_cols_dropped = train_exog.columns[train_exog.isna().any()].tolist()
PreparationHelper.drop_columns(train_exog, self.exog_cols_dropped)
train_exog = train_exog.to_numpy(dtype=float)
self.model.fit(y=train[self.target_column], exogenous=train_exog, trend=self.trend)
return cross_val_score_dict
def insample(self, train: pd.DataFrame) -> pd.DataFrame:
"""
Deliver (back-transformed) insample predictions
:param train: train set
:return: DataFrame with insample predictions
"""
train_exog = None
if self.use_exog:
train_exog = train.drop(labels=[self.target_column], axis=1)
PreparationHelper.drop_columns(train_exog, self.exog_cols_dropped)
train_exog = train_exog.to_numpy(dtype=float)
insample = pd.DataFrame(data=self.model.predict_in_sample(exogenous=train_exog), index=train.index,
columns=['Insample'])
if self.power_transformer is not None:
insample = pd.DataFrame(data=self.power_transformer.inverse_transform(insample['Insample']
.values.reshape(-1, 1)),
index=insample.index, columns=['Insample'])
if self.log:
if 0 in train[self.target_column].values:
self.contains_zeros = True
insample = np.exp(insample) - 1
else:
insample = np.exp(insample)
return insample
def predict(self, test: pd.DataFrame, train: pd.DataFrame) -> pd.DataFrame:
"""
Deliver (back-transformed), if specified one step ahead, out-of-sample predictions
:param test: test set
:param train: train set
:return: DataFrame with predictions, upper and lower confidence level
"""
test_exog = None
if (self.power_transformer is not None) or self.log:
test = TrainHelper.get_transformed_set(dataset=test, target_column=self.target_column,
power_transformer=self.power_transformer, log=self.log,
only_transform=True)
if self.use_exog:
test_exog = test.drop(labels=[self.target_column], axis=1)
PreparationHelper.drop_columns(test_exog, self.exog_cols_dropped)
test_exog = test_exog.to_numpy(dtype=float)
if self.one_step_ahead:
predict = []
conf_low = []
conf_up = []
# deep copy model as predict function should not change class model
model = copy.deepcopy(self.model)
for i in range(0, test.shape[0]):
if self.use_exog:
fc, conf = model.predict(n_periods=1, exogenous=pd.DataFrame(test_exog[i].reshape(1, -1)),
return_conf_int=True, alpha=0.05)
model.update(test[self.target_column][i],
exogenous=pd.DataFrame(test_exog[i].reshape(1, -1)))
else:
fc, conf = model.predict(n_periods=1, return_conf_int=True, alpha=0.05)
model.update(test[self.target_column][i])
predict.append(fc[0])
conf_low.append(conf[0][0])
conf_up.append(conf[0][1])
else:
predict, conf = self.model.predict(n_periods=test.shape[0], exogenous=test_exog,
return_conf_int=True, alpha=0.05)
conf_low = conf[:, 0]
conf_up = conf[:, 1]
predictions = pd.DataFrame({'Prediction': predict, 'LowerConf': conf_low, 'UpperConf': conf_up},
index=test.index)
if self.power_transformer is not None:
predictions = pd.DataFrame({'Prediction': self.power_transformer.inverse_transform(
predictions['Prediction'].values.reshape(-1, 1)).flatten(),
'LowerConf': self.power_transformer.inverse_transform(
predictions['LowerConf'].values.reshape(-1, 1)).flatten(),
'UpperConf': self.power_transformer.inverse_transform(
predictions['UpperConf'].values.reshape(-1, 1)).flatten()},
index=predictions.index)
if self.log:
predict_backtr = np.exp(predictions['Prediction'])
if self.contains_zeros:
predict_backtr += 1
lower_dist = ((predictions['Prediction'] - predictions['LowerConf'])
/ predictions['Prediction']) * predict_backtr
upper_dist = ((predictions['UpperConf'] - predictions['Prediction'])
/ predictions['Prediction']) * predict_backtr
predictions = pd.DataFrame({'Prediction': predict_backtr,
'LowerConf': predict_backtr - lower_dist,
'UpperConf': predict_backtr + upper_dist},
index=predictions.index)
return predictions
| 0.747524 | 0.417212 |
import numpy as np
import matplotlib.pyplot as plt
plt.rc("font", family="serif")
plt.rc("text", usetex=True)
from astropy.table import Table
from astropy.cosmology import Planck15
DIST = Planck15.luminosity_distance(z=0.0137).cgs.value
def optical():
# PTF12gzk
dat = Table.read("table1.dat", format='ascii')
jd = dat['col1']
mag = dat['col4']
emag = dat['col5']
band = dat['col3']
Mag = mag-33.8 # distance modulus
dt = jd-jd[0]
choose = np.logical_or(band == 'r', band == 'R,r')
plt.errorbar(dt[choose], Mag[choose], yerr=emag[choose], c='red', fmt='.')
choose = band == 'g'
plt.errorbar(dt[choose], Mag[choose], yerr=emag[choose], c='green', fmt='.')
plt.gca().invert_yaxis()
plt.tick_params(labelsize=14)
plt.xlabel("dt [days]", fontsize=16)
plt.ylabel("M", fontsize=16)
plt.show()
if __name__=="__main__":
# radio
dt = np.array([8, 8, 10, 10, 10, 10, 12])
nu = np.array([4.8, 7.4, 6.1, 14, 20, 95, 6.1])
f = np.array([79, 82, 33, 42, 90, 3600, 33]) # uJy
ef = np.array([16, 15, -99, -99, -99, -99, -99])
# -99 ef means upper limit
f_cgs = f * 1E-6 * 1E-23 * 4 * np.pi * DIST**2
# detections
choose = ef > 0
# convert the uncertainties to the same cgs luminosity units as f_cgs
ef_cgs = ef * 1E-6 * 1E-23 * 4 * np.pi * DIST**2
plt.errorbar(dt[choose], f_cgs[choose], yerr=ef_cgs[choose], fmt='.', c='k')
# non-detections
choose = ef < 0
plt.scatter(dt[choose], f_cgs[choose], marker='v', c='k')
# text saying the frequency
for ii,val in enumerate(nu):
if ii%2==0:
plt.text(
dt[ii]*1.01, f_cgs[ii], "%s GHz" %val, fontsize=14,
horizontalalignment='left', verticalalignment='top')
else:
plt.text(
dt[ii]*1.01, f_cgs[ii], "%s GHz" %val, fontsize=14,
horizontalalignment='left', verticalalignment='bottom')
# and now limits for this new source...
# dist was Sept 9 I think
# AMI: Sept 13
d = Planck15.luminosity_distance(z=0.033).cgs.value
plt.scatter(4, 35*1E-6*1E-23*4*np.pi*d**2, s=50, c='lightblue', marker='v')
plt.text(4*1.01, 35*1E-6*1E-23*4*np.pi*d**2, "15.5 GHz",
horizontalalignment='left', fontsize=14)
# SMA:
plt.scatter(4, 3.5*1E-3*1E-23*4*np.pi*d**2, s=50, c='lightblue', marker='v')
plt.text(4*1.01, 3.5*1E-3*1E-23*4*np.pi*d**2, "231.5 GHz",
horizontalalignment='left', fontsize=14)
dt = 6
L = 0.59*1E-3*1E-23*4*np.pi*d**2
plt.scatter(dt, L, s=50, c='lightblue', marker='v')
plt.text(dt*1.01, L, "230 GHz",
horizontalalignment='left', fontsize=14)
# VLA:
plt.scatter(5, 27*1E-6*1E-23*4*np.pi*d**2, s=50, c='lightblue', marker='v')
plt.text(5*1.01, 27*1E-6*1E-23*4*np.pi*d**2, "10 GHz",
horizontalalignment='left', fontsize=14)
plt.yscale('log')
plt.xlim(3,14)
plt.tick_params(labelsize=14)
plt.xlabel("dt [day]", fontsize=14)
plt.ylabel(r"$L_\nu $[erg/s/Hz]", fontsize=14)
plt.show()
#plt.savefig("radio_comparison.png")
|
code/extra_plots/compare_12gzk.py
|
import numpy as np
import matplotlib.pyplot as plt
plt.rc("font", family="serif")
plt.rc("text", usetex=True)
from astropy.table import Table
from astropy.cosmology import Planck15
DIST = Planck15.luminosity_distance(z=0.0137).cgs.value
def optical():
# PTF12gzk
dat = Table.read("table1.dat", format='ascii')
jd = dat['col1']
mag = dat['col4']
emag = dat['col5']
band = dat['col3']
Mag = mag-33.8 # distance modulus
dt = jd-jd[0]
choose = np.logical_or(band == 'r', band == 'R,r')
plt.errorbar(dt[choose], Mag[choose], yerr=emag[choose], c='red', fmt='.')
choose = band == 'g'
plt.errorbar(dt[choose], Mag[choose], yerr=emag[choose], c='green', fmt='.')
plt.gca().invert_yaxis()
plt.tick_params(labelsize=14)
plt.xlabel("dt [days]", fontsize=16)
plt.ylabel("M", fontsize=16)
plt.show()
if __name__=="__main__":
# radio
dt = np.array([8, 8, 10, 10, 10, 10, 12])
nu = np.array([4.8, 7.4, 6.1, 14, 20, 95, 6.1])
f = np.array([79, 82, 33, 42, 90, 3600, 33]) # uJy
ef = np.array([16, 15, -99, -99, -99, -99, -99])
# -99 ef means upper limit
f_cgs = f * 1E-6 * 1E-23 * 4 * np.pi * DIST**2
# detections
choose = ef > 0
# convert the uncertainties to the same cgs luminosity units as f_cgs
ef_cgs = ef * 1E-6 * 1E-23 * 4 * np.pi * DIST**2
plt.errorbar(dt[choose], f_cgs[choose], yerr=ef_cgs[choose], fmt='.', c='k')
# non-detections
choose = ef < 0
plt.scatter(dt[choose], f_cgs[choose], marker='v', c='k')
# text saying the frequency
for ii,val in enumerate(nu):
if ii%2==0:
plt.text(
dt[ii]*1.01, f_cgs[ii], "%s GHz" %val, fontsize=14,
horizontalalignment='left', verticalalignment='top')
else:
plt.text(
dt[ii]*1.01, f_cgs[ii], "%s GHz" %val, fontsize=14,
horizontalalignment='left', verticalalignment='bottom')
# and now limits for this new source...
# dist was Sept 9 I think
# AMI: Sept 13
d = Planck15.luminosity_distance(z=0.033).cgs.value
plt.scatter(4, 35*1E-6*1E-23*4*np.pi*d**2, s=50, c='lightblue', marker='v')
plt.text(4*1.01, 35*1E-6*1E-23*4*np.pi*d**2, "15.5 GHz",
horizontalalignment='left', fontsize=14)
# SMA:
plt.scatter(4, 3.5*1E-3*1E-23*4*np.pi*d**2, s=50, c='lightblue', marker='v')
plt.text(4*1.01, 3.5*1E-3*1E-23*4*np.pi*d**2, "231.5 GHz",
horizontalalignment='left', fontsize=14)
dt = 6
L = 0.59*1E-3*1E-23*4*np.pi*d**2
plt.scatter(dt, L, s=50, c='lightblue', marker='v')
plt.text(dt*1.01, L, "230 GHz",
horizontalalignment='left', fontsize=14)
# VLA:
plt.scatter(5, 27*1E-6*1E-23*4*np.pi*d**2, s=50, c='lightblue', marker='v')
plt.text(5*1.01, 27*1E-6*1E-23*4*np.pi*d**2, "10 GHz",
horizontalalignment='left', fontsize=14)
plt.yscale('log')
plt.xlim(3,14)
plt.tick_params(labelsize=14)
plt.xlabel("dt [day]", fontsize=14)
plt.ylabel(r"$L_\nu $[erg/s/Hz]", fontsize=14)
plt.show()
#plt.savefig("radio_comparison.png")
| 0.549157 | 0.656658 |
import numpy as np
import cv2, re, os, keras, warnings, time
from keras.models import Model
import keras.layers as L
from keras.optimizers import RMSprop
from keras import models
from keras import optimizers
from keras.applications import VGG16
from mylib.Mailer import Mailer
from mylib import Config as config  # the rest of this module refers to the config module in lowercase
warnings.filterwarnings('ignore')
CAM_CONSTANT = 0
# CNN VGG model
class FeatExtractor:
def __init__(self, SIZE):
self.size = config.SIZE
self.vgg_conv = VGG16(weights='imagenet', include_top=False, input_shape=(self.size[0], self.size[1], 3))
for layer in self.vgg_conv.layers[:-4]:
layer.trainable = False
# Create the model
def build_feat_extractor():
model = models.Sequential()
# Add the vgg convolutional base model
model.add(self.vgg_conv)
# Add new layers
model.add(L.Flatten())
model.add(L.Dense(1024, activation='relu'))
model.add(L.Dropout(0.2))
model.add(L.Dense(256, activation='relu'))
model.add(L.Dense(2, activation='softmax'))
return model
self.model = build_feat_extractor()
self.model.compile(loss='categorical_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-4),
metrics=['acc'])
self.model.load_weights('weights/Feature_Extractor.h5')
inp = self.model.input
out = self.model.layers[-4].output
self.model = Model(inputs=[inp], outputs=[out])
self.model.compile(loss='categorical_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-4),
metrics=['acc'])
def get_feats(self, frames):
image_data = np.zeros((len(frames), config.VGG16_OUT))
for index, image in enumerate(frames):
vect = self.model.predict(image.reshape(1, self.size[0], self.size[1], 3))
image_data[index, :] = vect
image_data = image_data.reshape(1, len(frames), config.VGG16_OUT)
return image_data
# RNN model
class RnnModel:
def __init__(self, NUM_FEATURES, LOOK_BACK):
self.num_features = NUM_FEATURES
self.look_back = config.LOOK_BACK
def build_model():
inp = L.Input(shape=(self.look_back, self.num_features))
x = L.LSTM(64, return_sequences=True)(inp)
x = L.Dropout(0.2)(x)
x = L.LSTM(16)(x)
out = L.Dense(2, activation='softmax')(x)
model = Model(inputs=[inp], outputs=[out])
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(lr=1e-4),
metrics=['acc'])
return model
self.model = build_model()
self.model.load_weights('weights/RNN.h5')
def predict(self, frame_data):
pred = self.model.predict(frame_data)
return pred[0][1]
def __draw_label(img, text, pos, bg_color):
font_face = cv2.FONT_HERSHEY_SIMPLEX
scale = 0.4
color = (0, 0, 0)
thickness = cv2.FILLED
margin = 2
txt_size = cv2.getTextSize(text, font_face, scale, thickness)
end_x = pos[0] + txt_size[0][0] + margin
end_y = pos[1] - txt_size[0][1] - margin
cv2.rectangle(img, pos, (end_x, end_y), bg_color, thickness)
cv2.putText(img, text, pos, font_face, scale, color, 1, cv2.LINE_AA)
#===============================================================================
# Initiate the main function
if __name__ == '__main__':
if not config.FROM_WEBCAM:
# Enter your desired test video path
cap = cv2.VideoCapture('tests/v_CricketShot_g22_c01.avi')
else:
# From webcam
cap = cv2.VideoCapture(CAM_CONSTANT, cv2.CAP_DSHOW)
cnt = 0
frames = []
fe = FeatExtractor(config.SIZE)
rnn = RnnModel(config.VGG16_OUT, config.LOOK_BACK)
total_frames = 0
detect_certainty = []
neg_certainty = []
while (cap.isOpened()):
# Capture frame-by-frame
cnt+=1
ret, full = cap.read()
if not ret:
    # Stop once no frame can be read (end of video or camera error)
    break
frame = cv2.resize(full, config.SIZE)
if cnt % config.TAKE_FRAME == 0:
frames.append(frame)
pred = 0
if len(frames) == config.LOOK_BACK:
# Get features
feats = fe.get_feats(frames)
frames.pop(0)
initial = time.time()
pred = rnn.predict(feats)
final = time.time() - initial
print("")
# Check predictions per frame (either 0 or 1)
print('[INFO] Frame acc. predictions:', pred)
# Check inference time per frame
print('Frame inference in %.4f seconds' % (final))
if ret == True:
# Display the resulting frame
# Optimize the threshold (avg. prediction score for class labels) if desired
# 1 for class1 and 0 for class2. Please refer config.
if pred >= config.Threshold:
__draw_label(full, 'Bowl', (20, 20), (255, 255, 255))
total_frames += 1
detect_certainty.append(pred)
else:
neg_certainty.append(pred)
if config.ALERT:
# Adjust the total_frames (avg. score to send the mail). Refer config.
if total_frames > config.positive_frames:
print('[INFO] Sending mail...')
neg = np.mean(neg_certainty)
pos = np.mean(detect_certainty)
time1 = total_frames * config.TAKE_FRAME / 30
Mailer().send(config.MAIL, total_frames, time1, pos, neg)
print('[INFO] Mail sent')
detect_certainty = []
total_frames = 0
__draw_label(full, 'Bat', (20, 20), (255, 255, 255))
cv2.imshow('Test_Window', full)
# Press Q on keyboard to exit
if cv2.waitKey(25) & 0xFF == ord('q'):
break
# Break the loop
else:
break
# When everything done, release the video capture object
cap.release()
|
Run.py
|
import numpy as np
import cv2, re, os, keras, warnings, time
from keras.models import Model
import keras.layers as L
from keras.optimizers import RMSprop
from keras import models
from keras import optimizers
from keras.applications import VGG16
from mylib.Mailer import Mailer
from mylib import Config as config  # the rest of this module refers to the config module in lowercase
warnings.filterwarnings('ignore')
CAM_CONSTANT = 0
# CNN VGG model
class FeatExtractor:
def __init__(self, SIZE):
self.size = config.SIZE
self.vgg_conv = VGG16(weights='imagenet', include_top=False, input_shape=(self.size[0], self.size[1], 3))
for layer in self.vgg_conv.layers[:-4]:
layer.trainable = False
# Create the model
def build_feat_extractor():
model = models.Sequential()
# Add the vgg convolutional base model
model.add(self.vgg_conv)
# Add new layers
model.add(L.Flatten())
model.add(L.Dense(1024, activation='relu'))
model.add(L.Dropout(0.2))
model.add(L.Dense(256, activation='relu'))
model.add(L.Dense(2, activation='softmax'))
return model
self.model = build_feat_extractor()
self.model.compile(loss='categorical_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-4),
metrics=['acc'])
self.model.load_weights('weights/Feature_Extractor.h5')
inp = self.model.input
out = self.model.layers[-4].output
self.model = Model(inputs=[inp], outputs=[out])
self.model.compile(loss='categorical_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-4),
metrics=['acc'])
def get_feats(self, frames):
image_data = np.zeros((len(frames), config.VGG16_OUT))
for index, image in enumerate(frames):
vect = self.model.predict(image.reshape(1, self.size[0], self.size[1], 3))
image_data[index, :] = vect
image_data = image_data.reshape(1, len(frames), config.VGG16_OUT)
return image_data
# RNN model
class RnnModel:
def __init__(self, NUM_FEATURES, LOOK_BACK):
self.num_features = NUM_FEATURES
self.look_back = config.LOOK_BACK
def build_model():
inp = L.Input(shape=(self.look_back, self.num_features))
x = L.LSTM(64, return_sequences=True)(inp)
x = L.Dropout(0.2)(x)
x = L.LSTM(16)(x)
out = L.Dense(2, activation='softmax')(x)
model = Model(inputs=[inp], outputs=[out])
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(lr=1e-4),
metrics=['acc'])
return model
self.model = build_model()
self.model.load_weights('weights/RNN.h5')
def predict(self, frame_data):
pred = self.model.predict(frame_data)
return pred[0][1]
def __draw_label(img, text, pos, bg_color):
font_face = cv2.FONT_HERSHEY_SIMPLEX
scale = 0.4
color = (0, 0, 0)
thickness = cv2.FILLED
margin = 2
txt_size = cv2.getTextSize(text, font_face, scale, thickness)
end_x = pos[0] + txt_size[0][0] + margin
end_y = pos[1] - txt_size[0][1] - margin
cv2.rectangle(img, pos, (end_x, end_y), bg_color, thickness)
cv2.putText(img, text, pos, font_face, scale, color, 1, cv2.LINE_AA)
#===============================================================================
# Initiate the main function
if __name__ == '__main__':
if not config.FROM_WEBCAM:
# Enter your desired test video path
cap = cv2.VideoCapture('tests/v_CricketShot_g22_c01.avi')
else:
# From webcam
cap = cv2.VideoCapture(CAM_CONSTANT, cv2.CAP_DSHOW)
cnt = 0
frames = []
fe = FeatExtractor(config.SIZE)
rnn = RnnModel(config.VGG16_OUT, config.LOOK_BACK)
total_frames = 0
detect_certainty = []
neg_certainty = []
while (cap.isOpened()):
# Capture frame-by-frame
cnt+=1
ret, full = cap.read()
if not ret:
    # Stop once no frame can be read (end of video or camera error)
    break
frame = cv2.resize(full, config.SIZE)
if cnt % config.TAKE_FRAME == 0:
frames.append(frame)
pred = 0
if len(frames) == config.LOOK_BACK:
# Get features
feats = fe.get_feats(frames)
frames.pop(0)
initial = time.time()
pred = rnn.predict(feats)
final = time.time() - initial
print("")
# Check predictions per frame (either 0 or 1)
print('[INFO] Frame acc. predictions:', pred)
# Check inference time per frame
print('Frame inference in %.4f seconds' % (final))
if ret == True:
# Display the resulting frame
# Optimize the threshold (avg. prediction score for class labels) if desired
# 1 for class1 and 0 for class2. Please refer config.
if pred >= config.Threshold:
__draw_label(full, 'Bowl', (20, 20), (255, 255, 255))
total_frames += 1
detect_certainty.append(pred)
else:
neg_certainty.append(pred)
if config.ALERT:
# Adjust the total_frames (avg. score to send the mail). Refer config.
if total_frames > config.positive_frames:
print('[INFO] Sending mail...')
neg = np.mean(neg_certainty)
pos = np.mean(detect_certainty)
time1 = total_frames * config.TAKE_FRAME / 30
Mailer().send(config.MAIL, total_frames, time1, pos, neg)
print('[INFO] Mail sent')
detect_certainty = []
total_frames = 0
__draw_label(full, 'Bat', (20, 20), (255, 255, 255))
cv2.imshow('Test_Window', full)
# Press Q on keyboard to exit
if cv2.waitKey(25) & 0xFF == ord('q'):
break
# Break the loop
else:
break
# When everything done, release the video capture object
cap.release()
| 0.780328 | 0.248483 |
from lockstep.lockstep_response import LockstepResponse
class CodeDefinitionsClient:
def __init__(self, client):
self.client = client
def retrieve_codedefinition(self, id: str, include: str) -> LockstepResponse:
"""
Retrieves the CodeDefinition specified by this unique
identifier, optionally including nested data sets.
A CodeDefinition contains information around system code values
and their definitions.
Parameters
----------
id : str
The unique Lockstep Platform ID number of this
CodeDefinition
include : str
To fetch additional data on this object, specify the list of
elements to retrieve. No collections are currently available
but may be offered in the future
"""
path = f"/api/v1/CodeDefinitions/{id}"
return self.client.send_request("GET", path, None, {"id": id, "include": include})
def query_codedefinitions(self, filter: str, include: str, order: str, pageSize: int, pageNumber: int) -> LockstepResponse:
"""
Queries CodeDefinitions for this account using the specified
filtering, sorting, nested fetch, and pagination rules
requested.
More information on querying can be found on the [Searchlight
Query Language](https://developer.lockstep.io/docs/querying-with-searchlight)
page on the Lockstep Developer website.
A CodeDefinition contains information around system code values
and their definitions.
Parameters
----------
filter : str
The filter for this query. See [Searchlight Query
Language](https://developer.lockstep.io/docs/querying-with-searchlight)
include : str
To fetch additional data on this object, specify the list of
elements to retrieve. No collections are currently available
but may be offered in the future
order : str
The sort order for this query. See [Searchlight Query
Language](https://developer.lockstep.io/docs/querying-with-searchlight)
pageSize : int
The page size for results (default 200). See [Searchlight
Query Language](https://developer.lockstep.io/docs/querying-with-searchlight)
pageNumber : int
The page number for results (default 0). See [Searchlight
Query Language](https://developer.lockstep.io/docs/querying-with-searchlight)
"""
path = f"/api/v1/CodeDefinitions/query"
return self.client.send_request("GET", path, None, {"filter": filter, "include": include, "order": order, "pageSize": pageSize, "pageNumber": pageNumber})
|
src/lockstep/clients/codedefinitions_client.py
|
from lockstep.lockstep_response import LockstepResponse
class CodeDefinitionsClient:
def __init__(self, client):
self.client = client
def retrieve_codedefinition(self, id: str, include: str) -> LockstepResponse:
"""
Retrieves the CodeDefinition specified by this unique
identifier, optionally including nested data sets.
A CodeDefinition contains information around system code values
and their definitions.
Parameters
----------
id : str
The unique Lockstep Platform ID number of this
CodeDefinition
include : str
To fetch additional data on this object, specify the list of
elements to retrieve. No collections are currently available
but may be offered in the future
"""
path = f"/api/v1/CodeDefinitions/{id}"
return self.client.send_request("GET", path, None, {"id": id, "include": include})
def query_codedefinitions(self, filter: str, include: str, order: str, pageSize: int, pageNumber: int) -> LockstepResponse:
"""
Queries CodeDefinitions for this account using the specified
filtering, sorting, nested fetch, and pagination rules
requested.
More information on querying can be found on the [Searchlight
Query Language](https://developer.lockstep.io/docs/querying-with-searchlight)
page on the Lockstep Developer website.
A CodeDefinition contains information around system code values
and their definitions.
Parameters
----------
filter : str
The filter for this query. See [Searchlight Query
Language](https://developer.lockstep.io/docs/querying-with-searchlight)
include : str
To fetch additional data on this object, specify the list of
elements to retrieve. No collections are currently available
but may be offered in the future
order : str
The sort order for this query. See [Searchlight Query
Language](https://developer.lockstep.io/docs/querying-with-searchlight)
pageSize : int
The page size for results (default 200). See [Searchlight
Query Language](https://developer.lockstep.io/docs/querying-with-searchlight)
pageNumber : int
The page number for results (default 0). See [Searchlight
Query Language](https://developer.lockstep.io/docs/querying-with-searchlight)
"""
path = f"/api/v1/CodeDefinitions/query"
return self.client.send_request("GET", path, None, {"filter": filter, "include": include, "order": order, "pageSize": pageSize, "pageNumber": pageNumber})
| 0.862872 | 0.260778 |
import sys
import os
import uuid
import re
DEFAULT_VCPROJ = r'''<?xml version="1.0" encoding="Windows-1252"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="9.00"
Name="%{PROJNAME}%"
ProjectGUID="%{PROJGUID}%"
RootNamespace="%{PROJNAME}%"
TargetFrameworkVersion="196613"
>
<Platforms>
<Platform
Name="Win32"
/>
</Platforms>
<ToolFiles>
</ToolFiles>
<Configurations>
<Configuration
Name="Debug|Win32"
OutputDirectory="$(SolutionDir)$(ConfigurationName)"
IntermediateDirectory="$(ConfigurationName)"
ConfigurationType="1"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
WarningLevel="3"
DebugInformationFormat="4"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
GenerateDebugInformation="true"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|Win32"
OutputDirectory="$(SolutionDir)$(ConfigurationName)"
IntermediateDirectory="$(ConfigurationName)"
ConfigurationType="1"
CharacterSet="2"
WholeProgramOptimization="1"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
EnableIntrinsicFunctions="true"
RuntimeLibrary="2"
EnableFunctionLevelLinking="true"
WarningLevel="3"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
GenerateDebugInformation="true"
OptimizeReferences="2"
EnableCOMDATFolding="2"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<Filter
Name="Source Files"
Filter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx"
UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"
>
</Filter>
<Filter
Name="Header Files"
Filter="h;hpp;hxx;hm;inl;inc;xsd"
UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}"
>
</Filter>
<Filter
Name="Resource Files"
Filter="rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav"
UniqueIdentifier="{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}"
>
</Filter>
</Files>
<Globals>
</Globals>
</VisualStudioProject>
'''
DEFAULT_64_VCPROJ = r'''<?xml version="1.0" encoding="Windows-1252"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="9.00"
Name="%{PROJNAME}%"
ProjectGUID="%{PROJGUID}%"
RootNamespace="%{PROJNAME}%"
TargetFrameworkVersion="196613"
>
<Platforms>
<Platform
Name="Win32"
/>
<Platform
Name="x64"
/>
</Platforms>
<ToolFiles>
</ToolFiles>
<Configurations>
<Configuration
Name="Debug|Win32"
OutputDirectory="$(SolutionDir)$(ConfigurationName)"
IntermediateDirectory="$(ConfigurationName)"
ConfigurationType="1"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
WarningLevel="3"
DebugInformationFormat="4"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
GenerateDebugInformation="true"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|Win32"
OutputDirectory="$(SolutionDir)$(ConfigurationName)"
IntermediateDirectory="$(ConfigurationName)"
ConfigurationType="1"
CharacterSet="2"
WholeProgramOptimization="1"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
EnableIntrinsicFunctions="true"
RuntimeLibrary="2"
EnableFunctionLevelLinking="true"
WarningLevel="3"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
GenerateDebugInformation="true"
OptimizeReferences="2"
EnableCOMDATFolding="2"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Debug|x64"
OutputDirectory="$(SolutionDir)$(PlatformName)\$(ConfigurationName)"
IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
ConfigurationType="1"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
WarningLevel="3"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
GenerateDebugInformation="true"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|x64"
OutputDirectory="$(SolutionDir)$(PlatformName)\$(ConfigurationName)"
IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
ConfigurationType="1"
CharacterSet="2"
WholeProgramOptimization="1"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
EnableIntrinsicFunctions="true"
RuntimeLibrary="2"
EnableFunctionLevelLinking="true"
WarningLevel="3"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
GenerateDebugInformation="true"
OptimizeReferences="2"
EnableCOMDATFolding="2"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<Filter
Name="Source Files"
Filter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx"
UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"
>
</Filter>
<Filter
Name="Header Files"
Filter="h;hpp;hxx;hm;inl;inc;xsd"
UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}"
>
</Filter>
<Filter
Name="Resource Files"
Filter="rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav"
UniqueIdentifier="{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}"
>
</Filter>
</Files>
<Globals>
</Globals>
</VisualStudioProject>
'''
def format_text(fn, text, param):
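    """Substitute %{VAR}% placeholders in text from param (unknown names become
    %{UNKNOWN}%), convert line endings to CRLF and write the result to fn
    encoded as Windows-1252."""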
var_pat = re.compile(r'%{(?P<var>\w+)}%')
def repl(match):
var = match.group('var')
return param.get(var, '%{UNKNOWN}%')
with open(fn, 'wb') as fo:
fo.write(('\r\n'.join(re.sub(var_pat, repl, text).split('\n'))).encode('Windows-1252'))
def new_uuid():
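    """Return a new GUID in the uppercase, brace-wrapped form used by Visual Studio project files."""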
return '{' + str(uuid.uuid4()).upper() + '}'
def create_vcproj(target, x64support):
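    """Write <target>.vcproj from the x64-enabled or Win32-only template; an existing file is never overwritten."""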
projname = os.path.basename(target)
vcproj_fn = target + '.vcproj'
    for fn in ( vcproj_fn, ):
if os.path.exists(fn):
print('%s already exists!' % fn)
return
param = { 'PROJNAME': projname, 'PROJGUID': new_uuid() }
try:
default_vcproj = DEFAULT_64_VCPROJ if x64support else DEFAULT_VCPROJ
format_text(vcproj_fn, default_vcproj, param)
print('created %s.' % vcproj_fn)
except IOError as e:
print(e)
def main():
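    """Create a .vcproj for every path given on the command line; a leading -32 argument disables the x64 configurations."""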
args = sys.argv[1:]
x64support = True
if len(args) > 0 and args[0] == '-32':
x64support = False
args = args[1:]
for a in args:
create_vcproj(a, x64support)
return 0
if __name__ == '__main__':
    sys.exit(main())
#---eof---
|
src/newvc8.py
|
import sys
import os
import uuid
import re
DEFAULT_VCPROJ = r'''<?xml version="1.0" encoding="Windows-1252"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="9.00"
Name="%{PROJNAME}%"
ProjectGUID="%{PROJGUID}%"
RootNamespace="%{PROJNAME}%"
TargetFrameworkVersion="196613"
>
<Platforms>
<Platform
Name="Win32"
/>
</Platforms>
<ToolFiles>
</ToolFiles>
<Configurations>
<Configuration
Name="Debug|Win32"
OutputDirectory="$(SolutionDir)$(ConfigurationName)"
IntermediateDirectory="$(ConfigurationName)"
ConfigurationType="1"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
WarningLevel="3"
DebugInformationFormat="4"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
GenerateDebugInformation="true"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|Win32"
OutputDirectory="$(SolutionDir)$(ConfigurationName)"
IntermediateDirectory="$(ConfigurationName)"
ConfigurationType="1"
CharacterSet="2"
WholeProgramOptimization="1"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
EnableIntrinsicFunctions="true"
RuntimeLibrary="2"
EnableFunctionLevelLinking="true"
WarningLevel="3"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
GenerateDebugInformation="true"
OptimizeReferences="2"
EnableCOMDATFolding="2"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<Filter
Name="Source Files"
Filter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx"
UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"
>
</Filter>
<Filter
Name="Header Files"
Filter="h;hpp;hxx;hm;inl;inc;xsd"
UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}"
>
</Filter>
<Filter
Name="Resource Files"
Filter="rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav"
UniqueIdentifier="{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}"
>
</Filter>
</Files>
<Globals>
</Globals>
</VisualStudioProject>
'''
DEFAULT_64_VCPROJ = r'''<?xml version="1.0" encoding="Windows-1252"?>
<VisualStudioProject
ProjectType="Visual C++"
Version="9.00"
Name="%{PROJNAME}%"
ProjectGUID="%{PROJGUID}%"
RootNamespace="%{PROJNAME}%"
TargetFrameworkVersion="196613"
>
<Platforms>
<Platform
Name="Win32"
/>
<Platform
Name="x64"
/>
</Platforms>
<ToolFiles>
</ToolFiles>
<Configurations>
<Configuration
Name="Debug|Win32"
OutputDirectory="$(SolutionDir)$(ConfigurationName)"
IntermediateDirectory="$(ConfigurationName)"
ConfigurationType="1"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
WarningLevel="3"
DebugInformationFormat="4"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
GenerateDebugInformation="true"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|Win32"
OutputDirectory="$(SolutionDir)$(ConfigurationName)"
IntermediateDirectory="$(ConfigurationName)"
ConfigurationType="1"
CharacterSet="2"
WholeProgramOptimization="1"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
EnableIntrinsicFunctions="true"
RuntimeLibrary="2"
EnableFunctionLevelLinking="true"
WarningLevel="3"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
GenerateDebugInformation="true"
OptimizeReferences="2"
EnableCOMDATFolding="2"
TargetMachine="1"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Debug|x64"
OutputDirectory="$(SolutionDir)$(PlatformName)\$(ConfigurationName)"
IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
ConfigurationType="1"
CharacterSet="2"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
/>
<Tool
Name="VCCLCompilerTool"
Optimization="0"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
WarningLevel="3"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
GenerateDebugInformation="true"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
<Configuration
Name="Release|x64"
OutputDirectory="$(SolutionDir)$(PlatformName)\$(ConfigurationName)"
IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
ConfigurationType="1"
CharacterSet="2"
WholeProgramOptimization="1"
>
<Tool
Name="VCPreBuildEventTool"
/>
<Tool
Name="VCCustomBuildTool"
/>
<Tool
Name="VCXMLDataGeneratorTool"
/>
<Tool
Name="VCWebServiceProxyGeneratorTool"
/>
<Tool
Name="VCMIDLTool"
TargetEnvironment="3"
/>
<Tool
Name="VCCLCompilerTool"
Optimization="2"
EnableIntrinsicFunctions="true"
RuntimeLibrary="2"
EnableFunctionLevelLinking="true"
WarningLevel="3"
DebugInformationFormat="3"
/>
<Tool
Name="VCManagedResourceCompilerTool"
/>
<Tool
Name="VCResourceCompilerTool"
/>
<Tool
Name="VCPreLinkEventTool"
/>
<Tool
Name="VCLinkerTool"
GenerateDebugInformation="true"
OptimizeReferences="2"
EnableCOMDATFolding="2"
TargetMachine="17"
/>
<Tool
Name="VCALinkTool"
/>
<Tool
Name="VCManifestTool"
/>
<Tool
Name="VCXDCMakeTool"
/>
<Tool
Name="VCBscMakeTool"
/>
<Tool
Name="VCFxCopTool"
/>
<Tool
Name="VCAppVerifierTool"
/>
<Tool
Name="VCPostBuildEventTool"
/>
</Configuration>
</Configurations>
<References>
</References>
<Files>
<Filter
Name="Source Files"
Filter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx"
UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"
>
</Filter>
<Filter
Name="Header Files"
Filter="h;hpp;hxx;hm;inl;inc;xsd"
UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}"
>
</Filter>
<Filter
Name="Resource Files"
Filter="rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav"
UniqueIdentifier="{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}"
>
</Filter>
</Files>
<Globals>
</Globals>
</VisualStudioProject>
'''
def format_text(fn, text, param):
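    """Substitute %{VAR}% placeholders in text from param (unknown names become
    %{UNKNOWN}%), convert line endings to CRLF and write the result to fn
    encoded as Windows-1252."""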
var_pat = re.compile(r'%{(?P<var>\w+)}%')
def repl(match):
var = match.group('var')
return param.get(var, '%{UNKNOWN}%')
with open(fn, 'wb') as fo:
fo.write(('\r\n'.join(re.sub(var_pat, repl, text).split('\n'))).encode('Windows-1252'))
def new_uuid():
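    """Return a new GUID in the uppercase, brace-wrapped form used by Visual Studio project files."""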
return '{' + str(uuid.uuid4()).upper() + '}'
def create_vcproj(target, x64support):
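    """Write <target>.vcproj from the x64-enabled or Win32-only template; an existing file is never overwritten."""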
projname = os.path.basename(target)
vcproj_fn = target + '.vcproj'
    for fn in ( vcproj_fn, ):
if os.path.exists(fn):
print('%s already exists!' % fn)
return
param = { 'PROJNAME': projname, 'PROJGUID': new_uuid() }
try:
default_vcproj = DEFAULT_64_VCPROJ if x64support else DEFAULT_VCPROJ
format_text(vcproj_fn, default_vcproj, param)
print('created %s.' % vcproj_fn)
except IOError as e:
print(e)
def main():
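    """Create a .vcproj for every path given on the command line; a leading -32 argument disables the x64 configurations."""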
args = sys.argv[1:]
x64support = True
if len(args) > 0 and args[0] == '-32':
x64support = False
args = args[1:]
for a in args:
create_vcproj(a, x64support)
return 0
if __name__ == '__main__':
    sys.exit(main())
#---eof---
| 0.22627 | 0.055695 |
from http import HTTPStatus
from flask import Response, json, request
from flask_apispec import doc, marshal_with, use_kwargs
from flask_apispec.views import MethodResource
from flask_restful import Resource, reqparse
from smart_queue.apps import InvalidResponseModel
from smart_queue.apps.conditions.models import (
ConditionGETRequestModel,
ConditionGETResponse,
ConditionPOSTRequestModel,
)
from smart_queue.db.database import (
delete_condition,
get_all_conditions,
insert_condition,
)
class ConditionEndpoint(MethodResource, Resource):
"""
    Endpoint related to conditions.
"""
@doc(description="Endpoint used for conditions", tags=["Condition"])
# @marshal_with(ConditionGETResponse)
@marshal_with(InvalidResponseModel, code=400)
@marshal_with(InvalidResponseModel, code=404)
@marshal_with(InvalidResponseModel, code=422)
@marshal_with(InvalidResponseModel, code=500)
def get(self):
"""
        GET Method - Get all conditions
"""
return Response(
response=json.dumps(
[
{
"id": condition.id,
"name": condition.name,
"description": condition.description,
}
for condition in get_all_conditions()
]
),
status=HTTPStatus.OK,
mimetype="application/json",
)
@doc(description="Endpoint used for conditions", tags=["Condition"])
# @use_kwargs(ConditionPOSTRequestModel)
@marshal_with(ConditionPOSTRequestModel, code=201)
@marshal_with(InvalidResponseModel, code=400)
@marshal_with(InvalidResponseModel, code=404)
@marshal_with(InvalidResponseModel, code=422)
@marshal_with(InvalidResponseModel, code=500)
def post(self):
"""
POST Method - Insert new condition
"""
# NOTE: Add JSON validation
try:
name = None
desc = None
burst_time = None
urgency = None
if "name" in request.json:
name = request.json["name"]
if "description" in request.json:
desc = request.json["description"]
if "burst_time" in request.json:
burst_time = request.json["burst_time"]
if "urgency" in request.json:
urgency = request.json["urgency"]
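            # NOTE: urgency is read from the payload above but is not forwarded;
            # insert_condition currently receives only name, desc and burst_time.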
# Add condition
insert_condition(name=name, desc=desc, burst_time=burst_time)
except KeyError:
return Response(
response=json.dumps({"info": "Wrong JSON format."}),
status=HTTPStatus.BAD_REQUEST,
mimetype="application/json",
)
@doc(description="Endpoint used for conditions", tags=["Condition"])
# @use_kwargs(ConditionPOSTRequestModel)
@marshal_with(ConditionPOSTRequestModel, code=201)
@marshal_with(InvalidResponseModel, code=400)
@marshal_with(InvalidResponseModel, code=404)
@marshal_with(InvalidResponseModel, code=422)
@marshal_with(InvalidResponseModel, code=500)
def delete(self):
"""
        DELETE Method - Delete an existing condition
"""
# NOTE: Add JSON validation
try:
id = None
if "id" in request.json:
id = request.json["id"]
            # Delete condition
delete_condition(id=id)
except KeyError:
return Response(
response=json.dumps({"info": "Wrong JSON format."}),
status=HTTPStatus.BAD_REQUEST,
mimetype="application/json",
)
|
smart_queue/apps/conditions/views.py
|
from http import HTTPStatus
from flask import Response, json, request
from flask_apispec import doc, marshal_with, use_kwargs
from flask_apispec.views import MethodResource
from flask_restful import Resource, reqparse
from smart_queue.apps import InvalidResponseModel
from smart_queue.apps.conditions.models import (
ConditionGETRequestModel,
ConditionGETResponse,
ConditionPOSTRequestModel,
)
from smart_queue.db.database import (
delete_condition,
get_all_conditions,
insert_condition,
)
class ConditionEndpoint(MethodResource, Resource):
"""
    Endpoint related to conditions.
"""
@doc(description="Endpoint used for conditions", tags=["Condition"])
# @marshal_with(ConditionGETResponse)
@marshal_with(InvalidResponseModel, code=400)
@marshal_with(InvalidResponseModel, code=404)
@marshal_with(InvalidResponseModel, code=422)
@marshal_with(InvalidResponseModel, code=500)
def get(self):
"""
        GET Method - Get all conditions
"""
return Response(
response=json.dumps(
[
{
"id": condition.id,
"name": condition.name,
"description": condition.description,
}
for condition in get_all_conditions()
]
),
status=HTTPStatus.OK,
mimetype="application/json",
)
@doc(description="Endpoint used for conditions", tags=["Condition"])
# @use_kwargs(ConditionPOSTRequestModel)
@marshal_with(ConditionPOSTRequestModel, code=201)
@marshal_with(InvalidResponseModel, code=400)
@marshal_with(InvalidResponseModel, code=404)
@marshal_with(InvalidResponseModel, code=422)
@marshal_with(InvalidResponseModel, code=500)
def post(self):
"""
POST Method - Insert new condition
"""
# NOTE: Add JSON validation
try:
name = None
desc = None
burst_time = None
urgency = None
if "name" in request.json:
name = request.json["name"]
if "description" in request.json:
desc = request.json["description"]
if "burst_time" in request.json:
burst_time = request.json["burst_time"]
if "urgency" in request.json:
urgency = request.json["urgency"]
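            # NOTE: urgency is read from the payload above but is not forwarded;
            # insert_condition currently receives only name, desc and burst_time.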
# Add condition
insert_condition(name=name, desc=desc, burst_time=burst_time)
except KeyError:
return Response(
response=json.dumps({"info": "Wrong JSON format."}),
status=HTTPStatus.BAD_REQUEST,
mimetype="application/json",
)
@doc(description="Endpoint used for conditions", tags=["Condition"])
# @use_kwargs(ConditionPOSTRequestModel)
@marshal_with(ConditionPOSTRequestModel, code=201)
@marshal_with(InvalidResponseModel, code=400)
@marshal_with(InvalidResponseModel, code=404)
@marshal_with(InvalidResponseModel, code=422)
@marshal_with(InvalidResponseModel, code=500)
def delete(self):
"""
        DELETE Method - Delete an existing condition
"""
# NOTE: Add JSON validation
try:
id = None
if "id" in request.json:
id = request.json["id"]
            # Delete condition
delete_condition(id=id)
except KeyError:
return Response(
response=json.dumps({"info": "Wrong JSON format."}),
status=HTTPStatus.BAD_REQUEST,
mimetype="application/json",
)
| 0.609059 | 0.056966 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['IndustrialPidLoopArgs', 'IndustrialPidLoop']
@pulumi.input_type
class IndustrialPidLoopArgs:
def __init__(__self__, *,
pid_loop_configuration: pulumi.Input[str],
pid_loop_dcs_type: pulumi.Input[str],
pid_loop_is_crucial: pulumi.Input[bool],
pid_loop_name: pulumi.Input[str],
pid_loop_type: pulumi.Input[str],
pid_project_id: pulumi.Input[str],
pid_loop_desc: Optional[pulumi.Input[str]] = None):
"""
        The set of arguments for constructing an IndustrialPidLoop resource.
:param pulumi.Input[str] pid_loop_configuration: The Pid Loop Configuration.
:param pulumi.Input[str] pid_loop_dcs_type: The dcs type of Pid Loop. Valid values: `standard`.
        :param pulumi.Input[bool] pid_loop_is_crucial: Whether the Pid Loop is crucial.
:param pulumi.Input[str] pid_loop_name: The name of Pid Loop.
:param pulumi.Input[str] pid_loop_type: The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
:param pulumi.Input[str] pid_project_id: The pid project id.
:param pulumi.Input[str] pid_loop_desc: The desc of Pid Loop.
"""
pulumi.set(__self__, "pid_loop_configuration", pid_loop_configuration)
pulumi.set(__self__, "pid_loop_dcs_type", pid_loop_dcs_type)
pulumi.set(__self__, "pid_loop_is_crucial", pid_loop_is_crucial)
pulumi.set(__self__, "pid_loop_name", pid_loop_name)
pulumi.set(__self__, "pid_loop_type", pid_loop_type)
pulumi.set(__self__, "pid_project_id", pid_project_id)
if pid_loop_desc is not None:
pulumi.set(__self__, "pid_loop_desc", pid_loop_desc)
@property
@pulumi.getter(name="pidLoopConfiguration")
def pid_loop_configuration(self) -> pulumi.Input[str]:
"""
The Pid Loop Configuration.
"""
return pulumi.get(self, "pid_loop_configuration")
@pid_loop_configuration.setter
def pid_loop_configuration(self, value: pulumi.Input[str]):
pulumi.set(self, "pid_loop_configuration", value)
@property
@pulumi.getter(name="pidLoopDcsType")
def pid_loop_dcs_type(self) -> pulumi.Input[str]:
"""
The dcs type of Pid Loop. Valid values: `standard`.
"""
return pulumi.get(self, "pid_loop_dcs_type")
@pid_loop_dcs_type.setter
def pid_loop_dcs_type(self, value: pulumi.Input[str]):
pulumi.set(self, "pid_loop_dcs_type", value)
@property
@pulumi.getter(name="pidLoopIsCrucial")
def pid_loop_is_crucial(self) -> pulumi.Input[bool]:
"""
        Whether the Pid Loop is crucial.
"""
return pulumi.get(self, "pid_loop_is_crucial")
@pid_loop_is_crucial.setter
def pid_loop_is_crucial(self, value: pulumi.Input[bool]):
pulumi.set(self, "pid_loop_is_crucial", value)
@property
@pulumi.getter(name="pidLoopName")
def pid_loop_name(self) -> pulumi.Input[str]:
"""
The name of Pid Loop.
"""
return pulumi.get(self, "pid_loop_name")
@pid_loop_name.setter
def pid_loop_name(self, value: pulumi.Input[str]):
pulumi.set(self, "pid_loop_name", value)
@property
@pulumi.getter(name="pidLoopType")
def pid_loop_type(self) -> pulumi.Input[str]:
"""
The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
"""
return pulumi.get(self, "pid_loop_type")
@pid_loop_type.setter
def pid_loop_type(self, value: pulumi.Input[str]):
pulumi.set(self, "pid_loop_type", value)
@property
@pulumi.getter(name="pidProjectId")
def pid_project_id(self) -> pulumi.Input[str]:
"""
The pid project id.
"""
return pulumi.get(self, "pid_project_id")
@pid_project_id.setter
def pid_project_id(self, value: pulumi.Input[str]):
pulumi.set(self, "pid_project_id", value)
@property
@pulumi.getter(name="pidLoopDesc")
def pid_loop_desc(self) -> Optional[pulumi.Input[str]]:
"""
The desc of Pid Loop.
"""
return pulumi.get(self, "pid_loop_desc")
@pid_loop_desc.setter
def pid_loop_desc(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pid_loop_desc", value)
@pulumi.input_type
class _IndustrialPidLoopState:
def __init__(__self__, *,
pid_loop_configuration: Optional[pulumi.Input[str]] = None,
pid_loop_dcs_type: Optional[pulumi.Input[str]] = None,
pid_loop_desc: Optional[pulumi.Input[str]] = None,
pid_loop_is_crucial: Optional[pulumi.Input[bool]] = None,
pid_loop_name: Optional[pulumi.Input[str]] = None,
pid_loop_type: Optional[pulumi.Input[str]] = None,
pid_project_id: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering IndustrialPidLoop resources.
:param pulumi.Input[str] pid_loop_configuration: The Pid Loop Configuration.
:param pulumi.Input[str] pid_loop_dcs_type: The dcs type of Pid Loop. Valid values: `standard`.
:param pulumi.Input[str] pid_loop_desc: The desc of Pid Loop.
        :param pulumi.Input[bool] pid_loop_is_crucial: Whether the Pid Loop is crucial.
:param pulumi.Input[str] pid_loop_name: The name of Pid Loop.
:param pulumi.Input[str] pid_loop_type: The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
:param pulumi.Input[str] pid_project_id: The pid project id.
:param pulumi.Input[str] status: The status of Pid Loop.
"""
if pid_loop_configuration is not None:
pulumi.set(__self__, "pid_loop_configuration", pid_loop_configuration)
if pid_loop_dcs_type is not None:
pulumi.set(__self__, "pid_loop_dcs_type", pid_loop_dcs_type)
if pid_loop_desc is not None:
pulumi.set(__self__, "pid_loop_desc", pid_loop_desc)
if pid_loop_is_crucial is not None:
pulumi.set(__self__, "pid_loop_is_crucial", pid_loop_is_crucial)
if pid_loop_name is not None:
pulumi.set(__self__, "pid_loop_name", pid_loop_name)
if pid_loop_type is not None:
pulumi.set(__self__, "pid_loop_type", pid_loop_type)
if pid_project_id is not None:
pulumi.set(__self__, "pid_project_id", pid_project_id)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="pidLoopConfiguration")
def pid_loop_configuration(self) -> Optional[pulumi.Input[str]]:
"""
The Pid Loop Configuration.
"""
return pulumi.get(self, "pid_loop_configuration")
@pid_loop_configuration.setter
def pid_loop_configuration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pid_loop_configuration", value)
@property
@pulumi.getter(name="pidLoopDcsType")
def pid_loop_dcs_type(self) -> Optional[pulumi.Input[str]]:
"""
The dcs type of Pid Loop. Valid values: `standard`.
"""
return pulumi.get(self, "pid_loop_dcs_type")
@pid_loop_dcs_type.setter
def pid_loop_dcs_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pid_loop_dcs_type", value)
@property
@pulumi.getter(name="pidLoopDesc")
def pid_loop_desc(self) -> Optional[pulumi.Input[str]]:
"""
The desc of Pid Loop.
"""
return pulumi.get(self, "pid_loop_desc")
@pid_loop_desc.setter
def pid_loop_desc(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pid_loop_desc", value)
@property
@pulumi.getter(name="pidLoopIsCrucial")
def pid_loop_is_crucial(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether the Pid Loop is crucial.
"""
return pulumi.get(self, "pid_loop_is_crucial")
@pid_loop_is_crucial.setter
def pid_loop_is_crucial(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "pid_loop_is_crucial", value)
@property
@pulumi.getter(name="pidLoopName")
def pid_loop_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of Pid Loop.
"""
return pulumi.get(self, "pid_loop_name")
@pid_loop_name.setter
def pid_loop_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pid_loop_name", value)
@property
@pulumi.getter(name="pidLoopType")
def pid_loop_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
"""
return pulumi.get(self, "pid_loop_type")
@pid_loop_type.setter
def pid_loop_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pid_loop_type", value)
@property
@pulumi.getter(name="pidProjectId")
def pid_project_id(self) -> Optional[pulumi.Input[str]]:
"""
The pid project id.
"""
return pulumi.get(self, "pid_project_id")
@pid_project_id.setter
def pid_project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pid_project_id", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
The status of Pid Loop.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
class IndustrialPidLoop(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
pid_loop_configuration: Optional[pulumi.Input[str]] = None,
pid_loop_dcs_type: Optional[pulumi.Input[str]] = None,
pid_loop_desc: Optional[pulumi.Input[str]] = None,
pid_loop_is_crucial: Optional[pulumi.Input[bool]] = None,
pid_loop_name: Optional[pulumi.Input[str]] = None,
pid_loop_type: Optional[pulumi.Input[str]] = None,
pid_project_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Brain Industrial Pid Loop resource.
> **NOTE:** Available in v1.117.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.brain.IndustrialPidLoop("example",
pid_loop_configuration="YourLoopConfiguration",
pid_loop_dcs_type="standard",
pid_loop_is_crucial=True,
pid_loop_name="tf-testAcc",
pid_loop_type="0",
pid_project_id="856c6b8f-ca63-40a4-xxxx-xxxx")
```
## Import
Brain Industrial Pid Loop can be imported using the id, e.g.
```sh
$ pulumi import alicloud:brain/industrialPidLoop:IndustrialPidLoop example <id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] pid_loop_configuration: The Pid Loop Configuration.
:param pulumi.Input[str] pid_loop_dcs_type: The dcs type of Pid Loop. Valid values: `standard`.
:param pulumi.Input[str] pid_loop_desc: The desc of Pid Loop.
        :param pulumi.Input[bool] pid_loop_is_crucial: Whether the Pid Loop is crucial.
:param pulumi.Input[str] pid_loop_name: The name of Pid Loop.
:param pulumi.Input[str] pid_loop_type: The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
:param pulumi.Input[str] pid_project_id: The pid project id.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IndustrialPidLoopArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Brain Industrial Pid Loop resource.
> **NOTE:** Available in v1.117.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.brain.IndustrialPidLoop("example",
pid_loop_configuration="YourLoopConfiguration",
pid_loop_dcs_type="standard",
pid_loop_is_crucial=True,
pid_loop_name="tf-testAcc",
pid_loop_type="0",
pid_project_id="856c6b8f-ca63-40a4-xxxx-xxxx")
```
## Import
Brain Industrial Pid Loop can be imported using the id, e.g.
```sh
$ pulumi import alicloud:brain/industrialPidLoop:IndustrialPidLoop example <id>
```
:param str resource_name: The name of the resource.
:param IndustrialPidLoopArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IndustrialPidLoopArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
pid_loop_configuration: Optional[pulumi.Input[str]] = None,
pid_loop_dcs_type: Optional[pulumi.Input[str]] = None,
pid_loop_desc: Optional[pulumi.Input[str]] = None,
pid_loop_is_crucial: Optional[pulumi.Input[bool]] = None,
pid_loop_name: Optional[pulumi.Input[str]] = None,
pid_loop_type: Optional[pulumi.Input[str]] = None,
pid_project_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IndustrialPidLoopArgs.__new__(IndustrialPidLoopArgs)
if pid_loop_configuration is None and not opts.urn:
raise TypeError("Missing required property 'pid_loop_configuration'")
__props__.__dict__["pid_loop_configuration"] = pid_loop_configuration
if pid_loop_dcs_type is None and not opts.urn:
raise TypeError("Missing required property 'pid_loop_dcs_type'")
__props__.__dict__["pid_loop_dcs_type"] = pid_loop_dcs_type
__props__.__dict__["pid_loop_desc"] = pid_loop_desc
if pid_loop_is_crucial is None and not opts.urn:
raise TypeError("Missing required property 'pid_loop_is_crucial'")
__props__.__dict__["pid_loop_is_crucial"] = pid_loop_is_crucial
if pid_loop_name is None and not opts.urn:
raise TypeError("Missing required property 'pid_loop_name'")
__props__.__dict__["pid_loop_name"] = pid_loop_name
if pid_loop_type is None and not opts.urn:
raise TypeError("Missing required property 'pid_loop_type'")
__props__.__dict__["pid_loop_type"] = pid_loop_type
if pid_project_id is None and not opts.urn:
raise TypeError("Missing required property 'pid_project_id'")
__props__.__dict__["pid_project_id"] = pid_project_id
__props__.__dict__["status"] = None
super(IndustrialPidLoop, __self__).__init__(
'alicloud:brain/industrialPidLoop:IndustrialPidLoop',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
pid_loop_configuration: Optional[pulumi.Input[str]] = None,
pid_loop_dcs_type: Optional[pulumi.Input[str]] = None,
pid_loop_desc: Optional[pulumi.Input[str]] = None,
pid_loop_is_crucial: Optional[pulumi.Input[bool]] = None,
pid_loop_name: Optional[pulumi.Input[str]] = None,
pid_loop_type: Optional[pulumi.Input[str]] = None,
pid_project_id: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None) -> 'IndustrialPidLoop':
"""
Get an existing IndustrialPidLoop resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] pid_loop_configuration: The Pid Loop Configuration.
:param pulumi.Input[str] pid_loop_dcs_type: The dcs type of Pid Loop. Valid values: `standard`.
:param pulumi.Input[str] pid_loop_desc: The desc of Pid Loop.
        :param pulumi.Input[bool] pid_loop_is_crucial: Whether the Pid Loop is crucial.
:param pulumi.Input[str] pid_loop_name: The name of Pid Loop.
:param pulumi.Input[str] pid_loop_type: The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
:param pulumi.Input[str] pid_project_id: The pid project id.
:param pulumi.Input[str] status: The status of Pid Loop.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _IndustrialPidLoopState.__new__(_IndustrialPidLoopState)
__props__.__dict__["pid_loop_configuration"] = pid_loop_configuration
__props__.__dict__["pid_loop_dcs_type"] = pid_loop_dcs_type
__props__.__dict__["pid_loop_desc"] = pid_loop_desc
__props__.__dict__["pid_loop_is_crucial"] = pid_loop_is_crucial
__props__.__dict__["pid_loop_name"] = pid_loop_name
__props__.__dict__["pid_loop_type"] = pid_loop_type
__props__.__dict__["pid_project_id"] = pid_project_id
__props__.__dict__["status"] = status
return IndustrialPidLoop(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="pidLoopConfiguration")
def pid_loop_configuration(self) -> pulumi.Output[str]:
"""
The Pid Loop Configuration.
"""
return pulumi.get(self, "pid_loop_configuration")
@property
@pulumi.getter(name="pidLoopDcsType")
def pid_loop_dcs_type(self) -> pulumi.Output[str]:
"""
The dcs type of Pid Loop. Valid values: `standard`.
"""
return pulumi.get(self, "pid_loop_dcs_type")
@property
@pulumi.getter(name="pidLoopDesc")
def pid_loop_desc(self) -> pulumi.Output[Optional[str]]:
"""
The desc of Pid Loop.
"""
return pulumi.get(self, "pid_loop_desc")
@property
@pulumi.getter(name="pidLoopIsCrucial")
def pid_loop_is_crucial(self) -> pulumi.Output[bool]:
"""
        Whether the Pid Loop is crucial.
"""
return pulumi.get(self, "pid_loop_is_crucial")
@property
@pulumi.getter(name="pidLoopName")
def pid_loop_name(self) -> pulumi.Output[str]:
"""
The name of Pid Loop.
"""
return pulumi.get(self, "pid_loop_name")
@property
@pulumi.getter(name="pidLoopType")
def pid_loop_type(self) -> pulumi.Output[str]:
"""
The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
"""
return pulumi.get(self, "pid_loop_type")
@property
@pulumi.getter(name="pidProjectId")
def pid_project_id(self) -> pulumi.Output[str]:
"""
The pid project id.
"""
return pulumi.get(self, "pid_project_id")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
The status of Pid Loop.
"""
return pulumi.get(self, "status")
|
sdk/python/pulumi_alicloud/brain/industrial_pid_loop.py
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['IndustrialPidLoopArgs', 'IndustrialPidLoop']
@pulumi.input_type
class IndustrialPidLoopArgs:
def __init__(__self__, *,
pid_loop_configuration: pulumi.Input[str],
pid_loop_dcs_type: pulumi.Input[str],
pid_loop_is_crucial: pulumi.Input[bool],
pid_loop_name: pulumi.Input[str],
pid_loop_type: pulumi.Input[str],
pid_project_id: pulumi.Input[str],
pid_loop_desc: Optional[pulumi.Input[str]] = None):
"""
        The set of arguments for constructing an IndustrialPidLoop resource.
:param pulumi.Input[str] pid_loop_configuration: The Pid Loop Configuration.
:param pulumi.Input[str] pid_loop_dcs_type: The dcs type of Pid Loop. Valid values: `standard`.
        :param pulumi.Input[bool] pid_loop_is_crucial: Whether the Pid Loop is crucial.
:param pulumi.Input[str] pid_loop_name: The name of Pid Loop.
:param pulumi.Input[str] pid_loop_type: The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
:param pulumi.Input[str] pid_project_id: The pid project id.
:param pulumi.Input[str] pid_loop_desc: The desc of Pid Loop.
"""
pulumi.set(__self__, "pid_loop_configuration", pid_loop_configuration)
pulumi.set(__self__, "pid_loop_dcs_type", pid_loop_dcs_type)
pulumi.set(__self__, "pid_loop_is_crucial", pid_loop_is_crucial)
pulumi.set(__self__, "pid_loop_name", pid_loop_name)
pulumi.set(__self__, "pid_loop_type", pid_loop_type)
pulumi.set(__self__, "pid_project_id", pid_project_id)
if pid_loop_desc is not None:
pulumi.set(__self__, "pid_loop_desc", pid_loop_desc)
@property
@pulumi.getter(name="pidLoopConfiguration")
def pid_loop_configuration(self) -> pulumi.Input[str]:
"""
The Pid Loop Configuration.
"""
return pulumi.get(self, "pid_loop_configuration")
@pid_loop_configuration.setter
def pid_loop_configuration(self, value: pulumi.Input[str]):
pulumi.set(self, "pid_loop_configuration", value)
@property
@pulumi.getter(name="pidLoopDcsType")
def pid_loop_dcs_type(self) -> pulumi.Input[str]:
"""
The dcs type of Pid Loop. Valid values: `standard`.
"""
return pulumi.get(self, "pid_loop_dcs_type")
@pid_loop_dcs_type.setter
def pid_loop_dcs_type(self, value: pulumi.Input[str]):
pulumi.set(self, "pid_loop_dcs_type", value)
@property
@pulumi.getter(name="pidLoopIsCrucial")
def pid_loop_is_crucial(self) -> pulumi.Input[bool]:
"""
        Whether the Pid Loop is crucial.
"""
return pulumi.get(self, "pid_loop_is_crucial")
@pid_loop_is_crucial.setter
def pid_loop_is_crucial(self, value: pulumi.Input[bool]):
pulumi.set(self, "pid_loop_is_crucial", value)
@property
@pulumi.getter(name="pidLoopName")
def pid_loop_name(self) -> pulumi.Input[str]:
"""
The name of Pid Loop.
"""
return pulumi.get(self, "pid_loop_name")
@pid_loop_name.setter
def pid_loop_name(self, value: pulumi.Input[str]):
pulumi.set(self, "pid_loop_name", value)
@property
@pulumi.getter(name="pidLoopType")
def pid_loop_type(self) -> pulumi.Input[str]:
"""
The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
"""
return pulumi.get(self, "pid_loop_type")
@pid_loop_type.setter
def pid_loop_type(self, value: pulumi.Input[str]):
pulumi.set(self, "pid_loop_type", value)
@property
@pulumi.getter(name="pidProjectId")
def pid_project_id(self) -> pulumi.Input[str]:
"""
The pid project id.
"""
return pulumi.get(self, "pid_project_id")
@pid_project_id.setter
def pid_project_id(self, value: pulumi.Input[str]):
pulumi.set(self, "pid_project_id", value)
@property
@pulumi.getter(name="pidLoopDesc")
def pid_loop_desc(self) -> Optional[pulumi.Input[str]]:
"""
The desc of Pid Loop.
"""
return pulumi.get(self, "pid_loop_desc")
@pid_loop_desc.setter
def pid_loop_desc(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pid_loop_desc", value)
@pulumi.input_type
class _IndustrialPidLoopState:
def __init__(__self__, *,
pid_loop_configuration: Optional[pulumi.Input[str]] = None,
pid_loop_dcs_type: Optional[pulumi.Input[str]] = None,
pid_loop_desc: Optional[pulumi.Input[str]] = None,
pid_loop_is_crucial: Optional[pulumi.Input[bool]] = None,
pid_loop_name: Optional[pulumi.Input[str]] = None,
pid_loop_type: Optional[pulumi.Input[str]] = None,
pid_project_id: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering IndustrialPidLoop resources.
:param pulumi.Input[str] pid_loop_configuration: The Pid Loop Configuration.
:param pulumi.Input[str] pid_loop_dcs_type: The dcs type of Pid Loop. Valid values: `standard`.
:param pulumi.Input[str] pid_loop_desc: The desc of Pid Loop.
        :param pulumi.Input[bool] pid_loop_is_crucial: Whether the Pid Loop is crucial.
:param pulumi.Input[str] pid_loop_name: The name of Pid Loop.
:param pulumi.Input[str] pid_loop_type: The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
:param pulumi.Input[str] pid_project_id: The pid project id.
:param pulumi.Input[str] status: The status of Pid Loop.
"""
if pid_loop_configuration is not None:
pulumi.set(__self__, "pid_loop_configuration", pid_loop_configuration)
if pid_loop_dcs_type is not None:
pulumi.set(__self__, "pid_loop_dcs_type", pid_loop_dcs_type)
if pid_loop_desc is not None:
pulumi.set(__self__, "pid_loop_desc", pid_loop_desc)
if pid_loop_is_crucial is not None:
pulumi.set(__self__, "pid_loop_is_crucial", pid_loop_is_crucial)
if pid_loop_name is not None:
pulumi.set(__self__, "pid_loop_name", pid_loop_name)
if pid_loop_type is not None:
pulumi.set(__self__, "pid_loop_type", pid_loop_type)
if pid_project_id is not None:
pulumi.set(__self__, "pid_project_id", pid_project_id)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="pidLoopConfiguration")
def pid_loop_configuration(self) -> Optional[pulumi.Input[str]]:
"""
The Pid Loop Configuration.
"""
return pulumi.get(self, "pid_loop_configuration")
@pid_loop_configuration.setter
def pid_loop_configuration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pid_loop_configuration", value)
@property
@pulumi.getter(name="pidLoopDcsType")
def pid_loop_dcs_type(self) -> Optional[pulumi.Input[str]]:
"""
The dcs type of Pid Loop. Valid values: `standard`.
"""
return pulumi.get(self, "pid_loop_dcs_type")
@pid_loop_dcs_type.setter
def pid_loop_dcs_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pid_loop_dcs_type", value)
@property
@pulumi.getter(name="pidLoopDesc")
def pid_loop_desc(self) -> Optional[pulumi.Input[str]]:
"""
The desc of Pid Loop.
"""
return pulumi.get(self, "pid_loop_desc")
@pid_loop_desc.setter
def pid_loop_desc(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pid_loop_desc", value)
@property
@pulumi.getter(name="pidLoopIsCrucial")
def pid_loop_is_crucial(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether the Pid Loop is crucial.
"""
return pulumi.get(self, "pid_loop_is_crucial")
@pid_loop_is_crucial.setter
def pid_loop_is_crucial(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "pid_loop_is_crucial", value)
@property
@pulumi.getter(name="pidLoopName")
def pid_loop_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of Pid Loop.
"""
return pulumi.get(self, "pid_loop_name")
@pid_loop_name.setter
def pid_loop_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pid_loop_name", value)
@property
@pulumi.getter(name="pidLoopType")
def pid_loop_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
"""
return pulumi.get(self, "pid_loop_type")
@pid_loop_type.setter
def pid_loop_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pid_loop_type", value)
@property
@pulumi.getter(name="pidProjectId")
def pid_project_id(self) -> Optional[pulumi.Input[str]]:
"""
The pid project id.
"""
return pulumi.get(self, "pid_project_id")
@pid_project_id.setter
def pid_project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pid_project_id", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
The status of Pid Loop.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
class IndustrialPidLoop(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
pid_loop_configuration: Optional[pulumi.Input[str]] = None,
pid_loop_dcs_type: Optional[pulumi.Input[str]] = None,
pid_loop_desc: Optional[pulumi.Input[str]] = None,
pid_loop_is_crucial: Optional[pulumi.Input[bool]] = None,
pid_loop_name: Optional[pulumi.Input[str]] = None,
pid_loop_type: Optional[pulumi.Input[str]] = None,
pid_project_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Brain Industrial Pid Loop resource.
> **NOTE:** Available in v1.117.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.brain.IndustrialPidLoop("example",
pid_loop_configuration="YourLoopConfiguration",
pid_loop_dcs_type="standard",
pid_loop_is_crucial=True,
pid_loop_name="tf-testAcc",
pid_loop_type="0",
pid_project_id="856c6b8f-ca63-40a4-xxxx-xxxx")
```
## Import
Brain Industrial Pid Loop can be imported using the id, e.g.
```sh
$ pulumi import alicloud:brain/industrialPidLoop:IndustrialPidLoop example <id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] pid_loop_configuration: The Pid Loop Configuration.
:param pulumi.Input[str] pid_loop_dcs_type: The dcs type of Pid Loop. Valid values: `standard`.
:param pulumi.Input[str] pid_loop_desc: The desc of Pid Loop.
        :param pulumi.Input[bool] pid_loop_is_crucial: Whether the Pid Loop is crucial.
:param pulumi.Input[str] pid_loop_name: The name of Pid Loop.
:param pulumi.Input[str] pid_loop_type: The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
:param pulumi.Input[str] pid_project_id: The pid project id.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IndustrialPidLoopArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Brain Industrial Pid Loop resource.
> **NOTE:** Available in v1.117.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example = alicloud.brain.IndustrialPidLoop("example",
pid_loop_configuration="YourLoopConfiguration",
pid_loop_dcs_type="standard",
pid_loop_is_crucial=True,
pid_loop_name="tf-testAcc",
pid_loop_type="0",
pid_project_id="856c6b8f-ca63-40a4-xxxx-xxxx")
```
## Import
Brain Industrial Pid Loop can be imported using the id, e.g.
```sh
$ pulumi import alicloud:brain/industrialPidLoop:IndustrialPidLoop example <id>
```
:param str resource_name: The name of the resource.
:param IndustrialPidLoopArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IndustrialPidLoopArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
pid_loop_configuration: Optional[pulumi.Input[str]] = None,
pid_loop_dcs_type: Optional[pulumi.Input[str]] = None,
pid_loop_desc: Optional[pulumi.Input[str]] = None,
pid_loop_is_crucial: Optional[pulumi.Input[bool]] = None,
pid_loop_name: Optional[pulumi.Input[str]] = None,
pid_loop_type: Optional[pulumi.Input[str]] = None,
pid_project_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IndustrialPidLoopArgs.__new__(IndustrialPidLoopArgs)
if pid_loop_configuration is None and not opts.urn:
raise TypeError("Missing required property 'pid_loop_configuration'")
__props__.__dict__["pid_loop_configuration"] = pid_loop_configuration
if pid_loop_dcs_type is None and not opts.urn:
raise TypeError("Missing required property 'pid_loop_dcs_type'")
__props__.__dict__["pid_loop_dcs_type"] = pid_loop_dcs_type
__props__.__dict__["pid_loop_desc"] = pid_loop_desc
if pid_loop_is_crucial is None and not opts.urn:
raise TypeError("Missing required property 'pid_loop_is_crucial'")
__props__.__dict__["pid_loop_is_crucial"] = pid_loop_is_crucial
if pid_loop_name is None and not opts.urn:
raise TypeError("Missing required property 'pid_loop_name'")
__props__.__dict__["pid_loop_name"] = pid_loop_name
if pid_loop_type is None and not opts.urn:
raise TypeError("Missing required property 'pid_loop_type'")
__props__.__dict__["pid_loop_type"] = pid_loop_type
if pid_project_id is None and not opts.urn:
raise TypeError("Missing required property 'pid_project_id'")
__props__.__dict__["pid_project_id"] = pid_project_id
__props__.__dict__["status"] = None
super(IndustrialPidLoop, __self__).__init__(
'alicloud:brain/industrialPidLoop:IndustrialPidLoop',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
pid_loop_configuration: Optional[pulumi.Input[str]] = None,
pid_loop_dcs_type: Optional[pulumi.Input[str]] = None,
pid_loop_desc: Optional[pulumi.Input[str]] = None,
pid_loop_is_crucial: Optional[pulumi.Input[bool]] = None,
pid_loop_name: Optional[pulumi.Input[str]] = None,
pid_loop_type: Optional[pulumi.Input[str]] = None,
pid_project_id: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None) -> 'IndustrialPidLoop':
"""
Get an existing IndustrialPidLoop resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] pid_loop_configuration: The Pid Loop Configuration.
:param pulumi.Input[str] pid_loop_dcs_type: The dcs type of Pid Loop. Valid values: `standard`.
:param pulumi.Input[str] pid_loop_desc: The desc of Pid Loop.
        :param pulumi.Input[bool] pid_loop_is_crucial: Whether the Pid Loop is crucial.
:param pulumi.Input[str] pid_loop_name: The name of Pid Loop.
:param pulumi.Input[str] pid_loop_type: The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
:param pulumi.Input[str] pid_project_id: The pid project id.
:param pulumi.Input[str] status: The status of Pid Loop.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _IndustrialPidLoopState.__new__(_IndustrialPidLoopState)
__props__.__dict__["pid_loop_configuration"] = pid_loop_configuration
__props__.__dict__["pid_loop_dcs_type"] = pid_loop_dcs_type
__props__.__dict__["pid_loop_desc"] = pid_loop_desc
__props__.__dict__["pid_loop_is_crucial"] = pid_loop_is_crucial
__props__.__dict__["pid_loop_name"] = pid_loop_name
__props__.__dict__["pid_loop_type"] = pid_loop_type
__props__.__dict__["pid_project_id"] = pid_project_id
__props__.__dict__["status"] = status
return IndustrialPidLoop(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="pidLoopConfiguration")
def pid_loop_configuration(self) -> pulumi.Output[str]:
"""
The Pid Loop Configuration.
"""
return pulumi.get(self, "pid_loop_configuration")
@property
@pulumi.getter(name="pidLoopDcsType")
def pid_loop_dcs_type(self) -> pulumi.Output[str]:
"""
The dcs type of Pid Loop. Valid values: `standard`.
"""
return pulumi.get(self, "pid_loop_dcs_type")
@property
@pulumi.getter(name="pidLoopDesc")
def pid_loop_desc(self) -> pulumi.Output[Optional[str]]:
"""
The desc of Pid Loop.
"""
return pulumi.get(self, "pid_loop_desc")
@property
@pulumi.getter(name="pidLoopIsCrucial")
def pid_loop_is_crucial(self) -> pulumi.Output[bool]:
"""
        Whether the Pid Loop is crucial.
"""
return pulumi.get(self, "pid_loop_is_crucial")
@property
@pulumi.getter(name="pidLoopName")
def pid_loop_name(self) -> pulumi.Output[str]:
"""
The name of Pid Loop.
"""
return pulumi.get(self, "pid_loop_name")
@property
@pulumi.getter(name="pidLoopType")
def pid_loop_type(self) -> pulumi.Output[str]:
"""
The type of Pid Loop. Valid values: `0`, `1`, `2`, `3`, `4`, `5`.
"""
return pulumi.get(self, "pid_loop_type")
@property
@pulumi.getter(name="pidProjectId")
def pid_project_id(self) -> pulumi.Output[str]:
"""
The pid project id.
"""
return pulumi.get(self, "pid_project_id")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
The status of Pid Loop.
"""
return pulumi.get(self, "status")
| 0.756268 | 0.069226 |
import os
import uuid
import pytest
import swapper
from django.conf import settings
from django.template.response import TemplateResponse
from django.test import TestCase
from django.urls import reverse, reverse_lazy
from django_fsm import can_proceed
from getpaid.registry import registry
from getpaid.types import BackendMethod as bm
from getpaid.types import ConfirmationMethod as cm
from getpaid.types import PaymentStatus as ps
pytestmark = pytest.mark.django_db
dummy = settings.GETPAID_DUMMY_SLUG
Order = swapper.load_model("getpaid", "Order")
Payment = swapper.load_model("getpaid", "Payment")
url_post_payment = reverse_lazy("paywall:gateway")
url_api_register = reverse_lazy("paywall:api_register")
url_api_operate = reverse_lazy("paywall:api_operate")
def _prep_conf(api_method: bm = bm.REST, confirm_method: cm = cm.PUSH) -> dict:
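    """Build a GETPAID_BACKEND_SETTINGS mapping for the dummy backend with the given paywall and confirmation methods."""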
return {
settings.GETPAID_DUMMY_SLUG: {
"paywall_method": api_method,
"confirmation_method": confirm_method,
}
}
class TestModelProcessor(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
def test_model_and_dummy_backend(self):
order = Order.objects.create()
payment = Payment.objects.create(
order=order,
currency=order.currency,
amount_required=order.get_total_amount(),
backend=dummy,
description=order.get_description(),
)
proc = payment.get_processor()
assert isinstance(proc, registry[dummy])
def test_get_flow_begin(payment_factory, settings, live_server, requests_mock, rf):
os.environ["_PAYWALL_URL"] = live_server.url
settings.GETPAID_BACKEND_SETTINGS = _prep_conf(api_method=bm.GET)
payment = payment_factory(external_id=uuid.uuid4())
result = payment.prepare_transaction(None)
assert result.status_code == 302
def test_post_flow_begin(payment_factory, settings, live_server, requests_mock, rf):
os.environ["_PAYWALL_URL"] = live_server.url
settings.GETPAID_BACKEND_SETTINGS = _prep_conf(api_method=bm.POST)
payment = payment_factory(external_id=uuid.uuid4())
result = payment.prepare_transaction(None)
assert result.status_code == 200
assert isinstance(result, TemplateResponse)
assert payment.status == ps.PREPARED
def test_rest_flow_begin(payment_factory, settings, live_server, requests_mock, rf):
os.environ["_PAYWALL_URL"] = live_server.url
settings.GETPAID_BACKEND_SETTINGS = _prep_conf(api_method=bm.REST)
payment = payment_factory(external_id=uuid.uuid4())
requests_mock.post(str(url_api_register), json={"url": str(url_post_payment)})
result = payment.prepare_transaction(None)
assert result.status_code == 302
assert payment.status == ps.PREPARED
# PULL flow
def test_pull_flow_paid(payment_factory, settings, live_server, requests_mock, rf):
os.environ["_PAYWALL_URL"] = live_server.url
settings.GETPAID_BACKEND_SETTINGS = _prep_conf(confirm_method=cm.PULL)
payment = payment_factory(external_id=uuid.uuid4())
payment.confirm_prepared()
url_get_status = reverse("paywall:get_status", kwargs={"pk": payment.external_id})
requests_mock.get(url_get_status, json={"payment_status": ps.PAID})
payment.fetch_and_update_status()
# all confirmed payments are by default marked as PARTIAL
assert payment.status == ps.PARTIAL
# and need to be checked and marked if complete
assert can_proceed(payment.mark_as_paid)
def test_pull_flow_locked(payment_factory, settings, live_server, requests_mock, rf):
os.environ["_PAYWALL_URL"] = live_server.url
settings.GETPAID_BACKEND_SETTINGS = _prep_conf(confirm_method=cm.PULL)
payment = payment_factory(external_id=uuid.uuid4())
payment.confirm_prepared()
url_get_status = reverse("paywall:get_status", kwargs={"pk": payment.external_id})
requests_mock.get(url_get_status, json={"payment_status": ps.PRE_AUTH})
payment.fetch_and_update_status()
assert payment.status == ps.PRE_AUTH
def test_pull_flow_failed(payment_factory, settings, live_server, requests_mock, rf):
os.environ["_PAYWALL_URL"] = live_server.url
settings.GETPAID_BACKEND_SETTINGS = _prep_conf(confirm_method=cm.PULL)
payment = payment_factory(external_id=uuid.uuid4())
payment.confirm_prepared()
url_get_status = reverse("paywall:get_status", kwargs={"pk": payment.external_id})
requests_mock.get(url_get_status, json={"payment_status": ps.FAILED})
payment.fetch_and_update_status()
assert payment.status == ps.FAILED
# PUSH flow
def test_push_flow_paid(payment_factory, settings, live_server, requests_mock, rf):
os.environ["_PAYWALL_URL"] = live_server.url
settings.GETPAID_BACKEND_SETTINGS = _prep_conf(confirm_method=cm.PUSH)
payment = payment_factory(external_id=uuid.uuid4())
payment.confirm_prepared()
request = rf.post("", content_type="application/json", data={"new_status": ps.PAID})
payment.handle_paywall_callback(request)
assert payment.status == ps.PAID
def test_push_flow_locked(payment_factory, settings, live_server, requests_mock, rf):
os.environ["_PAYWALL_URL"] = live_server.url
settings.GETPAID_BACKEND_SETTINGS = _prep_conf(confirm_method=cm.PUSH)
payment = payment_factory(external_id=uuid.uuid4())
payment.confirm_prepared()
request = rf.post(
"", content_type="application/json", data={"new_status": ps.PRE_AUTH}
)
payment.handle_paywall_callback(request)
assert payment.status == ps.PRE_AUTH
def test_push_flow_failed(payment_factory, settings, live_server, requests_mock, rf):
os.environ["_PAYWALL_URL"] = live_server.url
settings.GETPAID_BACKEND_SETTINGS = _prep_conf(confirm_method=cm.PUSH)
payment = payment_factory(external_id=uuid.uuid4())
payment.confirm_prepared()
request = rf.post(
"", content_type="application/json", data={"new_status": ps.FAILED}
)
payment.handle_paywall_callback(request)
assert payment.status == ps.FAILED
|
tests/dummy/test_dummy_integration.py
|
| 0.42668 | 0.164181 |
from __future__ import annotations
import asyncio
import logging
import pprint
from asyncio import tasks
from dataclasses import dataclass
from typing import Any, Callable, cast
import aiohttp
import async_timeout
from aiohttp import ClientWebSocketResponse, WSMsgType, client_exceptions
from .const import NOTIFY_WS_CLOSED, WS_HEARTBEAT
from .exceptions import (
CannotConnect,
ConnectionClosed,
ConnectionFailed,
InvalidMessage,
JSONRPCError,
RPCError,
RPCTimeout,
)
_LOGGER = logging.getLogger(__name__)
@dataclass
class RouteData:
"""RouteData (src/dst) class."""
src: str | None
dst: str | None
class RPCCall:
"""RPCCall class."""
def __init__(
self, call_id: int, method: str, params: dict[str, Any] | None, route: RouteData
):
"""Initialize RPC class."""
self.call_id = call_id
self.params = params
self.method = method
self.src = route.src
self.dst = route.dst
self.resolve: asyncio.Future = asyncio.Future()
@property
def request_frame(self) -> dict[str, Any]:
"""Request frame."""
msg = {
"id": self.call_id,
"method": self.method,
"src": self.src,
}
for obj in ("params", "dst"):
if getattr(self, obj) is not None:
msg[obj] = getattr(self, obj)
return msg
class WsRPC:
"""WsRPC class."""
def __init__(self, ip_address: str, on_notification: Callable) -> None:
"""Initialize WsRPC class."""
self._ip_address = ip_address
self._on_notification = on_notification
self._rx_task: tasks.Task[None] | None = None
self._client: ClientWebSocketResponse | None = None
self._calls: dict[int, RPCCall] = {}
self._call_id = 1
self._route = RouteData(f"aios-{id(self)}", None)
@property
def _next_id(self) -> int:
self._call_id += 1
return self._call_id
async def connect(self, aiohttp_session: aiohttp.ClientSession) -> None:
"""Connect to device."""
if self.connected:
raise RuntimeError("Already connected")
_LOGGER.debug("Trying to connect to device at %s", self._ip_address)
try:
self._client = await aiohttp_session.ws_connect(
f"http://{self._ip_address}/rpc", heartbeat=WS_HEARTBEAT
)
except (
client_exceptions.WSServerHandshakeError,
client_exceptions.ClientError,
) as err:
raise CannotConnect(f"Error connecting to {self._ip_address}") from err
self._rx_task = asyncio.create_task(self._rx_msgs())
_LOGGER.info("Connected to %s", self._ip_address)
async def disconnect(self) -> None:
"""Disconnect all sessions."""
self._rx_task = None
if self._client is None:
return
await self._client.close()
async def _handle_call(self, frame_id: str) -> None:
assert self._client
await self._client.send_json(
{
"id": frame_id,
"src": self._route.src,
"error": {"code": 500, "message": "Not Implemented"},
}
)
def _handle_frame(self, frame: dict[str, Any]) -> None:
if peer_src := frame.get("src"):
if self._route.dst is not None and peer_src != self._route.dst:
_LOGGER.warning(
"Remote src changed: %s -> %s", self._route.dst, peer_src
)
self._route.dst = peer_src
frame_id = frame.get("id")
if method := frame.get("method"):
# peer is invoking a method
params = frame.get("params")
if frame_id:
# and expects a response
_LOGGER.debug("handle call for frame_id: %s", frame_id)
asyncio.create_task(self._handle_call(frame_id))
else:
# this is a notification
_LOGGER.debug("Notification: %s %s", method, params)
self._on_notification(method, params)
elif frame_id:
# looks like a response
if frame_id not in self._calls:
_LOGGER.warning("Response for an unknown request id: %s", frame_id)
return
call = self._calls.pop(frame_id)
call.resolve.set_result(frame)
else:
_LOGGER.warning("Invalid frame: %s", frame)
async def _rx_msgs(self) -> None:
assert self._client
while not self._client.closed:
try:
frame = await self._receive_json_or_raise()
except ConnectionClosed:
break
self._handle_frame(frame)
_LOGGER.debug("Websocket connection closed")
for call_item in self._calls.values():
call_item.resolve.cancel()
self._calls.clear()
if not self._client.closed:
await self._client.close()
self._client = None
self._on_notification(NOTIFY_WS_CLOSED)
async def _receive_json_or_raise(self) -> dict[str, Any]:
"""Receive json or raise."""
assert self._client
msg = await self._client.receive()
if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSED, WSMsgType.CLOSING):
raise ConnectionClosed("Connection was closed.")
if msg.type == WSMsgType.ERROR:
raise ConnectionFailed()
if msg.type != WSMsgType.TEXT:
raise InvalidMessage(f"Received non-Text message: {msg.type}")
try:
data: dict[str, Any] = msg.json()
except ValueError as err:
raise InvalidMessage("Received invalid JSON.") from err
_LOGGER.debug("Received message:\n%s\n", pprint.pformat(msg))
return data
@property
def connected(self) -> bool:
"""Return if we're currently connected."""
return self._client is not None and not self._client.closed
async def call(
self, method: str, params: dict[str, Any] | None = None, timeout: int = 10
) -> dict[str, Any]:
"""Websocket RPC call."""
if self._client is None:
raise RuntimeError("Not connected")
call = RPCCall(self._next_id, method, params, self._route)
self._calls[call.call_id] = call
await self._client.send_json(call.request_frame)
try:
async with async_timeout.timeout(timeout):
resp = await call.resolve
except asyncio.TimeoutError as exc:
_LOGGER.warning("%s timed out: %s", call, exc)
raise RPCTimeout(call) from exc
except Exception as exc:
            _LOGGER.error("%s failed with an unexpected error: %s", call, exc)
raise RPCError(call, exc) from exc
if "result" in resp:
_LOGGER.debug("%s(%s) -> %s", call.method, call.params, resp["result"])
return cast(dict, resp["result"])
try:
code, msg = resp["error"]["code"], resp["error"]["message"]
raise JSONRPCError(code, msg)
except KeyError as err:
raise RPCError(f"bad response: {resp}") from err
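A hedged usage sketch for the WsRPC class above; the device address and the RPC method name are illustrative assumptions, not values defined in this module.
import asyncio
import aiohttp

def print_notification(method, params=None):
    # Receives regular notifications as well as the NOTIFY_WS_CLOSED marker.
    print("notification:", method, params)

async def demo():
    rpc = WsRPC("192.168.1.2", print_notification)   # assumed device IP
    async with aiohttp.ClientSession() as session:
        await rpc.connect(session)
        try:
            # "Shelly.GetDeviceInfo" is an assumed example method name.
            info = await rpc.call("Shelly.GetDeviceInfo", timeout=5)
            print(info)
        finally:
            await rpc.disconnect()

if __name__ == "__main__":
    asyncio.run(demo())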
|
aioshelly/wsrpc.py
|
| 0.850298 | 0.097562 |
import sys
from PyQt5.QtWidgets import QApplication, QDialog, QPushButton,QComboBox,QLabel, QMainWindow, QWidget, QVBoxLayout
from PyQt5.QtGui import QPixmap,QIcon
from datetime import datetime
import time
from utils import covid_info,covid_table
import matplotlib.pyplot as plt
#information
covid_result=covid_info(0,0)
pic_table = covid_table()
#button
btn_x = 200
class button(QPushButton):
def __init__(self,f,t,y):
QPushButton.__init__(self,f)
self.resize(200,50)
self.setText(t)
self.move(btn_x,y)
#time
mytime = time.localtime()
if mytime.tm_hour < 12:
    str_hello = '早上好!'   # "Good morning!"
elif mytime.tm_hour < 18:
    str_hello = '下午好!'   # "Good afternoon!"
else:
    str_hello = '晚上好!'   # "Good evening!"
#UI
class covid_info_ui(QMainWindow):
def __init__(self):
super().__init__()
        self.setWindowTitle("疫情信息助手")  # "COVID Information Assistant"
icon = QIcon()
icon.addPixmap(QPixmap('noun-coronavirus-3377862.png'))
self.setWindowIcon(icon)
label = QLabel(self)
pixmap = QPixmap('noun-man-4325221.png').scaledToHeight(100)
label.resize(100,100)
label.setPixmap(pixmap)
label.move(0,0)
self.resize(600,600)
hello = QLabel(self)
hello.setText(str_hello)
hello.resize(200,30)
moji_x=130
moji_y=35
hello.move(moji_x,moji_y)
date_time = QLabel(self)
dt=str(datetime.now())
date_time.setText(dt)
date_time.resize(500,30)
date_time.move(moji_x,moji_y+20)
moji_coun = QLabel(self)
country_number=covid_result.country()
moji_coun.setText(country_number)
moji_coun.resize(600,220)
moji_coun.move(10,100)
class CTlab(QLabel):
def __init__(self):
super().__init__()
self.setWindowTitle('疫情信息助手')
self.resize(700,700)
self.setWordWrap(True)
def showlab(self,t):
self.setText(t)
self.show()
class area_tab(tuple):
def pro_area(self):
re_area = ' '
for i in self:
re_area += i+"\n"
re_area = re_area[10:]
        return re_area
app = QApplication(sys.argv)
ex = covid_info_ui()
lab_city=CTlab()
#combobox
btn_y = 340
f = open("prov.txt", "r",encoding="utf-8")
combo = QComboBox(ex)
combo.resize(120,50)
combo.move(50,btn_y)
for line in f.readlines():
line=line.strip()
combo.addItem(line)
f.close()
#button
btn_1 = button(ex, '省市简报', btn_y)               # provincial/city briefing
btn_2 = button(ex, '省市中高风险区', btn_y + 50)     # medium/high-risk areas
btn_3 = button(ex, '新增确诊趋势图', btn_y + 100)    # new confirmed cases trend chart
btn_4 = button(ex, '新增无症状趋势图', btn_y + 150)  # new asymptomatic cases trend chart
btn_1.clicked.connect(lambda:lab_city.showlab(covid_result.city(combo.currentText())))
btn_2.clicked.connect(lambda:lab_city.showlab(area_tab.pro_area(covid_result.area(combo.currentText()))))
def win_plt():
plt.plot(pic_table.date,pic_table.numc_list)
plt.show()
btn_3.clicked.connect(lambda:win_plt())
def no_plt():
plt.plot(pic_table.date,pic_table.numl_list)
plt.show()
btn_4.clicked.connect(lambda:no_plt())
#warningUI
dlg = QDialog()
def understand():
ex.show()
dlg.done(1)
dlg.setWindowTitle('警告!')
dlg.resize(1000, 200)
warn_icon = QIcon()
warn_icon.addPixmap(QPixmap('noun-warning-1559852.png'))
dlg.setWindowIcon(warn_icon)
lab_warning = QLabel(dlg)
# Translation: "This software only aggregates data; all data come from the National Health
# Commission. By continuing you acknowledge that this software takes no responsibility for
# data accuracy and must not be taken as medical advice. Thank you for using it!"
lab_warning.setText('本软件仅提供数据汇总,数据来源均为国家卫健委。\n继续使用意味着您理解:本软件不对数据真实性负任何责任,不可以作为任何医疗建议!\n感谢您的使用!')
lab_warning.move(10,10)
btn_warn=button(dlg,'我理解以上声明',100)
btn_warn.move(370,120)
btn_warn.clicked.connect(lambda:understand())
dlg.show()
sys.exit(app.exec())
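The UI above relies on a `utils` module that is not included here. A minimal stand-in illustrating the interface it expects (names inferred from the calls above; the data are placeholders, not the real implementation):
class covid_info:
    """Stand-in for utils.covid_info: queried for the summary strings shown in the UI."""
    def __init__(self, a, b):
        self.a, self.b = a, b
    def country(self):
        return "Nationwide summary (placeholder data)"
    def city(self, province):
        return f"{province}: briefing (placeholder data)"
    def area(self, province):
        # Returns an iterable of medium/high-risk area descriptions.
        return (f"{province} district A, street B", f"{province} district C, street D")

class covid_table:
    """Stand-in for utils.covid_table: aligned lists consumed by the trend plots."""
    def __init__(self):
        self.date = ["2022-11-01", "2022-11-02", "2022-11-03"]
        self.numc_list = [100, 120, 90]    # daily new confirmed cases
        self.numl_list = [300, 280, 310]   # daily new asymptomatic cases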
|
covidui.py
|
| 0.092048 | 0.077239 |
import os
import sys
import m5
from m5.util import addToPath
from m5.objects import *
from m5.options import *
import argparse
m5.util.addToPath('../..')
from common import SysPaths
from common import ObjectList
from common import MemConfig
from common.cores.arm import HPI
import devices
default_kernel = 'vmlinux.arm64'
default_disk = 'linaro-minimal-aarch64.img'
default_root_device = '/dev/vda1'
# Pre-defined CPU configurations. Each tuple must be ordered as: (cpu_class,
# l1_icache_class, l1_dcache_class, l2_cache_class). Any of the cache classes
# may be 'None' if the particular cache is not present.
cpu_types = {
"atomic" : ( AtomicSimpleCPU, None, None, None),
"minor" : (MinorCPU,
devices.L1I, devices.L1D,
devices.L2),
"hpi" : ( HPI.HPI,
HPI.HPI_ICache, HPI.HPI_DCache,
HPI.HPI_L2)
}
def create_cow_image(name):
"""Helper function to create a Copy-on-Write disk image"""
image = CowDiskImage()
image.child.image_file = SysPaths.disk(name)
    return image
def create(args):
''' Create and configure the system object. '''
if args.script and not os.path.isfile(args.script):
print("Error: Bootscript %s does not exist" % args.script)
sys.exit(1)
cpu_class = cpu_types[args.cpu][0]
mem_mode = cpu_class.memory_mode()
# Only simulate caches when using a timing CPU (e.g., the HPI model)
want_caches = True if mem_mode == "timing" else False
system = devices.SimpleSystem(want_caches,
args.mem_size,
mem_mode=mem_mode,
workload=ArmFsLinux(
object_file=
SysPaths.binary(args.kernel)),
readfile=args.script)
MemConfig.config_mem(args, system)
# Add the PCI devices we need for this system. The base system
# doesn't have any PCI devices by default since they are assumed
# to be added by the configuration scripts needing them.
system.pci_devices = [
# Create a VirtIO block device for the system's boot
# disk. Attach the disk image using gem5's Copy-on-Write
# functionality to avoid writing changes to the stored copy of
# the disk image.
PciVirtIO(vio=VirtIOBlock(image=create_cow_image(args.disk_image))),
]
# Attach the PCI devices to the system. The helper method in the
# system assigns a unique PCI bus ID to each of the devices and
# connects them to the IO bus.
for dev in system.pci_devices:
system.attach_pci(dev)
# Wire up the system's memory system
system.connect()
# Add CPU clusters to the system
system.cpu_cluster = [
devices.CpuCluster(system,
args.num_cores,
args.cpu_freq, "1.0V",
*cpu_types[args.cpu]),
]
# Create a cache hierarchy for the cluster. We are assuming that
# clusters have core-private L1 caches and an L2 that's shared
# within the cluster.
system.addCaches(want_caches, last_cache_level=2)
# Setup gem5's minimal Linux boot loader.
system.realview.setupBootLoader(system, SysPaths.binary)
if args.dtb:
system.workload.dtb_filename = args.dtb
else:
# No DTB specified: autogenerate DTB
system.workload.dtb_filename = \
os.path.join(m5.options.outdir, 'system.dtb')
system.generateDtb(system.workload.dtb_filename)
# Linux boot command flags
kernel_cmd = [
# Tell Linux to use the simulated serial port as a console
"console=ttyAMA0",
        # Hard-code the timing constant (loops per jiffy) to skip boot-time calibration.
"lpj=19988480",
# Disable address space randomisation to get a consistent
# memory layout.
"norandmaps",
# Tell Linux where to find the root disk image.
"root=%s" % args.root_device,
# Mount the root disk read-write by default.
"rw",
# Tell Linux about the amount of physical memory present.
"mem=%s" % args.mem_size,
]
system.workload.command_line = " ".join(kernel_cmd)
return system
def run(args):
cptdir = m5.options.outdir
if args.checkpoint:
print("Checkpoint directory: %s" % cptdir)
while True:
event = m5.simulate()
exit_msg = event.getCause()
if exit_msg == "checkpoint":
print("Dropping checkpoint at tick %d" % m5.curTick())
cpt_dir = os.path.join(m5.options.outdir, "cpt.%d" % m5.curTick())
m5.checkpoint(os.path.join(cpt_dir))
print("Checkpoint done.")
else:
print(exit_msg, " @ ", m5.curTick())
break
sys.exit(event.getCode())
def main():
parser = argparse.ArgumentParser(epilog=__doc__)
parser.add_argument("--dtb", type=str, default=None,
help="DTB file to load")
parser.add_argument("--kernel", type=str, default=default_kernel,
help="Linux kernel")
parser.add_argument("--disk-image", type=str,
default=default_disk,
help="Disk to instantiate")
parser.add_argument("--root-device", type=str,
default=default_root_device,
help="OS device name for root partition (default: {})"
.format(default_root_device))
parser.add_argument("--script", type=str, default="",
help = "Linux bootscript")
parser.add_argument("--cpu", type=str, choices=list(cpu_types.keys()),
default="atomic",
help="CPU model to use")
parser.add_argument("--cpu-freq", type=str, default="4GHz")
parser.add_argument("--num-cores", type=int, default=1,
help="Number of CPU cores")
parser.add_argument("--mem-type", default="DDR3_1600_8x8",
choices=ObjectList.mem_list.get_names(),
help = "type of memory to use")
parser.add_argument("--mem-channels", type=int, default=1,
help = "number of memory channels")
parser.add_argument("--mem-ranks", type=int, default=None,
help = "number of memory ranks per channel")
parser.add_argument("--mem-size", action="store", type=str,
default="2GB",
help="Specify the physical memory size")
parser.add_argument("--checkpoint", action="store_true")
parser.add_argument("--restore", type=str, default=None)
args = parser.parse_args()
root = Root(full_system=True)
root.system = create(args)
if args.restore is not None:
m5.instantiate(args.restore)
else:
m5.instantiate()
run(args)
if __name__ == "__m5_main__":
main()
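This script is normally launched through the gem5 binary (typically something like `gem5.opt configs/example/arm/starter_fs.py --cpu hpi --num-cores 2`); inside `create()` the chosen `cpu_types` entry is unpacked straight into the `CpuCluster` constructor. A gem5-free sketch of that unpacking pattern, with placeholder classes standing in for the real CPU and cache models:
class _FakeCPU: ...
class _FakeL1I: ...
class _FakeL1D: ...
class _FakeL2: ...

def make_cluster(name, cpu_class, l1i_class=None, l1d_class=None, l2_class=None):
    # Mirrors how CpuCluster receives (cpu_class, l1i, l1d, l2) via *cpu_types[args.cpu].
    return {"name": name, "cpu": cpu_class, "l1i": l1i_class, "l1d": l1d_class, "l2": l2_class}

_fake_cpu_types = {
    "atomic": (_FakeCPU, None, None, None),
    "minor": (_FakeCPU, _FakeL1I, _FakeL1D, _FakeL2),
}

cluster = make_cluster("cluster0", *_fake_cpu_types["minor"])
assert cluster["l2"] is _FakeL2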
|
configs/example/arm/starter_fs.py
|
| 0.36557 | 0.191158 |
import time
from collections import OrderedDict
from random import Random
from typing import Dict
class SimRandom:
"""Simulator random object that will keep a module level random.Random object to keep its internal random sequence,
it will not be affected by outside, and outside can set seed with seed function as normal.
Use it as a dict to get another random object with a name, all the random objects from this way will be
affect the seed method.
.. code-block:: python
from maro.simulator.utils import random, seed
# This will create 2 random object, each has different sequence.
r1 = random["r1"]
r2 = random["r2"]
# Seed will reset above random sequence.
seed(1)
"""
def __init__(self):
# random object instances
self._rand_instances: Dict[str, Random] = OrderedDict()
self._seed_dict: Dict[str, int] = {}
self._seed = int(time.time())
def seed(self, seed_num: int):
"""Set seed for simulator random objects.
NOTE:
This method will affect all the random object that get from this class.
Args:
seed_num (int): Seed to set, must be an integer.
"""
assert type(seed_num) is int
self._seed = seed_num
for index, (key, rand) in enumerate(self._rand_instances.items()):
# we set seed for each random instance with 1 offset
seed = seed_num + index
rand.seed(seed)
self._seed_dict[key] = seed
def _create_instance(self, key: str) -> None:
assert type(key) is str
if key not in self._rand_instances:
self._seed_dict[key] = self._seed + len(self._rand_instances)
r = Random()
r.seed(self._seed_dict[key])
self._rand_instances[key] = r
def __getitem__(self, key):
assert type(key) is str
if key not in self._rand_instances:
self._create_instance(key)
return self._rand_instances[key]
def reset_seed(self, key: str) -> None:
"""Reset seed of current random generator.
NOTE:
            This will reset the seed to the value that was specified by the user (or the default).
Args:
key(str): Key of item to get.
"""
assert type(key) is str
if key not in self._seed_dict:
self._create_instance(key)
rand = self._rand_instances[key]
rand.seed(self._seed_dict[key])
random = SimRandom()
"""Random utility for simulator, same with original random module."""
seed = random.seed
"""Set seed for simulator."""
__all__ = ['seed', 'random', 'SimRandom']
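A small self-contained check of the behaviour described in the docstring above: named streams are independent of each other, and re-seeding reproduces their sequences.
if __name__ == "__main__":
    rng = SimRandom()
    rng.seed(42)
    first = [rng["demand"].random() for _ in range(3)]
    other = [rng["supply"].random() for _ in range(3)]
    rng.seed(42)
    again = [rng["demand"].random() for _ in range(3)]
    assert first == again   # same seed and same name -> same sequence
    assert first != other   # different names -> independent sequences
    print("SimRandom sequences behave as documented")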
|
maro/simulator/utils/sim_random.py
|
| 0.850701 | 0.485966 |
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from modules.attention import Attention
class ATTRNNAgent(nn.Module):
def __init__(self, input_scheme, args):
super(ATTRNNAgent, self).__init__()
self.args = args
fixed_inputs = []
var_inputs = []
idx = 0
len_fixed = 0
split = []
for part in input_scheme:
if type(part) == int:
# part: len
fixed_inputs.append((idx, part))
idx += part
len_fixed += part
split.append(part)
else:
# part: len * n
var_inputs.append((idx, part[0], part[1]))
idx += part[0] * part[1]
split.append(part[0] * part[1])
attns = []
vfc1s = []
vfc2s = []
n_var = len(var_inputs)
len_attn = 0
ffc1 = nn.Linear(len_fixed, args.attn_hidden_dim)
for i in range(n_var):
vfc1s.append(nn.Linear(var_inputs[i][1], args.attn_hidden_dim))
attns.append(Attention(args.attn_hidden_dim, args.attn_hidden_dim, args.attn_hidden_dim, args.attn_n_heads))
# print(var_inputs[i][1])
vfc2s.append(nn.Linear(args.attn_hidden_dim * args.attn_n_heads, args.attn_hidden_dim))
len_attn += args.attn_hidden_dim
ffc2 = nn.Linear(args.attn_hidden_dim, args.attn_hidden_dim)
len_attn += args.attn_hidden_dim
self.split = split
self.input_scheme = input_scheme
self.attns = nn.ModuleList(attns)
self.vfc1s = nn.ModuleList(vfc1s)
self.vfc2s = nn.ModuleList(vfc2s)
self.ffc1 = ffc1
self.ffc2 = ffc2
self.fc1 = nn.Linear(len_attn, args.rnn_hidden_dim)
if args.use_rnn:
self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
else:
self.rnn = None
# print(args.n_actions)
self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)
def init_hidden(self):
# make hidden states on same device as model
return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()
def forward(self, inputs, hidden_state):
split_inputs = inputs.split(self.split, dim=1)
# print(" split_inputs[0]", split_inputs[0].is_cuda)
fixed_inputs = []
var_inputs = []
for i, part in enumerate(self.input_scheme):
if type(part) == int:
fixed_inputs.append(split_inputs[i])
else:
var_inputs.append(split_inputs[i].view(-1, part[1], part[0]))
fixed_input = th.cat(fixed_inputs, dim=1)
fixed_h = self.ffc1(fixed_input)
var_outputs = []
for i, var_input in enumerate(var_inputs):
# print("var_input", var_input.is_cuda)
attn_input = self.vfc1s[i](var_input)
attn_h = self.attns[i](F.relu(fixed_h), F.relu(attn_input), attn_input)
attn_output = self.vfc2s[i](F.relu(attn_h))
var_outputs.append(attn_output)
fixed_output = self.ffc2(F.relu(fixed_h))
# print(fixed_output.size(), var_outputs[0].size())
attn_output = th.cat([fixed_output] + var_outputs, dim=1)
x = F.relu(self.fc1(F.relu(attn_output)))
if self.args.use_rnn:
h_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
h = self.rnn(x, h_in)
else:
h = x
q = self.fc2(h)
return q, h
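The constructor above turns `input_scheme` into the `split` list that `forward` uses with `tensor.split` before reshaping the variable segments per entity. A self-contained illustration of just that bookkeeping, using plain PyTorch and a toy scheme (one fixed 10-feature segment plus 5 entities of 8 features each):
import torch as th

input_scheme = [10, (8, 5)]            # (len) and (len, n) entries, as above
split = [p if isinstance(p, int) else p[0] * p[1] for p in input_scheme]

obs = th.rand(3, sum(split))           # batch of 3 flattened observations
fixed_part, var_part = obs.split(split, dim=1)
entities = var_part.view(-1, 5, 8)     # same reshape as forward(): (batch, n, len)

print(fixed_part.shape, entities.shape)  # torch.Size([3, 10]) torch.Size([3, 5, 8])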
|
src/modules/agents/att_rnn_agent.py
|
| 0.642993 | 0.242626 |
from typing import Callable, List, Optional
from rp2.abstract_transaction import AbstractTransaction
from rp2.configuration import Configuration
from rp2.entry_types import TransactionType
from rp2.logger import LOGGER
from rp2.rp2_decimal import FIAT_DECIMAL_MASK, ZERO, RP2Decimal
from rp2.rp2_error import RP2TypeError, RP2ValueError
class OutTransaction(AbstractTransaction):
def __init__(
self,
configuration: Configuration,
timestamp: str,
asset: str,
exchange: str,
holder: str,
transaction_type: str,
spot_price: RP2Decimal,
crypto_out_no_fee: RP2Decimal,
crypto_fee: RP2Decimal,
crypto_out_with_fee: Optional[RP2Decimal] = None,
fiat_out_no_fee: Optional[RP2Decimal] = None,
fiat_fee: Optional[RP2Decimal] = None,
internal_id: Optional[int] = None,
unique_id: Optional[str] = None,
notes: Optional[str] = None,
) -> None:
super().__init__(configuration, timestamp, asset, transaction_type, spot_price, internal_id, unique_id, notes)
self.__exchange: str = configuration.type_check_exchange("exchange", exchange)
self.__holder: str = configuration.type_check_holder("holder", holder)
self.__crypto_out_no_fee: RP2Decimal = configuration.type_check_positive_decimal("crypto_out_no_fee", crypto_out_no_fee, non_zero=True)
self.__crypto_fee: RP2Decimal = configuration.type_check_positive_decimal("crypto_fee", crypto_fee)
self.__fiat_out_with_fee: RP2Decimal
self.__fiat_out_no_fee: RP2Decimal
# Crypto out with fee is optional. It can be derived from crypto out (no fee) and crypto fee, however some exchanges
# provide it anyway. If it is provided use it as given by the exchange, if not compute it.
if crypto_out_with_fee is None:
self.__crypto_out_with_fee = self.__crypto_out_no_fee + self.crypto_fee
else:
self.__crypto_out_with_fee = configuration.type_check_positive_decimal("crypto_out_with_fee", crypto_out_with_fee, non_zero=True)
# Fiat out without fee and fiat fee are optional. They can be derived from crypto out (no fee), spot price and crypto fee,
# however some exchanges provide them anyway. If they are provided use them as given by the exchange, if not compute them.
if fiat_out_no_fee is None:
self.__fiat_out_no_fee = self.__crypto_out_no_fee * self.spot_price
else:
self.__fiat_out_no_fee = configuration.type_check_positive_decimal("fiat_out_no_fee", fiat_out_no_fee, non_zero=True)
if fiat_fee is None:
self.__fiat_fee = self.__crypto_fee * self.spot_price
else:
self.__fiat_fee = configuration.type_check_positive_decimal("fiat_fee", fiat_fee)
self.__fiat_out_with_fee = self.__fiat_out_no_fee + self.__fiat_fee
if spot_price == ZERO:
raise RP2ValueError(f"{self.asset} {type(self).__name__} ({self.timestamp}, id {self.internal_id}): parameter 'spot_price' cannot be 0")
if self.transaction_type not in (TransactionType.DONATE, TransactionType.GIFT, TransactionType.SELL):
raise RP2ValueError(
f"{self.asset} {type(self).__name__} ({self.timestamp}, id {self.internal_id}): invalid transaction type {self.transaction_type}"
)
        # If the values provided by the exchange don't match the computed ones, log a warning.
if not RP2Decimal.is_equal_within_precision(self.__crypto_out_with_fee, self.__crypto_out_no_fee + self.__crypto_fee, FIAT_DECIMAL_MASK):
LOGGER.warning(
"%s %s (%s, id %s): crypto_out_with_fee != crypto_out_no_fee + crypto_fee: %f != %f",
self.asset,
type(self).__name__,
self.timestamp,
self.internal_id,
self.__crypto_out_with_fee,
self.__crypto_out_no_fee + self.__crypto_fee,
)
if not RP2Decimal.is_equal_within_precision(self.__crypto_fee * self.spot_price, self.__fiat_fee, FIAT_DECIMAL_MASK):
LOGGER.warning(
"%s %s (%s, id %s): crypto_fee * spot_price != fiat_fee: %f != %f",
self.asset,
type(self).__name__,
self.timestamp,
self.internal_id,
self.__crypto_fee * self.spot_price,
self.__fiat_fee,
)
if not RP2Decimal.is_equal_within_precision(self.__crypto_out_no_fee * self.spot_price, self.__fiat_out_no_fee, FIAT_DECIMAL_MASK):
LOGGER.warning(
"%s %s (%s, id %s): crypto_out_no_fee * spot_price != fiat_out_no_fee: %f != %f",
self.asset,
type(self).__name__,
self.timestamp,
self.internal_id,
self.__crypto_out_no_fee * self.spot_price,
self.__fiat_out_no_fee,
)
def to_string(self, indent: int = 0, repr_format: bool = True, extra_data: Optional[List[str]] = None) -> str:
self.configuration.type_check_positive_int("indent", indent)
self.configuration.type_check_bool("repr_format", repr_format)
if extra_data and not isinstance(extra_data, List):
raise RP2TypeError(f"Parameter 'extra_data' is not of type List: {extra_data}")
class_specific_data: List[str] = []
stringify: Callable[[object], str] = repr
if not repr_format:
stringify = str
class_specific_data = [
f"exchange={stringify(self.exchange)}",
f"holder={stringify(self.holder)}",
f"transaction_type={stringify(self.transaction_type)}",
f"spot_price={self.spot_price:.4f}",
f"crypto_out_no_fee={self.crypto_out_no_fee:.8f}",
f"crypto_fee={self.crypto_fee:.8f}",
f"unique_id={self.unique_id}",
f"is_taxable={stringify(self.is_taxable())}",
f"fiat_taxable_amount={self.fiat_taxable_amount:.4f}",
]
if extra_data:
class_specific_data.extend(extra_data)
return super().to_string(indent=indent, repr_format=repr_format, extra_data=class_specific_data)
@property
def exchange(self) -> str:
return self.__exchange
@property
def holder(self) -> str:
return self.__holder
@property
def crypto_out_no_fee(self) -> RP2Decimal:
return self.__crypto_out_no_fee
@property
def crypto_out_with_fee(self) -> RP2Decimal:
return self.__crypto_out_with_fee
@property
def crypto_fee(self) -> RP2Decimal:
return self.__crypto_fee
@property
def fiat_out_no_fee(self) -> RP2Decimal:
return self.__fiat_out_no_fee
@property
def fiat_out_with_fee(self) -> RP2Decimal:
return self.__fiat_out_with_fee
@property
def fiat_fee(self) -> RP2Decimal:
return self.__fiat_fee
# IRS Publication 544 (https://www.irs.gov/publications/p544) explains that sale expenses are deducted from the sale price
# (see "Example 1" in the "Gain or Loss From Sales and Exchanges" section). A less formal explanation:
# https://taxbit.com/cryptocurrency-tax-guide. Therefore the fee is considered a deduction and the outgoing amount is not.
@property
def crypto_taxable_amount(self) -> RP2Decimal:
return self.crypto_out_no_fee
@property
def fiat_taxable_amount(self) -> RP2Decimal:
return self.fiat_out_no_fee
@property
def crypto_deduction(self) -> RP2Decimal:
return self.crypto_fee
@property
def fiat_deduction(self) -> RP2Decimal:
return self.fiat_fee
@property
def crypto_balance_change(self) -> RP2Decimal:
return self.crypto_out_with_fee
@property
def fiat_balance_change(self) -> RP2Decimal:
return self.fiat_out_with_fee
def is_taxable(self) -> bool:
return True
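The taxable-amount properties above encode the rule cited from IRS Publication 544: the fee is treated as a deduction, so only the out-amount excluding the fee is taxable, while the balance change does include the fee. The same arithmetic in plain decimals (a hedged illustration with made-up numbers, not using rp2's own types):
from decimal import Decimal

spot_price = Decimal("20000")          # fiat per unit of crypto
crypto_out_no_fee = Decimal("0.5")     # amount actually sold or sent
crypto_fee = Decimal("0.001")          # fee paid in crypto

crypto_out_with_fee = crypto_out_no_fee + crypto_fee
fiat_taxable_amount = crypto_out_no_fee * spot_price   # mirrors fiat_taxable_amount
fiat_deduction = crypto_fee * spot_price               # mirrors fiat_deduction
crypto_balance_change = crypto_out_with_fee            # mirrors crypto_balance_change

print(fiat_taxable_amount, fiat_deduction, crypto_balance_change)
# 10000.0 20.000 0.501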
|
src/rp2/out_transaction.py
|
from typing import Callable, List, Optional
from rp2.abstract_transaction import AbstractTransaction
from rp2.configuration import Configuration
from rp2.entry_types import TransactionType
from rp2.logger import LOGGER
from rp2.rp2_decimal import FIAT_DECIMAL_MASK, ZERO, RP2Decimal
from rp2.rp2_error import RP2TypeError, RP2ValueError
class OutTransaction(AbstractTransaction):
def __init__(
self,
configuration: Configuration,
timestamp: str,
asset: str,
exchange: str,
holder: str,
transaction_type: str,
spot_price: RP2Decimal,
crypto_out_no_fee: RP2Decimal,
crypto_fee: RP2Decimal,
crypto_out_with_fee: Optional[RP2Decimal] = None,
fiat_out_no_fee: Optional[RP2Decimal] = None,
fiat_fee: Optional[RP2Decimal] = None,
internal_id: Optional[int] = None,
unique_id: Optional[str] = None,
notes: Optional[str] = None,
) -> None:
super().__init__(configuration, timestamp, asset, transaction_type, spot_price, internal_id, unique_id, notes)
self.__exchange: str = configuration.type_check_exchange("exchange", exchange)
self.__holder: str = configuration.type_check_holder("holder", holder)
self.__crypto_out_no_fee: RP2Decimal = configuration.type_check_positive_decimal("crypto_out_no_fee", crypto_out_no_fee, non_zero=True)
self.__crypto_fee: RP2Decimal = configuration.type_check_positive_decimal("crypto_fee", crypto_fee)
self.__fiat_out_with_fee: RP2Decimal
self.__fiat_out_no_fee: RP2Decimal
# Crypto out with fee is optional. It can be derived from crypto out (no fee) and crypto fee, however some exchanges
# provide it anyway. If it is provided use it as given by the exchange, if not compute it.
if crypto_out_with_fee is None:
self.__crypto_out_with_fee = self.__crypto_out_no_fee + self.crypto_fee
else:
self.__crypto_out_with_fee = configuration.type_check_positive_decimal("crypto_out_with_fee", crypto_out_with_fee, non_zero=True)
# Fiat out without fee and fiat fee are optional. They can be derived from crypto out (no fee), spot price and crypto fee,
# however some exchanges provide them anyway. If they are provided use them as given by the exchange, if not compute them.
if fiat_out_no_fee is None:
self.__fiat_out_no_fee = self.__crypto_out_no_fee * self.spot_price
else:
self.__fiat_out_no_fee = configuration.type_check_positive_decimal("fiat_out_no_fee", fiat_out_no_fee, non_zero=True)
if fiat_fee is None:
self.__fiat_fee = self.__crypto_fee * self.spot_price
else:
self.__fiat_fee = configuration.type_check_positive_decimal("fiat_fee", fiat_fee)
self.__fiat_out_with_fee = self.__fiat_out_no_fee + self.__fiat_fee
if spot_price == ZERO:
raise RP2ValueError(f"{self.asset} {type(self).__name__} ({self.timestamp}, id {self.internal_id}): parameter 'spot_price' cannot be 0")
if self.transaction_type not in (TransactionType.DONATE, TransactionType.GIFT, TransactionType.SELL):
raise RP2ValueError(
f"{self.asset} {type(self).__name__} ({self.timestamp}, id {self.internal_id}): invalid transaction type {self.transaction_type}"
)
# If the values provided by the exchange doesn't match the computed one, log a warning.
if not RP2Decimal.is_equal_within_precision(self.__crypto_out_with_fee, self.__crypto_out_no_fee + self.__crypto_fee, FIAT_DECIMAL_MASK):
LOGGER.warning(
"%s %s (%s, id %s): crypto_out_with_fee != crypto_out_no_fee + crypto_fee: %f != %f",
self.asset,
type(self).__name__,
self.timestamp,
self.internal_id,
self.__crypto_out_with_fee,
self.__crypto_out_no_fee + self.__crypto_fee,
)
if not RP2Decimal.is_equal_within_precision(self.__crypto_fee * self.spot_price, self.__fiat_fee, FIAT_DECIMAL_MASK):
LOGGER.warning(
"%s %s (%s, id %s): crypto_fee * spot_price != fiat_fee: %f != %f",
self.asset,
type(self).__name__,
self.timestamp,
self.internal_id,
self.__crypto_fee * self.spot_price,
self.__fiat_fee,
)
if not RP2Decimal.is_equal_within_precision(self.__crypto_out_no_fee * self.spot_price, self.__fiat_out_no_fee, FIAT_DECIMAL_MASK):
LOGGER.warning(
"%s %s (%s, id %s): crypto_out_no_fee * spot_price != fiat_out_no_fee: %f != %f",
self.asset,
type(self).__name__,
self.timestamp,
self.internal_id,
self.__crypto_out_no_fee * self.spot_price,
self.__fiat_out_no_fee,
)
def to_string(self, indent: int = 0, repr_format: bool = True, extra_data: Optional[List[str]] = None) -> str:
self.configuration.type_check_positive_int("indent", indent)
self.configuration.type_check_bool("repr_format", repr_format)
if extra_data and not isinstance(extra_data, List):
raise RP2TypeError(f"Parameter 'extra_data' is not of type List: {extra_data}")
class_specific_data: List[str] = []
stringify: Callable[[object], str] = repr
if not repr_format:
stringify = str
class_specific_data = [
f"exchange={stringify(self.exchange)}",
f"holder={stringify(self.holder)}",
f"transaction_type={stringify(self.transaction_type)}",
f"spot_price={self.spot_price:.4f}",
f"crypto_out_no_fee={self.crypto_out_no_fee:.8f}",
f"crypto_fee={self.crypto_fee:.8f}",
f"unique_id={self.unique_id}",
f"is_taxable={stringify(self.is_taxable())}",
f"fiat_taxable_amount={self.fiat_taxable_amount:.4f}",
]
if extra_data:
class_specific_data.extend(extra_data)
return super().to_string(indent=indent, repr_format=repr_format, extra_data=class_specific_data)
@property
def exchange(self) -> str:
return self.__exchange
@property
def holder(self) -> str:
return self.__holder
@property
def crypto_out_no_fee(self) -> RP2Decimal:
return self.__crypto_out_no_fee
@property
def crypto_out_with_fee(self) -> RP2Decimal:
return self.__crypto_out_with_fee
@property
def crypto_fee(self) -> RP2Decimal:
return self.__crypto_fee
@property
def fiat_out_no_fee(self) -> RP2Decimal:
return self.__fiat_out_no_fee
@property
def fiat_out_with_fee(self) -> RP2Decimal:
return self.__fiat_out_with_fee
@property
def fiat_fee(self) -> RP2Decimal:
return self.__fiat_fee
# IRS Publication 544 (https://www.irs.gov/publications/p544) explains that sale expenses are deducted from the sale price
# (see "Example 1" in the "Gain or Loss From Sales and Exchanges" section). A less formal explanation:
# https://taxbit.com/cryptocurrency-tax-guide. Therefore the fee is considered a deduction and the outgoing amount is not.
@property
def crypto_taxable_amount(self) -> RP2Decimal:
return self.crypto_out_no_fee
@property
def fiat_taxable_amount(self) -> RP2Decimal:
return self.fiat_out_no_fee
@property
def crypto_deduction(self) -> RP2Decimal:
return self.crypto_fee
@property
def fiat_deduction(self) -> RP2Decimal:
return self.fiat_fee
@property
def crypto_balance_change(self) -> RP2Decimal:
return self.crypto_out_with_fee
@property
def fiat_balance_change(self) -> RP2Decimal:
return self.fiat_out_with_fee
def is_taxable(self) -> bool:
return True
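A minimal sketch, not part of rp2, of the arithmetic that the consistency checks and the taxable/deduction properties above encode; plain Decimal stands in for RP2Decimal, whose construction details are assumed here:

from decimal import Decimal

spot_price = Decimal("100")
crypto_out_no_fee = Decimal("2")      # the taxable crypto amount
crypto_fee = Decimal("0.1")           # the deductible crypto fee

crypto_out_with_fee = crypto_out_no_fee + crypto_fee   # 2.1, the crypto balance change
fiat_taxable_amount = crypto_out_no_fee * spot_price   # 200, mirrors fiat_out_no_fee
fiat_deduction = crypto_fee * spot_price                # 10, mirrors fiat_fee
assert crypto_out_with_fee == Decimal("2.1")
assert fiat_taxable_amount + fiat_deduction == crypto_out_with_fee * spot_price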
| 0.887485 | 0.201538 |
from typing import Dict, List, Tuple
import unittest
from rating.manager.bisect import get_closest_configs_bisect
import yaml
def generate_test_from_timestamp(timestamp: int) -> Tuple[int, Tuple[Dict, Dict]]:
"""Generate a single (timestamp, (rules, metrics)) configuration procedurally, to be tested afterward."""
rules = """
rules:
-
name: group_m1
labelSet:
test: true
ruleset:
-
metric: request_cpu
value: {}
unit: core-hours
-
metric: usage_cpu
value: {}
unit: core-hours
-
name: group_m2
labelSet:
test: false
ruleset:
-
metric: request_cpu
value: {}
unit: core-hours
-
metric: usage_cpu
value: {}
unit: core-hours
""".format(timestamp / 100,
timestamp / 10,
timestamp / 50,
timestamp / 5)
metrics = """
metrics:
rating_request_cpu:
report_name: pod-cpu-request-hourly
presto_table: report_metering_pod_cpu_request_hourly
presto_column: pod_request_cpu_core_seconds
unit: core-seconds
rating_usage_cpu:
report_name: pod-cpu-usage-hourly
presto_table: report_metering_pod_cpu_usage_hourly
presto_column: pod_usage_cpu_core_seconds
unit: core-seconds
rating_request_memory:
report_name: pod-memory-request-hourly
presto_table: report_metering_pod_memory_request_hourly
presto_column: pod_request_memory_byte_seconds
unit: byte-seconds
rating_usage_memory:
report_name: pod-memory-usage-hourly
presto_table: report_metering_pod_memory_usage_hourly
presto_column: pod_usage_memory_byte_seconds
unit: byte-seconds
"""
return (int(timestamp), (yaml.safe_load(rules),
yaml.safe_load(metrics)))
def generate_tests_fixture(size: int) -> List[Tuple[int, Tuple[Dict, Dict]]]:
"""Generate the configurations for the tests."""
configurations = []
base = generate_test_from_timestamp(1)
configurations.append(base)
for idx in range(1, size):
config = generate_test_from_timestamp(idx * size // 10)
configurations.append(config)
return configurations
class TestConfigs(unittest.TestCase):
"""Test the configuration matching bisection algorithm."""
size = 10000
configurations = generate_tests_fixture(size)
timestamps = tuple(ts[0] for ts in configurations)
def test_find_closest_timestamp_bisect_zero(self):
timestamp = 0
result = get_closest_configs_bisect(timestamp,
self.timestamps)
self.assertEqual(result, 0)
def test_find_closest_timestamp_bisect_one(self):
timestamp = 1
result = get_closest_configs_bisect(timestamp,
self.timestamps)
self.assertEqual(result, 0)
def test_find_closest_timestamp_bisect_begin(self):
timestamp = 250
result = get_closest_configs_bisect(timestamp,
self.timestamps)
self.assertEqual(result, 1)
def test_find_closest_timestamp_bisect_middle(self):
timestamp = 694200
result = get_closest_configs_bisect(timestamp,
self.timestamps)
self.assertEqual(result, 695)
def test_find_closest_timestamp_bisect_end(self):
timestamp = 999629
result = get_closest_configs_bisect(timestamp,
self.timestamps)
self.assertEqual(result, 1000)
def test_find_closest_timestamp_bisect_last(self):
timestamp = 100000
result = get_closest_configs_bisect(timestamp,
self.timestamps)
self.assertEqual(result, 100)
def test_find_closest_timestamp_bisect_over(self):
timestamp = 19231123123
result = get_closest_configs_bisect(timestamp,
self.timestamps)
self.assertEqual(result, 9999)
def test_find_in_small(self):
timestamps = (0, 1576663550, 1576675754, 1576678772)
timestamp = 1576672457
result = get_closest_configs_bisect(timestamp,
timestamps)
self.assertEqual(result, 2)
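The implementation under test lives in rating.manager.bisect and is not reproduced in this file. Purely as a hedged sketch, one implementation consistent with the expected indices above is a left bisection clamped to the last known configuration (the helper name below is hypothetical):

from bisect import bisect_left
from typing import Sequence

def get_closest_configs_bisect_sketch(timestamp: int, timestamps: Sequence[int]) -> int:
    """Index of the configuration matching `timestamp`, clamped to the last known one."""
    return min(bisect_left(timestamps, timestamp), len(timestamps) - 1)

# e.g. get_closest_configs_bisect_sketch(1576672457, (0, 1576663550, 1576675754, 1576678772)) == 2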
|
tests/test_timestamps.py
| 0.899844 | 0.423041 |
from __future__ import absolute_import, unicode_literals, print_function, division
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth.decorators import login_required
from django.conf.urls import include
from django.contrib import admin
from django.contrib.auth.views import LogoutView
from django.shortcuts import redirect
from django.urls import re_path
from django.views.generic import TemplateView
from rest_framework.authentication import BasicAuthentication, SessionAuthentication, TokenAuthentication
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from main.api.permissions import CanViewSwagger
from main.views import DashboardView
from main.api.urls import urls as api_endpoints
from main.urls import download_urlpatterns
from publish.urls import publish_urlpatterns
def home_view_selection_view(request):
if request.user.is_authenticated:
return redirect('api/explorer')
else:
return redirect('login')
def admin_view_selection_view(request):
if request.user.is_superuser:
return admin.site.index(request)
elif request.user.is_authenticated:
return redirect('dashboard')
else:
return redirect('login')
web_urls = [
# Authentication URLs
re_path(r'^logout/$', LogoutView.as_view(), {'next_page': '/login/'}, name='logout'),
# re_path(r'^login/$', auth_views.login),
re_path('^', include('django.contrib.auth.urls')),
# Application URLs
re_path(r'^download/', include(download_urlpatterns, namespace='download')),
re_path(r'^admin/logout/$', LogoutView.as_view(), {'next_page': '/'}),
# use a function to determine where admin/ will resolve to, based on the user
re_path(r'^admin/$', admin_view_selection_view),
re_path(r'^admin/', admin.site.urls),
re_path(r'^publish/', include(publish_urlpatterns, namespace='publish')),
re_path(r'^$', home_view_selection_view, name='home'),
re_path(r'^dashboard/', login_required(DashboardView.as_view()), name='dashboard'),
re_path(r'^about/', TemplateView.as_view(template_name='main/about.html'), name='about'),
# legacy
re_path(r'^grappelli/', include('grappelli.urls')), # Grappelli URLS
]
api_urls = [
re_path(r'^api/', include((api_endpoints, 'api'))),
]
sso_api_urls = [
re_path(r'^sso-api/', include((api_endpoints, 'sso-api'))),
]
media_urls = static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
schema_view = get_schema_view(
openapi.Info(
title="Biosys API",
default_version='v1',
description="Biosys API Documentation",
),
public=True,
patterns=api_urls,
authentication_classes=(SessionAuthentication, BasicAuthentication, TokenAuthentication),
permission_classes=(CanViewSwagger,)
)
api_doc_urls = [
re_path(r'^api/swagger(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=None), name='doc-json'),
re_path(r'^api/explorer/$', schema_view.with_ui('swagger', cache_timeout=None), name='doc-swagger'),
re_path(r'^api/redoc/$', schema_view.with_ui('redoc', cache_timeout=None), name='doc-redoc'),
]
urlpatterns = web_urls + api_urls + api_doc_urls + media_urls + sso_api_urls
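A small illustrative check, not part of biosys, of the admin view selector above using Django's RequestFactory; it assumes a configured settings module for this project so that redirect('login') resolves to /login/ via the included django.contrib.auth.urls:

from django.contrib.auth.models import AnonymousUser
from django.test import RequestFactory

request = RequestFactory().get("/admin/")
request.user = AnonymousUser()
response = admin_view_selection_view(request)
# Anonymous users are neither superusers nor authenticated, so they are redirected to login.
assert response.status_code == 302 and response.url == "/login/"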
|
biosys/urls.py
| 0.444324 | 0.05962 |
from gevent import monkey
monkey.patch_all()
import unittest
import coverage
cov = coverage.coverage()
cov.start()
from flask import Flask, session
from flask_socketio import SocketIO, send, emit, join_room, leave_room
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret'
socketio = SocketIO(app)
disconnected = None
@socketio.on('connect')
def on_connect():
send('connected')
session['a'] = 'b'
@socketio.on('disconnect')
def on_disconnect():
global disconnected
disconnected = '/'
@socketio.on('connect', namespace='/test')
def on_connect_test():
send('connected-test')
@socketio.on('disconnect', namespace='/test')
def on_disconnect_test():
global disconnected
disconnected = '/test'
@socketio.on('message')
def on_message(message):
send(message)
@socketio.on('json')
def on_json(data):
send(data, json=True, broadcast=True)
@socketio.on('message', namespace='/test')
def on_message_test(message):
send(message)
@socketio.on('json', namespace='/test')
def on_json_test(data):
send(data, json=True, namespace='/test')
@socketio.on('my custom event')
def on_custom_event(data):
emit('my custom response', data)
@socketio.on('my custom namespace event', namespace='/test')
def on_custom_event_test(data):
emit('my custom namespace response', data, namespace='/test')
@socketio.on('my custom broadcast event')
def on_custom_event_broadcast(data):
emit('my custom response', data, broadcast=True)
@socketio.on('my custom broadcast namespace event', namespace='/test')
def on_custom_event_broadcast_test(data):
emit('my custom namespace response', data, namespace='/test',
broadcast=True)
@socketio.on('join room')
def on_join_room(data):
join_room(data['room'])
@socketio.on('leave room')
def on_leave_room(data):
leave_room(data['room'])
@socketio.on('join room', namespace='/test')
def on_join_room_test(data):
join_room(data['room'])
@socketio.on('leave room', namespace='/test')
def on_leave_room_test(data):
leave_room(data['room'])
@socketio.on('my room event')
def on_room_event(data):
room = data.pop('room')
emit('my room response', data, room=room)
@socketio.on('my room namespace event', namespace='/test')
def on_room_namespace_event(data):
room = data.pop('room')
send('room message', room=room)
@socketio.on_error()
def error_handler(value):
if isinstance(value, AssertionError):
global error_testing
error_testing = True
else:
raise value
@socketio.on('error testing')
def raise_error(data):
raise AssertionError()
@socketio.on_error('/test')
def error_handler_namespace(value):
if isinstance(value, AssertionError):
global error_testing_namespace
error_testing_namespace = True
else:
raise value
@socketio.on("error testing", namespace='/test')
def raise_error_namespace(data):
raise AssertionError()
@socketio.on_error_default
def error_handler_default(value):
if isinstance(value, AssertionError):
global error_testing_default
error_testing_default = True
else:
raise value  # re-raise unexpected exceptions, as the other error handlers do
@socketio.on("error testing", namespace='/unused_namespace')
def raise_error_default(data):
raise AssertionError()
class TestSocketIO(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
cov.stop()
cov.report(include='flask_socketio/__init__.py')
def setUp(self):
pass
def tearDown(self):
pass
def test_connect(self):
client = socketio.test_client(app)
received = client.get_received()
self.assertTrue(len(received) == 1)
self.assertTrue(received[0]['args'] == 'connected')
client.disconnect()
def test_connect_namespace(self):
client = socketio.test_client(app, namespace='/test')
received = client.get_received('/test')
self.assertTrue(len(received) == 1)
self.assertTrue(received[0]['args'] == 'connected-test')
client.disconnect(namespace='/test')
def test_disconnect(self):
global disconnected
disconnected = None
client = socketio.test_client(app)
client.disconnect()
self.assertTrue(disconnected == '/')
def test_disconnect_namespace(self):
global disconnected
disconnected = None
client = socketio.test_client(app, namespace='/test')
client.disconnect('/test')
self.assertTrue(disconnected == '/test')
def test_send(self):
client = socketio.test_client(app)
client.get_received() # clean received
client.send('echo this message back')
received = client.get_received()
self.assertTrue(len(received) == 1)
self.assertTrue(received[0]['args'] == 'echo this message back')
def test_send_json(self):
client1 = socketio.test_client(app)
client2 = socketio.test_client(app)
client1.get_received() # clean received
client2.get_received() # clean received
client1.send({'a': 'b'}, json=True)
received = client1.get_received()
self.assertTrue(len(received) == 1)
self.assertTrue(received[0]['args']['a'] == 'b')
received = client2.get_received()
self.assertTrue(len(received) == 1)
self.assertTrue(received[0]['args']['a'] == 'b')
def test_send_namespace(self):
client = socketio.test_client(app, namespace='/test')
client.get_received('/test') # clean received
client.send('echo this message back', namespace='/test')
received = client.get_received('/test')
self.assertTrue(len(received) == 1)
self.assertTrue(received[0]['args'] == 'echo this message back')
def test_send_json_namespace(self):
client = socketio.test_client(app, namespace='/test')
client.get_received('/test') # clean received
client.send({'a': 'b'}, json=True, namespace='/test')
received = client.get_received('/test')
self.assertTrue(len(received) == 1)
self.assertTrue(received[0]['args']['a'] == 'b')
def test_emit(self):
client = socketio.test_client(app)
client.get_received() # clean received
client.emit('my custom event', {'a': 'b'})
received = client.get_received()
self.assertTrue(len(received) == 1)
self.assertTrue(len(received[0]['args']) == 1)
self.assertTrue(received[0]['name'] == 'my custom response')
self.assertTrue(received[0]['args'][0]['a'] == 'b')
def test_emit_namespace(self):
client = socketio.test_client(app, namespace='/test')
client.get_received('/test') # clean received
client.emit('my custom namespace event', {'a': 'b'}, namespace='/test')
received = client.get_received('/test')
self.assertTrue(len(received) == 1)
self.assertTrue(len(received[0]['args']) == 1)
self.assertTrue(received[0]['name'] == 'my custom namespace response')
self.assertTrue(received[0]['args'][0]['a'] == 'b')
def test_broadcast(self):
client1 = socketio.test_client(app)
client2 = socketio.test_client(app)
client3 = socketio.test_client(app, namespace='/test')
client2.get_received() # clean
client3.get_received('/test') # clean
client1.emit('my custom broadcast event', {'a': 'b'}, broadcast=True)
received = client2.get_received()
self.assertTrue(len(received) == 1)
self.assertTrue(len(received[0]['args']) == 1)
self.assertTrue(received[0]['name'] == 'my custom response')
self.assertTrue(received[0]['args'][0]['a'] == 'b')
self.assertTrue(len(client3.get_received('/test')) == 0)
def test_broadcast_namespace(self):
client1 = socketio.test_client(app, namespace='/test')
client2 = socketio.test_client(app, namespace='/test')
client3 = socketio.test_client(app)
client2.get_received('/test') # clean
client3.get_received() # clean
client1.emit('my custom broadcast namespace event', {'a': 'b'},
namespace='/test')
received = client2.get_received('/test')
self.assertTrue(len(received) == 1)
self.assertTrue(len(received[0]['args']) == 1)
self.assertTrue(received[0]['name'] == 'my custom namespace response')
self.assertTrue(received[0]['args'][0]['a'] == 'b')
self.assertTrue(len(client3.get_received()) == 0)
def test_session(self):
client = socketio.test_client(app)
client.get_received() # clean received
client.send('echo this message back')
self.assertTrue(client.socket[''].session['a'] == 'b')
def test_room(self):
client1 = socketio.test_client(app)
client2 = socketio.test_client(app)
client3 = socketio.test_client(app, namespace='/test')
client1.get_received() # clean
client2.get_received() # clean
client3.get_received('/test') # clean
client1.emit('join room', {'room': 'one'})
client2.emit('join room', {'room': 'one'})
client3.emit('join room', {'room': 'one'}, namespace='/test')
client1.emit('my room event', {'a': 'b', 'room': 'one'})
received = client1.get_received()
self.assertTrue(len(received) == 1)
self.assertTrue(len(received[0]['args']) == 1)
self.assertTrue(received[0]['name'] == 'my room response')
self.assertTrue(received[0]['args'][0]['a'] == 'b')
self.assertTrue(received == client2.get_received())
received = client3.get_received('/test')
self.assertTrue(len(received) == 0)
client1.emit('leave room', {'room': 'one'})
client1.emit('my room event', {'a': 'b', 'room': 'one'})
received = client1.get_received()
self.assertTrue(len(received) == 0)
received = client2.get_received()
self.assertTrue(len(received) == 1)
self.assertTrue(len(received[0]['args']) == 1)
self.assertTrue(received[0]['name'] == 'my room response')
self.assertTrue(received[0]['args'][0]['a'] == 'b')
client2.disconnect()
socketio.emit('my room event', {'a': 'b'}, room='one')
received = client1.get_received()
self.assertTrue(len(received) == 0)
received = client3.get_received('/test')
self.assertTrue(len(received) == 0)
client3.emit('my room namespace event', {'room': 'one'}, namespace='/test')
received = client3.get_received('/test')
self.assertTrue(len(received) == 1)
self.assertTrue(received[0]['name'] == 'message')
self.assertTrue(received[0]['args'] == 'room message')
self.assertTrue(len(socketio.rooms) == 1)
client3.disconnect('/test')
self.assertTrue(len(socketio.rooms) == 0)
def test_error_handling(self):
client = socketio.test_client(app)
client.get_received() # clean client
global error_testing
error_testing = False
client.emit("error testing", "")
self.assertTrue(error_testing)
def test_error_handling_namespace(self):
client = socketio.test_client(app, namespace='/test')
client.get_received('/test')
global error_testing_namespace
error_testing_namespace = False
client.emit("error testing", "", namespace='/test')
self.assertTrue(error_testing_namespace)
def test_error_handling_default(self):
client = socketio.test_client(app, namespace='/unused_namespace')
client.get_received('/unused_namespace')
global error_testing_default
error_testing_default = False
client.emit("error testing", "", namespace='/unused_namespace')
self.assertTrue(error_testing_default)
if __name__ == '__main__':
unittest.main()
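As a usage note (not part of the original suite), the same test-client pattern can also be exercised outside unittest; the snippet below assumes the app and socketio objects defined at the top of this file:

client = socketio.test_client(app)
client.get_received()                         # drain the initial 'connected' message
client.emit('my custom event', {'a': 'b'})
received = client.get_received()
# One 'my custom response' event is expected, echoing the payload back.
assert received[0]['name'] == 'my custom response'
assert received[0]['args'][0] == {'a': 'b'}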
|
test_socketio.py
| 0.399929 | 0.239199 |