from setuptools import setup
with open("README.md") as f:
readme = f.read()
with open("LICENSE") as f:
license = f.read()
setup(
name="PERFORM",
    version="0.1",
author="Christopher R. Wentland",
author_email="[email protected]",
url="https://github.com/cwentland0/pyGEMS_1D",
    description="One-dimensional reacting flow for ROM prototyping",
long_description=readme,
license=license,
install_requires=["numpy>=1.16.6", "scipy>=1.1.0", "matplotlib>=2.1.0"],
entry_points={"console_scripts": ["perform = perform.driver:main"]},
python_requires=">=3.6",
)
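# Usage note (standard setuptools behavior, not part of the original file):
# after `pip install -e .`, the console_scripts entry above provides a
# `perform` command on PATH that dispatches to perform.driver:main.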
|
python
|
class ProxyError(StandardError):
def __init__(self, title, message):
super(ProxyError, self).__init__()
self.title = title
self.message = message
self.error = "Error"
def __str__(self):
return "%s => %s:%s" % (self.error, self.title, self.message)
class ResourceError(ProxyError):
def __init__(self, title, message):
super(ResourceError, self).__init__(title, message)
self.error = "Resource Error"
class RequestError(ProxyError):
def __init__(self, title, message):
super(RequestError, self).__init__(title, message)
self.error = "Request Error"
class HTTPResponseMarble(object):
def __init__(self, *k, **p):
self.__dict__['status'] = u'200 OK'
self.__dict__['status_format'] = u'unicode'
self.__dict__['header_list'] = \
[dict(name=u'Content-Type', value=u'text/html; charset=utf8')]
self.__dict__['header_list_format'] = u'unicode'
self.__dict__['body'] = []
self.__dict__['body_format'] = u'unicode'
def __setattr__(self, name, value):
if name not in self.__dict__:
raise AttributeError('No such attribute %s'%name)
self.__dict__[name] = value
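# Added usage sketch: HTTPResponseMarble whitelists attribute names in
# __setattr__, so only slots created in __init__ may be reassigned later.
# marble = HTTPResponseMarble()
# marble.status = u'404 Not Found'   # fine, 'status' already exists
# marble.bogus = 1                   # raises AttributeError('No such attribute bogus')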
|
python
|
__author__ = 'wanghao'
# import threading
import sys
import socket
from struct import *
import time
import threading
def run_flow(dst_ip, port, size):
def run(dst_ip, port, size):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# data = os.urandom(size)
data = pack('c', 'a')
try:
sock.connect((dst_ip, port))
size_left = size
while size_left:
if size_left > 200000000:
sock.sendall(data*200000000)
size_left -= 200000000
else:
sock.sendall(data*size_left)
size_left = 0
except socket.timeout:
print "Connection Timeout!"
except socket.error, e:
print e
finally:
sock.close()
    t = threading.Thread(target=run, args=(dst_ip, port, size))  # pass args; don't call run() here
t.start()
t.join()
print "Done"
#run(dst_ip, port, size)
if __name__ == '__main__':
dst_ip = sys.argv[1]
port = int(sys.argv[2])
size = int(float(sys.argv[3]))
fd = open("fct.txt", 'w')
#print "Flow Size:", size
fd.write("Flow Size %d " % size)
    start_t = time.time()  # wall-clock time; time.clock() measures CPU time
#print "Start:", time.strftime("%M:%S")
fd.write("Start: %s " % time.strftime("%M:%S"))
run_flow(dst_ip, port, size)
    end_t = time.time()
#print "End:", time.strftime("%M:%S")
fd.write("End: %s " % time.strftime("%M:%S"))
print "Duration:", end_t - start_t
fd.write("Duration: %f \r\n" % (end_t - start_t))
fd.close()
|
python
|
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
# Implementing relu()
def relu(z):
if z > 0:
return z
else:
return 0
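# Equivalent vectorized form (a standard NumPy identity, added as an aside;
# the scalar relu() above is kept for the list comprehensions below):
def relu_vec(z):
    return np.maximum(z, 0)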
# Define linewidth and fontsize to control the aesthetics of the plots easily
linewidth = 4
fontsize = 20
# Define a range of values for the inputs of relu(z)
z_range = np.arange(-5,5, 0.01)
plt.figure(figsize=(16,9))
# For each z in x_range compute relu(z)
y_relu = [relu(z) for z in z_range]
plt.plot(z_range, y_relu, c='b', linewidth= linewidth, label='Relu(z)')
plt.ylim(-5, 5)
plt.xlim(-5, 5)
plt.grid()
plt.legend(fontsize=fontsize, loc=2)
plt.show()
def grad_relu(z):
if z > 0:
return 1
else:
return 0
### The gradients of relu
y_relu = [relu(z) for z in z_range]
grad_y_relu = [grad_relu(z) for z in z_range]
plt.figure(figsize=(16, 9))
# The relu
plt.subplot(1,2,1)
plt.plot(z_range, y_relu, c='b',linewidth= linewidth, label='Relu(z)')
plt.legend(fontsize=fontsize,loc=2)
plt.grid()
### The gradients of relu
plt.subplot(1,2,2)
plt.plot(z_range, grad_y_relu, c='r',linewidth= linewidth, label='d Relu(z)/dz')
plt.legend(fontsize=fontsize,loc=2)
plt.grid()
plt.show()
# Demonstrating the flexibility of relu: relu(z),relu(-z),-relu(z),-relu(-z)
z_range = np.arange(-5,5, 0.01)
plt.figure(figsize=(16,9))
plt.suptitle('The Flexibility of Relu(z)', fontsize=fontsize)
plt.subplot(2,2,1)
y_relu = [relu(z) for z in z_range]
plt.plot(z_range, y_relu, c='b', linewidth= linewidth, label='Relu(z)')
plt.ylim(-5,5)
plt.xlim(-5,5)
plt.grid()
plt.legend(fontsize=fontsize, loc=2)
plt.subplot(2,2,2)
y_relu = [relu(-z) for z in z_range]
plt.plot(z_range, y_relu, c='k', linewidth= linewidth,label='Relu(-z)')
plt.ylim(-5,5)
plt.xlim(-5,5)
plt.legend(fontsize=fontsize,loc=1)
plt.grid()
plt.subplot(2,2,3)
y_relu = [-relu(z) for z in z_range]
plt.plot(z_range, y_relu, c='r', linewidth= linewidth,label='-Relu(z)')
plt.ylim(-5,5)
plt.xlim(-5,5)
plt.legend(fontsize=fontsize,loc=2)
plt.grid()
plt.subplot(2,2,4)
y_relu = [-relu(-z) for z in z_range]
plt.plot(z_range, y_relu, c='g', linewidth= linewidth,label='-Relu(-z)')
plt.ylim(-5,5)
plt.xlim(-5,5)
plt.legend(fontsize=fontsize,loc=1)
plt.grid()
plt.show()
# The rotation of the slope in relu
w_range = np.arange(0.5, 3.5, 0.5)
plt.figure(figsize=(16, 9))
plt.suptitle('Changing the slope of Relu(w*z) using a coefficient w', fontsize=fontsize)
for idx, w in enumerate(w_range):
plt.subplot(2,3,idx+1)
y_relu = [relu(w*z) for z in z_range]
plt.plot(z_range, y_relu, c='b', linewidth=linewidth, label='w = %.2f' % w)
plt.ylim(-1, 5)
plt.xlim(-5, 5)
plt.grid()
plt.legend(fontsize=fontsize, loc=2)
plt.show()
# Shifting the relu horizontally
bias = np.arange(0.5, 3.5, 0.5)
plt.figure(figsize=(16, 9))
plt.suptitle('Shifting Relu(z+b) horizontally using a bias term b inside Relu()', fontsize=fontsize)
for idx, b in enumerate(bias):
plt.subplot(2,3, idx+1)
y_relu = [relu(z+b) for z in z_range]
plt.plot(z_range, y_relu, c='b', linewidth=linewidth, label='b = %.2f' % b)
plt.ylim(-1, 5)
plt.xlim(-4, 4)
plt.grid()
plt.legend(fontsize=fontsize, loc=2)
plt.show()
# Shifting the relu vertically
bias = np.arange(0.5, 3.5, 0.5)
plt.figure(figsize=(16, 9))
plt.suptitle('Shifting Relu(z) + b vertically using a bias term b outside Relu()', fontsize=fontsize)
for idx, b in enumerate(bias):
plt.subplot(2,3, idx+1)
y_relu = [relu(z)+b for z in z_range]
plt.plot(z_range, y_relu, c='b', linewidth=linewidth, label='b = %.2f' % b)
plt.ylim(-1, 5)
plt.xlim(-4, 4)
plt.grid()
plt.legend(fontsize=fontsize, loc=2)
plt.show()
# Defining the data and the ground-truth
x = torch.unsqueeze(torch.linspace(-10, 10, 300), dim=1)
y = x.pow(3)
# Setting the available device
use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")
print("Device", device)
# Build a regression model class
class Regressor(nn.Module):
def __init__(self, n_hidden=2):
super(Regressor, self).__init__()
self.hidden = torch.nn.Linear(1, n_hidden) # hidden layer
self.predict = torch.nn.Linear(n_hidden, 1) # output layer
def forward(self, x):
x = F.relu(self.hidden(x))
x = self.predict(x)
return x
# number of relu() units
n_hidden = 7
# total number of epochs
n_epochs = 4000
# Building an object from the regressor class while passing
# n_hidden and setting the model to train() mode
regressor = Regressor(n_hidden=n_hidden).train()
# Defining the optimizer
optimizer = torch.optim.SGD(regressor.parameters(), lr=0.0001)
# Defining MSE as the appropriate loss function
# For regression.
loss_func = torch.nn.MSELoss()
plt.figure(figsize=(16, 9))
for epoch in range(n_epochs):
# Put the model in training mode
regressor.train()
    # Clear the previous plot in the animation after each epoch
plt.clf()
    # Input x to the regressor and receive the prediction
y_hat = regressor(x)
# Compute the loss between y_hat and the actual
# Value of the ground-truth curve, y
loss = loss_func(y_hat, y)
# Compute the gradients w.r.t all the parameters
loss.backward()
# Update the parameters
optimizer.step()
    # Zero out all the gradients before inputting the next data point
    # Into the regressor model
optimizer.zero_grad()
    # Every 100 epochs, evaluate and do some plotting
if epoch % 100 == 0:
print('Epoch %d --- Loss %.5f' % (epoch+1, loss.data.numpy()))
        # Before evaluation, put the model back to evaluation mode
regressor.eval()
# At this very moment of training, grab the current biases and weights
# From the model object, namely, b_0, b_1, w_0, and w_1
biases_0 = regressor.hidden.bias.cpu().detach().numpy()
weights_0 = regressor.hidden.weight.squeeze(0).cpu().detach().numpy()
biases_1 = regressor.predict.bias.cpu().detach().numpy() # This has ONLY 1 value
weights_1 = regressor.predict.weight.squeeze(0).cpu().detach().numpy()
        # For the purpose of plotting consider the current range of
        # x as the inputs to EACH relu() individually
data = x.detach().numpy()
        # This will hold the ULTIMATE
# prediction, that is, relu(input*w_0+b_0)*w_1 + b_1
# We reset it before plotting the current status of the model
# And the learned relu() functions
sum_y_relu = []
# For each relu() unit do the following
for idx in range(n_hidden):
plt.suptitle('Epoch=%d --- MSE loss= %.2f' % (epoch+1, loss.data.numpy()), fontsize=fontsize)
# Plot output of the current relu() unit
plt.subplot(1,3,1)
plt.title('Relu(w_0*x + b_0)', fontsize=fontsize)
y_relu = [relu(d*weights_0[idx]+biases_0[idx]) for d in data]
plt.plot(data, y_relu)
plt.ylim(-1,40)
plt.grid()
plt.subplot(1, 3, 2)
# Plot output of the current relu(), multiplied by its
# corresponding weight, w_1, and summed with the bias b_1
plt.title('Relu(w_0*x + b_0)*w_1 + b_1',fontsize=fontsize)
y_relu = [relu(d*weights_0[idx]+biases_0[idx])*weights_1[idx] + biases_1[0] for d in data]
plt.plot(data,y_relu)
plt.ylim(-500,900)
plt.grid()
            # Keep adding the Relu(w_0*x + b_0)*w_1 + b_1 for each relu to the
            # sum_y_relu list. We will sum them up later to plot
            # The ULTIMATE prediction of the model y_hat
sum_y_relu.append([relu(d*weights_0[idx]+biases_0[idx])*weights_1[idx] + biases_1[0] for d in data])
# Sum it all up
sum_y_relu = np.sum(np.array(sum_y_relu),axis=0)
plt.subplot(1, 3, 3)
        plt.title('y_hat', fontsize=fontsize)
plt.plot(x.data.numpy(), y.data.numpy(), color="k", label='Ground-truth')
plt.plot(data,sum_y_relu, c='r', label='Prediction')
plt.legend()
plt.grid()
# A slight delay in the animation
plt.pause(0.1)
|
python
|
'''
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
'''
# Write your code here
def right(a, b):
    # Rotate the bit list right b times: last bit moves to the front.
    r = a[:]
    for _ in range(b):
        r = r[-1:] + r[:-1]
    return int(''.join(r))
def left(a, b):
    # Rotate the bit list left b times: first bit moves to the end.
    r = a[:]
    for _ in range(b):
        r = r[1:] + r[:1]
    return int(''.join(r))
def btod(n):
    # Interpret the decimal digits of n as bits, e.g. btod(1011) == 11.
    if n == 0:
        return 0
    return n % 10 + 2 * btod(n // 10)
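# Added sanity checks (hand-verified): rotating '0101' one step in either
# direction yields '1010', i.e. decimal 10.
assert btod(right(list('0101'), 1)) == 10
assert btod(left(list('0101'), 1)) == 10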
for _ in range(int(input())):
    a, b, c = input().split()
    b = int(b)
    # 16-bit binary representation of a, as a list of '0'/'1' characters
    bits = list(bin(int(a))[2:].zfill(16))
    if c == 'L':
        print(btod(left(bits, b)))
    elif c == 'R':
        print(btod(right(bits, b)))
|
python
|
import mykde
"""
If a font in the browser is not Droid, in Google Chrome right click on the text
with the wrong font, select 'Inspect element', find 'Computed style' and
'font-family' in it:
font-family: 'lucida grande', tahoma, verdana, arial, sans-serif;
And for each font do 'fc=match':
$ fc-match Helvetica
LiberationSans-Regular.ttf: "Liberation Sans" "Regular"
Ok, you found the offending font. Add it to 'fonts.conf' file.
"""
class Action(mykde.BaseAction):
name = 'Droid fonts everywhere'
description = """
    Droid fonts are used everywhere possible, because they render very nicely.<br>
In browser they should replace Verdana, Arial and other MS fonts.<br>
<br>
<img src="screenshot.png"/>
"""
packages = ['fonts-droid']
affects = [mykde.KdeSettings]
def proceed(self):
self.update_kconfig('./kdeglobals', '~/.kde/share/config/kdeglobals')
self.copy_file('./fonts.conf', '~/.config/fontconfig/')
self.delete_file('~/.fonts.conf')
# self.create_symlink('~/.config/fontconfig/fonts.conf', '~/.fonts.conf') # in 12.04 only this works
def override_font(self, font, override):
"""Add necessary nodes to fonts.conf """
        raise NotImplementedError
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 25 11:25:36 2017
@author: azkei
We Briefly covered operations between two data structures last file.
We will cover how arithmetic operators apply between two or more structured here
using Flexible Arithmetic Methods such as
add(), sub(),div(),mul()
"""
# 1. Flexible Arithmetic Methods
# Addition
frame1.add(frame2)
# Subtraction
frame1.sub(frame2)
# Division
frame1.div(frame2)
# Multiplication
frame1.mul(frame2)
# As you can see, there are NaNs for any label not shared by both structures.
# 2. Operations between DataFrame and Series
# Generate a 4x4 DF with range 0-15
frame = pd.DataFrame(np.arange(16).reshape((4,4)),
index=['red','blue','yellow','white'],
columns=['ball','pen','pencil','paper'])
# Generate Series, values 0-3
ser = pd.Series(np.arange(4),index=['ball','pen','pencil','paper'])
ser
# Subtract Series in DataFrame
# The frame will subtract based on the common indexes the two Structures have
frame - ser
# If the index is not present, the result will have elements with NaN
ser['mug'] = 9
frame - ser
|
python
|
from django.utils import timezone
import pytz
class TimeZoneMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
tzname = request.session.get("time_zone")
if tzname:
timezone.activate(pytz.timezone(tzname))
else:
timezone.deactivate()
return self.get_response(request)
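# Usage sketch (assumed module path/names, standard Django configuration):
# register the middleware in settings.py:
#   MIDDLEWARE = [..., "myapp.middleware.TimeZoneMiddleware", ...]
# and set the session key from any view:
#   request.session["time_zone"] = "Europe/Paris"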
|
python
|
from django.urls import path
from . import views
app_name = "podcast"
urlpatterns = [
    path('', views.home_page, name="home-page"),
    path('podcast/', views.podcast_list, name="podcast-list"),
    path('podcast/search/', views.search_podcast, name="podcast-search"),
    path('podcast/create/', views.create_podcast, name="create-podcast"),
    path('podcast/detail/<int:pk>/', views.podcast_detail, name="podcast-detail"),
    # Articles
    path('article/', views.article_list, name="article-list"),
    path('article/search/', views.search_article, name="article-search"),
    path('article/create/', views.create_article, name="create-article"),
    path('article/detail/<int:pk>/', views.article_detail, name="article-detail"),
    # News
    path('news/', views.news_list, name="news-list"),
    path('news/search/', views.search_news, name="news-search"),
    path('news/create/', views.create_news, name="create-news"),
    path('news/detail/<int:pk>/', views.news_detail, name="news-detail"),
]
|
python
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Pytest configuration."""
from __future__ import absolute_import, print_function
import os
import shutil
import tempfile
import uuid
from zipfile import ZipFile
import pytest
from flask import Flask
from flask_babelex import Babel
from flask_webpackext import current_webpack
from invenio_assets import InvenioAssets
from invenio_config import InvenioConfigDefault
from invenio_db import InvenioDB
from invenio_db import db as db_
from invenio_files_rest import InvenioFilesREST
from invenio_files_rest.models import Location, ObjectVersion
from invenio_formatter import InvenioFormatter
from invenio_pidstore.providers.recordid import RecordIdProvider
from invenio_records import InvenioRecords
from invenio_records_files.api import Record
from invenio_records_ui import InvenioRecordsUI
from invenio_records_ui.views import create_blueprint_from_app
from six import BytesIO
from sqlalchemy_utils.functions import create_database, database_exists
from invenio_previewer import InvenioPreviewer
@pytest.yield_fixture(scope='session', autouse=True)
def app():
"""Flask application fixture with database initialization."""
instance_path = tempfile.mkdtemp()
app_ = Flask(
'testapp', static_folder=instance_path, instance_path=instance_path)
app_.config.update(
TESTING=True,
SQLALCHEMY_DATABASE_URI=os.environ.get(
'SQLALCHEMY_DATABASE_URI',
'sqlite:///:memory:'),
SQLALCHEMY_TRACK_MODIFICATIONS=True,
RECORDS_UI_DEFAULT_PERMISSION_FACTORY=None,
RECORDS_UI_ENDPOINTS=dict(
recid=dict(
pid_type='recid',
route='/records/<pid_value>',
template='invenio_records_ui/detail.html',
),
recid_previewer=dict(
pid_type='recid',
route='/records/<pid_value>/preview',
view_imp='invenio_previewer.views:preview',
record_class='invenio_records_files.api:Record',
),
recid_files=dict(
pid_type='recid',
route='/record/<pid_value>/files/<filename>',
view_imp='invenio_records_files.utils.file_download_ui',
record_class='invenio_records_files.api:Record',
),
),
SERVER_NAME='localhost',
APP_THEME=['semantic-ui']
)
Babel(app_)
InvenioAssets(app_)
InvenioDB(app_)
InvenioRecords(app_)
InvenioConfigDefault(app_)
InvenioFormatter(app_)
InvenioPreviewer(app_)._state
InvenioRecordsUI(app_)
app_.register_blueprint(create_blueprint_from_app(app_))
InvenioFilesREST(app_)
with app_.app_context():
yield app_
shutil.rmtree(instance_path)
@pytest.yield_fixture()
def db(app):
"""Setup database."""
if not database_exists(str(db_.engine.url)):
create_database(str(db_.engine.url))
db_.create_all()
yield db_
db_.session.remove()
db_.drop_all()
@pytest.yield_fixture(scope='session')
def webassets(app):
"""Flask application fixture with assets."""
initial_dir = os.getcwd()
os.chdir(app.instance_path)
    # force the theme.config alias to point to less/invenio_theme/theme.config
theme_bundle = current_webpack.project.bundles[0]
theme_bundle.aliases['../../theme.config'] = \
'less/invenio_theme/theme.config'
current_webpack.project.buildall()
yield app
os.chdir(initial_dir)
@pytest.yield_fixture()
def location(db):
"""File system location."""
tmppath = tempfile.mkdtemp()
loc = Location(
name='testloc',
uri=tmppath,
default=True
)
db.session.add(loc)
db.session.commit()
yield loc
shutil.rmtree(tmppath)
@pytest.fixture()
def record(db, location):
"""Record fixture."""
rec_uuid = uuid.uuid4()
provider = RecordIdProvider.create(
object_type='rec', object_uuid=rec_uuid)
record = Record.create({
'control_number': provider.pid.pid_value,
'title': 'TestDefault',
}, id_=rec_uuid)
db.session.commit()
return record
@pytest.fixture()
def record_with_file(db, record, location):
"""Record with a test file."""
testfile = ObjectVersion.create(record.bucket, 'testfile',
stream=BytesIO(b'atest'))
record.update(dict(
_files=[dict(
bucket=str(testfile.bucket_id),
key=testfile.key,
size=testfile.file.size,
checksum=str(testfile.file.checksum),
version_id=str(testfile.version_id),
), ]
))
record.commit()
db.session.commit()
return record, testfile
@pytest.fixture()
def zip_fp(db):
"""ZIP file stream."""
fp = BytesIO()
zipf = ZipFile(fp, 'w')
zipf.writestr('Example.txt', 'This is an example'.encode('utf-8'))
zipf.writestr(u'Lé UTF8 test.txt', 'This is an example'.encode('utf-8'))
zipf.close()
fp.seek(0)
return fp
|
python
|
import requests
from bs4 import BeautifulSoup
from .handler import add_handler
@add_handler(r'http(s?)://drops\.dagstuhl\.de/(\w+)')
def download(url):
metadata = dict()
metadata['importer'] = 'drops'
data = requests.get(url)
soup = BeautifulSoup(data.text, "lxml")
authortags = soup.find_all("meta", attrs={"name": "citation_author"})
metadata['authors'] = [i['content'].strip() for i in authortags ]
metadata['title'] = soup.find_all("meta", attrs={"name": "citation_title"})[0]['content']
metadata['url'] = soup.find_all("meta", attrs={"name": "citation_pdf_url"})[0]['content']
metadata['date'] = soup.find_all("meta", attrs={"name": "citation_date"})[0]['content']
metadata['abstract'] = soup.find(string='Abstract').findParent().findParent().text.replace('\nAbstract\n', '').strip()
metadata['venue'] = soup.find_all("meta", attrs={"name": "citation_conference_title"})[0]['content']
metadata['DOI'] = soup.find_all("meta", attrs={"name": "citation_doi"})[0]['content']
metadata['metaurl'] = url
metadata['uid'] = metadata['DOI']
metadata['keywords'] = soup.find("meta", attrs={"name": "DC.Subject", "scheme": "SWD"})['content'].split(',')
return metadata
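# Minimal sketch (an assumption, not the actual .handler module) of the
# registry-decorator pattern that @add_handler implies: map a URL regex to a
# download function so a dispatcher can pick the matching importer.
# import re
# _HANDLERS = []
# def add_handler(pattern):
#     def register(func):
#         _HANDLERS.append((re.compile(pattern), func))
#         return func
#     return register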
|
python
|
# reference page
# https://iric-solver-dev-manual-jp.readthedocs.io/ja/latest/06/03_reference.html
import sys
import iric
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import LightSource
from scipy import signal, interpolate
import flow
class cgns():
def __init__(self, f):
self.fid = iric.cg_open(f, iric.CG_MODE_MODIFY)
iric.cg_iRIC_Init(self.fid)
# iric.cg_iRIC_InitRead(fid)
        # set the grid and its attributes
ier = self.set_grid()
# set time series parameters
ier = self.set_time_parameters()
# set flow calculation parameters
ier = self.set_flow_parameters()
#--------------------------------------------------
# set grid
#--------------------------------------------------
def set_grid(self):
ier = 0
self.ni, self.nj = iric.cg_iRIC_GotoGridCoord2d()
x, y = iric.cg_iRIC_GetGridCoord2d()
z = iric.cg_iRIC_Read_Grid_Real_Node('Elevation')
s = iric.cg_iRIC_Read_Grid_Real_Cell('roughness_cell')
xx = x.reshape(self.nj, self.ni)
yy = y.reshape(self.nj, self.ni)
zz = z.reshape(self.nj, self.ni)
ss = s.reshape(self.nj-1, self.ni-1)
# 2d plot
# fig, ax = plt.subplots()
# ax.contourf(xx, yy, zz, 20)
# 3d plot
# fig, ax = plt.subplots(subplot_kw=dict(projection='3d'))
# ls = LightSource(270, 45)
# rgb = ls.shade(zz, cmap=cm.gist_earth, vert_exag=0.1, blend_mode='soft')
# surf = ax.plot_surface(xx, yy, zz, rstride=1, cstride=1, facecolors=rgb,
# linewidth=0, antialiased=False, shade=False)
# plt.show()
self.xx = xx
self.yy = yy
self.zz = zz
self.ss = ss
return ier
#--------------------------------------------------
# set time series parameters
#--------------------------------------------------
def set_time_parameters(self):
ier = 0
        # discharge conditions
t_series = iric.cg_iRIC_Read_FunctionalWithName('discharge_waterlevel', 'time')
q_series = iric.cg_iRIC_Read_FunctionalWithName('discharge_waterlevel', 'discharge')
        # set up the computation time
if iric.cg_iRIC_Read_Integer('i_sec_hour') == 2:
t_series = t_series*3600.
t_start = t_series[0]
t_end = t_series[len(t_series)-1]
t_out = iric.cg_iRIC_Read_Real('tuk')
        # class variables
self.t_series = t_series
self.q_series = q_series
self.dt = iric.cg_iRIC_Read_Real('dt')
self.istart = int(t_start / self.dt)
self.iend = int(t_end / self.dt) + 1
self.iout = int(t_out / self.dt)
return ier
#--------------------------------------------------
# set flow calculation parameters
#--------------------------------------------------
def set_flow_parameters(self):
ier = 0
self.cip = iric.cg_iRIC_Read_Integer('j_cip')
self.conf = iric.cg_iRIC_Read_Integer('j_conf')
return ier
#--------------------------------------------------
# write calculation result
#--------------------------------------------------
def write_calc_result(self, ctime, flw):
ier = 0
# # write time
iric.cg_iRIC_Write_Sol_Time(ctime)
# # write discharge
qq = self.get_upstream_q(ctime)
iric.cg_iRIC_Write_Sol_BaseIterative_Real('Discharge', qq)
# # write grid
iric.cg_iRIC_Write_Sol_GridCoord2d(self.xx.reshape(-1), self.yy.reshape(-1))
# # write node values
# iric.cg_iRIC_Write_Sol_Integer("Elevation", self.zz.reshape(-1))
iric.cg_iRIC_Write_Sol_Real("Elevation", self.zz.reshape(-1))
iric.cg_iRIC_Write_Sol_Real("VelocityX", flw.uu.reshape(-1))
iric.cg_iRIC_Write_Sol_Real("VelocityY", flw.vv.reshape(-1))
# # write cell values
# iric.cg_iRIC_Write_Sol_Cell_Integer("Manning_S", self.ss.reshape(-1))
iric.cg_iRIC_Write_Sol_Cell_Real("ManningN_c", self.ss.reshape(-1))
iric.cg_iRIC_Write_Sol_Cell_Real("Elevation_c", flw.zz.reshape(-1))
iric.cg_iRIC_Write_Sol_Cell_Real("Depth_c", flw.hs.reshape(-1))
iric.cg_iRIC_Write_Sol_Cell_Real("WaterLevel_c", flw.hh.reshape(-1))
# # write edge values
# iric.cg_iRIC_Write_Sol_IFace_Integer(label, val)
# iric.cg_iRIC_Write_Sol_IFace_Real(label, val)
# # write edge values
# iric.cg_iRIC_Write_Sol_JFace_Integer(label, val)
# iric.cg_iRIC_Write_Sol_JFace_Real(label, val)
return ier
def close(self):
ier = 0
iric.cg_close(self.fid)
return ier
#--------------------------------------------------
# set flow calculation parameters
#--------------------------------------------------
def get_upstream_q(self, t):
tt = self.t_series
qq = self.q_series
        # various interpolation functions are available:
        # https://org-technology.com/posts/univariate-interpolation.html
func = interpolate.interp1d(tt, qq)
# func = interpolate.interp1d(tt, qq, kind="quadratic")
q = float(func(t))
# q = float(q.astype(np.float64))
# print(q)
# print(type(q))
return q
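# Standalone sketch (added; no iric dependency) of the interpolation used in
# get_upstream_q: linear interpolation of a small discharge hydrograph.
# t_demo = np.array([0., 3600., 7200.])
# q_demo = np.array([10., 50., 30.])
# f = interpolate.interp1d(t_demo, q_demo)
# float(f(1800.))  # -> 30.0, halfway between 10 and 50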
|
python
|
from django.urls import path
from . import views
urlpatterns = [
    path('', views.index, name='homePage'),
    path('profile/<username>/', views.profile, name='profile'),
    path('profile/<username>/update', views.edit_profile, name='update'),
    path('category/<category>/', views.category, name='category'),
    path('product/<id>', views.product, name='product'),
    path('search/', views.search, name='search'),
    path('cart', views.cart, name='cart'),
    path('add_cart/<id>/', views.add, name='add_cart'),
    path('remove_cart/<id>/', views.remove, name='remove'),
    path('payment', views.payment, name='payment'),
    path('success', views.success, name='success'),
]
|
python
|
"""
Leetcode #300
"""
from typing import List
import bisect
class Solution:
def lengthOfLIS(self, nums: List[int]) -> int:
inc = [float("inf")] * len(nums)
size = 0
for num in nums:
i = bisect.bisect_left(inc, num)
inc[i] = num
size = max(i+1, size)
return size
if __name__ == "__main__":
assert Solution().lengthOfLIS([10,9,2,5,3,7,101,18]) == 4
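    # Added checks (hand-verified standard cases):
    assert Solution().lengthOfLIS([0, 1, 0, 3, 2, 3]) == 4
    assert Solution().lengthOfLIS([7, 7, 7, 7, 7, 7, 7]) == 1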
|
python
|
import random
from multiprocessing import Pool
from gym_puyopuyo.agent import tree_search_actions
from gym_puyopuyo.state import State
def benchmark(depth, threshold, factor):
state = State(16, 8, 5, 3, tsu_rules=False)
total_reward = 0
for i in range(1000):
if i % 100 == 0:
print(i, "/ 1000")
actions = tree_search_actions(state, depth, occupation_threshold=threshold, factor=factor)
action = random.choice(actions)
reward = state.step(*state.actions[action])
total_reward += reward
if reward < 0:
return total_reward, True
return total_reward, False
if __name__ == "__main__":
argss = []
for t in [0.7, 0.75, 0.8, 0.85]:
for f in [15, 18, 20, 25]:
argss.append((3, t, f))
with Pool() as p:
results = p.starmap(benchmark, argss)
for result, args in zip(results, argss):
reward, died = result
if not died:
print(reward, args)
|
python
|
import json
import csv
import sys
from pathlib import Path
import datetime
import pytest
import networkx as nx
from sb.aws_trace_analyzer import CSV_FIELDS, extract_trace_breakdown, longest_path, create_span_graph, duration, get_sorted_children, is_async_call, call_stack # noqa: E501
def test_get_sorted_children():
G = nx.DiGraph()
# Example inspired from the matrix multiplication app
# where two spans (sub1, sub2) have the same end_time (end) but
# sub1 just starts 1ms earlier (start1). Timeline: start1<end1=start2=end2
start1 = 1613573901.68
start2 = 1613573901.681
end = 1613573901.681
root_id = 'root_id'
sub1_id = 'sub1'
sub2_id = 'sub2'
G.add_node(root_id)
# Adding sub2 first
G.add_node('sub2', **{'doc': {'start_time': start2, 'end_time': end}})
G.add_edge(root_id, sub2_id)
# Adding sub1 second
G.add_node('sub1', **{'doc': {'start_time': start1, 'end_time': end}})
G.add_edge(root_id, sub1_id)
succ_ids = list(G.successors(root_id))
# Should have wrong order by default
assert succ_ids == [sub2_id, sub1_id]
assert get_sorted_children(G, root_id) == ['sub1', 'sub2']
def test_is_async_call_async():
parent = {'end_time': 1624353531.865}
child = {'end_time': 1624353532.865}
assert is_async_call(parent, child)
def test_is_async_call_sync():
parent = {'end_time': 1624353531.865}
child = {'end_time': 1624353531.865}
assert not is_async_call(parent, child)
def test_is_async_call_sync_with_margin():
"""Case of 999 microsecond margin.
Source: exp31/realworld-dynamodb-lambda/logs/2021-04-30_14-52-50"""
parent = {'end_time': 1624353531.865}
child = {'end_time': 1624353531.8654525}
assert not is_async_call(parent, child)
def test_is_async_call_sync_with_margin_larger():
"""Case of 1001 microsecond margin.
Source: exp31/faas-migration-go/aws/logs/2021-04-30_09-06-52"""
parent = {'end_time': 1619774396.626}
child = {'end_time': 1619774396.627}
assert not is_async_call(parent, child)
def test_is_async_call_async_with_margin():
"""Edge case beyond the 1001 microsecond margin.
It should detect an async call when exceeding the margin."""
parent = {'end_time': 1619774396.626}
child = {'end_time': 1619774396.6271}
assert is_async_call(parent, child)
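# Minimal sketch (inferred from the tests above; not the actual
# sb.aws_trace_analyzer implementation): a child call counts as asynchronous
# when it outlives its parent by more than a small clock-inaccuracy margin.
ASYNC_MARGIN_SECONDS_SKETCH = 0.001001
def is_async_call_sketch(parent, child):
    return child['end_time'] - parent['end_time'] > ASYNC_MARGIN_SECONDS_SKETCH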
def traces_path(app):
"""Returns the path to the traces.json for a given app name."""
tests_path = Path(__file__).parent.parent
sub_path = f"fixtures/aws_trace_analyzer/{app}/traces.json"
return (tests_path / sub_path).resolve()
def assert_trace_breakdown(t_path, expected_breakdown):
"""Compares the trace breakdown from a single trace against
a given expected_breakdown.
Caveat: Supports only a single trace"""
with open(t_path) as json_file:
trace = json.load(json_file)
trace_breakdown = extract_trace_breakdown(trace)
assert trace_breakdown == expected_breakdown
def print_csv(trace_breakdown):
"""Debugging helper that prints a trace breakdown as CSV output"""
trace_writer = csv.writer(sys.stdout, quoting=csv.QUOTE_MINIMAL)
headers = CSV_FIELDS
trace_writer.writerow(headers)
trace_writer.writerow(trace_breakdown)
def test_extract_trace_breakdown_thumbnail_app():
"""Tests the most studied execution of the thumbnail app.
See fixtures/thumbnail_app for additional visualizations.
"""
expected_breakdown = ['1-5fbcfc1f-4d2e9bed6dc0c41c39dfdb2f', 1606220832.0, 1606220846.963, datetime.timedelta(seconds=14, microseconds=963000), 'https://gcz7l3ixlb.execute-api.us-east-1.amazonaws.com/production/upload', 2, 0, 0, 0, ['AWS::ApiGateway::Stage', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::S3::Bucket', 'AWS::S3::Bucket', 'AWS::S3::Bucket', 'AWS::S3::Bucket'], ['production-thumbnail-generator/production', 'Lambda', 'thumbnail-generator-production-upload', 'thumbnail-generator-production-upload', 'Initialization', 'S3', 'S3', 'S3', 'S3', 'thumbnail-generator-production-thumbnail-generator', 'Dwell Time', 'Attempt #1', 'thumbnail-generator-production-thumbnail-generator', 'Initialization', 'S3', 'S3', 'S3', 'S3'], datetime.timedelta(microseconds=86000), datetime.timedelta(seconds=1, microseconds=99000), datetime.timedelta(microseconds=771000), datetime.timedelta(seconds=4, microseconds=142000), datetime.timedelta(seconds=5, microseconds=501000), datetime.timedelta(microseconds=59000), None, datetime.timedelta(seconds=3, microseconds=305000), datetime.timedelta(0)] # noqa: E501
tp = traces_path('thumbnail_app')
assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_breakdown_thumbnail_app_warm():
expected_breakdown = ['1-6049e32a-49e6a9866fc15c8e30479d09', 1615455019.055, 1615455027.436, datetime.timedelta(seconds=8, microseconds=381000), 'https://d574arqmjg.execute-api.us-east-1.amazonaws.com/prod/upload', 1, 0, 0, 0, ['AWS::Lambda::Function', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::ApiGateway::Stage', 'AWS::Lambda', 'AWS::S3::Bucket', 'AWS::S3::Bucket', 'AWS::S3::Bucket'], ['prod-thumbnail-generator/prod', 'Lambda', 'thumbnail-generator-prod-upload', 'thumbnail-generator-prod-upload', 'S3', 'S3', 'thumbnail-generator-prod-thumbnail-generator', 'Dwell Time', 'Attempt #1', 'thumbnail-generator-prod-thumbnail-generator', 'Initialization', 'S3', 'S3', 'S3', 'S3'], datetime.timedelta(microseconds=40000), datetime.timedelta(seconds=1, microseconds=44000), datetime.timedelta(microseconds=345000), datetime.timedelta(seconds=1, microseconds=29000), datetime.timedelta(seconds=4, microseconds=310000), datetime.timedelta(microseconds=76000), None, datetime.timedelta(seconds=1, microseconds=537000), datetime.timedelta(0)] # noqa: E501
tp = traces_path('thumbnail_app_warm')
assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_breakdown_thumbnail_app_missing_root():
"""
The root segment of the trace (6f58e0a0bce69065) is missing and created empty through
the parent_id of the child node (60f93765ebcf2a58). This invalidates the trace duration
because another node with the earliest start time is chosen as new root.
Source:
lg3/ec2-user/faas-migration/ThumbnailGenerator/Lambda/logs/2021-04-29_23-40-56
"""
# Potential partial result (if we plan to support this, missing external_services!)
expected_breakdown = ['1-608b4550-1929270a067637cfd701f545', 1619739984.606, 1619739985.731, datetime.timedelta(seconds=1, microseconds=125000), None, 0, 0, 0, 0, ['AWS::Lambda', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda::Function', 'AWS::S3::Bucket', 'AWS::S3::Bucket', 'AWS::S3::Bucket'], ['thumbnail-generator-dev-upload', 'thumbnail-generator-dev-upload', 'S3', 'S3', 'thumbnail-generator-dev-thumbnail-generator', 'Dwell Time', 'Attempt #1', 'thumbnail-generator-dev-thumbnail-generator', 'S3', 'S3', 'S3', 'S3'], datetime.timedelta(microseconds=17000), datetime.timedelta(microseconds=800000), None, None, datetime.timedelta(microseconds=123000), datetime.timedelta(microseconds=49000), None, datetime.timedelta(0)] # noqa: E501
tp = traces_path('thumbnail_app_missing_root')
with pytest.raises(Exception) as e:
assert_trace_breakdown(tp, expected_breakdown)
assert str(e.value) == 'Incomplete trace 1-608b4550-1929270a067637cfd701f545 because the parent node 6f58e0a0bce69065 of node 60f93765ebcf2a58 is empty.' # noqa: E501
def test_extract_trace_breakdown_thumbnail_app_in_progress():
"""
The segment 55b8cdd122595924 is in_progress and therefore misses its 'end_time'.
Source:
lg3/ec2-user/faas-migration/ThumbnailGenerator/Lambda/logs/2021-04-29_23-40-56
"""
expected_breakdown = ['1-608b4565-558f8ff60404afa17feb278c', 1619739984.606, 1619739985.731, datetime.timedelta(seconds=1, microseconds=125000), None, 0, 0, 0, 0, ['AWS::Lambda', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda::Function', 'AWS::S3::Bucket', 'AWS::S3::Bucket', 'AWS::S3::Bucket'], ['thumbnail-generator-dev-upload', 'thumbnail-generator-dev-upload', 'S3', 'S3', 'thumbnail-generator-dev-thumbnail-generator', 'Dwell Time', 'Attempt #1', 'thumbnail-generator-dev-thumbnail-generator', 'S3', 'S3', 'S3', 'S3'], datetime.timedelta(microseconds=17000), datetime.timedelta(microseconds=800000), None, None, datetime.timedelta(microseconds=123000), datetime.timedelta(microseconds=49000), None, datetime.timedelta(0)] # noqa: E501
tp = traces_path('thumbnail_app_in_progress')
with pytest.raises(Exception) as e:
assert_trace_breakdown(tp, expected_breakdown)
assert str(e.value) == 'Subsegment 55b8cdd122595924 in progress.'
def test_extract_trace_breakdown_thumbnail_app_fault():
"""
The segment 514db1f2511b92cf has a fault and returned HTTP status 500.
Source:
lg4/ec2-user/faas-migration/ThumbnailGenerator/Lambda/logs/2021-04-30_11-49-21
"""
expected_breakdown = ['1-608bf96d-9be20cff02f9e96ff14ae178', 1619786093.459, 1619786093.469, datetime.timedelta(microseconds=10000), 'https://8vtxzfmw67.execute-api.us-west-2.amazonaws.com/dev/upload', 0, 0, 0, 1, ['AWS::ApiGateway::Stage', 'AWS::Lambda'], ['dev-thumbnail-generator/dev', 'Lambda', 'Lambda'], datetime.timedelta(microseconds=10000), None, None, None, None, None, None, None, datetime.timedelta(0)] # noqa: E501
tp = traces_path('thumbnail_app_fault')
assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_breakdown_thumbnail_app_error_fault_throttle():
"""Trace with errors, faults, and throttle
Source:
lg4/ec2-user/faas-migration/ThumbnailGenerator/Lambda/logs/2021-04-30_02-58-28
"""
expected_breakdown = ['1-608b7926-687718151c26192849c3d020', 1619753254.117, 1619753261.239, datetime.timedelta(seconds=7, microseconds=122000), 'https://ldblsc9z0j.execute-api.us-west-2.amazonaws.com/dev/upload', 0, 3, 2, 3, ['AWS::Lambda::Function', 'AWS::Lambda', 'AWS::ApiGateway::Stage', 'AWS::S3::Bucket'], ['dev-thumbnail-generator/dev', 'Lambda', 'thumbnail-generator-dev-upload', 'thumbnail-generator-dev-upload', 'S3', 'S3'], datetime.timedelta(microseconds=29000), None, None, None, datetime.timedelta(microseconds=115000), None, None, datetime.timedelta(seconds=6, microseconds=978000), datetime.timedelta(0)] # noqa: E501
tp = traces_path('thumbnail_app_error_fault_throttle')
assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_breakdown_matrix_app():
expected_breakdown = ['1-602d2f0b-7bd8e768607f9c8200690500', 1613573899.167, 1613573911.234, datetime.timedelta(seconds=12, microseconds=67000), 'https://tf51nutw60.execute-api.us-east-1.amazonaws.com/prod/run', 5, 0, 0, 0, ['AWS::Lambda::Function', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::ApiGateway::Stage', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::StepFunctions::StateMachine'], ['prod-matrix-mul/prod', 'STEPFUNCTIONS', 'MatrixMul', 'CreateMatrix', 'Lambda', 'matrix-mul-prod-create_matrix', 'matrix-mul-prod-create_matrix', 'Initialization', 'ChooseVariant', 'AppendWorkerCount', 'DistributeWork', 'Lambda', 'matrix-mul-prod-paralell_mul_scheduler', 'matrix-mul-prod-paralell_mul_scheduler', 'Initialization', 'ParallelMul', 'Branch 2', 'AssignWorkerID3', 'MulWorker3', 'Lambda', 'matrix-mul-prod-mul_worker', 'matrix-mul-prod-mul_worker', 'Initialization', 'BuildResult', 'Lambda', 'matrix-mul-prod-result_builder', 'matrix-mul-prod-result_builder', 'Initialization', 'GenReport', 'Lambda', 'matrix-mul-prod-build_report', 'matrix-mul-prod-build_report', 'Initialization'], datetime.timedelta(microseconds=399000), datetime.timedelta(0), datetime.timedelta(seconds=2, microseconds=359000), datetime.timedelta(seconds=1, microseconds=243000), datetime.timedelta(seconds=8, microseconds=66000), None, None, None, datetime.timedelta(0)] # noqa: E501
tp = traces_path('matrix_app')
assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_model_training_app():
""" Different timestamp granularity at two places:
1) AWS::Lambda segment (03c544ca90d5515d) with end_time 1624353531.865
and the AWS::Lambda::Function segment (2d1ac1631da8de72) with end_time 1624353531.8654525
The microsecond-based timestamp is +0.0004525s later and hence counts as async invocation
because the child node has a later start time than the parent node. This is incorrect and
left 1547 microseconds missing at the end of the trace when validating against the duration.
This issue was fixed by adding a margin for imprecise timestamps (i.e., epsilon) in the
is_async_invocation heuristic.
2) The top-level API gateway end_time only has milliseconds (0.8669999 => 0.867)
Source:
exp31/serverless-faas-workbench/aws/cpu-memory/model_training/logs/2021-06-22_11-18-22
"""
expected_breakdown = ['1-60d1aaf2-671e23cc0b33e597b9728177', 1624353522.835, 1624353531.867, datetime.timedelta(seconds=9, microseconds=32000), 'https://fabi09ztfd.execute-api.us-east-1.amazonaws.com/dev/train', 0, 0, 0, 0, ['AWS::Lambda::Function', 'AWS::Lambda', 'AWS::ApiGateway::Stage', 'AWS::S3::Bucket', 'AWS::S3::Bucket'], ['dev-model-training/dev', 'Lambda', 'model-training-dev-model-training', 'model-training-dev-model-training', 'Invocation', 'S3', 'S3', 'S3', 'S3', 'Overhead'], datetime.timedelta(microseconds=17530), None, None, None, datetime.timedelta(seconds=8, microseconds=818030), None, datetime.timedelta(microseconds=198), datetime.timedelta(microseconds=196242), datetime.timedelta(0)] # noqa: E501
tp = traces_path('model_training_app')
assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_realworld_app():
"""Official duration uses less accurate timestamp for start_time:
0:00:00.035000 (official) vs 0:00:00.035189 (end_time - start_time).
Fixed using a timestamp margin to ignore differences below a threshold (e.g., 1ms).
Source:
exp31/realworld-dynamodb-lambda/logs/2021-04-30_14-52-50
"""
expected_breakdown = ['1-608c1bd4-8ceba73b6799988cd7aaee1a', 1619794900.85, 1619794900.8851886, datetime.timedelta(microseconds=35000), 'https://538uury0ga.execute-api.eu-west-1.amazonaws.com/dev/api/articles/8c90798198-vvwthh/comments', 0, 0, 0, 0, ['AWS::ApiGateway::Stage', 'AWS::Lambda', 'AWS::Lambda::Function', 'AWS::DynamoDB::Table', 'AWS::DynamoDB::Table'], ['dev-realworld/dev', 'Lambda', 'realworld-dev-getComments', 'realworld-dev-getComments', 'Invocation', 'DynamoDB', 'DynamoDB', 'DynamoDB', 'DynamoDB', 'Overhead'], datetime.timedelta(microseconds=12043), None, None, None, datetime.timedelta(microseconds=3645), None, datetime.timedelta(microseconds=312), datetime.timedelta(microseconds=19000), datetime.timedelta(0)] # noqa: E501
tp = traces_path('realworld_app')
assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_realworld_app_margin():
"""
Fixed by using timestamp margin when comparing
* the trace duration from X-Ray (50ms) against
* the latency breakdown (49ms).
Source:
lg3/ec2-user/realworld-dynamodb-lambda/logs/2021-04-30_15-52-46
"""
expected_breakdown = ['1-608c2b95-da77705249832f8715f095de', 1619798933.143, 1619798933.1929998, datetime.timedelta(microseconds=50000), 'https://myz35jktl7.execute-api.eu-west-1.amazonaws.com/dev/api/articles/110b06f1f4-kg38cx', 0, 0, 0, 0, ['AWS::ApiGateway::Stage', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::DynamoDB::Table', 'AWS::DynamoDB::Table', 'AWS::DynamoDB::Table'], ['dev-realworld/dev', 'Lambda', 'realworld-dev-getArticle', 'realworld-dev-getArticle', 'Invocation', 'DynamoDB', 'DynamoDB', 'DynamoDB', 'DynamoDB', 'DynamoDB', 'DynamoDB', 'Overhead'], datetime.timedelta(microseconds=13087), None, None, None, datetime.timedelta(microseconds=4614), None, datetime.timedelta(microseconds=299), datetime.timedelta(microseconds=31000), datetime.timedelta(0)] # noqa: E501
tp = traces_path('realworld_app_margin')
assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_todo_app():
"""Reproduces a trace where the trace duration doesn't
match the latency breakdown due to clock inaccuracy
between the API gateway Lambda segment (045739a66c26a771, 1619774396.626)
and the AWS::Lambda segment (6fd8f7cf8343d129, 1619774396.627).
The API gateway synchronously invokes AWS::Lambda and should therefore end
later and not 1ms earlier.
Source:
exp31/faas-migration-go/aws/logs/2021-04-30_09-06-52
"""
expected_breakdown = ['1-608bcbbc-634420f19aa0dd283cbf7529', 1619774396.595, 1619774396.636, datetime.timedelta(microseconds=41000), 'https://bm0q7xberc.execute-api.eu-west-1.amazonaws.com/dev/lst', 0, 0, 0, 0, ['AWS::Lambda::Function', 'AWS::Lambda', 'AWS::ApiGateway::Stage', 'AWS::DynamoDB::Table'], ['dev-aws/dev', 'Lambda', 'aws-dev-lst', 'aws-dev-lst', 'DynamoDB', 'DynamoDB'], datetime.timedelta(microseconds=31000), None, None, None, datetime.timedelta(microseconds=1579), None, None, datetime.timedelta(microseconds=8421), datetime.timedelta(0)] # noqa: E501
tp = traces_path('todo_app')
assert_trace_breakdown(tp, expected_breakdown)
def test_longest_path_sync():
"""Scenario where a synchronous invocation is the longest path"""
start_time = 1619760991.000
s1_start = start_time
s2_start = start_time + 10
s2_end = start_time + 20
a_start = start_time + 25
s3_start = start_time + 30
a_end = start_time + 35
s3_end = start_time + 40
s1_end = start_time + 50
segments = [
('s1', s1_start, s1_end),
('s2', s2_start, s2_end),
('a', a_start, a_end),
('s3', s3_start, s3_end)
]
G = nx.DiGraph()
G.graph['start'] = 's1'
G.graph['end'] = 's1'
for (id, start_time, end_time) in segments:
s1 = {'id': id, 'start_time': start_time, 'end_time': end_time}
node_attr = {'doc': s1, 'duration': duration(s1)}
G.add_node(s1['id'], **node_attr)
G.add_edge('s1', 's2')
G.add_edge('s2', 'a')
G.add_edge('s1', 's3')
G.graph['call_stack'] = call_stack(G, 's1')
assert ['s1', 's2', 's3'] == longest_path(G, 's1')
def test_longest_path_async():
"""Scenario where an asynchronous invocation is the longest path"""
start_time = 1619760991.000
s1_start = start_time
s2_start = start_time + 10
s2_end = start_time + 20
s_start = start_time + 30
s_end = start_time + 40
s1_end = start_time + 50
s3_start = start_time + 70
s3_end = start_time + 80
segments = [
('s1', s1_start, s1_end),
('s2', s2_start, s2_end),
('s', s_start, s_end),
('s3', s3_start, s3_end)
]
G = nx.DiGraph()
for (id, start_time, end_time) in segments:
s1 = {'id': id, 'start_time': start_time, 'end_time': end_time}
node_attr = {'doc': s1, 'duration': duration(s1)}
G.add_node(s1['id'], **node_attr)
G.add_edge('s1', 's2')
G.add_edge('s1', 's')
G.add_edge('s2', 's3')
G.graph['call_stack'] = call_stack(G, 's3')
assert ['s1', 's2', 's3'] == longest_path(G, 's1')
def test_longest_path_event_processing_app():
"""Reproduces an issue where the last returning child
was appended to the longest path although not being part of it.
Specifically, the overhead node `0d431` was appended at the end of the
longest path but should not be part of it because the async transition
    from SNS `3d46` to the format function `6d05` constitutes a longer path.
Source:
exp31/faas-migration/Event-Processing/Lambda/logs/2021-04-30_05-34-22
"""
# Manually validated based on trace map, timestamp inspection, and
# comparison against networkx implementation of dag_longest_path
expected_path = ['59d284e254912526', '7ddca1046ef1985c', '5d98752257e51041', '4c57c76218613840', 'ce71a9e6624497a6', '62a5b8bdc147d7dd', '3d46a9ec2871f006', '6d05055c18416f23', '09b064d0dfd77159', '1ac8a21aee22b4e3', '206c8bde4844d1da', '0968878f47f64916', '4e389a109ab7353c', '1a3fbbb81821e5dd', '02b8700a2f5e645f', 'e4546d09dde35985'] # noqa: E501
tp = traces_path('event_processing_app')
with open(tp) as json_file:
trace = json.load(json_file)
G = create_span_graph(trace)
assert G.graph['longest_path'] == expected_path
def test_extract_trace_event_processing_app():
"""Reproduces a trace with a validation error on the trace duration:
"Trace duration 0:00:00.125000 does not match latency breakdown 0:00:00.047000
within margin 0:00:00.001001."
Source:
exp31/faas-migration/Event-Processing/Lambda/logs/2021-04-30_05-34-22
"""
expected_breakdown = ['1-608b975f-82c9cf3915cf8d7c1093ada7', 1619760991.873, 1619760991.998, datetime.timedelta(microseconds=125000), 'https://aqk7l5ytj2.execute-api.eu-west-1.amazonaws.com/dev/ingest', 0, 0, 0, 0, ['AWS::Lambda', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::SNS', 'AWS::ApiGateway::Stage', 'AWS::Lambda::Function', 'AWS::SQS::Queue'], ['dev-event-processing/dev', 'Lambda', 'event-processing-dev-ingest', 'event-processing-dev-ingest', 'Invocation', 'SNS', 'SNS', 'event-processing-dev-format_state_change', 'Dwell Time', 'Attempt #1', 'event-processing-dev-format_state_change', 'Invocation', 'SQS', 'SQS', 'QueueTime', 'Overhead'], datetime.timedelta(microseconds=25523), datetime.timedelta(microseconds=11000), None, None, datetime.timedelta(microseconds=1807), datetime.timedelta(microseconds=32000), datetime.timedelta(microseconds=670), datetime.timedelta(microseconds=54000), datetime.timedelta(0)] # noqa: E501
tp = traces_path('event_processing_app')
assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_hello_retail_app_error():
"""Reproduces a trace with the error:
"Task Timed Out:
'arn:aws:states:eu-west-1:0123456789012:activity:dev-hello-retail-product-photos-receive"
Source:
exp31/hello-retail/logs/2021-04-30_14-15-59
"""
expected_breakdown = ['1-608c1487-8892c4a7bc5e24a223902a15', 1619793031.301, 1619793031.47, datetime.timedelta(microseconds=169000), 'https://luokbyeogl.execute-api.eu-west-1.amazonaws.com/dev/sms', 0, 2, 0, 2, ['AWS::Lambda::Function', 'AWS::Lambda', 'AWS::ApiGateway::Stage', 'AWS::DynamoDB::Table', 'AWS::S3::Bucket', 'AWS::stepfunctions'], ['dev-hello-retail-product-photos-receive/dev', 'Lambda', 'hello-retail-product-photos-receive-dev-receive', 'hello-retail-product-photos-receive-dev-receive', 'Invocation', 'DynamoDB', 'DynamoDB', 'S3', 'S3', 'stepfunctions', 'stepfunctions', 'Overhead'], datetime.timedelta(microseconds=52829), None, None, None, datetime.timedelta(microseconds=6511), None, datetime.timedelta(microseconds=660), datetime.timedelta(microseconds=109000), datetime.timedelta(0)] # noqa: E501
tp = traces_path('hello_retail_app_error')
assert_trace_breakdown(tp, expected_breakdown)
def test_extract_trace_image_processing_app_error():
"""Reproduces a trace with the error:
"PhotoDoesNotMeetRequirementError"
Source:
exp31/aws-serverless-workshops/ImageProcessing/logs/2021-04-30_07-02-35
trace_id=1-608bac5f-feef4e053d4b9fa008fcb044
"""
expected_breakdown = ['1-608bac5f-feef4e053d4b9fa008fcb044', 1619766367.436, 1619766368.579, datetime.timedelta(seconds=1, microseconds=143000), 'https://s47zgw7ake.execute-api.eu-west-1.amazonaws.com/execute/', 0, 4, 0, 1, ['AWS::Lambda', 'AWS::StepFunctions::StateMachine', 'AWS::Lambda::Function', 'AWS::Lambda', 'AWS::ApiGateway::Stage', 'AWS::Lambda::Function', 'AWS::rekognition'], ['APIGatewayToStepFunctions/execute', 'STEPFUNCTIONS', 'RiderPhotoProcessing-8gfRn3qHdsBb', 'FaceDetection', 'Lambda', 'wildrydes-FaceDetectionFunction-UB72KZMWRLCF', 'wildrydes-FaceDetectionFunction-UB72KZMWRLCF', 'Invocation', 'rekognition', 'rekognition', 'Overhead', 'PhotoDoesNotMeetRequirement', 'Lambda', 'wildrydes-NotificationPlaceholderFunction-KDTBMSLPJ0O2', 'wildrydes-NotificationPlaceholderFunction-KDTBMSLPJ0O2', 'Invocation', 'Overhead'], datetime.timedelta(microseconds=106497), datetime.timedelta(0), None, None, datetime.timedelta(microseconds=4818), None, datetime.timedelta(microseconds=685), datetime.timedelta(seconds=1, microseconds=31000), datetime.timedelta(0)] # noqa: E501
tp = traces_path('image_processing_app_error')
assert_trace_breakdown(tp, expected_breakdown)
# def test_extract_tmp_visualizer():
# """Just a tmp case for creating visualizer data
# """
# expected_breakdown = [] # noqa: E501
# tp = traces_path('long_trigger1')
# assert_trace_breakdown(tp, expected_breakdown)
|
python
|
# -*- coding: utf-8 -*-
import math
import numpy as np
import numpy.random as npr
import cv2
# from matplotlib.colors import rgb_to_hsv
# from matplotlib.colors import hsv_to_rgb
from configure import cfg
import utils.blob
# from caffe.io import resize_image
def GenerateBatchSamples(roi, img_shape):
sampled_bboxes = []
for i in range(len(cfg.TRAIN.batch_sampler)):
sampled_bboxes_this = GenerateSamples(roi, cfg.TRAIN.batch_sampler[i],
img_shape)
sampled_bboxes.extend(sampled_bboxes_this)
return sampled_bboxes
def GenerateSamples(roi, batch_sampler, img_shape):
found = 0
sampled_bboxes = []
for i in range(batch_sampler.max_trials):
        if found >= batch_sampler.max_sample:  # stop once max_sample boxes have been collected
return sampled_bboxes
# Generate sampled_bbox in the normalized space [0, 1].
sampled_bbox = SampleBBox(batch_sampler.sampler, img_shape)
if SatisfySampleConstraint(sampled_bbox, roi,
batch_sampler.sample_constraint):
found = found + 1
sampled_bboxes.append(sampled_bbox)
return sampled_bboxes
def SampleBBox(sampler, img_shape):
# Get random scale.
assert sampler.max_scale >= sampler.min_scale
assert sampler.min_scale > 0.0
assert sampler.max_scale <= 1.0
scale = npr.uniform(sampler.min_scale, sampler.max_scale)
# Get random aspect ratio.
assert sampler.max_aspect_ratio >= sampler.min_aspect_ratio
assert sampler.min_aspect_ratio > 0.0
assert sampler.max_aspect_ratio < 10000
aspect_ratio = npr.uniform(sampler.min_aspect_ratio,
sampler.max_aspect_ratio)
aspect_ratio = max(aspect_ratio, 1.0 * math.pow(scale, 2.0))
aspect_ratio = min(aspect_ratio, 1.0 / math.pow(scale, 2.0))
# Figure out bbox dimension.
bbox_width = scale * math.sqrt(aspect_ratio)
bbox_height = scale / math.sqrt(aspect_ratio)
# Figure out top left coordinates.
h_off = npr.uniform(0.0, 1.0 - bbox_height)
w_off = npr.uniform(0.0, 1.0 - bbox_width)
#---------------------------------------
bbox_height = bbox_height * img_shape[0]
bbox_width = bbox_width * img_shape[1]
h_off = h_off * img_shape[0]
w_off = w_off * img_shape[1]
assert bbox_width > 0
assert bbox_height > 0
sampled_bbox = np.array(
[w_off, h_off, w_off + bbox_width, h_off + bbox_height],
dtype=np.uint16)
sampled_bbox[0] = min(max(sampled_bbox[0], 0), img_shape[1] - 1)
sampled_bbox[1] = min(max(sampled_bbox[1], 0), img_shape[0] - 1)
sampled_bbox[2] = min(
max(sampled_bbox[2], sampled_bbox[0]), img_shape[1] - 1)
sampled_bbox[3] = min(
max(sampled_bbox[3], sampled_bbox[1]), img_shape[0] - 1)
assert sampled_bbox[0] <= sampled_bbox[2]
assert sampled_bbox[1] <= sampled_bbox[3]
return sampled_bbox
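# Worked example (added; hypothetical sampler values): with scale=0.5 and
# aspect_ratio=2.0, bbox_width = 0.5*sqrt(2) ~ 0.707 and
# bbox_height = 0.5/sqrt(2) ~ 0.354 in normalized units, so
# width*height == scale**2 == 0.25: the aspect ratio reshapes the box while
# the scale fixes its area, before mapping into pixel coordinates.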
def SatisfySampleConstraint(sampled_bbox, roi, sample_constraint):
# Check constraints.
found = False
roi_num = roi.shape[0]
for i in range(roi_num):
this_roi = roi[i, :]
jaccard_overlap = JaccardOverlap(sampled_bbox, this_roi)
if jaccard_overlap < sample_constraint.min_jaccard_overlap:
continue
if jaccard_overlap > sample_constraint.max_jaccard_overlap:
continue
return True
return False
def JaccardOverlap(bbox1, bbox2):
intersect_bbox = IntersectBBox(bbox1, bbox2)
intersect_width = intersect_bbox[2] - intersect_bbox[0] + 1
intersect_height = intersect_bbox[3] - intersect_bbox[1] + 1
if intersect_width > 0 and intersect_height > 0:
intersect_size = intersect_width * intersect_height
bbox1_size = BBoxSize(bbox1)
bbox2_size = BBoxSize(bbox2)
return 1.0 * intersect_size / (
bbox1_size + bbox2_size - intersect_size)
else:
return 0.0
def IntersectBBox(bbox1, bbox2):
if bbox2[0] > bbox1[2] or bbox2[2] < bbox1[0] or bbox2[1] > bbox1[3] or bbox2[3] < bbox1[1]:
# Return [0, 0, 0, 0] if there is no intersection.
# intersect_bbox=[0.0,0.0,0.0,0.0]
intersect_bbox = [-1.0, -1.0, -1.0, -1.0]
else:
intersect_bbox = [
max(bbox1[0], bbox2[0]),
max(bbox1[1], bbox2[1]),
min(bbox1[2], bbox2[2]),
min(bbox1[3], bbox2[3])
]
return intersect_bbox
def BBoxSize(bbox):
if (bbox[2] < bbox[0] or bbox[3] < bbox[1]):
# If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0.
return 0.0
else:
width = bbox[2] - bbox[0]
height = bbox[3] - bbox[1]
return (width + 1) * (height + 1)
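# Added sanity example (hand-computed, using the inclusive +1 pixel convention
# above): two 10x10 boxes overlapping in a 5x5 corner.
# JaccardOverlap([0, 0, 9, 9], [5, 5, 14, 14])
#   intersection = [5, 5, 9, 9] -> 5 * 5 = 25 pixels
#   union = 100 + 100 - 25 = 175 -> IoU = 25 / 175 ~ 0.143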
def Crop(img, crop_bbox):
img_shape = img.shape
# x1 = 1.0 * crop_bbox[0] * img_shape[1]
# y1 = 1.0 * crop_bbox[1] * img_shape[0]
# x2 = 1.0 * crop_bbox[2] * img_shape[1]
# y2 = 1.0 * crop_bbox[3] * img_shape[0]
x1 = crop_bbox[0]
y1 = crop_bbox[1]
x2 = crop_bbox[2]
y2 = crop_bbox[3]
assert x1 >= 0, x1
assert y1 >= 0, y1
assert x2 <= img_shape[1], '{} vs {}'.format(x2, img_shape[1])
assert y2 <= img_shape[0], '{} vs {}'.format(y2, img_shape[0])
crop_img = img[y1:y2 + 1, x1:x2 + 1, :]
return crop_img
def MeetEmitConstraint(src_bbox, bbox):
x_center = 1.0 * (bbox[0] + bbox[2]) / 2
y_center = 1.0 * (bbox[1] + bbox[3]) / 2
if x_center >= src_bbox[0] and x_center <= src_bbox[2] and y_center >= src_bbox[1] and y_center <= src_bbox[3]:
return True
else:
return False
def ApplyCrop(img):
if cfg.TRAIN.CROP <= 0:
img_height = img.shape[0]
img_width = img.shape[1]
return img, np.array(
(0, 0, img_width - 1, img_height - 1), dtype=np.uint16)
img_shape = np.array(img.shape)
crop_dims = img_shape[:2] * cfg.TRAIN.CROP
# crop_dims = img_shape[:2] * 0.9
r0 = npr.random()
r1 = npr.random()
s = img_shape[:2] - crop_dims
s[0] *= r0
s[1] *= r1
# im_crop = np.array([s[0],
# s[1],
# s[0] + crop_dims[0] - 1,
# s[1] + crop_dims[1] - 1],
# dtype=np.uint16)
crop_bbox = np.array(
[s[1], s[0], s[1] + crop_dims[1] - 1, s[0] + crop_dims[0] - 1],
dtype=np.uint16)
crop_img = img[crop_bbox[1]:crop_bbox[3] + 1, crop_bbox[0]:
crop_bbox[2] + 1, :]
return crop_img, crop_bbox
def ApplyExpand(img):
img_shape = img.shape
prob = npr.random()
if prob > cfg.TRAIN.expand_prob:
return img, np.array(
(0, 0, img_shape[1], img_shape[0]), dtype=np.uint16)
if abs(cfg.TRAIN.max_expand_ratio - 1.) < 1e-2:
return img, np.array(
(0, 0, img_shape[1], img_shape[0]), dtype=np.uint16)
expand_ratio = npr.uniform(1, cfg.TRAIN.max_expand_ratio)
expand_img, expand_bbox = ExpandImage(img, expand_ratio)
return expand_img, expand_bbox
def ExpandImage(img, expand_ratio):
img_height = img.shape[0]
img_width = img.shape[1]
img_channels = img.shape[2]
# Get the bbox dimension.
height = int(img_height * expand_ratio)
width = int(img_width * expand_ratio)
h_off = npr.uniform(0, height - img_height)
w_off = npr.uniform(0, width - img_width)
h_off = int(h_off)
w_off = int(w_off)
expand_bbox = []
# expand_bbox.append(1.0 * (-w_off) / img_width)
# expand_bbox.append(1.0 * (-h_off) / img_height)
# expand_bbox.append(1.0 * (width - w_off) / img_width)
# expand_bbox.append(1.0 * (height - h_off) / img_height)
expand_bbox.append(-w_off)
expand_bbox.append(-h_off)
expand_bbox.append(width - w_off - 1)
expand_bbox.append(height - h_off - 1)
expand_bbox = np.array(expand_bbox)
expand_img = np.tile(cfg.PIXEL_MEANS, (height, width, 1)).astype(img.dtype)
expand_img[h_off:h_off + img_height, w_off:w_off + img_width, :] = img
return expand_img, expand_bbox
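# Illustrative example (hypothetical shapes): a (100, 100, 3) image expanded
# with expand_ratio=2.0 is pasted at a random offset inside a (200, 200, 3)
# canvas filled with cfg.PIXEL_MEANS; expand_bbox records the canvas extent in
# the original image's coordinate frame (hence the negative top-left offsets).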
def ApplyDistort_old(in_img):
hsv = cv2.cvtColor(in_img, cv2.COLOR_BGR2HSV)
s0 = npr.random() * (cfg.TRAIN.SATURATION - 1) + 1
s1 = npr.random() * (cfg.TRAIN.EXPOSURE - 1) + 1
# s0 = npr.random() * (1.5 - 1) + 1
# s1 = npr.random() * (1.5 - 1) + 1
s0 = s0 if npr.random() > 0.5 else 1.0 / s0
s1 = s1 if npr.random() > 0.5 else 1.0 / s1
hsv = np.array(hsv, dtype=np.float32)
hsv[:, :, 1] = np.minimum(s0 * hsv[:, :, 1], 255)
hsv[:, :, 2] = np.minimum(s1 * hsv[:, :, 2], 255)
hsv = np.array(hsv, dtype=np.uint8)
out_img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return out_img
def ApplyDistort(in_img):
prob = npr.random()
if prob > 0.5:
# Do random brightness distortion.
out_img = RandomBrightness(in_img, cfg.TRAIN.brightness_prob,
cfg.TRAIN.brightness_delta)
# cv2.imshow('0 RandomBrightness',out_img.astype(np.uint8))
# Do random contrast distortion.
out_img = RandomContrast(out_img, cfg.TRAIN.contrast_prob,
cfg.TRAIN.contrast_lower,
cfg.TRAIN.contrast_upper)
# cv2.imshow('1 RandomContrast',out_img.astype(np.uint8))
# Do random saturation distortion.
out_img = RandomSaturation(out_img, cfg.TRAIN.saturation_prob,
cfg.TRAIN.saturation_lower,
cfg.TRAIN.saturation_upper)
# cv2.imshow('2 RandomSaturation',out_img.astype(np.uint8))
# Do random exposure distortion.
out_img = RandomExposure(out_img, cfg.TRAIN.exposure_prob,
cfg.TRAIN.exposure_lower,
cfg.TRAIN.exposure_upper)
# cv2.imshow('3 RandomExposure',out_img.astype(np.uint8))
# Do random hue distortion.
out_img = RandomHue(out_img, cfg.TRAIN.hue_prob, cfg.TRAIN.hue_delta)
# cv2.imshow('4 RandomHue',out_img.astype(np.uint8))
# Do random reordering of the channels.
out_img = RandomOrderChannels(out_img, cfg.TRAIN.random_order_prob)
# cv2.imshow('5 RandomOrderChannels',out_img.astype(np.uint8))
else:
# Do random brightness distortion.
out_img = RandomBrightness(in_img, cfg.TRAIN.brightness_prob,
cfg.TRAIN.brightness_delta)
# cv2.imshow('0 RandomBrightness',out_img.astype(np.uint8))
# Do random saturation distortion.
out_img = RandomSaturation(out_img, cfg.TRAIN.saturation_prob,
cfg.TRAIN.saturation_lower,
cfg.TRAIN.saturation_upper)
# cv2.imshow('1 RandomSaturation',out_img.astype(np.uint8))
# Do random exposure distortion.
out_img = RandomExposure(out_img, cfg.TRAIN.exposure_prob,
cfg.TRAIN.exposure_lower,
cfg.TRAIN.exposure_upper)
# cv2.imshow('2 RandomExposure',out_img.astype(np.uint8))
# Do random hue distortion.
out_img = RandomHue(out_img, cfg.TRAIN.hue_prob, cfg.TRAIN.hue_delta)
# cv2.imshow('3 RandomHue',out_img.astype(np.uint8))
# Do random contrast distortion.
out_img = RandomContrast(out_img, cfg.TRAIN.contrast_prob,
cfg.TRAIN.contrast_lower,
cfg.TRAIN.contrast_upper)
# cv2.imshow('4 RandomContrast',out_img.astype(np.uint8))
# Do random reordering of the channels.
out_img = RandomOrderChannels(out_img, cfg.TRAIN.random_order_prob)
# cv2.imshow('5 RandomOrderChannels',out_img.astype(np.uint8))
return out_img
def convertTo(in_img, alpha, beta):
out_img = in_img.astype(np.float32)
out_img = out_img * alpha + beta
out_img = np.clip(out_img, 0, 255)
out_img = out_img.astype(in_img.dtype)
return out_img
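# Illustrative examples: convertTo computes clip(in_img * alpha + beta, 0, 255),
# so convertTo(img, 1.2, 0) stretches contrast by 1.2x while
# convertTo(img, 1.0, 30) brightens every pixel by 30.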
# def bgr_to_hsv(bgr):
# b, g, r = cv2.split(bgr)
# rgb = cv2.merge((r, g, b))
# hsv = rgb_to_hsv(rgb)
# return hsv
# def hsv_to_bgr(hsv):
# rgb = hsv_to_rgb(hsv)
# r, g, b = cv2.split(rgb)
# bgr = cv2.merge((b, g, r))
# return bgr
def RandomBrightness(in_img, brightness_prob, brightness_delta):
prob = npr.random()
if prob < brightness_prob:
        assert brightness_delta >= 0, "brightness_delta must be non-negative."
delta = npr.uniform(-brightness_delta, brightness_delta)
out_img = AdjustBrightness(in_img, delta)
else:
out_img = in_img
return out_img
def AdjustBrightness(in_img, delta):
if abs(delta) > 0:
# out_img = cv2.convertTo(in_img, 1, 1, delta)
out_img = convertTo(in_img, 1, delta)
else:
out_img = in_img
return out_img
def RandomContrast(in_img, contrast_prob, lower, upper):
prob = npr.random()
if prob < contrast_prob:
assert upper >= lower, 'contrast upper must be >= lower.'
assert lower >= 0, 'contrast lower must be non-negative.'
delta = npr.uniform(lower, upper)
out_img = AdjustContrast(in_img, delta)
else:
out_img = in_img
return out_img
def AdjustContrast(in_img, delta):
if abs(delta - 1.0) > 1e-3:
# out_img = cv2.convertTo(in_img, -1, delta, 0)
out_img = convertTo(in_img, delta, 0)
else:
out_img = in_img
return out_img
def RandomExposure(in_img, exposure_prob, lower, upper):
prob = npr.random()
if prob < exposure_prob:
assert upper >= lower, 'saturation upper must be >= lower.'
assert lower >= 0, 'saturation lower must be non-negative.'
delta = npr.uniform(lower, upper)
out_img = AdjustExposure(in_img, delta)
else:
out_img = in_img
return out_img
def AdjustExposure(in_img, delta):
    if abs(delta - 1.0) > 1e-3:
        # Convert to HSV colorspace.
        out_img = cv2.cvtColor(in_img, cv2.COLOR_BGR2HSV)
# out_img = bgr_to_hsv(in_img)
# Split the image to 3 channels.
h, s, v = cv2.split(out_img)
# Adjust the exposure.
# channels[2] = cv2.convertTo(channels[2], -1, delta, 0)
v = convertTo(v, delta, 0)
# out_img = cv2.merge((h, s, v))
out_img[:, :, 2] = v
# Back to BGR colorspace.
out_img = cv2.cvtColor(out_img, cv2.COLOR_HSV2BGR)
# out_img = hsv_to_bgr(out_img)
else:
out_img = in_img
return out_img
def RandomSaturation(in_img, saturation_prob, lower, upper):
prob = npr.random()
if prob < saturation_prob:
assert upper >= lower, 'saturation upper must be >= lower.'
assert lower >= 0, 'saturation lower must be non-negative.'
delta = npr.uniform(lower, upper)
out_img = AdjustSaturation(in_img, delta)
else:
out_img = in_img
return out_img
def AdjustSaturation(in_img, delta):
    if abs(delta - 1.0) > 1e-3:
        # Convert to HSV colorspace.
        out_img = cv2.cvtColor(in_img, cv2.COLOR_BGR2HSV)
# out_img = bgr_to_hsv(in_img)
# Split the image to 3 channels.
h, s, v = cv2.split(out_img)
# Adjust the saturation.
# channels[1] = cv2.convertTo(channels[1], -1, delta, 0)
s = convertTo(s, delta, 0)
# out_img = cv2.merge((h, s, v))
out_img[:, :, 1] = s
# Back to BGR colorspace.
out_img = cv2.cvtColor(out_img, cv2.COLOR_HSV2BGR)
# out_img = hsv_to_bgr(out_img)
else:
out_img = in_img
return out_img
def RandomHue(in_img, hue_prob, hue_delta):
prob = npr.random()
if prob < hue_prob:
assert hue_delta >= 0, 'hue_delta must be non-negative.'
delta = npr.uniform(-hue_delta, hue_delta)
out_img = AdjustHue(in_img, delta)
else:
out_img = in_img
return out_img
def AdjustHue(in_img, delta):
if abs(delta) > 0:
        # Convert to HSV colorspace.
out_img = cv2.cvtColor(in_img, cv2.COLOR_BGR2HSV)
# out_img = bgr_to_hsv(in_img)
# Split the image to 3 channels.
h, s, v = cv2.split(out_img)
# Adjust the hue.
# channels[0] = cv2.convertTo(channels[0], -1, 1, delta)
h = convertTo(h, 1, delta)
# out_img = cv2.merge((h, s, v))
out_img[:, :, 0] = h
# Back to BGR colorspace.
out_img = cv2.cvtColor(out_img, cv2.COLOR_HSV2BGR)
# out_img = hsv_to_bgr(out_img)
else:
out_img = in_img
return out_img
def RandomOrderChannels(in_img, random_order_prob):
prob = npr.random()
if prob < random_order_prob:
# Split the image to 3 channels.
        channels = list(cv2.split(in_img))
        assert len(channels) == 3
        # Shuffle in place; npr.shuffle returns None, so don't rebind.
        npr.shuffle(channels)
        out_img = cv2.merge(channels)
else:
out_img = in_img
return out_img
def prep_im_for_blob(im, pixel_means, target_size, max_size):
"""Mean subtract and scale an image for use in a blob."""
im = im.astype(np.float32, copy=False)
im -= pixel_means
im_shape = im.shape
#-------------------------------------------------------------
interp_mode = cv2.INTER_LINEAR
if len(cfg.TRAIN.INTERP_MODEL) > 0:
idx = npr.randint(len(cfg.TRAIN.INTERP_MODEL))
interp_name = cfg.TRAIN.INTERP_MODEL[idx]
if interp_name == 'LINEAR':
interp_mode = cv2.INTER_LINEAR
elif interp_name == 'AREA':
interp_mode = cv2.INTER_AREA
elif interp_name == 'NEAREST':
interp_mode = cv2.INTER_NEAREST
elif interp_name == 'CUBIC':
interp_mode = cv2.INTER_CUBIC
elif interp_name == 'LANCZOS4':
interp_mode = cv2.INTER_LANCZOS4
else:
            print 'Unknown interp mode: ', interp_name
exit(0)
# if len(cfg.TRAIN.INTERP_MODEL) > 0:
# interp_order = np.random.randint(0, 6)
if cfg.RESIZE_MODE == 'WARP':
im_scale_h = float(target_size) / float(im_shape[0])
im_scale_w = float(target_size) / float(im_shape[1])
im = cv2.resize(
im,
None,
None,
fx=im_scale_w,
fy=im_scale_h,
interpolation=interp_mode)
# im = resize_image(
# im, (target_size, target_size), interp_order=interp_order)
im_scales = [im_scale_h, im_scale_w]
elif cfg.RESIZE_MODE == 'FIT_SMALLEST':
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
im = cv2.resize(
im,
None,
None,
fx=im_scale,
fy=im_scale,
interpolation=interp_mode)
# im = resize_image(
# im, (im_shape[0] * im_scale, im_shape[1] * im_scale),
# interp_order=interp_order)
im_scales = [im_scale, im_scale]
else:
        print 'Unknown resize mode.'
exit()
return im, im_scales
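# Illustrative usage (hypothetical values): with cfg.RESIZE_MODE == 'WARP' and
# target_size=300, an image of shape (600, 400, 3) comes back resized to
# (300, 300, 3) with im_scales == [0.5, 0.75] (height scale, width scale).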
def get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
processed_ims = []
im_scale_factors = []
if cfg.RESIZE_MODE == 'WARP':
for target_size in cfg.TEST.SCALES:
im_scale_h = float(target_size) / float(im_shape[0])
im_scale_w = float(target_size) / float(im_shape[1])
im = cv2.resize(
im,
None,
None,
fx=im_scale_w,
fy=im_scale_h,
interpolation=cv2.INTER_LINEAR)
im_scale = [im_scale_h, im_scale_w]
im_scale_factors.append(im_scale)
processed_ims.append(im)
elif cfg.RESIZE_MODE == 'FIT_SMALLEST':
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(
im_orig,
None,
None,
fx=im_scale,
fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale = [im_scale, im_scale]
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = utils.blob.im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def normalize_img_roi(img_roi, img_shape):
roi_normalized = np.copy(img_roi)
roi_normalized[:, 0] = roi_normalized[:, 0] / img_shape[1]
roi_normalized[:, 1] = roi_normalized[:, 1] / img_shape[0]
roi_normalized[:, 2] = roi_normalized[:, 2] / img_shape[1]
roi_normalized[:, 3] = roi_normalized[:, 3] / img_shape[0]
return roi_normalized
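# Illustrative example (assuming img_roi is a float array, so the division is
# not truncated): with img_shape == (480, 640, 3), a row [64, 48, 320, 240]
# normalizes to [0.1, 0.1, 0.5, 0.5] (x by width, y by height).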
|
python
|
from django.urls import path
from .views import ResultListView, create_result, edit_results
urlpatterns = [
path("create/", create_result, name="create-result"),
path("edit-results/", edit_results, name="edit-results"),
path("view/all", ResultListView.as_view(), name="view-results"),
]
|
python
|
"""
Class for manipulating user information.
"""
from datetime import date, timedelta
from functools import total_ordering
from logging import getLogger
from os import stat
from os.path import exists
from re import compile as re_compile
from stat import (S_IMODE, S_ISDIR, S_ISREG)
from typing import (
Any, Collection, Dict, FrozenSet, Optional, NamedTuple, Set, Type, TypeVar,
Union)
from .constants import (
EPOCH, FIELD_PATTERN, REAL_NAME_MAX_LENGTH, UID_MIN, UID_MAX)
from .entity import Entity
from .utils import parse_opt_int
# pylint: disable=C0103
log = getLogger(__name__)
class UserTuple(NamedTuple):
"""
UserTuple(NamedTuple)
Holds the data for a User object in an immutable format.
"""
name: str
uid: int
gid: int
real_name: str
home: str
shell: str
password: Optional[str]
last_password_change_date: Optional[date]
password_age_min_days: Optional[int]
password_age_max_days: Optional[int]
password_warn_days: Optional[int]
password_disable_days: Optional[int]
account_expire_date: Optional[date]
ssh_public_keys: FrozenSet[str]
modified: bool
U = TypeVar("U", bound="User")
@total_ordering
class User(Entity):
"""
User object for holding data about a single user entry in the /etc/passwd
and /etc/shadow files.
"""
# pylint: disable=W0201,R0902
def __init__( # pylint: disable=R0913,R0914
self, name: str, uid: int, gid: int, real_name: str, home: str,
shell: str, password: Optional[str] = None,
last_password_change_date: Optional[date] = None,
password_age_min_days: Optional[int] = None,
password_age_max_days: Optional[int] = None,
password_warn_days: Optional[int] = None,
password_disable_days: Optional[int] = None,
account_expire_date: Optional[date] = None,
ssh_public_keys: Optional[Set[str]] = None,
modified: bool = False) -> None:
"""
User(
name: str, uid: int, gid: int, real_name: str, home: str, shell: str,
password: Optional[str] = None,
last_password_change_date: Optional[date] = None,
password_age_min_days: Optional[int] = None,
password_age_max_days: Optional[int] = None
password_warn_days: Optional[int] = None,
password_disable_days: Optional[int] = None,
account_expire_date: Optional[date] = None,
ssh_public_keys: Optional[str] = None, modified: bool = False) -> User
Create a new User object.
"""
super(User, self).__init__(name=name, gid=gid, password=password, modified=modified)
self.name = name
self.uid = uid
self.real_name = real_name
self.home = home
self.shell = shell
self.last_password_change_date = last_password_change_date
self.password_age_min_days = password_age_min_days
self.password_age_max_days = password_age_max_days
self.password_warn_days = password_warn_days
self.password_disable_days = password_disable_days
self.account_expire_date = account_expire_date
self.ssh_public_keys = ssh_public_keys
def __eq__(self, other: Any) -> bool:
if not isinstance(other, User):
return False
return self.as_tuple == other.as_tuple
def __ne__(self, other: Any) -> bool:
if not isinstance(other, User):
return True
return self.as_tuple != other.as_tuple
def __lt__(self, other: "User") -> bool:
self._lt_check_other_type(other)
return self.as_tuple < other.as_tuple
@property
def uid(self) -> int:
"""
The integer user id of the user.
"""
return self._uid
@uid.setter
def uid(self, value: int) -> None:
if not isinstance(value, int):
raise TypeError("uid must be an int")
if not UID_MIN <= value <= UID_MAX:
raise ValueError(
f"uid must be between {UID_MIN} and {UID_MAX}, inclusive: "
f"{value}")
self._uid = value
@property
def real_name(self) -> str:
"""
The real name of the user.
This _may_ be a comma-delimited list of values containing the following
fields:
* The user's full name
* The building and room number
* Office telephone number
* Home telephone number
* Other contact information
"""
return self._real_name
@real_name.setter
def real_name(self, value: Optional[str]) -> None:
if value is None:
self._real_name = ""
return
if not isinstance(value, str):
raise TypeError("real_name must be a string or None")
if not FIELD_PATTERN.match(value):
raise ValueError("real_name contains illegal characters")
if len(value.encode("utf-8")) > REAL_NAME_MAX_LENGTH:
raise ValueError(
f"real_name is longer than {REAL_NAME_MAX_LENGTH} bytes "
f"(UTF-8 encoded)")
self._real_name = value
@property
def home(self) -> str:
"""
The home directory of the user.
"""
return self._home
@home.setter
def home(self, value: str) -> None:
if not isinstance(value, str):
raise TypeError("home must be a string")
if not FIELD_PATTERN.match(value):
raise TypeError("home contains illegal characters")
self._home = value
@property
def shell(self) -> str:
"""
The login shell of the user.
"""
return self._shell
@shell.setter
def shell(self, value: str) -> None:
if not isinstance(value, str):
raise TypeError("shell must be a string")
if not FIELD_PATTERN.match(value):
raise ValueError(
"shell is not an absolute path or contains doubled or "
f"trailing slashes: {value}")
self._shell = value
@property
def ssh_public_keys(self) -> FrozenSet[str]:
"""
The SSH public keys of the user.
"""
return frozenset(self._ssh_public_keys)
@ssh_public_keys.setter
def ssh_public_keys(
self, value: Optional[Union[Collection[str], str]]) -> None:
if value is None:
self._ssh_public_keys = set() # type: Set[str]
return
if isinstance(value, str):
self._ssh_public_keys = set([value])
return
if not isinstance(value, (list, tuple, set)):
raise TypeError("ssh_public_keys must be a collection of strings")
new_ssh_public_keys = set() # type: Set[str]
for el in value:
if not isinstance(el, str):
raise TypeError(
"ssh_public_keys must be a collection of strings")
new_ssh_public_keys.add(el)
self._ssh_public_keys = new_ssh_public_keys
@property
def ssh_dir_permissions_ok(self) -> bool:
"""
Indicates whether ~/.ssh exists, is a directory owned by the user,
and is only writable by the user.
"""
# pylint: disable=R0911
home = self.home
if not home:
log.debug(
"User %s does not have a home directory set", self.name)
return False
ssh_dir = home + "/.ssh"
if not exists(ssh_dir):
log.debug(
"User %s does not have ~/.ssh directory: %s", self.name,
ssh_dir)
return False
try:
ssh_stat = stat(ssh_dir)
except OSError as e:
log.error("Unable to stat %s: %s", ssh_dir, e)
return False
if ssh_stat.st_uid != self.uid:
log.warning(
"User %s does not own ~/.ssh directory %s: user uid %d, "
"owner uid %d", self.name, ssh_dir, self.uid, ssh_stat.st_uid)
return False
if not S_ISDIR(ssh_stat.st_mode):
log.warning(
"User %s ~/.ssh direcotry %s is not a directory", self.name,
ssh_dir)
return False
mode_bits = S_IMODE(ssh_stat.st_mode)
if mode_bits & 0o020:
log.warning(
"User %s ~/.ssh directory %s is group-writable", self.name,
ssh_dir)
return False
if mode_bits & 0o002:
log.warning(
"User %s ~/.ssh directory %s is other-writable", self.name,
ssh_dir)
return False
return True
@property
def authorized_keys_permissions_ok(self) -> bool:
"""
Indicates whether ~/.ssh/authorized_keys exists, is owned by the
user, and is only writable by the user.
"""
# pylint: disable=R0911
if not self.ssh_dir_permissions_ok:
return False
auth_keys = self.home + "/.ssh/authorized_keys"
if not exists(auth_keys):
log.debug(
"User %s does not have ~/.ssh/authorized_keys: %s", self.name,
auth_keys)
return False
try:
auth_keys_stat = stat(auth_keys)
except OSError as e:
log.error("Unable to stat %s: %s", auth_keys, e)
return False
if auth_keys_stat.st_uid != self.uid:
log.warning(
"User %s does not own ~/.ssh/authorized_keys file %s: user "
"uid %d, owner uid %d", self.name, auth_keys, self.uid,
auth_keys_stat.st_uid)
return False
if not S_ISREG(auth_keys_stat.st_mode):
log.warning(
"User %s ~/.ssh/authorized_keys file %s is not a file",
self.name, auth_keys)
return False
mode_bits = S_IMODE(auth_keys_stat.st_mode)
if mode_bits & 0o020:
log.warning(
"User %s ~/.ssh/authorized_keys file %s is group-writable",
self.name, auth_keys)
return False
if mode_bits & 0o002:
log.warning(
"User %s ~/.ssh/authorized_keys file %s is other-writable",
self.name, auth_keys)
return False
return True
@property
def authorized_keys(self) -> Set[str]:
"""
Return the authorized keys found in ~/.ssh
"""
result = set() # type: Set[str]
auth_keys = self.home + "/.ssh/authorized_keys"
if not self.authorized_keys_permissions_ok:
return result
with open(auth_keys, "r") as fd:
for line in fd:
line = line.strip()
if line:
result.add(line)
return result
@property
def as_tuple(self) -> UserTuple:
"""
The user represented as an immutable tuple object.
"""
return UserTuple(
name=self.name,
uid=self.uid,
gid=self.gid,
real_name=self.real_name,
home=self.home,
shell=self.shell,
password=self.password,
last_password_change_date=self.last_password_change_date,
password_age_min_days=self.password_age_min_days,
password_age_max_days=self.password_age_max_days,
password_warn_days=self.password_warn_days,
password_disable_days=self.password_disable_days,
account_expire_date=self.account_expire_date,
ssh_public_keys=self.ssh_public_keys,
modified=self.modified,
)
def __repr__(self):
return repr(self.as_tuple)
@staticmethod
def date_from_days(days: Optional[int]) -> Optional[date]:
"""
User.date_from_days(days: Optional[int]) -> Optional[date]
Convert a count of days-from-epoch to an optional date field.
If days is negative or None, the result is None.
This standardizes negative values returned by the Python spwd library
to None values.
"""
if days is None or days < 0:
return None
return EPOCH + timedelta(days=days)
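    # Illustrative example: if EPOCH is date(1970, 1, 1), then
    # User.date_from_days(18262) == date(2020, 1, 1), while
    # User.date_from_days(-1) and User.date_from_days(None) are both None.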
@staticmethod
    def age_from_days(days: Optional[int]) -> Optional[int]:
"""
User.age_from_days(days: Optional[int]) -> Optional[int]
Convert an age in days to an optional age field.
If days is negative or None, the result is None.
This standardizes negative values returned by the Python spwd library
to None values.
"""
if days is None or days < 0:
return None
return days
_iso8601_date_pattern = re_compile(
r"^(?P<year>[0-9]{4})-?"
r"(?P<month>[0-9][1-9]|1[0-2])-?"
r"(?P<day>0[1-9]|[12][0-9]|3[01])$")
@staticmethod
def date_from_string(s: Optional[str]) -> Optional[date]:
"""
User.date_from_string(s: Optional[str]) -> Optional[date]
Convert a string date in YYYY-MM-DD form to a date object. If s is
None, this returns None.
"""
if s is None:
return None
m = User._iso8601_date_pattern.match(s)
if not m:
raise ValueError("Cannot parse as date: %r" % s)
year = int(m.group("year"))
month = int(m.group("month"))
day = int(m.group("day"))
return date(year, month, day)
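    # Illustrative examples: both "2020-01-01" and "20200101" parse to
    # date(2020, 1, 1); None passes through as None, and any other string
    # raises ValueError.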
def update_from_dynamodb_item(self, item: Dict[str, Any]) -> bool:
"""
user.update_from_dynamodb_item(item: Dict[str, Any]) -> bool
Update the user from a given DynamoDB item. If an attribute has been
modified, the modified flag is set to true.
The name field cannot be updated.
The return value is the value of the modified flag.
"""
super(User, self).update_from_dynamodb_item(item)
uid = int(item["UID"]["N"])
if self.uid != uid:
self.uid = uid
self.modified = True
real_name = item.get("RealName", {"S": ""})["S"]
if self.real_name != real_name:
self.real_name = real_name
self.modified = True
home = item.get("Home", {"S": ""})["S"]
if self.home != home:
self.home = home
self.modified = True
shell = item.get("Shell", {"S": ""})["S"]
if self.shell != shell:
self.shell = shell
self.modified = True
last_password_change_date = User.date_from_string(
item.get("LastPasswordChangeDate", {}).get("S"))
if self.last_password_change_date != last_password_change_date:
self.last_password_change_date = last_password_change_date
self.modified = True
password_age_min_days = parse_opt_int(
item.get("PasswordAgeMinDays", {}).get("N"))
if self.password_age_min_days != password_age_min_days:
self.password_age_min_days = password_age_min_days
self.modified = True
password_age_max_days = parse_opt_int(
item.get("PasswordAgeMaxDays", {}).get("N"))
if self.password_age_max_days != password_age_max_days:
self.password_age_max_days = password_age_max_days
self.modified = True
password_warn_days = parse_opt_int(
item.get("PasswordWarnDays", {}).get("N"))
if self.password_warn_days != password_warn_days:
self.password_warn_days = password_warn_days
self.modified = True
password_disable_days = parse_opt_int(
item.get("PasswordDisableDays", {}).get("N"))
if self.password_disable_days != password_disable_days:
self.password_disable_days = password_disable_days
self.modified = True
account_expire_date = User.date_from_string(
item.get("AccountExpireDate", {}).get("S"))
if self.account_expire_date != account_expire_date:
self.account_expire_date = account_expire_date
self.modified = True
ssh_public_keys = item.get("SSHPublicKeys", {}).get("SS", set())
if self.ssh_public_keys != ssh_public_keys:
self.ssh_public_keys = ssh_public_keys
self.modified = True
return self.modified
@classmethod
def from_dynamodb_item(cls: Type[U], item: Dict[str, Any]) -> U:
"""
User.from_dynamodb_item(item: Dict[str, Any]) -> User
Create a user from a given DynamoDB item. The modified flag is
automatically set to true.
"""
return cls(
name=item["Name"]["S"],
uid=int(item["UID"]["N"]),
gid=int(item["GID"]["N"]),
real_name=item.get("RealName", {"S": ""})["S"],
home=item.get("Home", {"S": ""})["S"],
shell=item.get("Shell", {"S": ""})["S"],
password=item.get("Password", {}).get("S"),
last_password_change_date=User.date_from_string(
item.get("LastPasswordChangeDate", {}).get("S")),
password_age_min_days=parse_opt_int(
item.get("PasswordAgeMinDays", {}).get("N")),
password_age_max_days=parse_opt_int(
item.get("PasswordAgeMaxDays", {}).get("N")),
password_warn_days=parse_opt_int(
item.get("PasswordWarnDays", {}).get("N")),
password_disable_days=parse_opt_int(
item.get("PasswordDisableDays", {}).get("N")),
account_expire_date=User.date_from_string(
item.get("AccountExpireDate", {}).get("S")),
ssh_public_keys=item.get("SSHPublicKeys", {}).get("SS", set()),
modified=True)
|
python
|
import os
import setuptools
with open("README.md") as f:
long_description = f.read()
with open(
os.path.join(os.path.dirname(__file__), "config", "requirements", "base.txt")
) as f:
requirements = [i.strip() for i in f]
setuptools.setup(
name="rembrain_robot_framework",
version="0.1.4",
author="Rembrain",
author_email="[email protected]",
description="Rembrain Robot Framework",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/VasilyMorzhakov/rembrain_robotframework",
# collect all packages
packages=setuptools.find_packages(),
install_requires=requirements,
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.7",
)
|
python
|
#coding:utf-8
from flask import Flask, redirect, url_for, request
from datetime import datetime
from flask_bootstrap import Bootstrap
from app.config_default import Config as DefaultConfig
bootstrap = Bootstrap()
def check_start(app, db):
from app.includes.start import _exist_config, exist_table, create_path, set_site
create_path(app)
app.start = False
if _exist_config(app):
from app.config import Config
app.config.from_object(Config)
if exist_table(app):
app.start = True
return
@app.before_request
def request_check_start():
if app.start:
return set_site(app)
ends = frozenset(["admin.setup", "admin.install", "static"])
if request.endpoint in ends:
return
if not _exist_config(app):
return redirect(url_for("admin.setup"))
return redirect(url_for("admin.install"))
def template_filters(app):
@app.template_filter("friendly_time")
def friendly_time(date):
now = datetime.now()
delta = now - date
if delta.days >= 365:
return u'%d年前' % (delta.days / 365)
elif delta.days >= 30:
return u'%d个月前' % (delta.days / 30)
elif delta.days > 0:
return u'%d天前' % delta.days
elif delta.days < 0:
return u"0秒前"
elif delta.seconds < 60:
return u"%d秒前" % delta.seconds
elif delta.seconds < 60 * 60:
return u"%d分钟前" % (delta.seconds / 60)
else:
return u"%d小时前" % (delta.seconds / 60 / 60)
def create_app():
app = Flask(__name__)
app.config.from_object(DefaultConfig)
from app.models.model import db, login_manager
bootstrap.init_app(app)
db.init_app(app)
db.PREFIX = app.config["DB_PREFIX"]
app.site = {}
def site_context_processor():
return dict(site=app.site)
app.context_processor(site_context_processor)
check_start(app, db)
login_manager.init_app(app)
from app.web import web
app.register_blueprint(web)
from app.admin import admin
app.register_blueprint(admin, url_prefix="/admin")
from app.api import api
app.register_blueprint(api, url_prefix="/api")
template_filters(app)
login_manager.login_view = "admin.login"
login_manager.login_message = "请先登录!!!"
from app.log import init_logging
init_logging(app)
return app
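# Illustrative usage (hypothetical entry point, e.g. a run.py):
# app = create_app()
# app.run(debug=True)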
|
python
|
"""
Generate_branched_alkane
========================
"""
from rdkit import Chem
import numpy as np
import random
def generate_branched_alkane(num_atoms: int, save: bool=False) -> Chem.Mol:
"""Generates a branched alkane.
Parameters
----------
num_atoms : int
Number of atoms in molecule to be generated.
save : bool
Whether to save the molecule as a .mol file.
"""
mol = Chem.MolFromSmiles('CCCC')
edit_mol = Chem.RWMol(mol)
while edit_mol.GetNumAtoms() < num_atoms:
x = Chem.rdchem.Atom(6)
randidx = np.random.randint(len(edit_mol.GetAtoms()))
atom = edit_mol.GetAtomWithIdx(randidx)
if atom.GetDegree() > 2:
continue
if atom.GetDegree() == 2 and random.random() <= 0.5:
continue
idx = edit_mol.AddAtom(x)
edit_mol.AddBond(idx, randidx, Chem.rdchem.BondType.SINGLE)
Chem.SanitizeMol(edit_mol)
mol = Chem.rdmolops.AddHs(edit_mol.GetMol())
if save:
Chem.rdmolfiles.MolToMolFile(mol, f'{num_atoms}_branched_alkane.mol')
return mol
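# Illustrative usage (assumes RDKit is installed):
# mol = generate_branched_alkane(12)
# After AddHs, mol.GetNumAtoms() is 12 carbons + 26 hydrogens = 38 for an
# acyclic C12 alkane (CnH2n+2).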
|
python
|
from django.core.exceptions import ValidationError
from django.test import TestCase
from openwisp_users.models import OrganizationUser, User
from .utils import TestOrganizationMixin
class TestUsers(TestOrganizationMixin, TestCase):
user_model = User
def test_create_superuser_email(self):
user = User.objects.create_superuser(username='tester',
password='tester',
email='[email protected]')
self.assertEqual(user.emailaddress_set.count(), 1)
self.assertEqual(user.emailaddress_set.first().email, '[email protected]')
def test_create_superuser_email_empty(self):
user = User.objects.create_superuser(username='tester',
password='tester',
email='')
self.assertEqual(user.emailaddress_set.count(), 0)
def test_unique_email_validation(self):
self._create_user(username='user1', email='[email protected]')
options = {
'username': 'user2',
'email': '[email protected]',
'password': 'pass1'
}
u = self.user_model(**options)
with self.assertRaises(ValidationError):
u.full_clean()
u.save()
def test_create_user_without_email(self):
options = {
'username': 'testuser',
'password': 'test1',
}
u = self.user_model(**options)
u.full_clean()
u.save()
self.assertIsNone(u.email)
def test_organizations_pk(self):
user = self._create_user(username='organizations_pk')
org1 = self._create_org(name='org1')
org2 = self._create_org(name='org2')
self._create_org(name='org3')
OrganizationUser.objects.create(user=user, organization=org1)
OrganizationUser.objects.create(user=user, organization=org2)
self.assertIn((org1.pk,), user.organizations_pk)
self.assertEqual(len(user.organizations_pk), 2)
def test_organizations_pk_empty(self):
user = self._create_user(username='organizations_pk')
self.assertEqual(len(user.organizations_pk), 0)
def test_organization_repr(self):
org = self._create_org(name='org1', is_active=False)
self.assertIn('disabled', str(org))
def test_organization_owner_bad_organization(self):
user = self._create_user(username='user1', email='[email protected]')
org1 = self._create_org(name='org1')
org2 = self._create_org(name='org2')
org_user = self._create_org_user(organization=org1, user=user)
org_owner = self._create_org_owner()
org_owner.organization = org2
org_owner.organization_user = org_user
with self.assertRaises(ValidationError):
org_owner.full_clean()
def test_create_users_without_email(self):
options = {
'username': 'testuser',
'password': 'test1',
}
u = self.user_model(**options)
u.full_clean()
u.save()
self.assertIsNone(u.email)
options['username'] = 'testuser2'
u = self.user_model(**options)
u.full_clean()
u.save()
self.assertIsNone(u.email)
self.assertEqual(User.objects.filter(email=None).count(), 2)
|
python
|
from tensorflow_functions import cosine_knn
import collections
import numpy as np
import logging
from embedding import load_embedding
import operator
from sklearn.cluster import KMeans
from utils import length_normalize, normalize_questions, normalize_vector, calculate_cosine_simil, perf_measure
import sklearn.metrics
import argparse
import os
import datetime
class Question_Manager():
questions = []
questions_normalized = []
questions_vectors = []
keywords = collections.defaultdict()
embedding = None
def __init__(self, embedding_path='/home/iker/Documents/QuestionCluster/TechEmbeddings/embeddings_lower.vec'):
self.questions = []
self.questions_normalized = []
self.questions_vectors = []
self.keywords = collections.defaultdict()
self.embedding = load_embedding(embedding_path)
def get_keywords(self):
return sorted(self.keywords.items(), key=operator.itemgetter(1), reverse=True)
def question_to_vector(self, question, prefix=False):
sentence = np.zeros([self.embedding.dims])
num_words = 0
for word in question:
try:
if prefix:
sentence += self.embedding.word_to_vector(prefix+'/'+word)
else:
sentence += self.embedding.word_to_vector(word)
num_words += 1
except KeyError as r:
continue
if num_words > 0:
sentence = sentence / num_words
else:
            logging.warning('Could not calculate the sentence embedding for the sentence ' + str(question))
return sentence
def update_keyword_for_sentence(self, question):
for word in question:
try:
self.keywords[word] += 1
except KeyError as er:
self.keywords[word] = 1
def print_question(self, question, path='questions.txt'):
with open(path, 'a') as file:
print(str(question), file=file)
def add_question(self, question):
normalized_question = normalize_questions(question)
question_vector = self.question_to_vector(normalized_question)
if len(normalized_question) > 0 and question_vector is not None:
self.questions.append(question)
self.questions_normalized.append(normalized_question)
self.questions_vectors.append(question_vector)
self.update_keyword_for_sentence(normalized_question)
self.print_question(question)
def load_form_file(self, path):
with open(path, 'r') as file:
for line in file:
self.add_question(line)
def clustering(self, n_clusters=8):
kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(np.array(self.questions_vectors))
cluster_centers = kmeans.cluster_centers_
labels = kmeans.labels_
questions_cluster = [[] for x in range(n_clusters)]
for i_label, label in enumerate(kmeans.predict(np.array(self.questions_vectors))):
questions_cluster[label].append(self.questions[i_label])
return questions_cluster
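    # Illustrative usage (hypothetical file): after load_form_file('faq.txt'),
    # clustering(n_clusters=4) returns four lists of the original question
    # strings, grouped by k-means over their averaged word embeddings.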
def k_nearest(self, sentence, k=1):
vectors_norm = length_normalize(np.array(self.questions_vectors))
result = cosine_knn([self.question_to_vector(normalize_questions(sentence))], vectors_norm, k=k)
for s in result[0]:
print(self.questions[s])
def evaluate_similarity(self, question_file, threshold = 0.8, prefix =False):
question1 = []
question2 = []
gold_scores = []
with open(question_file) as file:
for line in file:
line = line.rstrip()
q1, q2, gold = line.split('\t')
question1.append(q1)
question2.append(q2)
gold_scores.append(int(gold))
question_vectors_1 = [self.question_to_vector(normalize_questions(x),prefix) for x in question1]
question_vectors_2 = [self.question_to_vector(normalize_questions(x), prefix) for x in question2]
scores = []
for i in range(len(question_vectors_1)):
if i % 10 == 0:
string = "<" + str(datetime.datetime.now()) + "> " + 'Evaluating Question Pairs: ' + str(
int(100 * ((i+10) / len(question_vectors_1)))) + '%'
print(string, end="\r")
score = calculate_cosine_simil(question_vectors_1[i], question_vectors_2[i])
if score > threshold:
scores.append(1)
else:
scores.append(0)
print()
result = sklearn.metrics.log_loss(gold_scores, scores)
TP, FP, TN, FN = perf_measure(gold_scores, scores)
acc = np.sum(np.array(gold_scores) == np.array(scores))/len(gold_scores)
print('Log Loss: ' + str(result))
print('Acc: ' + str(acc))
print('TP: ' + str(TP) + '\tFP: ' + str(FP) + '\tTN: ' + str(TN) + '\tFN: ' + str(FN))
print(scores)
print(gold_scores)
return result, acc, TP, FP, TN, FN
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-q', '--question_file', required=True, type=str)
parser.add_argument('-e', '--embedding', required=True, type=str)
#parser.add_argument('-t', '--threshold', default='0.8', type=float)
parser.add_argument('-p', '--prefix', type=str, default=None)
args = parser.parse_args()
qm = Question_Manager(embedding_path=args.embedding)
for threshold in [0.5, 0.6, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]:
print('===> Threshold: ' + str(threshold))
result = qm.evaluate_similarity(args.question_file, threshold, args.prefix)
if not os.path.exists('Results'):
os.makedirs('Results')
with open('Results/baseline.csv', 'a+') as file:
txtResults = str(args.embedding) + '\t' + str(threshold) + '\t' + str(result[0]) + '\t' + str(result[1]) + '\t' + \
str(result[2]) + '\t' + str(result[3]) + '\t' + str(result[4]) + '\t' + str(result[5])
print('%s' % (str(txtResults)), file=file)
|
python
|
from Impromptu import *
|
python
|
import pytch
from pytch import (
Sprite,
Stage,
Project,
when_green_flag_clicked,
when_this_sprite_clicked,
)
import random
# Click the balloons to pop them and score points
class BalloonStage(Stage):
Backdrops = [('midnightblue', 'library/images/stage/solid-midnightblue.png')]
# TODO: Improve how using a non-default backdrop works.
def __init__(self):
Stage.__init__(self)
self.switch_backdrop('midnightblue')
class Balloon(Sprite):
Costumes = [('balloon', 'library/images/balloon.png', 50, 80)]
Sounds = [('pop', 'library/sounds/pop.mp3')]
def __init__(self):
Sprite.__init__(self)
self.score = 0
def go_to_random_spot(self):
self.go_to_xy(random.randint(-200, 200),
random.randint(-150, 150))
@when_green_flag_clicked
def play_game(self):
self.score = 0
self.go_to_random_spot()
self.switch_costume('balloon')
self.show()
while True:
pytch.wait_seconds(3.0)
self.go_to_random_spot()
self.show()
@when_this_sprite_clicked
def pop(self):
self.start_sound('pop')
self.hide()
self.score += 1
project = pytch.Project()
project.register_stage_class(BalloonStage)
project.register_sprite_class(Balloon)
project.go_live()
|
python
|
from environment import environment as env
class syscalls:
"""This class holds a framework for system calls and should ultimately depend on an
architecture template I think. For now, it's basically a function map to allow
programming system calls like you really would.
"""
    def __init__(self):
        # Name-to-number function map consumed by getIdByName; populated
        # elsewhere (e.g. from an architecture template).
        self._map = {}
def call(self):
""" Represents the 0x80 instruction.
"""
syscall_number = env.rax.val()
# args = [env.ebx.val(), env.ecx.val(), env.edx.val(), env.esi.val(), env.edi.val(), env.ebp.val()]
# handle stack args
# Look up syscall_number in the function map,
pass
def getIdByName(self, name):
""" Return a system call number based on the name of the syscall.
env.mov(rax, sys.getIdByName('foo_bar'))
"""
try:
value = self._map[name]
except KeyError:
raise KeyError("Invalid syscall name")
return value
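# Illustrative usage (hypothetical syscall table): after populating
# sys = syscalls(); sys._map = {'exit': 1, 'write': 4}
# sys.getIdByName('write') returns 4, and an unknown name raises
# KeyError("Invalid syscall name").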
|
python
|
from astute.__main__ import main
from astute.__main__ import Icon
if __name__ == '__main__':
main(icon=Icon('astute/astute.ico'))
|
python
|
# *ex2 - Write a program with a repetition loop that the user ends only when desired, whose goal is to report the musical note and its frequency based on the key entered by the user
from os import system
from time import sleep
from cores import *
def sair():
print(f'\t\t{cor["verde"]}Até mais!{cor["limpar"]}')
sleep(1)
quit()
def do():
print('''
Nota: Dó
Frequência: 262 Hz''')
input()
def re():
print('''
Nota: Ré
Frequência: 294 Hz''')
input()
def mi():
print('''
Nota: Mi
Frequência: 330 Hz''')
input()
def fa():
print('''
Nota: Fá
Frequência: 349 Hz''')
input()
def sol():
print('''
Nota: Sol
Frequência: 392 Hz''')
input()
def la():
print('''
Nota: Lá
Frequência: 440 Hz''')
input()
def si():
print('''
Nota: Si
Frequência: 494 Hz''')
input()
def erro():  # Fallback for when the user enters a key that does not exist in the dictionary
print(f'{cor["vermelho"]}Escolha invalida. Tente novamente!{cor["limpar"]}')
sleep(1)
while True:
system('cls')
tecla = str(input(f'''
{cor['verde']}> TECLA <{cor['limpar']}
---------
C
D
E
F
G
A
B
{cor['vermelho']}SAIR{cor['limpar']}
{cor['amarelo']}Insira a tecla:{cor['limpar']} ''')).lower().strip()
operacoes = {
'c': do,
'd': re,
'e': mi,
'f': fa,
'g': sol,
'a': la,
'b': si,
'sair': sair
}
system('cls')
    operacoes.get(tecla, erro)()
|
python
|
# coding: utf-8
# libraries import
import json
from flask import Flask, render_template, redirect, url_for, request, jsonify, flash
from flask_zurb_foundation import Foundation
from sqlalchemy_wrapper import SQLAlchemy
app = Flask(__name__)
# Configuration
app.config["DEBUG"] = True
from models import RandomData, db
# Initializations
foundation = Foundation(app)
# Models
# views
@app.route("/")
def index():
data = db.query(RandomData).all()
return render_template(
"index.html",
data=data
)
@app.route("/save", methods=["POST"])
def save():
    # grab the received data
returned_data = {}
received_data = request.json
try:
new_random_data = RandomData(received_data)
db.session.add(new_random_data)
db.session.commit()
returned_data["message"] = "Se Creó una nueva data aleatoria"
returned_data["data"] = received_data
returned_data["status"] = "success"
    except Exception as e:
        returned_data["message"] = "Hubo un error"
        returned_data["error"] = str(e)
        returned_data["status"] = "alert"
return jsonify(returned_data)
|
python
|
class Solution(object):
def match_note_to_magazine(self, ransom_note, magazine):
if ransom_note is None or magazine is None:
raise TypeError('ransom_note or magazine cannot be None')
seen_chars = {}
for char in magazine:
if char in seen_chars:
seen_chars[char] += 1
else:
seen_chars[char] = 1
for char in ransom_note:
try:
seen_chars[char] -= 1
except KeyError:
return False
if seen_chars[char] < 0:
return False
return True
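# Illustrative usage:
# Solution().match_note_to_magazine('aab', 'aabb')   # -> True
# Solution().match_note_to_magazine('aabc', 'aabb')  # -> False ('c' missing)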
|
python
|
# File : text.py
# Author : Zhengkun Tian
# Email : [email protected]
import torch
import logging
from otrans.data import *
from torch.utils.data import Dataset
class TextDataset(Dataset):
def __init__(self, params, datadict, is_eval=False):
self.params = params
self.is_eval = is_eval
self.src_unit2idx = load_vocab(params['src_vocab'])
self.tgt_unit2idx = load_vocab(params['tgt_vocab'])
self.reverse = params['reverse'] if 'reverse' in params else False
if self.reverse:
logging.info('Reverse the src and tgt sequence!')
self.src_list = []
self.tgt_dict = {}
for src_file in datadict['src']:
with open(src_file, 'r', encoding='utf-8') as t:
for line in t:
parts = line.strip().split()
utt_id = parts[0]
label = []
for c in parts[1:]:
label.append(self.src_unit2idx[c] if c in self.src_unit2idx else self.src_unit2idx[UNK_TOKEN])
self.src_list.append((utt_id, label))
for tgt_file in datadict['tgt']:
with open(tgt_file, 'r', encoding='utf-8') as t:
for line in t:
parts = line.strip().split()
utt_id = parts[0]
label = []
for c in parts[1:]:
label.append(self.tgt_unit2idx[c] if c in self.tgt_unit2idx else self.tgt_unit2idx[UNK_TOKEN])
self.tgt_dict[utt_id] = label
assert len(self.src_list) == len(self.tgt_dict)
self.lengths = len(self.src_list)
def __getitem__(self, index):
idx, src_seq = self.src_list[index]
tgt_seq = self.tgt_dict[idx]
if self.reverse:
src_seq.reverse()
tgt_seq.reverse()
return idx, src_seq, tgt_seq
def __len__(self):
return self.lengths
@property
def src_vocab_size(self):
return len(self.src_unit2idx)
@property
def tgt_vocab_size(self):
return len(self.tgt_unit2idx)
@property
def src_idx2unit(self):
return {i: c for (c, i) in self.src_unit2idx.items()}
@property
def tgt_idx2unit(self):
return {i: c for (c, i) in self.tgt_unit2idx.items()}
|
python
|
"""Custom test and setup properties for checkin pull_info provider."""
load("//container:providers.bzl", "PullInfo")
def _pull_info_validation_test_impl(ctx):
pull_info = ctx.attr.target[PullInfo]
compare_script_file = ctx.actions.declare_file("compare.sh")
compare_script = """#!/usr/bin/env bash
function assert_equals(){
if [ "$2" != "$3" ]; then
echo "Expected $1 to be '$2' but was '$3'"
exit 1
fi
}
""" + """
assert_equals "base_image_registry" "{expected_registry}" "{actual_registry}"
assert_equals "base_image_repository" "{expected_repository}" "{actual_repository}"
assert_equals "base_image_digest" "{expected_digest}" "{actual_digest}"
echo "PASSED"
""".format(
expected_registry = ctx.attr.expected_registry,
actual_registry = pull_info.base_image_registry,
expected_repository = ctx.attr.expected_repository,
actual_repository = pull_info.base_image_repository,
expected_digest = ctx.attr.expected_digest,
actual_digest = pull_info.base_image_digest,
)
ctx.actions.write(compare_script_file, compare_script, is_executable = True)
return [DefaultInfo(executable = compare_script_file, runfiles = ctx.runfiles(files = [compare_script_file]))]
pull_info_validation_test = rule(
implementation = _pull_info_validation_test_impl,
attrs = {
"expected_digest": attr.string(mandatory = True),
"expected_registry": attr.string(mandatory = True),
"expected_repository": attr.string(mandatory = True),
"target": attr.label(providers = [PullInfo]),
},
test = True,
)
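# Illustrative BUILD usage (hypothetical target names and values):
# pull_info_validation_test(
#     name = "base_pull_info_test",
#     target = ":app_image",
#     expected_registry = "gcr.io",
#     expected_repository = "my-project/base",
#     expected_digest = "sha256:...",
# )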
|
python
|
# Converting the main code to use datetime objects as well instead of just time objects
# Took out defaults from the iter functions
import time, math
from datetime import datetime
from models.energy import defaultModel, load_data, GPSCoordinate#, powerConsumption
from models.Predictor import powerGeneration
# function 1: Given velocity, find energy
# Default start: Now, end: 5 PM (17:00)
# energy change=energy generated-energy consumed
def calc_dE(velocity, latitude, longitude, altitude, start_time=time.strftime("%Y %m %d %H:%M", time.localtime()), end_time="17:00", cloudy=0):
# Must convert end_time into a proper string
# Format: Year Month Day Hour:Min
if end_time == "17:00":
end_time = time.strftime("%Y %m %d", time.localtime()) + " 17:00"
it = iter_dE(velocity, latitude, longitude, start_time, end_time, cloudy)
for (done, dE) in it:
if done:
return dE
def iter_dE(velocity, latitude, longitude, start_time, end_time, cloudy):
# Time Objects for Kevin's Program
# Arguments should be in this format: "Year Month Day Hour:Min"
# "2011 10 11 14:00"
st = time.strptime(start_time, "%Y %m %d %H:%M")
et = time.strptime(end_time, "%Y %m %d %H:%M")
# Datetime Objects for Wesley's Program
# Arguments should be in this format: "Year Month Day Hour:Min"
# "2011 10 11 14:00"
dST = datetime.strptime(start_time, "%Y %m %d %H:%M")
dET = datetime.strptime(end_time, "%Y %m %d %H:%M")
it = defaultModel.energy_loss_iterator(velocity,
latitude,
longitude,
time.mktime(et)-time.mktime(st))
for (done, losses) in it:
yield (False, -losses)
if done:
break
yield (True, powerGeneration(latitude, velocity, dST, dET, cloudy) - losses)
def calc_V(energy, latitude, longitude, altitude, start_time = time.strftime("%Y %m %d %H:%M", time.localtime()), end_time="17:00", cloudy=0):
# Must convert end_time into a proper string
# Format: Year Month Day Hour:Min
if end_time == "17:00":
end_time = time.strftime("%Y %m %d", time.localtime()) + " 17:00"
it = iter_V(energy, latitude, longitude, altitude, start_time, end_time, cloudy)
for (done, velocity) in it:
if done:
return velocity
# function 2: Given energy, find velocity
def iter_V(energy, latitude, longitude, altitude, start_time, end_time, cloudy):
# Start with an arbitrary average velocity... say...50 km/h
velocity_guess = 50.0
# error_bound
error = 0.01
# limit the number of iterations in case newton's method diverges
iteration_limit = 200
current_iteration = 0
dv = 0.01
# Time Objects
st = time.strptime(start_time, "%Y %m %d %H:%M")
et = time.strptime(end_time, "%Y %m %d %H:%M")
dt = time.mktime(et) - time.mktime(st)
# Datetime Objects
dST = datetime.strptime(start_time, "%Y %m %d %H:%M")
dET = datetime.strptime(end_time, "%Y %m %d %H:%M")
start = GPSCoordinate(latitude, longitude, altitude)
# We try to find a velocity such that the energy generated - the energy
# consumed = the specified energy change. In order to do this, we start
# with a guess for the correct velocity and use Newton's method to get
# closer and closer to the correct velocity. Newton's method is a method
# to approximate the root of a function f(x) by starting with a guess of
# the root and repeatedly updating the guess by finding the tangent to f(x)
# at the guess and then finding the intersection of that tangent and the x
# axis. This x-value of this intersection point is the new guess.
while current_iteration < iteration_limit:
energy_gen = powerGeneration(latitude, velocity_guess, dST, dET, cloudy)
energy_loss = powerConsumption(start, velocity_guess, dt)
energy_change = energy_gen - energy_loss
if math.fabs(energy_change - energy) < error:
yield (True, velocity_guess)
print 'answer=',velocity_guess
break
else:
# Update velocity guess value
energy_gen = powerGeneration(latitude, velocity_guess+dv, dST, dET, cloudy)
energy_loss = powerConsumption(start, velocity_guess+dv, dt)
print 'powerGeneration: ', energy_gen
print 'powerConsumption: ', energy_loss
E_prime = ((energy_gen - energy_loss) - energy_change) / dv
velocity_guess = velocity_guess - (energy_change - energy) / E_prime
current_iteration += 1
yield (False, velocity_guess)
if not(math.fabs(energy_change - energy) < error):
        # Sometimes Newton's method diverges, so we use a more reliable naive
        # method if Newton's fails to converge after the set number of iterations.
# Reset velocity_guess
velocity_guess = 50.0
# Reset current_iteration
current_iteration = 0
# Change limit
iteration_limit = 1000
# Start with some increment amount
increment_amount = 25.0
# Hold onto our previous guesses just in case...
prev_guess = 0
# We assume that energy generated - energy consumed generally decreases
# when velocity increases. So when the calculated energy change - the
# desired change in energy at the guess velocity is positive, we increase
# the guess velocity to get closer to the correct velocity. On the other
# hand, if the calculated energy change - the desired change in energy at
# the guess velocity is negative, we decrease the guess velocity to get
    # closer to the correct velocity. Every time we change the direction in
    # which we increment the guess velocity, we know we have overshot the
    # correct velocity, so we halve the increment amount to zero in on the
    # correct velocity.
while current_iteration < iteration_limit:
energy_gen = powerGeneration(latitude, velocity_guess, dST, dET, cloudy)
energy_loss = powerConsumption(start, velocity_guess, dt)
energy_change = energy_gen - energy_loss
if math.fabs(energy_change-energy) < error:
if velocity_guess < 0:
print "Input energy too high -> velocity ended up negative."
yield (True, velocity_guess)
print 'answer=',velocity_guess
break
elif energy_change-energy > 0:
#check to see if we overshot:
if velocity_guess+increment_amount == prev_guess:
increment_amount = increment_amount/2
prev_guess = velocity_guess
velocity_guess += increment_amount
else:
#check to see if we overshot:
if velocity_guess-increment_amount == prev_guess:
increment_amount = increment_amount/2
prev_guess = velocity_guess
velocity_guess -= increment_amount
current_iteration += 1
yield (False, velocity_guess)
if not(math.fabs(energy_change - energy) < error):
# DOOM
print "Max iterations exceeded. Try different inputs."
yield (True, -1)
# Dummy test functions
##def powerGeneration(latitude, velocity, start_time, end_time, cloudy):
## energy_change = (1-cloudy)*(time.mktime(end_time)-time.mktime(start_time))
## return energy_change
def powerConsumption((latitude, longitude, altitude), velocity, time):
energy_eaten = 0.3*time*velocity
return energy_eaten
# Main Caller and Loop Function
if __name__ == '__main__':
# Previous calculation state:
calcType = 0
energyState = 0
inputVelocity = 0
inputEnergy = 0
endTime = "0:00"
#initialize route database:
load_data()
# User input loop
while True:
# Asks user whether to start a new calculation or modify the previous one
operationType = raw_input("Enter 'n' to start a new calculation. Enter 'm' to modify a previous calculation. ")
if operationType=="n":
# Starting new calculation
calcType=raw_input("Enter 'v' to calculate the average velocity given a change in battery energy. Enter 'e' to calculate change in battery energy given an average velocity. ")
# Calculate velocity given a change in energy
if calcType=="v":
inputEnergy=raw_input("Please enter the desired energy change: ")
longitude=raw_input("Please enter your current longitude coordinate: ")
lat=raw_input("Please enter your current latitude coordinate: ")
alt=raw_input("Please enter your current altitude: ")
startTime=raw_input("Please enter your desired start time. Format: 'year month day hr:min' (24 hr time) If you leave this field blank, 'now' will be the start time. ")
if startTime=="":
print ("Start time defaulted to now")
startTime=time.strftime("%Y %m %d %H:%M",time.localtime())
endTime=raw_input("Please enter your desired end time. Format: 'year month day hr:min' (24 hr time) If you leave this field blank, 17:00 will be the start time. ")
if endTime=="":
print ("End time defaulted to today at 17:00")
# Default endTime will be handled along the way
endTime="17:00"
energyState=raw_input("Please enter the energy level (in MJ) of the batteries at the start location: ")
cloudiness=raw_input("Please enter a projected %cloudy value [0,1]. If you leave this field blank, historical values will be used. ")
if cloudiness=="":
cloudiness=-1
print str(calc_V(float(inputEnergy),float(longitude),float(lat),float(alt),startTime,endTime,float(cloudiness))) + "km/h"
# Calculate change in energy given a velocity
if calcType=="e":
inputVelocity=raw_input("Please enter the desired average velocity: ")
longitude=raw_input("Please enter your current longitude coordinate: ")
lat=raw_input("Please enter your current latitude coordinate: ")
alt=raw_input("Please enter your current altitude: ")
startTime=raw_input("Please enter your desired start time. Format: 'year month day hr:min' (24 hr time) If you leave this field blank, 'now' will be the start time. ")
if startTime=="":
print ("Start time defaulted to now")
startTime=time.strftime("%Y %m %d %H:%M",time.localtime())
endTime=raw_input("Please enter your desired end time. Format: 'hr:min' (24 hr time) If you leave this field blank, 17:00 will be the start time. ")
if endTime=="":
print ("End time defaulted to today at 17:00")
# This'll be handled later
endTime="17:00"
energyState=raw_input("Please enter the energy level (in MJ) of the batteries at the start location: ")
cloudiness=raw_input("Please enter a projected %cloudy value [0,1]. If you leave this field blank, historical values will be used. ")
if cloudiness=="":
cloudiness=-1
print str(calc_dE(float(inputVelocity),float(longitude),float(lat), float(alt), startTime,endTime,float(cloudiness))) + "MJ"
elif operationType == "m" and type!=0:
# Modifying previous calculation
ce = raw_input("Please enter the current energy of the car: ")
currentEnergy = float(ce)
newEnergy = float(inputEnergy) - (currentEnergy - float(energyState))
clouds = raw_input("Please enter a new %cloudy value [0,1]: ")
cloudiness = float(clouds)
newLongitude = raw_input("Please enter a new longitude value: ")
longitude = float(newLongitude)
newLat = raw_input("Please enter a new latitude value: ")
lat = float(newLat)
startTime = time.strftime("%Y %m %d %H:%M", time.localtime())
if type == "v":
# Calculate velocity given a change in energy
print str(calc_V(newEnergy, longitude, lat, startTime, endTime, cloudiness))+ "km/h"
else:
# Calculate change in energy given a velocity
print str(calc_dE(float(inputVelocity), longitude, lat, startTime, endTime, cloudiness) + (currentEnergy - float(energyState))+"MJ")
|
python
|
#!/usr/bin/env python3
#ccc 2021 senior 10/15
from sys import stdin
from itertools import repeat
m = int(stdin.readline())
n = int(stdin.readline())
k = int(stdin.readline())
canvas = []
for _ in range(m):
canvas.append(list(repeat(False,n)))
gold = 0
for _ in range(k):
query = stdin.readline().split()
query[1] = int(query[1])
if query[0] == 'R':
for i in range(n):
if canvas[query[1]-1][i]:
gold -= 1
else:
gold += 1
canvas[query[1]-1][i] = not canvas[query[1]-1][i]
if query[0] == 'C':
for i in range(m):
if canvas[i][query[1]-1]:
gold -= 1
else:
gold += 1
canvas[i][query[1]-1] = not canvas[i][query[1]-1]
print(gold)
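# Illustrative trace (hypothetical input): on a 2x2 canvas, "R 1" paints row 1
# (gold = 2); "C 1" then unpaints cell (1,1) and paints cell (2,1), so the
# program prints 2.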
|
python
|
import requests
import json
from . import filler, models
VALID_HTTP_METHODS = {
'GET': ['url'],
'PATCH': ['url', 'data'],
'PUT': ['url', 'data'],
'POST': ['url', 'data'],
'DELETE': ['url'],
}
def call(step, responses, config: models.SuiteConfig):
"""
Main API Caller
:param step:
:param responses:
:param config:
:return:
"""
req = step.get('request')
validate_request(req)
method = req.get('method', None)
url = filler.fill_regex(req['url'].replace('{{baseUrl}}', config.base_url), responses)
payload = req.get('data', None)
if payload is not None:
payload_clean = filler.fill_regex(payload, responses)
payload = json.loads(payload_clean)
headers = None
print('Calling {method} @ {url}'.format(method=method, url=url))
response_raw = requests.request(method=method, url=url, json=payload, headers=headers)
response_json = {}
try:
response_json = response_raw.json()
except ValueError:
print('Invalid json')
# no JSON: nothing to do
print('Response ({number}) {status}: {response}'.format(number=len(responses), status=response_raw.status_code, response=json.dumps(response_json)))
response = build_response(step, response_raw, payload)
return response
def mock(step, responses):
response = step.get('response')
response_filled = filler.fill_regex(response, responses)
response_json = json.loads(response_filled)
return build_response_mock(step, response_json)
def build_response(step, response_raw, payload):
response = {
"type": "HTTP",
"name": step.get("name", "Unnamed Request"),
"description": step.get("name", "Undescribed Request"),
"headers": dict(response_raw.headers),
"body": response_raw.text,
"status": response_raw.status_code,
"request": {
"body": payload,
"headers": dict(response_raw.request.headers),
"method": response_raw.request.method,
"url": response_raw.request.url
}
}
try:
response["json"] = response_raw.json()
    except ValueError:  # covers json.JSONDecodeError and simplejson's variant
        print('Unable to parse json response')
        response["json"] = None
return response
def build_response_mock(step, response_json):
return {
"type": "MOCK",
"name": step.get("name", "Unnamed Request"),
"description": step.get("name", "Undescribed Request"),
"json": response_json
}
# simple validation
def validate_request(req):
name = req.get('name', 'UNNAMED REQUEST')
method = req.get('method', None)
if method is None:
raise Exception('MISSING METHOD')
if method not in VALID_HTTP_METHODS.keys():
raise Exception('INVALID METHOD {method}'.format(method=method))
configs = VALID_HTTP_METHODS.get(method)
for config in configs:
if req.get(config, None) is None:
raise Exception('MISSING {config} FROM {name}'.format(config=config, name=name))
return True
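# A minimal usage sketch (hypothetical step and config values; SuiteConfig is
# assumed to expose base_url as used in call() above):
#   step = {"request": {"name": "get user", "method": "GET", "url": "{{baseUrl}}/users/1"}}
#   config = models.SuiteConfig(base_url="https://api.example.com")
#   response = call(step, responses=[], config=config)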
|
python
|
'''
Created on 1.12.2016
@author: Darren
Median is the middle value in an ordered integer list. If the size of the list is even, there is no middle value. So the median is the mean of the two middle value.
Examples:
[2,3,4] , the median is 3
[2,3], the median is (2 + 3) / 2 = 2.5
Design a data structure that supports the following two operations:
void addNum(int num) - Add an integer number from the data stream to the data structure.
double findMedian() - Return the median of all elements so far.
For example:
add(1)
add(2)
findMedian() -> 1.5
add(3)
findMedian() -> 2
Credits: Special thanks to @Louis1992 for adding this problem and creating all test cases.
'''
from heapq import *
class MedianFinder:
def __init__(self):
self.heaps = [], []
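        # small is a max-heap (stored negated) holding the lower half;
        # large is a min-heap holding the upper half, equal in size or one element larger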
def addNum(self, num):
small, large = self.heaps
heappush(small, -heappushpop(large, num))
if len(large) < len(small):
heappush(large, -heappop(small))
def findMedian(self):
small, large = self.heaps
if len(large) > len(small):
return float(large[0])
return (large[0] - small[0]) / 2.0
nums=[1,2,3,4,5,6]
mf=MedianFinder()
for num in nums:
mf.addNum(num)
print(mf.findMedian())
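# prints 3.5, the median of [1, 2, 3, 4, 5, 6]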
|
python
|
from tkinter import *
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from search import *
import numpy as np
distances = {}
class TSP_problem(Problem):
""" subclass of Problem to define various functions """
def two_opt(self, state):
""" Neighbour generating function for Traveling Salesman Problem """
neighbour_state = state[:]
left = random.randint(0, len(neighbour_state) - 1)
right = random.randint(0, len(neighbour_state) - 1)
if left > right:
left, right = right, left
neighbour_state[left: right + 1] = reversed(neighbour_state[left: right + 1])
return neighbour_state
def actions(self, state):
""" action that can be excuted in given state """
return [self.two_opt]
def result(self, state, action):
""" result after applying the given action on the given state """
return action(state)
def path_cost(self, c, state1, action, state2):
""" total distance for the Traveling Salesman to be covered if in state2 """
cost = 0
for i in range(len(state2) - 1):
cost += distances[state2[i]][state2[i + 1]]
cost += distances[state2[0]][state2[-1]]
return cost
def value(self, state):
""" value of path cost given negative for the given state """
return -1 * self.path_cost(None, None, None, state)
class TSP_Gui():
""" Class to create gui of Traveling Salesman using simulated annealing where one can
select cities, change speed and temperature. Distances between cities are euclidean
distances between them.
"""
def __init__(self, root, all_cities):
self.root = root
self.vars = []
self.frame_locations = {}
self.calculate_canvas_size()
self.button_text = StringVar()
self.button_text.set("Start")
self.all_cities = all_cities
self.frame_select_cities = Frame(self.root)
self.frame_select_cities.grid(row=1)
self.frame_canvas = Frame(self.root)
self.frame_canvas.grid(row=2)
Label(self.root, text="Map of Romania", font="Times 13 bold").grid(row=0, columnspan=10)
def create_checkboxes(self, side=LEFT, anchor=W):
""" To select cities which are to be a part of Traveling Salesman Problem """
row_number = 0
column_number = 0
for city in self.all_cities:
var = IntVar()
var.set(1)
Checkbutton(self.frame_select_cities, text=city, variable=var).grid(
row=row_number, column=column_number, sticky=W)
self.vars.append(var)
column_number += 1
if column_number == 10:
column_number = 0
row_number += 1
def create_buttons(self):
""" Create start and quit button """
Button(self.frame_select_cities, textvariable=self.button_text,
command=self.run_traveling_salesman).grid(row=3, column=4, sticky=E + W)
Button(self.frame_select_cities, text='Quit', command=self.root.destroy).grid(
row=3, column=5, sticky=E + W)
def run_traveling_salesman(self):
""" Choose selected citites """
cities = []
for i in range(len(self.vars)):
if self.vars[i].get() == 1:
cities.append(self.all_cities[i])
tsp_problem = TSP_problem(cities)
self.button_text.set("Reset")
self.create_canvas(tsp_problem)
def calculate_canvas_size(self):
""" Width and height for canvas """
minx, maxx = sys.maxsize, -1 * sys.maxsize
miny, maxy = sys.maxsize, -1 * sys.maxsize
for value in romania_map.locations.values():
minx = min(minx, value[0])
maxx = max(maxx, value[0])
miny = min(miny, value[1])
maxy = max(maxy, value[1])
# New locations squeezed to fit inside the map of romania
for name, coordinates in romania_map.locations.items():
self.frame_locations[name] = (coordinates[0] / 1.2 - minx +
150, coordinates[1] / 1.2 - miny + 165)
canvas_width = maxx - minx + 200
canvas_height = maxy - miny + 200
self.canvas_width = canvas_width
self.canvas_height = canvas_height
def create_canvas(self, problem):
""" creating map with cities """
map_canvas = Canvas(self.frame_canvas, width=self.canvas_width, height=self.canvas_height)
map_canvas.grid(row=3, columnspan=10)
current = Node(problem.initial)
map_canvas.delete("all")
self.romania_image = PhotoImage(file="../images/romania_map.png")
map_canvas.create_image(self.canvas_width / 2, self.canvas_height / 2,
image=self.romania_image)
cities = current.state
for city in cities:
x = self.frame_locations[city][0]
y = self.frame_locations[city][1]
map_canvas.create_oval(x - 3, y - 3, x + 3, y + 3,
fill="red", outline="red")
map_canvas.create_text(x - 15, y - 10, text=city)
self.cost = StringVar()
Label(self.frame_canvas, textvariable=self.cost, relief="sunken").grid(
row=2, columnspan=10)
self.speed = IntVar()
speed_scale = Scale(self.frame_canvas, from_=500, to=1, orient=HORIZONTAL,
variable=self.speed, label="Speed ----> ", showvalue=0, font="Times 11",
relief="sunken", cursor="gumby")
speed_scale.grid(row=1, columnspan=5, sticky=N + S + E + W)
self.temperature = IntVar()
temperature_scale = Scale(self.frame_canvas, from_=100, to=0, orient=HORIZONTAL,
length=200, variable=self.temperature, label="Temperature ---->",
font="Times 11", relief="sunken", showvalue=0, cursor="gumby")
temperature_scale.grid(row=1, column=5, columnspan=5, sticky=N + S + E + W)
self.simulated_annealing_with_tunable_T(problem, map_canvas)
def exp_schedule(k=100, lam=0.03, limit=1000):
""" One possible schedule function for simulated annealing """
return lambda t: (k * math.exp(-lam * t) if t < limit else 0)
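    # note: exp_schedule() above is evaluated once at class-definition time to
    # supply the default schedule argument below, which is why it takes no self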
def simulated_annealing_with_tunable_T(self, problem, map_canvas, schedule=exp_schedule()):
""" Simulated annealing where temperature is taken as user input """
current = Node(problem.initial)
        while True:
T = schedule(self.temperature.get())
if T == 0:
return current.state
neighbors = current.expand(problem)
if not neighbors:
return current.state
            next_node = random.choice(neighbors)
            delta_e = problem.value(next_node.state) - problem.value(current.state)
            if delta_e > 0 or probability(math.exp(delta_e / T)):
                map_canvas.delete("poly")
                current = next_node
self.cost.set("Cost = " + str('%0.3f' % (-1 * problem.value(current.state))))
points = []
for city in current.state:
points.append(self.frame_locations[city][0])
points.append(self.frame_locations[city][1])
map_canvas.create_polygon(points, outline='red', width=3, fill='', tag="poly")
map_canvas.update()
map_canvas.after(self.speed.get())
def main():
all_cities = []
for city in romania_map.locations.keys():
distances[city] = {}
all_cities.append(city)
all_cities.sort()
# distances['city1']['city2'] contains euclidean distance between their coordinates
for name_1, coordinates_1 in romania_map.locations.items():
for name_2, coordinates_2 in romania_map.locations.items():
distances[name_1][name_2] = np.linalg.norm(
[coordinates_1[0] - coordinates_2[0], coordinates_1[1] - coordinates_2[1]])
distances[name_2][name_1] = np.linalg.norm(
[coordinates_1[0] - coordinates_2[0], coordinates_1[1] - coordinates_2[1]])
root = Tk()
root.title("Traveling Salesman Problem")
cities_selection_panel = TSP_Gui(root, all_cities)
cities_selection_panel.create_checkboxes()
cities_selection_panel.create_buttons()
root.mainloop()
if __name__ == '__main__':
main()
|
python
|
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.animation as animation
from matplotlib.text import OffsetFrom
import numpy as np
import csv
a = []
b = []
with open('curvatest.csv','r') as csvfile:
plots = csv.reader(csvfile, delimiter=',')
for row in plots:
a.append(float(row[0])/10)
b.append(float(row[1])/10)
print(a)
t = np.linspace(1, 12, 26)
out_a= np.asarray(b)
out_b= np.asarray(a)
x_watts = out_a ** 2
target_noise_db = 30
prom=0
text=r"80 OI "
c_red=[1.0,0.5,0.5]
c_blue=[0.5,0.5,1.0]
color=c_blue
fig, ax = plt.subplots(figsize=(3, 3))
el = Ellipse((2, -1), 0.5, 0.5)
ax.add_patch(el)
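# Replot the trace ten times, lowering the injected noise power by 1 dB per pass
# so each successive frame looks cleaner.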
for i in range(10):
plt.title('PEATC')
plt.ylabel('Amplitud (uV)')
plt.xlabel('Tiempo (ms)')
#plt.yticks([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17])
#plt.axis([0,12,0,18])
ax.grid(True)
target_noise_db = target_noise_db - 1
target_noise_watts = 10 ** (target_noise_db / 10)
mean_noise = 0
noise_volts = np.random.normal(mean_noise, np.sqrt(target_noise_watts), len(x_watts))
y_volts = out_a + noise_volts
ytext=y_volts[0]
xtext=out_b[0]
prom=prom+1
line, = ax.plot(out_b, y_volts)
ann = ax.annotate(text,
xy=(xtext,ytext), xycoords='data',
xytext=(8, 0), textcoords='offset points',
size=30, va="center",
bbox=dict(boxstyle="round", fc=(color), ec="none"),
arrowprops=dict(arrowstyle="wedge,tail_width=1.",
fc=(color), ec="none",
patchA=None,
patchB=el,
relpos=(0.2, 0.5)))
#ax.text(right, top, 'right bottom',
# horizontalalignment='right',
# verticalalignment='bottom',
# transform=ax.transAxes)
plt.pause(0.2)
plt.cla()
print("ok")
plt.show()
|
python
|
#!/usr/bin/env python
import argparse
parser = argparse.ArgumentParser(description='Gossip Chat via GPT2')
parser.add_argument('-m', '--model', dest="model", help="pretrained model path")
parser.add_argument('-c', '--config', dest="config", help="model config path")
parser.add_argument('-p', '--port', dest='port', type=int, default=5000, help="listen port, default is 5000")
parser.add_argument('--device', dest="device", default="cuda:0", help="choose to use cpu or cuda:x, default is cuda:0")
parser.add_argument('--backend', dest='backend', default='shell', help="choose for backend from: shell, restful, default is shell")
args = parser.parse_args()
from gossipbot.backend import Backend
s = Backend(backend_type=args.backend,
model_path=args.model,
config_path=args.config,
device=args.device,
port=args.port)
s.run()
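# Example invocation (script name and paths are placeholders):
#   python <this script> -m ./model.bin -c ./config.json --backend restful -p 8080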
|
python
|
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
from .models import Employee
from django.forms import ModelForm
from .models import Order
from .models import Customer
from .models import Tag
from .models import Product
class TagForm(ModelForm):
    class Meta:
        model = Tag
        fields = '__all__'
class ProductForm(ModelForm):
class Meta:
model = Product
fields = '__all__'
class OrderForm(ModelForm):
class Meta:
model = Order
fields = '__all__'
class CustomerForm(forms.ModelForm):
class Meta:
model = Customer
fields = ('name', 'phone', 'email')
def __init__(self, *args, **kwargs):
super(CustomerForm,self).__init__(*args, **kwargs)
self.fields['email'].required = False
self.fields['phone'].required = False
class EmployeeForm(forms.ModelForm):
class Meta:
model = Employee
fields = ('fullname','age','gender','email','mobile','emp_code','position')
labels = {
'fullname': 'Full Name',
'age': 'Age',
'gender': 'Gender',
'email': 'Email',
'mobile': 'Mobile Number',
'emp_code':'Employee Code',
'position': 'Position',
}
def __init__(self, *args, **kwargs):
super(EmployeeForm,self).__init__(*args, **kwargs)
self.fields['gender'].empty_label = "Select"
self.fields['position'].empty_label = "Select"
self.fields['emp_code'].required = True
class CreateUserForm(UserCreationForm):
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
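# Usage sketch: email and phone are made optional in CustomerForm.__init__,
# so a name alone validates (hypothetical data):
#   form = CustomerForm(data={"name": "Ada"})
#   form.is_valid()  # -> True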
|
python
|
from django.http import HttpResponse
from django.shortcuts import reverse
from django.views import View
class HeavenTestAPIView(View):
def get(self, request):
link_format = "<a href='{reversed_link}'>{link}</a>"
example_urls = [link_format.format(reversed_link=reverse(link), link=link) for link in (
'example_http',
'example_json',
'example_json_proxy',
'example_redirect',
)]
try:
example_urls += [
link_format.format(reversed_link=reverse(link), link=link)
for link in ['example_rest', 'example_rest_proxy']
]
except Exception:
pass
return HttpResponse("<br>".join(example_urls))
|
python
|
"""
@author: Gabriele Girelli
@contact: [email protected]
"""
import argparse
from fastx_barber import scriptio
from fastx_barber.const import PATTERN_EXAMPLE, FlagData
from fastx_barber.exception import enable_rich_assert
from fastx_barber.flag import (
FastqFlagExtractor,
FlagStats,
get_fastx_flag_extractor,
)
from fastx_barber.io import ChunkMerger
from fastx_barber.match import AlphaNumericPattern, FastxMatcher
from fastx_barber.qual import setup_qual_filters
from fastx_barber.scriptio import get_handles, get_split_handles
from fastx_barber.scripts import arguments as ap
from fastx_barber.seqio import (
get_fastx_format,
SimpleFastxRecord,
SimpleFastxWriter,
SimpleSplitFastxWriter,
)
from fastx_barber.trim import get_fastx_trimmer
import joblib # type: ignore
import logging
import regex as re # type: ignore
from rich.logging import RichHandler # type: ignore
import sys
from typing import Dict, List, Tuple, Union
logging.basicConfig(
level=logging.INFO,
format="%(message)s",
handlers=[RichHandler(markup=True, rich_tracebacks=True)],
)
def init_parser(subparsers: argparse._SubParsersAction) -> argparse.ArgumentParser:
parser = subparsers.add_parser(
"extract",
description="Extract flags and trim the records of a FASTX file.",
formatter_class=argparse.RawDescriptionHelpFormatter,
help="Extract flags and trim the records of a FASTX file.",
)
parser.add_argument(
"input",
type=str,
metavar="in.fastx[.gz]",
help="""Path to the fasta/q file to trim.""",
)
parser.add_argument(
"output",
type=str,
metavar="out.fastx[.gz]",
help="Path to fasta/q file where to write trimmed records. "
+ "Format will match the input.",
)
parser.add_argument(
"--pattern",
type=str,
help="Pattern to match to reads and extract flagged groups. "
+ f"Remember to use quotes. Example: '{PATTERN_EXAMPLE}'",
)
parser = ap.add_version_option(parser)
advanced = parser.add_argument_group("advanced arguments")
advanced = ap.add_unmatched_output_option(advanced)
advanced = ap.add_flag_delim_option(advanced)
advanced.add_argument(
"--selected-flags",
type=str,
nargs="+",
help="Space-separated names of flags to be extracted. "
+ "By default it extracts all flags.",
)
advanced = ap.add_flagstats_option(advanced)
advanced = ap.add_split_by_option(advanced)
advanced = ap.add_filter_qual_flags_option(advanced)
advanced = ap.add_filter_qual_output_option(advanced)
advanced = ap.add_phred_offset_option(advanced)
advanced.add_argument(
"--no-qual-flags",
action="store_const",
dest="qual_flags",
const=False,
default=True,
help="Do not extract quality flags (when running on a fastq file).",
)
advanced.add_argument(
"--simple-pattern",
action="store_const",
dest="simple_pattern",
const=True,
default=False,
help="Parse pattern as 'simple' (alphanumeric) pattern.",
)
advanced = ap.add_comment_space_option(advanced)
advanced = ap.add_compress_level_option(advanced)
advanced = ap.add_log_file_option(advanced)
advanced = ap.add_chunk_size_option(advanced)
advanced = ap.add_threads_option(advanced)
advanced = ap.add_tempdir_option(advanced)
parser.set_defaults(parse=parse_arguments, run=run)
return parser
@enable_rich_assert
def parse_arguments(args: argparse.Namespace) -> argparse.Namespace:
assert 1 == len(args.flag_delim)
args.threads = ap.check_threads(args.threads)
args = scriptio.set_tempdir(args)
if args.pattern is None:
logging.info(
"No pattern specified (--pattern), nothing to do. :person_shrugging:"
)
sys.exit()
args.pattern = (
AlphaNumericPattern(args.pattern)
if args.simple_pattern
else re.compile(args.pattern)
)
if args.log_file is not None:
scriptio.add_log_file_handler(args.log_file)
ap.log_args(args)
logging.info("[bold underline red]Flag extraction[/]")
if args.selected_flags is not None:
logging.info(f"Selected flags\t{args.selected_flags}")
logging.info(f"Flag stats\t{args.flagstats}")
logging.info(f"Flag delim\t'{args.flag_delim}'")
logging.info(f"Comment delim\t'{args.comment_space}'")
logging.info(f"Quality flags\t{args.qual_flags}")
if args.split_by is not None:
logging.info(f"Split by\t'{args.split_by}'")
return args
ChunkDetails = Tuple[int, int, int, FlagStats]
def run_chunk(
chunk: List[SimpleFastxRecord],
cid: int,
args: argparse.Namespace,
) -> ChunkDetails:
fmt, _ = get_fastx_format(args.input)
OHC: Union[SimpleFastxWriter, SimpleSplitFastxWriter, None]
FHC: Union[SimpleFastxWriter, SimpleSplitFastxWriter, None]
OHC, UHC, FHC, filter_output_fun = (
get_handles(fmt, cid, args)
if args.split_by is None
else get_split_handles(fmt, cid, args)
)
foutput = scriptio.get_output_fun(OHC, UHC)
matcher = FastxMatcher(args.pattern)
trimmer = get_fastx_trimmer(fmt)
quality_flag_filters, filter_fun = setup_qual_filters(
args.filter_qual_flags, args.phred_offset
)
flag_extractor = get_fastx_flag_extractor(fmt)(args.selected_flags, args.flagstats)
flag_extractor.flag_delim = args.flag_delim
flag_extractor.comment_space = args.comment_space
if isinstance(flag_extractor, FastqFlagExtractor):
flag_extractor.extract_qual_flags = args.qual_flags
filtered_counter = 0
for record in chunk:
flags: Dict[str, FlagData] = {}
match, matched = matcher.do(record)
if matched:
flags = flag_extractor.extract_all(record, match)
flag_extractor.update_stats(flags)
flags_selected = flag_extractor.apply_selection(flags)
record = flag_extractor.update(record, flags_selected)
record = trimmer.trim_re(record, match)
pass_filters = filter_fun(flags, quality_flag_filters)
if not pass_filters:
filtered_counter += 1
filter_output_fun(record, flags)
continue
foutput[matched](record, flags)
SimpleFastxWriter.close_handle(OHC)
SimpleFastxWriter.close_handle(UHC)
SimpleFastxWriter.close_handle(FHC)
return (
filtered_counter,
matcher.matched_count,
len(chunk),
flag_extractor.flagstats,
)
def merge_chunk_details(chunk_details: List[ChunkDetails]) -> ChunkDetails:
parsed_counter = 0
matched_counter = 0
filtered_counter = 0
flagstats: FlagStats = FlagStats()
for filtered, matched, parsed, stats in chunk_details:
filtered_counter += filtered
matched_counter += matched
parsed_counter += parsed
for flag_name, data in stats.items():
for k, v in data.items():
flagstats[flag_name][k] += v
return (parsed_counter, matched_counter, filtered_counter, flagstats)
@enable_rich_assert
def run(args: argparse.Namespace) -> None:
fmt, IH = scriptio.get_input_handler(args.input, args.chunk_size)
quality_flag_filters, filter_fun = setup_qual_filters(
args.filter_qual_flags, args.phred_offset, verbose=True
)
logging.info("[bold underline red]Running[/]")
logging.info("Trimming and extracting flags...")
chunk_details = joblib.Parallel(n_jobs=args.threads, verbose=10)(
joblib.delayed(run_chunk)(chunk, cid, args) for chunk, cid in IH
)
logging.info("Merging subprocesses details...")
n_parsed, n_matched, n_filtered, flagstats = merge_chunk_details(chunk_details)
logging.info(
f"{n_matched}/{n_parsed} ({n_matched/n_parsed*100:.2f}%) "
+ "records matched the pattern.",
)
if args.filter_qual_flags is not None and 0 != n_matched:
logging.info(
" ".join(
(
f"{(n_matched-n_filtered)}/{n_matched}",
f"({(n_matched-n_filtered)/n_matched*100:.2f}%)",
"records passed the quality filters.",
)
)
)
if args.flagstats is not None:
flagstats.export(args.output)
logging.info("Merging batch output...")
if args.unmatched_output is not None:
merger = ChunkMerger(args.temp_dir, None)
merger.do(args.unmatched_output, IH.last_chunk_id, "Writing unmatched records")
merger = ChunkMerger(args.temp_dir, args.split_by)
merger.do(args.output, IH.last_chunk_id, "Writing matched records")
if args.filter_qual_output is not None:
merger.do(args.filter_qual_output, IH.last_chunk_id, "Writing filtered records")
logging.info("Done. :thumbs_up: :smiley:")
|
python
|
"""
Minimal and functional version of CPython's argparse module.
"""
import sys
try:
from ucollections import namedtuple
except ImportError:
from collections import namedtuple
class _ArgError(BaseException):
pass
class _Arg:
def __init__(self, names, dest, metavar, arg_type, action, nargs, const, default, required, choices, help):
self.names = names
self.dest = dest
self.metavar = metavar
self.arg_type = arg_type
self.action = action
self.nargs = nargs
self.const = const
self.default = default
self.required = required
self.choices = choices
self.help = help
def parse(self, optname, args):
# parse args for this Arg
def _checked(_arg):
if self.choices and _arg not in self.choices:
raise _ArgError("value %s must be one of this '%s'" % (_arg, ', '.join(map(str, self.choices))))
try:
return self.arg_type(_arg)
except (TypeError, ValueError, OSError):
try:
raise _ArgError('invalid %s value: %s' % (self.arg_type.__name__, _arg))
except AttributeError:
raise _ArgError('value %s is not applicable for type of key %s' % (_arg, optname))
if self.action == "store" or self.action == "append":
if self.nargs is None:
if args:
return _checked(args.pop(0))
else:
raise _ArgError("expecting value for %s" % optname)
elif self.nargs == "?":
if args:
return _checked(args.pop(0))
else:
return self.default
else:
if self.nargs == "*":
n = -1
elif self.nargs == "+":
if not args:
raise _ArgError("expecting value for %s" % optname)
n = -1
else:
n = int(self.nargs)
ret = []
stop_at_opt = True
while args and n != 0:
if stop_at_opt and args[0].startswith("-") and args[0] != "-":
if args[0] == "--":
stop_at_opt = False
args.pop(0)
else:
break
else:
ret.append(_checked(args.pop(0)))
n -= 1
if n > 0:
raise _ArgError("expecting value for %s" % optname)
return ret
elif self.action == "store_const":
return self.const
elif self.action == "append":
if args:
return _checked(args.pop(0))
else:
raise _ArgError("expecting value for %s" % optname)
else:
assert False
class FileType:
def __init__(self, mode='r', bufsize=-1, encoding=None, errors=None):
self._mode = mode
self._bufsize = bufsize
self._encoding = encoding
self._errors = errors
def __call__(self, string):
# the special argument "-" means sys.std{in,out}
if string == '-':
if 'r' in self._mode:
return sys.stdin
elif 'w' in self._mode:
return sys.stdout
else:
msg = 'argument "-" with mode %r' % self._mode
raise _ArgError(msg)
# all other arguments are used as file names
try:
# return open(string, self._mode, self._bufsize, self._encoding,self._errors) # incompatible with micropython
return open(string, self._mode)
except OSError as e:
message = "can't open '%s': %s"
raise _ArgError(message % (string, e))
def __repr__(self):
args = self._mode, self._bufsize
kwargs = [('encoding', self._encoding), ('errors', self._errors)]
args_str = ', '.join([repr(arg) for arg in args if arg != -1] +
['%s=%r' % (kw, arg) for kw, arg in kwargs
if arg is not None])
return '%s(%s)' % (type(self).__name__, args_str)
def _dest_from_optnames(opt_names):
dest = opt_names[0]
for name in opt_names:
if name.startswith("--"):
dest = name
break
return dest.lstrip("-").replace("-", "_")
class ArgumentParser:
def __init__(self, *, prog=None, description="", epilog=""):
self.prog = sys.argv[0] if (sys.argv and not prog) else prog
self.description = description
self.epilog = epilog
self.opt = []
self.pos = []
def add_argument(self, *args, **kwargs):
action = kwargs.get("action", "store")
if action == "store_true":
action = "store_const"
const = True
default = kwargs.get("default", False)
elif action == "store_false":
action = "store_const"
const = False
default = kwargs.get("default", True)
else:
const = kwargs.get("const", None)
default = kwargs.get("default", None)
if args and args[0].startswith("-"):
list = self.opt
dest = kwargs.get("dest")
if dest is None:
dest = _dest_from_optnames(args)
else:
list = self.pos
dest = kwargs.get("dest")
if dest is None:
dest = args[0]
if not args:
args = [dest]
arg_type = kwargs.get("type", str)
nargs = kwargs.get("nargs", None)
metavar = kwargs.get("metavar", None)
required = kwargs.get("required", False)
choices = kwargs.get("choices", None)
help = kwargs.get("help", "")
        arg_list.append(
            _Arg(args, dest, metavar, arg_type, action, nargs, const, default, required, choices, help))
def usage(self, full):
# print short usage
print("usage: %s [-h, --help]" % self.prog, end="")
def render_arg(arg):
if arg.action in ["store", "append"]:
if arg.metavar:
arg_for_render = "%s" % arg.metavar.upper()
elif arg.choices:
arg_for_render = "[%s]" % ", ".join(arg.choices)
else:
arg_for_render = arg.dest.upper()
if arg.nargs is None:
return " %s" % arg_for_render
if isinstance(arg.nargs, int):
return " %s(x%d)" % (arg_for_render, arg.nargs)
else:
return " [%s...]" % arg_for_render
else:
return ""
for opt in self.opt:
print(" [%s%s]" % (', '.join(opt.names), render_arg(opt)), end="")
for pos in self.pos:
print(render_arg(pos), end="")
print()
if not full:
return
# print full information
print()
if self.description:
print(self.description)
if self.pos:
print("\nPositional arguments:")
for pos in self.pos:
print(" %-20s%s" % (pos.names[0], pos.help))
print("\nNamed arguments:")
print(" -h, --help show this message and exit")
for opt in self.opt:
            # Don't show help with possible values for opt; it stays in "usage" anyway.
# print(" %-20s%s " % (', '.join(opt.names) + render_arg(opt).upper(), opt.help))
print(" %-20s%s" % (', '.join(opt.names), opt.help))
print("\n", self.epilog)
def parse_args(self, args=None):
return self._parse_args_impl(args, False)
def parse_known_args(self, args=None):
return self._parse_args_impl(args, True)
def _parse_args_impl(self, args, return_unknown):
if args is None:
args = sys.argv[1:]
else:
args = args[:]
try:
return self._parse_args(args, return_unknown)
except _ArgError as e:
self.usage(False)
print("error:", e)
sys.exit(2)
def _parse_args(self, args, return_unknown):
# add optional(named) args with defaults
arg_dest = []
arg_vals = []
for opt in self.opt:
arg_dest.append(opt.dest)
arg_vals.append(opt.default)
# deal with unknown arguments, if needed
unknown = []
def consume_unknown():
while args and not args[0].startswith("-"):
unknown.append(args.pop(0))
# parse all args
parsed_pos = False
while args or not parsed_pos:
if args and args[0].startswith("-") and args[0] != "-" and args[0] != "--":
# optional(named) arguments
a = args.pop(0)
if a in ("-h", "--help"):
self.usage(True)
sys.exit(0)
found = False
for i, opt in enumerate(self.opt):
if a in opt.names:
if opt.action == "append":
                            if arg_vals[i] is None:
                                arg_vals[i] = []
arg_vals[i].append(opt.parse(a, args))
found = True
else:
arg_vals[i] = opt.parse(a, args)
found = True
break
if not found:
if return_unknown:
unknown.append(a)
consume_unknown()
else:
raise _ArgError("unknown option %s" % a)
else:
# positional arguments
if parsed_pos:
if return_unknown:
unknown = unknown + args
break
else:
raise _ArgError("extra args: %s" % " ".join(args))
for pos in self.pos:
arg_dest.append(pos.dest)
arg_vals.append(pos.parse(pos.names[0], args))
parsed_pos = True
if return_unknown:
consume_unknown()
# checks the required arguments
        required_but_not_used = [arg.dest for i, arg in enumerate(self.opt) if arg.required and arg_vals[i] is None]
        if required_but_not_used:
            raise _ArgError("the following options are required: '%s'" % ", ".join(required_but_not_used))
values = namedtuple("args", arg_dest)(*arg_vals)
return (values, unknown) if return_unknown else values
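# Usage sketch, exercising only the API defined above:
#   parser = ArgumentParser(description="demo")
#   parser.add_argument("name")
#   parser.add_argument("-v", "--verbose", action="store_true")
#   args = parser.parse_args(["world", "-v"])
#   # args.name == "world", args.verbose is True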
|
python
|
import json
from hashlib import sha256
from typing import List
from ..configs import BaseLayerConfig
def create_dir_name_from_config(config: BaseLayerConfig, prefix: str = "") -> str:
config_class_name = config.__class__.__name__
config_json = json.dumps(config.to_dict_without_cache())
return f"{prefix}{sha256((config_class_name + config_json).encode()).hexdigest()}"
def create_file_name_from_path(path: str, ext: str, prefix: str = "") -> str:
return f"{prefix}{sha256(path.encode()).hexdigest()}.{ext}"
def create_file_name_from_paths(paths: List[str], ext: str, prefix: str = "") -> str:
return f"{prefix}{sha256(json.dumps(sorted(paths)).encode()).hexdigest()}.{ext}"
|
python
|
#!/usr/bin/env python3
# @Time : 27/6/29 2:46 PM
# @Author : fangcheng.ji
# @FileName: qfl_atss.py
import math
import torch
import torch.nn.functional as F
from torch import nn
import os
from typing import Dict, List
from .fcos import Scale
from detectron2.modeling.proposal_generator.build import PROPOSAL_GENERATOR_REGISTRY
from detectron2.layers import ShapeSpec, cat, ml_nms, quality_focal_loss
from detectron2.layers import DFConv2d, get_norm
from detectron2.structures import Instances, Boxes, pairwise_iou, matched_boxlist_iou
from fvcore.nn import sigmoid_focal_loss_jit
from ..anchor_generator import build_anchor_generator
from ..matcher import Matcher
INF = 100000000
@PROPOSAL_GENERATOR_REGISTRY.register()
class QFLATSS(torch.nn.Module):
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super(QFLATSS, self).__init__()
self.cfg = cfg
self.in_features = cfg.MODEL.ATSS.IN_FEATURES
feature_shapes = [input_shape[f] for f in self.in_features]
in_channels = [f.channels for f in feature_shapes]
assert len(set(in_channels)) == 1, "Each level must have the same channel!"
in_channels = in_channels[0]
self.fcos_head = ATSSHead(cfg, in_channels)
box_coder = BoxCoder(cfg)
self.loss_evaluator = ATSSLossComputation(cfg, box_coder)
# for inference
self.box_selector_test = ATSSPostProcessor(
pre_nms_thresh=cfg.MODEL.ATSS.INFERENCE_TH,
pre_nms_top_n=cfg.MODEL.ATSS.PRE_NMS_TOP_N,
nms_thresh=cfg.MODEL.ATSS.NMS_TH,
fpn_post_nms_top_n=cfg.MODEL.ATSS.POST_NMS_TOPK_TEST,
min_size=0,
num_classes=cfg.MODEL.ATSS.NUM_CLASSES + 1, # add background
bbox_aug_enabled=cfg.TEST.AUG.ENABLED,
box_coder=box_coder,
)
# self.anchor_generator = make_anchor_generator_atss(cfg)
self.anchor_generator = build_anchor_generator(cfg, feature_shapes)
def forward(self, images, features, gt_instances):
features = [features[f] for f in self.in_features]
box_cls, box_regression = self.fcos_head(features)
anchors = self.anchor_generator(features)
if self.training:
return self._forward_train(box_cls, box_regression, gt_instances, anchors)
else:
return self._forward_test(images.image_sizes, box_cls, box_regression, anchors)
def _forward_train(self, box_cls, box_regression, gt_instances, anchors):
loss_box_cls, loss_box_reg = self.loss_evaluator(
box_cls, box_regression, gt_instances, anchors
)
losses = {
"loss_cls": loss_box_cls,
"loss_reg": loss_box_reg,
}
return None, losses
def _forward_test(self, image_sizes, box_cls, box_regression, anchors):
boxes = self.box_selector_test(image_sizes, box_cls, box_regression, anchors)
return boxes, {}
class ATSSHead(torch.nn.Module):
def __init__(self, cfg, in_channels):
super(ATSSHead, self).__init__()
self.cfg = cfg
num_classes = cfg.MODEL.ATSS.NUM_CLASSES
num_anchors = len(cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS[0]) * len(cfg.MODEL.ANCHOR_GENERATOR.SIZES[0])
head_configs = {"cls": (cfg.MODEL.ATSS.NUM_CONVS,
False),
"bbox": (cfg.MODEL.ATSS.NUM_CONVS,
cfg.MODEL.ATSS.USE_DCN_IN_TOWER),
}
norm = None if cfg.MODEL.ATSS.NORM == "none" else cfg.MODEL.ATSS.NORM
for head in head_configs:
tower = []
num_convs, use_deformable = head_configs[head]
if use_deformable:
conv_func = DFConv2d
else:
conv_func = nn.Conv2d
for i in range(num_convs):
tower.append(conv_func(
in_channels, in_channels,
kernel_size=3, stride=1,
padding=1, bias=True
))
if norm == "GN":
tower.append(nn.GroupNorm(32, in_channels))
elif norm is not None:
tower.append(get_norm(norm, in_channels))
tower.append(nn.ReLU())
self.add_module('{}_tower'.format(head),
nn.Sequential(*tower))
self.cls_logits = nn.Conv2d(
in_channels, num_anchors * num_classes, kernel_size=3, stride=1,
padding=1
)
self.bbox_pred = nn.Conv2d(
in_channels, num_anchors * 4, kernel_size=3, stride=1,
padding=1
)
# initialization
for modules in [self.cls_tower, self.bbox_tower,
self.cls_logits, self.bbox_pred,
]:
for l in modules.modules():
if isinstance(l, nn.Conv2d):
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
# initialize the bias for focal loss
prior_prob = cfg.MODEL.ATSS.PRIOR_PROB
bias_value = -math.log((1 - prior_prob) / prior_prob)
torch.nn.init.constant_(self.cls_logits.bias, bias_value)
if self.cfg.MODEL.ATSS.REGRESSION_TYPE == 'POINT':
assert num_anchors == 1, "regressing from a point only support num_anchors == 1"
torch.nn.init.constant_(self.bbox_pred.bias, 4)
self.scales = nn.ModuleList([Scale(init_value=1.0) for _ in range(5)])
def forward(self, x):
logits = []
bbox_reg = []
for l, feature in enumerate(x):
cls_tower = self.cls_tower(feature)
box_tower = self.bbox_tower(feature)
logits.append(self.cls_logits(cls_tower))
bbox_pred = self.scales[l](self.bbox_pred(box_tower))
if self.cfg.MODEL.ATSS.REGRESSION_TYPE == 'POINT':
bbox_pred = F.relu(bbox_pred)
bbox_reg.append(bbox_pred)
return logits, bbox_reg
class ATSSLossComputation(object):
def __init__(self, cfg, box_coder):
self.cfg = cfg
self.focal_loss_alpha = cfg.MODEL.ATSS.LOSS_ALPHA
self.focal_loss_gamma = cfg.MODEL.ATSS.LOSS_GAMMA
self.num_classes = cfg.MODEL.ATSS.NUM_CLASSES
self.matcher = Matcher(
cfg.MODEL.ATSS.IOU_THRESHOLDS,
cfg.MODEL.ATSS.IOU_LABELS,
allow_low_quality_matches=True
)
self.box_coder = box_coder
def GIoULoss(self, pred, target, anchor, weight=None):
pred_boxes = self.box_coder.decode(pred.view(-1, 4), anchor.view(-1, 4))
pred_x1 = pred_boxes[:, 0]
pred_y1 = pred_boxes[:, 1]
pred_x2 = pred_boxes[:, 2]
pred_y2 = pred_boxes[:, 3]
pred_x2 = torch.max(pred_x1, pred_x2)
pred_y2 = torch.max(pred_y1, pred_y2)
pred_area = (pred_x2 - pred_x1) * (pred_y2 - pred_y1)
gt_boxes = self.box_coder.decode(target.view(-1, 4), anchor.view(-1, 4))
target_x1 = gt_boxes[:, 0]
target_y1 = gt_boxes[:, 1]
target_x2 = gt_boxes[:, 2]
target_y2 = gt_boxes[:, 3]
target_area = (target_x2 - target_x1) * (target_y2 - target_y1)
x1_intersect = torch.max(pred_x1, target_x1)
y1_intersect = torch.max(pred_y1, target_y1)
x2_intersect = torch.min(pred_x2, target_x2)
y2_intersect = torch.min(pred_y2, target_y2)
area_intersect = torch.zeros(pred_x1.size()).to(pred)
mask = (y2_intersect > y1_intersect) * (x2_intersect > x1_intersect)
area_intersect[mask] = (x2_intersect[mask] - x1_intersect[mask]) * (y2_intersect[mask] - y1_intersect[mask])
x1_enclosing = torch.min(pred_x1, target_x1)
y1_enclosing = torch.min(pred_y1, target_y1)
x2_enclosing = torch.max(pred_x2, target_x2)
y2_enclosing = torch.max(pred_y2, target_y2)
area_enclosing = (x2_enclosing - x1_enclosing) * (y2_enclosing - y1_enclosing) + 1e-7
area_union = pred_area + target_area - area_intersect + 1e-7
ious = area_intersect / area_union
gious = ious - (area_enclosing - area_union) / area_enclosing
losses = 1 - gious
if weight is not None and weight.sum() > 0:
return (losses * weight).sum()
else:
assert losses.numel() != 0
return losses.sum()
def DIoULoss(self, pred, target, anchor, weight=None):
pred_boxes = self.box_coder.decode(pred.view(-1, 4), anchor.view(-1, 4))
pred_x1 = pred_boxes[:, 0]
pred_y1 = pred_boxes[:, 1]
pred_x2 = pred_boxes[:, 2]
pred_y2 = pred_boxes[:, 3]
pred_x2 = torch.max(pred_x1, pred_x2)
pred_y2 = torch.max(pred_y1, pred_y2)
pred_area = (pred_x2 - pred_x1) * (pred_y2 - pred_y1)
pred_cx = (pred_x2 + pred_x1) / 2.0
pred_cy = (pred_y2 + pred_y1) / 2.0
gt_boxes = self.box_coder.decode(target.view(-1, 4), anchor.view(-1, 4))
target_x1 = gt_boxes[:, 0]
target_y1 = gt_boxes[:, 1]
target_x2 = gt_boxes[:, 2]
target_y2 = gt_boxes[:, 3]
target_area = (target_x2 - target_x1) * (target_y2 - target_y1)
target_cx = (target_x2 + target_x1) / 2.0
target_cy = (target_y2 + target_y1) / 2.0
x1_intersect = torch.max(pred_x1, target_x1)
y1_intersect = torch.max(pred_y1, target_y1)
x2_intersect = torch.min(pred_x2, target_x2)
y2_intersect = torch.min(pred_y2, target_y2)
area_intersect = torch.zeros(pred_x1.size()).to(pred)
mask = (y2_intersect > y1_intersect) * (x2_intersect > x1_intersect)
area_intersect[mask] = (x2_intersect[mask] - x1_intersect[mask]) * (y2_intersect[mask] - y1_intersect[mask])
x1_enclosing = torch.min(pred_x1, target_x1)
y1_enclosing = torch.min(pred_y1, target_y1)
x2_enclosing = torch.max(pred_x2, target_x2)
y2_enclosing = torch.max(pred_y2, target_y2)
c_squared = torch.pow(y2_enclosing - y1_enclosing, 2) + torch.pow(x2_enclosing - x1_enclosing, 2) + 1e-7
d_squared = torch.pow(target_cy - pred_cy, 2) + torch.pow(target_cx - pred_cx, 2)
area_union = pred_area + target_area - area_intersect + 1e-7
ious = area_intersect / area_union
dious = ious - d_squared / c_squared
losses = 1 - dious
if weight is not None and weight.sum() > 0:
return (losses * weight).sum()
else:
assert losses.numel() != 0
return losses.sum()
def prepare_targets(self, gt_instances, anchors):
cls_labels = []
reg_targets = []
anchors_all_level = Boxes.cat(anchors)
for im_i in range(len(gt_instances)):
targets_per_im = gt_instances[im_i]
bboxes_per_im = targets_per_im.gt_boxes
labels_per_im = targets_per_im.gt_classes
num_gt = len(bboxes_per_im)
if num_gt > 0:
if self.cfg.MODEL.ATSS.POSITIVE_TYPE == 'SSC':
object_sizes_of_interest = [[-1, 64], [64, 128], [128, 256], [256, 512], [512, INF]]
                    area_per_im = bboxes_per_im.area()
expanded_object_sizes_of_interest = []
points = []
                    for l, anchors_per_level in enumerate(anchors):
                        anchors_per_level = anchors_per_level.tensor
anchors_cx_per_level = (anchors_per_level[:, 2] + anchors_per_level[:, 0]) / 2.0
anchors_cy_per_level = (anchors_per_level[:, 3] + anchors_per_level[:, 1]) / 2.0
points_per_level = torch.stack((anchors_cx_per_level, anchors_cy_per_level), dim=1)
points.append(points_per_level)
object_sizes_of_interest_per_level = \
points_per_level.new_tensor(object_sizes_of_interest[l])
expanded_object_sizes_of_interest.append(
object_sizes_of_interest_per_level[None].expand(len(points_per_level), -1)
)
expanded_object_sizes_of_interest = torch.cat(expanded_object_sizes_of_interest, dim=0)
points = torch.cat(points, dim=0)
xs, ys = points[:, 0], points[:, 1]
                    l = xs[:, None] - bboxes_per_im.tensor[:, 0][None]
                    t = ys[:, None] - bboxes_per_im.tensor[:, 1][None]
                    r = bboxes_per_im.tensor[:, 2][None] - xs[:, None]
                    b = bboxes_per_im.tensor[:, 3][None] - ys[:, None]
reg_targets_per_im = torch.stack([l, t, r, b], dim=2)
is_in_boxes = reg_targets_per_im.min(dim=2)[0] > 0.01
max_reg_targets_per_im = reg_targets_per_im.max(dim=2)[0]
is_cared_in_the_level = \
(max_reg_targets_per_im >= expanded_object_sizes_of_interest[:, [0]]) & \
(max_reg_targets_per_im <= expanded_object_sizes_of_interest[:, [1]])
locations_to_gt_area = area_per_im[None].repeat(len(points), 1)
locations_to_gt_area[is_in_boxes == 0] = INF
locations_to_gt_area[is_cared_in_the_level == 0] = INF
locations_to_min_area, locations_to_gt_inds = locations_to_gt_area.min(dim=1)
cls_labels_per_im = labels_per_im[locations_to_gt_inds]
cls_labels_per_im[locations_to_min_area == INF] = self.num_classes
matched_gts = bboxes_per_im[locations_to_gt_inds]
elif self.cfg.MODEL.ATSS.POSITIVE_TYPE == 'ATSS':
num_anchors_per_loc = len(self.cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS[0]) \
* len(self.cfg.MODEL.ANCHOR_GENERATOR.SIZES[0])
# TODO deal with num gt is 0
num_anchors_per_level = [len(anchors_per_level) for anchors_per_level in anchors]
ious = pairwise_iou(anchors_all_level, bboxes_per_im)
gt_cx = (bboxes_per_im.tensor[:, 2] + bboxes_per_im.tensor[:, 0]) / 2.0
gt_cy = (bboxes_per_im.tensor[:, 3] + bboxes_per_im.tensor[:, 1]) / 2.0
gt_points = torch.stack((gt_cx, gt_cy), dim=1)
anchors_cx_per_im = (anchors_all_level.tensor[:, 2] + anchors_all_level.tensor[:, 0]) / 2.0
anchors_cy_per_im = (anchors_all_level.tensor[:, 3] + anchors_all_level.tensor[:, 1]) / 2.0
anchor_points = torch.stack((anchors_cx_per_im, anchors_cy_per_im), dim=1)
distances = (anchor_points[:, None, :] - gt_points[None, :, :]).pow(2).sum(-1).sqrt()
# Selecting candidates based on the center distance between anchor box and object
candidate_idxs = []
star_idx = 0
for level, anchors_per_level in enumerate(anchors):
end_idx = star_idx + num_anchors_per_level[level]
distances_per_level = distances[star_idx:end_idx, :]
topk = min(self.cfg.MODEL.ATSS.TOPK * num_anchors_per_loc, num_anchors_per_level[level])
_, topk_idxs_per_level = distances_per_level.topk(topk, dim=0, largest=False)
candidate_idxs.append(topk_idxs_per_level + star_idx)
star_idx = end_idx
candidate_idxs = torch.cat(candidate_idxs, dim=0)
# Using the sum of mean and standard deviation as the IoU threshold to select final positive samples
candidate_ious = ious[candidate_idxs, torch.arange(num_gt)]
iou_mean_per_gt = candidate_ious.mean(0)
iou_std_per_gt = candidate_ious.std(0)
iou_thresh_per_gt = iou_mean_per_gt + iou_std_per_gt
is_pos = candidate_ious >= iou_thresh_per_gt[None, :]
# Limiting the final positive samples’ center to object
anchor_num = anchors_cx_per_im.shape[0]
for ng in range(num_gt):
candidate_idxs[:, ng] += ng * anchor_num
e_anchors_cx = anchors_cx_per_im.view(1, -1).expand(num_gt, anchor_num).contiguous().view(-1)
e_anchors_cy = anchors_cy_per_im.view(1, -1).expand(num_gt, anchor_num).contiguous().view(-1)
candidate_idxs = candidate_idxs.view(-1)
l = e_anchors_cx[candidate_idxs].view(-1, num_gt) - bboxes_per_im.tensor[:, 0]
t = e_anchors_cy[candidate_idxs].view(-1, num_gt) - bboxes_per_im.tensor[:, 1]
r = bboxes_per_im.tensor[:, 2] - e_anchors_cx[candidate_idxs].view(-1, num_gt)
b = bboxes_per_im.tensor[:, 3] - e_anchors_cy[candidate_idxs].view(-1, num_gt)
is_in_gts = torch.stack([l, t, r, b], dim=1).min(dim=1)[0] > 0.01
is_pos = is_pos & is_in_gts
# if an anchor box is assigned to multiple gts, the one with the highest IoU will be selected.
ious_inf = torch.full_like(ious, -INF).t().contiguous().view(-1)
index = candidate_idxs.view(-1)[is_pos.view(-1)]
ious_inf[index] = ious.t().contiguous().view(-1)[index]
ious_inf = ious_inf.view(num_gt, -1).t()
anchors_to_gt_values, anchors_to_gt_indexs = ious_inf.max(dim=1)
cls_labels_per_im = labels_per_im[anchors_to_gt_indexs]
cls_labels_per_im[anchors_to_gt_values == -INF] = self.num_classes
matched_gts = bboxes_per_im[anchors_to_gt_indexs]
elif self.cfg.MODEL.ATSS.POSITIVE_TYPE == 'TOPK':
                    gt_cx = (bboxes_per_im.tensor[:, 2] + bboxes_per_im.tensor[:, 0]) / 2.0
                    gt_cy = (bboxes_per_im.tensor[:, 3] + bboxes_per_im.tensor[:, 1]) / 2.0
gt_points = torch.stack((gt_cx, gt_cy), dim=1)
anchors_cx_per_im = (anchors_all_level.tensor[:, 2] + anchors_all_level.tensor[:, 0]) / 2.0
anchors_cy_per_im = (anchors_all_level.tensor[:, 3] + anchors_all_level.tensor[:, 1]) / 2.0
anchor_points = torch.stack((anchors_cx_per_im, anchors_cy_per_im), dim=1)
distances = (anchor_points[:, None, :] - gt_points[None, :, :]).pow(2).sum(-1).sqrt()
distances = distances / distances.max() / 1000
ious = pairwise_iou(anchors_all_level, bboxes_per_im)
                    is_pos = torch.zeros_like(ious, dtype=torch.bool)
                    for ng in range(num_gt):
                        _, topk_idxs = (ious[:, ng] - distances[:, ng]).topk(self.cfg.MODEL.ATSS.TOPK, dim=0)
                        l = anchors_cx_per_im[topk_idxs] - bboxes_per_im.tensor[ng, 0]
                        t = anchors_cy_per_im[topk_idxs] - bboxes_per_im.tensor[ng, 1]
                        r = bboxes_per_im.tensor[ng, 2] - anchors_cx_per_im[topk_idxs]
                        b = bboxes_per_im.tensor[ng, 3] - anchors_cy_per_im[topk_idxs]
                        is_in_gt = torch.stack([l, t, r, b], dim=1).min(dim=1)[0] > 0.01
                        is_pos[topk_idxs[is_in_gt], ng] = True
                    ious[~is_pos] = -INF
anchors_to_gt_values, anchors_to_gt_indexs = ious.max(dim=1)
cls_labels_per_im = labels_per_im[anchors_to_gt_indexs]
cls_labels_per_im[anchors_to_gt_values == -INF] = self.num_classes
matched_gts = bboxes_per_im[anchors_to_gt_indexs]
elif self.cfg.MODEL.ATSS.POSITIVE_TYPE == 'IoU':
match_quality_matrix = pairwise_iou(bboxes_per_im, anchors_all_level)
matched_idxs = self.matcher(match_quality_matrix)
                    matched_gts = bboxes_per_im[matched_idxs.clamp(min=0)]
                    cls_labels_per_im = labels_per_im[matched_idxs.clamp(min=0)].to(dtype=torch.float32)
                    # Background (negative examples)
                    bg_indices = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
                    cls_labels_per_im[bg_indices] = 0
                    # discard indices that are between thresholds
                    inds_to_discard = matched_idxs == Matcher.BETWEEN_THRESHOLDS
                    cls_labels_per_im[inds_to_discard] = -1
# Limiting positive samples’ center to object
# in order to filter out poor positives and use the centerness branch
pos_idxs = torch.nonzero(cls_labels_per_im > 0).squeeze(1)
pos_anchors_cx = (anchors_all_level.tensor[pos_idxs, 2] + anchors_all_level.tensor[pos_idxs, 0]) / 2.0
pos_anchors_cy = (anchors_all_level.tensor[pos_idxs, 3] + anchors_all_level.tensor[pos_idxs, 1]) / 2.0
                    l = pos_anchors_cx - matched_gts.tensor[pos_idxs, 0]
                    t = pos_anchors_cy - matched_gts.tensor[pos_idxs, 1]
                    r = matched_gts.tensor[pos_idxs, 2] - pos_anchors_cx
                    b = matched_gts.tensor[pos_idxs, 3] - pos_anchors_cy
is_in_gts = torch.stack([l, t, r, b], dim=1).min(dim=1)[0] > 0.01
cls_labels_per_im[pos_idxs[is_in_gts == 0]] = -1
else:
raise NotImplementedError
reg_targets_per_im = self.box_coder.encode(matched_gts.tensor, anchors_all_level.tensor)
else: # no gt instance
# all negative
reg_targets_per_im = torch.zeros_like(anchors_all_level.tensor)
cls_labels_per_im = torch.zeros(
len(anchors_all_level.tensor),
dtype=torch.long,
device=anchors_all_level.device
) + self.num_classes
cls_labels.append(cls_labels_per_im)
reg_targets.append(reg_targets_per_im)
return cls_labels, reg_targets
def compute_centerness_targets(self, reg_targets, anchors):
gts = self.box_coder.decode(reg_targets, anchors)
anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
l = anchors_cx - gts[:, 0]
t = anchors_cy - gts[:, 1]
r = gts[:, 2] - anchors_cx
b = gts[:, 3] - anchors_cy
left_right = torch.stack([l, r], dim=1)
top_bottom = torch.stack([t, b], dim=1)
centerness = torch.sqrt((left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * \
(top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
assert not torch.isnan(centerness).any()
return centerness
def compute_iou_score(self, reg_preds, reg_targets, anchors):
gts = self.box_coder.decode(reg_targets, anchors)
preds = self.box_coder.decode(reg_preds, anchors)
gts = Boxes(gts)
preds = Boxes(preds)
return matched_boxlist_iou(preds, gts)
def __call__(self, box_cls, box_regression, gt_instances, anchors):
labels, reg_targets = self.prepare_targets(gt_instances, anchors)
N = len(labels)
box_cls_flatten, box_regression_flatten = concat_box_prediction_layers(box_cls, box_regression)
labels_flatten = torch.cat(labels, dim=0)
reg_targets_flatten = torch.cat(reg_targets, dim=0)
anchors_flatten = torch.cat([Boxes.cat(anchors).tensor for _ in range(N)], dim=0)
pos_inds = torch.nonzero(labels_flatten != self.num_classes).squeeze(1)
num_gpus = get_num_gpus()
total_num_pos = reduce_sum(pos_inds.new_tensor([pos_inds.numel()])).item()
num_pos_avg_per_gpu = max(total_num_pos / float(num_gpus), 1.0)
# one hot label for focal loss
class_target = torch.zeros_like(box_cls_flatten)
class_target[pos_inds, labels_flatten[pos_inds]] = 1
# cls_loss = sigmoid_focal_loss_jit(
# box_cls_flatten,
# class_target,
# alpha=self.focal_loss_alpha,
# gamma=self.focal_loss_gamma,
# reduction="sum"
# ) / num_pos_avg_per_gpu
box_regression_flatten = box_regression_flatten[pos_inds]
reg_targets_flatten = reg_targets_flatten[pos_inds]
anchors_flatten = anchors_flatten[pos_inds]
centerness_targets = self.compute_centerness_targets(reg_targets_flatten, anchors_flatten)
sum_centerness_targets_avg_per_gpu = reduce_sum(centerness_targets.sum()).item() / float(num_gpus)
# qfl score
score = torch.zeros(class_target.size()[0], dtype=torch.float32, device=class_target.device)
score[pos_inds] = self.compute_iou_score(
box_regression_flatten.detach(),
reg_targets_flatten,
anchors_flatten
)
cls_loss = quality_focal_loss(
box_cls_flatten,
class_target,
score, # IoU score
weight=1.0, # weight = 1.0
beta=self.focal_loss_gamma,
reduction='mean',
avg_factor=num_pos_avg_per_gpu,
)
if pos_inds.numel() > 0:
reg_loss = self.DIoULoss(box_regression_flatten, reg_targets_flatten, anchors_flatten,
weight=centerness_targets) / sum_centerness_targets_avg_per_gpu
else:
reg_loss = box_regression_flatten.sum()
return cls_loss, reg_loss * self.cfg.MODEL.ATSS.REG_LOSS_WEIGHT
class ATSSPostProcessor(torch.nn.Module):
def __init__(
self,
pre_nms_thresh,
pre_nms_top_n,
nms_thresh,
fpn_post_nms_top_n,
min_size,
num_classes,
box_coder,
bbox_aug_enabled=False,
):
super(ATSSPostProcessor, self).__init__()
self.pre_nms_thresh = pre_nms_thresh
self.pre_nms_top_n = pre_nms_top_n
self.nms_thresh = nms_thresh
self.fpn_post_nms_top_n = fpn_post_nms_top_n
self.min_size = min_size
self.num_classes = num_classes
self.bbox_aug_enabled = bbox_aug_enabled
self.box_coder = box_coder
def forward_for_single_feature_map(self, box_cls, box_regression, anchors):
N, _, H, W = box_cls.shape
A = box_regression.size(1) // 4
C = box_cls.size(1) // A
# put in the same format as anchors
box_cls = permute_and_flatten(box_cls, N, A, C, H, W)
box_cls = box_cls.sigmoid()
box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)
box_regression = box_regression.reshape(N, -1, 4)
candidate_inds = box_cls > self.pre_nms_thresh
pre_nms_top_n = candidate_inds.view(N, -1).sum(1)
pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n)
results = []
for per_box_cls, per_box_regression, per_pre_nms_top_n, per_candidate_inds \
in zip(box_cls, box_regression, pre_nms_top_n, candidate_inds):
per_box_cls = per_box_cls[per_candidate_inds]
per_box_cls, top_k_indices = per_box_cls.topk(per_pre_nms_top_n, sorted=False)
per_candidate_nonzeros = per_candidate_inds.nonzero()[top_k_indices, :]
per_box_loc = per_candidate_nonzeros[:, 0]
per_class = per_candidate_nonzeros[:, 1]
detections = self.box_coder.decode(
per_box_regression[per_box_loc, :].view(-1, 4),
anchors.tensor[per_box_loc, :].view(-1, 4)
)
pred_boxes = Boxes(detections)
scores = torch.sqrt(per_box_cls)
pred_classes = per_class
results.append((pred_boxes, scores, pred_classes))
return results
def forward(self, image_sizes, box_cls, box_regression, anchors):
sampled_boxes = []
# anchors = list(zip(*anchors))
        for o, b, a in zip(box_cls, box_regression, anchors):
            sampled_boxes.append(
                self.forward_for_single_feature_map(o, b, a)
            )
boxlists = []
for i, image_size in enumerate(image_sizes):
boxlist = Instances(image_size)
boxes = []
scores = []
classes = []
for j in range(len(anchors)):
boxes.append(sampled_boxes[j][i][0])
scores.append(sampled_boxes[j][i][1])
classes.append(sampled_boxes[j][i][2])
boxes = Boxes.cat(boxes)
boxes.clip(image_size)
keep = boxes.nonempty(self.min_size)
boxlist.pred_boxes = boxes[keep]
boxlist.scores = torch.cat(scores, dim=0)[keep]
boxlist.pred_classes = torch.cat(classes, dim=0)[keep]
boxlists.append(boxlist)
boxlists = self.select_over_all_levels(boxlists)
return boxlists
# TODO very similar to filter_results from PostProcessor
# but filter_results is per image
# TODO Yang: solve this issue in the future. No good solution
# right now.
def select_over_all_levels(self, boxlists):
num_images = len(boxlists)
results = []
for i in range(num_images):
# multiclass nms
result = ml_nms(boxlists[i], self.nms_thresh)
number_of_detections = len(result)
# Limit to max_per_image detections **over all classes**
if number_of_detections > self.fpn_post_nms_top_n > 0:
cls_scores = result.scores
image_thresh, _ = torch.kthvalue(
cls_scores.cpu(),
number_of_detections - self.fpn_post_nms_top_n + 1
)
keep = cls_scores >= image_thresh.item()
keep = torch.nonzero(keep).squeeze(1)
result = result[keep]
results.append(result)
return results
class BoxCoder(object):
def __init__(self, cfg):
self.cfg = cfg
def encode(self, gt_boxes, anchors):
if self.cfg.MODEL.ATSS.REGRESSION_TYPE == 'POINT':
TO_REMOVE = 1 # TODO remove
anchors_w = anchors[:, 2] - anchors[:, 0] + TO_REMOVE
anchors_h = anchors[:, 3] - anchors[:, 1] + TO_REMOVE
anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
w = self.cfg.MODEL.ANCHOR_GENERATOR.SIZES[0][0] / self.cfg.MODEL.ATSS.FPN_STRIDES[0]
l = w * (anchors_cx - gt_boxes[:, 0]) / anchors_w
t = w * (anchors_cy - gt_boxes[:, 1]) / anchors_h
r = w * (gt_boxes[:, 2] - anchors_cx) / anchors_w
b = w * (gt_boxes[:, 3] - anchors_cy) / anchors_h
targets = torch.stack([l, t, r, b], dim=1)
elif self.cfg.MODEL.ATSS.REGRESSION_TYPE == 'BOX':
TO_REMOVE = 1 # TODO remove
ex_widths = anchors[:, 2] - anchors[:, 0] + TO_REMOVE
ex_heights = anchors[:, 3] - anchors[:, 1] + TO_REMOVE
ex_ctr_x = (anchors[:, 2] + anchors[:, 0]) / 2
ex_ctr_y = (anchors[:, 3] + anchors[:, 1]) / 2
gt_widths = gt_boxes[:, 2] - gt_boxes[:, 0] + TO_REMOVE
gt_heights = gt_boxes[:, 3] - gt_boxes[:, 1] + TO_REMOVE
gt_ctr_x = (gt_boxes[:, 2] + gt_boxes[:, 0]) / 2
gt_ctr_y = (gt_boxes[:, 3] + gt_boxes[:, 1]) / 2
wx, wy, ww, wh = (10., 10., 5., 5.)
targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = ww * torch.log(gt_widths / ex_widths)
targets_dh = wh * torch.log(gt_heights / ex_heights)
            targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh), dim=1)
        else:
            raise NotImplementedError("unsupported REGRESSION_TYPE: %s" % self.cfg.MODEL.ATSS.REGRESSION_TYPE)
        return targets
def decode(self, preds, anchors):
if self.cfg.MODEL.ATSS.REGRESSION_TYPE == 'POINT':
TO_REMOVE = 1 # TODO remove
anchors_w = anchors[:, 2] - anchors[:, 0] + TO_REMOVE
anchors_h = anchors[:, 3] - anchors[:, 1] + TO_REMOVE
anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
w = self.cfg.MODEL.ANCHOR_GENERATOR.SIZES[0][0] / self.cfg.MODEL.ATSS.FPN_STRIDES[0]
x1 = anchors_cx - preds[:, 0] / w * anchors_w
y1 = anchors_cy - preds[:, 1] / w * anchors_h
x2 = anchors_cx + preds[:, 2] / w * anchors_w
y2 = anchors_cy + preds[:, 3] / w * anchors_h
pred_boxes = torch.stack([x1, y1, x2, y2], dim=1)
elif self.cfg.MODEL.ATSS.REGRESSION_TYPE == 'BOX':
anchors = anchors.to(preds.dtype)
TO_REMOVE = 1 # TODO remove
widths = anchors[:, 2] - anchors[:, 0] + TO_REMOVE
heights = anchors[:, 3] - anchors[:, 1] + TO_REMOVE
ctr_x = (anchors[:, 2] + anchors[:, 0]) / 2
ctr_y = (anchors[:, 3] + anchors[:, 1]) / 2
wx, wy, ww, wh = (10., 10., 5., 5.)
dx = preds[:, 0::4] / wx
dy = preds[:, 1::4] / wy
dw = preds[:, 2::4] / ww
dh = preds[:, 3::4] / wh
# Prevent sending too large values into torch.exp()
dw = torch.clamp(dw, max=math.log(1000. / 16))
dh = torch.clamp(dh, max=math.log(1000. / 16))
pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
pred_w = torch.exp(dw) * widths[:, None]
pred_h = torch.exp(dh) * heights[:, None]
pred_boxes = torch.zeros_like(preds)
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * (pred_w - 1)
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * (pred_h - 1)
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * (pred_w - 1)
            pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * (pred_h - 1)
        else:
            raise NotImplementedError("unsupported REGRESSION_TYPE: %s" % self.cfg.MODEL.ATSS.REGRESSION_TYPE)
        return pred_boxes
def get_num_gpus():
return int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
def reduce_sum(tensor):
if get_num_gpus() <= 1:
return tensor
import torch.distributed as dist
tensor = tensor.clone()
    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
return tensor
def permute_and_flatten(layer, N, A, C, H, W):
layer = layer.view(N, -1, C, H, W)
layer = layer.permute(0, 3, 4, 1, 2)
layer = layer.reshape(N, -1, C)
return layer
def concat_box_prediction_layers(box_cls, box_regression):
box_cls_flattened = []
box_regression_flattened = []
# for each feature level, permute the outputs to make them be in the
# same format as the labels. Note that the labels are computed for
# all feature levels concatenated, so we keep the same representation
# for the objectness and the box_regression
for box_cls_per_level, box_regression_per_level in zip(
box_cls, box_regression
):
N, AxC, H, W = box_cls_per_level.shape
Ax4 = box_regression_per_level.shape[1]
A = Ax4 // 4
C = AxC // A
box_cls_per_level = permute_and_flatten(
box_cls_per_level, N, A, C, H, W
)
box_cls_flattened.append(box_cls_per_level)
box_regression_per_level = permute_and_flatten(
box_regression_per_level, N, A, 4, H, W
)
box_regression_flattened.append(box_regression_per_level)
# concatenate on the first dimension (representing the feature levels), to
# take into account the way the labels were generated (with all feature maps
# being concatenated as well)
box_cls = cat(box_cls_flattened, dim=1).reshape(-1, C)
box_regression = cat(box_regression_flattened, dim=1).reshape(-1, 4)
return box_cls, box_regression
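# Shape walkthrough (an illustrative sketch, values assumed): with N=2 images,
# A=1 anchor, C=80 classes and a 10x10 feature map, a level's box_cls tensor
# of shape (2, 80, 10, 10) becomes (2, 100, 80) after permute_and_flatten;
# after concatenating all levels and reshaping, box_cls is (total_anchors, 80)
# and box_regression is (total_anchors, 4).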
# def make_anchor_generator_atss(config):
# anchor_sizes = config.MODEL.ATSS.ANCHOR_SIZES
# aspect_ratios = config.MODEL.ATSS.ASPECT_RATIOS
# anchor_strides = config.MODEL.ATSS.ANCHOR_STRIDES
# straddle_thresh = config.MODEL.ATSS.STRADDLE_THRESH
# octave = config.MODEL.ATSS.OCTAVE
# scales_per_octave = config.MODEL.ATSS.SCALES_PER_OCTAVE
#
# assert len(anchor_strides) == len(anchor_sizes), "Only support FPN now"
# new_anchor_sizes = []
# for size in anchor_sizes:
# per_layer_anchor_sizes = []
# for scale_per_octave in range(scales_per_octave):
# octave_scale = octave ** (scale_per_octave / float(scales_per_octave))
# per_layer_anchor_sizes.append(octave_scale * size)
# new_anchor_sizes.append(tuple(per_layer_anchor_sizes))
#
# anchor_generator = DefaultAnchorGenerator(
# {
# "sizes": new_anchor_sizes,
# "aspect_ratios": aspect_ratios,
# "strides": anchor_strides,
# "offset": 0.0
# }
# )
# return anchor_generator
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.rest.health import HealthResource
from tests import unittest
class HealthCheckTests(unittest.HomeserverTestCase):
def create_test_resource(self):
# replace the JsonResource with a HealthResource.
return HealthResource()
def test_health(self):
request, channel = self.make_request("GET", "/health", shorthand=False)
self.assertEqual(request.code, 200)
self.assertEqual(channel.result["body"], b"OK")
|
python
|
#pragma repy
# This test tries to do recvmess / stopcomm in a loop
def foo(ip,port,mess, ch):
print ip,port,mess,ch
if callfunc == 'initialize':
for x in xrange(0,10):
ch = recvmess(getmyip(),<messport>,foo)
sleep(.1)
stopcomm(ch)
sleep(.1)
|
python
|
import os
from datetime import timedelta
class Config(object):
DEBUG = False
AUTHENTICATED_SEARCH_API = os.environ['AUTHENTICATED_SEARCH_API']
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
CASES_URL = os.environ['CASES_URL']
MATCHING_URL = os.environ['MATCHING_URL']
OWNERSHIP_URL = os.environ['OWNERSHIP_URL']
OS_API_KEY = os.environ['OS_API_KEY']
INTRODUCTION_URL = os.environ['INTRODUCTION_URL']
HISTORIAN_URL = os.environ['HISTORIAN_URL']
REDIS_URL = os.environ['REDIS_URL']
PERMANENT_SESSION_LIFETIME = timedelta(minutes=int(os.environ['PERMANENT_SESSION_LIFETIME']))
WTF_CSRF_ENABLED = True
VIEW_COUNT = int(os.environ['VIEW_COUNT'])
VIEW_COUNT_ENABLED = os.environ['VIEW_COUNT_ENABLED']
SECRET_KEY = os.environ['SECRET_KEY']
SECURITY_PASSWORD_SALT = SECRET_KEY
SECURITY_PASSWORD_HASH = os.environ['SECURITY_PASSWORD_HASH']
    # optional and only needed on Heroku, so fetch
    # safely with .get()
BASIC_AUTH_USERNAME = os.environ.get('BASIC_AUTH_USERNAME')
BASIC_AUTH_PASSWORD = os.environ.get('BASIC_AUTH_PASSWORD')
class DevelopmentConfig(Config):
DEBUG = True
WTF_CSRF_ENABLED = False
class TestConfig(DevelopmentConfig):
TESTING = True
VIEW_COUNT_ENABLED = False
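# Hypothetical usage with Flask (not part of this module):
#
#   app.config.from_object('config.DevelopmentConfig')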
|
python
|
import gym
import random
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
import utils.utils as utils
import tensorflow as tf
from ddpg_tf import DDPG
env = gym.make('BipedalWalker-v2')
env.seed(0)
sess = tf.Session()
agent = DDPG('ddpg', utils.load_args(), sess=sess)
agent.restore()
def ddpg(n_episodes=10000, max_t=1000):
scores_deque = deque(maxlen=100)
scores = []
for i_episode in range(1, n_episodes+1):
state = env.reset()
score = 0
for t in range(max_t):
action = agent.act(state)
next_state, reward, done, _ = env.step(action)
agent.step(state, action, reward, next_state, done)
state = next_state
score += reward
if done:
break
scores_deque.append(score)
scores.append(score)
average_score = np.mean(scores_deque)
print('\rEpisode {}\tAverage Score: {:.2f}\tScore: {:.2f}'.format(i_episode, average_score, score), end="")
if i_episode % 100 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, average_score))
agent.save()
return scores
scores = ddpg()
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(1, len(scores)+1), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
|
python
|
from factory import robot
import rospy
from std_msgs.msg import Float64
from std_msgs.msg import Bool
from geometry_msgs.msg import Point
import time as t
import math as m
t0=t.time()
Robot=None
class rosact(object):
def __init__(self):
rospy.init_node('act')
self.pubs=[]
self.pubs.append(rospy.Publisher('/irb120/joint_1_position_controller/command',Float64,queue_size=10))
self.pubs.append(rospy.Publisher('/irb120/joint_2_position_controller/command',Float64,queue_size=10))
self.pubs.append(rospy.Publisher('/irb120/joint_3_position_controller/command',Float64,queue_size=10))
self.pubs.append(rospy.Publisher('/irb120/joint_4_position_controller/command',Float64,queue_size=10))
self.pubs.append(rospy.Publisher('/irb120/joint_5_position_controller/command',Float64,queue_size=10))
self.pubs.append(rospy.Publisher('/irb120/joint_6_position_controller/command',Float64,queue_size=10))
rospy.sleep(1)
    def write(self,rob,pos=None):
        """Solve IK for a fixed target point and publish the joint commands in a loop."""
        try:
            while True:
                x=0.350
                z=0.400
                y=-0.200  # *m.sin(t.time()-t0) for a moving target
                pos=rob.iterIK([x,y,z])
                pos=pos[1:]
                print(pos)
                msg=Float64()
                print('Writing ')
                print(pos)
                for i in range(len(pos)):
                    msg.data = pos[i]
                    self.pubs[i].publish(msg)
                rospy.sleep(0.01)
        except KeyboardInterrupt:
            print('Execution Stopped.')
def main():
Robot=robot()
Robot.BuildKineModules()
jts=[0,10,30,0,20,0,0]
a=Robot.GetEffectorPosition(jts)
# print(a)
# print('final pos')
print(Robot.SetEffectorPosition(a)*180/m.pi)
act=rosact()
try:
act.write(Robot)
except KeyboardInterrupt:
print('Execution stopped.')
    print("this shouldn't be displayed")
if __name__== '__main__':
main()
|
python
|
import time
from os import path
from collections import defaultdict
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, StaleElementReferenceException
from utils import tools, COMPANYDIR
from crawler.preprocessor import process_summary
from localpaths import DOWNLOADPATH, DRIVERPATH, ID, PASSWORD
# URLs
def signinurl():
return "https://login.yahoo.com/"
def summaryurl(t):
return f"https://finance.yahoo.com/quote/{t}"
def profileurl(t):
return f"https://finance.yahoo.com/quote/{t}/profile?p={t}"
def statisticsurl(t):
return f"https://finance.yahoo.com/quote/{t}/key-statistics?p={t}"
def incomestatementurl(t):
return f"https://finance.yahoo.com/quote/{t}/financials?p={t}"
def balancesheeturl(t):
return f"https://finance.yahoo.com/quote/{t}/balance-sheet?p={t}"
def cashflowurl(t):
return f"https://finance.yahoo.com/quote/{t}/cash-flow?p={t}"
def historyurl(t):
return f"https://finance.yahoo.com/quote/{t}/history?p={t}"
class ChromeDriver:
def __init__(self, init, debug, headless=True):
self.init = init
self.debug = debug
self.results = defaultdict(dict)
if self.init or self.debug:
self.currency_of_last_symbol = None
self.last_symbol_is_stock = None
self.stocks = []
self.currencys = []
self.timeout = 5 # how many seconds to wait
self.max_trial = 3 # how many times to try
        self.headless = headless and not self.init
        self.init_driver(self.headless)
if self.init:
self.signin()
def init_driver(self, headless):
"""Initialize ChromeDriver."""
options = webdriver.ChromeOptions()
if headless:
options.add_argument("--headless")
options.add_argument("--incognito")
options.add_argument("--disable-notifications")
options.add_argument("--user-agent" \
"=''Mozilla/5.0 (Windows NT 10.0; Win64; x64)" \
" AppleWebKit/537.36 (KHTML, like Gecko)" \
" Chrome/74.0.3729.157 Safari/537.36''")
# Disable unnecessary driver tips for speedup
# From https://github.com/dinuduke/Selenium-chrome-firefox-tips
prefs = {"profile.managed_default_content_settings.images" : 2,
"profile.default_content_setting_values.notifications" : 2,
"profile.managed_default_content_settings.stylesheets" : 2,
"profile.managed_default_content_settings.javascript" : 1,
"profile.managed_default_content_settings.plugins" : 1,
"profile.managed_default_content_settings.popups" : 2,
"profile.managed_default_content_settings.geolocation" : 2,
"profile.managed_default_content_settings.media_stream" : 2}
if not self.init: # cookie must be enabled to sign in
prefs["profile.managed_default_content_settings.cookies"] = 2
options.add_experimental_option("prefs", prefs)
self.driver = webdriver.Chrome(DRIVERPATH, options=options)
def signin(self):
"""Sign in to Yahoo Finance using ID and password saved in localpaths.py."""
self.driver.get(signinurl())
# send username
WebDriverWait(self.driver, self.timeout).until(
EC.element_to_be_clickable((
By.CSS_SELECTOR,
"input[name='username']"))).send_keys(ID)
# click 'Next'
WebDriverWait(self.driver, self.timeout).until(
EC.element_to_be_clickable((
By.CSS_SELECTOR,
"input[name='signin']"))).click()
# wait til password
self.sleep(3)
# send password
WebDriverWait(self.driver, self.timeout).until(
EC.element_to_be_clickable((
By.CSS_SELECTOR,
"input[name='password']"))).send_keys(PASSWORD)
# click submit
WebDriverWait(self.driver, self.timeout).until(
EC.element_to_be_clickable((
By.CSS_SELECTOR,
"button[type='submit']"))).click()
# wait til log-in
self.sleep(3)
def reset_last_symbol_info(self):
self.stocks.append(self.last_symbol_is_stock)
self.currencys.append(self.currency_of_last_symbol)
self.last_symbol_is_stock = None
self.currency_of_last_symbol = None
def reboot(self):
"""Reboot driver."""
self.driver.quit()
self.sleep(600) # rest for 10 minutes
        self.init_driver(self.headless)
self.signin()
def close(self):
"""Close driver."""
self.driver.close()
def quit(self):
"""Quit driver."""
self.driver.quit()
def sleep(self, t=6):
"""Sleep crawler."""
return time.sleep(t)
def parse(self, tr):
"""Parse row element from table into column and value."""
# choose any char not commonly used
splitted = tr.get_text("|").split("|")
val = (splitted[-1] if (col := splitted[0]) != "Earnings Date"
else "".join(splitted[1:]))
return col, val # column, value
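    # Illustrative example (the row text is assumed, not from the source): a
    # <tr> rendering as "Market Cap|2.41T" is split on "|" and parsed into
    # ("Market Cap", "2.41T"); for the multi-valued "Earnings Date" row the
    # remaining fields are joined into a single value.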
def save(self, col, symbol, data, sep=",", index_col=0, backup=True):
"""Save data."""
if not path.exists(dir_ := path.join(COMPANYDIR, symbol)):
tools.mkdir(dir_)
inpath = tools.get_path(col, symbol)
if self.debug:
if not path.exists(debugdir := path.join(dir_, "debug")):
tools.mkdir(debugdir)
inpath = tools.get_path(col, symbol, debug=self.debug)
# backup
if not self.debug and backup and path.exists(inpath):
if not path.exists(backdir := path.join(dir_, "backup")):
tools.mkdir(backdir)
tools.cp(inpath, tools.get_path(col, symbol, backup=True))
# convert data to df
if not isinstance(data, list):
data = [data]
curdf = pd.DataFrame(data)
curdf["Date"] = curdf["Date"].apply(tools.to_date)
curdf.set_index("Date", inplace=True)
process_summary(curdf)
if path.exists(inpath):
# concatenate with existing file, remove any duplicate row
maindf = tools.path2df(inpath, sep=sep, index_col=index_col)
maindf = maindf[maindf.index != tools.get_today()]
curdf = pd.concat([curdf, maindf], axis=0)
# sort and save
curdf.sort_index(ascending=False, inplace=True)
curdf.to_csv(inpath, index=True)
def is_stock(self):
"""Return True if corresponding symbol is a stock else False."""
try:
# wait until sections are visible
WebDriverWait(self.driver, self.timeout).until(
EC.visibility_of_all_elements_located((
By.CSS_SELECTOR,
"div[id='quote-nav']>ul>li")))
for section in self.driver.find_elements_by_css_selector(
"div[id='quote-nav']>ul>li"):
if "Financials" in section.text:
return True
except TimeoutException:
return None
else:
return False
def get_currency(self):
tmp = WebDriverWait(self.driver, self.timeout).until(
EC.visibility_of_element_located((
By.CSS_SELECTOR,
"section[data-test='qsp-financial']>div>span>span"))).text
if "." in tmp:
tmp = tmp.split(".")[0].split(" ") # split first sentence
return tmp[2] if len(tmp) == 3 else None
return "USD"
def finished(self, result):
"""Return True if all elements in result is True else False."""
return sum(result) == len(result)
def exist(self, symbol):
"""Return True if symbol exists, else False."""
self.driver.get(summaryurl(symbol)) # check Summary section
try:
WebDriverWait(self.driver, self.timeout).until(
EC.visibility_of_element_located((
By.CSS_SELECTOR,
"section[id='lookup-page']>section>div>h2")))
except TimeoutException:
self.last_symbol_is_stock = self.is_stock()
return True
else:
self.sleep()
return False
def mv_downloaded(self, symbol, from_, to_):
"""Move downloaded file from from_ to to_."""
tools.mv(path.join(DOWNLOADPATH, from_),
tools.get_path(to_, symbol, debug=self.debug))
def crawl_summary(self, symbols):
"""Crawl data to get saved in symbol_summary.csv."""
for symbol in symbols:
data = {"Date" : tools.get_today(), "Symbol" : symbol}
# [Summary, Statistics]
result = [False, False]
for _ in range(self.max_trial):
if not result[0]: # crawl summary section
try:
self.driver.get(summaryurl(symbol))
WebDriverWait(self.driver, self.timeout).until(
EC.visibility_of_all_elements_located((By.TAG_NAME,
"table")))
except TimeoutException:
pass
except StaleElementReferenceException:
self.reboot()
else:
html_content = self.driver.page_source
soup = BeautifulSoup(html_content, "html.parser")
for table in soup.find_all("table")[:2]:
for tr in table.find_all("tr"):
col, val = self.parse(tr)
data[col] = val
result[0] = True
self.sleep(3)
if not result[1]:
try:
self.driver.get(statisticsurl(symbol))
WebDriverWait(self.driver, self.timeout).until(
EC.visibility_of_element_located((
By.ID, "Main")))
except TimeoutException:
pass
except StaleElementReferenceException:
self.reboot()
else:
html_content = self.driver.page_source
soup = BeautifulSoup(html_content, "html.parser")
for section in soup.find_all(
"section", {"data-test":"qsp-statistics"}):
for div in section.find_all("div"):
children = list(div.children)
if len(children) == 2 and children[0].text in {
"Stock Price History", "Share Statistics"}:
for tr in children[1].find_all("tr"):
col, val = self.parse(tr)
data[col] = val
result[1] = True
self.sleep(3)
if self.finished(result):
break
name = "summary"
if not self.finished(result):
self.results[symbol][name] = result
else:
self.save(name, symbol, data)
def crawl_history(self, symbol):
"""Crawl historical data.
This includes:
- Dividend history: symbol_dividend.csv
- Stock price history: symbol_history.csv
- Stock split history: symbol_stock_split.csv"""
def download():
WebDriverWait(self.driver, self.timeout).until( # click arrow
EC.element_to_be_clickable((
By.CSS_SELECTOR,
"section>div>div>span>a"))).click()
self.sleep(3) # wait to download
def switch(to_):
WebDriverWait(self.driver, self.timeout).until( # click Show
EC.element_to_be_clickable((
By.CSS_SELECTOR,
"section span>div[data-test='select-container']"))).click()
menu = WebDriverWait(self.driver, self.timeout).until(
EC.visibility_of_element_located((
By.CSS_SELECTOR,
"section span>div[data-test='historicalFilter-menu']")))
for d in menu.find_elements_by_tag_name("div"):
if d.text == to_:
d.click()
break
WebDriverWait(self.driver, self.timeout).until( #click Apply
EC.element_to_be_clickable((
By.CSS_SELECTOR,
"section>div>div>button"))).click()
self.sleep(3) # wait to load
def switch_max():
# click dropdown
WebDriverWait(self.driver, self.timeout).until(
EC.element_to_be_clickable((
By.CSS_SELECTOR,
"section div[data-test='dropdown']>div"))).click()
# click max
WebDriverWait(self.driver, self.timeout).until(
EC.element_to_be_clickable((
By.CSS_SELECTOR, "li>button[data-value='MAX']"))
).click()
# wait to load
self.sleep(3)
            nonlocal is_max
is_max = True
if self.last_symbol_is_stock:
self.driver.get(historyurl(symbol))
is_max = False
# [Historical Prices, Dividends Only, Stock Splits]
result = [False, False, False]
for _ in range(self.max_trial):
downloaded = f"{symbol}.csv"
if not result[0]:
name = "history"
if not self.debug and path.exists(tools.get_path(name,
symbol)):
result[0] = True
else:
try:
if not is_max:
switch_max()
# download
download()
# move summary.csv to data dir
self.mv_downloaded(symbol,
downloaded,
name)
except TimeoutException:
pass
except StaleElementReferenceException:
self.reboot()
else:
result[0] = True
if not result[1]:
name = "Dividends Only"
if not self.debug and path.exists(tools.get_path(name,
symbol)):
result[1] = True
else:
try:
# switch to dividends
switch(name)
if not is_max:
switch_max()
# download
download()
# move divdend.csv to data dir
self.mv_downloaded(symbol, downloaded, name)
except TimeoutException:
pass
except StaleElementReferenceException:
self.reboot()
else:
result[1] = True
if not result[2]:
name = "Stock Splits"
if not self.debug and path.exists(tools.get_path(name,
symbol)):
result[2] = True
else:
try:
# switch to dividends
switch(name)
if not is_max:
switch_max()
# click download
download()
# move split.csv to data dir
self.mv_downloaded(symbol, downloaded, name)
except TimeoutException:
pass
except StaleElementReferenceException:
self.reboot()
else:
result[2] = True
if self.finished(result):
break
self.driver.refresh()
if not self.finished(result):
self.results[symbol]["history"] = result
self.sleep()
def crawl_financials(self, symbol):
"""Crawl financial data.
This includes:
- Income Statement
- Balance Sheet
- Cash Flow"""
def click_quarterly_and_download():
"""Click 'Quarterly' and 'Download'."""
# click Quarterly
WebDriverWait(self.driver, self.timeout).until(
EC.element_to_be_clickable((
By.CSS_SELECTOR,
"section[data-test='qsp-financial']>div>div>button"))
).click()
self.sleep(3) # wait to load
# click Download
WebDriverWait(self.driver, self.timeout).until(
EC.element_to_be_clickable((
By.CSS_SELECTOR,
"section[data-test='qsp-financial'] div>span>button"))
).click()
self.sleep(3) # wait to download
if self.last_symbol_is_stock:
# [Income Statement, Balance Sheet, Cash Flow]
result = [False, False, False]
for _ in range(self.max_trial):
if not result[0]:
name = "income_statement"
if self.init or self.debug:
try:
self.driver.get(incomestatementurl(symbol))
self.currency_of_last_symbol = self.get_currency()
if not path.exists(tools.get_path(name, symbol)):
click_quarterly_and_download()
self.mv_downloaded(symbol,
f"{symbol}_quarterly_financials.csv",
name)
except TimeoutException:
pass
except StaleElementReferenceException:
self.reboot()
else:
result[0] = True
if not result[1]:
name = "balance_sheet"
if not self.debug and path.exists(tools.get_path(name,
symbol)):
result[1] = True
else:
try:
self.driver.get(balancesheeturl(symbol))
click_quarterly_and_download()
self.mv_downloaded(symbol,
f"{symbol}_quarterly_balance-sheet.csv",
name)
except TimeoutException:
pass
except StaleElementReferenceException:
self.reboot()
else:
result[1] = True
if not result[2]:
name = "cash_flow"
if not self.debug and path.exists(tools.get_path(name,
symbol)):
result[2] = True
else:
try:
self.driver.get(cashflowurl(symbol))
click_quarterly_and_download()
self.mv_downloaded(symbol,
f"{symbol}_quarterly_cash-flow.csv",
name)
except TimeoutException:
pass
except StaleElementReferenceException:
self.reboot()
else:
result[2] = True
if self.finished(result):
break
if not self.finished(result):
self.results[symbol]["financials"] = result
self.sleep()
def crawl_statistics(self, symbol):
"""Crawl statistics.csv."""
result = [False, False]
data = {}
self.driver.get(statisticsurl(symbol))
self.sleep(3)
for _ in range(self.max_trial):
name = "tmp"
if not self.debug and path.exists(tools.get_path(name, symbol)):
result[0] = True
else:
try:
WebDriverWait(self.driver, self.timeout).until(
EC.visibility_of_element_located((
By.ID, "Main")))
except TimeoutException:
pass
except StaleElementReferenceException:
self.reboot()
else: # crawl statistics with bs4
html_content = self.driver.page_source
soup = BeautifulSoup(html_content, "html.parser")
for section in soup.find_all(
"section", {"data-test":"qsp-statistics"}):
for div in section.find_all("div"):
children = list(div.children)
if len(children) == 2 and children[0].text in {
"Fiscal Year", "Profitability",
"Management Effectiveness",
"Income Statement", "Balance Sheet",
"Cash Flow Statement", "Dividends & Splits"}:
for tr in children[1].find_all("tr"):
col, val = self.parse(tr)
data[col] = val
self.save(name, symbol, data)
result[0] = True
name = "statistics"
if not self.debug and path.exists(tools.get_path(name, symbol)):
result[1] = True
else:
try: # download quarterly statistics
WebDriverWait(self.driver, self.timeout).until(
EC.element_to_be_clickable((
By.CSS_SELECTOR,
"section[data-test='qsp-statistics'] div>span>button"))
).click()
self.sleep()
# move downloaded file to symbol dir
self.mv_downloaded(symbol,
f"{symbol}_quarterly_valuation_measures.csv",
name)
except TimeoutException:
pass
except StaleElementReferenceException:
self.reboot()
else:
result[1] = True
if self.finished(result):
break
if not self.finished(result):
self.results[symbol]["statistics"] = result
self.sleep()
def crawl_profile_info(self, symbols):
"""Crawl 'Stock' and 'Currency' columns in stock_profile.csv."""
data = {"Stock" : [False for _ in range(len(symbols))],
"Currency" : [None for _ in range(len(symbols))]}
for i, symbol in enumerate(symbols):
if self.exist(symbol):
try:
# crawl 'Stock' column
is_stock = self.is_stock()
self.sleep(3)
# crawl 'Currency' column
self.driver.get(incomestatementurl(symbol))
currency = self.get_currency()
self.sleep(3)
except:
pass
else:
data["Stock"][i] = is_stock
data["Currency"][i] = currency
return data
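# Hypothetical usage sketch (symbol and flow are illustrative; exist() must
# run first so the crawler knows whether the symbol is a stock):
#
#   crawler = ChromeDriver(init=True, debug=False)
#   if crawler.exist("AAPL"):
#       crawler.crawl_summary(["AAPL"])
#       crawler.crawl_history("AAPL")
#       crawler.crawl_financials("AAPL")
#   crawler.quit()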
|
python
|
import pandas as pd
import matplotlib.pyplot as plt
import math
class Frame():
def __init__(self, path_):
self.path = path_
        self.data = pd.read_csv(filepath_or_buffer=path_, delimiter=',')
def clean(self, subs:bool):
Ncol = len( self.data.columns )
        self.data.dropna(inplace=True, thresh=Ncol, axis=0)  # drop empty rows
if subs:
for i in self.data.columns[1:]:
self.data[i].fillna( method='ffill', inplace=True)
def split_datetime(self, col:str, delimiter:str, dropit:bool):
field_split = self.data[col].str.split(delimiter)
self.data['Fecha'] = field_split.apply(lambda x: x[0])
self.data['Time'] = field_split.apply(lambda x: x[1])
self.data.drop(col,axis=1, inplace=True)
def dec_time(self, delimiter:str):
field_split = self.data['Time'].str.split(delimiter)
self.data['Hora'] = field_split.apply(lambda x: float(x[0]) + float(x[1]) / 60 + float(x[2])/3600)
def check_decimals(self, col:str, digits:int, dropit:bool):
self.data[col+'ajus'] = self.data[col].apply( lambda x: (x/(pow(10, len(str(round(x))) - digits))) if (digits != len(str(round(x)))) else x)
self.data.drop(col,axis=1, inplace=True)
class Granularity():
def __init__(self, df_):
self.data = df_
def SizzSub(self,col):
first_ = int(self.data[col].index[0])
self.data['index'] = self.data[col].index
self.data['colval'] = self.data['index'].apply(lambda x: self.data[col][x] if ( x == first_) else self.data[col][x-1])
self.data[col + "SizzSub"] = abs( (self.data[col] - self.data['colval']).round(4) )
self.data.drop('index',axis=1, inplace=True)
self.data.drop('colval',axis=1, inplace=True)
    def SizzRep(self,col):
        first_ = int(self.data[col].index[0])
        self.data['index'] = self.data[col].index
        self.data['colval'] = self.data['index'].apply(lambda x: self.data[col][x] if ( x == first_) else self.data[col][x-1])
        self.data[col + "SizzRep"] = abs( (self.data[col] - self.data['colval']).round(4) )
        self.data.drop('index',axis=1, inplace=True)
        self.data.drop('colval',axis=1, inplace=True)
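# Illustrative example (the column name is assumed): given a numeric 'Close'
# column,
#
#   g = Granularity(frame.data)
#   g.SizzSub('Close')
#
# adds a 'CloseSizzSub' column holding |Close[i] - Close[i-1]| per row
# (the first row is compared with itself, yielding 0).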
|
python
|
# Import tensorflow, matplotlib.pyplot, numpy, and random used in this code.
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import random
# Load the MNIST data and one-hot encode it.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./mnist/data/", one_hot=True)
# Parameter settings
learning_rate = 0.01
training_epoch = 15
batch_size = 100
# Number of features in the hidden layer
n_hidden = 300
# Input size: 28 x 28 pixels
n_input = 28*28
# Step 1 Neural network setting
# Y is not declared as a placeholder.
X = tf.placeholder(tf.float32, [None, n_input])
# input -> encoder -> decoder -> output
# The encoder compresses the information to extract features.
W1 = tf.Variable(tf.random_normal([n_input,n_hidden]))
B1 = tf.Variable(tf.random_normal([n_hidden]))
# The decoder reconstructs the input, producing an output identical to the input.
W2 = tf.Variable(tf.random_normal([n_hidden,n_input]))
B2 = tf.Variable(tf.random_normal([n_input]))
encoder = tf.nn.sigmoid(tf.add(tf.matmul(X,W1),B1))
decoder = tf.nn.sigmoid(tf.add(tf.matmul(encoder,W2),B2))
# The decoder must produce a result similar to the input.
Y = X
# Since the decoder output should resemble the input, the cost function is the squared difference between the decoder output and the actual values.
# A large cost means the decoded result differs from the actual input.
cost = tf.reduce_mean(tf.pow(Y - decoder,2))
train = tf.train.AdamOptimizer(learning_rate).minimize(cost)
total_batch = int(mnist.train.num_examples/batch_size)
# Step 2 Training
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
for epoch in range(training_epoch):
sum_cost = 0
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
sess.run(train, feed_dict={X:batch_xs})
sum_cost += sess.run(cost,feed_dict={X:batch_xs})
print("Epoch:",epoch,"Avg Cost:",sum_cost/total_batch)
print("Optimization finished")
# Decoding
pred = sess.run(decoder,feed_dict={X:mnist.test.images[:10]})
figure, axis = plt.subplots(2,10,figsize=(10,2))
for i in range(10):
axis[0][i].set_axis_off()
axis[1][i].set_axis_off()
axis[0][i].imshow(np.reshape(mnist.test.images[i],(28,28)))
axis[1][i].imshow(np.reshape(pred[i],(28,28)))
plt.show()
|
python
|
def kw_only_args(*, kwo):
pass
def kw_only_args_with_varargs(*varargs, kwo, another='default'):
pass
|
python
|
# Copyright 2017 NTRLab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
import uuid
from threading import Condition
from threading import Lock
LOGGER = logging.getLogger(__name__)
class AuthorizationException(Exception):
def __init__(self, address):
super(AuthorizationException, self).__init__(
"Not authorized to read/write to {}".format(address))
class ExecutionContext(object):
"""A thread-safe data structure holding address-_ContextFuture pairs and
the addresses that can be written to and read from.
"""
def __init__(self, state_hash, read_list, write_list, base_context_ids):
"""
Args:
state_hash: the Merkle root
read_list (list of str): Addresses that were listed as inputs on
the transaction.
write_list (list of str): Addresses that were listed as outputs on
the transaction.
base_context_ids (list of str): Context ids of contexts that this
context is based off of.
"""
self._state_hash = state_hash
# Create copies of the read and write lists
self._read_list = read_list.copy()
self._write_list = write_list.copy()
self._state = {}
self._lock = Lock()
self._read_only = False
self.base_contexts = base_context_ids
self._id = uuid.uuid4().hex
self._execution_data = []
self._execution_events = []
@property
def session_id(self):
return self._id
@property
def merkle_root(self):
return self._state_hash
def _contains_and_deleted(self, address):
#LOGGER.debug('ExecutionContext:_contains_and_deleted "%s"', address)
return address in self._state and \
self._state[address].deleted_in_context()
def _contains_and_set(self, address):
return address in self._state and self._state[address].set_in_context()
def _contains_and_not_set(self, add):
return add in self._state and not self._state[add].set_in_context()
def _contains(self, address):
return address in self._state
def __contains__(self, item):
with self._lock:
return self._contains(item)
def _get(self, address):
value = None
if self._contains(address):
value = self._state[address].result()
return value
def _get_if_set(self, address):
value = None
if self._contains_and_set(address):
value = self._state[address].result()
return value
def _get_if_deleted(self, address):
add = None
if self._contains_and_deleted(address=address):
add = address
return add
def _get_if_not_set(self, address):
value = None
if self._contains_and_not_set(address):
value = self._state[address].result()
return value
def is_read_only(self):
return self._read_only
def make_read_only(self):
with self._lock:
if not self._read_only:
for fut in self._state.values():
fut.make_read_only()
self._read_only = True
def get(self, addresses):
"""Returns the value in this context, or None, for each address in
addresses. Useful for gets on the context manager.
Args:
addresses (list of str): The addresses to return values for, if
within this context.
Returns:
results (list of bytes): The values in state for these addresses.
"""
with self._lock:
results = []
for add in addresses:
self.validate_read(add)
results.append(self._get(add))
return results
def get_if_set(self, addresses):
"""Returns the value set in this context, or None, for each address in
addresses.
Args:
addresses (list of str): The addresses to return values for, if set
within this context.
Returns:
(list): bytes set at the address or None
"""
with self._lock:
results = []
for add in addresses:
results.append(self._get_if_set(add))
return results
def get_if_deleted(self, addresses):
"""Returns a list of addresses that have been deleted, or None if it
hasn't been deleted.
Args:
addresses (list of str): The addresses to check if deleted.
Returns:
(list of str): The addresses, if deleted, or None.
"""
with self._lock:
results = []
for add in addresses:
results.append(self._get_if_deleted(add))
return results
def get_if_not_set(self, addresses):
"""Returns the value at an address if it was an input to the txn but
never set. It returns None if that address was never set in the
merkle database, or if the address is not within the context.
Args:
addresses (list of str): The full 70 character addresses.
Returns:
(list): bytes at that address but not set within the context
"""
with self._lock:
results = []
for add in addresses:
results.append(self._get_if_not_set(add))
return results
def get_all_if_set(self):
"""Return all the addresses and opaque values set in the context.
Useful in the squash method.
Returns:
(dict of str to bytes): The addresses and bytes that have
been set in the context.
"""
with self._lock:
results = {}
for add, fut in self._state.items():
if self._contains_and_set(add):
results[add] = fut.result()
return results
def get_all_if_deleted(self):
"""Return all the addresses deleted in the context.
Useful in the squash method.
Returns:
(dict of str to bytes): The addresses and bytes that have
been deleted in the context.
"""
with self._lock:
results = {}
for add, fut in self._state.items():
if self._contains_and_deleted(add):
results[add] = fut.result()
return results
def create_prefetch(self, addresses):
"""Create futures needed before starting the process of reading the
address's value from the merkle tree.
Args:
addresses (list of str): addresses in the txn's inputs that
aren't in any base context (or any in the chain).
"""
with self._lock:
for add in addresses:
self._state[add] = _ContextFuture(address=add,
wait_for_tree=True)
def create_initial(self, address_values):
"""Create futures from inputs with the current value for that address
at the start of that context.
Args:
address_values (list of tuple): The tuple is string, bytes of the
address and value.
"""
with self._lock:
for add, val in address_values:
self._state[add] = _ContextFuture(address=add, result=val)
def set_from_tree(self, address_value_dict):
"""Set the result for each future at the given addresses with the value
stored in the merkle database.
Args:
address_value_dict (dict of str: bytes): The unique
full addresses that the bytes values should be set with.
"""
#LOGGER.debug('set_from_tree: %s\n',address_value_dict)
for address, value in address_value_dict.items():
if address in self._state:
self._state[address].set_result(result=value,
from_tree=True)
def delete_direct(self, addresses):
"""Called in the context manager's delete method to either
mark an entry for deletion , or create a new future and immediately
set it for deletion in the future.
Args:
address_list (list of str): The unique full addresses.
Raises:
AuthorizationException
"""
with self._lock:
for address in addresses:
self._validate_write(address)
if address in self._state:
self._state[address].set_deleted()
else:
fut = _ContextFuture(address=address)
self._state[address] = fut
fut.set_deleted()
def set_direct(self, address_value_dict):
"""Called in the context manager's set method to either overwrite the
value for an address, or create a new future and immediately set a
value in the future.
Args:
address_value_dict (dict of str:bytes): The unique full addresses
with bytes to set at that address.
Raises:
AuthorizationException
"""
with self._lock:
for address, value in address_value_dict.items():
self._validate_write(address)
if address in self._state:
self._state[address].set_result(result=value)
else:
fut = _ContextFuture(address=address)
self._state[address] = fut
fut.set_result(result=value)
def _validate_write(self, address):
"""Raises an exception if the address is not allowed to be set
in this context, based on txn outputs.
Notes:
Checks that the address is either listed fully as one of the
outputs, or some portion of the address is listed as a namespace
in the outputs of the txn.
Args:
address (str): The address to be validated. The context manager
validates the address correctness (70 hex characters).
Returns:
None
Raises:
AuthorizationException
"""
if not any(address.startswith(ns) for ns in self._write_list):
raise AuthorizationException(address=address)
def validate_read(self, address):
"""Raises an exception if the address is not allowed to be read in
this context, based on txn inputs.
Args:
address (str): An address to be validated.
Returns:
None
Raises:
AuthorizationException
"""
if not any(address.startswith(ns) for ns in self._read_list):
raise AuthorizationException(address=address)
def add_execution_data(self, data):
with self._lock:
self._execution_data.append(data)
def get_execution_data(self):
with self._lock:
return self._execution_data.copy()
def add_execution_event(self, event):
with self._lock:
self._execution_events.append(event)
def get_execution_events(self):
with self._lock:
return self._execution_events.copy()
class _ContextFuture(object):
"""Controls access to bytes set in the _result variable. The booleans
that are flipped in set_result, based on whether the value is being set
from the merkle tree or a direct set on the context manager are needed
to later determine whether the value was set in that context or was
looked up as a new address location from the merkle tree and then only
read from, not set.
In any context the lifecycle of a _ContextFuture can be several paths:
Input:
Address not in base:
F -----> get from merkle database ----> get from the context
Address in base:
|---> set (F)
F --->|
|---> get
Output:
Doesn't exist ----> set address in context (F)
Input + Output:
Address not in base:
|-> set
F |-> get from merkle -|
| |-> get
| |
| |-> noop
|--> set Can happen before the pre-fetch operation
|-> set (F) ---> get
|
|-> set (F) ----> set
|
Address in base: |-> set (F)
Doesn't exist -|
                       |-> get     Future doesn't exist in context
|
|-> get ----> set (F)
"""
def __init__(self, address, result=None, wait_for_tree=False):
self.address = address
self._result = result
self._result_set_in_context = False
self._condition = Condition()
self._wait_for_tree = wait_for_tree
self._tree_has_set = False
self._read_only = False
self._deleted = False
def make_read_only(self):
with self._condition:
if self._wait_for_tree and not self._result_set_in_context:
self._condition.wait_for(
lambda: self._tree_has_set or self._result_set_in_context)
self._read_only = True
def set_in_context(self):
with self._condition:
return self._result_set_in_context
def deleted_in_context(self):
with self._condition:
return self._deleted
def result(self):
"""Return the value at an address, optionally waiting until it is
set from the context_manager, or set based on the pre-fetch mechanism.
Returns:
(bytes): The opaque value for an address.
"""
if self._read_only:
return self._result
with self._condition:
if self._wait_for_tree and not self._result_set_in_context:
self._condition.wait_for(
lambda: self._tree_has_set or self._result_set_in_context)
return self._result
def set_deleted(self):
self._result_set_in_context = False
self._deleted = True
def set_result(self, result, from_tree=False):
"""Set the addresses's value unless the future has been declared
read only.
Args:
result (bytes): The value at an address.
from_tree (bool): Whether the value is being set by a read from
the merkle tree.
Returns:
None
"""
if self._read_only:
if not from_tree:
LOGGER.warning("Tried to set address %s on a"
" read-only context.",
self.address)
return
with self._condition:
if self._read_only:
if not from_tree:
LOGGER.warning("Tried to set address %s on a"
" read-only context.",
self.address)
return
if from_tree:
# If the result has not been set in the context, overwrite the
# value with the value from the merkle tree. Otherwise, do
# nothing.
if not self._result_set_in_context:
self._result = result
self._tree_has_set = True
else:
self._result = result
self._result_set_in_context = True
self._deleted = False
self._condition.notify_all()
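# A minimal usage sketch (the state hash and 70-hex-char address are
# illustrative placeholders):
#
#   addr = '1a' + '0' * 68
#   ctx = ExecutionContext('somemerkleroot', read_list=['1a'],
#                          write_list=['1a'], base_context_ids=[])
#   ctx.set_direct({addr: b'value'})
#   assert ctx.get_if_set([addr]) == [b'value']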
|
python
|
import logging
from logging.config import dictConfig
def setup(conf):
dictConfig(conf)
class LoggerMixIn:
def __init__(self, *args, **kwargs):
logger_name = getattr(self, "__logger_name__", self.__class__.__name__)
self.logger = logging.getLogger(logger_name)
for lvl in ["CRITICAL", "DEBUG", "INFO", "WARN", "WARNING", "ERROR", "FATAL"]:
setattr(self.logger, lvl, getattr(logging, lvl))
super().__init__(*args, **kwargs)
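# Hypothetical usage (class and message are illustrative):
#
#   class Worker(LoggerMixIn):
#       pass
#
#   Worker().logger.info("worker ready")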
|
python
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All of the message templates used across the various services."""
import json
import stan.pb.protocol_pb2 as protocol
from stan.aio.client import Msg
from queue_common import messages
def test_create_payment_msg():
"""Assert a payment message can be created."""
# setup
identifier = 'test_id'
status = 'TEST_STATUS'
    assert messages.create_payment_msg(identifier, status) == {'paymentToken': {'id': identifier, 'statusCode': status}}
def test_get_payment_id_from_msg():
"""Assert that an id can be extracted from the payment message."""
# setup
identifier = 'test_id'
status = 'TEST_STATUS'
token = {'paymentToken': {'id': identifier, 'statusCode': status}}
msg = Msg()
msg.proto = protocol.MsgProto
msg.proto.data = json.dumps(token).encode('utf-8')
assert identifier == messages.get_payment_id_from_msg(msg)
assert not messages.get_payment_id_from_msg(None)
|
python
|
from . import location
|
python
|
import os
import sys
import time
import machine
import badger2040
from badger2040 import WIDTH, HEIGHT
README = """
Images must be 296x128 pixel with 1bit colour depth.
You can use examples/badger2040/image_converter/convert.py to convert them:
python3 convert.py --binary --resize image_file_1.png image_file_2.png image_file_3.png
Create a new "images" directory via Thonny, and upload the .bin files there.
"""
OVERLAY_BORDER = 40
OVERLAY_SPACING = 20
OVERLAY_TEXT_SIZE = 0.5
TOTAL_IMAGES = 0
# Try to preload BadgerPunk image
try:
os.mkdir("images")
except OSError:
pass
try:
import badgerpunk
with open("images/badgerpunk.bin", "wb") as f:
f.write(badgerpunk.data())
f.flush()
with open("images/readme.txt", "w") as f:
        f.write(README)
f.flush()
del badgerpunk
except (OSError, ImportError):
pass
try:
IMAGES = [f for f in os.listdir("/images") if f.endswith(".bin")]
TOTAL_IMAGES = len(IMAGES)
except OSError:
pass
display = badger2040.Badger2040()
button_a = machine.Pin(badger2040.BUTTON_A, machine.Pin.IN, machine.Pin.PULL_DOWN)
button_b = machine.Pin(badger2040.BUTTON_B, machine.Pin.IN, machine.Pin.PULL_DOWN)
button_c = machine.Pin(badger2040.BUTTON_C, machine.Pin.IN, machine.Pin.PULL_DOWN)
button_up = machine.Pin(badger2040.BUTTON_UP, machine.Pin.IN, machine.Pin.PULL_DOWN)
button_down = machine.Pin(badger2040.BUTTON_DOWN, machine.Pin.IN, machine.Pin.PULL_DOWN)
image = bytearray(int(296 * 128 / 8))
current_image = 0
show_info = True
# Draw an overlay box with a given message within it
def draw_overlay(message, width, height, line_spacing, text_size):
# Draw a light grey background
display.pen(12)
display.rectangle((WIDTH - width) // 2, (HEIGHT - height) // 2, width, height)
# Take the provided message and split it up into
# lines that fit within the specified width
words = message.split(" ")
lines = []
current_line = ""
for word in words:
if display.measure_text(current_line + word + " ", text_size) < width:
current_line += word + " "
else:
lines.append(current_line.strip())
current_line = word + " "
lines.append(current_line.strip())
display.pen(0)
display.thickness(2)
# Display each line of text from the message, centre-aligned
num_lines = len(lines)
for i in range(num_lines):
length = display.measure_text(lines[i], text_size)
current_line = (i * line_spacing) - ((num_lines - 1) * line_spacing) // 2
display.text(lines[i], (WIDTH - length) // 2, (HEIGHT // 2) + current_line, text_size)
def show_image(n):
file = IMAGES[n]
name = file.split(".")[0]
open("images/{}".format(file), "r").readinto(image)
display.image(image)
if show_info:
name_length = display.measure_text(name, 0.5)
display.pen(0)
display.rectangle(0, HEIGHT - 21, name_length + 11, 21)
display.pen(15)
display.rectangle(0, HEIGHT - 20, name_length + 10, 20)
display.pen(0)
display.text(name, 5, HEIGHT - 10, 0.5)
for i in range(TOTAL_IMAGES):
x = 286
y = int((128 / 2) - (TOTAL_IMAGES * 10 / 2) + (i * 10))
display.pen(0)
display.rectangle(x, y, 8, 8)
if current_image != i:
display.pen(15)
display.rectangle(x + 1, y + 1, 6, 6)
display.update()
if TOTAL_IMAGES == 0:
display.pen(15)
display.clear()
draw_overlay("To run this demo, create an /images directory on your device and upload some 1bit 296x128 pixel images.", WIDTH - OVERLAY_BORDER, HEIGHT - OVERLAY_BORDER, OVERLAY_SPACING, OVERLAY_TEXT_SIZE)
display.update()
sys.exit()
show_image(current_image)
while True:
if button_up.value():
if current_image > 0:
current_image -= 1
show_image(current_image)
if button_down.value():
if current_image < TOTAL_IMAGES - 1:
current_image += 1
show_image(current_image)
if button_a.value():
show_info = not show_info
show_image(current_image)
if button_b.value() or button_c.value():
display.pen(15)
display.clear()
draw_overlay("To add images connect Badger2040 to a PC, load up Thonny, and see readme.txt in images/", WIDTH - OVERLAY_BORDER, HEIGHT - OVERLAY_BORDER, OVERLAY_SPACING, 0.5)
display.update()
time.sleep(4)
show_image(current_image)
time.sleep(0.01)
|
python
|
import requests
from requests_oauthlib import OAuth1
import json
params = {'app_key': 'xx',
'app_secret': 'xx',
'access_token': 'xx-xx',
'access_secret': 'xx'}
auth = OAuth1(params['app_key'],
params['app_secret'],
params['access_token'],
params['access_secret'])
twittername = 'nameoftwitteraccount'
# Twitter API can only limit by day
since = '2017-09-20' #date
# https://dev.twitter.com/rest/public/search
# note that space, #, etc. have their special percent encoding https://en.wikipedia.org/wiki/Percent-encoding
url_rest = 'https://api.twitter.com/1.1/search/tweets.json?q=from%3A{}%20since%3A{}&result_type=recent'.format(twittername, since)
results = requests.get(url_rest, auth=auth)
results = results.json() #convert json into dict
results = json.dumps(results, indent=4) #print pretty, converts to string
with open('text.json', 'w') as file:
file.write(results)
|
python
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from ....Functions.init_fig import init_fig
def plot(
self,
fig=None,
ax=None,
sym=1,
alpha=0,
delta=0,
is_edge_only=False,
comp_machine=None,
is_show_fig=True,
save_path=None,
win_title=None,
):
"""Plot the Machine in a matplotlib fig
Parameters
----------
self : Machine
A Machine object
fig : Matplotlib.figure.Figure
existing figure to use if None create a new one
ax : Matplotlib.axes.Axes object
Axis on which to plot the data
sym : int
Symmetry factor (1= full machine, 2= half of the machine...)
alpha : float
Angle for rotation [rad]
delta : complex
Complex value for translation
is_edge_only: bool
To plot transparent Patches
comp_machine : Machine
A machine to plot in transparency on top of the self machine
is_show_fig : bool
To call show at the end of the method
    save_path : str
        full path including folder, name and extension of the file to save if save_path is not None
    win_title : str
        title of the figure window; defaults to the machine name if None
    """
(fig, ax, _, _) = init_fig(fig=fig, ax=ax, shape="rectangle")
# Call each plot method to properly set the legend
if self.frame is not None:
self.frame.plot(
fig=fig,
ax=ax,
sym=sym,
alpha=alpha,
delta=delta,
is_edge_only=is_edge_only,
is_show_fig=False,
)
Wfra = self.frame.comp_height_eq()
else:
Wfra = 0
    # Determine the order of plotting parts
lam_list = self.get_lam_list(is_int_to_ext=True)
Rext = lam_list[-1].Rext
for lam in lam_list[::-1]:
lam.plot(
fig=fig,
ax=ax,
sym=sym,
alpha=alpha,
delta=delta,
is_edge_only=is_edge_only,
is_show_fig=False,
)
if lam_list[0].Rint > 0 and self.shaft is not None:
self.shaft.plot(
fig=fig,
ax=ax,
sym=sym,
alpha=alpha,
delta=delta,
is_edge_only=is_edge_only,
is_show_fig=False,
)
Lim = (Rext + Wfra) * 1.5 # Axes limit for plot
if comp_machine is not None:
comp_machine.rotor.plot(
fig,
ax,
sym=sym,
alpha=alpha,
delta=delta,
is_edge_only=True,
is_show_fig=is_show_fig,
)
comp_machine.stator.plot(
fig,
ax,
sym=sym,
alpha=alpha,
delta=delta,
is_edge_only=True,
is_show_fig=is_show_fig,
)
ax.set_xlabel("(m)")
ax.set_ylabel("(m)")
ax.set_title(self.name)
# Axis Setup
plt.axis("equal")
# The Lamination is centered in the figure
ax.set_xlim(-Lim, Lim)
ax.set_ylim(-Lim, Lim)
# Set Windows title
if self.name not in ["", None] and win_title is None:
win_title = self.name + " plot machine"
if save_path is not None:
fig.savefig(save_path)
plt.close()
if is_show_fig:
fig.show()
if win_title:
manager = plt.get_current_fig_manager()
if manager is not None:
manager.set_window_title(win_title)
|
python
|
from core.Model import *
from core.Utils import Utils
from models.User import User
from models.AppVersion import AppVersion
class Device(Base, Model):
__tablename__ = "device"
id = Column(BigInteger, primary_key=True, autoincrement=True)
uuid = Column(String(300), nullable=False)
user_id = Column(Integer, ForeignKey(User.id), nullable=False)
token = Column(String(100), default=None)
app_version_id = Column(BigInteger, ForeignKey(AppVersion.id), default=1)
created = Column(DateTime, default=Utils.time())
updated = Column(DateTime, default=Utils.time(), onupdate=Utils.time())
enable = Column(mysql.TINYINT(1), default=1)
user = relationship(User)
app_version = relationship(AppVersion)
formatters = {"created": Utils.date_formatter, "updated": Utils.date_formatter}
|
python
|
from .BaseCamera import BaseCamera
import numpy as np
import math
class PerspectiveCamera(BaseCamera):
def __init__(self):
BaseCamera.__init__(self, "PerspectiveCamera")
def get_projection_mat(self):
# http://www.songho.ca/opengl/gl_projectionmatrix.html
projection_mat = np.eye(4)
projection_mat[0, 0] = 2 / self.magnification_x
projection_mat[1, 1] = 2 / self.magnification_y
projection_mat[2, 2] = -(self.far + self.near) / (self.far - self.near)
projection_mat[2, 3] = -(2 * self.far * self.near) / (self.far - self.near)
projection_mat[3, 2] = -1
projection_mat[3, 3] = 0
return projection_mat
def set_by_field_of_view(self, fov_x, fov_y=None):
'''
Set the intrinsic by given field of view, in angle degrees
:param fov_x:
:param fov_y: Optional for y direction; Use the same value as for x direction if None
'''
if fov_y is None:
fov_y = fov_x
        self.set_parameters(
            magnification_x=2 * math.tan(math.radians(fov_x) / 2),
            magnification_y=2 * math.tan(math.radians(fov_y) / 2),
        )
def set_by_35mm_equivalent_focal_length(self, focal_x, focal_y=None):
'''
Set the intrinsic by given 35mm equivalent focal lengths.
https://en.wikipedia.org/wiki/35_mm_equivalent_focal_length
:param focal_x:
:param focal_y: Optional for y direction; Use the same value as for x direction if None
'''
if focal_y is None:
focal_y = focal_x
# 35mm equivalent sensor width and height for this camera
film_35mm_height = math.sqrt((36 ** 2 + 24 ** 2) / (1 + self.aspect_ratio ** 2))
film_35mm_width = film_35mm_height * self.aspect_ratio
self.set_parameters(
magnification_x=film_35mm_width / focal_x,
magnification_y=film_35mm_height / focal_y
)
def set_by_sensor_and_focal_length(self, sensor_width, sensor_height, focal_x, focal_y=None):
self.aspect_ratio = sensor_width / sensor_height
if focal_y is None:
focal_y = focal_x
# 35mm equivalent sensor width and height for this camera
self.set_parameters(
magnification_x=sensor_width / focal_x,
magnification_y=sensor_height / focal_y
)
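# Hypothetical usage (the 60-degree field of view is an arbitrary choice):
#
#   cam = PerspectiveCamera()
#   cam.set_by_field_of_view(60)
#   P = cam.get_projection_mat()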
|
python
|
"""
django admin pages for program support models
"""
from config_models.admin import ConfigurationModelAdmin
from django.contrib import admin
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
class ProgramsApiConfigAdmin(ConfigurationModelAdmin):
pass
admin.site.register(ProgramsApiConfig, ProgramsApiConfigAdmin)
|
python
|
import pandas as pd
def find_na(df):
print(pd.isna(df).sum())
|
python
|
#!/usr/bin/env python3
import pyglet
import glooey
import run_demos
window = pyglet.window.Window()
gui = glooey.Gui(window)
bin = glooey.Bin()
widget = glooey.Placeholder(100, 100)
bin.add(widget)
gui.add(bin)
@run_demos.on_space(gui)
def test_bin():
bin.add(widget)
yield "Put a widget in the bin."
bin.clear()
yield "Clear the bin."
pyglet.app.run()
|
python
|
# -*- coding:utf-8 -*-
import os
import random
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sentencepiece as spm
import tensorflow as tf
import tensorflow.keras.backend as K
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
random_seed = 1234
random.seed(random_seed)
np.random.seed(random_seed)
tf.random.set_seed(random_seed)
print(tf.__version__)
print(tf.config.list_physical_devices('GPU'))
print(tf.test.gpu_device_name())
#
# prepare dir
#
data_dir = './data'
if not os.path.exists(data_dir):
data_dir = '../data'
print(os.listdir(data_dir))
songys_dir = os.path.join(data_dir, 'songys')
if not os.path.exists(songys_dir):
os.makedirs(songys_dir)
train_txt = os.path.join(songys_dir, 'ChatbotData.csv')
#
# file check
#
def print_file(filename, count=10):
"""
    Print the file contents, up to the given number of lines
:param filename: file name
:param count: print line count
"""
with open(filename) as f:
for i, line in enumerate(f):
print(line.strip())
if count < i:
break
#
# data read
# https://pandas.pydata.org/pandas-docs/stable/index.html
#
# header=0: the first row is the header
train_data = pd.read_csv(train_txt, header=0, delimiter=',')
print(f'total raw training examples: {len(train_data)}')
train_data = train_data.dropna()
print(f'total valid training examples: {len(train_data)}')
train_data = train_data.sample(1000)  # use only 1000 samples for a quick check
print(f'total sampled training examples: {len(train_data)}')
label_counts = train_data['label'].value_counts()
print(f'training label counts: {label_counts}')
#
# vocabulary
#
# vocab load
vocab_file = os.path.join(data_dir, 'ko_32000.model')
vocab = spm.SentencePieceProcessor()
vocab.load(vocab_file)
#
# tokenize
#
questions, answers = [], []
for i, row in train_data.iterrows():
question = vocab.encode_as_pieces(row['Q'])
questions.append(question)
answer = vocab.encode_as_pieces(row['A'])
answers.append(answer)
assert len(questions) == len(answers)
print(questions[:100])
print(answers[:100])
#
# token to id
#
question_ids = [[vocab.piece_to_id(p) for p in question] for question in questions]
answer_ids = [[vocab.piece_to_id(p) for p in answer] for answer in answers]
print(question_ids[:100])
print(answer_ids[:100])
#
# pad
#
# lengths differ, so a rectangular matrix cannot be built
print(np.array(question_ids)[:50])
print(np.array(answer_ids)[:50])
# check lengths
question_length = [len(question_id) for question_id in question_ids]
print(question_length[:100])
answer_length = [len(answer_id) for answer_id in answer_ids]
print(answer_length[:100])
# check the max lengths
question_max_length, answer_max_length = max(question_length), max(answer_length)
# set the max sequence length (an arbitrary choice)
n_seq = max(answer_max_length, question_max_length) + 2
print(answer_max_length, question_max_length, n_seq)
#
# inputs
#
# train numpy matrix
enc_inputs = np.zeros((len(question_ids), n_seq))
dec_inputs = np.zeros((len(answer_ids), n_seq))
dec_labels = np.zeros((len(answer_ids), n_seq))
print(enc_inputs.shape, enc_inputs[0], enc_inputs[-1])
print(dec_inputs.shape, dec_inputs[0], dec_inputs[-1])
print(dec_labels.shape, dec_labels[0], dec_labels[-1])
# assign question_ids to enc_inputs
for i, token_id in enumerate(question_ids):
token_id += [0] * (n_seq - len(token_id))
token_id = token_id[:n_seq]
assert len(token_id) == n_seq
enc_inputs[i] = token_id
print(enc_inputs.shape, enc_inputs[0], enc_inputs[-1])
# assign answer_ids to dec_inputs and dec_labels
n_max = n_seq - 1
for i, token_id in enumerate(answer_ids):
token_id = token_id[:n_max]
dec_input = [vocab.bos_id()] + token_id
dec_input += [0] * (n_seq - len(dec_input))
dec_label = token_id + [vocab.eos_id()]
dec_label += [0] * (n_seq - len(dec_label))
assert len(dec_input) == len(dec_label) == n_seq
dec_inputs[i] = dec_input
dec_labels[i] = dec_label
print(dec_inputs.shape, dec_inputs[0].astype(int), dec_inputs[-1].astype(int))
print(dec_labels.shape, dec_labels[0].astype(int), dec_labels[-1].astype(int))
train_inputs = (enc_inputs, dec_inputs)
#
# loss and accuracy
#
def lm_loss(y_true, y_pred):
"""
    Compute the loss while excluding pad positions
    :param y_true: ground truth
    :param y_pred: predicted values
    :return loss: loss with pad positions masked out
"""
loss = tf.keras.losses.SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE)(y_true, y_pred)
mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)
loss *= mask
return loss
def lm_acc(y_true, y_pred):
"""
pad 부분을 제외하고 accuracy를 계산하는 함수
:param y_true: 정답
:param y_pred: 예측 값
:retrun loss: pad 부분이 제외된 accuracy 값
"""
y_pred_class = tf.cast(K.argmax(y_pred, axis=-1), tf.float32)
y_true = tf.cast(y_true, tf.float32)
matches = tf.cast(K.equal(y_true, y_pred_class), tf.float32)
mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)
matches *= mask
accuracy = K.sum(matches) / K.maximum(K.sum(mask), 1)
return accuracy
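# note (illustrative shapes): y_true is (bs, n_seq) with 0 as the pad id,
# y_pred is (bs, n_seq, n_vocab); the mask zeroes out pad positions in both metrics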
#
# Attention
#
# inputs
enc_tokens = np.random.randint(1, 10, (1, 5))
print(enc_tokens)
dec_tokens = np.random.randint(1, 10, (1, 6))
print(dec_tokens)
# embedding
embedding = tf.keras.layers.Embedding(10, 4)
enc_embed = embedding(enc_tokens)
print(enc_embed)
dec_embed = embedding(dec_tokens)
print(dec_embed)
# score by for loop
attn_score_t = np.zeros((len(dec_embed[0]), len(enc_embed[0])))
print(attn_score_t)
for i in range(len(dec_embed[0])):
dec_hidden = dec_embed[0][i]
for j in range(len(enc_embed[0])):
enc_hidden = enc_embed[0][j]
score = tf.matmul([dec_hidden], [enc_hidden], transpose_b=True)
attn_score_t[i][j] = score[0]
print(attn_score_t)
# score by matmul
attn_score = tf.matmul(dec_embed, enc_embed, transpose_b=True)
print(attn_score)
# attn prob
attn_prob = tf.nn.softmax(attn_score, axis=-1)
print(attn_prob)
# atten output by for loop
attn_out_t = np.zeros((len(dec_embed[0]), len(dec_embed[0][0])))
print(attn_out_t)
for i in range(len(attn_prob[0])):
attn_row = attn_prob[0][i]
assert len(attn_row) == len(enc_embed[0])
weighted_sum = 0
for j in range(len(enc_embed[0])):
enc_hidden = enc_embed[0][j]
weighted_sum += attn_row[j] * enc_hidden
attn_out_t[i] = weighted_sum
print(attn_out_t)
# atten output by matmul
attn_out = tf.matmul(attn_prob, enc_embed)
print(attn_out)
class DotProductAttention(tf.keras.layers.Layer):
"""
dot product attention class
"""
def __init__(self, **kwargs):
"""
init class
:param kwargs: args
"""
super().__init__(**kwargs)
def call(self, inputs):
"""
run layer
:param inputs: enc_input, dec_input tuple
:return attn_out: attention output
"""
enc_input, dec_input = inputs
# attention score (dot-product)
attn_score = tf.matmul(dec_input, enc_input, transpose_b=True)
        # attention prob (softmax over encoder positions)
attn_prob = tf.nn.softmax(attn_score, axis=-1)
# weighted sum
attn_out = tf.matmul(attn_prob, enc_input)
return attn_out
# atten by class
attn_out_c = DotProductAttention()((enc_embed, dec_embed))
print(attn_out_c)
#
# rnn
#
def build_model_rnn_dot(n_vocab, d_model):
"""
rnn attention model build
:param n_vocab: number of vocab
:param d_model: hidden size
:return model: model
"""
enc_inputs = tf.keras.layers.Input((None,))
dec_inputs = tf.keras.layers.Input((None,))
embedding = tf.keras.layers.Embedding(n_vocab, d_model)
enc_hidden = embedding(enc_inputs) # bs, n_seq, d_model
enc_hidden, fw_h = tf.keras.layers.SimpleRNN(units=d_model, return_sequences=True, return_state=True)(enc_hidden) # bs, n_seq, d_model
dec_hidden = embedding(dec_inputs) # bs, n_seq, d_model
dec_hidden = tf.keras.layers.SimpleRNN(units=d_model, return_sequences=True)(dec_hidden, initial_state=[fw_h]) # bs, n_seq, d_model
attn = DotProductAttention()
attn_out = attn((enc_hidden, dec_hidden)) # bs, n_seq, d_model
hidden = tf.concat([dec_hidden, attn_out], axis=-1) # bs, n_seq, 2 * d_model
outputs = tf.keras.layers.Dense(n_vocab, activation=tf.nn.softmax)(hidden)
model = tf.keras.Model(inputs=(enc_inputs, dec_inputs), outputs=outputs)
return model
# model build
model_rnn = build_model_rnn_dot(len(vocab), 256)
print(model_rnn.summary())
# compile
model_rnn.compile(loss=lm_loss, optimizer=tf.keras.optimizers.Adam(), metrics=[lm_acc])
# early stopping
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='lm_acc', patience=10)
# save weights
save_rnn_dot_file = os.path.join(songys_dir, 'rnn_dot.hdf5')
save_weights = tf.keras.callbacks.ModelCheckpoint(save_rnn_dot_file, monitor='lm_acc', verbose=1, save_best_only=True, mode='max', save_freq='epoch', save_weights_only=True)
# train
history = model_rnn.fit(train_inputs, dec_labels, epochs=500, batch_size=128, callbacks=[early_stopping, save_weights])
def draw_history(history, acc='lm_acc'):
"""
draw training history
:param history: training history object
:param acc: acc key
"""
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(history.history['loss'], 'b-', label='loss')
plt.xlabel('Epoch')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(history.history[acc], 'g-', label=acc)
plt.xlabel('Epoch')
plt.legend()
plt.show()
draw_history(history)
def do_predict(vocab, model, n_seq, string):
"""
응답을 순차적으로 생성
:param vocab: vocab
:param model: model object
:param n_seq: 시퀀스 길이 (number of sequence)
:param string: 입력 문자열
:return response: 입력 문자열에 대한 응답
"""
# encoder_tokens = vocab.encode_as_pieces(string)
enc_inputs = vocab.encode_as_ids(string)[:n_seq]
enc_inputs += [0] * (n_seq - len(enc_inputs))
assert len(enc_inputs) == n_seq
# decoder_tokens = ['[BOS]']
dec_inputs = [vocab.bos_id()]
dec_inputs += [0] * (n_seq - len(dec_inputs))
response = []
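    # autoregressive decoding: at step i, sample the next token from the model's
    # output distribution and feed it back in at position i + 1 of the decoder input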
for i in range(n_seq - 1):
outputs = model.predict([np.array([enc_inputs]), np.array([dec_inputs])])
prob = outputs[0][i]
word_id = int(np.random.choice(len(vocab), 1, p=prob)[0])
if word_id == vocab.eos_id():
break
response.append(word_id)
dec_inputs[i + 1] = word_id
return vocab.decode_ids(response)
model_rnn = build_model_rnn_dot(len(vocab), 256)
print(model_rnn.summary())
string = '안녕 만나서 반가워'
print(do_predict(vocab, model_rnn, n_seq, string))
model_rnn.load_weights(save_rnn_dot_file)
print(do_predict(vocab, model_rnn, n_seq, string))
#
# bi rnn
#
def build_model_bi_rnn_dot(n_vocab, d_model):
"""
bi rnn attention model build
:param n_vocab: number of vocab
:param d_model: hidden size
:return model: model
"""
enc_inputs = tf.keras.layers.Input((None,))
dec_inputs = tf.keras.layers.Input((None,))
embedding = tf.keras.layers.Embedding(n_vocab, d_model)
enc_hidden = embedding(enc_inputs) # bs, n_seq, d_model
    enc_hidden, fw_h, bw_h = tf.keras.layers.Bidirectional(tf.keras.layers.SimpleRNN(units=d_model, return_sequences=True, return_state=True))(enc_hidden) # bs, n_seq, 2 * d_model
s_h = tf.concat([fw_h, bw_h], axis=-1) # bs, 2 * d_model
dec_hidden = embedding(dec_inputs) # bs, n_seq, d_model
dec_hidden = tf.keras.layers.SimpleRNN(units=d_model * 2, return_sequences=True)(dec_hidden, initial_state=[s_h]) # bs, n_seq, 2 * d_model
attn = DotProductAttention()
attn_out = attn((enc_hidden, dec_hidden)) # bs, n_seq, 2 * d_model
hidden = tf.concat([dec_hidden, attn_out], axis=-1) # bs, n_seq, 4 * d_model
outputs = tf.keras.layers.Dense(n_vocab, activation=tf.nn.softmax)(hidden)
model = tf.keras.Model(inputs=(enc_inputs, dec_inputs), outputs=outputs)
return model
# model build
model_bi_rnn = build_model_bi_rnn_dot(len(vocab), 256)
print(model_bi_rnn.summary())
# compile
# https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adam
model_bi_rnn.compile(loss=lm_loss, optimizer=tf.keras.optimizers.Adam(), metrics=[lm_acc])
# early stopping
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='lm_acc', patience=10)
# save weights
save_bi_rnn_dot_file = os.path.join(songys_dir, 'bi_rnn_dot.hdf5')
save_weights = tf.keras.callbacks.ModelCheckpoint(save_bi_rnn_dot_file, monitor='lm_acc', verbose=1, save_best_only=True, mode='max', save_freq='epoch', save_weights_only=True)
# train
history = model_bi_rnn.fit(train_inputs, dec_labels, epochs=500, batch_size=128, callbacks=[early_stopping, save_weights])
# history
draw_history(history)
model_bi_rnn = build_model_bi_rnn_dot(len(vocab), 256)
print(model_bi_rnn.summary())
print(do_predict(vocab, model_bi_rnn, n_seq, string))
model_bi_rnn.load_weights(save_bi_rnn_dot_file)
print(do_predict(vocab, model_bi_rnn, n_seq, string))
|
python
|
#!/usr/bin/python
import simple_test
simple_test.test("test9", ["-VVV", "-N", "--noise", "-rr", ], expect_fail=True)
|
python
|
import minpy
import minpy.numpy as np
import minpy.numpy.random as random
from minpy.core import grad_and_loss
# from examples.utils.data_utils import gaussian_cluster_generator as make_data
# from minpy.context import set_context, gpu
# Please uncomment following if you have GPU-enabled MXNet installed.
# This single line of code will run MXNet operations on GPU 0.
# set_context(gpu(0)) # set the global context as gpu(0)
# Predict the class using multinomial logistic regression (softmax regression).
# Because MXNet's implementation does not support mu and sigma to be arrays
# (only scalar is supported right now), we need to change the policy for
# this function by `@minpy.wrap_policy` or `with minpy.OnlyNumPyPolicy(): ...`
""" Generates several clusters of Gaussian points """
def test_policy():
@minpy.wrap_policy("only_numpy")
def gaussian_cluster_generator(num_samples=10000, num_features=500, num_classes=5):
mu = np.random.rand(num_classes, num_features)
sigma = np.ones((num_classes, num_features)) * 0.1
num_cls_samples = int(num_samples / num_classes)
x = np.zeros((num_samples, num_features))
y = np.zeros((num_samples, num_classes))
for i in range(num_classes):
cls_samples = np.random.normal(mu[i,:], sigma[i,:], (num_cls_samples, num_features))
x[i*num_cls_samples:(i+1)*num_cls_samples] = cls_samples
y[i*num_cls_samples:(i+1)*num_cls_samples,i] = 1
return x, y
def predict(w, x):
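        # multinomial logistic regression: softmax over the logits np.dot(x, w)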
a = np.exp(np.dot(x, w))
a_sum = np.sum(a, axis=1, keepdims=True)
prob = a / a_sum
return prob
def train_loss(w, x):
prob = predict(w, x)
loss = -np.sum(label * np.log(prob)) / num_samples
return loss
"""Use Minpy's auto-grad to derive a gradient function off loss"""
grad_function = grad_and_loss(train_loss)
# Using gradient descent to fit the correct classes.
def train(w, x, loops):
for i in range(loops):
dw, loss = grad_function(w, x)
if i % 10 == 0:
print('Iter {}, training loss {}'.format(i, loss))
# gradient descent
w -= 0.1 * dw
# Initialize training data.
num_samples = 10000
num_features = 500
num_classes = 5
data, label = gaussian_cluster_generator(num_samples, num_features, num_classes)
# Initialize training weight and train
weight = random.randn(num_features, num_classes)
train(weight, data, 100)
if __name__ == "__main__":
test_policy()
|
python
|
from .pytorch_sampler import PyTorchSampler
from .sampler import Sampler
from .unigram import UnigramDistribution
from .vocab import Vocabulary
|
python
|
import json
import os
import tempfile
from datetime import datetime, timedelta
from enum import Enum
from itertools import zip_longest, groupby
from threading import Timer
from typing import Any, List, Optional, Dict, Iterable, Tuple, Set
import sentry_sdk
from telegram import ParseMode, TelegramError, Update, Message, ChatPermissions
from telegram.error import BadRequest
from telegram.ext import CallbackContext, Updater
from .chat import Chat, User
from .decorators import Command
from .logger import create_logger
def grouper(iterable, n, fillvalue=None) -> Iterable[Tuple[Any, Any]]:
"""Collect data into fixed-length chunks or blocks"""
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
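# e.g. grouper('ABCDEFG', 3, 'x') --> ('A','B','C'), ('D','E','F'), ('G','x','x')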
class SpamType(Enum):
NONE = 0
CONSECUTIVE = 1
DIFFERENT = 2
SAME = 3
class Bot:
def __init__(self, updater: Updater, state_filepath: str):
self.logger = create_logger("hhh_diff_bot")
self.chats: Dict[int, Chat] = {}
self.updater = updater
self.main_admin_ids: Set[int] = self._load_main_admin_ids()
self.state: Dict[str, Any] = {
"group_message_id": [],
"recent_changes": [],
"hhh_id": -1001473841450,
"pinned_message_id": None
}
self.groups = []
self.state_filepath = state_filepath
def _load_main_admin_ids(self) -> Set[int]:
raw_value = os.getenv("MAIN_ADMIN_IDS")
if not raw_value:
self.logger.warning("MAIN_ADMIN_IDS is not set!")
return set()
try:
id_list = json.loads(raw_value)
except ValueError as e:
self.logger.error("Could not load main admins", exc_info=e)
return set()
if not isinstance(id_list, list):
self.logger.error("MAIN_ADMIN_IDS is not a JSON list")
return set()
result = set()
for main_admin_id in id_list:
try:
result.add(int(main_admin_id))
except ValueError:
self.logger.error("Not a valid user ID: %s", main_admin_id)
return result
def save_state(self) -> None:
self.state["chats"] = [chat.serialize() for chat in self.chats.values()]
self.state["groups"] = self.groups
with open(self.state_filepath, "w+") as f:
json.dump(self.state, f)
@Command(chat_admin=True)
def delete_chat(self, update: Update, context: CallbackContext) -> None:
chat: Chat = context.chat_data["chat"]
if chat.id in self.chats:
self.logger.info(f"Deleting chat ({chat}) from state.")
del self.chats[chat.id]
del context.chat_data["chat"]
@Command(main_admin=True)
def delete_chat_by_id(self, update: Update, context: CallbackContext) -> Optional[Message]:
try:
chat_id = int(context.args[0])
except (IndexError, ValueError):
return update.effective_message.reply_text(
text=f"Enter a (valid) chat_id as an argument to use this command.")
try:
self.chats.pop(chat_id)
except KeyError:
return update.effective_message.reply_text(text=f"Not a valid chat_id.")
def set_user_restriction(self, chat_id: int, user: User, until_date: timedelta, permissions: ChatPermissions,
reason: str = None) -> bool:
timestamp: int = int((datetime.now() + until_date).timestamp())
try:
result: bool = self.updater.bot.restrict_chat_member(chat_id, user.id, permissions, until_date=timestamp)
if not permissions.can_send_messages:
datestring: str = str(until_date).rsplit(".")[0] # str(timedelta) -> [D day[s], ][H]H:MM:SS[.UUUUUU]
message = f"{user.name} has been restricted for {datestring}."
if reason:
message += f"\nReason: {reason}"
self.send_message(chat_id=chat_id, text=message, disable_notification=True)
except TelegramError as e:
if e.message == "Can't demote chat creator" and not permissions.can_send_messages:
message = "Sadly, user {} couldn't be restricted due to: `{}`. Shame on {}".format(user.name,
e.message,
user.name)
self.logger.debug("{}".format(message))
self.send_message(chat_id=chat_id, text=message, parse_mode=ParseMode.MARKDOWN)
self.logger.error(e)
result = False
return result
def unmute_user(self, chat_id: int, user: User) -> bool:
result = False
permissions = ChatPermissions(can_send_messages=True, can_send_media_messages=True,
can_send_other_messages=True, can_add_web_page_previews=True)
try:
# if self.updater.bot.promote_chat_member(chat_id, user.id, can_post_messages=True):
if self.set_user_restriction(chat_id, user, timedelta(minutes=0), permissions):
user.muted = False
result = True
else:
self.logger.error("Failed to unmute user")
except TelegramError:
self.logger.error("Error while promoting chat member", exc_info=True)
return result
def mute_user(self, chat_id: int, user: User, until_date: timedelta, reason: Optional[str] = None) -> bool:
if user.muted:
return True
permissions = ChatPermissions(can_send_messages=False)
result = False
self.logger.info(f"Reason for muting: {reason}")
if self.set_user_restriction(chat_id, user, until_date=until_date, reason=reason, permissions=permissions):
user.muted = True
result = True
# We'd need to parse the exception before assigning user.muted differently
def _set_user_unmute():
user.muted = False
self.logger.info(f"Set timer for {until_date.total_seconds()}s to set user mute state to `False`")
Timer(until_date.total_seconds(), _set_user_unmute).start()
return result
def update_recent_changes(self, update: str):
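        # keep at most the three most recent changes, newest first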
rc: List[str] = self.state.get("recent_changes", [])
if len(rc) > 2:
rc.pop()
self.state["recent_changes"] = [update] + rc
@staticmethod
def create_latest_change_text(chat: Chat, new_title: str, delete: bool = False) -> str:
change = f"Added {chat.title}"
if new_title:
change = f"{chat.title} -> {new_title}"
elif delete:
change = f"Removed {chat.title}"
return change
def build_hhh_group_list_text(self, prefix: str = "", suffix: str = "") -> List[str]:
"""
For now, we'll assume that chats starting with the same letter will all fit into a single message
        :param prefix: Put in front of the constructed text for the group names
        :param suffix: Put after the constructed text for the group names
:return: List[str]
"""
def chat_to_item(chat: Chat):
try:
if chat.invite_link:
return f"<a href=\"{chat.invite_link}\">{chat.title}</a>"
else:
return f"{chat.title}"
except AttributeError:
return f"{chat.title}"
messages = []
message = f"{prefix}\n" if prefix else ""
"""
Telegram counts the character count after entity parsing.
i.e. <a href="https://example.com">A</a> should only be one character
We need this for the invite links
"""
deductable_per_chat = 0
for _, g in groupby(
sorted([chat for _, chat in self.chats.items() if chat and chat.title], key=lambda c: c.title.lower()),
key=lambda c: c.title[0].lower()):
line = " | ".join([chat_to_item(chat) for chat in g]) + "\n"
if len(message) + len(line) - deductable_per_chat * len(list(g)) >= 4096:
messages.append(message)
message = ""
message += line
if len(message) + len(suffix) >= 4096:
messages.append(message)
message = ""
message += suffix
messages.append(message)
return messages
@property
def group_message_ids(self) -> List:
"""
        This is purely for migration purposes (str -> list)
:return: List[str]
"""
value = self.state.get("group_message_id", [])
if not value:
return []
elif isinstance(value, str):
return [value]
else:
return value
@group_message_ids.setter
def group_message_ids(self, value: List[str]):
self.state["group_message_id"] = value
def delete_message(self, chat_id: str, message_id: str, *args, **kwargs):
return self.updater.bot.delete_message(chat_id=chat_id, message_id=message_id, *args, **kwargs)
def update_hhh_message(self, chat: Chat, new_title: str = "", delete: bool = False, retry: bool = False):
if not retry:
latest_change = self.create_latest_change_text(chat, new_title, delete)
self.logger.debug(f"Add latest change {latest_change} to recent_changes")
self.update_recent_changes(latest_change)
if new_title:
self.logger.debug(f"Update chat.title ({chat.title}) to {new_title}.")
chat.title = new_title
self.chats.update({chat.id: chat})
if delete and chat.id in self.chats.keys():
self.chats.pop(chat.id)
self.logger.debug(f"Build new group list.")
total_group_count_text = f"{len([c for c in self.chats.values() if c.title])} groups in total"
changes = "\n".join(["========", "\n".join(self.state["recent_changes"])])
messages = self.build_hhh_group_list_text(prefix=total_group_count_text, suffix=changes)
diff = len(messages) - len(self.group_message_ids)
if diff > 0:
# We have to send more messages than before
# -> send a new set of messages since we can't insert one into the conversation
self.group_message_ids = []
elif diff < 0:
# We have less messages than before
# -> delete the unused ones
for message_id in self.group_message_ids[-diff:]:
try:
self.delete_message(self.state["hhh_id"], message_id)
except BadRequest as e:
self.logger.debug("Exception occured", exc_info=True)
pinned = False
for index, message_text in enumerate(messages):
if not self.group_message_ids or index >= len(self.group_message_ids):
self.logger.debug(f"Send {len(messages)} new messages.")
message: Message = self.send_message(chat_id=self.state["hhh_id"], text=message_text,
parse_mode=ParseMode.HTML)
self.group_message_ids = self.group_message_ids + [message.message_id]
if not pinned:
try:
if self.state.get("pinned_message_id"):
try:
self.updater.bot.unpin_chat_message(chat_id=self.state["hhh_id"],
message_id=self.state["pinned_message_id"])
except BadRequest:
self.logger.error("Couldn't unpin message", exc_info=True)
self.updater.bot.pin_chat_message(chat_id=self.state["hhh_id"],
message_id=self.group_message_ids[0],
disable_notification=True)
self.state["pinned_message_id"] = self.group_message_ids[0]
pinned = True
except BadRequest:
self.logger.error("Couldn't pin the message", exc_info=True)
pass
else:
try:
self.logger.debug(f"Edit an old message with the new text ({message_text})")
self.updater.bot.edit_message_text(message_text, chat_id=self.state["hhh_id"],
message_id=self.group_message_ids[index],
disable_web_page_preview=True,
parse_mode=ParseMode.HTML)
except BadRequest as e:
self.logger.exception("Couldn't edit message", exc_info=True)
if e.message == "Message to edit not found":
self.logger.debug("Try sending a new message")
self.group_message_ids = []
return self.update_hhh_message(chat, new_title, delete, retry=True)
@Command()
def handle_message(self, update: Update, context: CallbackContext) -> None:
self.logger.info("Handle message: {}".format(update.effective_message.text))
@Command()
def handle_left_chat_member(self, update: Update, context: CallbackContext) -> None:
chat: Chat = context.chat_data["chat"]
if update.effective_message.left_chat_member.id != self.updater.bot.id:
try:
user: User = [user for user in chat.users if user.id == update.effective_message.left_chat_member.id][0]
except IndexError:
self.logger.error("Couldn't find user in chat")
else:
chat.users.remove(user)
else:
self.update_hhh_message(chat, "", delete=True)
context.chat_data.clear()
def set_state(self, state: Dict[str, Any]) -> None:
self.state = state
self.chats = {schat["id"]: Chat.deserialize(schat, self.updater.bot) for schat in state.get("chats", [])}
def send_message(self, *, chat_id: int, text: str, **kwargs) -> Message:
return self.updater.bot.send_message(chat_id=chat_id, text=text, disable_web_page_preview=True, **kwargs)
def _get_chat_by_title(self, title: str) -> Optional[Chat]:
for chat in self.chats.values():
if title == chat.title:
return chat
return None
@Command()
def show_users(self, update: Update, context: CallbackContext) -> Optional[Message]:
from_chat: Chat = context.chat_data["chat"]
if context.args:
search_title = " ".join(context.args).strip()
chat: Optional[Chat] = self._get_chat_by_title(search_title)
if not chat:
return self.send_message(chat_id=from_chat.id, text="This chat doesn't exist")
else:
chat = from_chat
sorted_users: List[User] = sorted(chat.users, key=lambda _user: _user.name)
if sorted_users:
message = "\n".join([user.name for user in sorted_users])
else:
message = "No active users. Users need to write a message in the chat to be recognized (not just a command)"
return self.send_message(chat_id=from_chat.id, text=message)
@Command()
def new_member(self, update: Update, context: CallbackContext) -> None:
chat = context.chat_data["chat"]
self.logger.info(f"New member(s) have joined this chat")
for member in update.effective_message.new_chat_members:
if member.id != self.updater.bot.id:
chat.users.add(User.from_tuser(member))
else:
try:
self.update_hhh_message(context.chat_data["chat"], "")
except BadRequest:
self.logger.exception("Failed to update message", exc_info=True)
self.send_message(chat_id=self.state["hhh_id"], text=f"Created {update.effective_chat.title}")
@Command()
def status(self, update: Update, context: CallbackContext) -> Message:
return update.effective_message.reply_text(text=f"{context.chat_data['chat']}")
@Command()
def version(self, update: Update, context: CallbackContext) -> Message:
return update.effective_message.reply_text("{{VERSION}}")
@Command()
def server_time(self, update: Update, context: CallbackContext) -> Message:
return update.effective_message.reply_text(datetime.now().strftime("%d-%m-%Y %H-%M-%S"))
@Command()
def get_data(self, update: Update, context: CallbackContext) -> Message:
chat: Chat = context.chat_data["chat"]
data = [_chat for _chat in self.state.get("chats", []) if _chat.get("id") == chat.id]
if data:
with tempfile.TemporaryFile() as temp:
temp.write(json.dumps(data[0]).encode("utf-8"))
temp.seek(0)
return self.updater.bot.send_document(chat_id=chat.id, document=temp, filename=f"{chat.title}.json")
else:
return update.effective_message.reply_text("Couldn't find any data for this chat.")
@Command(chat_admin=True)
def mute(self, update: Update, context: CallbackContext):
if not context.args:
message = "Please provide a user and an optional timeout (`/mute <user> [<timeout in minutes>] [<reason>]`)"
self.logger.warning("No arguments have been provided, don't execute `mute`.")
return self.send_message(chat_id=update.message.chat_id, text=message, parse_mode=ParseMode.MARKDOWN)
username = context.args[0]
minutes = 15
reason = " ".join(context.args[2:])
try:
minutes = int(context.args[1])
except (IndexError, ValueError):
sentry_sdk.capture_exception()
self.logger.error("Exception while getting time string from mute command", exc_info=True)
mute_time = timedelta(minutes=minutes)
chat = context.chat_data["chat"]
try:
user = next(filter(lambda x: x.name == username, chat.users))
except StopIteration:
sentry_sdk.capture_exception()
self.logger.warning(f"Couldn't find user {username} in users for chat {update.message.chat_id}",
exc_info=True)
update.effective_message.reply_text(f"Can't mute {username} (not found in current chat).")
else:
self.mute_user(update.message.chat_id, user, until_date=mute_time, reason=reason)
@Command(chat_admin=True)
def unmute(self, update: Update, context: CallbackContext):
if not context.args:
message = "You have to provide a user which should be unmuted."
self.logger.warning("No arguments have been provided, don't execute `unmute`.")
return update.effective_message.reply_text(message, parse_mode=ParseMode.MARKDOWN)
username: str = context.args[0].strip()
chat: Chat = context.chat_data["chat"]
# @all is an unusable username
if username == "@all":
for user in chat.users:
try:
self.unmute_user(chat.id, user)
except BadRequest:
self.logger.error(f"Failed to unmute user ({user})")
return
try:
user = next(filter(lambda x: x.name.lower() == username.lower(), chat.users))
except StopIteration:
sentry_sdk.capture_exception()
self.logger.warning(f"Couldn't find user {username} in users for chat {update.message.chat_id}",
exc_info=True)
update.effective_message.reply_text(f"Can't unmute {username} (not found in current chat).")
else:
if self.unmute_user(chat.id, user):
update.effective_message.reply_text(f"Successfully unmuted {username}.")
else:
update.effective_message.reply_text(f"Failed to unmute {username}.")
@Command()
def handle_unknown_command(self, update: Update, context: CallbackContext):
user: User = context.user_data["user"]
chat: Chat = context.chat_data["chat"]
reason = "This is not a valid command fuckwit."
self.mute_user(chat_id=chat.id, user=user, until_date=timedelta(minutes=15), reason=reason)
def kick_user(self, chat: Chat, user: User):
return self.updater.bot.kick_chat_member(chat_id=chat.id, user_id=user.id)
@Command(chat_admin=True)
def kick(self, update: Update, context: CallbackContext):
chat: Chat = context.chat_data["chat"]
if not context.args:
message = "Please provide a user and an optional reason(`/kick <user> [<reason>]`)"
self.logger.warning("No arguments have been provided, don't execute `kick`.")
return update.message.reply_text(text=message, parse_mode=ParseMode.MARKDOWN)
username = context.args[0]
reason = " ".join(context.args[1:])
try:
user: User = next(filter(lambda x: x.name == username, chat.users))
except StopIteration:
sentry_sdk.capture_exception()
self.logger.warning(f"Couldn't find user {username} in users for chat {update.message.chat_id}",
exc_info=True)
update.effective_message.reply_text(f"Can't kick {username} (not found in current chat).")
else:
try:
result = self.kick_user(chat, user)
except TelegramError as e:
message = f"Couldn't remove {user.name} from chat due to error ({e})"
self.logger.error(message)
update.message.reply_text(message)
else:
if result:
message = f"{user.name} was kicked from chat"
message += f" due to {reason}." if reason else "."
self.logger.debug(message)
chat.users.remove(user)
update.message.reply_text(message)
else:
message = f"{user.name} couldn't be kicked from chat"
self.logger.warning(message)
update.effective_message.reply_text(message)
@Command()
def new_chat_title(self, update: Update, context: CallbackContext):
chat: Chat = context.chat_data["chat"]
new_title = update.effective_message.new_chat_title
self.update_hhh_message(chat, new_title)
@Command()
def chat_created(self, update: Update, context: CallbackContext):
try:
self.update_hhh_message(context.chat_data["chat"], "")
except BadRequest:
self.logger.exception("Failed to update message", exc_info=True)
self.send_message(chat_id=self.state["hhh_id"], text=f"Created {update.effective_chat.title}")
@Command(chat_admin=True)
def add_invite_link(self, update: Update, context: CallbackContext):
chat: Chat = context.chat_data["chat"]
if context.args:
invite_link: str = context.args[0]
else:
return update.effective_message.reply_text("Provide an invite link moron")
if _validate_invite_link(invite_link):
chat.invite_link = invite_link
if update.effective_message.reply_text("Added (new) invite link"):
self.update_hhh_message(context.chat_data["chat"], "", retry=True)
else:
return update.effective_message.reply_text(
"invite link isn't in a correct form (tg://join?invite=[...] | https://t.me/joinchat/[...] | t.me/[...]")
@Command()
def get_invite_link(self, update: Update, context: CallbackContext):
if context.args:
group_name: str = " ".join(context.args)
else:
return update.effective_message.reply_text("Provide a group name moron")
try:
chat: Chat = [c for c in self.chats.values() if c.title == group_name][0]
except IndexError:
return update.effective_message.reply_text("I don't know that group")
if chat.invite_link:
return update.effective_message.reply_text(chat.invite_link)
else:
return update.effective_message.reply_text("No invite link found for the given group")
@Command(chat_admin=True)
def remove_invite_link(self, update: Update, context: CallbackContext):
chat: Chat = context.chat_data["chat"]
chat.invite_link = None
self.update_hhh_message(context.chat_data["chat"], "", retry=True)
@Command()
def migrate_chat_id(self, update: Update, context: CallbackContext):
self.logger.debug(f"Migrating {update.effective_message}")
if not update.effective_message.migrate_from_chat_id:
self.logger.warning("Aborting migration since `migrate_from_chat_id` is unset, see #49")
return None
from_id = int(update.effective_message.migrate_from_chat_id)
to_id = int(update.effective_message.chat.id)
self.logger.debug(f"Update chat_id to {to_id} (was: {from_id})")
new_chat = context.chat_data["chat"]
new_chat.id = to_id
context.chat_data["chat"] = new_chat
self.chats[to_id] = new_chat
self.chats.pop(from_id)
@Command()
def renew_diff_message(self, update: Update, context: CallbackContext):
self.group_message_ids = []
# retry doesn't update the recent changes
self.update_hhh_message(context.chat_data["chat"], "", retry=True)
def me(self):
return self.updater.bot.get_me()
@Command()
def noop(self, update: Update, context: CallbackContext):
self.logger.debug(update)
pass
def _split_messages(lines):
message_length = 4096
messages = []
current_length = 0
current_message = 0
for line in lines:
if len(messages) <= current_message:
messages.append([])
line_length = len(line)
if current_length + line_length < message_length:
current_length += line_length
messages[current_message].append(line)
        else:
            # start a new message and keep the overflowing line instead of dropping it
            current_message += 1
            messages.append([line])
            current_length = line_length
return messages
def _validate_invite_link(link: str) -> bool:
import re
    if re.match(r"https://t\.me/(joinchat/)?.*", link):
        return True
    m = re.match(r"tg://join\?invite=.*", link)
    return bool(m)
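# e.g. _validate_invite_link("https://t.me/joinchat/AbCdEf") -> True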
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2007 Free Software Foundation, Inc. <https://fsf.org/>
#
# Licensed under the GNU General Public License, version 3 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://jxself.org/translations/gpl-3.zh.shtml
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import uuid
from sfo_server import access_logger
from sfo_server.models import SfoServerRole,db, SfoServerPermission
from flask_restful import Resource,request,abort,fields,marshal_with
from sfo_server.decorate import access_log_decorate,permission_required, login_required
from sfo_server.resource.common import timestamp_format
rolelist_resource_fields = {
"status": fields.String,
"message": fields.String,
"data": fields.List(fields.Nested({
"role_name": fields.String,
"add_time": fields.String,
"last_modify_time": fields.String,
"role_desc": fields.String
}))
}
def get_role_list():
status = ''
message = ''
data = []
resp = {"status": status, "message": message, "data": data}
sfo_server_rolelist = SfoServerRole.query.all()
if sfo_server_rolelist:
data = sfo_server_rolelist
status = 200
message = 'SUCCESS'
else:
status = 404
message = 'Not Found Record'
resp.update({"status": status, "message": message, "data":data})
return resp, status
def add_role_logic(role_json):
"""
:param role_json:
:return:
"""
status = ''
message = ''
resp = {"status": status, "message": message}
new_role = SfoServerRole()
try:
if role_json:
for key, value in role_json.items():
if hasattr(new_role, key):
if key == 'permissions':
value = SfoServerPermission.query_permissions(value)
setattr(new_role, key, value)
new_role.guid = str(uuid.uuid4())
new_role.add_time = new_role.last_modify_time = timestamp_format(time.time())
db.session.add(new_role)
db.session.commit()
status = 200
            message = 'SUCCESS'
else:
status = 501
message = 'Null Value %s' % role_json
except Exception, ex:
status = 502
message = str(ex)
resp.update({"status": status, "message": message})
return resp, status
class SfoServerRoleListAPI(Resource):
resource = SfoServerRole
method_decorators = [permission_required(resource), login_required]
@marshal_with(rolelist_resource_fields)
def get(self):
try:
resp, status = get_role_list()
return resp, status
except Exception, ex:
status = 500
message = str(ex)
return {'status': status, "message": message}, status
def post(self):
try:
if not request.json:
abort(400)
role_json = request.json
resp, status = add_role_logic(role_json)
return resp, status
except Exception, ex:
status = 500
message = str(ex)
return {'status': status, "message": message}, status
|
python
|
#
# Copyright (c) 2017-2018 Joy Diamond. All rights reserved.
#
__import__('Boot').boot()
def line(format, *args):
print format % args
def main():
if 0:
from Pattern import make_match_function
joy_match = make_match_function('[Aa](m)i(?P<what>t)\Z')
else:
import _sre
joy_match = _sre.compile(
None,#'[Aa](m)i(?P<what>t)\\Z',
0,
[
17, 9, 4, 4, 4, 19, 65, 19, 97, 0, 15, 6, 19, 65, 19, 97, 0, 21, 0, 19, 109,
21, 1, 19, 105, 21, 2, 19, 116, 21, 3, 6, 7, 1,
],
2,
{'what': 2},
((None, None, 'what')),
).match
m = joy_match('Joy')
print m.group(0, 1, 2)
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 20 15:12:49 2016
@author: uzivatel
"""
import numpy as np
import scipy
from functools import partial
from copy import deepcopy
from .general import Coordinate,Grid
from ...General.UnitsManager import PositionUnitsManaged,position_units
from ...General.types import UnitsManaged
from ..positioningTools import RotateAndMove, RotateAndMove_1, CenterMolecule
class DensityGrid(PositionUnitsManaged):
''' Class representing electronic density on spatial grid (e.g. molecular
orbitals, transition density, ...)
origin : numpy.array of real (dimension 3)
origin of density grid (Position managed units)
grid : numpy.array of integer (dimension 3)
number of grid points at each dimension
step : numpy.array of real (dimension 3x3)
step[i,:] translational vector in first dimension (Position managed units)
data : numpy.array of real (dimension Npoints_x x Npoints_y x Npoints_z)
Density values on the grid. data[i,j,k] correspond to the point with
coordinates self.origin+i*self.step[0,:]+j*self.step[1,:]+kk*self.step[2,:]
type : string
If ``typ='mo'`` density values correspond to real wavefunction otherwise
it is an electron density
indx : integer
Index of molecular orbital to which wavefunction correspond
coor : Coordinate class
Atomic coordinates for every atom in the molecule or complex.
(Position managed units)
at_charge : numpy array of real, integer or string (dimension Natoms)
Proton number for every atom in the molecule or complex
Functions
----------
rotate :
Rotate the density and all its properties by specified angles in
radians in positive direction.
rotate_1 :
        Inverse rotation to rotate
move :
Moves the density and all its properties along specified vector
    center :
        Center the density and align it in a defined plane
copy :
Create 1 to 1 deep copy of the density with all classes and types.
import_cub :
Read density from cube file
output :
Outputs density into cube file
get_axes :
Outputs x, y and z axis of the grid on which density is evaluated
(only for nonrotated grid - oriented along coordinate axis)
dipole :
Numerical calculation of dipole from the density
dipole_partial :
Numerical calculation of dipole for only specified spatial cut of the
density. (only for nonrotated grid - oriented along coordinate axis)
cut :
        Spatial cut of the density, returned as a new density.
(only for nonrotated grid - oriented along coordinate axis)
calc_atomic_properties :
Calculate atomic charges and dipoles from numerical integration of the
density into individual atoms. Quantity from grid point will be assigned
to nearest atom.
'''
origin=UnitsManaged("origin")
step=UnitsManaged("step")
def __init__(self,origin,grid,step,density,typ='mo',mo_indx=1,Coor=None,At_charge=None):
if origin is None:
self.origin=None
else:
self.origin = np.copy(origin)
if grid is None:
self.grid=None
else:
self.grid = np.copy(grid)
if step is None:
self.step=None
else:
self.step = np.copy(step)
if density is None:
self.data=None
else:
self.data = np.copy(density)
self.type = typ
self.indx = mo_indx
if Coor is None:
self.coor=None
else:
self.coor = Coordinate(Coor)
self.at_charge = np.copy(At_charge)
def output(self,filename='density.cub'):
''' Output density to cube file
Parameters
----------
filename : string (optional - init='density.cub')
Output file name including the path to output folder
'''
with position_units('Bohr'):
Coor = np.copy(self.coor.value)
Grid = np.copy(self.grid)
Step = np.copy(self.step)
At_charge = np.copy(self.at_charge)
with open(filename, "wt") as f:
            # write the header
            f.write("____Anything can be written here____ \n MO coefficients \n")
# f.write(" %i %5.2f %5.2f %5.2f \n" % (-len(qc.at_coord),min_[0],min_[1],min_[2]))
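            # per the cube file convention, a negative atom count signals that MO data follows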
if self.type=='mo':
f.write("{:5d}".format(-len(Coor)))
else:
f.write("{:5d}".format(len(Coor)))
for ii in range(3):
f.write("{:12.6f}".format(self.origin[ii]))
f.write("{:5d}\n".format(1))
f.write("{:5d}{:12.6f}{:12.6f}{:12.6f}\n".format(Grid[0], Step[0,0], Step[0,1], Step[0,2] ))
f.write("{:5d}{:12.6f}{:12.6f}{:12.6f}\n".format(Grid[1], Step[1,0], Step[1,1], Step[1,2] ))
f.write("{:5d}{:12.6f}{:12.6f}{:12.6f}\n".format(Grid[2], Step[2,0], Step[2,1], Step[2,2] ))
for ii in range(len(Coor)):
f.write("{:5d}{:12.6f}{:12.6f}{:12.6f}{:12.6f}\n".format(int(float(At_charge[ii])), float(At_charge[ii]), Coor[ii,0], Coor[ii,1], Coor[ii,2]))
if self.type=='mo':
f.write("{:5d}{:5d}\n".format(1, self.indx))
            # write the molecular orbital values on the grid
for ii in range(self.grid[0]):
for jj in range(self.grid[1]):
for kk in range(self.grid[2]):
f.write("{:13.5E}".format(self.data[ii,jj,kk]))
if (kk % 6) == 5:
f.write("\n")
#f.write("\n")
if self.grid[2]%6!=0:
f.write("\n")
def import_cub(self,filename):
''' Import data from density cube file
Parameters
----------
filename : string
        Input file name (.cub) including the path to the file folder
'''
origin=np.zeros(3,dtype='f8')
self.grid=np.zeros(3,dtype='i8')
step=np.zeros((3,3),dtype='f8')
fid = open(filename,'r') # Open the file
flines = fid.readlines() # Read the WHOLE file into RAM
fid.close() # Close the file
thisline = flines[2].split()
Natom=np.abs(int(thisline[0]))
if int(thisline[0]) < 0:
self.type='mo'
else:
self.type='transition'
self.at_charge=np.zeros(Natom,dtype='f')
Coor=np.zeros((Natom,3),dtype='f8')
for ii in range(3):
origin[ii]=float(thisline[ii+1])
for kk in range(3):
thisline = flines[kk+3].split()
self.grid[kk]=int(thisline[0])
for ii in range(3):
step[kk,ii]=float(thisline[ii+1])
# atomic information:
for kk in range(Natom):
thisline = flines[kk+6].split()
self.at_charge[kk]=float(thisline[1])
for ii in range(3):
Coor[kk,ii]=float(thisline[ii+2])
if self.type=='mo':
thisline = flines[Natom+6].split()
self.indx=int(thisline[1])
il=7
else:
il=6
with position_units('Bohr'):
self.coor=Coordinate(Coor)
self.origin=origin.copy()
self.step=step.copy()
# read density
self.data=np.zeros((self.grid[0],self.grid[1],self.grid[2]),dtype='f8')
counter=np.zeros(3,dtype='i8')
        for kk in range(Natom+il,len(flines)):
            thisline = flines[kk].split()  # values on the current line
            for value in thisline:
                self.data[counter[0],counter[1],counter[2]]=float(value)
                counter[2]+=1
                if counter[2]==self.grid[2]:
                    # in the cube format every z-row starts on a new line
                    counter[2]=0
                    counter[1]+=1
                    if counter[1]==self.grid[1]:
                        counter[1]=0
                        counter[0]+=1
                    break
def get_axes(self):
""" Outputs x, y and z axis of the grid. ** Working only for grid
oriented along coordinate axis (nonrotated grid)**
Returns
--------
x,y,z : numpy array of float (dimension Grid_Nx, Grid_Ny, Grid_Nz)
Coordinates of grid points in coordinate axes
"""
print("Working only for nonrotated grid oriented along coordinate axes")
x=np.arange(self.grid[0])*self.step[0,0]+self.origin[0]
y=np.arange(self.grid[1])*self.step[1,1]+self.origin[1]
z=np.arange(self.grid[2])*self.step[2,2]+self.origin[2]
return x,y,z
def copy(self):
''' Copy DensityGrid class variable into the new one
Returns
----------
density_new : DensityGrid class
New DensityGrid class variable with exactly the same values as the
original one
Notes
----------
We have to use this function because simple density_new=density_old
only create pointer to the old density and therefore all changes in
density_new would be also done on density_old and this is what we don't
want
'''
density_new = deepcopy(self)
return density_new
def move(self,dx,dy,dz):
''' Moves density grid in space
Parameters
----------
dx,dy,dz : real
Distance of density shift along x resp. y resp.
z axis.
'''
vec=np.array([dx,dy,dz],dtype='f8')
self.origin=self.origin+vec
self.coor.move(dx,dy,dz)
def rotate(self,rotxy,rotxz,rotyz):
''' Rotate DENSITY in SPACE in positive rotational angle
(if right thumb pointing in direction of axes fingers are pointing in
        positive rotation direction). First is rotation around the z axis, then around the y axis, and then around
        the x axis.
Parameters
----------
rotxy,rotxz,rotyz : real
`rotxy` resp. `rotxz` resp. `rotyz` is angle in RADIANS of rotation
around z resp. y resp. x axis in positive direction
'''
# Rotation handled in atomic units
        #print('Before rotation')
self._origin=RotateAndMove(np.array([self._origin]),0.0,0.0,0.0,rotxy,rotxz,rotyz)
self.coor.rotate(rotxy,rotxz,rotyz)
self._step=RotateAndMove(self._step,0.0,0.0,0.0,rotxy,rotxz,rotyz)
        #print('After rotation')
def rotate_1(self,rotxy,rotxz,rotyz):
''' Rotate DENSITY in SPACE in negative rotational angle
(if right thumb pointing in direction of axes fingers are pointing in
        positive rotation direction). First is rotation around the x axis, then around the y axis, and then around
        the z axis. Inverse function to rotate(rotxy,rotxz,rotyz)
Parameters
----------
rotxy,rotxz,rotyz : real
`rotxy` resp. `rotxz` resp. `rotyz` is angle in RADIANS of rotation
around z resp. y resp. x axis in positive direction
'''
        #print('Before rotation')
self._origin=RotateAndMove_1(np.array([self._origin]),0.0,0.0,0.0,rotxy,rotxz,rotyz)
self.coor.rotate_1(rotxy,rotxz,rotyz)
self._step=RotateAndMove_1(self._step,0.0,0.0,0.0,rotxy,rotxz,rotyz)
        #print('After rotation')
def center(self,indx_center,indx_x,indx_y):
''' Center density according to defined center and main axes
Center atom will be in origin of coordinate system
(will have [0.0,0.0,0.0] coordinates) and vector X will be pointing into
direction of x axes and vector Y will be in xy plane. Vector X and Y
are defined by atomic indexes.
Parameters
----------
indx_center : int or list of int
            When `indx_center`=i it refers to the atomic coordinate of the ith atom
(counted from zero) => center=coor[i,:]. When `indx_center`=[i,j,k,..]
than center is center of all listed atoms (average coordinate) =>
center=(coor[i,:]+coor[j,:]+coor[k,:]...)/N
        indx_x : int or list of int of length 2 or 4
            When `indx_x`=i then vector X is defined as Coor[i,:]-center.
            When `indx_x`=[i,j] then vector X is defined as Coor[j,:]-Coor[i,:].
            When `indx_x`=[i,j,k,l] then vector X is defined as
            (Coor[j,:]-Coor[i,:])+(Coor[l,:]-Coor[k,:]).
        indx_y : int or list of int of length 2 or 4
            When `indx_y`=i then vector Y is defined as Coor[i,:]-center.
            When `indx_y`=[i,j] then vector Y is defined as Coor[j,:]-Coor[i,:].
            When `indx_y`=[i,j,k,l] then vector Y is defined as
            (Coor[j,:]-Coor[i,:])+(Coor[l,:]-Coor[k,:]).
'''
Coor_ext=[]
for ii in range(len(self.coor._value)):
Coor_ext.append(self.coor._value[ii])
Coor_ext.append(self._origin)
Coor_ext=np.array(Coor_ext)
Coor_centered,Phi,Psi,Chi,center=CenterMolecule(Coor_ext,indx_center,indx_x,indx_y,print_angles=True)
with position_units("Bohr"):
self.coor=Coordinate(Coor_centered[0,:])
for ii in range(1,len(Coor_centered)-1):
self.coor.add_coor(Coor_centered[ii,:])
self._origin=Coor_centered[len(self.coor._value),:]
self._step=RotateAndMove(self._step,0.0,0.0,0.0,Phi,Psi,Chi)
def dipole(self,output_center=False):
        ''' Numerically calculate the dipole from the density. For a ground state electron
        density it calculates the ground state dipole and for a transition density
        it calculates the transition dipole
Returns
----------
dipole : numpy.array of real (dimension 3)
dipole in ATOMIC UNITS (e*bohr)
Notes
----------
        It calculates Int{ -r.rho(r) dxdydz }, which is the dipole
'''
# TODO: repair matrix approach to be used also for rotated density
if 0: # This works only for nonrotated grid - change but keep the idea
grid=Grid()
grid.init_from_cub(self)
dipole=np.zeros(3,dtype='f8')
dipole[0]=np.sum(np.multiply(grid.X,self.data))
dipole[1]=np.sum(np.multiply(grid.Y,self.data))
dipole[2]=np.sum(np.multiply(grid.Z,self.data))
dipole = -np.multiply(grid.ddV,dipole)
dV=np.dot(self.step[0,:],np.cross(self.step[1,:],self.step[2,:]))
dipole=np.multiply(-dV,dipole)
return dipole
else:
# more efficient would be to create 3D grids with coordinates then multiply and then sum all
dipole = np.zeros(3,dtype='f8')
center = np.zeros(3,dtype='f8')
for ii in range(self.grid[0]):
for jj in range(self.grid[1]):
for kk in range(self.grid[2]):
rr=self._origin+ii*self._step[0,:]+jj*self._step[1,:]+kk*self._step[2,:]
dipole+=self.data[ii,jj,kk]*rr
center+=np.abs(self.data[ii,jj,kk])*rr
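            # dV: volume of one grid cell (scalar triple product of the step vectors)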
dV=np.dot(self._step[0,:],np.cross(self._step[1,:],self._step[2,:]))
dipole=dipole*dV
center = center/np.sum(np.abs(self.data))
            print('Dipole calculated by function dipole was changed from -dipole to dipole. Make sure that you are using the right value')
if output_center:
return -dipole,center
else:
return -dipole
def dipole_partial(self,x_min=None,x_max=None,y_min=None,y_max=None,z_min=None,z_max=None):
        ''' Numerically calculate the dipole from part of the density. For a ground
        state electron density it calculates the ground state partial dipole and
        from a transition density it calculates the partial transition dipole.
Parameters
----------
x_min,x_max : real (optional - init=None)
Specifies minimal and maximal x coordinate
between which density is used for calculation of dipole. If some of
those values are not specified there is taken minimal resp. maximal
x coordinate of the density.
y_min,y_max : real (optional - init=None)
Specifies minimal and maximal y coordinate
between which density is used for calculation of dipole. If some of
those values are not specified there is taken minimal resp. maximal
y coordinate of the density.
z_min,z_max : real (optional - init=None)
Specifies minimal and maximal z coordinate
between which density is used for calculation of dipole. If some of
those values are not specified there is taken minimal resp. maximal
z coordinate of the density.
Returns
----------
dipole : numpy.array of real (dimension 3)
dipole in ATOMIC UNITS (e*bohr)
        Notes
        ----------
        Resulting dipole is the numerically calculated integral
        Int_{x_min,y_min,z_min}^{x_max,y_max,z_max} (-r.rho(r)) dxdydz
'''
if x_min==None:
x_min=-1.0e5
else:
x_min=PositionUnitsManaged.manager.convert_position_2_internal_u(x_min)
if x_max==None:
x_max=1.0e5
else:
x_max=PositionUnitsManaged.manager.convert_position_2_internal_u(x_max)
if y_min==None:
y_min=-1.0e5
else:
y_min=PositionUnitsManaged.manager.convert_position_2_internal_u(y_min)
if y_max==None:
y_max=1.0e5
else:
y_max=PositionUnitsManaged.manager.convert_position_2_internal_u(y_max)
if z_min==None:
z_min=-1.0e5
else:
z_min=PositionUnitsManaged.manager.convert_position_2_internal_u(z_min)
if z_max==None:
z_max=1.0e5
else:
z_max=PositionUnitsManaged.manager.convert_position_2_internal_u(z_max)
# TODO: Convert boundaries from current values to internal
#print(x_min,x_max,y_min,y_max,z_min,z_max)
dipole=np.zeros(3,dtype='f8')
for ii in range(self.grid[0]):
for jj in range(self.grid[1]):
for kk in range(self.grid[2]):
rr=self._origin+ii*self._step[0,:]+jj*self._step[1,:]+kk*self._step[2,:]
if rr[0]>=x_min and rr[0]<=x_max and rr[1]>=y_min and rr[1]<=y_max and rr[2]>=z_min and rr[2]<=z_max:
dipole+=self.data[ii,jj,kk]*rr
dV=np.dot(self._step[0,:],np.cross(self._step[1,:],self._step[2,:]))
dipole=dipole*dV
        print('Dipole calculated by function dipole_partial was changed from -dipole to dipole. Make sure that you are using the right value')
return -dipole
def cut(self,x_min=None,x_max=None,y_min=None,y_max=None,z_min=None,z_max=None):
''' Takes a cut of density. **Works only for original (nonrotated) transition
density with step[0,:] pointing along x axis, step[1,:] pointing along
y axis and step[2,:] pointing along z axis.**
Parameters
----------
x_min,x_max : real (optional - init=None)
Specifies minimal and maximal x coordinate in ATOMIC UNITS (Bohr)
            between which the density is output. If some of those values are not
specified there is taken minimal resp. maximal x coordinate of the
density
y_min,y_max : real (optional - init=None)
Specifies minimal and maximal y coordinate in ATOMIC UNITS (Bohr)
            between which the density is output. If some of those values are not
specified there is taken minimal resp. maximal y coordinate of the
density
z_min,z_max : real (optional - init=None)
Specifies minimal and maximal z coordinate in ATOMIC UNITS (Bohr)
            between which the density is output. If some of those values are not
specified there is taken minimal resp. maximal z coordinate of the
density
Returns
----------
cuted_density : DensityGrid class
DensityGrid class with desity which is subsystem of original density
and it is defined on grid points with coordinates: x_min <= x <= x_max,
y_min <= y <= y_max and z_min <= z <= z_max.
'''
if x_min==None:
if self._step[0,0]>0:
x_min=self._origin[0]
else:
x_min=self._origin[0]+self._step[0,0]*(self.grid[0]-1)
else:
x_min=PositionUnitsManaged.manager.convert_position_2_internal_u(x_min)
if x_max==None:
if self._step[0,0]>0:
x_max=self._origin[0]+self._step[0,0]*(self.grid[0]-1)
else:
x_max=self._origin[0]
else:
x_max=PositionUnitsManaged.manager.convert_position_2_internal_u(x_max)
if y_min==None:
if self._step[1,1]>0:
y_min=self._origin[1]
else:
y_min=self._origin[1]+self._step[1,1]*(self.grid[1]-1)
else:
y_min=PositionUnitsManaged.manager.convert_position_2_internal_u(y_min)
if y_max==None:
if self._step[1,1]>0:
y_max=self._origin[1]+self._step[1,1]*(self.grid[1]-1)
else:
y_max=self._origin[1]
else:
y_max=PositionUnitsManaged.manager.convert_position_2_internal_u(y_max)
if z_min==None:
if self._step[2,2]>0:
z_min=self._origin[2]
else:
z_min=self._origin[2]+self._step[2,2]*(self.grid[2]-1)
else:
z_min=PositionUnitsManaged.manager.convert_position_2_internal_u(z_min)
if z_max==None:
if self._step[2,2]>0:
z_max=self._origin[2]+self._step[2,2]*(self.grid[2]-1)
else:
z_max=self._origin[2]
else:
z_max=PositionUnitsManaged.manager.convert_position_2_internal_u(z_max)
#print(x_min,x_max,y_min,y_max,z_min,z_max)
x=[0,0]
if self._step[0,0]>0:
for ii in range(self.grid[0]):
if self._origin[0]+self._step[0,0]*ii<x_min:
x[0]=ii+1
elif self._origin[0]+self._step[0,0]*ii>x_max and x[1]==0:
x[1]=ii-1
if x[1]==0:
x[1]=self.grid[0]
else:
for ii in range(self.grid[0]):
if self._origin[0]+self._step[0,0]*ii>x_max:
x[0]=ii+1
elif self._origin[0]+self._step[0,0]*ii<x_min and x[1]==0:
x[1]=ii-1
if x[1]==0:
x[1]=self.grid[0]
y=[0,0]
if self._step[1,1]>0:
for ii in range(self.grid[1]):
if self._origin[1]+self._step[1,1]*ii<y_min:
y[0]=ii+1
elif self._origin[1]+self._step[1,1]*ii>y_max and y[1]==0:
y[1]=ii-1
if y[1]==0:
y[1]=self.grid[1]
else:
for ii in range(self.grid[1]):
if self._origin[1]+self._step[1,1]*ii>y_max:
y[0]=ii+1
elif self._origin[1]+self._step[1,1]*ii<y_min and y[1]==0:
y[1]=ii-1
if y[1]==0:
                    y[1]=self.grid[1]
z=[0,0]
if self._step[2,2]>0:
for ii in range(self.grid[2]):
if self._origin[2]+self._step[2,2]*ii<z_min:
z[0]=ii+1
elif self._origin[2]+self._step[2,2]*ii>z_max and z[1]==0:
z[1]=ii-1
if z[1]==0:
z[1]=self.grid[2]
else:
print('z is negative')
for ii in range(self.grid[2]):
if self._origin[2]+self._step[2,2]*ii>z_max:
z[0]=ii+1
elif self._origin[2]+self._step[2,2]*ii<z_min and z[1]==0:
z[1]=ii-1
if z[1]==0:
z[1]=self.grid[2]
#print(x,y,z)
origin_new=self._origin[:]+self._step[0,:]*x[0]+self._step[1,:]*y[0]+self._step[2,:]*z[0]
grid_new=np.array([x[1]-x[0],y[1]-y[0],z[1]-z[0]])
data_new=self.data[x[0]:x[1],y[0]:y[1],z[0]:z[1]]
step_new=np.copy(self._step)
with position_units("Bohr"):
cuted_density=DensityGrid(origin_new,grid_new,step_new,data_new,typ=np.copy(self.type),mo_indx=np.copy(self.indx),Coor=np.copy(self.coor.value),At_charge=np.copy(self.at_charge))
return cuted_density
def calc_atomic_properties(self):
        ''' Calculate atomic charges and atomic dipoles by numerically integrating the
        density. First it is determined which atom is closest to the grid point,
        and to this atom a small delta charge and dipole is added.
Atomic charges are calculated as a sum of density from grid points for
which this atom is the closest one. The atomic dipoles are calculated
as vector from atom to grid point multiplied by density.
Returns
----------
charges : numpy.array of real (dimension Natoms)
Atomic charges for every atom of the system
dipoles : numpy.array of real (dimension Natoms x 3)
Atomic dipole in ATOMIC UNITS (e*bohr) for every atom
'''
Nat=len(self.coor._value)
charges=np.zeros(Nat,dtype='f8')
dipoles=np.zeros((Nat,3),dtype='f8')
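        # assign each grid point's density to the nearest atom (charge) and accumulate
        # the density-weighted displacement from that atom (dipole)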
for ii in range(self.grid[0]):
for jj in range(self.grid[1]):
for kk in range(self.grid[2]):
rr=self._origin+ii*self._step[0,:]+jj*self._step[1,:]+kk*self._step[2,:]
dist_min=30.0
index=0
for ll in range(len(self.coor._value)):
dist=np.sqrt(np.dot(rr-self.coor._value[ll],rr-self.coor._value[ll]))
if dist<dist_min:
index=ll
dist_min=np.copy(dist)
charges[index]+=self.data[ii,jj,kk]
dipoles[index,:]+=(rr-self.coor._value[index])*self.data[ii,jj,kk]
dV=np.dot(self._step[0,:],np.cross(self._step[1,:],self._step[2,:]))
        print('Atomic dipole calculated by function calc_atomic_properties was changed from -dipole to dipole. Make sure that you are using the right value')
return charges*dV,-dipoles*dV
def _elpot_at_position(self,position):
        ''' Calculate the electrostatic potential for the electronic density, assuming
        it is composed of cubic boxes with a homogeneous charge distribution
        **THIS IS A VERY CRUDE APPROXIMATION AND I HAVE SHOWN BY COMPARING THE CALCULATED
        POTENTIAL FROM ATOMIC ORBITALS WITH THIS ONE THAT IT DOESN'T PROVIDE (NOT
        EVEN CLOSE TO) THE REAL POTENTIAL**
Parameters
----------
position : numpy.array of real (dimension 3)
Coordinates in ATOMIC UNITS (Bohr) of point where we would like to
calculate electrostatic potential
Returns
----------
result : real
Potential at `position` in ATOMIC UNITS
'''
result=0.0
def aux_function(rr,stepx,stepy,stepz,t):
res=scipy.special.erf(t*(stepx/2-rr[0]))+scipy.special.erf(t*(stepx/2+rr[0]))
res=res * (scipy.special.erf(t*(stepy/2-rr[1]))+scipy.special.erf(t*(stepy/2+rr[1])))
res=res * (scipy.special.erf(t*(stepz/2-rr[2]))+scipy.special.erf(t*(stepz/2+rr[2])))
res=res/t**3
return res
rr1=np.copy(position)
for m in range(self.grid[0]):
for n in range(self.grid[1]):
for o in range(self.grid[2]):
rr2=self._origin + m*self._step[0,:]+n*self._step[1,:]+o*self._step[2,:]
dr=rr1-rr2
tmax=max([5/np.abs(np.abs(dr[0])-np.abs(self._step[0,0]/2)),5/np.abs(np.abs(dr[1])-np.abs(self._step[1,1]/2)),5/np.abs(np.abs(dr[2])-np.abs(self._step[2,2]/2))])
#if tmax<5e-1:
# ESP_Grid[i,j,k]-=self.data[m,n,o]/np.sqrt(np.dot(dr,dr))*dV
#else:
tmax=max([200,tmax])
aux_function_partial = partial(aux_function,dr,self._step[0,0],self._step[1,1],self._step[2,2])
result-=self.data[m,n,o]*np.pi/4*scipy.integrate.quadrature(aux_function_partial,0,tmax,tol=1e-05,maxiter=100)[0]
return result
def _dens_to_ESP2(self):
        ''' Create an electrostatic-potential grid from the electronic
        density, assuming it is composed of cubic boxes with a homogeneous
        charge distribution.
        **THIS IS A VERY CRUDE APPROXIMATION. COMPARISON WITH THE POTENTIAL
        CALCULATED FROM ATOMIC ORBITALS SHOWS THAT IT DOES NOT PROVIDE (OR
        EVEN COME CLOSE TO) THE REAL POTENTIAL**
        '''
ESP=DensityGrid(self.origin,self.grid,self.step,None,Coor=self.coor.value,At_charge=self.at_charge)
''' Calculate volume element '''
vecX=np.copy(self._step[0,:])
vecY=np.copy(self._step[1,:])
vecZ=np.cross(vecX,vecY)
dV=np.dot(vecZ,self._step[2,:])
ESP._origin=ESP._origin+self._step[0,:]/2.0+self._step[1,:]/2.0+self._step[2,:]/2.0
ESP_Grid=np.zeros((self.grid[0],self.grid[1],self.grid[2]),dtype='f8')
def aux_function(rr,stepx,stepy,stepz,t):
res=scipy.special.erf(t*(stepx/2-rr[0]))+scipy.special.erf(t*(stepx/2+rr[0]))
res=res * (scipy.special.erf(t*(stepy/2-rr[1]))+scipy.special.erf(t*(stepy/2+rr[1])))
res=res * (scipy.special.erf(t*(stepz/2-rr[2]))+scipy.special.erf(t*(stepz/2+rr[2])))
res=res/t**3
return res
for i in range(ESP.grid[0]):
print(i,'/',ESP.grid[0])
for j in range(ESP.grid[1]):
for k in range(ESP.grid[2]):
rr1=ESP._origin + i*ESP._step[0,:]+j*ESP._step[1,:]+k*ESP._step[2,:]
for m in range(self.grid[0]):
for n in range(self.grid[1]):
for o in range(self.grid[2]):
rr2=self._origin + m*self._step[0,:]+n*self._step[1,:]+o*self._step[2,:]
dr=rr1-rr2
tmax=max([5/np.abs(np.abs(dr[0])-np.abs(self._step[0,0]/2)),5/np.abs(np.abs(dr[1])-np.abs(self._step[1,1]/2)),5/np.abs(np.abs(dr[2])-np.abs(self._step[2,2]/2))])
if tmax<5e-1:
ESP_Grid[i,j,k]-=self.data[m,n,o]/np.sqrt(np.dot(dr,dr))*dV
else:
tmax=max([200,tmax])
aux_function_partial = partial(aux_function,dr,self._step[0,0],self._step[1,1],self._step[2,2])
ESP_Grid[i,j,k]-=self.data[m,n,o]*np.sqrt(np.pi)/4*scipy.integrate.quadrature(aux_function_partial,0,tmax,tol=1e-05,maxiter=100)[0]
ESP_Grid[i,j,k]-=np.pi/tmax**2*self.data[i,j,k]
#ESP_Grid=ESP_Grid
#for m in range(ESP.grid[0]):
# for n in range(ESP.grid[1]):
# for o in range(ESP.grid[2]):
# for ii in range(len(self.coor)):
# dr=ESP.origin + m*ESP.step[0,:]+n*ESP.step[1,:]+o*ESP.step[2,:]-self.coor[ii]
# norm2=np.sqrt(np.dot(dr,dr))
# ESP_Grid[m,n,o]+=self.at_charge[ii]/norm2
ESP.data=np.copy(ESP_Grid)
return ESP
|
python
|
from unittest import TestCase
from unittest.mock import patch
import getting_logs
class TestGetLog(TestCase):
"""Testing of getting logs from a third-party resource.
Testing the correctness of the transmitted data
for saving to the database."""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.data = {
'error': '',
'logs': [{
                'created_at': '2021-01-23T12:33:14',
'first_name': 'А',
'message': 'Write the code!',
'second_name': 'B',
'user_id': '123456'
}]
}
cls.error = {
'error': 'created_day: does not match format 20200105'
}
@patch('getting_logs.GetLog')
def test_logs_get(self, MockGetLog):
"""Tests getting logs."""
logs = MockGetLog()
logs.get.return_value = self.data
response = logs.get()
        self.assertIsNotNone(response, 'Error: empty object.')
        self.assertIsInstance(response, dict, 'Error: received non-JSON data.')
        self.assertEqual(
            response,
            logs.get.return_value,
            'Error: incorrect data received.')
@patch('getting_logs.GetLog')
def test_error_get(self, MockGetLog):
"""Tests getting logs."""
logs = MockGetLog()
logs.get.return_value = self.error
response = logs.get()
        self.assertIsNotNone(response, 'Error: empty object.')
        self.assertIsInstance(response, dict, 'Error: received non-JSON data.')
        self.assertEqual(response,
                         logs.get.return_value,
                         'Error: incorrect data received.')
@patch('getting_logs.GetLog')
def test_saving_logs(self, MockGetLog):
"""Tests the correctness of the transmitted data."""
get_log = MockGetLog()
get_log.get.return_value = self.data
logs = get_log.get()
get_log.saving_logs(logs)
self.assertEqual(MockGetLog, getting_logs.GetLog)
        self.assertTrue(MockGetLog.called)
get_log.get.assert_called_with()
get_log.saving_logs.assert_called_once_with(get_log.get.return_value)
|
python
|
# coding: utf8
import requests
import json
import os
import time
import pymysql.cursors
connection = pymysql.connect(host='127.0.0.1', port=3306, user='ub', password='UB@018_world_cup', db='db_world_cup',
charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
g_stake_address = [
'18QQJNannotKo2Q9CkiqBJcf4qZWANZvGM',
'16JUBxCKb5LsQP7pZANc2yWpqvv4Xxqpw5',
'17ThubQK723mnUAhJyQ5g3y7WGExMu5X1d',
# '1BdR8SFVB67JbLdbtJBagN4oFGvUGYqUjh',
'1EvSWArvHhg2LxDBBSqDmyabqKpJXh2dVW',
'1Kn4scG7XnyHkWS8JXBEnHv1rHZuatKK1r',
'141fdMZPSXyx1Ym73Tf7f5PgLrw4sTaRcG',
'13QXEiy8nfSiZa5co2bMCcKXbbDTCUaqPd',
'1EiiEpwmueb5gnPaf83QMfpZZa6NHa5xyu',
'1Q82uttbmSsiTSb3xkk16u5bY3Vd8NJi1k',
'1NCbHsPT7ET1W5M1eUxfdRnecUHKWifLey',
'12fZL8ujSoDyf1JGG5bwTZveMfpBnKNbjK',
'1DvETyyKTNbTVi8YFeqJcQGMz6PAbsgSc2',
'1N91rYn2vcuZH9twrx9sZEbMD1Va4oxb8M',
'16K7C5qHL7mRY31Wu4dGXx6DgHrHvrPMEm',
'16VCNicr93VhLQuFgJaXu8JmbJubA68fnS',
'17gEQUDzoBucaDb5yNVf7h9RwzR8h8ndWc',
'19JrzBCwat2yEy2Y7LZpkKozgffCNoc5mz',
'1Axnf6NNABo8VnDyFYk7FEajuNtSFjRYZw',
'13BauCmfa5JNoHxtQaeWoWT1Xqwree6HZx',
'1BYunn44TecdU1tRWtSnxpPhYbAA99rGm1',
'18L1zzKrNwL2Huov1iUdCuUr1HE1e7tFLk',
'18hsZYuXmD2oBHdxnWLTqVAaX9Ge7t8KxB',
'1FNNeq9Wpq1TQ2C1iLYQL3zn3BHAkh12dp',
'15hoi9mLw53ATgMdtwdMgtJUUin6cTwxYc',
'19Eb7zndhKVVozm4AD9e3KtxbESBvZZqLa',
'1KdK3LMNjrPaRhn7i3evGX5uxBhhP2nTsw',
'1HpTt76LdQG21QFttRtNGPqTcF6Tjbh2hY',
'1NK6KkGo1uYCq1Xv4GZ9gL3217UbqbFygP',
'15amvgyWfrCyFtr1r1NXX3GLoAzUX6pE2w',
'1BaTnykKitJ5mG8RJXfR1YNbcnDF8ZDHcF',
'12LGWm2ovNiKVafAm9GbEmDbQdz7ezGeto',
'1FbQBr2fg9aQyJp1HhsENFGo6tdcNjpguc'
]
class StateManager(object):
def __init__(self):
self.id_file = 'id_file'
try:
f = os.open(self.id_file, os.O_RDONLY)
self.latest_id = os.read(f, 10)
os.close(f)
            # os.read returns b'' (never None) when the file is empty.
            if not self.latest_id:
                self.latest_id = '1'
            else:
                self.latest_id = self.latest_id.decode('ascii')
except:
self.latest_id = '1'
def get_last_id(self):
return self.latest_id
    def increase_last_id(self, id):
        # IDs are stored as strings; compare numerically so that e.g. '9' < '10'.
        if int(id) > int(self.latest_id):
            self.latest_id = id
def save_latest_id(self):
try:
os.remove(self.id_file)
except:
pass
f = os.open(self.id_file, os.O_WRONLY | os.O_TRUNC | os.O_CREAT)
os.write(f, self.latest_id.encode('ascii'))
os.close(f)
print(self.latest_id)
def __del__(self):
self.save_latest_id()
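# Note: relying on __del__ for persistence is fragile (finalisers may be
# skipped at interpreter shutdown), which is why save_latest_id() is also
# called explicitly in the polling loop below.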
def update_database(address, count, time, item, isAnybit):
    cursor = connection.cursor()
    # Use a parameterized query so values are escaped by the driver
    # (no SQL injection or quoting issues).
    sql = ("INSERT INTO `t_stake` (`address`, `count`, `time`, `type`, `item`, `txid`, `isAnybit`) "
           "VALUES (%s, %s, %s, 2, %s, '', %s) ON DUPLICATE KEY UPDATE count=VALUES(count)")
    cursor.execute(sql, (address, count, time, item, isAnybit))
    connection.commit()
def get_latest_transaction(last_id, address, item):
query_trans_request = '''{
"header": {
"version": "1.0.1",
"language": "zh",
"trancode": "tran_page",
"clienttype": "Android",
"walletid": "927fc097c3567fe119cde85529fb7630fc1b690a",
"random": "123456",
"handshake": "abcdefg",
"imie": "abcdefg"
},
"body": {
"coinAddr":"%s",
"coinType":"UBTC",
"queryType":"1",
"lastId":%d,
"limit":10
}
}
''' % (address, int(last_id))
query_headers = {'Content-Type': "application/json"}
# print(query_trans_request)
response = requests.post('https://www.anybit.io/server/process/', data=query_trans_request, headers=query_headers)
# response = requests.post('http://192.168.1.220:8080/lightwallet/server/process', data=query_trans_request, headers=query_headers)
# print(response.text)
records = json.loads(response.text)['data']['trans']
latest_id = int(last_id)
for r in records:
print(r['targetAddr'] + ': ' + str(r['tranAmt']))
if latest_id < r['id']:
latest_id = r['id']
if 'source' in r and r['source'] == 1:
count = int(float(r['tranAmt']) * 10 * 5 / 4)
isAnybit = 1
else:
count = int(float(r['tranAmt']) * 10)
isAnybit = 0
if count <= 0:
continue
ctime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(r['createTime'] / 1000))
# item = g_stake_address.index(r['targetAddr'])
print(r['targetAddr'], count, ctime, item)
update_database(r['targetAddr'], count, ctime, item, isAnybit)
return latest_id
if __name__ == '__main__':
sm = StateManager()
while True:
        for item, a in enumerate(g_stake_address, start=1):
            last_id = get_latest_transaction(sm.get_last_id(), a, item)
sm.increase_last_id(str(last_id))
sm.save_latest_id()
time.sleep(30)
# print('end: ', sm.get_last_id())
|
python
|
from .bar import f
|
python
|
from django.contrib.auth.models import User
from django_grpc_framework import proto_serializers
import account_pb2
class UserProtoSerializer(proto_serializers.ModelProtoSerializer):
class Meta:
model = User
proto_class = account_pb2.User
fields = ['id', 'username', 'email', 'groups']
|
python
|
from minecraftmath import calculator
from system import window_management as wm
xFirThr = 0
zFirThr = 0
angleFirThr = 0
def findSecondSuggestedThrow(startPosX, startPosZ, startAngle):
global xFirThr, zFirThr, angleFirThr
xFirThr = startPosX
zFirThr = startPosZ
angleFirThr = startAngle
inRing, distance = calculator.distanceFromOrigin(xFirThr, zFirThr)
if inRing:
return (0,0,calculator.convertToMinecraftAngle(angleFirThr, inRing=True), distance)
else:
xSugThr, zSugThr = calculator.calculateSecondThrowCoordinates(*calculator.calculateHitRing(xFirThr, zFirThr, angleFirThr))
angleSugThr = calculator.calculateAngleAToB(xFirThr, zFirThr, xSugThr, zSugThr)
return (xSugThr, zSugThr, angleSugThr)
def findStronghold(startPosX, startPosZ, startAngle):
global xFirThr, zFirThr, angleFirThr
xStronghold, zStronghold = calculator.calculateStrongholdCoordinates(xFirThr, zFirThr, angleFirThr, startPosX, startPosZ, startAngle)
angleStronghold = calculator.calculateAngleAToB(startPosX, startPosZ, xStronghold, zStronghold)
return (xStronghold, zStronghold, angleStronghold)
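# Hedged usage sketch (coordinates and angles are made up): the first
# eye-of-ender throw seeds the module-level state, the second triangulates
# the stronghold.
#   x2, z2, a2 = findSecondSuggestedThrow(100.0, -250.0, 57.3)
#   # ...move near (x2, z2), throw again, then:
#   sx, sz, sa = findStronghold(x2, z2, 61.8)
# (If the first throw already lands inside the ring,
# findSecondSuggestedThrow instead returns a 4-tuple ending in the distance.)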
|
python
|
"""The builtin object type implementation"""
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import applevel, interp2app, unwrap_spec
from pypy.interpreter.typedef import (
GetSetProperty, TypeDef, default_identity_hash)
from pypy.objspace.descroperation import Object
app = applevel(r'''
def _abstract_method_error(typ):
methods = ", ".join(sorted(typ.__abstractmethods__))
err = "Can't instantiate abstract class %s with abstract methods %s"
raise TypeError(err % (typ.__name__, methods))
def reduce_1(obj, proto):
import copyreg
return copyreg._reduce_ex(obj, proto)
def _getstate(obj):
cls = obj.__class__
try:
getstate = obj.__getstate__
except AttributeError:
        # CPython performs an extra pickleability check here and raises a
        # TypeError if the condition holds true; in pypy this is done just
        # before reduce_2 is called
state = getattr(obj, "__dict__", None)
# CPython returns None if the dict is empty
if state is not None and len(state) == 0:
state = None
names = slotnames(cls) # not checking for list
if names is not None:
slots = {}
for name in names:
try:
value = getattr(obj, name)
except AttributeError:
pass
else:
slots[name] = value
if slots:
state = state, slots
else:
state = getstate()
return state
def reduce_2(obj, proto, args, kwargs):
cls = obj.__class__
if not hasattr(type(obj), "__new__"):
raise TypeError("can't pickle %s objects" % type(obj).__name__)
import copyreg
if not isinstance(args, tuple):
raise TypeError("__getnewargs__ should return a tuple")
if not kwargs:
newobj = copyreg.__newobj__
args2 = (cls,) + args
elif proto >= 4:
newobj = copyreg.__newobj_ex__
args2 = (cls, args, kwargs)
else:
raise ValueError("must use protocol 4 or greater to copy this "
"object; since __getnewargs_ex__ returned "
"keyword arguments.")
state = _getstate(obj)
listitems = iter(obj) if isinstance(obj, list) else None
dictitems = iter(obj.items()) if isinstance(obj, dict) else None
return newobj, args2, state, listitems, dictitems
def slotnames(cls):
if not isinstance(cls, type):
return None
try:
return cls.__dict__["__slotnames__"]
except KeyError:
pass
import copyreg
slotnames = copyreg._slotnames(cls)
if not isinstance(slotnames, list) and slotnames is not None:
raise TypeError("copyreg._slotnames didn't return a list or None")
return slotnames
''', filename=__file__)
_abstract_method_error = app.interphook("_abstract_method_error")
reduce_1 = app.interphook('reduce_1')
reduce_2 = app.interphook('reduce_2')
class W_ObjectObject(W_Root):
"""Instances of this class are what the user can directly see with an
'object()' call."""
def _excess_args(__args__):
return bool(__args__.arguments_w) or bool(__args__.keywords)
def descr__new__(space, w_type, __args__):
from pypy.objspace.std.typeobject import _precheck_for_new
w_type = _precheck_for_new(space, w_type)
if _excess_args(__args__):
w_parent_new, _ = space.lookup_in_type_where(w_type, '__new__')
w_parent_init, _ = space.lookup_in_type_where(w_type, '__init__')
if (w_parent_init is space.w_object or
w_parent_new is not space.w_object):
raise oefmt(space.w_TypeError,
"object() takes no parameters")
if w_type.is_abstract():
_abstract_method_error(space, w_type)
return space.allocate_instance(W_ObjectObject, w_type)
def descr___subclasshook__(space, __args__):
return space.w_NotImplemented
def descr__init__(space, w_obj, __args__):
if _excess_args(__args__):
w_type = space.type(w_obj)
w_parent_init, _ = space.lookup_in_type_where(w_type, '__init__')
w_parent_new, _ = space.lookup_in_type_where(w_type, '__new__')
if (w_parent_new is space.w_object or
w_parent_init is not space.w_object):
raise oefmt(space.w_TypeError,
"object.__init__() takes no parameters")
def descr_get___class__(space, w_obj):
return space.type(w_obj)
def descr_set___class__(space, w_obj, w_newcls):
from pypy.objspace.std.typeobject import W_TypeObject
from pypy.interpreter.module import Module
#
if not isinstance(w_newcls, W_TypeObject):
raise oefmt(space.w_TypeError,
"__class__ must be set to a class, not '%T' "
"object", w_newcls)
if not (w_newcls.is_heaptype() or
w_newcls is space.gettypeobject(Module.typedef)):
raise oefmt(space.w_TypeError,
"__class__ assignment only supported for heap types "
"or ModuleType subclasses")
w_oldcls = space.type(w_obj)
assert isinstance(w_oldcls, W_TypeObject)
if (w_oldcls.get_full_instance_layout() ==
w_newcls.get_full_instance_layout()):
w_obj.setclass(space, w_newcls)
else:
raise oefmt(space.w_TypeError,
"__class__ assignment: '%N' object layout differs from "
"'%N'", w_oldcls, w_newcls)
def descr__repr__(space, w_obj):
classname = space.getfulltypename(w_obj)
return w_obj.getrepr(space, u'%s object' % (classname,))
def descr__str__(space, w_obj):
w_type = space.type(w_obj)
w_impl = w_type.lookup("__repr__")
if w_impl is None:
# can it really occur?
raise oefmt(space.w_TypeError, "operand does not support unary str")
return space.get_and_call_function(w_impl, w_obj)
def _getnewargs(space, w_obj):
w_descr = space.lookup(w_obj, '__getnewargs_ex__')
hasargs = True
if w_descr is not None:
w_result = space.get_and_call_function(w_descr, w_obj)
if not space.isinstance_w(w_result, space.w_tuple):
raise oefmt(space.w_TypeError,
"__getnewargs_ex__ should return a tuple, not '%T'", w_result)
n = space.len_w(w_result)
if n != 2:
raise oefmt(space.w_ValueError,
"__getnewargs_ex__ should return a tuple of length 2, not %d",
n)
w_args, w_kwargs = space.fixedview(w_result, 2)
if not space.isinstance_w(w_args, space.w_tuple):
raise oefmt(space.w_TypeError,
"first item of the tuple returned by __getnewargs_ex__ must "
"be a tuple, not '%T'", w_args)
if not space.isinstance_w(w_kwargs, space.w_dict):
raise oefmt(space.w_TypeError,
"second item of the tuple returned by __getnewargs_ex__ must "
"be a dict, not '%T'", w_kwargs)
else:
w_descr = space.lookup(w_obj, '__getnewargs__')
if w_descr is not None:
w_args = space.get_and_call_function(w_descr, w_obj)
if not space.isinstance_w(w_args, space.w_tuple):
raise oefmt(space.w_TypeError,
"__getnewargs__ should return a tuple, not '%T'", w_args)
else:
hasargs = False
w_args = space.newtuple([])
w_kwargs = space.w_None
return hasargs, w_args, w_kwargs
@unwrap_spec(proto=int)
def descr__reduce__(space, w_obj, proto=0):
w_proto = space.newint(proto)
if proto >= 2:
hasargs, w_args, w_kwargs = _getnewargs(space, w_obj)
        w_getstate = space.lookup(w_obj, '__getstate__')
if w_getstate is None:
required = (not hasargs and
not space.isinstance_w(w_obj, space.w_list) and
not space.isinstance_w(w_obj, space.w_dict))
w_obj_type = space.type(w_obj)
if required and w_obj_type.layout.typedef.variable_sized:
raise oefmt(
space.w_TypeError, "cannot pickle %N objects", w_obj_type)
return reduce_2(space, w_obj, w_proto, w_args, w_kwargs)
return reduce_1(space, w_obj, w_proto)
@unwrap_spec(proto=int)
def descr__reduce_ex__(space, w_obj, proto=0):
w_st_reduce = space.newtext('__reduce__')
w_reduce = space.findattr(w_obj, w_st_reduce)
if w_reduce is not None:
# Check if __reduce__ has been overridden:
# "type(obj).__reduce__ is not object.__reduce__"
w_cls_reduce = space.getattr(space.type(w_obj), w_st_reduce)
w_obj_reduce = space.getattr(space.w_object, w_st_reduce)
override = not space.is_w(w_cls_reduce, w_obj_reduce)
if override:
return space.call_function(w_reduce)
return descr__reduce__(space, w_obj, proto)
def descr___format__(space, w_obj, w_format_spec):
if space.isinstance_w(w_format_spec, space.w_unicode):
w_as_str = space.call_function(space.w_unicode, w_obj)
elif space.isinstance_w(w_format_spec, space.w_bytes):
w_as_str = space.str(w_obj)
else:
raise oefmt(space.w_TypeError, "format_spec must be a string")
if space.len_w(w_format_spec) > 0:
raise oefmt(space.w_TypeError,
"unsupported format string passed to %T.__format__",
                    w_obj)
return space.format(w_as_str, w_format_spec)
def descr__eq__(space, w_self, w_other):
if space.is_w(w_self, w_other):
return space.w_True
# Return NotImplemented instead of False, so if two objects are
# compared, both get a chance at the comparison (issue #1393)
return space.w_NotImplemented
def descr__ne__(space, w_self, w_other):
# By default, __ne__() delegates to __eq__() and inverts the result,
# unless the latter returns NotImplemented.
w_eq = space.lookup(w_self, '__eq__')
w_res = space.get_and_call_function(w_eq, w_self, w_other)
if space.is_w(w_res, space.w_NotImplemented):
return w_res
return space.not_(w_res)
def descr_richcompare(space, w_self, w_other):
return space.w_NotImplemented
def descr__dir__(space, w_obj):
from pypy.objspace.std.util import _objectdir
return space.call_function(space.w_list, _objectdir(space, w_obj))
W_ObjectObject.typedef = TypeDef("object",
_text_signature_='()',
__doc__ = "The most base type",
__new__ = interp2app(descr__new__),
__subclasshook__ = interp2app(descr___subclasshook__, as_classmethod=True),
# these are actually implemented in pypy.objspace.descroperation
__getattribute__ = interp2app(Object.descr__getattribute__.im_func),
__setattr__ = interp2app(Object.descr__setattr__.im_func),
__delattr__ = interp2app(Object.descr__delattr__.im_func),
__init__ = interp2app(descr__init__),
__class__ = GetSetProperty(descr_get___class__, descr_set___class__),
__repr__ = interp2app(descr__repr__),
__str__ = interp2app(descr__str__),
__hash__ = interp2app(default_identity_hash),
__reduce__ = interp2app(descr__reduce__),
__reduce_ex__ = interp2app(descr__reduce_ex__),
__format__ = interp2app(descr___format__),
__dir__ = interp2app(descr__dir__),
__eq__ = interp2app(descr__eq__),
__ne__ = interp2app(descr__ne__),
__le__ = interp2app(descr_richcompare),
__lt__ = interp2app(descr_richcompare),
__ge__ = interp2app(descr_richcompare),
__gt__ = interp2app(descr_richcompare),
)
|
python
|
import collections
from sgfs import SGFS
ReferenceStatus = collections.namedtuple('ReferenceStatus', ('path', 'used', 'latest', 'is_latest', 'all'))
def check_paths(paths, only_published=True):
sgfs = SGFS()
res = []
for path in paths:
publishes = sgfs.entities_from_path(path, 'PublishEvent')
if only_published and not publishes:
continue
publish = publishes[0] if publishes else None
if publish:
            siblings = sgfs.session.find('PublishEvent', [
                ('sg_link', 'is', publish['sg_link']),
                ('code', 'is', publish['code']),
                ('sg_type', 'is', publish['sg_type']),
            ], ['sg_path', 'sg_version'])  # sg_version is needed for the sort/max below
            siblings.sort(key=lambda x: x['sg_version'])
            latest = max(siblings, key=lambda pub: pub['sg_version'])
else:
siblings = []
latest = None
res.append(ReferenceStatus(
path=path,
used=publish,
latest=latest,
is_latest=publish is latest if publish else False,
all=siblings,
))
return res
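# Hedged usage sketch (the path is hypothetical):
#   for status in check_paths(['/proj/SEQ/sh010/publish/v003/scene.ma']):
#       if not status.is_latest:
#           print(status.path, 'is out of date; latest publish:', status.latest)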
|
python
|
'''
Written by Jason Reaves - @sysopfb
Free to use, attribute properly.
'''
import sys
import pefile
import struct
import re
def decrypt(keystream, blob):
for i in range(len(blob)):
blob[i] ^= keystream[i%len(keystream)]
def rc4_crypt(data, sbox):
S = list(sbox)
out = []
i = j = 0
for char in data:
i = ( i + 1 ) % 256
j = ( j + S[i] ) % 256
S[i] , S[j] = S[j] , S[i]
out.append(chr(ord(char) ^ S[(S[i] + S[j]) % 256]))
return ''.join(out)
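# Note: rc4_crypt implements only the RC4 PRGA -- `sbox` is expected to be an
# already-initialised 256-byte state (here lifted verbatim from the binary),
# so no key-scheduling (KSA) pass is performed.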
def decoder(data):
conf = None
#m = re.findall('''\x8a[\x82\x86]([\x00-\xff]{3}\x00)''', data)
blob = None
pe = pefile.PE(data=data)
for section in pe.sections:
if '.rdata' in section.Name:
blob = section.get_data()
    if blob is not None:
temp = re.split('[\x00]{3,}', blob)
temp = filter(lambda x: len(x) > 254, temp)
found = None
for val in temp:
testdata = val[:-0x100]
testkey = val[-0x100:]
test = rc4_crypt(testdata, bytearray(testkey))
if 'http' in test:
found = test
break
        if found is None:
possible_keys = filter(lambda x: len(x) == 256, temp)
possible_data = filter(lambda x: len(x) != 256, temp)
for testkey in possible_keys:
for testdata in possible_data:
test = rc4_crypt(testdata, bytearray(testkey))
if 'http' in test:
found = test
break
                if found is not None:
                    break
    if found is not None:
        print("Found embed config!")
        urls = re.findall('https?:\/\/[a-zA-Z0-9\-\/\._]+', found)
        conf = {'urls': urls}
return conf
if __name__ == "__main__":
data = open(sys.argv[1],'rb').read()
t = decoder(data)
print(t)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__title__ = 'baidupcsapi'
__version__ = '0.2.12'
__author__ = 'mozillazg,latyas'
__license__ = 'MIT'
from .api import PCS
|
python
|
import inspect
class SuperMixin(object):
def super(cls, *args, **kwargs):
        # inspect.currentframe is sys._getframe on CPython, so depth 1 yields
        # the caller's frame; recover `self` and the calling method's name.
        frame = inspect.currentframe(1)
        self = frame.f_locals['self']
        methodName = frame.f_code.co_name
method = getattr(super(cls, self), methodName, None)
if inspect.ismethod(method):
return method(*args, **kwargs)
super = classmethod(super)
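# Hedged usage sketch: a subclass calls `Cls.super()` inside a method; the
# frame hack above recovers `self` and the calling method's name, then
# dispatches to the same-named method further up the MRO.
#   class Base(SuperMixin):
#       def greet(self):
#           print('Base.greet')
#   class Child(Base):
#       def greet(self):
#           print('Child.greet')
#           Child.super()  # invokes Base.greet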
|
python
|
#Includes BaseClassApi class
import BaseClassApi
class Augmentor(BaseClassApi.Api):
pass
#print "This is augmentor api class: \n"
def execute_augmentor_api():
BaseClassApi.Api.url_path = "api/v1/augmentors"
aug_api = Augmentor()
    #This call lists the available augmentors.
    aug_api.list_operation()
    #This call uploads a file (i.e. JSON data) and returns an upload id.
    #BaseClassApi.Api.upload_id = aug_api.upload_file_operation(json_file_name)
    #This is the payload information required for creating the augmentor.
    ## #BaseClassApi.Api.payload = {"organization": {"name": "New organization1", "url": "www.test1.com", "upload_id": "%s" %BaseClassApi.Api.upload_id }}
    BaseClassApi.Api.payload = {"augmentor": {"name": "audience name", "upload_id": "%s" % BaseClassApi.Api.upload_id}}
    #This call creates the augmentor.
    BaseClassApi.Api.aug_id = aug_api.create_operation()
    #BaseClassApi.Api.general_id = ""
    #This is the payload information required for updating the augmentor.
    ## BaseClassApi.Api.payload = {"organization": {"name": "Rename organization1", "url": "www.test1.com", "upload_id": "%s" %BaseClassApi.Api.upload_id }}
    BaseClassApi.Api.payload = {"augmentor": {"name": "audience name", "upload_id": "%s" % BaseClassApi.Api.upload_id}}
    #This call updates the augmentor.
    aug_api.update_operation(BaseClassApi.Api.aug_id)
    #This call shows details of the specific augmentor.
    aug_api.show_operation(BaseClassApi.Api.aug_id)
    #This call would delete the augmentor (currently disabled).
    ########################
    #aug_api.destroy_operation(BaseClassApi.Api.aug_id)
|
python
|
from ..abstract import ErdReadOnlyConverter
from ..primitives import *
from gehomesdk.erd.values.oven import OvenConfiguration, ErdOvenConfiguration
class OvenConfigurationConverter(ErdReadOnlyConverter[OvenConfiguration]):
def erd_decode(self, value: str) -> OvenConfiguration:
if not value:
n = 0
else:
n = erd_decode_int(value)
config = OvenConfiguration(
has_knob=bool(n & ErdOvenConfiguration.HAS_KNOB.value),
has_warming_drawer=bool(n & ErdOvenConfiguration.HAS_WARMING_DRAWER.value),
has_light_bar=bool(n & ErdOvenConfiguration.HAS_LIGHT_BAR.value),
has_lower_oven=bool(n & ErdOvenConfiguration.HAS_LOWER_OVEN.value),
has_lower_oven_kitchen_timer=bool(n & ErdOvenConfiguration.HAS_LOWER_OVEN_KITCHEN_TIMER.value),
raw_value=value,
)
return config
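# Each HAS_* flag above is recovered by AND-ing the decoded integer against
# the corresponding ErdOvenConfiguration mask, so an empty or zero raw value
# yields a configuration with every feature flag False.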
|
python
|
from __future__ import unicode_literals
import datetime
from django.http import Http404
from django.utils.timezone import utc
from model_mommy import mommy
from kb.tests.test import ViewTestCase
from kb.models import Article
class TestCategoryFeed(ViewTestCase):
view_name = 'kb:category_feed'
view_kwargs = {'slug': 'spam'}
def view(self, request):
from kb.feeds import CategoryFeed
return CategoryFeed()(request, slug='spam')
def test_with_category_without_articles_should_fail(self):
mommy.make_recipe('kb.tests.category_without_articles', slug='spam')
self.assertRaises(Http404, self.get)
def test_view(self):
category = mommy.make_recipe('kb.tests.category_with_articles', slug='spam')
mommy.make_recipe('kb.tests.published_article',
created=datetime.datetime(2013, 5, 27, tzinfo=utc),
created_by=mommy.make('User', username='Guido'),
category=category)
for article in Article.objects.all():
article.tags.add('Spam', 'Eggs')
response = self.get()
self.assertHttpOK(response)
self.assertContains(response, '<title>Category With Articles Title</title>')
self.assertContains(response, '<description>Category With Articles Description</description>')
self.assertContains(response, '<title>Published Article Title</title>')
self.assertContains(response, '<description><p>Published Article Content</p></description>')
self.assertContains(response, '<pubDate>Mon, 27 May 2013 00:00:00 +0000</pubDate>')
self.assertContains(response, '<category>Spam</category>')
self.assertContains(response, '<category>Eggs</category>')
self.assertContains(response, '>Guido</dc:creator>')
self.assertNotContains(response, '<title>Draft Article Title</title>')
self.assertNotContains(response, '<title>Draft Article Content</title>')
|
python
|
"""
test_ext_autodoc_configs
~~~~~~~~~~~~~~~~~~~~~~~~
Test the autodoc extension. This tests mainly for config variables
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import platform
import sys
import pytest
from sphinx.testing import restructuredtext
from .test_ext_autodoc import do_autodoc
IS_PYPY = platform.python_implementation() == 'PyPy'
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoclass_content_class(app):
app.config.autoclass_content = 'class'
options = {"members": None}
actual = do_autodoc(app, 'module', 'target.autoclass_content', options)
assert list(actual) == [
'',
'.. py:module:: target.autoclass_content',
'',
'',
'.. py:class:: A()',
' :module: target.autoclass_content',
'',
' A class having no __init__, no __new__',
'',
'',
'.. py:class:: B()',
' :module: target.autoclass_content',
'',
' A class having __init__(no docstring), no __new__',
'',
'',
'.. py:class:: C()',
' :module: target.autoclass_content',
'',
' A class having __init__, no __new__',
'',
'',
'.. py:class:: D()',
' :module: target.autoclass_content',
'',
' A class having no __init__, __new__(no docstring)',
'',
'',
'.. py:class:: E()',
' :module: target.autoclass_content',
'',
' A class having no __init__, __new__',
'',
'',
'.. py:class:: F()',
' :module: target.autoclass_content',
'',
' A class having both __init__ and __new__',
'',
'',
'.. py:class:: G()',
' :module: target.autoclass_content',
'',
' A class inherits __init__ without docstring.',
'',
'',
'.. py:class:: H()',
' :module: target.autoclass_content',
'',
' A class inherits __new__ without docstring.',
'',
]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoclass_content_init(app):
app.config.autoclass_content = 'init'
options = {"members": None}
actual = do_autodoc(app, 'module', 'target.autoclass_content', options)
assert list(actual) == [
'',
'.. py:module:: target.autoclass_content',
'',
'',
'.. py:class:: A()',
' :module: target.autoclass_content',
'',
' A class having no __init__, no __new__',
'',
'',
'.. py:class:: B()',
' :module: target.autoclass_content',
'',
' A class having __init__(no docstring), no __new__',
'',
'',
'.. py:class:: C()',
' :module: target.autoclass_content',
'',
' __init__ docstring',
'',
'',
'.. py:class:: D()',
' :module: target.autoclass_content',
'',
' A class having no __init__, __new__(no docstring)',
'',
'',
'.. py:class:: E()',
' :module: target.autoclass_content',
'',
' __new__ docstring',
'',
'',
'.. py:class:: F()',
' :module: target.autoclass_content',
'',
' __init__ docstring',
'',
'',
'.. py:class:: G()',
' :module: target.autoclass_content',
'',
' __init__ docstring',
'',
'',
'.. py:class:: H()',
' :module: target.autoclass_content',
'',
' __new__ docstring',
'',
]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoclass_content_both(app):
app.config.autoclass_content = 'both'
options = {"members": None}
actual = do_autodoc(app, 'module', 'target.autoclass_content', options)
assert list(actual) == [
'',
'.. py:module:: target.autoclass_content',
'',
'',
'.. py:class:: A()',
' :module: target.autoclass_content',
'',
' A class having no __init__, no __new__',
'',
'',
'.. py:class:: B()',
' :module: target.autoclass_content',
'',
' A class having __init__(no docstring), no __new__',
'',
'',
'.. py:class:: C()',
' :module: target.autoclass_content',
'',
' A class having __init__, no __new__',
'',
' __init__ docstring',
'',
'',
'.. py:class:: D()',
' :module: target.autoclass_content',
'',
' A class having no __init__, __new__(no docstring)',
'',
'',
'.. py:class:: E()',
' :module: target.autoclass_content',
'',
' A class having no __init__, __new__',
'',
' __new__ docstring',
'',
'',
'.. py:class:: F()',
' :module: target.autoclass_content',
'',
' A class having both __init__ and __new__',
'',
' __init__ docstring',
'',
'',
'.. py:class:: G()',
' :module: target.autoclass_content',
'',
' A class inherits __init__ without docstring.',
'',
' __init__ docstring',
'',
'',
'.. py:class:: H()',
' :module: target.autoclass_content',
'',
' A class inherits __new__ without docstring.',
'',
' __new__ docstring',
'',
]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autodoc_inherit_docstrings(app):
assert app.config.autodoc_inherit_docstrings is True # default
actual = do_autodoc(app, 'method', 'target.inheritance.Derived.inheritedmeth')
assert list(actual) == [
'',
'.. py:method:: Derived.inheritedmeth()',
' :module: target.inheritance',
'',
' Inherited function.',
'',
]
# disable autodoc_inherit_docstrings
app.config.autodoc_inherit_docstrings = False
actual = do_autodoc(app, 'method', 'target.inheritance.Derived.inheritedmeth')
assert list(actual) == [
'',
'.. py:method:: Derived.inheritedmeth()',
' :module: target.inheritance',
''
]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autodoc_docstring_signature(app):
options = {"members": None}
actual = do_autodoc(app, 'class', 'target.DocstringSig', options)
assert list(actual) == [
'',
'.. py:class:: DocstringSig()',
' :module: target',
'',
'',
' .. py:method:: DocstringSig.meth(FOO, BAR=1) -> BAZ',
' :module: target',
'',
' First line of docstring',
'',
' rest of docstring',
'',
'',
' .. py:method:: DocstringSig.meth2()',
' :module: target',
'',
' First line, no signature',
' Second line followed by indentation::',
'',
' indented line',
'',
'',
' .. py:property:: DocstringSig.prop1',
' :module: target',
'',
' First line of docstring',
'',
'',
' .. py:property:: DocstringSig.prop2',
' :module: target',
'',
' First line of docstring',
' Second line of docstring',
'',
]
# disable autodoc_docstring_signature
app.config.autodoc_docstring_signature = False
actual = do_autodoc(app, 'class', 'target.DocstringSig', options)
assert list(actual) == [
'',
'.. py:class:: DocstringSig()',
' :module: target',
'',
'',
' .. py:method:: DocstringSig.meth()',
' :module: target',
'',
' meth(FOO, BAR=1) -> BAZ',
' First line of docstring',
'',
' rest of docstring',
'',
'',
'',
' .. py:method:: DocstringSig.meth2()',
' :module: target',
'',
' First line, no signature',
' Second line followed by indentation::',
'',
' indented line',
'',
'',
' .. py:property:: DocstringSig.prop1',
' :module: target',
'',
' DocstringSig.prop1(self)',
' First line of docstring',
'',
'',
' .. py:property:: DocstringSig.prop2',
' :module: target',
'',
' First line of docstring',
' Second line of docstring',
'',
]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoclass_content_and_docstring_signature_class(app):
app.config.autoclass_content = 'class'
options = {"members": None,
"undoc-members": None}
actual = do_autodoc(app, 'module', 'target.docstring_signature', options)
assert list(actual) == [
'',
'.. py:module:: target.docstring_signature',
'',
'',
'.. py:class:: A(foo, bar)',
' :module: target.docstring_signature',
'',
'',
'.. py:class:: B(foo, bar)',
' :module: target.docstring_signature',
'',
'',
'.. py:class:: C(foo, bar)',
' :module: target.docstring_signature',
'',
'',
'.. py:class:: D()',
' :module: target.docstring_signature',
'',
'',
'.. py:class:: E()',
' :module: target.docstring_signature',
'',
'',
'.. py:class:: F()',
' :module: target.docstring_signature',
'',
]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoclass_content_and_docstring_signature_init(app):
app.config.autoclass_content = 'init'
options = {"members": None,
"undoc-members": None}
actual = do_autodoc(app, 'module', 'target.docstring_signature', options)
assert list(actual) == [
'',
'.. py:module:: target.docstring_signature',
'',
'',
'.. py:class:: A(foo, bar)',
' :module: target.docstring_signature',
'',
'',
'.. py:class:: B(foo, bar, baz)',
' :module: target.docstring_signature',
'',
'',
'.. py:class:: C(foo, bar, baz)',
' :module: target.docstring_signature',
'',
'',
'.. py:class:: D(foo, bar, baz)',
' :module: target.docstring_signature',
'',
'',
'.. py:class:: E(foo: int, bar: int, baz: int) -> None',
' E(foo: str, bar: str, baz: str) -> None',
' :module: target.docstring_signature',
'',
'',
'.. py:class:: F(foo: int, bar: int, baz: int) -> None',
' F(foo: str, bar: str, baz: str) -> None',
' :module: target.docstring_signature',
'',
]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autoclass_content_and_docstring_signature_both(app):
app.config.autoclass_content = 'both'
options = {"members": None,
"undoc-members": None}
actual = do_autodoc(app, 'module', 'target.docstring_signature', options)
assert list(actual) == [
'',
'.. py:module:: target.docstring_signature',
'',
'',
'.. py:class:: A(foo, bar)',
' :module: target.docstring_signature',
'',
'',
'.. py:class:: B(foo, bar)',
' :module: target.docstring_signature',
'',
' B(foo, bar, baz)',
'',
'',
'.. py:class:: C(foo, bar)',
' :module: target.docstring_signature',
'',
' C(foo, bar, baz)',
'',
'',
'.. py:class:: D(foo, bar, baz)',
' :module: target.docstring_signature',
'',
'',
'.. py:class:: E(foo: int, bar: int, baz: int) -> None',
' E(foo: str, bar: str, baz: str) -> None',
' :module: target.docstring_signature',
'',
'',
'.. py:class:: F(foo: int, bar: int, baz: int) -> None',
' F(foo: str, bar: str, baz: str) -> None',
' :module: target.docstring_signature',
'',
]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
@pytest.mark.usefixtures("rollback_sysmodules")
def test_mocked_module_imports(app, warning):
sys.modules.pop('target', None) # unload target module to clear the module cache
# no autodoc_mock_imports
options = {"members": 'TestAutodoc,decoratedFunction,func'}
actual = do_autodoc(app, 'module', 'target.need_mocks', options)
assert list(actual) == []
assert "autodoc: failed to import module 'need_mocks'" in warning.getvalue()
# with autodoc_mock_imports
app.config.autodoc_mock_imports = [
'missing_module',
'missing_package1',
'missing_package2',
'missing_package3',
'sphinx.missing_module4',
]
warning.truncate(0)
actual = do_autodoc(app, 'module', 'target.need_mocks', options)
assert list(actual) == [
'',
'.. py:module:: target.need_mocks',
'',
'',
'.. py:class:: TestAutodoc()',
' :module: target.need_mocks',
'',
' TestAutodoc docstring.',
'',
'',
' .. py:method:: TestAutodoc.decoratedMethod()',
' :module: target.need_mocks',
'',
' TestAutodoc::decoratedMethod docstring',
'',
'',
'.. py:function:: decoratedFunction()',
' :module: target.need_mocks',
'',
' decoratedFunction docstring',
'',
'',
'.. py:function:: func(arg: missing_module.Class)',
' :module: target.need_mocks',
'',
' a function takes mocked object as an argument',
'',
]
assert warning.getvalue() == ''
@pytest.mark.sphinx('html', testroot='ext-autodoc',
confoverrides={'autodoc_typehints': "signature"})
def test_autodoc_typehints_signature(app):
options = {"members": None,
"undoc-members": None}
actual = do_autodoc(app, 'module', 'target.typehints', options)
assert list(actual) == [
'',
'.. py:module:: target.typehints',
'',
'',
'.. py:class:: Math(s: str, o: Optional[Any] = None)',
' :module: target.typehints',
'',
'',
' .. py:method:: Math.decr(a: int, b: int = 1) -> int',
' :module: target.typehints',
'',
'',
' .. py:method:: Math.horse(a: str, b: int) -> None',
' :module: target.typehints',
'',
'',
' .. py:method:: Math.incr(a: int, b: int = 1) -> int',
' :module: target.typehints',
'',
'',
' .. py:method:: Math.nothing() -> None',
' :module: target.typehints',
'',
'',
'.. py:class:: NewAnnotation(i: int)',
' :module: target.typehints',
'',
'',
'.. py:class:: NewComment(i: int)',
' :module: target.typehints',
'',
'',
'.. py:class:: SignatureFromMetaclass(a: int)',
' :module: target.typehints',
'',
'',
'.. py:function:: complex_func(arg1: str, arg2: List[int], arg3: Tuple[int, '
'Union[str, Unknown]] = None, *args: str, **kwargs: str) -> None',
' :module: target.typehints',
'',
'',
'.. py:function:: decr(a: int, b: int = 1) -> int',
' :module: target.typehints',
'',
'',
'.. py:function:: incr(a: int, b: int = 1) -> int',
' :module: target.typehints',
'',
'',
'.. py:function:: missing_attr(c, a: str, b: Optional[str] = None) -> str',
' :module: target.typehints',
'',
'',
'.. py:function:: tuple_args(x: Tuple[int, Union[int, str]]) -> Tuple[int, int]',
' :module: target.typehints',
'',
]
@pytest.mark.sphinx('html', testroot='ext-autodoc',
confoverrides={'autodoc_typehints': "none"})
def test_autodoc_typehints_none(app):
options = {"members": None,
"undoc-members": None}
actual = do_autodoc(app, 'module', 'target.typehints', options)
assert list(actual) == [
'',
'.. py:module:: target.typehints',
'',
'',
'.. py:class:: Math(s, o=None)',
' :module: target.typehints',
'',
'',
' .. py:method:: Math.decr(a, b=1)',
' :module: target.typehints',
'',
'',
' .. py:method:: Math.horse(a, b)',
' :module: target.typehints',
'',
'',
' .. py:method:: Math.incr(a, b=1)',
' :module: target.typehints',
'',
'',
' .. py:method:: Math.nothing()',
' :module: target.typehints',
'',
'',
'.. py:class:: NewAnnotation(i)',
' :module: target.typehints',
'',
'',
'.. py:class:: NewComment(i)',
' :module: target.typehints',
'',
'',
'.. py:class:: SignatureFromMetaclass(a)',
' :module: target.typehints',
'',
'',
'.. py:function:: complex_func(arg1, arg2, arg3=None, *args, **kwargs)',
' :module: target.typehints',
'',
'',
'.. py:function:: decr(a, b=1)',
' :module: target.typehints',
'',
'',
'.. py:function:: incr(a, b=1)',
' :module: target.typehints',
'',
'',
'.. py:function:: missing_attr(c, a, b=None)',
' :module: target.typehints',
'',
'',
'.. py:function:: tuple_args(x)',
' :module: target.typehints',
'',
]
@pytest.mark.sphinx('html', testroot='ext-autodoc',
confoverrides={'autodoc_typehints': 'none'})
def test_autodoc_typehints_none_for_overload(app):
options = {"members": None}
actual = do_autodoc(app, 'module', 'target.overload', options)
assert list(actual) == [
'',
'.. py:module:: target.overload',
'',
'',
'.. py:class:: Bar(x, y)',
' :module: target.overload',
'',
' docstring',
'',
'',
'.. py:class:: Baz(x, y)',
' :module: target.overload',
'',
' docstring',
'',
'',
'.. py:class:: Foo(x, y)',
' :module: target.overload',
'',
' docstring',
'',
'',
'.. py:class:: Math()',
' :module: target.overload',
'',
' docstring',
'',
'',
' .. py:method:: Math.sum(x, y=None)',
' :module: target.overload',
'',
' docstring',
'',
'',
'.. py:function:: sum(x, y=None)',
' :module: target.overload',
'',
' docstring',
'',
]
@pytest.mark.sphinx('text', testroot='ext-autodoc',
confoverrides={'autodoc_typehints': "description"})
def test_autodoc_typehints_description(app):
app.build()
context = (app.outdir / 'index.txt').read_text()
assert ('target.typehints.incr(a, b=1)\n'
'\n'
' Parameters:\n'
' * **a** (*int*) --\n'
'\n'
' * **b** (*int*) --\n'
'\n'
' Return type:\n'
' int\n'
in context)
assert ('target.typehints.tuple_args(x)\n'
'\n'
' Parameters:\n'
' **x** (*Tuple**[**int**, **Union**[**int**, **str**]**]*) --\n'
'\n'
' Return type:\n'
' Tuple[int, int]\n'
in context)
    # Overloads still get displayed in the signature
assert ('target.overload.sum(x: int, y: int = 0) -> int\n'
'target.overload.sum(x: float, y: float = 0.0) -> float\n'
'target.overload.sum(x: str, y: str = None) -> str\n'
'\n'
' docstring\n'
in context)
@pytest.mark.sphinx('text', testroot='ext-autodoc',
confoverrides={'autodoc_typehints': "description",
'autodoc_typehints_description_target': 'documented'})
def test_autodoc_typehints_description_no_undoc(app):
# No :type: or :rtype: will be injected for `incr`, which does not have
# a description for its parameters or its return. `tuple_args` does
# describe them, so :type: and :rtype: will be added.
(app.srcdir / 'index.rst').write_text(
'.. autofunction:: target.typehints.incr\n'
'\n'
'.. autofunction:: target.typehints.tuple_args\n'
'\n'
' :param x: arg\n'
' :return: another tuple\n'
)
app.build()
context = (app.outdir / 'index.txt').read_text()
assert ('target.typehints.incr(a, b=1)\n'
'\n'
'target.typehints.tuple_args(x)\n'
'\n'
' Parameters:\n'
' **x** (*Tuple**[**int**, **Union**[**int**, **str**]**]*) -- arg\n'
'\n'
' Returns:\n'
' another tuple\n'
'\n'
' Return type:\n'
' Tuple[int, int]\n'
in context)
@pytest.mark.sphinx('text', testroot='ext-autodoc',
confoverrides={'autodoc_typehints': "description"})
def test_autodoc_typehints_description_with_documented_init(app):
(app.srcdir / 'index.rst').write_text(
'.. autoclass:: target.typehints._ClassWithDocumentedInit\n'
' :special-members: __init__\n'
)
app.build()
context = (app.outdir / 'index.txt').read_text()
assert ('class target.typehints._ClassWithDocumentedInit(x)\n'
'\n'
' Class docstring.\n'
'\n'
' Parameters:\n'
' **x** (*int*) --\n'
'\n'
' Return type:\n'
' None\n'
'\n'
' __init__(x)\n'
'\n'
' Init docstring.\n'
'\n'
' Parameters:\n'
' **x** (*int*) -- Some integer\n'
'\n'
' Return type:\n'
' None\n' == context)
@pytest.mark.sphinx('text', testroot='ext-autodoc',
confoverrides={'autodoc_typehints': "description",
'autodoc_typehints_description_target': 'documented'})
def test_autodoc_typehints_description_with_documented_init_no_undoc(app):
(app.srcdir / 'index.rst').write_text(
'.. autoclass:: target.typehints._ClassWithDocumentedInit\n'
' :special-members: __init__\n'
)
app.build()
context = (app.outdir / 'index.txt').read_text()
assert ('class target.typehints._ClassWithDocumentedInit(x)\n'
'\n'
' Class docstring.\n'
'\n'
' __init__(x)\n'
'\n'
' Init docstring.\n'
'\n'
' Parameters:\n'
' **x** (*int*) -- Some integer\n' == context)
@pytest.mark.sphinx('text', testroot='ext-autodoc',
confoverrides={'autodoc_typehints': "description"})
def test_autodoc_typehints_description_for_invalid_node(app):
text = ".. py:function:: hello; world"
restructuredtext.parse(app, text) # raises no error
@pytest.mark.sphinx('text', testroot='ext-autodoc',
confoverrides={'autodoc_typehints': "both"})
def test_autodoc_typehints_both(app):
(app.srcdir / 'index.rst').write_text(
'.. autofunction:: target.typehints.incr\n'
'\n'
'.. autofunction:: target.typehints.tuple_args\n'
'\n'
'.. autofunction:: target.overload.sum\n'
)
app.build()
context = (app.outdir / 'index.txt').read_text()
assert ('target.typehints.incr(a: int, b: int = 1) -> int\n'
'\n'
' Parameters:\n'
' * **a** (*int*) --\n'
'\n'
' * **b** (*int*) --\n'
'\n'
' Return type:\n'
' int\n'
in context)
assert ('target.typehints.tuple_args(x: Tuple[int, Union[int, str]]) -> Tuple[int, int]\n'
'\n'
' Parameters:\n'
' **x** (*Tuple**[**int**, **Union**[**int**, **str**]**]*) --\n'
'\n'
' Return type:\n'
' Tuple[int, int]\n'
in context)
    # Overloads still get displayed in the signature
assert ('target.overload.sum(x: int, y: int = 0) -> int\n'
'target.overload.sum(x: float, y: float = 0.0) -> float\n'
'target.overload.sum(x: str, y: str = None) -> str\n'
'\n'
' docstring\n'
in context)
@pytest.mark.skipif(sys.version_info < (3, 7), reason='python 3.7+ is required.')
@pytest.mark.sphinx('text', testroot='ext-autodoc')
def test_autodoc_type_aliases(app):
# default
options = {"members": None}
actual = do_autodoc(app, 'module', 'target.autodoc_type_aliases', options)
assert list(actual) == [
'',
'.. py:module:: target.autodoc_type_aliases',
'',
'',
'.. py:class:: Foo()',
' :module: target.autodoc_type_aliases',
'',
' docstring',
'',
'',
' .. py:attribute:: Foo.attr1',
' :module: target.autodoc_type_aliases',
' :type: int',
'',
' docstring',
'',
'',
' .. py:attribute:: Foo.attr2',
' :module: target.autodoc_type_aliases',
' :type: int',
'',
' docstring',
'',
'',
'.. py:function:: mult(x: int, y: int) -> int',
' mult(x: float, y: float) -> float',
' :module: target.autodoc_type_aliases',
'',
' docstring',
'',
'',
'.. py:function:: read(r: _io.BytesIO) -> _io.StringIO',
' :module: target.autodoc_type_aliases',
'',
' docstring',
'',
'',
'.. py:function:: sum(x: int, y: int) -> int',
' :module: target.autodoc_type_aliases',
'',
' docstring',
'',
'',
'.. py:data:: variable',
' :module: target.autodoc_type_aliases',
' :type: int',
'',
' docstring',
'',
'',
'.. py:data:: variable2',
' :module: target.autodoc_type_aliases',
' :type: int',
' :value: None',
'',
' docstring',
'',
]
# define aliases
app.config.autodoc_type_aliases = {'myint': 'myint',
'io.StringIO': 'my.module.StringIO'}
actual = do_autodoc(app, 'module', 'target.autodoc_type_aliases', options)
assert list(actual) == [
'',
'.. py:module:: target.autodoc_type_aliases',
'',
'',
'.. py:class:: Foo()',
' :module: target.autodoc_type_aliases',
'',
' docstring',
'',
'',
' .. py:attribute:: Foo.attr1',
' :module: target.autodoc_type_aliases',
' :type: myint',
'',
' docstring',
'',
'',
' .. py:attribute:: Foo.attr2',
' :module: target.autodoc_type_aliases',
' :type: myint',
'',
' docstring',
'',
'',
'.. py:function:: mult(x: myint, y: myint) -> myint',
' mult(x: float, y: float) -> float',
' :module: target.autodoc_type_aliases',
'',
' docstring',
'',
'',
'.. py:function:: read(r: _io.BytesIO) -> my.module.StringIO',
' :module: target.autodoc_type_aliases',
'',
' docstring',
'',
'',
'.. py:function:: sum(x: myint, y: myint) -> myint',
' :module: target.autodoc_type_aliases',
'',
' docstring',
'',
'',
'.. py:data:: variable',
' :module: target.autodoc_type_aliases',
' :type: myint',
'',
' docstring',
'',
'',
'.. py:data:: variable2',
' :module: target.autodoc_type_aliases',
' :type: myint',
' :value: None',
'',
' docstring',
'',
]
@pytest.mark.skipif(sys.version_info < (3, 7), reason='python 3.7+ is required.')
@pytest.mark.sphinx('text', testroot='ext-autodoc',
srcdir='autodoc_typehints_description_and_type_aliases',
confoverrides={'autodoc_typehints': "description",
'autodoc_type_aliases': {'myint': 'myint'}})
def test_autodoc_typehints_description_and_type_aliases(app):
(app.srcdir / 'autodoc_type_aliases.rst').write_text('.. autofunction:: target.autodoc_type_aliases.sum')
app.build()
context = (app.outdir / 'autodoc_type_aliases.txt').read_text()
assert ('target.autodoc_type_aliases.sum(x, y)\n'
'\n'
' docstring\n'
'\n'
' Parameters:\n'
' * **x** (*myint*) --\n'
'\n'
' * **y** (*myint*) --\n'
'\n'
' Return type:\n'
' myint\n' == context)
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autodoc_default_options(app):
# no settings
actual = do_autodoc(app, 'class', 'target.enums.EnumCls')
assert ' .. py:attribute:: EnumCls.val1' not in actual
assert ' .. py:attribute:: EnumCls.val4' not in actual
actual = do_autodoc(app, 'class', 'target.CustomIter')
assert ' .. py:method:: target.CustomIter' not in actual
actual = do_autodoc(app, 'module', 'target')
assert '.. py:function:: save_traceback(app)' not in actual
# with :members:
app.config.autodoc_default_options = {'members': None}
actual = do_autodoc(app, 'class', 'target.enums.EnumCls')
assert ' .. py:attribute:: EnumCls.val1' in actual
assert ' .. py:attribute:: EnumCls.val4' not in actual
    # with :members: = True
    app.config.autodoc_default_options = {'members': True}
actual = do_autodoc(app, 'class', 'target.enums.EnumCls')
assert ' .. py:attribute:: EnumCls.val1' in actual
assert ' .. py:attribute:: EnumCls.val4' not in actual
# with :members: and :undoc-members:
app.config.autodoc_default_options = {
'members': None,
'undoc-members': None,
}
actual = do_autodoc(app, 'class', 'target.enums.EnumCls')
assert ' .. py:attribute:: EnumCls.val1' in actual
assert ' .. py:attribute:: EnumCls.val4' in actual
# with :special-members:
# Note that :members: must be *on* for :special-members: to work.
app.config.autodoc_default_options = {
'members': None,
'special-members': None
}
actual = do_autodoc(app, 'class', 'target.CustomIter')
assert ' .. py:method:: CustomIter.__init__()' in actual
assert ' Create a new `CustomIter`.' in actual
assert ' .. py:method:: CustomIter.__iter__()' in actual
assert ' Iterate squares of each value.' in actual
if not IS_PYPY:
assert ' .. py:attribute:: CustomIter.__weakref__' in actual
assert ' list of weak references to the object (if defined)' in actual
# :exclude-members: None - has no effect. Unlike :members:,
# :special-members:, etc. where None == "include all", here None means
# "no/false/off".
app.config.autodoc_default_options = {
'members': None,
'exclude-members': None,
}
actual = do_autodoc(app, 'class', 'target.enums.EnumCls')
assert ' .. py:attribute:: EnumCls.val1' in actual
assert ' .. py:attribute:: EnumCls.val4' not in actual
app.config.autodoc_default_options = {
'members': None,
'special-members': None,
'exclude-members': None,
}
actual = do_autodoc(app, 'class', 'target.CustomIter')
assert ' .. py:method:: CustomIter.__init__()' in actual
assert ' Create a new `CustomIter`.' in actual
assert ' .. py:method:: CustomIter.__iter__()' in actual
assert ' Iterate squares of each value.' in actual
if not IS_PYPY:
assert ' .. py:attribute:: CustomIter.__weakref__' in actual
assert ' list of weak references to the object (if defined)' in actual
assert ' .. py:method:: CustomIter.snafucate()' in actual
assert ' Makes this snafucated.' in actual
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autodoc_default_options_with_values(app):
# with :members:
app.config.autodoc_default_options = {'members': 'val1,val2'}
actual = do_autodoc(app, 'class', 'target.enums.EnumCls')
assert ' .. py:attribute:: EnumCls.val1' in actual
assert ' .. py:attribute:: EnumCls.val2' in actual
assert ' .. py:attribute:: EnumCls.val3' not in actual
assert ' .. py:attribute:: EnumCls.val4' not in actual
# with :member-order:
app.config.autodoc_default_options = {
'members': None,
'member-order': 'bysource',
}
actual = do_autodoc(app, 'class', 'target.Class')
assert list(filter(lambda l: '::' in l, actual)) == [
'.. py:class:: Class(arg)',
' .. py:method:: Class.meth()',
' .. py:method:: Class.skipmeth()',
' .. py:method:: Class.excludemeth()',
' .. py:attribute:: Class.attr',
' .. py:attribute:: Class.docattr',
' .. py:attribute:: Class.udocattr',
' .. py:attribute:: Class.mdocattr',
' .. py:method:: Class.moore(a, e, f) -> happiness',
' .. py:attribute:: Class.inst_attr_inline',
' .. py:attribute:: Class.inst_attr_comment',
' .. py:attribute:: Class.inst_attr_string',
]
# with :special-members:
app.config.autodoc_default_options = {
'special-members': '__init__,__iter__',
}
actual = do_autodoc(app, 'class', 'target.CustomIter')
assert ' .. py:method:: CustomIter.__init__()' in actual
assert ' Create a new `CustomIter`.' in actual
assert ' .. py:method:: CustomIter.__iter__()' in actual
assert ' Iterate squares of each value.' in actual
if not IS_PYPY:
assert ' .. py:attribute:: CustomIter.__weakref__' not in actual
assert ' list of weak references to the object (if defined)' not in actual
# with :exclude-members:
app.config.autodoc_default_options = {
'members': None,
'exclude-members': 'val1'
}
actual = do_autodoc(app, 'class', 'target.enums.EnumCls')
assert ' .. py:attribute:: EnumCls.val1' not in actual
assert ' .. py:attribute:: EnumCls.val2' in actual
assert ' .. py:attribute:: EnumCls.val3' in actual
assert ' .. py:attribute:: EnumCls.val4' not in actual
app.config.autodoc_default_options = {
'members': None,
'special-members': None,
'exclude-members': '__weakref__,snafucate',
}
actual = do_autodoc(app, 'class', 'target.CustomIter')
assert ' .. py:method:: CustomIter.__init__()' in actual
assert ' Create a new `CustomIter`.' in actual
assert ' .. py:method:: CustomIter.__iter__()' in actual
assert ' Iterate squares of each value.' in actual
if not IS_PYPY:
assert ' .. py:attribute:: CustomIter.__weakref__' not in actual
assert ' list of weak references to the object (if defined)' not in actual
assert ' .. py:method:: CustomIter.snafucate()' not in actual
assert ' Makes this snafucated.' not in actual
|
python
|
from collections import Counter
def majority(lst):
    # Count occurrences and report the element that appears more than
    # len(lst) // 2 times, if any.
    freq_dict = Counter(lst)
    size = len(lst)
    for key, value in freq_dict.items():
        if value > (size // 2):
            print(key)
            return
    print("None")
if __name__ == "__main__":
lst = [3, 3, 4, 2, 4, 4, 2, 2,2,2,2]
mejority(lst)
|
python
|
import pytest
import pathlib
from align.cell_fabric import Canvas, Pdk, Wire
mydir = pathlib.Path(__file__).resolve().parent
pdkfile = mydir.parent.parent / 'pdks' / 'FinFET14nm_Mock_PDK' / 'layers.json'
@pytest.fixture
def setup():
p = Pdk().load(pdkfile)
c = Canvas(p)
c.addGen( Wire( nm='m2', layer='M2', direction='h', clg=None, spg=None))
m2 = p['M2']
m2['AdjacentAttacker'] = 1
assert 'Width' in m2
dy = m2['Width']//2
py = m2['Pitch']
c.terminals = [
{'layer': 'M2', 'netName': 'x', 'rect': [ 0, 0*py-dy, 200, 0*py+dy], "netType": "drawing"},
{'layer': 'M2', 'netName': 'y', 'rect': [ 200, 1*py-dy, 400, 1*py+dy], "netType": "drawing"}
]
return c
def test_adjacent_ok(setup):
c = setup
c.gen_data()
assert c.drc.num_errors == 0
def test_adjacent_bad(setup):
c = setup
c.terminals[1]['rect'][0] += 1
c.terminals[1]['rect'][2] += 1
c.gen_data()
assert c.drc.num_errors == 1
def test_adjacent_ok2(setup):
c = setup
c.terminals[1]['rect'][0] += 2
c.terminals[1]['rect'][2] += 2
c.gen_data()
assert c.drc.num_errors == 0
|
python
|
# -*- coding: utf-8 -*-
"""
Script Name: PipelineTool.py
Author: Do Trinh/Jimmy - 3D artist.
Description:
This is main UI of PipelineTool.
"""
# -------------------------------------------------------------------------------------------------------------
""" Import """
# PLM
from PLM import __homepage__, __appName__
from pyPLM.damg import DAMGDICT
from pyPLM.Widgets import MainWindow, Widget, GridLayout
from pyPLM.Gui import LogoIcon
from .components import MainStatusBar, MidTab, BotTab, MainHeader
from .models import ButtonManager, ActionManager
from PLM.cores import ThreadManager
# -------------------------------------------------------------------------------------------------------------
""" Pipeline Tool main layout """
class PipelineManager(MainWindow):
key = 'PipelineManager'
_name = __appName__
toolBars = DAMGDICT()
menus = DAMGDICT()
_count = 0
def __init__(self, parent=None):
super(PipelineManager, self).__init__(parent)
self.url = __homepage__
self.setObjectName(self.name)
self.setWindowTitle(self.name)
self.setWindowIcon(LogoIcon('PLM'))
self.actionManager = ActionManager(self.parent)
self.buttonManager = ButtonManager(self.parent)
self.threadManager = ThreadManager(self.parent)
self.mainWidget = Widget()
self.layout = GridLayout()
self.mainWidget.setLayout(self.layout)
self.setCentralWidget(self.mainWidget)
self.buildUI()
def buildUI(self):
self.header = MainHeader(self.parent)
self.body = MidTab(self.buttonManager, self)
self.footer = BotTab(self)
self.statusBar = MainStatusBar(self)
self.menus = self.header.menuBar.menus
self.toolBars = self.header.toolBar.toolBars
self.mns = self.header.menuBar.mns
self.tbs = self.header.toolBar.tbs
self.updating = self.header.connectStatus.updating
self.server = self.header.connectStatus.server
self.connectServer = self.header.connectStatus.connectServer
self.connectInternet = self.header.connectStatus.connectInternet
self.layouts = [self.header, self.body, self.footer, self.statusBar]
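        # Grid placement: header spans rows 0-1, body rows 2-9, footer
        # rows 10-15; every widget spans the full 9 columns.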
self.layout.addWidget(self.header, 0, 0, 2, 9)
self.layout.addWidget(self.body, 2, 0, 8, 9)
self.layout.addWidget(self.footer, 10, 0, 6, 9)
self.setStatusBar(self.statusBar)
self.body.setFixedHeight(400)
self.updateSize()
def resizeEvent(self, event):
self.updateSize()
# print('header: {0}, body: {1}, footer: {2}'.format(self.header.height(), self.body.height(), self.footer.height()))
super(PipelineManager, self).resizeEvent(event)
def updateSize(self):
bodySize = self.body.size()
baseW = bodySize.width()
baseH = bodySize.height()
        # Qt expects integer pixel sizes, so use floor division.
        self.header.resize(baseW, baseH // 4)
        self.footer.resize(baseW, baseH * 3 // 4)
@property
def count(self):
return self._count
@count.setter
def count(self, val):
self._count = val
# -------------------------------------------------------------------------------------------------------------
# Created by panda on 6/07/2018 - 11:31 AM
# © 2017 - 2018 DAMGTEAM. All rights reserved
|
python
|
from pyiced import (
column, css_color, IcedApp, Length, radio, Settings, text,
WindowSettings,
)
class RadioExample(IcedApp):
class settings(Settings):
class window(WindowSettings):
size = (640, 320)
def __init__(self):
self.__season = None
def title(self):
return 'Radio Example'
def background_color(self):
match self.__season:
case 1:
return css_color.MEDIUMSPRINGGREEN
case 2:
return css_color.LIGHTGOLDENRODYELLOW
case 3:
return css_color.GOLDENROD
case 4:
return css_color.GHOSTWHITE
def update(self, msg, clipboard):
match msg:
case 'select', value:
self.__season = value
def view(self):
return column(
[
text("What's your favorite season?"),
radio('select', self.__season, 1, 'Spring'),
radio('select', self.__season, 2, 'Summer'),
radio('select', self.__season, 3, 'Fall'),
radio('select', self.__season, 4, 'Winter'),
],
padding=20, spacing=5,
width=Length.FILL, height=Length.FILL,
)
if __name__ == '__main__':
RadioExample().run()
|
python
|
# Copyright (C) 2021 ServiceNow, Inc.
""" Combine output datasets from different source datasets
e.g. If you have generated training datasets for dataset A and dataset B
you can combine them into A+B using this script
It will *not* overwrite existing files (an error will be thrown).
Input files must exist (an error will be thrown otherwise).
It assumes that the output file will be saved to the same folder as the input
(/nrcan_p2/data/03_primary/).
It assumes nrcan specific file naming conventions.
You MUST update the dataset parameters below.
"""
import pathlib
###################################
# DATASET PARAMETERS
PIPE = 'PIPELINE_BERT_80_POSTPIPE_BERT_SPACY_2' #'PIPELINE_GLOVE_80_POSTPIPE_GLOVE'
DATASET_A = 'dA_full_dB'
DATASET_B = 'dD'
DATASET_C = 'dA_full_dB_dD'
###################################
print('Combining files...')
DIR_MAPPING = {
'dA_full': 'v4',
'dB': 'v4_B',
'dD': 'v4_D',
'dA_full_dB': 'v4_A_B',
'dA_full_dB_dD': 'v4_A_B_D'
}
DIR_A = DIR_MAPPING[DATASET_A]
DIR_B = DIR_MAPPING[DATASET_B]
DIR_C = DIR_MAPPING[DATASET_C]
FILE_A = f'/nrcan_p2/data/03_primary/{DIR_A}/all_text_{PIPE}_{DATASET_A}_v1.txt'
print(FILE_A)
FILE_B = f'/nrcan_p2/data/03_primary/{DIR_B}/all_text_{PIPE}_{DATASET_B}_v1.txt'
print(FILE_B)
print('... into:')
FILE_C = f'/nrcan_p2/data/03_primary/{DIR_C}/all_text_{PIPE}_{DATASET_C}_v1.txt'
print(FILE_C)
file_a = pathlib.Path(FILE_A)
file_b = pathlib.Path(FILE_B)
file_c = pathlib.Path(FILE_C)
LOG_FILE = file_c.parent / (file_c.stem + '.log')
if not file_a.exists():
    raise ValueError(f'File a does not exist: {FILE_A}')
if not file_b.exists():
    raise ValueError(f'File b does not exist: {FILE_B}')
if file_c.exists():
    raise ValueError(f'File c already exists. You must delete it manually: {FILE_C}')
with open(LOG_FILE, 'w') as lf:
lf.write(f'FILE_A: {FILE_A}\n')
lf.write(f'FILE_B: {FILE_B}\n')
lf.write(f'FILE_C: {FILE_C}\n')
with open(file_a, 'r') as fa, open(file_b, 'r') as fb, open(file_c, 'w') as fc:
for line in fa:
fc.write(line)
for line in fb:
fc.write(line)
if not file_c.exists():
raise ValueError('ERROR! Something went wrong in the concatenation.')
|
python
|
import queue
import threading
from datetime import datetime

from .scanner_thread import ScannerThread
from scapy.all import *
class PyScanner:
    def __init__(self, params_names=None):
        # Avoid a mutable default argument: this dict is mutated below, so a
        # shared default would leak state between instances.
        if params_names is None:
            params_names = {"-threads": 5, "-ip": "127.0.0.1",
                            "-ports": "0-100", "-scan_type": "S"}
        params_names["-threads"] = int(params_names["-threads"])
        threads_count = params_names["-threads"]
        scan_type = params_names["-scan_type"]
        # Partition the port range across the worker threads.
self.lock = threading.Lock()
self.queue = queue.Queue()
ports_pair = params_names["-ports"].split("-")
ports_ranges = self.calcTasks(threads_num=threads_count, ports=ports_pair)
#timer, that we will use to get speed
start_clock = datetime.now()
self.threads = []
for i in range(params_names["-threads"]):
thread = ScannerThread(dest_ip=params_names["-ip"], ports=ports_ranges[i], thread_num=i, scan_type = scan_type)
self.threads.append(thread)
        for th in self.threads:
            th.start()
        # Join in a second loop; joining right after each start() would run
        # the scanner threads one at a time instead of concurrently.
        for th in self.threads:
            th.join()
end_clock = datetime.now()
    def calcTasks(self, threads_num=1, ports=(0, 65536)):
        ports = [int(ports[0]), int(ports[1])]
        # Number of ports handed to each worker thread.
        ports_count_range = round((ports[1] - ports[0]) / threads_num)
        ports_ranges = []
        last_from = ports[0] - 1
        last_to = last_from + ports_count_range
        for i in range(threads_num):
            if i == threads_num - 1:
                # Rounding can leave the last chunk short, so pin its end
                # to the top of the requested range.
                last_to = ports[1]
            ports_range = {"from": last_from + 1, "to": last_to}
            ports_ranges.append(ports_range)
            last_from = ports_range["to"]
            last_to = last_from + ports_count_range
        print("Port ranges per thread:")
        print(ports_ranges)
        return ports_ranges
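    # Example (hypothetical values): ports "0-100" with 5 threads yields
    # {"from": 0, "to": 19}, {"from": 20, "to": 39}, ..., {"from": 80, "to": 100}.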
    def checkhost(self, ip="127.0.0.1"):
        # conf.verb = 0
        try:
            a = send(IP(ttl=10, dst=ip) / ICMP())
            print(a)
            print("\n[*] Target is up, Beginning scanning...")
        except Exception as exc:
            print("\nCouldn't resolve Target: %s" % ip)
            print(exc)
|
python
|
def som(n):
    # Digital root: repeatedly sum the decimal digits until one digit remains.
    if len(n) == 1:
        return int(n)
    s = 0
    for i in range(len(n)):
        s += int(n[i])
    return som(str(s))
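
# Note (not in the original): for a positive integer n the digital root has
# the closed form 1 + (n - 1) % 9, so the recursion above is avoidable.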
while True:
e = str(input()).split()
a = e[0]
b = e[1]
if a == b == '0': break
ta = som(a)
tb = som(b)
if ta > tb: print(1)
elif ta < tb: print(2)
else: print(0)
|
python
|
# Generated by Django 2.0.4 on 2018-12-13 12:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tradukoj', '0007_bcp47_default'),
]
operations = [
migrations.CreateModel(
name='GetTextFile',
fields=[
('id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('file', models.FileField(upload_to='')),
('file_type',
models.IntegerField(
choices=[(0, 'PO file'), (1, 'MO file')], default=0)),
('last_updated_date',
models.DateTimeField(
auto_now=True,
db_index=True,
verbose_name='Last updated date')),
('done', models.BooleanField(default=False)),
('done_with_errors', models.BooleanField(default=False)),
('log', models.TextField(max_length=1024, verbose_name='Log')),
('bcp47',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='get_text_files',
to='tradukoj.BCP47',
verbose_name='Lang')),
],
),
migrations.CreateModel(
name='Namespace',
fields=[
('id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('text',
models.CharField(
max_length=255, unique=True, verbose_name='text key')),
],
),
migrations.AlterField(
model_name='translationkey',
name='namespace',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='translationkey',
name='text',
field=models.CharField(max_length=255, verbose_name='text key'),
),
migrations.AddField(
model_name='translationkey',
name='new_namespace',
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name='translation_keys',
to='tradukoj.Namespace'),
),
migrations.AlterUniqueTogether(
name='translationkey',
unique_together={('namespace', 'text')},
),
migrations.AddField(
model_name='gettextfile',
name='namespace',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='get_text_files',
to='tradukoj.Namespace'),
),
]
|
python
|
import shutil
from pathlib import Path
import hydra
import matplotlib.pyplot as plt
import numpy as np
import torch
from hydra.utils import to_absolute_path
from omegaconf import OmegaConf
from torch import nn, optim
from torch.utils import data as data_utils
from torch.utils.tensorboard import SummaryWriter
from ttslearn.logger import getLogger
from ttslearn.util import init_seed, load_utt_list, pad_1d, pad_2d
def get_epochs_with_optional_tqdm(tqdm_mode, nepochs):
"""Get epochs with optional progress bar.
Args:
tqdm_mode (str): Progress bar mode.
nepochs (int): Number of epochs.
Returns:
iterable: Epochs.
"""
if tqdm_mode == "tqdm":
from tqdm import tqdm
epochs = tqdm(range(1, nepochs + 1), desc="epoch")
else:
epochs = range(1, nepochs + 1)
return epochs
def moving_average_(model, model_test, beta=0.9999):
"""Exponential moving average (EMA) of model parameters.
Args:
model (torch.nn.Module): Model to perform EMA on.
model_test (torch.nn.Module): Model to use for the test phase.
        beta (float, optional): EMA decay rate; values close to 1.0 make the
            test model change slowly. Defaults to 0.9999.
"""
for param, param_test in zip(model.parameters(), model_test.parameters()):
param_test.data = torch.lerp(param.data, param_test.data, beta)
def num_trainable_params(model):
"""Count the number of trainable parameters in the model.
Args:
model (torch.nn.Module): Model to count the number of trainable parameters.
Returns:
int: Number of trainable parameters.
"""
parameters = filter(lambda p: p.requires_grad, model.parameters())
return sum([np.prod(p.size()) for p in parameters])
class Dataset(data_utils.Dataset): # type: ignore
"""Dataset for numpy files
Args:
in_paths (list): List of paths to input files
out_paths (list): List of paths to output files
"""
def __init__(self, in_paths, out_paths):
self.in_paths = in_paths
self.out_paths = out_paths
def __getitem__(self, idx):
"""Get a pair of input and target
Args:
idx (int): index of the pair
Returns:
tuple: input and target in numpy format
"""
return np.load(self.in_paths[idx]), np.load(self.out_paths[idx])
def __len__(self):
"""Returns the size of the dataset
Returns:
int: size of the dataset
"""
return len(self.in_paths)
def get_data_loaders(data_config, collate_fn):
"""Get data loaders for training and validation.
Args:
data_config (dict): Data configuration.
collate_fn (callable): Collate function.
Returns:
dict: Data loaders.
"""
data_loaders = {}
for phase in ["train", "dev"]:
utt_ids = load_utt_list(to_absolute_path(data_config[phase].utt_list))
in_dir = Path(to_absolute_path(data_config[phase].in_dir))
out_dir = Path(to_absolute_path(data_config[phase].out_dir))
in_feats_paths = [in_dir / f"{utt_id}-feats.npy" for utt_id in utt_ids]
out_feats_paths = [out_dir / f"{utt_id}-feats.npy" for utt_id in utt_ids]
dataset = Dataset(in_feats_paths, out_feats_paths)
data_loaders[phase] = data_utils.DataLoader(
dataset,
batch_size=data_config.batch_size,
collate_fn=collate_fn,
pin_memory=True,
num_workers=data_config.num_workers,
shuffle=phase.startswith("train"),
)
return data_loaders
def collate_fn_dnntts(batch):
"""Collate function for DNN-TTS.
Args:
batch (list): List of tuples of the form (inputs, targets).
Returns:
tuple: Batch of inputs, targets, and lengths.
"""
lengths = [len(x[0]) for x in batch]
max_len = max(lengths)
x_batch = torch.stack([torch.from_numpy(pad_2d(x[0], max_len)) for x in batch])
y_batch = torch.stack([torch.from_numpy(pad_2d(x[1], max_len)) for x in batch])
l_batch = torch.tensor(lengths, dtype=torch.long)
return x_batch, y_batch, l_batch
def collate_fn_wavenet(batch, max_time_frames=100, hop_size=80, aux_context_window=2):
"""Collate function for WaveNet.
Args:
batch (list): List of tuples of the form (inputs, targets).
max_time_frames (int, optional): Number of time frames. Defaults to 100.
hop_size (int, optional): Hop size. Defaults to 80.
aux_context_window (int, optional): Auxiliary context window. Defaults to 2.
Returns:
tuple: Batch of waveforms and conditional features.
"""
max_time_steps = max_time_frames * hop_size
xs, cs = [b[1] for b in batch], [b[0] for b in batch]
    # Randomly choose start positions in the conditioning features, then cut
    # out the matching short waveform segments.
c_lengths = [len(c) for c in cs]
start_frames = np.array(
[
np.random.randint(
aux_context_window, cl - aux_context_window - max_time_frames
)
for cl in c_lengths
]
)
x_starts = start_frames * hop_size
x_ends = x_starts + max_time_steps
c_starts = start_frames - aux_context_window
c_ends = start_frames + max_time_frames + aux_context_window
x_cut = [x[s:e] for x, s, e in zip(xs, x_starts, x_ends)]
c_cut = [c[s:e] for c, s, e in zip(cs, c_starts, c_ends)]
    # Convert the lists of numpy.ndarray into torch.Tensor.
x_batch = torch.tensor(x_cut, dtype=torch.long) # (B, T)
c_batch = torch.tensor(c_cut, dtype=torch.float).transpose(2, 1) # (B, C, T')
return x_batch, c_batch
def ensure_divisible_by(feats, N):
"""Ensure that the number of frames is divisible by N.
Args:
feats (np.ndarray): Input features.
        N (int): Divisor; trailing frames are dropped until the frame count
            is divisible by N.
Returns:
np.ndarray: Input features with number of frames divisible by N.
"""
if N == 1:
return feats
mod = len(feats) % N
if mod != 0:
feats = feats[: len(feats) - mod]
return feats
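
# For example, ensure_divisible_by(feats, 2) on a 101-frame array drops the
# final frame, leaving 100 frames.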
def collate_fn_tacotron(batch, reduction_factor=1):
"""Collate function for Tacotron.
Args:
batch (list): List of tuples of the form (inputs, targets).
reduction_factor (int, optional): Reduction factor. Defaults to 1.
Returns:
tuple: Batch of inputs, input lengths, targets, target lengths and stop flags.
"""
xs = [x[0] for x in batch]
ys = [ensure_divisible_by(x[1], reduction_factor) for x in batch]
in_lens = [len(x) for x in xs]
out_lens = [len(y) for y in ys]
in_max_len = max(in_lens)
out_max_len = max(out_lens)
x_batch = torch.stack([torch.from_numpy(pad_1d(x, in_max_len)) for x in xs])
y_batch = torch.stack([torch.from_numpy(pad_2d(y, out_max_len)) for y in ys])
il_batch = torch.tensor(in_lens, dtype=torch.long)
ol_batch = torch.tensor(out_lens, dtype=torch.long)
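    # Stop flags are 0 for valid frames and 1.0 from the final output frame
    # onward, so the decoder can learn when to stop generating.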
stop_flags = torch.zeros(y_batch.shape[0], y_batch.shape[1])
for idx, out_len in enumerate(out_lens):
stop_flags[idx, out_len - 1 :] = 1.0
return x_batch, il_batch, y_batch, ol_batch, stop_flags
def set_epochs_based_on_max_steps_(train_config, steps_per_epoch, logger):
"""Set epochs based on max steps.
Args:
train_config (TrainConfig): Train config.
steps_per_epoch (int): Number of steps per epoch.
logger (logging.Logger): Logger.
"""
logger.info(f"Number of iterations per epoch: {steps_per_epoch}")
if train_config.max_train_steps < 0:
# Set max_train_steps based on nepochs
max_train_steps = train_config.nepochs * steps_per_epoch
train_config.max_train_steps = max_train_steps
logger.info(
"Number of max_train_steps is set based on nepochs: {}".format(
max_train_steps
)
)
else:
# Set nepochs based on max_train_steps
max_train_steps = train_config.max_train_steps
epochs = int(np.ceil(max_train_steps / steps_per_epoch))
train_config.nepochs = epochs
logger.info(
"Number of epochs is set based on max_train_steps: {}".format(epochs)
)
logger.info(f"Number of epochs: {train_config.nepochs}")
logger.info(f"Number of iterations: {train_config.max_train_steps}")
def save_checkpoint(
logger, out_dir, model, optimizer, epoch, is_best=False, postfix=""
):
"""Save a checkpoint.
Args:
logger (logging.Logger): Logger.
out_dir (str): Output directory.
model (nn.Module): Model.
optimizer (Optimizer): Optimizer.
epoch (int): Current epoch.
is_best (bool, optional): Whether or not the current model is the best.
Defaults to False.
postfix (str, optional): Postfix. Defaults to "".
"""
if isinstance(model, nn.DataParallel):
model = model.module
out_dir.mkdir(parents=True, exist_ok=True)
if is_best:
path = out_dir / f"best_loss{postfix}.pth"
else:
path = out_dir / "epoch{:04d}{}.pth".format(epoch, postfix)
torch.save(
{
"state_dict": model.state_dict(),
"optimizer_state": optimizer.state_dict(),
},
path,
)
logger.info(f"Saved checkpoint at {path}")
if not is_best:
shutil.copyfile(path, out_dir / f"latest{postfix}.pth")
def plot_attention(alignment):
"""Plot attention.
Args:
alignment (np.ndarray): Attention.
"""
fig, ax = plt.subplots()
alignment = alignment.cpu().data.numpy().T
im = ax.imshow(alignment, aspect="auto", origin="lower", interpolation="none")
fig.colorbar(im, ax=ax)
plt.xlabel("Decoder time step")
plt.ylabel("Encoder time step")
return fig
def plot_2d_feats(feats, title=None):
"""Plot 2D features.
Args:
feats (np.ndarray): Input features.
title (str, optional): Title. Defaults to None.
"""
feats = feats.cpu().data.numpy().T
fig, ax = plt.subplots()
im = ax.imshow(
feats, aspect="auto", origin="lower", interpolation="none", cmap="viridis"
)
fig.colorbar(im, ax=ax)
if title is not None:
ax.set_title(title)
return fig
def setup(config, device, collate_fn):
"""Setup for traiining
Args:
config (dict): configuration for training
device (torch.device): device to use for training
collate_fn (callable): function to collate mini-batches
Returns:
(tuple): tuple containing model, optimizer, learning rate scheduler,
data loaders, tensorboard writer, and logger.
.. note::
        The code listed in the book simplifies this function in places.
"""
    # NOTE: hydra adds a stream logger internally, so be careful not to add
    # another one.
logger = getLogger(config.verbose, add_stream_handler=False)
logger.info(f"PyTorch version: {torch.__version__}")
    # CUDA-related settings.
if torch.cuda.is_available():
from torch.backends import cudnn
cudnn.benchmark = config.cudnn.benchmark
cudnn.deterministic = config.cudnn.deterministic
logger.info(f"cudnn.deterministic: {cudnn.deterministic}")
logger.info(f"cudnn.benchmark: {cudnn.benchmark}")
if torch.backends.cudnn.version() is not None:
logger.info(f"cuDNN version: {torch.backends.cudnn.version()}")
logger.info(f"Random seed: {config.seed}")
init_seed(config.seed)
    # Instantiate the model.
model = hydra.utils.instantiate(config.model.netG).to(device)
logger.info(model)
logger.info(
"Number of trainable params: {:.3f} million".format(
num_trainable_params(model) / 1000000.0
)
)
    # (optional) Load a pretrained model checkpoint for fine-tuning.
pretrained_checkpoint = config.train.pretrained.checkpoint
if pretrained_checkpoint is not None and len(pretrained_checkpoint) > 0:
logger.info(
"Fine-tuning! Loading a checkpoint: {}".format(pretrained_checkpoint)
)
checkpoint = torch.load(pretrained_checkpoint, map_location=device)
model.load_state_dict(checkpoint["state_dict"])
    # Multi-GPU support.
if config.data_parallel:
model = nn.DataParallel(model)
# Optimizer
optimizer_class = getattr(optim, config.train.optim.optimizer.name)
optimizer = optimizer_class(
model.parameters(), **config.train.optim.optimizer.params
)
    # Learning-rate scheduler.
lr_scheduler_class = getattr(
optim.lr_scheduler, config.train.optim.lr_scheduler.name
)
lr_scheduler = lr_scheduler_class(
optimizer, **config.train.optim.lr_scheduler.params
)
# DataLoader
data_loaders = get_data_loaders(config.data, collate_fn)
set_epochs_based_on_max_steps_(config.train, len(data_loaders["train"]), logger)
    # Tensorboard setup.
writer = SummaryWriter(to_absolute_path(config.train.log_dir))
    # Save the config files alongside the outputs.
out_dir = Path(to_absolute_path(config.train.out_dir))
out_dir.mkdir(parents=True, exist_ok=True)
with open(out_dir / "model.yaml", "w") as f:
OmegaConf.save(config.model, f)
with open(out_dir / "config.yaml", "w") as f:
OmegaConf.save(config, f)
return model, optimizer, lr_scheduler, data_loaders, writer, logger
|
python
|
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
# Creates a list of potential "cognates" for a pair of languages.
#
# Assumes you have lingpy (http://lingpy.org/) and tabulate
# (https://pypi.org/project/tabulate/) installed.
#
# Example usage:
#
# python3 scripts/find_cognates_lingpy.py \
# --language1=French
# --language2=Hanunoo
#
# Output:
#
# cat /var/tmp/cognates/filtered_cognates_French_Hanunoo
# ʒ u ʀ s i r a ŋ
from absl import app
from absl import flags
import collections
import csv
import os
from lingpy import *
from tabulate import tabulate
flags.DEFINE_string("output_dir", "/var/tmp/cognates",
"Output directory")
flags.DEFINE_string("language1", None, "Language 1")
flags.DEFINE_string("language2", None, "Language 1")
flags.DEFINE_string("pairlist", "list_data/cognates.csv",
"Pathname of list of cognates extracted for "
"the languages in Section 6 of Blevins & Sproat")
FLAGS = flags.FLAGS
def make_pairlist(path, l1, l2):
"""Creates pair list for l1 and l2.
Args:
    path: pathname of the cognates CSV file
l1: language 1
l2: language 2
"""
pairlist = []
with open(path) as stream:
reader = csv.DictReader(stream)
for row in reader:
if row[l1] == "-" or row[l2] == "-":
continue
pairlist.append((row["GLOSS"], row[l1], row[l2]))
return pairlist
def make_initial_cognate_tsv(dir, l1, l2, pairlist):
"""Collects initial "cognates" for l1 and l2.
Args:
dir: output directory
l1: language 1
l2: language 2
pairlist: list of "cognate" pairs of l1, l2
"""
filename = "{}/initial_cognates_{}_{}".format(dir, l1, l2)
with open(filename, "w") as ostream:
ostream.write("# {} <-> {}\n".format(l1, l2))
ostream.write("ID\tTaxon\tGloss\tGlossID\tIPA\tTokens\n")
id_ = 1
gloss_id = 1
for (gloss, p1, p2) in pairlist:
if gloss == "GLOSS":
continue
ostream.write("#\n")
ostream.write(
"{}\t{}\t{}\t{}\t{}\t{}\n".format(
id_, l1, gloss, gloss_id, p1.replace(" ", ""), p1))
id_ += 1
ostream.write(
"{}\t{}\t{}\t{}\t{}\t{}\n".format(
id_, l2, gloss, gloss_id, p2.replace(" ", ""), p2))
id_ += 1
gloss_id += 1
def collect_potential_cognates(dir, l1, l2, threshold=0.55, runs=10000):
"""Collects potential cognates for l1 and l2.
Args:
dir: output directory
l1: language 1
l2: language 2
threshold: threshold for acceptance of cognate, distance from
lex.align_pairs
runs: number of runs to perform
"""
filename = "{}/initial_cognates_{}_{}".format(dir, l1, l2)
lex = LexStat(filename)
lex.get_scorer(runs=runs)
table = []
  # LexStat sorts the language names, so we have to present them in sorted
  # order when keying into its tables.
if l2 < l1:
L1, L2 = l2, l1
else:
L1, L2 = l1, l2
initial_list_len = 0
for key, (idxA, idxB) in enumerate(lex.pairs[L1, L2]):
almA, almB, dst = lex.align_pairs(idxA, idxB, mode="overlap", pprint=False)
initial_list_len += 1
if dst <= threshold:
table += [[
key+1,
lex[idxA, "concept"],
lex[idxA, "tokens"],
lex[idxB, "tokens"],
round(dst, 2)]]
# Eschew writing this out in tabular format and instead just write out l1 and
# l2, one "cognate" per line, so that this can be used directly by
#
# generate_random_cognate_lists.sh
with open("{}/filtered_cognates_{}_{}".format(dir, l1, l2), "w") as stream:
for row in table:
      _, _, toks1, toks2, _ = row
      stream.write("{}\t{}\n".format(" ".join(toks1), " ".join(toks2)))
def main(unused_argv):
try:
os.mkdir(FLAGS.output_dir)
except FileExistsError:
pass
pairlist = make_pairlist(FLAGS.pairlist,
FLAGS.language1,
FLAGS.language2)
make_initial_cognate_tsv(FLAGS.output_dir,
FLAGS.language1,
FLAGS.language2,
pairlist)
collect_potential_cognates(FLAGS.output_dir,
FLAGS.language1,
FLAGS.language2)
if __name__ == "__main__":
flags.mark_flag_as_required("language1")
flags.mark_flag_as_required("language2")
app.run(main)
|
python
|
from flask import flash
def Euclidean_Algorithm(number_x, number_y):
    try:
        x, y = int(number_x), int(number_y)
        r = x % y
        while r > 0:
            x = y
            y = r
            r = x % y
        gcd = y
        answer = "The GCD of {} and {} is {}".format(number_x, number_y, gcd)
    except (ValueError, ZeroDivisionError):
        answer = "Error"
        flash("Error: please enter the values again")
    return answer
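
# Note (not in the original): the standard library's math.gcd performs the
# same computation directly, e.g. math.gcd(48, 36) == 12.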
|