content|type|
---|---|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
from test_utils.label_to_str_voc import convert_label_to_str
def render_boxs_info_for_display(image, net_out, select_index, net_score, image_size, label_out = None):
valid_box = net_out[select_index]
valid_score = net_score[select_index]
for index, value in enumerate(select_index):
if net_score[index] > 0.5 and value == True:
# if value == True:
valid_box = net_out[index]
valid_score = net_score[index]
print("current box info is " + str(valid_box))
print("current box scores is " + str(valid_score))
if label_out is not None :
print("current label is %s"%(convert_label_to_str(label_out[index])))
ymin = int(valid_box[0] * image_size)
xmin = int(valid_box[1] * image_size)
ymax = int(valid_box[2] * image_size)
xmax = int(valid_box[3] * image_size)
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), thickness=1,color=(0,0,255))
return image
def render_rectangle_box(image, box, colour = (255, 255, 255), offset = 0, thickness = 1):
"""
:param image: the image to draw on
:param box: box coordinates, normalized [ymin, xmin, ymax, xmax]
:param colour: box colour (BGR)
:param offset: pixel offset applied to the box corners
:param thickness: line thickness
:return:
"""
height,width, channel = image.shape
y_start = int(height * box[0]) + offset
x_start = int(width * box[1]) + offset
y_end = int(height * box[2]) + offset
x_end = int(width * box[3]) + offset
image = cv2.rectangle(image,(x_start,y_start), (x_end,y_end), color=colour, thickness= thickness)
return image
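# Illustrative usage sketch (added; not part of the original file). The image path and
# the normalized [ymin, xmin, ymax, xmax] box below are made-up values for demonstration.
if __name__ == '__main__':
    demo_image = cv2.imread('demo.jpg')  # hypothetical input image
    if demo_image is not None:
        demo_image = render_rectangle_box(demo_image, [0.1, 0.2, 0.6, 0.8], colour=(0, 255, 0), thickness=2)
        cv2.imwrite('demo_with_box.jpg', demo_image)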
|
python
|
import unittest
import numpy as np
from sca.analysis import nicv
class TestNicvUnit(unittest.TestCase):
def test_calculate_mean_x_given_y_matrix(self):
""" Tests whether the calculations of means work properly"""
traces = np.array([[1, 2, 3], [4, 5, 6], [7, 0.4, 9], [2, 3, 12]])
plain = np.array([[1], [2], [1], [2]])
keys = plain
resulting_matrix = np.zeros((9, 3))
resulting_matrix[4] = [3.5, 2.6, 7.5]
calculated_matrix = nicv.NICV.calculate_mean_x_given_y_matrix(plain, traces, 0, keys)
print(calculated_matrix)
self.assertTrue(np.allclose(calculated_matrix, resulting_matrix))
def test_calculate_single_nicv(self):
""" Tests whether the calculation of a single nicv value works properly"""
mean_x_given_y = np.array([[-0.01, 0.01, 0, 0.014]])
y = np.array([[0.1, -0.01, 0.03, 0.1]])
resulting_nicv = 0.03898876404494381
calculated_nicv = nicv.NICV.calculate_single_nicv(mean_x_given_y, y)
self.assertAlmostEqual(calculated_nicv, resulting_nicv)
def test_get_points_of_interest_indices(self):
""" Tests if the point of interest selection works properly"""
traces = np.array([[1, 2, 3], [4, 5, 6], [7, 0.4, 9], [2, 3, 12]])
plain = np.array([[1], [2], [1], [2]])
keys = plain
resulting_points_of_interest_indices = [1, 2]
calculated_points_of_interest_indices = nicv.NICV.get_points_of_interest_indices(plain, traces, 2, 0, keys)
print(calculated_points_of_interest_indices)
self.assertTrue(np.allclose(resulting_points_of_interest_indices, calculated_points_of_interest_indices))
def test_get_points_of_interest(self):
""" Tests if the point of interest selection works properly"""
traces = np.array([[1, 2, 3], [4, 5, 6], [7, 0.4, 9], [2, 3, 12]])
plain = np.array([[1], [2], [1], [2]])
keys = plain
resulting_points_of_interest = [[2, 3], [5, 6], [0.4, 9], [3, 12]]
calculated_points_of_interest = nicv.NICV.get_points_of_interest(plain, traces, 2, 0, keys)
self.assertTrue(np.allclose(resulting_points_of_interest, calculated_points_of_interest))
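# Sanity-check note (added; not part of the original tests). Assuming NICV is computed as
# Var(E[X|Y]) / Var(X) with population variance (numpy's default, ddof=0), the expected
# value in test_calculate_single_nicv can be reproduced directly:
#   np.var([-0.01, 0.01, 0, 0.014]) / np.var([0.1, -0.01, 0.03, 0.1])  # ~0.0389887640449438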
|
python
|
# coding: utf-8
# In[ ]:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import tree
get_ipython().run_line_magic('matplotlib', 'inline')
# In[ ]:
def maybe_load_loan_data(threshold=1, path='../input/loan.csv', force='n'):
def load_data():
data = pd.read_csv(path, low_memory=False)
t = len(data) // threshold  # minimum number of non-null values a column must keep
data = data.dropna(thresh=t, axis=1) # with threshold=2 this drops columns that are more than 50% missing
return data
# conditionally load the data
try:
if df.empty or force=='y':
data = load_data()
else:
return df
except NameError:  # df is not defined yet on the first run
data = load_data()
return data
df = maybe_load_loan_data(2)
# In[ ]:
df.columns
# In[ ]:
def show_stats(df):
print ("Number of records {}".format(len(df)))
print ("Dataset Shape {}".format(df.shape))
sns.distplot(df['loan_amnt'].astype(int))
show_stats(df)
# In[ ]:
# Understand data correlations
numeric_features = df.select_dtypes(include=[np.number])
print(numeric_features.describe())
categoricals = df.select_dtypes(exclude=[np.number])
print(categoricals.describe())
corr = numeric_features.corr()
print (corr['loan_amnt'].sort_values(ascending=False)[:10], '\n')
print (corr['loan_amnt'].sort_values(ascending=False)[-10:])
''' move this to model evaluation section
from sklearn.metrics import confusion_matrix
confmat = confusion_matrix(y_true=y_test, y_pred=y_pred)
print(confmat)'''
# In[ ]:
def show_dictionary(path='../input/LCDataDictionary.xlsx'):
data_dictionary = pd.read_excel(path)
print(data_dictionary.shape[0])
print(data_dictionary.columns.tolist())
data_dictionary = data_dictionary.rename(columns={'Name': 'name',
'Description': 'description'})
return data_dictionary
dict = show_dictionary()
dict.set_index('LoanStatNew', inplace=True)
dict.loc[:]
# In[ ]:
dict.loc[dict.index.intersection(categoricals.columns)]  # descriptions of the categorical columns
# In[ ]:
from pandas.plotting import scatter_matrix  # pandas.tools.plotting was removed in newer pandas
attributes = ['annual_inc', 'loan_amnt', 'revol_util', 'dti', 'open_acc', 'revol_bal', 'total_rec_int']
# 'recoveries','acc_now_delinq','delinq_2yrs','emp_length','int_rate','funded_amnt'
scatter_matrix(df[attributes], figsize=(12,8))
# In[ ]:
def print_data_shape(df):
print ("No rows: {}".format(df.shape[0]))
print ("No cols: {}".format(df.shape[1]))
print (df.head(1).values)
print ("Columns: " + df.columns)
# In[ ]:
def proc_emp_length():
df.replace('n/a', np.nan, inplace=True)
df.emp_length.fillna(value=0, inplace=True)
df['emp_length'].replace(to_replace='[^0-9]+', value='', inplace=True, regex=True)
df['emp_length'] = df['emp_length'].astype(int)
#df.emp_length.head()
# In[ ]:
df.revol_bal.head()
#df.revol_util = pd.Series(df.revol_util).str.replace('%', '').astype(float)
# In[ ]:
print (df.emp_title.value_counts().head())
print (df.emp_title.value_counts().tail())
df.emp_title.unique().shape
# In[ ]:
df.verification_status.value_counts()
# In[ ]:
def proc_desc_len():
df['desc_length'] = df['desc'].fillna('').str.len()  # fill with '' so .str.len() works on every row
#df.desc_length
# In[ ]:
def proc_issue_d():
df['issue_month'], df['issue_year'] = zip(*df.issue_d.str.split('-'))
df.drop(['issue_d'], 1, inplace=True)
# In[ ]:
def proc_zip_code():
df['zip_code'] = df['zip_code'].str.rstrip('x')
# In[ ]:
print (df.purpose.value_counts())
print ('')
print (df.title.value_counts().head())
# In[ ]:
#df = maybe_load_loan_data(threshold=2)
df.plot(kind='barh', x='purpose', y='int_rate')
# In[ ]:
print_data_shape(df)
# In[ ]:
def proc_loan_status(df):
#mapping_dict = {'loan_status':{'Fully Paid':0, 'Charged Off': 1, 'Default': 1, 'Current': 0}}
mapping_dict = {'loan_status':{'Fully Paid':0, 'Charged Off': 1}}
df = df.replace(mapping_dict)
df = df[(df['loan_status'] == 1) | (df['loan_status'] == 0)]
return df
# In[ ]:
def show_nulls(df):
nulls = pd.DataFrame(df.isnull().sum().sort_values(ascending=False)[:25])
nulls.columns = ['Null Count']
nulls.index.name = 'Feature'
return nulls
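# Illustrative follow-up sketch (added; not in the original notebook). One plausible way to
# chain the helpers defined above; the call order here is an assumption:
# df = proc_loan_status(df)
# proc_emp_length()
# proc_issue_d()
# proc_zip_code()
# show_nulls(df)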
|
python
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as torch_models
class PerceptualLoss(nn.Module):
def __init__(self, rank):
super(PerceptualLoss, self).__init__()
self.rank = rank
self.vgg19 = torch_models.vgg19(pretrained=True)
self.vgg19_relu_5_2 = nn.Sequential(*list(self.vgg19.features.children())[:-5]).eval()
for p in self.vgg19_relu_5_2.parameters():
p.requires_grad = False
self.register_buffer("mean", torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
self.register_buffer("std", torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
def forward(self, input_, target):
input_ = (input_ - self.mean) / self.std
target = (target - self.mean) / self.std
input_ = F.interpolate(input_, mode='bilinear', size=(224, 224), align_corners=False)
target = F.interpolate(target, mode='bilinear', size=(224, 224), align_corners=False)
input_vgg = self.vgg19_relu_5_2(input_)
target_vgg = self.vgg19_relu_5_2(target)
loss = F.l1_loss(input_vgg, target_vgg)
return loss
class Color2EmbedLoss(nn.Module):
def __init__(self, rank, lambda_reconstruction=1, lambda_perceptual=0.1):
super(Color2EmbedLoss, self).__init__()
self.lambda_reconstruction = lambda_reconstruction
self.lambda_perceptual = lambda_perceptual
self.reconstruction_loss = nn.SmoothL1Loss()
self.perceptual_loss = PerceptualLoss(rank)
def forward(self, pab, gtab, prgb, gtrgb):
l_rec = self.reconstruction_loss(pab, gtab)
l_per = self.perceptual_loss(prgb, gtrgb)
return self.lambda_reconstruction * l_rec + self.lambda_perceptual * l_per, l_per, l_rec
if __name__ == '__main__':
batch = 4
pab = torch.rand(batch, 2, 256, 256)
gtab = torch.rand(batch, 2, 256, 256)
prgb = torch.rand(batch, 3, 256, 256)
gtrgb = torch.rand(batch, 3, 256, 256)
loss = Color2EmbedLoss(rank=0)  # rank is a required constructor argument
print(loss(pab, gtab, prgb, gtrgb))
# print(mm(torch.rand(5, 3, 256, 256).to(0)).shape)
# summary(loss.vgg19, (3, 224, 224))
|
python
|
"""
Generates a powershell script to install Windows agent - dcos_install.ps1
"""
import os
import os.path
import gen.build_deploy.util as util
import gen.template
import gen.util
import pkgpanda
import pkgpanda.util
def generate(gen_out, output_dir):
print("Generating Powershell configuration files for DC/OS")
make_powershell(gen_out, output_dir)
def make_powershell(gen_out, output_dir):
"""Build powershell deployment script and store this at Bootstrap serve"""
output_dir = output_dir + '/windows/'
pkgpanda.util.make_directory(output_dir)
bootstrap_url = gen_out.arguments['bootstrap_url']
if gen_out.arguments['master_discovery'] == 'static':
master_list = gen_out.arguments['master_list']
elif gen_out.arguments['master_discovery'] == 'master_http_loadbalancer':
master_list = gen_out.arguments['exhibitor_address'] + ':2181'
else:
master_list = 'zk-1.zk:2181,zk-2.zk:2181,zk-3.zk:2181,zk-4.zk:2181,zk-5.zk:2181'
powershell_template_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'powershell/dcos_install.ps1.in')
with open(powershell_template_path, 'r') as f:
powershell_template = f.read()
powershell_script = gen.template.parse_str(powershell_template).render({
'dcos_image_commit': util.dcos_image_commit,
'generation_date': util.template_generation_date,
'bootstrap_url': bootstrap_url,
'master_list': master_list,
})
# Output the dcos install ps1 script
install_script_filename = 'dcos_install.ps1'
pkgpanda.util.write_string(install_script_filename, powershell_script)
pkgpanda.util.write_string(output_dir + install_script_filename, powershell_script)
|
python
|
#!/usr/bin/python3
# Self-written module to decrypt files
# =========================================================
# This module is written to reverse a ransomware attack
# =========================================================
# Reverse_Attack
# |____ ***** TAKES 1 ARGUMENT, i.e. KEY *****
# |____ Initiates the decryption process
from pathlib import Path  # used to find the home path
import threading  # threads speed up the search by scanning each target directory concurrently
from os.path import expanduser
from Crypto import Random
from Crypto.Cipher import AES
import os
import hashlib, base64
class Reverse:
def __init__(self, key):
self.decryption_key = key
self.list_of_files = []
def start(self):
home = self.get_home_dir()
target1 = home + "Pictures"
target2 = home + "Music"
target3 = home + "Downloads"
target4 = home + "Documents"
target5 = home + "Desktop"
t1 = threading.Thread(target=self.run_locate_class, args=[target1,])
t2 = threading.Thread(target=self.run_locate_class, args=[target2,])
t3 = threading.Thread(target=self.run_locate_class, args=[target3,])
t4 = threading.Thread(target=self.run_locate_class, args=[target4,])
t5 = threading.Thread(target=self.run_locate_class, args=[target5,])
# Start every search thread first, then join them, so the directories are scanned concurrently.
t1.start()
t2.start()
t3.start()
t4.start()
t5.start()
t1.join()
t2.join()
t3.join()
t4.join()
t5.join()
for files in self.list_of_files:
decrypt = Decryptor(self.decryption_key, files)
decrypt.decrypt_file() #Starting Decryption of Each File One-BY-One
def get_home_dir(self):
return str(Path.home()) + '\\'
def run_locate_class(self, drive_name):
'''
Function to make Object of LocateTargetFiles Class
'''
starting = LocateEncryptedFiles()
list_of_files = starting.start(drive_name)
self.list_of_files.extend(list_of_files)
return True
class LocateEncryptedFiles:
def __init__(self, exclude = None):
self.files_on_system = []
self.target_extension = ['enc',]
self.exclude_dir = []
if exclude != None:
self.exclude_dir.extend(exclude)
def start(self, root_dir):
self.locate_files(root_dir)
return self.files_on_system
def locate_files(self, root_dir):
for root, _, files in os.walk(root_dir):
for f in files:
abs_file_path = os.path.join(root, f)
self.filter(self.target_extension, abs_file_path)
def filter(self, target_extension, abs_file_path):
if self.is_excluded_dir(abs_file_path) == False:
# Filtering Files On the basics of file extension
if abs_file_path.split('.')[-1] in self.target_extension:
self.files_on_system.append(abs_file_path)
else:
pass
def is_excluded_dir(self, path):
'''
@summary: Checks whether the specified path should be excluded from decryption
@param path: The path to check
@return: True if the path should be excluded from decryption, otherwise False
'''
for dir_to_exclude in self.exclude_dir:
length = len(dir_to_exclude)
if path[:length] == dir_to_exclude:
return True
return False
class Decryptor:
def __init__(self, key, file_name):
self.key = hashlib.sha256(key.encode('utf-8')).digest()
self.file_name = file_name
def pad(self, s):
return s + b"\0" * (AES.block_size - len(s) % AES.block_size)
def decrypt(self, ciphertext, key):
iv = ciphertext[:AES.block_size]
cipher = AES.new(key, AES.MODE_CBC, iv)
plaintext = cipher.decrypt(ciphertext[AES.block_size:])
return plaintext.rstrip(b"\0")
def decrypt_file(self):
with open(self.file_name, 'rb') as fo:
ciphertext = fo.read()
dec = self.decrypt(ciphertext, self.key)
with open(self.file_name[:-4], 'wb') as fo:
fo.write(dec)
os.remove(self.file_name)
if __name__ == '__main__':
key = input("Enter Key : ")
warning = input("\n!!!Warning!!! \nIs This Key Correct [Wrong KEY Will Just Destroy The Data] y/n: ")
if warning.lower() == 'y':
print("\n[*] Reversing Attack ...")
print("\n[*] Initiating Decryption Process ...")
test = Reverse(key)
test.start()
print("\n[+] Completed Successfully : )")
elif warning.lower() == 'n':
print("\nPlease Try Later With Correct KEY !")
else:
print("\n[!] Invaid Argument : (")
|
python
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# ******************************************************
# @author: Haifeng CHEN - [email protected]
# @date (created): 2019-12-12 09:07
# @file: memory_monitor.py
# @brief: A tool to monitor memory usage of given process
# @internal:
# revision: 14
# last modified: 2020-03-06 12:24:48
# *****************************************************
import os
import sys
import psutil
import random
import sqlite3
import logging
import datetime
import collections
import numpy as np
import pandas as pd
from typing import Union, Tuple
from qtpy import QtCore, QtWidgets, QtGui
from utils.qapp import setHighDPI, setDarkStyle, loadQIcon
from utils.qapp import checkQLineEditValidatorState
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from parse_log import parse_memory_log
__version__ = '1.2.3'
__revision__ = 14
__app_tittle__ = 'MemoryUsageMonitor'
class MemoryLogParserRunnable(QtCore.QObject):
""" Runnable object for parsing memory log """
queue = QtCore.Signal()
ev = QtCore.Signal(object)
def __init__(self, fpath, p_name=None):
super().__init__()
self._fpath = fpath
self._p_name = p_name
self.queue.connect(self.run)
@QtCore.Slot()
def run(self):
self.ev.emit({'progress_init': ('Parsing ...', 200, 0, 0)})
try:
d = parse_memory_log(self._fpath, self._p_name)
self.ev.emit({'progress_reset': 1})
self.ev.emit({'memory_log': d})
except Exception as e:
error_msg = 'Failed to parse memory log {}. Error message is {}'.format(self._fpath, repr(e))
logging.error(error_msg)
self.ev.emit({'progress_reset': 1})
self.ev.emit({'error': error_msg})
class TreeItemsSelector(QtWidgets.QDialog):
""" A common item selector using tree widget """
def __init__(self, items: list, title='Items Selector', item_cat='Features', parent=None):
super().__init__(parent)
self.setWindowTitle(title)
self.setMinimumSize(400, 200)
self._items = {}
self._init_ui(items, item_cat)
def _init_ui(self, items, item_cat):
""" Initialize the user interface """
tree = QtWidgets.QTreeWidget()
tree.setColumnCount(1)
# tree.setHeaderHidden(True)
tree.setHeaderLabel(item_cat)
# parent = QtWidgets.QTreeWidgetItem(tree)
# parent.setText(0, '{}'.format(item_cat))
# parent.setFlags(parent.flags() | QtCore.Qt.ItemIsTristate | QtCore.Qt.ItemIsUserCheckable)
for item in items:
tree_item = QtWidgets.QTreeWidgetItem(tree)
tree_item.setText(0, '{}'.format(item))
tree_item.setFlags(tree_item.flags() | QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsSelectable)
tree_item.setCheckState(0, QtCore.Qt.Unchecked)
tree.itemChanged.connect(self._on_item_toggled)
btn_box = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel
)
btn_box.accepted.connect(self.accept)
btn_box.rejected.connect(self.reject)
vbox_layout = QtWidgets.QVBoxLayout()
vbox_layout.addWidget(tree)
vbox_layout.addWidget(btn_box)
self.setLayout(vbox_layout)
def _on_item_toggled(self, item, column):
if item.checkState(column) == QtCore.Qt.Checked:
checked = True
elif item.checkState(column) == QtCore.Qt.Unchecked:
checked = False
self._items[item.text(column)] = checked
@property
def items(self) -> Tuple:
items = [k for k, v in self._items.items() if v]
return tuple(items)
class MemoryUsageMonitor(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super().__init__(parent)
self._settings = QtCore.QSettings(QtCore.QSettings.NativeFormat,
QtCore.QSettings.UserScope,
'HF_AIO', 'MemoryUsageMonitor')
self._pid = None
self._ct = ''
self._dq = collections.deque(maxlen=self._settings.value('dq_maxlen', 120, type=int))
self._progress = QtWidgets.QProgressDialog(self)
self._progress.setCancelButton(None)
self._progress.setWindowTitle(__app_tittle__)
self._progress.setWindowModality(QtCore.Qt.WindowModal)
self._progress.setMinimumWidth(300)
self._progress.reset()
self._worker_thread = QtCore.QThread()
self._worker_thread.start()
self._log_parse_runnable = None # type: Union[None, QtCore.QObject]
self._timer = QtCore.QTimer()
self._timer.timeout.connect(self._on_timer)
self._init_ui()
self._setup_shortcuts()
def _init_ui(self):
self.setMinimumSize(800, 600)
self.setWindowTitle("{0} ({1}.{2})".format(
__app_tittle__, __version__, __revision__))
# self.setWindowIcon(loadQIcon('icons/app_icon.png'))
# The main widget
widget = QtWidgets.QWidget()
size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
size_policy.setHorizontalStretch(0)
size_policy.setVerticalStretch(0)
size_policy.setHeightForWidth(widget.sizePolicy().hasHeightForWidth())
widget.setSizePolicy(size_policy)
# create widgets ... # the first row
ctrl_layout = self._create_main_ctrls()
# create matplotlib widgets and components
canvas = self._setup_mpl_widget()
main_layout = QtWidgets.QVBoxLayout()
main_layout.addWidget(canvas)
main_layout.addLayout(ctrl_layout)
widget.setLayout(main_layout)
self.setCentralWidget(widget)
self.statusBar().showMessage('Launched ...', 1000)
def _setup_plot_frame(self, monitor=True):
self._mpl_ax.spines['bottom'].set_color('w')
self._mpl_ax.spines['top'].set_color('w')
self._mpl_ax.spines['right'].set_color('w')
self._mpl_ax.spines['left'].set_color('w')
# white text, ticks
self._mpl_ax.set_title('Memory Usage Monitor',
color='w', fontdict={'fontsize': 10})
self._mpl_ax.set_ylabel('Usage (MB)', color='w')
self._mpl_ax.tick_params(axis='both', color='w')
self._mpl_ax.tick_params(colors='w', labelsize=8)
# dark background
color = self.palette().color(QtGui.QPalette.Window).getRgbF()
self._mpl_ax.figure.patch.set_facecolor(color)
color = self.palette().color(QtGui.QPalette.Base).getRgbF()
self._mpl_ax.set_facecolor(color)
if monitor:
x = np.linspace(0, 10 * np.pi, 100)
self.line_rss = self._mpl_ax.plot(x, np.sin(x), '-', label='Mem Usage')[0]
self.line_vms = self._mpl_ax.plot(
x, np.sin(random.random() * np.pi + x), '--', label='VM Size')[0]
self._mpl_ax.legend()
self._mpl_ax.set_xlabel('Date', color='w')
else:
self._mpl_ax.grid(True)
self._mpl_ax.set_xlabel('Elapsed Hours', color='w')
def _setup_mpl_widget(self):
canvas = FigureCanvas(Figure(figsize=(5, 3)))
self._mpl_ax = canvas.figure.subplots()
canvas.figure.set_tight_layout(True)
self.addToolBar(
QtCore.Qt.TopToolBarArea,
NavigationToolbar(self._mpl_ax.figure.canvas, self)
)
self._setup_plot_frame()
return canvas
def _create_main_ctrls(self):
layout = QtWidgets.QHBoxLayout()
label1 = QtWidgets.QLabel('Interval (second)')
interval = QtWidgets.QLineEdit()
interval.setValidator(QtGui.QIntValidator(0, 1000000000))
interval.setObjectName('interval')
interval.setAlignment(QtCore.Qt.AlignCenter)
interval.setToolTip('Data sampling interval')
interval.setText(self._settings.value('interval', '10', type=str))
interval.textEdited[str].connect(self._update_settings)
interval.textChanged.connect(self._check_validator_state)
layout.addWidget(label1)
layout.addWidget(interval)
label2 = QtWidgets.QLabel('Process name')
p_name = QtWidgets.QLineEdit()
p_name.setObjectName('process_name')
p_name.setAlignment(QtCore.Qt.AlignCenter)
p_name.setToolTip('Name of the process including the extension.'
' It is case sensitive and duplicated name not well supported!')
p_name.setText(self._settings.value('process_name', '', type=str))
p_name.textEdited[str].connect(self._update_settings)
layout.addWidget(label2)
layout.addWidget(p_name)
label3 = QtWidgets.QLabel('Buffered data length*')
dq_maxlen = QtWidgets.QLineEdit()
dq_maxlen.setValidator(QtGui.QIntValidator(0, 9999))
dq_maxlen.setObjectName('dq_maxlen')
dq_maxlen.setAlignment(QtCore.Qt.AlignCenter)
dq_maxlen.setToolTip('Maximum length of the buffered data points; press Enter to apply the change on the fly!')
dq_maxlen.setText(self._settings.value('dq_maxlen', '120', type=str))
dq_maxlen.editingFinished.connect(self._on_buffer_size_changed)
dq_maxlen.textEdited[str].connect(self._update_settings)
dq_maxlen.textChanged.connect(self._check_validator_state)
layout.addWidget(label3)
layout.addWidget(dq_maxlen)
self._start_btn = QtWidgets.QPushButton('Start')
self._start_btn.clicked.connect(self._on_start)
self._start_btn.setEnabled(True)
self._stop_btn = QtWidgets.QPushButton('Stop')
self._stop_btn.clicked.connect(self._on_stop)
self._stop_btn.setEnabled(False)
layout.addWidget(self._start_btn)
layout.addWidget(self._stop_btn)
return layout
def _setup_shortcuts(self):
shortcut_t = QtWidgets.QShortcut(QtGui.QKeySequence(QtCore.Qt.CTRL + QtCore.Qt.Key_T), self)
shortcut_t.activated.connect(self._toggle_window_on_top)
shortcut_s = QtWidgets.QShortcut(QtGui.QKeySequence(QtCore.Qt.CTRL + QtCore.Qt.Key_S), self)
shortcut_s.activated.connect(self._toggle_start_stop)
shortcut_o = QtWidgets.QShortcut(QtGui.QKeySequence(QtCore.Qt.CTRL + QtCore.Qt.Key_O), self)
shortcut_o.activated.connect(self._open_memory_log)
def _on_buffer_size_changed(self):
try:
val = self._settings.value('dq_maxlen', 120, type=int)
self._dq = collections.deque(reversed(self._dq), maxlen=val)
self._dq.reverse()
msg = 'New buffer max length is {}, current size is {}'.format(val, len(self._dq))
self.statusBar().showMessage(msg, 1000)
except Exception as e:
self.statusBar().showMessage(repr(e), 1000)
def _toggle_window_on_top(self):
self.setWindowFlags(self.windowFlags() ^ QtCore.Qt.WindowStaysOnTopHint)
self.show()
if self.windowFlags() & QtCore.Qt.WindowStaysOnTopHint:
msg = 'Stays On Top: ON'
else:
msg = 'Stays On Top: OFF'
self.statusBar().showMessage(msg, 1000)
def _toggle_start_stop(self):
if self._timer.isActive():
self._on_stop()
else:
self._on_start()
def _on_start(self):
self._stop_btn.setEnabled(True)
self._start_btn.setEnabled(False)
interval = self._settings.value('interval', 10, type=int)
p_name = self._settings.value('process_name', '', type=str)
msg = 'Start monitor: [interval: {}, process name {}]'.format(interval, p_name)
logging.debug(msg)
self.statusBar().showMessage(msg, 1000)
# start timer
self._dq.clear()
self._pid = None
self._ct = ''
self._timer.start(interval * 1000)
self._mpl_ax.clear()
self._setup_plot_frame()
def _on_stop(self):
self._stop_btn.setEnabled(False)
self._start_btn.setEnabled(True)
msg = 'Stop monitor: [pid: {}, create time: {}]'.format(self._pid, self._ct)
logging.debug(msg)
self.statusBar().showMessage(msg, 1000)
# stop timer
self._timer.stop()
def _update_settings(self, q_str):
w = self.sender()
if isinstance(w, QtWidgets.QCheckBox):
if w.checkState() == QtCore.Qt.Checked:
self._settings.setValue(w.objectName(), '1')
else:
self._settings.setValue(w.objectName(), '0')
elif isinstance(w, QtWidgets.QLineEdit):
self._settings.setValue(w.objectName(), w.text())
elif isinstance(w, QtWidgets.QComboBox):
self._settings.setValue(w.objectName(),
'{}'.format(w.currentIndex()))
def _check_validator_state(self):
checkQLineEditValidatorState(self.sender(), self.palette().color(QtGui.QPalette.Base))
def closeEvent(self, event):
super().closeEvent(event)
def _update_process_id(self, p_name):
# try to check whether this id is still valid
if self._pid is not None:
try:
p = psutil.Process(self._pid)
if p.name() != p_name:
self._pid = None
self._ct = ''
except Exception:
msg = 'Process [{}]-[{}] is Dead'.format(self._pid, self._ct)
logging.info(msg)
self.statusBar().showMessage(msg, 1000)
self._pid, self._ct = None, ''
self._dq.clear()
self._mpl_ax.set_title(
'Memory Usage Monitor ({} Not Found)'.format(p_name),
color='w', fontdict={'fontsize': 10})
self._mpl_ax.figure.canvas.draw_idle()
# try to get a new pid
if self._pid is None:
for proc in psutil.process_iter(attrs=['pid', 'name']):
if proc.info['name'] == p_name:
self._pid = proc.info['pid']
self._ct = datetime.datetime.fromtimestamp(
proc.create_time()).strftime('%Y-%m-%d %H:%M:%S')
self._mpl_ax.set_title('Memory Usage Monitor ({} - {})'.format(p_name, self._ct),
color='w', fontdict={'fontsize': 10})
msg = 'New process [{}]-[{}] found'.format(self._pid, self._ct)
logging.info(msg)
self.statusBar().showMessage(msg, 1000)
break
def _on_timer(self):
p_name = self._settings.value('process_name', '', type=str)
self._update_process_id(p_name)
if self._pid is not None:
process = psutil.Process(self._pid)
memory_usage = process.memory_info()
logging.info('[{}]-[{}]-[{}] - [{}, {}]'.format(
self._pid, p_name, self._ct, memory_usage.rss, memory_usage.vms))
ts = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self._dq.appendleft((ts, memory_usage.rss, memory_usage.vms))
x = np.arange(0, len(self._dq))
self.line_rss.set_xdata(x)
rss = np.array([x[1] / 1024 / 1024 for x in self._dq])
self.line_rss.set_ydata(rss)
self.line_vms.set_xdata(x)
vms = np.array([x[2] / 1024 / 1024 for x in self._dq])
self.line_vms.set_ydata(vms)
self._mpl_ax.set_ylim(0, max(np.max(vms), np.max(rss)) * 1.1)
self._mpl_ax.set_xlim(
0,
min(max(len(x) * 1.2, self._dq.maxlen // 4), self._dq.maxlen)
)
ts = [x[0] for x in self._dq]
labels = []
for pos in self._mpl_ax.get_xticks():
pos = int(pos)
if pos < len(ts):
labels.append(ts[pos][5:])
else:
labels.append('')
self._mpl_ax.set_xticklabels(labels)
self._mpl_ax.figure.canvas.draw()
@QtCore.Slot(object)
def _on_assist_worker_thread_event(self, d):
""" d is python dict """
if 'error' in d:
error_msg = d['error']
QtWidgets.QMessageBox.critical(self, __app_tittle__, error_msg)
elif 'warn' in d:
warn_msg = d['warn']
QtWidgets.QMessageBox.warning(self, __app_tittle__, warn_msg)
elif 'progress_init' in d:
txt, duration, pos_min, pos_max = d['progress_init']
self._progress.setLabelText(txt)
self._progress.setMinimumDuration(duration)
self._progress.setRange(pos_min, pos_max)
self._progress.setValue(pos_min)
elif 'progress_update' in d:
self._progress.setValue(d['progress_update'])
elif 'progress_reset' in d:
self._progress.reset()
elif 'memory_log' in d:
self._draw_memory_log(d['memory_log'])
def _draw_memory_log(self, d: pd.DataFrame):
if d.empty:
p_name = self._settings.value('process_name', '', type=str)
QtWidgets.QMessageBox.warning(self, __app_tittle__,
'Memory usage log of process `{}` is not found!'.format(p_name))
return
g = d.groupby(['Process'])
items = list(g.groups.keys())
if len(items) != 1:
dlg = TreeItemsSelector(items, title='Select items to draw', item_cat='Process Information', parent=self)
if dlg.exec() == QtWidgets.QDialog.Accepted:
items = dlg.items
else:
return
if not items:
return
n = len(items)
self._progress.setRange(0, n)
self._progress.setValue(0)
self._mpl_ax.clear()
self._setup_plot_frame(False)
interval = self._settings.value('interval', 10, type=int)
length_lim = self._settings.value('length_limit', 100, type=int)
convert_to_hours = 60 * 60 / interval
not_empty_plot = False
for key, grp in g:
if key not in items or len(grp['rss']) < length_lim:
logging.warning('{} dropped, not selected or not enough length'.format(key))
else:
not_empty_plot = True
self._mpl_ax.plot(np.arange(len(grp['rss'])) / convert_to_hours, grp['rss'] / 1024 / 1024, label=key)
self._progress.setValue(self._progress.value() + 1)
if not_empty_plot:
self._mpl_ax.legend()
self._mpl_ax.figure.canvas.draw()
self._progress.reset()
def _open_memory_log(self):
log_path, _filter = QtWidgets.QFileDialog.getOpenFileName(
self, 'Select Memory Log file',
directory=self._settings.value('prev_log_dir', '.', type=str),
filter='Memory Log (*.log)')
if not log_path:
return
self._settings.setValue('prev_log_dir', os.path.dirname(log_path))
# firstly stop monitor
self._on_stop()
p_name = self._settings.value('process_name', '', type=str)
if self._log_parse_runnable is not None:
self._log_parse_runnable.ev.disconnect(self._on_assist_worker_thread_event)
# pass image to worker
self._log_parse_runnable = MemoryLogParserRunnable(log_path, p_name)
self._log_parse_runnable.moveToThread(self._worker_thread)
self._log_parse_runnable.ev.connect(self._on_assist_worker_thread_event)
self._log_parse_runnable.queue.emit()
def center(self):
frame_gm = self.frameGeometry()
screen = QtWidgets.QApplication.desktop().screenNumber(QtWidgets.QApplication.desktop().cursor().pos())
center_pt = QtWidgets.QApplication.desktop().screenGeometry(screen).center()
frame_gm.moveCenter(center_pt)
self.move(frame_gm.topLeft())
if __name__ == "__main__":
# enable logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
fmt='%(asctime)s %(levelname)-8s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
# file output to record memory usage
fh = logging.FileHandler('memory.log')
fh.setFormatter(formatter)
fh.setLevel(logging.INFO)
# we also need stream output for debugging
ch = logging.StreamHandler()
ch.setFormatter(formatter)
ch.setLevel(logging.WARNING)
# add the handlers to logger
logger.addHandler(fh)
logger.addHandler(ch)
# logging end
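# Illustrative note (added): with the format configured above and the message emitted in
# _on_timer(), each sample line in memory.log looks roughly like
#   2020-03-06 12:24:48 INFO    : [1234]-[example.exe]-[2020-03-06 08:00:00] - [10485760, 20971520]
# i.e. "[pid]-[process name]-[create time] - [rss, vms]" with memory given in bytes. The pid,
# name and numbers here are made up; parse_memory_log() is assumed to consume this format.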
setHighDPI()
# create Qt Application
app = QtWidgets.QApplication(sys.argv)
app.setWindowIcon(loadQIcon('icons/app_icon.png'))
try:
import qtmodern.styles
qtmodern.styles.dark(app)
except ModuleNotFoundError:
setDarkStyle(app)
# update default font for Windows 10
if sys.platform == "win32":
font = QtGui.QFont("Segoe UI", 9)
app.setFont(font)
# create the MainForm
form = MemoryUsageMonitor()
form.center()
try:
import qtmodern.windows
mw = qtmodern.windows.ModernWindow(form)
mw.show()
except ModuleNotFoundError:
form.show()
sys.exit(app.exec_())
|
python
|
## Sequence unpacking
dict = {"name": "jim", "age": "1", "sex": "male"}
key, value = dict.popitem()
print(key, value)
# Use * to collect the remaining values
a, b, *rest = [1, 2, 3, 4]
print(a, b, rest)
## Chained assignment: x = y = somefunction()
## Augmented assignment: x += 1
### Code blocks
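# Minimal illustrations of the notes above (added; `somefunction` is just a placeholder name):
x = y = len("abc")  # chained assignment: both x and y are 3
x += 1              # augmented assignment: x is now 4
print(x, y)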
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pydoc
import subprocess
import sys
import signal
from pkg_resources import get_distribution
from termcolor import colored
from projects import config
from projects import gui
from projects import paths
from projects import projectfile
__version__ = get_distribution('projects').version
help_text = '''\
===============================================================================
_ _
(_) | |
_ __ _ __ ___ _ ___ ___| |_ ___
| '_ \| '__/ _ \| |/ _ \/ __| __/ __|
| |_) | | | (_) | | __/ (__| |_\__ \\
| .__/|_| \___/| |\___|\___|\__|___/
| | _/ |
|_| |__/
===============================================================================
i n t u i t i v e p r o j e c t m a n a g e m e n t
===============================================================================
<projects> is an easy to use project navigation tool and a Makefile-like
scripting engine. Its main purpose is to provide a simpler scripting interface
with a built-in man page generator. You can define your commands with inline
documentation in Projectfiles. You can have one Projectfile in every directory
inside your project, <projects> will process them recursively.
<projects> works on every UNIX system with Python 2.7+ or 3.x installed.
<projects> is not a replacement for Makefile or CMake; it is an optional wrapper
for them.
Features:
- quick project navigation with minimal typing
- Projectfile based recursive scripting system
- command concatenation and recursive separation
- automatic manual page generation
Configuration
When projects starts up for the first time, it creates its configuration
file (only if it doesn't exist already) inside your home directory: ~/.prc
By default it contains the following options in YAML format:
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โ $ cat ~/.prc โ
โ max-doc-width: 80 โ
โ projects-path: ~/projects โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
projects-path [mandatory]
Its value tells projects where it can find your projects' repositories
max-doc-width [optional]
The maximum width of the generated manual pages. If not defined, it will be
set to 80. <projects> will adapt to narrower terminals.
Usage:
p
p p
p <command>
p (-h|--help)
p (-v|--version)
p (-i|--init)
p (-w|--walk)
p (-l|--list) <command>
p (-md|--markdown) [<file_name>]
p
This command is the main trigger for projects. It behaves differently
depending on your current working directory.
OUTSIDE your projects directory, it opens the project selector screen, where
you can select your project by typing the projects name or by using the
arrows keys.
INSIDE any of your projects (inside the repository root directory) this
command will show the manual generated from the Projectfiles.
p p
This command behaves the same as the previous "p" command but it will always
display the project selector screen. This could be handy if you want to
switch projects quickly.
This is the only prohibited command name that you cannot use for your
commands.
p <command>
This is the command for executing commands defined in the Projectfiles. By
convention all defined commands should start with an alphanumeric character.
Commands starting with a dash are reserved for <projects> itself.
The <command> keyword can be anything except the already taken keywords:
p, -h, --help, -v, --version, -i, --init, -w, --walk, -l, --list
p (-h|--help)
Brings up this help screen.
p (-v|--version)
Prints out the current <projects> version.
p (-i|--init)
Generates a template Projectfile into the current directory.
p (-w|--walk)
Lists out all directories in your project in the walk order <projects> will
follow. It marks the directories that contain a Projectfile.
p (-l|--list) <command>
Lists out the processed command bodies for the given command.
p (-md|--markdown) [<file_name>]
Generates a Markdown file from your processed Projectfiles. You can
optionally specify a name for the generated file. The default name is
README.md.
===============================================================================
_____ _ _ __ _ _
| __ \ (_) | | / _(_) |
| |__) | __ ___ _ ___ ___| |_| |_ _| | ___
| ___/ '__/ _ \| |/ _ \/ __| __| _| | |/ _ \\
| | | | | (_) | | __/ (__| |_| | | | | __/
|_| |_| \___/| |\___|\___|\__|_| |_|_|\___|
_/ |
|__/
===============================================================================
Projectfiles are the files you create in order to define commands that will be
executed with the "p <command>". Projectfiles provide a powerful and self
explanatory way to interact with your project.
You can create an example Projectfile with the "p (-i|--init)" command. The
generated Projectfile will demonstrate all provided functionality except the
recursive command concatenation since it will generate only one Projectfile.
There are mandatory and optional features you can add to Projectfile.
Mandatory:
- <projects> version
- at least one command definition header
- command body
Optional:
- main description
- variables
- command alternatives
- command dependency list
- command description
- recursive separator
Feature order:
There is a strict order in which you can place each feature. Between
features an arbitrary number of empty lines is allowed. The order is the
following:
1. version
2. main description
3. variables
4. command header
5. command description
6. command body (pre, separator and post)
version [mandatory]
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โ from v{version} โ
โ ... โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
This feature will define the earliest version that is compatible with the
used Projectfile format. All <projects> versions greater or equal to the
defined one will be compatible with the format, but earlier versions may have
problems with future features. The first release version is v1.0.0.
If there are more Projectfiles in your project and the defined versions are
different, the smallest version will be used to maximize the functionality.
main description [optional]
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โ ... โ
โ """ โ
โ Description for the whole project. โ
โ """ โ
โ ... โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
After the version you can define a global description of the whole project.
You can write long lines, <projects> will wrap them according to the defined
"max-doc-width" key in the ~/.prc configuration file. Single line breaks
won't break the lines in the generated manual. You have to use an empty line
in order to add a line break.
If you have multiple Projectfiles created, the main descriptions will be
concatenated with empty lines according to the walk order.
variables [optional]
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โ ... โ
โ variable = 42 โ
โ other_variable = "This is a string with spaces" โ
โ yet_another_variable = Quotes are optional. This is still valid. โ
โ ... โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
You can define variables as well. Each variable will be used as a string. No
other variable format is currently supported. You can omit the quotes if you
want, <projects> will use the entire string you write after the "=" sign.
To use the variables you need to escape them:
$variable
${{variable}}
Both forms of reference are interpreted equally.
Defined variables go to the global variable pool. You cannot assign a
variable more than once. Hence you cannot redefine a variable in a later
Projectfile (a Projectfile that is processed later according to the walk
order). Redefining a variable will raise an error. Since all variables go
to the global variable pool, you can use a variable in any Projectfile
regardless of which Projectfile defined it. It is possible to use a
variable in the root level Projectfile that is defined in a later
Projectfile.
command header [mandatory]
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โ ... โ
โ my_command|alternative1|alt2: [dependency1, dependency2] โ
โ ... โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
The command header feature allows you to define a command, its alternatives
and its dependencies. The first keyword is the default keyword
for the command. Alternatives are separated with the pipe "|" character.
After the keyword definitions, a colon ":" closes the command header. After
the colon, you can define a list of other commands, that are executed in the
order you defined them before the current command execution.
According to the given example you can invoke your command with the following
syntax inside your project directory:
p my_command
p alternative1
p alt2
All of them will execute the same command body after the dependent commands
(dependency1 and dependency2) are executed first in the given order.
A command cannot be redefined in the same Projectfile twice. If you redefine
a command in another Projectfile, the commands' bodies will be appended to
each other according to the path relationship of these files.
command description [optional]
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โ ... โ
โ my_command: โ
โ """ โ
โ This is a command description. โ
โ """ โ
โ ... โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
The command description will be added to the generated manual. It behaves the
same as the main description, except it has to be indented in some way
(space or tab, the amount doesn't matter).
If a command is redefined in another Projectfile, the command descriptions
will be appended according to the path relationship of these files.
command body [mandatory]
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โ ... โ
โ my_command: โ
โ command1 โ
โ command2 โ
โ ... โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
The command body defines what commands <projects> needs to execute if you
invoke the given command with the "p <command>" syntax inside your project
directory. Commands need to be indented in some way (at least one space).
<projects> will execute all given commands line by line.
Template Projectfile
The following Projectfile can be generated with the `p (-i|--init)` command:
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โ from v1.0.0 โ
โ โ
โ """ โ
โ This is a template Projectfile you have created with the 'p (-i|--init])' โ
โ command. You can use the provided commands 'hello' and 'answer' or it's โ
โ shorter alternatives 'h' and 'ans' or 'a'. ie.: p <command>. โ
โ โ
โ You can start a new paragraph in the descriptions by inserting an empty โ
โ line like this. โ
โ โ
โ Descriptions are useful as they provide a searchable automatically โ
โ generated manual for your project for free. You can invoke this manual โ
โ with the "p" command if you are inside your project directory. โ
โ """ โ
โ โ
โ magic = 42 # variables goes to the global variable space โ
โ โ
โ hello|h: [a] โ
โ """ โ
โ This command will great you. โ
โ โ
โ There is a shorter alternative "h" for the command. It is depending โ
โ on the "a" command which is the alternative of the "answer" command. โ
โ โ
โ If you execute a command with dependencies, it's dependencies will be โ
โ executed first in the defined order. โ
โ """ โ
โ echo "Hi! This is my very own Projectfile." โ
โ โ
โ answer|ans|a: โ
โ """ โ
โ This command will give you the answer for every question. โ
โ โ
โ You can use the long "answer" keyword as well as the shorter "ans" or โ
โ "a" to execute this command. โ
โ โ
โ Inside the Projectfile, you can also refer to a command in another โ
โ command's dependency list by any of it's alternatives. โ
โ """ โ
โ echo "The answer for everything is $magic!" โ
โ # you can also use the ${{magic}} form โ
โ โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
If you use the "p" command inside your project's root directory,projects will
generate a manual page from the Projectfiles you created. The previously
listed Projectfile will result the following manual page assuming that your
project is called "example" (the project name is picked from it's containing
directory's name):
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โ ========================================================================= โ
โ E X A M P L E โ
โ ========================================================================= โ
โ โ
โ This is a template Projectfile you have created with the "p (-i|--init])" โ
โ command. You can use the provided commands 'hello' and 'answer' or it's โ
โ shorter alternatives 'h' and 'ans' or 'a'. ie.: p <command>. โ
โ โ
โ You can start a new paragraph in the descriptions by inserting an empty โ
โ line like this. โ
โ โ
โ Descriptions are useful as they provide a searchable automatically โ
โ generated manual for your project for free. You can invoke this manual โ
โ with the "p" command if you are inside your project directory. โ
โ โ
โ โ
โ answer|ans|a: โ
โ โ
โ This command will give you the answer for every question. โ
โ โ
โ You can use the long "answer" keyword as well as the shorter "ans" or โ
โ "a" to execute this command. โ
โ โ
โ Inside the Projectfile, you can also refer to a command in another โ
โ command's dependency list by any of it's alternatives. โ
โ โ
โ โ
โ hello|h: [a] โ
โ โ
โ This command will great you. โ
โ โ
โ There is a shorter alternative "h" for the command. It is depending โ
โ on the "a" command which is the alternative of the "answer" command. โ
โ โ
โ If you execute a command with dependencies, it's dependencies will be โ
โ executed first in the defined order. โ
โ โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
This manual is displayed in a pager, so you can exit with the "q" key.
Advanced Projectfile examples
Command concatenation
If you have multiple Projectfiles in your project and there are command
headers that are defined in more than one Projectfile, the command bodies
will be appended according to the path relationship of these files.
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฆโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โ $ cat ./Projectfile โ $ cat ./dir/Projectfile โ
โ from v{version} โ from v{version} โ
โ my_command: โ my_command: โ
โ echo "This is the root." โ echo "This is a subdir." โ
โ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฉโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฃ
โ $ p --walk โ
โ [x] . โ
โ [x] dir โ
โ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฃ
โ $ p --list my_command โ
โ cd /home/user/projects/example โ
โ echo "This is the root." โ
โ cd /home/user/projects/example/dir โ
โ echo "This is the a subdir." โ
โ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฃ
โ $ p my_command โ
โ This is the root. โ
โ This is a subdir. โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
What you can notice in this example:
1. You can use the "(-w|--walk)" and "(-l|--list)" commands to get
information about the commands that will be executed by <projects>.
2. The command listing shows that the command bodies were concatenated
according to the walk order (you can check with the "(-w|--walk)"
command).
3. The concatenated command list contains directory change commands (cd)
so every command defined in a Projectfile gets executed in the same
directory level as it's Projectfile's directory level.
4. Due to the directory change commands, you can notice that each command
will execute in the same execution context regardless of the command's
length (number of lines). This is different from the Makefile
conventions, and provides much simpler script writing.
More complex example
There is another feature that can be used to execute post configuration, e.g.
executing commands after all lower order command bodies were executed. This
feature is called recursive separator ("==="). If you place this separator
inside a command's body, and there are other lower level Projectfiles in your
project, the command bodies will be appended in a special, recursive order.
In a Projectfile, all commands before the separator are called the "pre"
commands, and all the commands after the separator are called the "post"
commands. The separator in every command body is optional. If there is no
separator, all the command lines in the command body will be handled as a
"pre" command block. Similarly, if the command body starts with a separator,
the whole body will be used as a post block.
If there are no lower level Projectfiles, and you have a command with a
separated body, the separation will be ignored.
If you have lower level Projectfiles, the base level pre commands will be
executed first, then the execution will jump to the lower level Projectfile.
After the lower level Projectfile's command script gets executed, the
execution will jump back after the base level separator, and the base post
block will be executed.
If the lower level Projectfile has separated command bodies, and there is
yet another lower level Projectfile, the execution will jump down
recursively until the last possible separation is executed.
The following example will demonstrate this behavior:
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฆโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โ $ cat ./Projectfile โ $ cat ./A/Projectfile โ
โ from v{version} โ from v{version} โ
โ my_command: โ my_command: โ
โ echo "pre root" โ echo "pre A" โ
โ === โ === โ
โ echo "post root" โ echo "post A" โ
โ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฌโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฃ
โ $ cat ./A/B/Projectfile โ $ cat ./C/Projectfile โ
โ from v{version} โ from v{version} โ
โ my_command: โ my_command: โ
โ echo "listing inside A/B" โ echo "pre C" โ
โ ls -1 โ === โ
โ echo "done" โ echo "post C" โ
โ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฉโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฃ
โ $ ls -1 A/B โ
โ Projectfile โ
โ file1 โ
โ file2 โ
โ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฃ
โ $ p --walk โ
โ [x] . โ
โ [x] A โ
โ [x] A/B โ
โ [x] C โ
โ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฃ
โ $ p --list my_command โ
โ cd /home/user/projects/example โ
โ echo "pre root" โ
โ cd /home/user/projects/example/A โ
โ echo "pre A" โ
โ cd /home/user/projects/example/A/B โ
โ echo "listing inside A/B" โ
โ ls -1 โ
โ echo "done" โ
โ cd /home/user/projects/example/A โ
โ echo "post A" โ
โ cd /home/user/projects/example/C โ
โ echo "pre C" โ
โ echo "post C" โ
โ cd /home/user/projects/example โ
โ echo "post root" โ
โ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฃ
โ $ p my_command โ
โ pre root โ
โ pre A โ
โ listing inside A/B โ
โ Projectfile โ
โ file1 โ
โ file2 โ
โ done โ
โ post A โ
โ pre C โ
โ post C โ
โ post root โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
What you can notice in this example:
1. The recursive separators work as described. The post commands are
executed after the pre commands for that level and all the recursive
lower level commands have been executed.
2. Commands get executed in the same level where the Projectfile they are
defined in is located.
3. Automatic directory changing command insertion is smart enough to insert
only the absolutely necessary directory changing commands. If there are no
lower level commands, but the recursive separator exists, no directory
changing will be inserted before the post commands. If there are no pre
commands, no directory changing will happen before the recursive
separator content. Same goes for the post commands. If there are no post
commands, no directory changing commands will be inserted after the
recursive separator's content is executed.
TIP: You can always create a template Projectfile with the "(-i|--init)"
command.
'''.format(version=__version__)
return_path = ''
def path_setting_callback(path):
global return_path
return_path = path
def process_command(command_name, data):
command = data['commands'][command_name]
if 'alias' in command:
command = data['commands'][command['alias']]
if 'dependencies' in command:
for dep in command['dependencies']:
process_command(dep, data)
echoed_commands = []
for line in command['script']:
if '&&' in line:
line = line.split('&&')
line = [l.strip() for l in line]
else:
line = [line.strip()]
for l in line:
if l.startswith('echo'):
echoed_commands.append('printf "\033[1;32m> " && {0} && printf "\033[0m"'.format(l))
elif l.startswith('cd'):
p = l.split('cd')
p = p[1].strip()
echoed_commands.append('printf "\033[0;34m@ {0}\033[0m\n" && {1}'.format(p, l))
else:
echoed_commands.append('printf "\033[1;33m$ {0}\033[0m\n" && {0}'.format(l))
concatenated_commands = ' && '.join(echoed_commands)
execute_call(concatenated_commands)
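# Illustrative note (added; not part of the original source). For a command body line such as
#   echo "hello"
# process_command() wraps it as
#   printf "\033[1;32m> " && echo "hello" && printf "\033[0m"
# so echoed output is highlighted in green, "cd" lines are prefixed with a blue "@ <path>"
# line, and any other command is prefixed with a yellow "$ <command>" line before it runs.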
def execute_call(command):
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)  # text mode so readline() returns str on Python 3 as well
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() is not None:
break
sys.stdout.write(nextline)
sys.stdout.flush()
output, error = process.communicate()
exit_code = process.returncode
if exit_code != 0:
sys.stderr.write('\r\033[1;31m[ERROR {}]\033[0;31m Error during execution!\033[0m\n'.format(exit_code))
def execute(args, data, conf):
if args:
for command_name in args:
if command_name in data['commands']:
try:
process_command(command_name, data)
except (KeyboardInterrupt):
sigterm_handle(None, None)
else:
pass
else:
gui.show_project_details(data, conf['max-doc-width'])
def sigterm_handle(signal, frame):
sys.stderr.write('\r\r\033[1;31m[!]\033[0;31m User interrupt..\033[0m\n')
sys.exit(1)
def main(args):
signal.signal(signal.SIGTSTP, sigterm_handle)
try:
conf = config.get()
if not os.path.isdir(conf['projects-path']):
os.mkdir(conf['projects-path'])
print("Projects root was created: {}".format(conf['projects-path']))
print("You can put your projects here.")
with open(os.path.join(os.path.expanduser('~'), '.p-path'), 'w+') as f:
f.write(conf['projects-path'])
return
else:
if not os.listdir(conf['projects-path']):
print("Your projects directory is empty. Nothing to do..")
with open(os.path.join(os.path.expanduser('~'), '.p-path'), 'w+') as f:
f.write(conf['projects-path'])
return
args = args[2:]
if len(args) == 1:
if args[0] in ['-v', '--version']:
print(__version__)
return
elif args[0] in ['-i', '--init']:
if paths.inside_project(conf['projects-path']):
if os.path.isfile('Projectfile'):
print('You already have a Projectfile in this directory.. Nothing to do ;)')
else:
projectfile_content = projectfile.DEFAULT_PROJECTFILE.format(__version__)
with open('Projectfile', 'w+') as f:
f.write(projectfile_content)
print('Projectfile created. Use the "p" command to invoke the manual.')
else:
print('You are not inside any of your projects. Use the "p" command to navigate into one.')
return
elif args[0] in ['-h', '--help']:
pydoc.pager(help_text)
return
elif args[0] in ['-w', '--walk']:
if paths.inside_project(conf['projects-path']):
print(projectfile.get_walk_order(os.getcwd()))
else:
print('You are not inside any of your projects. Use the "p" command to navigate into one.')
return
elif args[0] in ['p']:
handle_project_selection(conf)
return
elif args[0] in ['-l', '--list']:
print('Command name missing after this option. Cannot list the command body..\np (-l|--list) <command>')
return
elif args[0] in ['-md', '--markdown']:
project_root = paths.get_project_root(conf['projects-path'], os.getcwd())
data = projectfile.get_data_for_root(project_root['path'])
data['name'] = project_root['name']
md_content = gui.generate_markdown(data)
with open(os.path.join(project_root['path'], 'README.md'), 'w+') as f:
f.write(md_content)
print("README.md file was generated into your project's root.")
return
if len(args) == 2:
if args[0] in ['-l', '--list']:
command = args[1]
project_root = paths.get_project_root(conf['projects-path'], os.getcwd())
data = projectfile.get_data_for_root(project_root['path'])
if command in data['commands']:
if 'alias' in data['commands'][command]:
command = data['commands'][command]['alias']
for line in data['commands'][command]['script']:
print(line)
else:
print('Invalid command: "{}"\nAvailable commands:'.format(command))
for c in data['commands']:
print(c)
return
elif args[0] in ['-md', '--markdown']:
name = args[1]
project_root = paths.get_project_root(conf['projects-path'], os.getcwd())
data = projectfile.get_data_for_root(project_root['path'])
data['name'] = project_root['name']
md_content = gui.generate_markdown(data)
with open(os.path.join(project_root['path'], name), 'w+') as f:
f.write(md_content)
print("A markdown file named \"{}\" was generated into your project's root.".format(name))
return
if paths.inside_project(conf['projects-path']):
handle_inside_project(args, conf)
else:
handle_project_selection(conf)
except projectfile.error.ProjectfileError as e:
error = e.args[0]
message = 'Projectfile error!\n{}'.format(error['error'])
if 'path' in error:
message = '{}\nPath: {}/Projectfile'.format(message, error['path'])
if 'line' in error:
message = '{}\nLine: {}'.format(message, error['line'])
print(colored(message, 'red'))
sys.exit(-1)
except config.ConfigError as e:
error = e.args[0]
message = 'Config error!\n{}'.format(error)
print(colored(message, 'red'))
sys.exit(-1)
def handle_project_selection(conf):
gui.select_project(
paths.list_dir_for_path(conf['projects-path']),
path_setting_callback
)
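    # return_path is expected to be set globally by path_setting_callback (defined earlier in this module).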
if return_path:
with open(os.path.join(os.path.expanduser('~'), '.p-path'), 'w+') as f:
f.write(os.path.join(os.path.expanduser(conf['projects-path']), return_path))
def handle_inside_project(args, conf):
project_root = paths.get_project_root(conf['projects-path'], os.getcwd())
data = projectfile.get_data_for_root(project_root['path'])
data['name'] = project_root['name']
execute(args, data, conf)
|
python
|
"""Frigate API client."""
from __future__ import annotations
import asyncio
import logging
import socket
from typing import Any, Dict, List, cast
import aiohttp
import async_timeout
from yarl import URL
TIMEOUT = 10
_LOGGER: logging.Logger = logging.getLogger(__name__)
HEADERS = {"Content-type": "application/json; charset=UTF-8"}
# ==============================================================================
# Please do not add HomeAssistant specific imports/functionality to this module,
# so that this library can be optionally moved to a different repo at a later
# date.
# ==============================================================================
class FrigateApiClientError(Exception):
"""General FrigateApiClient error."""
class FrigateApiClient:
"""Frigate API client."""
def __init__(self, host: str, session: aiohttp.ClientSession) -> None:
"""Construct API Client."""
self._host = host
self._session = session
async def async_get_version(self) -> str:
"""Get data from the API."""
return cast(
str,
await self.api_wrapper(
"get", str(URL(self._host) / "api/version"), decode_json=False
),
)
async def async_get_stats(self) -> dict[str, Any]:
"""Get data from the API."""
return cast(
Dict[str, Any],
await self.api_wrapper("get", str(URL(self._host) / "api/stats")),
)
async def async_get_events(
self,
camera: str | None = None,
label: str | None = None,
zone: str | None = None,
after: int | None = None,
before: int | None = None,
limit: int | None = None,
has_clip: bool | None = None,
has_snapshot: bool | None = None,
) -> list[dict[str, Any]]:
"""Get data from the API."""
params = {
"camera": camera,
"label": label,
"zone": zone,
"after": after,
"before": before,
"limit": limit,
"has_clip": int(has_clip) if has_clip is not None else None,
"has_snapshot": int(has_snapshot) if has_snapshot is not None else None,
}
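        # Booleans were coerced to 0/1 above; unset (None) parameters are dropped
        # below before yarl's "%" operator encodes the rest into the query string.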
return cast(
List[Dict[str, Any]],
await self.api_wrapper(
"get",
str(
URL(self._host)
/ "api/events"
% {k: v for k, v in params.items() if v is not None}
),
),
)
async def async_get_event_summary(
self,
has_clip: bool | None = None,
has_snapshot: bool | None = None,
) -> list[dict[str, Any]]:
"""Get data from the API."""
params = {
"has_clip": int(has_clip) if has_clip is not None else None,
"has_snapshot": int(has_snapshot) if has_snapshot is not None else None,
}
return cast(
List[Dict[str, Any]],
await self.api_wrapper(
"get",
str(
URL(self._host)
/ "api/events/summary"
% {k: v for k, v in params.items() if v is not None}
),
),
)
async def async_get_config(self) -> dict[str, Any]:
"""Get data from the API."""
return cast(
Dict[str, Any],
await self.api_wrapper("get", str(URL(self._host) / "api/config")),
)
async def async_get_path(self, path: str) -> Any:
"""Get data from the API."""
return await self.api_wrapper("get", str(URL(self._host) / f"{path}/"))
async def api_wrapper(
self,
method: str,
url: str,
data: dict | None = None,
headers: dict | None = None,
decode_json: bool = True,
) -> Any:
"""Get information from the API."""
if data is None:
data = {}
if headers is None:
headers = {}
try:
async with async_timeout.timeout(TIMEOUT, loop=asyncio.get_event_loop()):
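                # Only GET returns a decoded body; the mutating verbs below are fire-and-forget.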
if method == "get":
response = await self._session.get(
url, headers=headers, raise_for_status=True
)
if decode_json:
return await response.json()
return await response.text()
if method == "put":
await self._session.put(url, headers=headers, json=data)
elif method == "patch":
await self._session.patch(url, headers=headers, json=data)
elif method == "post":
await self._session.post(url, headers=headers, json=data)
except asyncio.TimeoutError as exc:
_LOGGER.error(
"Timeout error fetching information from %s: %s",
url,
exc,
)
raise FrigateApiClientError from exc
except (KeyError, TypeError) as exc:
_LOGGER.error(
"Error parsing information from %s: %s",
url,
exc,
)
raise FrigateApiClientError from exc
except (aiohttp.ClientError, socket.gaierror) as exc:
_LOGGER.error(
"Error fetching information from %s: %s",
url,
exc,
)
raise FrigateApiClientError from exc
|
python
|
import jieba
import jieba.posseg as pseg  # part-of-speech tagging
import jieba.analyse as anls  # keyword extraction
class Fenci:
def __init__(self):
pass
    # Full mode vs. precise mode (controlled by cut_all)
    def cut(self, word, cut_all=True):
        return jieba.cut(word, cut_all=cut_all)
    # Search engine mode
    def cut_for_search(self, word):
        return jieba.cut_for_search(word)
if __name__ == "__main__":
seg_list = Fenci().cut("ไฝ ไธ็นไนไธๅฅฝ็")
    print("[cut]: " + "/ ".join(seg_list))
seg_list = Fenci().cut_for_search("ไฝ ไธ็นไนไธๅฅฝ็")
    print("[cut for search]: " + "/ ".join(seg_list))
|
python
|
'''
Unit tests for the environments.py module.
'''
import boto3
import json
import pytest
from mock import patch
from moto import ( mock_ec2,
mock_s3 )
from deployer.exceptions import ( EnvironmentExistsException,
InvalidCommandException)
import deployer.environments as env
import deployer.tests.MyBoto3 as MyBoto3
fake_boto3 = MyBoto3.MyBoto3()
def mock_run_cmd(args, cwd=None):
print("CWD: {}, Running command: {}".format(cwd, " ".join(args)))
return 0
def mock_inst_is_running(instance_id):
return True
@pytest.fixture
def mock_config(scope="function"):
return {
"terraform": "[email protected]:group/project.git?branch=made_up_branch",
"aws_profile": "tests-random",
"aws_region": "us-east-1",
"availability_zones": [
'us-east-1b',
'us-east-1c',
'us-east-1d',
'us-east-1e'
],
"account_id": "123456789012",
"environment": {
"name": "myenvname",
"version": "a",
},
'tags': {
'system_type' : 'mock_product'
},
"env_name": "myenvname-a",
"tf_state": "myenvname-a.tfstate",
"tf_state_bucket": "123456789012-myproj-tfstate",
"project_config": "123456789012-myproj-data",
"project": 'myproj',
"tfvars" : '/tmp/test_tmp_dir/vars.tf',
"tf_root": '/tmp/test_tmp_dir/terraform',
"tmpdir" : '/tmp/test_tmp_dir'
}
@mock_ec2
def mock_vpcs(scope="function"):
ec2c = boto3.client('ec2',
region_name='us-east-1',
aws_access_key_id='',
aws_secret_access_key='',
aws_session_token='')
vpc1 = ec2c.create_vpc(CidrBlock='10.1.0.0/16').get('Vpc').get('VpcId')
vpc2 = ec2c.create_vpc(CidrBlock='10.2.0.0/16').get('Vpc').get('VpcId')
vpc3 = ec2c.create_vpc(CidrBlock='10.3.0.0/16').get('Vpc').get('VpcId')
ec2c.create_tags(Resources = [ vpc1 ],
Tags=[ {'Key':'Name',
'Value' : 'myproj-myenvname-a'},
{'Key':'env',
'Value' : 'myenvname-a'} ])
ec2c.create_tags(Resources = [ vpc2 ],
Tags=[ {'Key':'Name',
'Value' : 'myproj-myenvname-b'},
{'Key':'env',
'Value' : 'myenvname-b'} ])
ec2c.create_tags(Resources = [ vpc3 ],
Tags=[ {'Key':'Name',
'Value' : 'myproj-myenvname-c'},
{'Key':'env',
'Value' : 'myenvname-c'} ])
return ec2c
@mock_s3
@mock_ec2
def test_create_env_exists(mock_config):
expected_arn = [ "arn:aws:ec2:us-east-1:419934374614:instance/i-c3bef428" ]
expected_msg = "\n\nAn environment with the name {} already exists."
expected_msg += "\nPlease tear it down before trying to rebuild."
expected_msg += "\n\n{}".format(json.dumps(expected_arn, indent=4))
env_name = mock_config['env_name']
if 'tags' in mock_config and 'system_type' in mock_config['tags']:
env_name = "-".join([mock_config['tags']['system_type'], env_name ])
s3client = boto3.client('s3')
s3client.create_bucket(Bucket="123456789012-myproj-tfstate")
with pytest.raises(EnvironmentExistsException) as e:
ec2c = boto3.client('ec2')
vpc1 = ec2c.create_vpc(CidrBlock='10.1.0.0/16').get('Vpc').get('VpcId')
ec2c.create_tags(Resources = [ vpc1 ],
Tags=[ {'Key':'Name',
'Value' : 'myproj-myenvname-a'},
{'Key':'env',
'Value' : 'myenvname-a'},
{'Key' : 'system_type',
'Value' : 'mock_product'} ])
with patch('deployer.aws.instance_is_running', mock_inst_is_running):
with patch('deployer.utils.run_command', mock_run_cmd):
with patch('deployer.aws.boto3', fake_boto3):
env.create(mock_config)
from termcolor import colored
assert(e.value.args[0] == colored(expected_msg.format(env_name), 'red'))
return
@mock_s3
@mock_ec2
def test_create_env_does_not_exist(mock_config):
mock_config['environment']['name'] = 'myotherenvname'
mock_config['environment']['version'] = 'z'
s3client = boto3.client('s3')
s3client.create_bucket(Bucket="123456789012-myproj-tfstate")
with patch('deployer.utils.run_command', mock_run_cmd):
with patch('deployer.aws.boto3', fake_boto3):
assert env.create(mock_config)
return
def test_precheck_valid_keys(mock_config):
actions = [ 'create', 'destroy' ]
for action in actions:
with patch('deployer.utils.run_command', mock_run_cmd):
env._precheck(mock_config, action)
return
def test_precheck_invalid_key(mock_config):
with patch('deployer.utils.run_command', mock_run_cmd):
with pytest.raises(InvalidCommandException):
env._precheck(mock_config, 'invalid_command')
return
@mock_ec2
def test_list_deployed_environment_versions(mock_config):
mock_vpcs()
env_name = mock_config['environment']['name']
with patch('deployer.aws.boto3', fake_boto3):
existing_env_versions = env.list_deployed_environment_versions(env_name)
assert existing_env_versions == [ 'a', 'b', 'c' ]
return
@mock_ec2
def test_get_next_env_version(mock_config):
mock_vpcs()
env_name = mock_config['environment']['name']
expected = 'd'
with patch('deployer.aws.boto3', fake_boto3):
with patch('deployer.aws.instance_is_running', mock_inst_is_running):
next_version = env.get_next_version(env_name)
assert expected == next_version
|
python
|
#!/usr/bin/env python3
# coding: utf-8
# Re-wrap stdout so that printing Japanese text does not raise an encoding error
import sys
import io
sys.stdout = io.TextIOWrapper( sys.stdout.buffer, encoding='utf-8' )
# Library for reading form data when running as a CGI script
import cgi
form_data = cgi.FieldStorage( keep_blank_values = True )
# Library for connecting to the MySQL database
import MySQLdb
con = None
cur = None
# Prints the HTML of the top page
def print_html():
    # Start of html
    print( '<!DOCTYPE html>' )
    print( '<html>' )
    # head section
    print( '<head>' )
    print( '<meta charset="UTF-8">' )
    print( '</head>' )
    # Start of body
    print( '<body>' )
    print( '<p>ใฒใจใใจๆฒ็คบๆฟ</p>' )
    # Print the post form
print( '<form action="" method="POST">' )
print( '<input type="hidden" name="method_type" value="tweet">' )
print( '<input type="text" name="poster_name" value="" placeholder="ใชใพใ">' )
print( '<br>' )
print( '<textarea name="body_text" value="" placeholder="ๆฌๆ"></textarea>' )
print( '<input type="submit" value="ๆ็จฟ">' )
print( '</form>' )
    # Horizontal rule
    print( '<hr>' )
    # Build the SQL statement that fetches the list of posts
    sql = "select * from posts"
    # Execute the SQL
    cur.execute( sql )
    # Fetch every record of the retrieved posts
    rows = cur.fetchall()
    # Loop over the fetched records one at a time
    for row in rows:
        print( '<div class="meta">' )
        # str() guards against non-string columns (id is numeric, created_at is a datetime)
        print( '<span class="id">' + str( row[ 'id' ] ) + '</span>' )
        print( '<span class="name">' + row[ 'name' ] + '</span>' )
        print( '<span class="date">' + str( row[ 'created_at' ] ) + '</span>' )
        print( '</div>' )
        print( '<div class="message"><span>' + row[ 'body' ] + '</span></div>' )
    # Close body
    print( '</body>' )
    # Close html
    print( '</html>' )
# Handles requests that arrive via the form
def proceed_methods():
    # Get the form type (only posting is supported for now)
    method = form_data[ 'method_type' ].value
    # If it was a tweet (post)
    if( method == 'tweet' ):
        # Get the poster's name
        poster_name = form_data[ 'poster_name' ].value
        # Get the post body
        body_text = form_data[ 'body_text' ].value
        # Build the SQL statement that writes the post to the database
        sql = 'insert into posts ( name, body ) values ( %s, %s )'
        # Execute the SQL with the extracted name and body
        cur.execute( sql, ( poster_name, body_text ) )
        con.commit()
        # On success, print a page that automatically redirects back to the top page
print( '<!DOCTYPE html>' )
print( '<html>' )
print( ' <head>' )
print( ' <meta http-equiv="refresh" content="5; url=./">' )
print( ' </head>' )
print( ' <body>' )
        print( '        ๅฆ็ใๆๅใใพใใใ5็งๅพใซๅใฎใใผใธใซๆปใใพใใ' )
print( ' </body>' )
print( '</html>' )
# Runs the main processing
def main():
    # Boilerplate required to run as a CGI script
    print( 'Content-Type: text/html; charset=utf-8' )
    print( '' )
    # Connect to the database here
global con, cur
try:
con = MySQLdb.connect(
host = 'xxx.xxx.xxx.xxx',
user = 'yourname',
passwd = 'yourpassword',
db = 'yourdbname',
use_unicode = True,
charset = 'utf8'
)
except MySQLdb.Error as e:
print( 'ใใผใฟใใผในๆฅ็ถใซๅคฑๆใใพใใใ' )
print( e )
        # If the database connection failed, stop processing here
exit()
cur = con.cursor( MySQLdb.cursors.DictCursor )
    # Decide whether the request came through the form
    if( 'method_type' in form_data ):
        # For form requests, process according to the form type
        proceed_methods()
    else:
        # Otherwise display the normal top page
        print_html()
    # All processing is done, so finally disconnect from the database
    cur.close()
    con.close()
# Run only when executed as a Python script
if __name__ == "__main__":
    # Run main()
    main()
|
python
|
#!/usr/bin/env python3
# coding: utf-8
import os
import sys
import re
import numpy as np
#==============================================================================#
def atomgroup_header(atomgroup):
"""
    Return a string containing info about the AtomGroup:
    the total number of atoms, the residues it contains,
    and the count of each residue.
Useful for writing output file headers.
"""
unq_res, n_unq_res = np.unique(
atomgroup.residues.resnames, return_counts=True)
return "{} atom(s): {}".format(
atomgroup.n_atoms, ", ".join(
"{} {}".format(*i) for i in np.vstack([n_unq_res, unq_res]).T))
def fill_template(template, vars, s = "<", e = ">"):
"""
Search and replace tool for filling template files.
Replaces text bounded by the delimiters `s` and `e`
with values found in the lookup dictionary `vars`.
"""
    exp = s + r"\w*" + e
matches = re.findall(exp, template)
for m in matches:
key = m[1:-1]
template = template.replace(m, str(vars.get(key, m)))
return template
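# Example (hypothetical values): fill_template("Hello <name>!", {"name": "world"})
# yields "Hello world!"; placeholders with no matching key are left untouched.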
def save_path(prefix = ""):
"""Returns a formatted output location for a given file prefix."""
if prefix != "" and prefix[-1] != "/":
prefix += "_"
output = prefix if os.path.dirname(prefix) else os.path.join(os.getcwd(), prefix)
if not os.path.exists(os.path.dirname(output)):
        os.makedirs(os.path.dirname(output))
return output
#==============================================================================#
def nearest_power_two(n):
"""
    Return the smallest power of two 2**i such that n <= 2**i.
    """
    current_exp = int(np.ceil(np.log2(n)))
    if n == 2**current_exp:
        n_fft = n
    elif n < 2**current_exp:
        n_fft = 2**current_exp
    else:
        n_fft = 2**(current_exp+1)
    return n_fft
def zero_pad(x, n):
"""
Pad an array to length `n` with zeros.
If the original array length is greater than `n`,
    a copy of the original array is returned with its length unchanged.
"""
nx = len(x)
if n < nx:
n = nx
new = np.zeros((n, *x.shape[1:]), dtype = x.dtype)
new[:nx] = x
return new
def bin_data(arr, nbins, after = 1, log = True):
"""
Averages array values in bins for easier plotting.
"""
# Determine indices to average between
if log:
bins = np.logspace(np.log10(after), np.log10(len(arr)-1), nbins+1).astype(int)
else:
bins = np.linspace(after, len(arr), nbins+1).astype(int)
bins = np.unique(np.append(np.arange(after), bins))
avg = np.zeros(len(bins)-1, dtype = arr.dtype)
for i in range(len(bins)-1):
avg[i] = np.mean(arr[bins[i]:bins[i+1]])
return avg
|
python
|
from pydantic import BaseModel
class ConsumerResponse(BaseModel):
topic: str
timestamp: str
product_name: str
product_id: int
success: bool
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `te_python` module."""
import pytest
import requests
from te_python import te_python
def test_te_python_initialization():
response = te_python.email_get_details('a53b7747d6bd3f59076d63469d92924e00f407ff472e5a539936453185ecca6c')
assert isinstance(response, dict)
def test_update_api_url():
# make a request to localhost (which should fail)... this makes sure that the base_api_url is being properly used
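    # NOTE: this assumes the module's base API URL has already been pointed at localhost
    # (the exact attribute name is not shown here); otherwise the call may not raise.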
with pytest.raises(requests.ConnectionError):
te_python.email_get_details('a53b7747d6bd3f59076d63469d92924e00f407ff472e5a539936453185ecca6c')
|
python
|
from django.db import models
class Person(models.Model):
first_name = models.CharField(max_length=64)
surname = models.CharField(max_length=64)
class Meta:
app_label = 'person'
db_table = 'person'
ordering = ('surname', 'first_name')
|
python
|
from util.Tile import Tile
from util.Button import Button
from util.Maze import Maze
from algorithms.BFS import BFS
from algorithms.DFS import DFS
from algorithms.GFS import GFS
from algorithms.AStar import AStar
from math import floor
import pygame
class Grid:
def __init__(self, width, height, tile_w, colorPalette,
line_w = 1, menuOffset = 0, txtSize = 42, nSolutions = 4, fpsFast = 45, fpsSlow = 10):
self.WIDTH = width
self.HEIGHT = height
self.colorPalette = colorPalette
self.Y_OFFSET = menuOffset
self.LINE_W = line_w
self.nSolutions = nSolutions
self.fpsFast = fpsFast
self.fpsSlow = fpsSlow
        self.TILE_W = tile_w # width and height should be divisible by tile_w so the grid divides evenly
self.RECT_OFF = floor(self.LINE_W / 2) # offset due to the line's width
# array of tiles
self.tiles = []
for h in range(0, self.HEIGHT, self.TILE_W):
row = []
for w in range(0, self.WIDTH, self.TILE_W):
# menu offset is included in the height
row.append(Tile(w, h + self.Y_OFFSET, self.TILE_W, self.RECT_OFF))
self.tiles.append(row)
# array of buttons
pygame.font.init()
bigFont = pygame.font.SysFont('Calibri', txtSize)
self.algButtons = [
Button(110, 50, "Depth FS", bigFont, self.colorPalette),
Button(315, 50, "Breadth FS", bigFont, self.colorPalette),
Button(530, 50, "Greedy FS", bigFont, self.colorPalette),
Button(740, 50, "A-Star", bigFont, self.colorPalette)
]
smallFont = pygame.font.SysFont('Calibri', floor(txtSize / 2))
self.otherButtons = {
"Maze" : Button(110, 150, "Generate Maze", smallFont, self.colorPalette),
"Clear" : Button(280, 150, "Clear", smallFont, self.colorPalette),
"Slow" : Button(725, 150, "Slow", smallFont, self.colorPalette),
"Fast" : Button(800, 150, "Fast", smallFont, self.colorPalette)
}
self.otherButtons["Slow"].highlightTrue()
self.FPS = self.fpsSlow
# origin and target tile -> for dragging them
self.originTile = self.tiles[0][0]
self.originTile.updateState("origin")
self.targetTile = self.tiles[-1][-1]
self.targetTile.updateState("target")
# maze generator
self.mazeGen = None
# for mouse dragging
self.leftBeingClicked = False
self.rightBeingClicked = False
self.originDragged = False
self.targetDragged = False
# algorithm selected stores the selected button representing the choice of algorithm
self.algorithmSelected = None
self.updateAlgorithm(self.algButtons[0])
# algorithm is the algorithm object in itself
self.algorithm = None
self.solved = False
def draw(self, screen):
self.drawGrid(screen)
self.drawTiles(screen)
self.drawButtons(screen)
def update(self, state):
# DRAW STATE
if state == "draw":
(x, y) = pygame.mouse.get_pos()
(xGrid, yGrid) = self.pixelsToGrid(x, y)
if y > self.Y_OFFSET:
clickedTile = self.tiles[yGrid][xGrid]
if self.leftBeingClicked and clickedTile != self.originTile and clickedTile != self.targetTile:
clickedTile.updateState("wall")
elif self.rightBeingClicked and clickedTile != self.originTile and clickedTile != self.targetTile:
clickedTile.updateState("tile")
elif self.originDragged and clickedTile != self.targetTile:
self.originTile.updateState("tile")
self.originTile = clickedTile
self.originTile.updateState("origin")
elif self.targetDragged and clickedTile != self.originTile:
self.targetTile.updateState("tile")
self.targetTile = clickedTile
self.targetTile.updateState("target")
# SOLVE STATE
elif state == "solve":
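            # stepSearch() is assumed to advance the search by one node per frame and
            # to return 1 once the target tile has been reached.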
if self.algorithm.stepSearch() == 1:
self.solved = True
self.updateTilesState(self.algorithm.seen, "seen")
self.updateTilesState(self.algorithm.path, "path")
self.updateTilesState([self.algorithm.getCurrent()], "current")
def drawGrid(self, screen):
# + 1 so that the last lines are included
for w in range(0, self.WIDTH + 1, self.TILE_W):
pygame.draw.line(screen, self.colorPalette["DARKBLUE"], (w, self.Y_OFFSET), (w, self.HEIGHT + self.Y_OFFSET), self.LINE_W)
for h in range(self.Y_OFFSET, self.HEIGHT + self.Y_OFFSET + 1, self.TILE_W):
pygame.draw.line(screen, self.colorPalette["DARKBLUE"], (0, h), (self.WIDTH, h), self.LINE_W)
def drawTiles(self, screen):
for row in self.tiles:
for tile in row:
state = tile.getState()
color = self.colorPalette["GRAY"] # default is gray
# with python 3.10 a switch case statement would work
if state == "wall":
color = self.colorPalette["DARKBLUE"]
elif state == "seen":
color = self.colorPalette["BLUE"]
elif state == "path":
color = self.colorPalette["MINT"]
elif state == "current":
color = self.colorPalette["ORANGE"]
elif state == "origin":
color = self.colorPalette["GREEN"]
elif state == "target":
color = self.colorPalette["RED"]
pygame.draw.rect(screen, color, tile.getRect())
def drawButtons(self, screen):
for button in self.algButtons:
button.draw(screen)
for key in self.otherButtons:
self.otherButtons[key].draw(screen)
    def clickDown(self, x, y, left, state): # update tiles according to a click down and an x,y coord of the mouse
# left argument is true if it was a left click, false if it was a right click
if (y < self.Y_OFFSET):
self.menuClick(x, y, state)
elif state == "draw":
if self.originTile.wasItClicked(x, y):
self.originDragged = True
elif self.targetTile.wasItClicked(x, y):
self.targetDragged = True
elif left:
self.leftBeingClicked = True
else:
self.rightBeingClicked = True
def menuClick(self, x, y, state):
if state == "draw":
for button in self.algButtons:
if button.clicked(x, y):
self.updateAlgorithm(button)
return
if state == "draw":
if self.otherButtons["Maze"].clicked(x, y): # generate a maze
self.mazeGen = Maze(
len(self.tiles[0]),
len(self.tiles),
self.pixelsToGrid(*self.originTile.getPosition()),
self.pixelsToGrid(*self.targetTile.getPosition())
)
newMap = self.mazeGen.createMaze(self.nSolutions)
self.changeToNewMap(newMap)
elif self.otherButtons["Clear"].clicked(x, y):
self.changeToNewMap() # leave empty to clear it
if self.otherButtons["Slow"].clicked(x, y):
self.FPS = self.fpsSlow
self.otherButtons["Slow"].highlightTrue()
self.otherButtons["Fast"].highlightFalse()
elif self.otherButtons["Fast"].clicked(x, y):
self.FPS = self.fpsFast
self.otherButtons["Fast"].highlightTrue()
self.otherButtons["Slow"].highlightFalse()
def changeToNewMap(self, newMap = None):
        if newMap is None:
for h in range(len(self.tiles)):
for w in range(len(self.tiles[0])):
if self.tiles[h][w] != self.originTile and self.tiles[h][w] != self.targetTile:
self.tiles[h][w].updateState("tile")
else:
for h in range(len(newMap)):
for w in range(len(newMap[h])):
if self.tiles[h][w] != self.originTile and self.tiles[h][w] != self.targetTile:
if newMap[h][w]:
self.tiles[h][w].updateState("wall")
else:
self.tiles[h][w].updateState("tile")
def updateAlgorithm(self, newAlgorithm):
for button in self.algButtons:
button.highlightFalse()
newAlgorithm.highlightTrue()
self.algorithmSelected = newAlgorithm.text
def clickUp(self):
self.leftBeingClicked = False
self.rightBeingClicked = False
self.originDragged = False
self.targetDragged = False
def defineAlgorithm(self):
# the map is not solved
self.solved = False
# if this is not the first time running an algorithm we have to clean all non wall / tile tiles
self.removePathGrid()
originPos = self.pixelsToGrid(*self.originTile.getPosition())
targetPos = self.pixelsToGrid(*self.targetTile.getPosition())
if self.algorithmSelected == "Breadth FS":
self.algorithm = BFS(originPos, targetPos, self.getGrid())
elif self.algorithmSelected == "Depth FS":
self.algorithm = DFS(originPos, targetPos, self.getGrid())
elif self.algorithmSelected == "Greedy FS":
self.algorithm = GFS(originPos, targetPos, self.getGrid())
elif self.algorithmSelected == "A-Star":
self.algorithm = AStar(originPos, targetPos, self.getGrid())
def removePathGrid(self):
for row in self.tiles:
for tile in row:
tmp = tile.getState()
if tmp != "wall" and tmp != "origin" and tmp != "target":
tile.updateState("tile")
def getGrid(self):
grid = []
for row in self.tiles:
boolRow = []
for tile in row:
if tile.getState() == "wall":
boolRow.append(True)
else:
boolRow.append(False)
grid.append(boolRow)
return grid
def updateTilesState(self, coords, state):
for coord in coords:
(x, y) = coord
if self.tiles[y][x] != self.originTile and self.tiles[y][x] != self.targetTile:
self.tiles[y][x].updateState(state)
def pixelsToGrid(self, x, y):
return (floor(x / self.TILE_W), floor((y - self.Y_OFFSET) / self.TILE_W))
|
python
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import operator
import os
import time
from google.api_core.exceptions import ResourceExhausted
from google.cloud import bigquery_datatransfer_v1
from google.protobuf.timestamp_pb2 import Timestamp
RETRY_DELAY = 10
class TimeoutError(Exception):
"""Raised when the BQ transfer jobs haven't all finished within the allotted time"""
pass
def main(
source_project_id: str,
source_bq_dataset: str,
target_project_id: str,
target_bq_dataset: str,
service_account: str,
timeout: int,
):
client = bigquery_datatransfer_v1.DataTransferServiceClient()
transfer_config_name = f"{source_project_id}-{source_bq_dataset}-copy"
existing_config = find_existing_config(
client, target_project_id, transfer_config_name
)
if not existing_config:
existing_config = create_transfer_config(
client,
source_project_id,
source_bq_dataset,
target_project_id,
target_bq_dataset,
transfer_config_name,
service_account,
)
trigger_config(client, existing_config)
wait_for_completion(client, existing_config, timeout)
def find_existing_config(
client: bigquery_datatransfer_v1.DataTransferServiceClient,
gcp_project: str,
transfer_config_name: str,
) -> bigquery_datatransfer_v1.types.TransferConfig:
all_transfer_configs = client.list_transfer_configs(
request=bigquery_datatransfer_v1.types.ListTransferConfigsRequest(
parent=f"projects/{gcp_project}"
)
)
return next(
(
config
for config in all_transfer_configs
if config.display_name == transfer_config_name
),
None,
)
def wait_for_completion(
client: bigquery_datatransfer_v1.DataTransferServiceClient,
running_config: bigquery_datatransfer_v1.types.TransferConfig,
timeout: int,
) -> None:
_start = int(time.time())
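    # Poll the most recent transfer run until it reports SUCCEEDED or the timeout elapses.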
while True:
latest_runs = []
latest_runs.append(latest_transfer_run(client, running_config))
logging.info(f"States: {[str(run.state) for run in latest_runs]}")
# Mark as complete when all runs have succeeded
if all([str(run.state) == "TransferState.SUCCEEDED" for run in latest_runs]):
return
# Stop the process when it's longer than the allotted time
if int(time.time()) - _start > timeout:
raise TimeoutError
time.sleep(RETRY_DELAY)
def latest_transfer_run(
client: bigquery_datatransfer_v1.DataTransferServiceClient,
config: bigquery_datatransfer_v1.types.TransferConfig,
) -> bigquery_datatransfer_v1.types.TransferRun:
transfer_runs = client.list_transfer_runs(parent=config.name)
return max(transfer_runs, key=operator.attrgetter("run_time"))
def create_transfer_config(
client: bigquery_datatransfer_v1.DataTransferServiceClient,
source_project_id: str,
source_dataset_id: str,
target_project_id: str,
target_dataset_id: str,
display_name: str,
service_account: str,
) -> bigquery_datatransfer_v1.types.TransferConfig:
transfer_config = bigquery_datatransfer_v1.TransferConfig(
destination_dataset_id=target_dataset_id,
display_name=display_name,
data_source_id="cross_region_copy",
dataset_region="US",
params={
"overwrite_destination_table": True,
"source_project_id": source_project_id,
"source_dataset_id": source_dataset_id,
},
schedule_options=bigquery_datatransfer_v1.ScheduleOptions(
disable_auto_scheduling=True
),
)
request = bigquery_datatransfer_v1.types.CreateTransferConfigRequest(
parent=client.common_project_path(target_project_id),
transfer_config=transfer_config,
service_account_name=service_account,
)
return client.create_transfer_config(request=request)
def trigger_config(
client: bigquery_datatransfer_v1.DataTransferServiceClient,
config: bigquery_datatransfer_v1.types.TransferConfig,
) -> None:
now = time.time()
seconds = int(now)
nanos = int((now - seconds) * pow(10, 9))
try:
client.start_manual_transfer_runs(
request=bigquery_datatransfer_v1.types.StartManualTransferRunsRequest(
parent=config.name,
requested_run_time=Timestamp(seconds=seconds, nanos=nanos),
)
)
except ResourceExhausted:
logging.info(
f"Transfer job is currently running for config ({config.display_name}) {config.name}."
)
return
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
main(
source_project_id=os.environ["SOURCE_PROJECT_ID"],
source_bq_dataset=os.environ["SOURCE_BQ_DATASET"],
target_project_id=os.environ["TARGET_PROJECT_ID"],
target_bq_dataset=os.environ["TARGET_BQ_DATASET"],
service_account=os.environ["SERVICE_ACCOUNT"],
timeout=int(os.getenv("TIMEOUT", 1200)),
)
|
python
|
from discord.ext import commands
from discord_bot.bot import Bot
class Admin(commands.Cog):
"""Admin commands that only bot owner can run"""
def __init__(self, bot: Bot):
self.bot = bot
@commands.command(name="shutdown", hidden=True)
@commands.is_owner()
    async def shutdown(self, ctx: commands.Context):
"""Closes all connections and shuts down the bot"""
await ctx.send("Shutting down the bot...")
await self.bot.close()
@commands.group(name="extension", aliases=["ext"], hidden=True)
@commands.is_owner()
async def ext(self, ctx: commands.Context):
"""A command to load, reload, unload extensions."""
if ctx.invoked_subcommand is None:
await ctx.reply("This command requires a subcommand to be passed")
@ext.command(name="load", aliases=["l"])
async def load(self, ctx: commands.Context, arg: str):
"""A command to load extensions."""
try:
self.bot.load_extension(f"discord_bot.cogs.{arg}")
await ctx.reply(f"Successfully loaded extension {arg}")
except Exception as e:
await ctx.reply(f"Failed to load ext {arg}\n{e}")
@ext.command(name="unload", aliases=["u"])
async def unload(self, ctx: commands.Context, arg: str):
"""A command to unload extensions"""
try:
self.bot.unload_extension(f"discord_bot.cogs.{arg}")
await ctx.reply(f"Successfully unloaded extension {arg}")
except Exception as e:
await ctx.reply(f"Failed to unload ext {arg}\n{e}")
@ext.command(name="reload", aliases=["r"])
async def reload(self, ctx: commands.Context, arg: str):
"""A command to reload extensions."""
try:
self.bot.reload_extension(f"discord_bot.cogs.{arg}")
await ctx.reply(f"Successfully reloaded extension {arg}")
except Exception as e:
await ctx.reply(f"Failed to reload ext {arg}\n{e}")
def setup(bot: Bot):
bot.add_cog(Admin(bot))
|
python
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ResourceMetadata(Model):
"""Represents a Resource metadata.
:param kind: Possible values include: 'bearerAuthenticationConnection',
'sshKeyAuthenticationConnection', 'apiKeyAuthenticationConnection',
'basicAuthenticationConnection', 'firstPartyADConnection',
'amazonS3Connection', 'adlsGen2', 'd365Sales', 'd365Marketing',
'attachCds', 'ftp', 'facebookAds', 'amlWorkspace', 'mlStudioWebservice',
'adRoll', 'rollWorks', 'constantContact', 'campaignMonitor', 'http',
'dotDigital', 'mailchimp', 'linkedIn', 'googleAds', 'marketo',
'microsoftAds', 'omnisend', 'sendGrid', 'sendinblue', 'activeCampaign',
'autopilot', 'klaviyo', 'snapchat', 'powerBI', 'azureSql', 'synapse'
:type kind: str or ~dynamics.customerinsights.api.models.enum
:param resource_id: Gets the Id of the resource.
:type resource_id: str
:param operation_id: Gets the Id of the operation being performed on the
resource.
:type operation_id: str
:param name: Gets the Name of the resource.
:type name: str
:param description: Gets the Description of the resource.
:type description: str
:param key_vault_metadata_id: MetadataId for Linked KeyVaultMetadata
:type key_vault_metadata_id: str
:param mapped_secrets:
:type mapped_secrets:
~dynamics.customerinsights.api.models.MappedSecretMetadata
:param version: Version number of this object.
:type version: long
:param updated_by: UPN of the user who last updated this record.
:type updated_by: str
:param updated_utc: Time this object was last updated.
:type updated_utc: datetime
:param created_by: Email address of the user who created this record.
:type created_by: str
:param created_utc: Time this object was initially created.
:type created_utc: datetime
:param instance_id: Customer Insights instance id associated with this
object.
:type instance_id: str
"""
_attribute_map = {
'kind': {'key': 'kind', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'operation_id': {'key': 'operationId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'key_vault_metadata_id': {'key': 'keyVaultMetadataId', 'type': 'str'},
'mapped_secrets': {'key': 'mappedSecrets', 'type': 'MappedSecretMetadata'},
'version': {'key': 'version', 'type': 'long'},
'updated_by': {'key': 'updatedBy', 'type': 'str'},
'updated_utc': {'key': 'updatedUtc', 'type': 'iso-8601'},
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_utc': {'key': 'createdUtc', 'type': 'iso-8601'},
'instance_id': {'key': 'instanceId', 'type': 'str'},
}
def __init__(self, *, kind=None, resource_id: str=None, operation_id: str=None, name: str=None, description: str=None, key_vault_metadata_id: str=None, mapped_secrets=None, version: int=None, updated_by: str=None, updated_utc=None, created_by: str=None, created_utc=None, instance_id: str=None, **kwargs) -> None:
super(ResourceMetadata, self).__init__(**kwargs)
self.kind = kind
self.resource_id = resource_id
self.operation_id = operation_id
self.name = name
self.description = description
self.key_vault_metadata_id = key_vault_metadata_id
self.mapped_secrets = mapped_secrets
self.version = version
self.updated_by = updated_by
self.updated_utc = updated_utc
self.created_by = created_by
self.created_utc = created_utc
self.instance_id = instance_id
|
python
|
from ctypes import CDLL, sizeof, create_string_buffer
def test_hello_world(workspace):
workspace.src('greeting.c', r"""
#include <stdio.h>
void greet(char *somebody) {
printf("Hello, %s!\n", somebody);
}
""")
workspace.src('hello.py', r"""
import ctypes
lib = ctypes.CDLL('./greeting.so') # leading ./ is required
lib.greet(b'World')
""")
# -fPIC: Position Independent Code, -shared: shared object (so)
workspace.run('gcc -fPIC -shared -o greeting.so greeting.c')
r = workspace.run('python hello.py')
assert r.out == 'Hello, World!'
def test_mutable_buffer(workspace):
workspace.src('mylib.c', r"""\
#include <ctype.h>
void upper(char *chars, int len) {
for (int i = 0; i <= len; i++)
*(chars + i) = toupper(*(chars + i));
}
""")
workspace.run('gcc -fPIC -shared -o mylib.so mylib.c')
chars = b'abc123'
buffer = create_string_buffer(chars)
assert sizeof(buffer) == 7 # len(chars) + 1 (NUL-terminated)
assert buffer.raw == b'abc123\x00' # raw: memory block content
assert buffer.value == b'abc123' # value: as NUL-terminated string
lib = CDLL('./mylib.so')
lib.upper(buffer, len(chars))
assert buffer.value == b'ABC123' # changed in-place
assert chars == b'abc123' # unchanged
|
python
|
from gui import GUI
program = GUI()
program.run()
|
python
|
#!/usr/bin/env python3
#
# RoarCanvasCommandsEdit.py
# Copyright (c) 2018, 2019 Lucio Andrรฉs Illanes Albornoz <[email protected]>
#
from GuiFrame import GuiCommandDecorator, GuiCommandListDecorator, GuiSelectDecorator
import wx
class RoarCanvasCommandsEdit():
@GuiCommandDecorator("Hide assets window", "Hide assets window", ["toolHideAssetsWindow.png"], None, False)
def canvasAssetsWindowHide(self, event):
self.parentFrame.assetsWindow.Show(False)
self.parentFrame.menuItemsById[self.canvasAssetsWindowHide.attrDict["id"]].Enable(False)
self.parentFrame.menuItemsById[self.canvasAssetsWindowShow.attrDict["id"]].Enable(True)
toolBar = self.parentFrame.toolBarItemsById[self.canvasAssetsWindowHide.attrDict["id"]][0]
toolBar.EnableTool(self.canvasAssetsWindowHide.attrDict["id"], False)
toolBar.EnableTool(self.canvasAssetsWindowShow.attrDict["id"], True)
toolBar.Refresh()
@GuiCommandDecorator("Show assets window", "Show assets window", ["toolShowAssetsWindow.png"], None, False)
def canvasAssetsWindowShow(self, event):
self.parentFrame.assetsWindow.Show(True)
self.parentFrame.menuItemsById[self.canvasAssetsWindowHide.attrDict["id"]].Enable(True)
self.parentFrame.menuItemsById[self.canvasAssetsWindowShow.attrDict["id"]].Enable(False)
toolBar = self.parentFrame.toolBarItemsById[self.canvasAssetsWindowHide.attrDict["id"]][0]
toolBar.EnableTool(self.canvasAssetsWindowHide.attrDict["id"], True)
toolBar.EnableTool(self.canvasAssetsWindowShow.attrDict["id"], False)
toolBar.Refresh()
@GuiSelectDecorator(0, "Solid brush", "Solid brush", None, None, True)
def canvasBrush(self, f, idx):
def canvasBrush_(self, event):
pass
setattr(canvasBrush_, "attrDict", f.attrList[idx])
setattr(canvasBrush_, "isSelect", True)
return canvasBrush_
@GuiCommandListDecorator(0, "Decrease brush width", "Decrease brush width", ["toolDecrBrushW.png"], None, None)
@GuiCommandListDecorator(1, "Decrease brush height", "Decrease brush height", ["toolDecrBrushH.png"], None, None)
@GuiCommandListDecorator(2, "Decrease brush size", "Decrease brush size", ["toolDecrBrushHW.png"], [wx.ACCEL_CTRL, ord("-")], None)
@GuiCommandListDecorator(3, "Increase brush width", "Increase brush width", ["toolIncrBrushW.png"], None, None)
@GuiCommandListDecorator(4, "Increase brush height", "Increase brush height", ["toolIncrBrushH.png"], None, None)
@GuiCommandListDecorator(5, "Increase brush size", "Increase brush size", ["toolIncrBrushHW.png"], [wx.ACCEL_CTRL, ord("+")], None)
def canvasBrushSize(self, f, dimension, incrFlag):
def canvasBrushSize_(event):
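            # dimension: 0 = brush width, 1 = brush height, 2 = both; incrFlag selects grow vs. shrink.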
if (dimension < 2) and not incrFlag:
if self.parentCanvas.brushSize[dimension] > 1:
self.parentCanvas.brushSize[dimension] -= 1
self.update(brushSize=self.parentCanvas.brushSize)
elif (dimension < 2) and incrFlag:
self.parentCanvas.brushSize[dimension] += 1
self.update(brushSize=self.parentCanvas.brushSize)
elif dimension == 2:
[self.canvasBrushSize(f, dimension_, incrFlag)(None) for dimension_ in [0, 1]]
viewRect = self.parentCanvas.GetViewStart()
eventDc = self.parentCanvas.backend.getDeviceContext(self.parentCanvas.GetClientSize(), self.parentCanvas, viewRect)
self.parentCanvas.applyTool(eventDc, True, None, None, None, self.parentCanvas.brushPos, *self.parentCanvas.lastMouseState, self.currentTool, viewRect, force=True)
setattr(canvasBrushSize_, "attrDict", f.attrList[dimension + (0 if not incrFlag else 3)])
return canvasBrushSize_
@GuiCommandListDecorator(0, "Decrease canvas height", "Decrease canvas height", ["toolDecrCanvasH.png"], [wx.ACCEL_CTRL, wx.WXK_UP], None)
@GuiCommandListDecorator(1, "Decrease canvas width", "Decrease canvas width", ["toolDecrCanvasW.png"], [wx.ACCEL_CTRL, wx.WXK_LEFT], None)
@GuiCommandListDecorator(2, "Decrease canvas size", "Decrease canvas size", ["toolDecrCanvasHW.png"], None, None)
@GuiCommandListDecorator(3, "Increase canvas height", "Increase canvas height", ["toolIncrCanvasH.png"], [wx.ACCEL_CTRL, wx.WXK_DOWN], None)
@GuiCommandListDecorator(4, "Increase canvas width", "Increase canvas width", ["toolIncrCanvasW.png"], [wx.ACCEL_CTRL, wx.WXK_RIGHT], None)
@GuiCommandListDecorator(5, "Increase canvas size", "Increase canvas size", ["toolIncrCanvasHW.png"], None, None)
def canvasCanvasSize(self, f, dimension, incrFlag):
def canvasCanvasSize_(event):
if (dimension < 2) and not incrFlag:
if dimension == 0:
if self.parentCanvas.canvas.size[1] > 1:
self.parentCanvas.resize([self.parentCanvas.canvas.size[0], self.parentCanvas.canvas.size[1] - 1])
elif dimension == 1:
if self.parentCanvas.canvas.size[0] > 1:
self.parentCanvas.resize([self.parentCanvas.canvas.size[0] - 1, self.parentCanvas.canvas.size[1]])
elif (dimension < 2) and incrFlag:
if dimension == 0:
self.parentCanvas.resize([self.parentCanvas.canvas.size[0], self.parentCanvas.canvas.size[1] + 1])
elif dimension == 1:
self.parentCanvas.resize([self.parentCanvas.canvas.size[0] + 1, self.parentCanvas.canvas.size[1]])
elif dimension == 2:
[self.canvasCanvasSize(f, dimension_, incrFlag)(None) for dimension_ in [0, 1]]
setattr(canvasCanvasSize_, "attrDict", f.attrList[dimension + (0 if not incrFlag else 3)])
return canvasCanvasSize_
@GuiSelectDecorator(0, "Colour #00", "Colour #00 (Bright White)", None, [wx.ACCEL_CTRL, ord("0")], False)
@GuiSelectDecorator(1, "Colour #01", "Colour #01 (Black)", None, [wx.ACCEL_CTRL, ord("1")], False)
@GuiSelectDecorator(2, "Colour #02", "Colour #02 (Blue)", None, [wx.ACCEL_CTRL, ord("2")], False)
@GuiSelectDecorator(3, "Colour #03", "Colour #03 (Green)", None, [wx.ACCEL_CTRL, ord("3")], False)
@GuiSelectDecorator(4, "Colour #04", "Colour #04 (Red)", None, [wx.ACCEL_CTRL, ord("4")], False)
@GuiSelectDecorator(5, "Colour #05", "Colour #05 (Light Red)", None, [wx.ACCEL_CTRL, ord("5")], False)
@GuiSelectDecorator(6, "Colour #06", "Colour #06 (Pink)", None, [wx.ACCEL_CTRL, ord("6")], False)
@GuiSelectDecorator(7, "Colour #07", "Colour #07 (Yellow)", None, [wx.ACCEL_CTRL, ord("7")], False)
@GuiSelectDecorator(8, "Colour #08", "Colour #08 (Light Yellow)", None, [wx.ACCEL_CTRL, ord("8")], False)
@GuiSelectDecorator(9, "Colour #09", "Colour #09 (Light Green)", None, [wx.ACCEL_CTRL, ord("9")], False)
@GuiSelectDecorator(10, "Colour #10", "Colour #10 (Cyan)", None, [wx.ACCEL_CTRL | wx.ACCEL_SHIFT, ord("0")], False)
@GuiSelectDecorator(11, "Colour #11", "Colour #11 (Light Cyan)", None, [wx.ACCEL_CTRL | wx.ACCEL_SHIFT, ord("1")], False)
@GuiSelectDecorator(12, "Colour #12", "Colour #12 (Light Blue)", None, [wx.ACCEL_CTRL | wx.ACCEL_SHIFT, ord("2")], False)
@GuiSelectDecorator(13, "Colour #13", "Colour #13 (Light Pink)", None, [wx.ACCEL_CTRL | wx.ACCEL_SHIFT, ord("3")], False)
@GuiSelectDecorator(14, "Colour #14", "Colour #14 (Grey)", None, [wx.ACCEL_CTRL | wx.ACCEL_SHIFT, ord("4")], False)
@GuiSelectDecorator(15, "Colour #15", "Colour #15 (Light Grey)", None, [wx.ACCEL_CTRL | wx.ACCEL_SHIFT, ord("5")], False)
def canvasColour(self, f, idx):
def canvasColour_(event):
if event.GetEventType() == wx.wxEVT_TOOL:
self.parentCanvas.brushColours[0] = idx
elif event.GetEventType() == wx.wxEVT_TOOL_RCLICKED:
self.parentCanvas.brushColours[1] = idx
self.update(colours=self.parentCanvas.brushColours)
viewRect = self.parentCanvas.GetViewStart()
eventDc = self.parentCanvas.backend.getDeviceContext(self.parentCanvas.GetClientSize(), self.parentCanvas, viewRect)
self.parentCanvas.applyTool(eventDc, True, None, None, None, self.parentCanvas.brushPos, *self.parentCanvas.lastMouseState, self.currentTool, viewRect, force=True)
setattr(canvasColour_, "attrDict", f.attrList[idx])
setattr(canvasColour_, "isSelect", True)
return canvasColour_
@GuiSelectDecorator(0, "Transparent colour", "Transparent colour", None, [wx.ACCEL_CTRL | wx.ACCEL_SHIFT, ord("6")], False)
def canvasColourAlpha(self, f, idx):
def canvasColourAlpha_(event):
if event.GetEventType() == wx.wxEVT_TOOL:
self.parentCanvas.brushColours[0] = -1
elif event.GetEventType() == wx.wxEVT_TOOL_RCLICKED:
self.parentCanvas.brushColours[1] = -1
self.update(colours=self.parentCanvas.brushColours)
viewRect = self.parentCanvas.GetViewStart()
eventDc = self.parentCanvas.backend.getDeviceContext(self.parentCanvas.GetClientSize(), self.parentCanvas, viewRect)
self.parentCanvas.applyTool(eventDc, True, None, None, None, self.parentCanvas.brushPos, *self.parentCanvas.lastMouseState, self.currentTool, viewRect, force=True)
setattr(canvasColourAlpha_, "attrDict", f.attrList[idx])
setattr(canvasColourAlpha_, "isSelect", True)
return canvasColourAlpha_
@GuiSelectDecorator(0, "Transparent colour", "Transparent colour", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT | wx.ACCEL_SHIFT, ord("6")], False)
def canvasColourAlphaBackground(self, f, idx):
def canvasColourAlphaBackground_(event):
self.parentCanvas.brushColours[1] = -1
self.update(colours=self.parentCanvas.brushColours)
viewRect = self.parentCanvas.GetViewStart()
eventDc = self.parentCanvas.backend.getDeviceContext(self.parentCanvas.GetClientSize(), self.parentCanvas, viewRect)
self.parentCanvas.applyTool(eventDc, True, None, None, None, self.parentCanvas.brushPos, *self.parentCanvas.lastMouseState, self.currentTool, viewRect, force=True)
setattr(canvasColourAlphaBackground_, "attrDict", f.attrList[idx])
setattr(canvasColourAlphaBackground_, "isSelect", True)
return canvasColourAlphaBackground_
@GuiSelectDecorator(0, "Colour #00", "Colour #00 (Bright White)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("0")], False)
@GuiSelectDecorator(1, "Colour #01", "Colour #01 (Black)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("1")], False)
@GuiSelectDecorator(2, "Colour #02", "Colour #02 (Blue)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("2")], False)
@GuiSelectDecorator(3, "Colour #03", "Colour #03 (Green)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("3")], False)
@GuiSelectDecorator(4, "Colour #04", "Colour #04 (Red)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("4")], False)
@GuiSelectDecorator(5, "Colour #05", "Colour #05 (Light Red)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("5")], False)
@GuiSelectDecorator(6, "Colour #06", "Colour #06 (Pink)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("6")], False)
@GuiSelectDecorator(7, "Colour #07", "Colour #07 (Yellow)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("7")], False)
@GuiSelectDecorator(8, "Colour #08", "Colour #08 (Light Yellow)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("8")], False)
@GuiSelectDecorator(9, "Colour #09", "Colour #09 (Light Green)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("9")], False)
@GuiSelectDecorator(10, "Colour #10", "Colour #10 (Cyan)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT | wx.ACCEL_SHIFT, ord("0")], False)
@GuiSelectDecorator(11, "Colour #11", "Colour #11 (Light Cyan)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT | wx.ACCEL_SHIFT, ord("1")], False)
@GuiSelectDecorator(12, "Colour #12", "Colour #12 (Light Blue)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT | wx.ACCEL_SHIFT, ord("2")], False)
@GuiSelectDecorator(13, "Colour #13", "Colour #13 (Light Pink)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT | wx.ACCEL_SHIFT, ord("3")], False)
@GuiSelectDecorator(14, "Colour #14", "Colour #14 (Grey)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT | wx.ACCEL_SHIFT, ord("4")], False)
@GuiSelectDecorator(15, "Colour #15", "Colour #15 (Light Grey)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT | wx.ACCEL_SHIFT, ord("5")], False)
def canvasColourBackground(self, f, idx):
def canvasColourBackground_(event):
self.parentCanvas.brushColours[1] = idx
self.update(colours=self.parentCanvas.brushColours)
viewRect = self.parentCanvas.GetViewStart()
eventDc = self.parentCanvas.backend.getDeviceContext(self.parentCanvas.GetClientSize(), self.parentCanvas, viewRect)
self.parentCanvas.applyTool(eventDc, True, None, None, None, self.parentCanvas.brushPos, *self.parentCanvas.lastMouseState, self.currentTool, viewRect, force=True)
setattr(canvasColourBackground_, "attrDict", f.attrList[idx])
setattr(canvasColourBackground_, "isSelect", True)
return canvasColourBackground_
@GuiCommandDecorator("Flip colours", "Flip colours", ["toolColoursFlip.png"], [wx.ACCEL_CTRL, ord("I")], True)
def canvasColoursFlip(self, event):
self.parentCanvas.brushColours = [self.parentCanvas.brushColours[1], self.parentCanvas.brushColours[0]]
self.update(colours=self.parentCanvas.brushColours)
viewRect = self.parentCanvas.GetViewStart()
eventDc = self.parentCanvas.backend.getDeviceContext(self.parentCanvas.GetClientSize(), self.parentCanvas, viewRect)
self.parentCanvas.applyTool(eventDc, True, None, None, None, self.parentCanvas.brushPos, *self.parentCanvas.lastMouseState, self.currentTool, viewRect, force=True)
@GuiCommandDecorator("Copy", "&Copy", ["", wx.ART_COPY], None, False)
def canvasCopy(self, event):
pass
@GuiCommandDecorator("Cut", "Cu&t", ["", wx.ART_CUT], None, False)
def canvasCut(self, event):
pass
@GuiCommandDecorator("Delete", "De&lete", ["", wx.ART_DELETE], None, False)
def canvasDelete(self, event):
pass
@GuiCommandDecorator("Paste", "&Paste", ["", wx.ART_PASTE], None, False)
def canvasPaste(self, event):
pass
@GuiCommandDecorator("Redo", "&Redo", ["", wx.ART_REDO], [wx.ACCEL_CTRL, ord("Y")], False)
def canvasRedo(self, event):
self.parentCanvas.undo(redo=True); self.update(size=self.parentCanvas.canvas.size, undoLevel=self.parentCanvas.canvas.patchesUndoLevel);
@GuiCommandDecorator("Undo", "&Undo", ["", wx.ART_UNDO], [wx.ACCEL_CTRL, ord("Z")], False)
def canvasUndo(self, event):
self.parentCanvas.undo(); self.update(size=self.parentCanvas.canvas.size, undoLevel=self.parentCanvas.canvas.patchesUndoLevel);
# vim:expandtab foldmethod=marker sw=4 ts=4 tw=0
|
python
|
#!/usr/bin/python3
"""
Module installation file
"""
from setuptools import Extension
from setuptools import setup
extension = Extension(
name='fipv',
include_dirs=['include'],
sources=['fipv/fipv.c'],
extra_compile_args=['-O3'],
)
setup(ext_modules=[extension])
|
python
|
import os
import time
import hashlib
import gzip
import shutil
from subprocess import run
from itertools import chain
CACHE_DIRECTORY = "downloads_cache"
def decompress_gzip_file(filepath):
decompressed = filepath + ".decompressed"
if not os.path.exists(decompressed):
with gzip.open(filepath, 'rb') as f_in:
with open(decompressed, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
return decompressed
def download_if_modified(url):
"""Download a file only if is has been modified via curl, see https://superuser.com/a/1159510"""
url_hash = hashlib.md5(url.encode()).hexdigest()
curr_dir = os.path.dirname(os.path.realpath(__file__))
filename = f'{curr_dir}/{CACHE_DIRECTORY}/{url_hash}'
print(f'Download {url} if it has been modified, destination is {filename}')
# If file exists and was modified today do not check for update
check_for_update = True
if os.path.exists(filename):
file_stat = os.stat(filename)
file_age_seconds = (time.time() - file_stat.st_mtime)
if file_age_seconds < 60 * 60 * 24:
check_for_update = False
print('File on disk is less than a day old, do not check for update.')
if check_for_update:
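        # curl's "-z <file>" sends If-Modified-Since based on the cached file's mtime,
        # so the body is only re-downloaded when the remote copy has changed.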
run(chain(
('curl', '-s', url),
('-o', filename),
('-z', filename) if os.path.exists(filename) else (),
))
filepath = os.path.abspath(filename)
# Auto decompress gzip files
if url.endswith('.gz'):
return decompress_gzip_file(filepath)
return filepath
|
python
|
from MainWindow import Ui_MainWindow
from PyQt6 import QtWidgets
import sys
class CalcWindow(Ui_MainWindow, QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setupUi(self)
self.equalButton.clicked.connect(self.calculation)
self.cButton.clicked.connect(self.pressC)
self.plusorminusButton.clicked.connect(self.change_sign)
self.arrowButton.clicked.connect(self.remove_it)
self.percentButton.clicked.connect(self.percent)
self.zeroButton.clicked.connect(self.zero)
self.oneButton.clicked.connect(self.one)
self.twoButton.clicked.connect(self.two)
self.threeButton.clicked.connect(self.three)
self.fourButton.clicked.connect(self.four)
self.fiveButton.clicked.connect(self.five)
self.sixButton.clicked.connect(self.six)
self.sevenButton.clicked.connect(self.seven)
self.eightButton.clicked.connect(self.eight)
self.nineButton.clicked.connect(self.nine)
self.plusButton.clicked.connect(self.plus)
self.minusButton.clicked.connect(self.subtract)
self.divideButton.clicked.connect(self.divide)
self.multiplyButton.clicked.connect(self.multiply)
self.periodButton.clicked.connect(self.dot_it)
def pressC(self):
# if self.pressed == 'c':
self.outputLabel.setText("0")
def pressButton(self):
screen = self.outputLabel.text()
print(screen)
if screen == "0" or screen == "NaN" or screen == "Incomplete":
self.outputLabel.setText("0")
self.outputLabel.setText(f'{screen}')
#Numbers and symbols
def zero(self):
text = self.outputLabel.text()
self.outputLabel.setText(text + "0")
def one(self):
text = self.outputLabel.text()
if text == "0":
text = ""
self.outputLabel.setText(text + "1")
def two(self):
text = self.outputLabel.text()
if text == "0":
text = ""
self.outputLabel.setText(text + "2")
def three(self):
text = self.outputLabel.text()
if text == "0":
text = ""
self.outputLabel.setText(text + "3")
def four(self):
text = self.outputLabel.text()
if text == "0":
text = ""
self.outputLabel.setText(text + "4")
def five(self):
text = self.outputLabel.text()
if text == "0":
text = ""
self.outputLabel.setText(text + "5")
def six(self):
text = self.outputLabel.text()
if text == "0":
text = ""
self.outputLabel.setText(text + "6")
def seven(self):
text = self.outputLabel.text()
if text == "0":
text = ""
self.outputLabel.setText(text + "7")
def eight(self):
text = self.outputLabel.text()
if text == "0":
text = ""
self.outputLabel.setText(text + "8")
def nine(self):
text = self.outputLabel.text()
if text == "0":
text = ""
self.outputLabel.setText(text + "9")
def dot(self):
text = self.outputLabel.text()
self.outputLabel.setText(text + ".")
def plus(self):
text = self.outputLabel.text()
self.outputLabel.setText(text + "+")
def multiply(self):
text = self.outputLabel.text()
self.outputLabel.setText(text + "*")
def divide(self):
text = self.outputLabel.text()
self.outputLabel.setText(text + "/")
def subtract(self):
text = self.outputLabel.text()
self.outputLabel.setText(text + "-")
def percent(self):
text = self.outputLabel.text()
self.outputLabel.setText(text + "%")
# Remove a symbol
def remove_it(self):
screen = self.outputLabel.text()
print(len(screen))
if len(screen) > 1:
screen = screen[:-1]
else:
screen = "0"
self.outputLabel.setText(f'{screen}')
def calculation(self):
screen = self.outputLabel.text()
try:
answer = eval(screen)
self.outputLabel.setText(f"{answer}")
except ZeroDivisionError:
self.outputLabel.setText("NaN")
except SyntaxError:
self.outputLabel.setText("Incomplete")
except NameError:
self.pressC()
# Change sign of the number
def change_sign(self):
screen = self.outputLabel.text()
if "-" in screen:
self.outputLabel.setText(f"{screen[1:]}")
print(f"{screen[0]}")
else:
self.outputLabel.setText(f'-{screen}')
# Add a decimal
def check_symbol(self, s, arr):
result = []
for i in arr:
if i in s:
result.append(i)
return result
def dot_it(self):
screen = self.outputLabel.text()
if screen[-1] == ".":
pass
elif "." in screen:
symbolList = ["+", "-", "/", "*"]
result = self.check_symbol(screen, symbolList)
            if result:
for symbol in result:
num = screen.rindex(symbol)
if "." not in screen[num-1:]:
self.outputLabel.setText(f'{screen}.')
else:
self.outputLabel.setText(f'{screen}.')
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
ui = CalcWindow()
ui.show()
sys.exit(app.exec())
|
python
|
from tornado.web import HTTPError
class APIError(HTTPError):
def __init__(
self,
status_code: int,
reason: str,
message: str = None,
details: dict = None,
):
log_message = ': '.join(map(str, filter(None, [message, details]))) or None
super().__init__(
status_code=status_code,
reason=reason,
log_message=log_message,
)
self.message = message
self.details = details
class InternalError(APIError):
def __init__(self):
super().__init__(500, 'Internal error')
class NotFoundError(APIError):
def __init__(self, message: str):
super().__init__(404, 'Not found', message)
class InvalidMethod(APIError):
def __init__(self):
super().__init__(405, 'Invalid method')
class ValidationError(APIError):
def __init__(self, message: str, details: dict = None):
super().__init__(400, 'Invalid request', message, details)
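# Illustrative usage sketch (hypothetical handler, not part of the original module):
# a Tornado RequestHandler can raise these errors and let write_error() render them.
#
#   class ItemHandler(tornado.web.RequestHandler):
#       async def get(self, item_id):
#           item = await load_item(item_id)   # load_item is an assumed helper
#           if item is None:
#               raise NotFoundError(f"item {item_id} does not exist")
#           self.write(item)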
|
python
|
#!/usr/bin/python
# Report generator
import api
import fn
import sys
import dumper
def main(argv):
config = {}
config["usagetext"] = ("repgen.py (-s SEARCHPREFIX|-a) [-c CONFIGFILE]\n"+
" This script generates a report for all servers if -a is used,\n"+
"or just the servers with SEARCHPREFIX in the server label if -s is used.\n\n"+
"Make sure you correctly configure config.conf.\n"+
"you can use -c to specify a different configuration file. Otherwise, ./config.conf is assumed.\n\n"
"In config.conf: search_field will determine the metadata field that SEARCHPREFIX is applied to\n"+
" to create the list of servers that will be reported on.\n"+
"The output configuration value will determine the output format for the information.\n"+
" Text is mainly for debugging and may not produce as much meaningful information as html or pdf.\n"+
" HTML and PDF files are placed in the ./outfile folder. If it doesn't exist, the script will fail.")
config["configfile"] = "config.conf"
serverolist = []
config = fn.set_config_items(config,argv)
serverolist = fn.build_server_list(config['host'], config['authtoken'], config['search_string'], config['search_field'], config['prox'])
serverolist = fn.enrich_server_data(config['host'], config['authtoken'], serverolist, config['prox'])
# Here we re-write the config if the logo file is on the local filesystem, because relative paths don't work well with PDF rendering.
if fn.where_is_img(config['logo_url'])[0] == 'local' and config['output'] == 'pdf':
try:
config['logo_url'] = fn.where_is_img(config['logo_url'])[1]
except:
# Here we empty that field in case the data was bad...
config['logo_url'] = ''
fn.handle_output(config, serverolist)
if __name__ == "__main__":
print sys.argv[1:]
main(sys.argv[1:])
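# Illustrative config.conf sketch (keys inferred from the usage text and the fn.* calls
# above; the actual file format and values are assumptions):
#
#   host = https://manager.example.com
#   authtoken = XXXXXXXXXXXX
#   search_field = label
#   output = pdf
#   logo_url = ./logo.png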
|
python
|
import os
import logging
import json
from codecs import open
from collections import Counter
import numpy as np
import spacy
from tqdm import tqdm
"""
The content of this file is mostly copied from https://github.com/HKUST-KnowComp/R-Net/blob/master/prepro.py
"""
nlp = spacy.blank("en")
def word_tokenize(sent):
doc = nlp(sent)
return [token.text for token in doc]
def convert_idx(text, tokens):
current = 0
spans = []
for token in tokens:
current = text.find(token, current)
if current < 0:
print("Token {} cannot be found".format(token))
raise Exception()
spans.append((current, current + len(token)))
current += len(token)
return spans
def _get_answer_span(answer, spans, texts):
text = answer["text"]
start = answer["answer_start"]
end = start + len(text)
texts.append(text)
answer_span = []
# this loop finds the overlap of answer and context
for idx, span in enumerate(spans):
if not (end <= span[0] or start >= span[1]):
answer_span.append(idx)
return answer_span[0], answer_span[-1]
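# Worked example (hypothetical values): with context "The cat sat", tokens
# ["The", "cat", "sat"] and spans [(0, 3), (4, 7), (8, 11)], an answer
# {"text": "cat sat", "answer_start": 4} gives start=4, end=11; tokens 1 and 2
# overlap that character range, so the function returns (1, 2).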
def keep_unique_answers(y1, y2):
if len(y1) > 0:
a, b = zip(*list(set([(i, j) for i, j in zip(y1, y2)])))
return a, b
return y1, y2
def process_file(filename, data_type, word_counter, char_counter, version="v2.0"):
"""
filename: json file to read
data_type : 'train'/'test'/'dev'
word_counter: Just a counter for word occurence
char_counter: Just a counter for char
"""
print("Generating {} examples...\n".format(data_type))
examples = []
eval_examples = {}
total = 0
with open(filename, "r") as fh:
source = json.load(fh)
for article in tqdm(source["data"]):
for para in article["paragraphs"]:
# tokenize the para and store the span of each token in spans
# we store spans because we get position of answer start and the answer in the data
context = para["context"].replace("''", '" ').replace("``", '" ')
context_tokens = word_tokenize(context)
spans = convert_idx(context, context_tokens)
context_chars = [list(token) for token in context_tokens]
for token in context_tokens:
word_counter[token] += len(para["qas"])
for char in token:
char_counter[char] += len(para["qas"])
for qa in para["qas"]:
total += 1
ques = qa["question"].replace("''", '" ').replace("``", '" ')
ques_tokens = word_tokenize(ques)
ques_chars = [list(token) for token in ques_tokens]
for token in ques_tokens:
word_counter[token] += 1
for char in token:
char_counter[char] += 1
if version == "v2.0":
y1s, y2s = [], []
answer_texts = []
plausible_y1s, plausible_y2s = [], []
plausible_answer_texts = []
is_impossible = bool(qa["is_impossible"])
# if answering is impossible, some qas might have a plausible answer and we record that.
if is_impossible:
for answer in qa["plausible_answers"]:
y1, y2 = _get_answer_span(
answer, spans, plausible_answer_texts
)
plausible_y1s.append(y1)
plausible_y2s.append(y2)
plausible_y1s, plausible_y2s = keep_unique_answers(
plausible_y1s, plausible_y2s
)
else:
for answer in qa["answers"]:
y1, y2 = _get_answer_span(answer, spans, answer_texts)
y1s.append(y1)
y2s.append(y2)
y1s, y2s = keep_unique_answers(y1s, y2s)
example = {
"context_tokens": context_tokens,
"context_chars": context_chars,
"ques_tokens": ques_tokens,
"ques_chars": ques_chars,
"y1s": y1s,
"y2s": y2s,
"plausible_y1s": plausible_y1s,
"plausible_y2s": plausible_y2s,
"id": total,
"uuid": qa["id"],
"is_impossible": is_impossible,
}
examples.append(example)
eval_examples[str(total)] = {
"context": context,
"spans": spans,
"answers": answer_texts,
"plausible_answers": plausible_answer_texts,
"uuid": qa["id"],
"is_impossible": is_impossible,
}
elif version == "v1.1": # v1.1 case
y1s, y2s = [], []
answer_texts = []
for answer in qa["answers"]:
y1, y2 = _get_answer_span(answer, spans, answer_texts)
y1s.append(y1)
y2s.append(y2)
y1s, y2s = keep_unique_answers(y1s, y2s)
example = {
"context_tokens": context_tokens,
"context_chars": context_chars,
"ques_tokens": ques_tokens,
"ques_chars": ques_chars,
"y1s": y1s,
"y2s": y2s,
"id": total,
"uuid": qa["id"],
}
examples.append(example)
# note eval files are now indexed by uuid here
eval_examples[str(total)] = {
"context": context,
"spans": spans,
"answers": answer_texts,
"uuid": qa["id"],
}
print(f"{len(examples)} questions in total")
return examples, eval_examples
def get_embedding(counter, data_type, limit=-1, emb_file=None, vec_size=None):
print("Generating {} embedding...".format(data_type))
embedding_dict = {}
filtered_elements = [k for k, v in counter.items() if v > limit]
# load from file if there is
if emb_file is not None:
assert vec_size is not None
with open(emb_file, "r") as fh:
for line in tqdm(fh):
array = line.split()
l = len(array)
word = "".join(array[0 : l - vec_size])
vector = list(map(float, array[l - vec_size : l]))
if word in counter and counter[word] > limit:
embedding_dict[word] = vector
print(
"{} / {} tokens have corresponding {} embedding vector".format(
len(embedding_dict), len(filtered_elements), data_type
)
)
# random embedding initialization
else:
assert vec_size is not None
for token in filtered_elements:
embedding_dict[token] = [
np.random.normal(scale=0.1) for _ in range(vec_size)
]
print(
"{} tokens have corresponding embedding vector".format(
len(filtered_elements)
)
)
# NULL and OOV are index 0 and 1 and zero vectors
NULL = "--NULL--"
OOV = "--OOV--"
token2idx_dict = {token: idx for idx, token in enumerate(embedding_dict.keys(), 2)}
token2idx_dict[NULL] = 0
token2idx_dict[OOV] = 1
embedding_dict[NULL] = [0.0 for _ in range(vec_size)]
embedding_dict[OOV] = [0.0 for _ in range(vec_size)]
idx2emb_dict = {idx: embedding_dict[token] for token, idx in token2idx_dict.items()}
emb_mat = [idx2emb_dict[idx] for idx in range(len(idx2emb_dict))]
return emb_mat, token2idx_dict
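# Worked example (hypothetical counter, random-init branch): with
# counter = {"the": 5, "cat": 1}, limit = -1, emb_file = None and vec_size = 2,
# both tokens survive the filter, token2idx_dict becomes
# {"the": 2, "cat": 3, "--NULL--": 0, "--OOV--": 1} and emb_mat has 4 rows of
# length 2, with rows 0 and 1 all zeros.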
def convert_to_features(config, data, word2idx_dict, char2idx_dict):
def _get_word(word):
for each in (word, word.lower(), word.capitalize(), word.upper()):
if each in word2idx_dict:
return word2idx_dict[each]
return 1
def _get_char(char):
if char in char2idx_dict:
return char2idx_dict[char]
return 1
def filter_func(example):
return (
len(example["context_tokens"]) > para_limit
or len(example["ques_tokens"]) > ques_limit
)
example = {}
context, question = data
context = context.replace("''", '" ').replace("``", '" ')
question = question.replace("''", '" ').replace("``", '" ')
example["context_tokens"] = word_tokenize(context)
example["ques_tokens"] = word_tokenize(question)
example["context_chars"] = [list(token) for token in example["context_tokens"]]
example["ques_chars"] = [list(token) for token in example["ques_tokens"]]
spans = convert_idx(context, example["context_tokens"])
para_limit = config.para_limit
ques_limit = config.ques_limit
ans_limit = config.ans_limit
char_limit = config.char_limit
if filter_func(example):
print(" Warning: Context/Question length is over the limit")
context_idxs = np.zeros([para_limit], dtype=np.int32)
context_char_idxs = np.zeros([para_limit, char_limit], dtype=np.int32)
ques_idxs = np.zeros([ques_limit], dtype=np.int32)
ques_char_idxs = np.zeros([ques_limit, char_limit], dtype=np.int32)
y1 = np.zeros([para_limit], dtype=np.float32)
y2 = np.zeros([para_limit], dtype=np.float32)
for i, token in enumerate(example["context_tokens"][:para_limit]):
context_idxs[i] = _get_word(token)
for i, token in enumerate(example["ques_tokens"][:ques_limit]):
ques_idxs[i] = _get_word(token)
for i, token in enumerate(example["context_chars"][:para_limit]):
for j, char in enumerate(token[:char_limit]):
context_char_idxs[i, j] = _get_char(char)
for i, token in enumerate(example["ques_chars"][:ques_limit]):
for j, char in enumerate(token[:char_limit]):
ques_char_idxs[i, j] = _get_char(char)
return context_idxs, context_char_idxs, ques_idxs, ques_char_idxs, spans
def build_features(
config, examples, data_type, out_file, word2idx_dict, char2idx_dict, is_test=False
):
def _get_word(word):
for each in (word, word.lower(), word.capitalize(), word.upper()):
if each in word2idx_dict:
return word2idx_dict[each]
return word2idx_dict["--OOV--"]
def _get_char(char):
if char in char2idx_dict:
return char2idx_dict[char]
return char2idx_dict["--OOV--"]
def filter_func(example, is_test=False):
# in case of test filter nothing
if is_test:
return False
if version == "v2.0":
if example["is_impossible"]:
return (
len(example["context_tokens"]) > para_limit
or len(example["ques_tokens"]) > ques_limit
)
return (
len(example["context_tokens"]) > para_limit
or len(example["ques_tokens"]) > ques_limit
or (example["y2s"][-1] - example["y1s"][-1]) > ans_limit
)
para_limit = config.para_limit
ques_limit = config.ques_limit
ans_limit = config.ans_limit
char_limit = config.char_limit
version = config.version
print(f"Processing {data_type} examples...")
total = 0
meta = {}
N = len(examples)
context_idxs = []
context_char_idxs = []
ques_idxs = []
ques_char_idxs = []
y1s = []
y2s = []
ids = []
uuids = []
id_to_uuid = {}
if version == "v2.0":
impossibles = []
for n, example in tqdm(enumerate(examples)):
# if filter returns true, then move to next example
if filter_func(example, is_test):
continue
total += 1
context_idx = np.zeros([para_limit], dtype=np.int32)
context_char_idx = np.zeros([para_limit, char_limit], dtype=np.int32)
ques_idx = np.zeros([ques_limit], dtype=np.int32)
ques_char_idx = np.zeros([ques_limit, char_limit], dtype=np.int32)
for i, token in enumerate(example["context_tokens"][:para_limit]):
context_idx[i] = _get_word(token)
for i, token in enumerate(example["ques_tokens"][:ques_limit]):
ques_idx[i] = _get_word(token)
for i, token in enumerate(example["context_chars"][:para_limit]):
for j, char in enumerate(token[:char_limit]):
context_char_idx[i, j] = _get_char(char)
for i, token in enumerate(example["ques_chars"][:ques_limit]):
for j, char in enumerate(token[:char_limit]):
ques_char_idx[i, j] = _get_char(char)
if version == "v2.0":
if not example["is_impossible"]:
starts, ends = example["y1s"], example["y2s"]
elif config.use_plausible is True and len(example["plausible_y1s"]) > 0:
starts, ends = example["plausible_y1s"], example["plausible_y2s"]
else:
starts, ends = [-1], [-1]
# append one example for each possible answer
for start, end in zip(starts, ends):
ques_char_idxs.append(ques_char_idx)
context_idxs.append(context_idx)
ques_idxs.append(ques_idx)
context_char_idxs.append(context_char_idx)
y1s.append(start)
y2s.append(end)
ids.append(example["id"])
impossibles.append(example["is_impossible"])
uuids.append(example["uuid"])
id_to_uuid[example["id"]] = example["uuid"]
else:
starts, ends = example["y1s"], example["y2s"]
for start, end in zip(starts, ends):
ques_char_idxs.append(ques_char_idx)
context_idxs.append(context_idx)
ques_idxs.append(ques_idx)
context_char_idxs.append(context_char_idx)
y1s.append(start)
y2s.append(end)
ids.append(example["id"])
uuids.append(example["uuid"])
id_to_uuid[example["id"]] = example["uuid"]
if version == "v2.0":
np.savez(
out_file,
context_idxs=np.array(context_idxs),
context_char_idxs=np.array(context_char_idxs),
ques_idxs=np.array(ques_idxs),
ques_char_idxs=np.array(ques_char_idxs),
y1s=np.array(y1s),
y2s=np.array(y2s),
ids=np.array(ids),
impossibles=np.array(impossibles),
uuids=np.array(uuids),
)
else:
np.savez(
out_file,
context_idxs=np.array(context_idxs),
context_char_idxs=np.array(context_char_idxs),
ques_idxs=np.array(ques_idxs),
ques_char_idxs=np.array(ques_char_idxs),
y1s=np.array(y1s),
y2s=np.array(y2s),
ids=np.array(ids),
uuids=np.array(uuids),
)
print("Built {} / {} instances of features in total".format(len(y1s), N))
print("Processed {} instances of features in total".format(total))
meta["total"] = len(y1s)
meta["id_to_uuid"] = id_to_uuid
return meta
def save(filename, obj, message=None):
if message is not None:
print("Saving {}...".format(message))
with open(filename, "w") as fh:
json.dump(obj, fh, indent=4, sort_keys=True)
def preprocess(args, config):
word_counter, char_counter = Counter(), Counter()
# get embeddings
word_emb_file = config.glove_word_file
char_emb_file = config.glove_char_file if config.pretrained_char else None
# handle train file
train_examples, train_eval = process_file(
config.raw_train_file, "train", word_counter, char_counter, config.version
)
dev_examples, dev_eval = process_file(
config.raw_dev_file, "dev", word_counter, char_counter, config.version
)
if os.path.exists(config.raw_test_file):
test_examples, test_eval = process_file(
config.raw_test_file, "test", word_counter, char_counter
)
# Note that we are getting embeddings for as much data as possible (train/test/dev) while training.
word_emb_mat, word2idx_dict = get_embedding(
word_counter, "word", emb_file=word_emb_file, vec_size=config.word_emb_dim
)
char_emb_mat, char2idx_dict = get_embedding(
char_counter, "char", emb_file=char_emb_file, vec_size=config.char_emb_dim
)
build_features(
config, train_examples, "train", config.train_file, word2idx_dict, char2idx_dict
)
dev_meta = build_features(
config,
dev_examples,
"dev",
config.dev_file,
word2idx_dict,
char2idx_dict,
is_test=True,
)
if os.path.exists(config.raw_test_file):
test_meta = build_features(
config,
test_examples,
"test",
config.test_record_file,
word2idx_dict,
char2idx_dict,
is_test=True,
)
save(config.word_emb_file, word_emb_mat, message="word embedding")
save(config.char_emb_file, char_emb_mat, message="char embedding")
save(config.word2idx_file, word2idx_dict, message="word dictionary")
save(config.char2idx_file, char2idx_dict, message="char dictionary")
save(config.train_eval_file, train_eval, message="train eval")
save(config.dev_eval_file, dev_eval, message="dev eval")
save(config.dev_meta_file, dev_meta, message="dev meta")
if os.path.exists(config.raw_test_file):
save(config.test_eval_file, test_eval, message="test eval")
save(config.test_meta_file, test_meta, message="test meta")
|
python
|
'''
Main module: contains the server execution logic and the routes exposed by the API
'''
from entities.profile import Profile
from flask import Flask, jsonify, request
from flask_restful import Resource, Api
from flask_httpauth import HTTPBasicAuth
from datetime import datetime
import pandas as pd
import numpy as np
from sessionManager import SessionManager as sm
from dbManager import Querys
from formManager import FormManager
from entities.user import User
from csv1.csvcleaner import Csvcleaner
from entities.opinionSheet import OpinionSheet
from entities.dataSheet import Datasheet
from entities.attribute import Attribute
from recommendationManger import RecommendationManager
from recommenderCore.contentBased import ContentBased
from entities.requestResult import RequestResult
from entities.history import History
from entities.automobile import Automobile
from dataExportManager import DataExportManager
from clusteringModel.kmodesManager import KmodesManager
from comprehend.analyzer import Analyzer
from sqlalchemy import create_engine
import pymysql
# VARIABLES
app = Flask(__name__)
api = Api(app)
auth = HTTPBasicAuth()
MyConnection = Querys(app)
# Password verification method for HTTP basic authentication
@auth.verify_password
def verify_password(username, password):
userQ=MyConnection.getUserByUsername(username)
if(userQ):
user = User("0",userQ[2],"0","0")
user.setPasswordHash(userQ[4])
if not user or not user.verify_password(password):
print("usuario '{0}' no autorizado".format(username))
return False
print("usuario '{0}' autorizado".format(username))
return userQ[1:3]
print("usuario '{0}' no autorizado".format(username))
return False
# Main endpoint
class home(Resource):
def get(self):
#lis=MyConnection.getCursorParams()
#db_connection_str = 'mysql+pymysql://'+lis[1]+':'+lis[2]+'@'+lis[0]+'/'+lis[3]
#db_connection = create_engine(db_connection_str)
return jsonify({"message": "Bienvenido a recommendautos"})
# User welcome
class wellcome(Resource):
@auth.login_required
def get(self):
return jsonify({"message":"{}".format(auth.current_user()[0])})
# Registration of new users
class addUser(Resource):
def post(self):
user1=User(request.json['personname'],request.json['username'],request.json['email'],request.json['password'])
user1.hash_password()
if(MyConnection.addNewUser(user1)):
print("El usuario '{}' se agrego satisfactoriamente".format(user1.getUserName()))
return jsonify({"message":"Usuario agregado satisfactoriamente", "user": user1.get_userBasic()})
print("Error al agregar al usuario '{}'".format(user1.getUserName()))
return jsonify({"message":"Error al agregar nuevo usuario", "user": user1.get_userBasic()})
# Check whether a username is already registered
class checkUser(Resource):
def get(self,user_name):
user=MyConnection.getUserByUsername(user_name)
if(user):
print("El nombre de usuario '{}' ya existe".format(user_name))
return jsonify({"message":"El usuario ya existe"})
print("El nombre de usuario '{}' no existe".format(user_name))
return jsonify({"message":"El usuario no existe"})
# Check whether an email address is already registered
class checkEmail(Resource):
def get(self):
pass
# Perform login
class verifyUser(Resource):
def post(self):
fakeUser=User("person",request.json['username'],"email",request.json['password'])
GUID=request.json['id']
user=MyConnection.getUserByUsername(fakeUser.getUserName())
if(user):
fakeUser.setPasswordHash(user[4])
if(fakeUser.verify_password(request.json['password'])):
sk=sm.generateSessionkey(user[0],GUID)
if(MyConnection.addSk(user[0],sk,"ACTIVOS")):
fakeUser.setId(sk)
fakeUser.setPersonName(user[1])
fakeUser.setEmail(user[3])
print("El usuario {} accedio satisfactoriamente".format(fakeUser.getUserName()))
return jsonify({"message":"El usuario accedio satisfactoriamente", "user": fakeUser.get_user()})
print("Error al agregar sk en db")
print("el usuario no existe o contraseรฑa icorecta")
return jsonify({"message":"Error de autenticaciรณn", "user": fakeUser.get_user()})
# Get or update a user's information
class dataUser(Resource):
@auth.login_required
def post(self):
fakeUser=User("0","0","0","0")
fakeUser.setId(request.json['id'])
user=MyConnection.getUserBySessionKey(fakeUser.getId()) # the id handled by the app is the session key (it changes over time)
if(user):
fakeUser.setPersonName(user[1])
fakeUser.setUserName(user[2])
fakeUser.setPassword("password")# El password nunca se envia como uan respuesta de servidor
fakeUser.setEmail(user[4])
print("Datos del usuario {} encontrados correctamente".format(fakeUser.getUserName()))
return jsonify({"message":"Autenticacion correcta, usuario encontrado", "user": fakeUser.get_user()})
print("Error al obtener los datos del usuario con sk: '{}'".format(fakeUser.getId()))
return jsonify({"message":"Error: No se autentico correctamente o el usuario no existe", "user": fakeUser.get_user()})
def patch(self):
fakeUser=User(request.json['personname'],request.json['username'],request.json['email'],request.json['password'])
fakeUser.hash_password()
sk=request.json['id'] # in the app, id is the session key
fakeUser.setId(sk)
id=MyConnection.getIdBySessionKey(sk)
if(id):
if(MyConnection.updateUser(fakeUser, id[0])):
user=MyConnection.getUserById(id[0])
if(user):
fakeUser.setPersonName(user[1])
fakeUser.setUserName(user[2])
fakeUser.setPassword("password") #el password nunca se envia como una respuesta
fakeUser.setEmail(user[3])
print("El usuario {},ha sido actualizado correctamente".format(fakeUser.getUserName()))
return jsonify({"message":"Usuario actualizado correctamente", "user": fakeUser.get_user()})
print("El usuario {},ha sido actualizado correctamente, error al retornar nuevos datos".format(fakeUser.getUserName()))
return jsonify({"message":"Usuario actualizado, error al retornar nuevo usuario", "user": fakeUser.get_user()})
print("Error al actualizar datos del usuario{}, id no encontrado".format(fakeUser.getUserName()))
return jsonify({"message":"Error al actualizar datos de usuario", "user": fakeUser.get_user()})
# Get the form
class getForm(Resource):
def get(self):
formulario=FormManager.buildForm(MyConnection)
return jsonify(formulario.getForm())
# Get a recommendation
class getRecom(Resource):
def post(self):
#myString = json.dumps(request.json, sort_keys=True, indent=4)
#print(myString)
now = datetime.now()
id=MyConnection.getIdBySessionKey(request.json['user']['id']) # get the internal id from its sessionKey #sk=request.json['user']['id']
if (id):
idReq=MyConnection.addRequest("FormA",now,id[0]) # create a new request
if(idReq):
result=RecommendationManager.getRecommendation(request.json['form'],idReq[0],MyConnection)
if(result):
return jsonify(result)
else:
return jsonify({"idRecommendation":"100"})
else:
return jsonify({"idRecommendation":"100"})
# Get history
class getHistory(Resource):
@auth.login_required
def get(self):
idUser=MyConnection.getIdByUsername(auth.current_user()[1])
hRequests=MyConnection.getHistoryRequestByIdUser(idUser)
print(hRequests)
print(len(hRequests))
if(hRequests):
arrRequests=[]
for hRequest in hRequests:
data_Autos=MyConnection.getAutosByIdReq(hRequest[0])
dataProfile=MyConnection.getProfileById(hRequest[2])
userprofile=Profile(dataProfile[0],dataProfile[1],dataProfile[2])
arrAutos=[]
for data_Auto in data_Autos:
arrAutos.append(Automobile(data_Auto[1],data_Auto[2],data_Auto[3],data_Auto[4],data_Auto[5]))
form=FormManager.buildFormResponse(MyConnection,hRequest[0])
arrRequests.append(RequestResult(hRequest[0],hRequest[1],userprofile,hRequest[3],arrAutos,form))
response=History(len(arrRequests),arrRequests)
return jsonify(response.getHistory())
else:
#response=History(0,RequestResult(0,0,0,0,0,0))
return jsonify({"requests":0})
# Get vehicle details
class getCarDetails(Resource):
def post(self):
print(request.json['id'])
attribs=MyConnection.getAttributesByIdAuto(request.json['id'])
if(attribs):
print(attribs)
arrAttribs=[]
for attrib in attribs:
arrAttribs.append(Attribute(attrib[0],attrib[1],attrib[2]))
opinions=MyConnection.getOpinions(request.json['id'])
if(opinions):
opinionsheet=OpinionSheet(request.json['id'],opinions[0],opinions[1],opinions[2])
else:
urlA=MyConnection.getUrlAuto(request.json['id'])
opinionsheet=OpinionSheet(request.json['id'],'','',urlA[0])
datasheet=Datasheet(request.json['id'],arrAttribs,opinionsheet)
print(datasheet.getDataSheet())
return jsonify(datasheet.getDataSheet())
return jsonify({'message':'error'})
# Export data
class exportData(Resource):
def get(self):
msg='failed'
msg=DataExportManager.exportAttributes(MyConnection)
print('exportAttributes ok')
msg=ContentBased.generateOverview() # generates the overview
print('generateOverview ok')
msg=DataExportManager.exportAutos(MyConnection)
print('exportAutos ok')
msg=DataExportManager.exportAutosAttributes(MyConnection)
print('exportAutosAttributes ok')
msg=DataExportManager.exportTags(MyConnection)
print('exportTags ok')
msg=DataExportManager.exportTagsAttributes(MyConnection)
print('exportTagsAttributes ok')
msg=DataExportManager.exportResponsesAttributes(MyConnection)
print('exportResponsesAttributes ok')
Csvcleaner.generateScoreSheet()
print('generateScoreSheet ok')
msg=DataExportManager.exportScoresheet(MyConnection)
print('exportScoresheet ok')
msg=DataExportManager.exportForms(MyConnection) # only converts to numeric, does not write to the database
print('exportForms ok')
msg=Csvcleaner.generateScoreSheet()
print('generateScoreSheet ok')
msg=DataExportManager.exportScoresheet(MyConnection)
print('exportScoresheet ok')
print('Datos exportados con exito!! ')
return jsonify('status: '+msg)
# Train model
class trainModel(Resource):
def get(self):
msg='ok'
k=6
#msg=KmodesManager.generateModel(k,MyConnection,'Cao')
msg=KmodesManager.defineProfiles(MyConnection,k) ## === not executed yet
#ContentBased.generateOverview() # only when the car data changes
return msg
# Update profiles
class updateProfiles(Resource):
def post(self):
msg='Error'
if(MyConnection.updateProfileByNcluster(request.json['nombrePerfil'],request.json['descripcionPerfil'],request.json['cluster'])):
msg='perfiles actualizados!'
return msg
# RESOURCE AND ROUTE REGISTRATION
api.add_resource(home,"/")
api.add_resource(wellcome,"/wellcome")
api.add_resource(addUser,"/signUp")
api.add_resource(checkUser,"/signUp/user/<string:user_name>")
api.add_resource(checkEmail,"/signUp/email/<string:user_email>")
api.add_resource(verifyUser,"/logIn")
api.add_resource(dataUser,"/user")
api.add_resource(getForm,"/form")
api.add_resource(getRecom,"/recom")
api.add_resource(getHistory,"/history")
api.add_resource(getCarDetails,"/details")
api.add_resource(exportData,"/exportData")
api.add_resource(trainModel,"/trainModel")
api.add_resource(updateProfiles,"/setProfile")
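# Illustrative request sketch (hypothetical host and credentials, not part of the original file):
#
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"personname": "Ana", "username": "ana01", "email": "[email protected]", "password": "secret"}' \
#        http://localhost:5000/signUp
#   curl -u ana01:secret http://localhost:5000/wellcome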
# EXECUTION CONFIGURATION
if __name__ == "__main__":
app.run(host= '0.0.0.0',debug=True)
|
python
|
from glitchtip.permissions import ScopedPermission
class ReleasePermission(ScopedPermission):
scope_map = {
"GET": ["project:read", "project:write", "project:admin", "project:releases"],
"POST": ["project:write", "project:admin", "project:releases"],
"PUT": ["project:write", "project:admin", "project:releases"],
"DELETE": ["project:admin", "project:releases"],
}
def get_user_scopes(self, obj, user):
return obj.organization.get_user_scopes(user)
class ReleaseFilePermission(ReleasePermission):
def get_user_scopes(self, obj, user):
return obj.release.organization.get_user_scopes(user)
|
python
|
# StandAlone Version
"""
Created on Thu Apr 2 19:28:15 2020
@author: Yao Tang
"""
import arcpy
import os
arcpy.env.overwriteOutput = True
arcpy.env.addOutputsToMap = True
## Specify workspace(Usually the folder of the .mxd file)
arcpy.env.workspace = "L:/Projects/3020/006-01/6-0 DRAWINGS AND FIGURES/6-2 GIS/GIS/shp"
## Specify the input folder of the photos
PhotosFolder = r"L:\Projects\3020\006-01\8-0 DESIGN PHASE\DATA AND INFORMATION\Task 3 Data Collection\Geotag2ndRound\MissingInShp"
## Specify the name and the path of the output layer (GeoPhotosToPoint is the name of the layer)
## Create a geodatabase
## (A database file, only one database file is needed for the project)
database_name = "Photos_YT_2.gdb"
try:
arcpy.CreateFileGDB_management(arcpy.env.workspace, database_name)
except:
print "File already created"
print "program proceed"
GridFolderList = os.listdir(PhotosFolder)
print GridFolderList
photoOption = "ALL_PHOTOS"
fieldName3 = "FacilityID"
fieldName4 = "Note"
for grid in GridFolderList:
PhotoFolderList = os.listdir(PhotosFolder +"/" + grid)
print PhotoFolderList
for folder in PhotoFolderList:
inFolder = PhotosFolder +"/" + grid + "/" + folder
outFeatures = database_name + "/" + grid + "_" + folder
badPhotosList = outFeatures + "_NoGPS"
arcpy.GeoTaggedPhotosToPoints_management(inFolder, outFeatures, badPhotosList, photoOption)
inFeatures = outFeatures
arcpy.AddXY_management(inFeatures)
arcpy.AddField_management(inFeatures, fieldName3, "TEXT")
arcpy.AddField_management(inFeatures, fieldName4, "TEXT")
|
python
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="crudini",
version="0.9.3",
author="Pรกdraig Brady",
author_email="[email protected]",
description=("A utility for manipulating ini files"),
license="GPLv2",
keywords="ini config edit",
url="http://github.com/pixelb/crudini",
long_description=read('README'),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Utilities",
"Topic :: System :: Systems Administration",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
"Programming Language :: Python :: 2",
],
install_requires=['iniparse>=0.3.2'],
scripts=["crudini"]
)
|
python
|
def abc209d():
from collections import deque
n, Q = map(int, input().split())
g = [list() for _ in range(n)]
for _ in range(n - 1):
a, b = map(int, input().split())
a, b = a - 1, b - 1
g[a].append(b)
g[b].append(a)
c = [-1] * n
q = deque([0])
c[0] = 0
while len(q) > 0:
node = q.popleft()
for nxt in g[node]:
if c[nxt] != -1: continue
c[nxt] = 1 - c[node]
q.append(nxt)
ans = []
for _ in range(Q):
a, b = map(int, input().split())
a, b = a - 1, b - 1
if c[a] == c[b]:
ans.append("Town")
else:
ans.append("Road")
for item in ans:
print(item)
abc209d()
|
python
|
from sim.agents.agents import *
from sim.agents.multiagents import *
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016, French National Center for Scientific Research (CNRS)
# Distributed under the (new) BSD License. See LICENSE for more info.
from pyqtgraph.Qt import QtCore
import pyqtgraph as pg
from pyqtgraph.util.mutex import Mutex
import numpy as np
from ..core import (Node, register_node_type, ThreadPollInput)
from ..core.stream.ringbuffer import RingBuffer
import distutils.version
try:
import scipy.signal
HAVE_SCIPY = True
# scipy.signal.sosfilt was introduced in scipy 0.16
assert distutils.version.LooseVersion(scipy.__version__)>'0.16'
except ImportError:
HAVE_SCIPY = False
try:
import pyopencl
mf = pyopencl.mem_flags
HAVE_PYOPENCL = True
except ImportError:
HAVE_PYOPENCL = False
class SosFiltfilt_Base:
def __init__(self, coefficients, nb_channel, dtype, chunksize, overlapsize):
self.coefficients = coefficients
if self.coefficients.ndim==2:
self.nb_section = self.coefficients.shape[0]
if self.coefficients.ndim==3:
self.nb_section = self.coefficients.shape[1]
self.nb_channel = nb_channel
self.dtype = np.dtype(dtype)
self.chunksize = chunksize
self.overlapsize = overlapsize
shape = ((chunksize+overlapsize)*5, nb_channel)
self.forward_buffer = RingBuffer(shape, dtype, double=True)
self.backward_chunksize = self.chunksize+self.overlapsize
def compute_one_chunk(self, pos, data):
assert self.chunksize == data.shape[0], 'Chunksize is bad: {} instead of {}'.format(data.shape[0], self.chunksize)
forward_chunk_filtered = self.compute_forward(data)
#~ forward_chunk_filtered = forward_chunk_filtered.astype(self.dtype)
self.forward_buffer.new_chunk(forward_chunk_filtered, index=pos)
start = pos-self.chunksize-self.overlapsize
if start>0:
backward_chunk = self.forward_buffer.get_data(start,pos)
backward_filtered = self.compute_backward(backward_chunk)
backward_filtered = backward_filtered[:self.chunksize]
return pos-self.overlapsize, backward_filtered
elif pos>self.overlapsize:
backward_chunk = self.forward_buffer.get_data(0,pos)
backward_filtered = self.compute_backward(backward_chunk)
backward_filtered = backward_filtered[:-self.overlapsize]
return pos-self.overlapsize, backward_filtered
else:
return None, None
def compute_forward(self, chunk):
raise NotImplementedError
def compute_backward(self, chunk):
raise NotImplementedError
class SosFiltfilt_Scipy(SosFiltfilt_Base):
"""
Implementation with scipy.
"""
def __init__(self, coefficients, nb_channel, dtype, chunksize, overlapsize):
SosFiltfilt_Base.__init__(self, coefficients, nb_channel, dtype, chunksize, overlapsize)
self.zi = np.zeros((self.nb_section, 2, self.nb_channel), dtype= dtype)
def compute_forward(self, chunk):
forward_chunk_filtered, self.zi = scipy.signal.sosfilt(self.coefficients, chunk, zi=self.zi, axis=0)
forward_chunk_filtered = forward_chunk_filtered.astype(self.dtype)
return forward_chunk_filtered
def compute_backward(self, chunk):
backward_filtered = scipy.signal.sosfilt(self.coefficients, chunk[::-1, :], zi=None, axis=0)
backward_filtered = backward_filtered[::-1, :]
backward_filtered = backward_filtered.astype(self.dtype)
return backward_filtered
class SosFiltfilt_OpenCl_Base(SosFiltfilt_Base):
def __init__(self, coefficients, nb_channel, dtype, chunksize, overlapsize):
SosFiltfilt_Base.__init__(self, coefficients, nb_channel, dtype, chunksize, overlapsize)
assert self.dtype == np.dtype('float32')
assert self.chunksize is not None, 'chunksize for opencl must be fixed'
self.coefficients = self.coefficients.astype(self.dtype)
if self.coefficients.ndim==2: #(nb_section, 6) to (nb_channel, nb_section, 6)
self.coefficients = np.tile(self.coefficients[None,:,:], (nb_channel, 1,1))
if not self.coefficients.flags['C_CONTIGUOUS']:
self.coefficients = self.coefficients.copy()
assert self.coefficients.shape[0]==self.nb_channel, 'wrong coefficients.shape'
assert self.coefficients.shape[2]==6, 'wrong coefficients.shape'
self.nb_section = self.coefficients.shape[1]
self.ctx = pyopencl.create_some_context()
#TODO : add arguments gpu_platform_index/gpu_device_index
#self.devices = [pyopencl.get_platforms()[self.gpu_platform_index].get_devices()[self.gpu_device_index] ]
#self.ctx = pyopencl.Context(self.devices)
self.queue = pyopencl.CommandQueue(self.ctx)
#host arrays
self.zi1 = np.zeros((nb_channel, self.nb_section, 2), dtype= self.dtype)
self.zi2 = np.zeros((nb_channel, self.nb_section, 2), dtype= self.dtype)
self.output1 = np.zeros((self.chunksize, self.nb_channel), dtype= self.dtype)
self.output2 = np.zeros((self.backward_chunksize, self.nb_channel), dtype= self.dtype)
#GPU buffers
self.coefficients_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.coefficients)
self.zi1_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.zi1)
self.zi2_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.zi2)
self.input1_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE, size=self.output1.nbytes)
self.output1_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE, size=self.output1.nbytes)
self.input2_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE, size=self.output2.nbytes)
self.output2_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE, size=self.output2.nbytes)
#nb works
kernel = self.kernel%dict(forward_chunksize=self.chunksize, backward_chunksize=self.backward_chunksize,
nb_section=self.nb_section, nb_channel=self.nb_channel)
prg = pyopencl.Program(self.ctx, kernel)
self.opencl_prg = prg.build(options='-cl-mad-enable')
class SosFilfilt_OpenCL_V1(SosFiltfilt_OpenCl_Base):
def __init__(self, coefficients, nb_channel, dtype, chunksize, overlapsize):
SosFiltfilt_OpenCl_Base.__init__(self, coefficients, nb_channel, dtype, chunksize, overlapsize)
self.global_size = (self.nb_channel, )
self.local_size = (self.nb_channel, )
def compute_forward(self, chunk):
if not chunk.flags['C_CONTIGUOUS']:
chunk = chunk.copy()
pyopencl.enqueue_copy(self.queue, self.input1_cl, chunk)
kern_call = getattr(self.opencl_prg, 'forward_filter')
event = kern_call(self.queue, self.global_size, self.local_size,
self.input1_cl, self.output1_cl, self.coefficients_cl, self.zi1_cl)
event.wait()
pyopencl.enqueue_copy(self.queue, self.output1, self.output1_cl)
forward_chunk_filtered = self.output1
return forward_chunk_filtered
def compute_backward(self, chunk):
if not chunk.flags['C_CONTIGUOUS']:
chunk = chunk.copy()
self.zi2[:]=0
pyopencl.enqueue_copy(self.queue, self.zi2_cl, self.zi2)
if chunk.shape[0]==self.backward_chunksize:
pyopencl.enqueue_copy(self.queue, self.input2_cl, chunk)
else:
# side effect at the beginning
chunk2 = np.zeros((self.backward_chunksize, self.nb_channel), dtype=self.dtype)
chunk2[-chunk.shape[0]:, :] = chunk
pyopencl.enqueue_copy(self.queue, self.input2_cl, chunk2)
kern_call = getattr(self.opencl_prg, 'backward_filter')
event = kern_call(self.queue, self.global_size, self.local_size,
self.input2_cl, self.output2_cl, self.coefficients_cl, self.zi2_cl)
event.wait()
pyopencl.enqueue_copy(self.queue, self.output2, self.output2_cl)
if chunk.shape[0]==self.backward_chunksize:
forward_chunk_filtered = self.output2
else:
# side effect at the beginning
forward_chunk_filtered = self.output2[-chunk.shape[0]:, :]
return forward_chunk_filtered
kernel = """
#define forward_chunksize %(forward_chunksize)d
#define backward_chunksize %(backward_chunksize)d
#define nb_section %(nb_section)d
#define nb_channel %(nb_channel)d
__kernel void sos_filter(__global float *input, __global float *output, __constant float *coefficients,
__global float *zi, int chunksize, int direction) {
int chan = get_global_id(0); //channel indice
int offset_filt2; //offset channel within section
int offset_zi = chan*nb_section*2;
int idx;
float w0, w1,w2;
float res;
for (int section=0; section<nb_section; section++){
offset_filt2 = chan*nb_section*6+section*6;
w1 = zi[offset_zi+section*2+0];
w2 = zi[offset_zi+section*2+1];
for (int s=0; s<chunksize;s++){
if (direction==1) {idx = s*nb_channel+chan;}
else if (direction==-1) {idx = (chunksize-s-1)*nb_channel+chan;}
if (section==0) {w0 = input[idx];}
else {w0 = output[idx];}
w0 -= coefficients[offset_filt2+4] * w1;
w0 -= coefficients[offset_filt2+5] * w2;
res = coefficients[offset_filt2+0] * w0 + coefficients[offset_filt2+1] * w1 + coefficients[offset_filt2+2] * w2;
w2 = w1; w1 =w0;
output[idx] = res;
}
zi[offset_zi+section*2+0] = w1;
zi[offset_zi+section*2+1] = w2;
}
}
__kernel void forward_filter(__global float *input, __global float *output, __constant float *coefficients, __global float *zi){
sos_filter(input, output, coefficients, zi, forward_chunksize, 1);
}
__kernel void backward_filter(__global float *input, __global float *output, __constant float *coefficients, __global float *zi) {
sos_filter(input, output, coefficients, zi, backward_chunksize, -1);
}
"""
class SosFilfilt_OpenCL_V3(SosFiltfilt_OpenCl_Base):
def __init__(self, coefficients, nb_channel, dtype, chunksize, overlapsize):
SosFiltfilt_OpenCl_Base.__init__(self, coefficients, nb_channel, dtype, chunksize, overlapsize)
self.global_size = (self.nb_channel, self.nb_section)
self.local_size = (1, self.nb_section)
def compute_forward(self, chunk):
if not chunk.flags['C_CONTIGUOUS']:
chunk = chunk.copy()
pyopencl.enqueue_copy(self.queue, self.input1_cl, chunk)
kern_call = getattr(self.opencl_prg, 'forward_filter')
event = kern_call(self.queue, self.global_size, self.local_size,
self.input1_cl, self.output1_cl, self.coefficients_cl, self.zi1_cl)
event.wait()
pyopencl.enqueue_copy(self.queue, self.output1, self.output1_cl)
forward_chunk_filtered = self.output1
return forward_chunk_filtered
def compute_backward(self, chunk):
if not chunk.flags['C_CONTIGUOUS']:
chunk = chunk.copy()
self.zi2[:]=0
pyopencl.enqueue_copy(self.queue, self.zi2_cl, self.zi2)
if chunk.shape[0]==self.backward_chunksize:
pyopencl.enqueue_copy(self.queue, self.input2_cl, chunk)
else:
# side effect at the beginning
chunk2 = np.zeros((self.backward_chunksize, self.nb_channel), dtype=self.dtype)
chunk2[-chunk.shape[0]:, :] = chunk
pyopencl.enqueue_copy(self.queue, self.input2_cl, chunk2)
kern_call = getattr(self.opencl_prg, 'backward_filter')
event = kern_call(self.queue, self.global_size, self.local_size,
self.input2_cl, self.output2_cl, self.coefficients_cl, self.zi2_cl)
event.wait()
pyopencl.enqueue_copy(self.queue, self.output2, self.output2_cl)
if chunk.shape[0]==self.backward_chunksize:
forward_chunk_filtered = self.output2
else:
# side effect at the beginning
forward_chunk_filtered = self.output2[-chunk.shape[0]:, :]
return forward_chunk_filtered
kernel = """
#define forward_chunksize %(forward_chunksize)d
#define backward_chunksize %(backward_chunksize)d
#define nb_section %(nb_section)d
#define nb_channel %(nb_channel)d
__kernel void sos_filter(__global float *input, __global float *output, __constant float *coefficients,
__global float *zi, int chunksize, int direction) {
int chan = get_global_id(0); //channel indice
int section = get_global_id(1); //section indice
int offset_filt2; //offset channel within section
int offset_zi = chan*nb_section*2;
int idx;
float w0, w1,w2;
float res;
int s2;
w1 = zi[offset_zi+section*2+0];
w2 = zi[offset_zi+section*2+1];
for (int s=0; s<chunksize+(3*nb_section);s++){
barrier(CLK_GLOBAL_MEM_FENCE);
s2 = s-section*3;
if (s2>=0 && (s2<chunksize)){
offset_filt2 = chan*nb_section*6+section*6;
if (direction==1) {idx = s2*nb_channel+chan;}
else if (direction==-1) {idx = (chunksize-s2-1)*nb_channel+chan;}
if (section==0) {w0 = input[idx];}
else {w0 = output[idx];}
w0 -= coefficients[offset_filt2+4] * w1;
w0 -= coefficients[offset_filt2+5] * w2;
res = coefficients[offset_filt2+0] * w0 + coefficients[offset_filt2+1] * w1 + coefficients[offset_filt2+2] * w2;
w2 = w1; w1 =w0;
output[idx] = res;
}
}
zi[offset_zi+section*2+0] = w1;
zi[offset_zi+section*2+1] = w2;
}
__kernel void forward_filter(__global float *input, __global float *output, __constant float *coefficients, __global float *zi){
sos_filter(input, output, coefficients, zi, forward_chunksize, 1);
}
__kernel void backward_filter(__global float *input, __global float *output, __constant float *coefficients, __global float *zi) {
sos_filter(input, output, coefficients, zi, backward_chunksize, -1);
}
"""
sosfiltfilt_engines = { 'scipy' : SosFiltfilt_Scipy, 'opencl' : SosFilfilt_OpenCL_V1, 'opencl3' : SosFilfilt_OpenCL_V3 }
class SosFiltfiltThread(ThreadPollInput):
def __init__(self, input_stream, output_stream, timeout = 200, parent = None):
ThreadPollInput.__init__(self, input_stream, timeout = timeout, return_data=True, parent = parent)
self.output_stream = output_stream
self.mutex = Mutex()
def process_data(self, pos, data):
with self.mutex:
pos2, chunk_filtered = self.filter_engine.compute_one_chunk(pos, data)
if pos2 is not None:
self.output_stream.send(chunk_filtered, index=pos2)
def set_params(self, engine, coefficients, nb_channel, dtype, chunksize, overlapsize):
assert engine in sosfiltfilt_engines
EngineClass = sosfiltfilt_engines[engine]
with self.mutex:
self.filter_engine = EngineClass(coefficients, nb_channel, dtype, chunksize, overlapsize)
class OverlapFiltfilt(Node, QtCore.QObject):
"""
Node for filtering with forward-backward method (filtfilt).
This use sliding overlap technics.
The chunksize and the overlapsize are important for the accuracy of filtering.
You need to study them carfully, otherwise the result should be the same as a
real filtfilt ona long term signal. You must check the residual between real offline filtfitl
and this online OverlapFiltfilt.
Note that the chunksize have a strong effect on low frequency.
This uses Second Order (sos) coeeficient.
It internally use scipy.signal.sosfilt which is available only on scipy >0.16
The chunksize need to be fixed.
For overlapsize there are 2 cases:
1- overlapsize<chunksize/2 : natural case. each chunk partailly overlap.
The overlap are on sides, the central part come from one chunk.
2 - overlapsize>chunksize/2: chunk are fully averlapping. There is no central part.
In the 2 cases, for each arrival of new chunk at [-chunksize:],
the computed chunk at [-(chunksize+overlapsize):-overlapsize] is released.
The coefficients.shape must be (nb_section, 6).
If pyopencl is avaible you can do SosFilter.configure(engine='opencl')
In that cases the coefficients.shape can also be (nb_channel, nb_section, 6)
this help for having different filter on each channels.
The opencl engine prefer inernally (channel, sample) ordered.
In case not a copy is done. So the input ordering do impact performences.
"""
_input_specs = {'signals' : dict(streamtype = 'signals')}
_output_specs = {'signals' : dict(streamtype = 'signals')}
def __init__(self, parent = None, **kargs):
QtCore.QObject.__init__(self, parent)
Node.__init__(self, **kargs)
assert HAVE_SCIPY, "SosFilter need scipy>0.16"
def _configure(self, chunksize=1024, overlapsize=512, coefficients = None, engine='scipy'):
"""
Set the coefficient of the filter.
See http://scipy.github.io/devdocs/generated/scipy.signal.sosfilt.html for details.
"""
self.chunksize = chunksize
self.overlapsize = overlapsize
self.engine = engine
self.set_coefficients(coefficients)
def after_input_connect(self, inputname):
self.nb_channel = self.input.params['shape'][1]
for k in ['sample_rate', 'dtype', 'shape', ]:
self.output.spec[k] = self.input.params[k]
def _initialize(self):
self.thread = SosFiltfiltThread(self.input, self.output)
self.thread.set_params(self.engine, self.coefficients, self.nb_channel,
self.output.params['dtype'], self.chunksize, self.overlapsize)
def _start(self):
self.thread.last_pos = None
self.thread.start()
def _stop(self):
self.thread.stop()
self.thread.wait()
def set_coefficients(self, coefficients):
self.coefficients = coefficients
if self.initialized():
self.thread.set_params(self.engine, self.coefficients, self.nb_channel,
self.output.params['dtype'], self.chunksize, self.overlapsize)
register_node_type(OverlapFiltfilt)
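# Illustrative configuration sketch (assumes a pyacq-style input stream `instream` and a
# `sample_rate` variable already exist; the stream-connection calls follow pyacq conventions
# and are not part of this module):
#
#   import scipy.signal
#   coefficients = scipy.signal.iirfilter(7, [45. / sample_rate * 2., 55. / sample_rate * 2.],
#                                         btype='bandstop', ftype='butter', output='sos')
#   node = OverlapFiltfilt()
#   node.configure(chunksize=1024, overlapsize=512, coefficients=coefficients, engine='scipy')
#   node.input.connect(instream)
#   node.output.configure()
#   node.initialize()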
|
python
|
"""
Copyright 2018 The Johns Hopkins University Applied Physics Laboratory.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Trains a dense (per-pixel) classifier on EM data.
"""
from __future__ import print_function
__author__ = 'mjp, Nov 2016'
__license__ = 'Apache 2.0'
import os
import sys
import time
import json
import numpy as np
np.random.seed(9999)
from keras import backend as K
from intern.remote.boss import BossRemote
from intern.resource.boss.resource import *
from cnn_tools import *
from data_tools import *
K.set_image_dim_ordering('th')
if __name__ == '__main__':
with open('/jobs/train_job_params.json') as f:
params = json.load(f)
# -------------------------------------------------------------------------
rmt = BossRemote('/jobs/boss_config.cfg')
img_chan = ChannelResource(params['img_channel'],
params['collection'],
params['experiment'],
type='image',
datatype='uint8')
lbl_chan = ChannelResource(params['lbl_channel'],
params['collection'],
params['experiment'],
type='annotation',
datatype='uint64')
# Get the image data from the BOSS
x_train = rmt.get_cutout(img_chan, params['resolution'],
params['x_rng'],
params['y_rng'],
params['z_rng'])
y_train = rmt.get_cutout(lbl_chan, params['resolution'],
params['x_rng'],
params['y_rng'],
params['z_rng'])
# Data must be [slices, chan, row, col] (i.e., [Z, chan, Y, X])
x_train = x_train[:, np.newaxis, :, :].astype(np.float32)
y_train = y_train[:, np.newaxis, :, :].astype(np.float32)
# Pixel values must be in [0,1]
x_train /= 255.
y_train = (y_train > 0).astype('float32')
tile_size = tuple(params['tile_size'])
train_pct = params['train_pct']
# -------------------------------------------------------------------------
# Data must be [slices, chan, row, col] (i.e., [Z, chan, Y, X])
# split into train and valid
train_slices = range(int(train_pct * x_train.shape[0]))
x_train = x_train[train_slices, ...]
y_train = y_train[train_slices, ...]
valid_slices = range(int(train_pct * x_train.shape[0]), x_train.shape[0])
x_valid = x_train[valid_slices, ...]
y_valid = y_train[valid_slices, ...]
print('[info]: training data has shape: %s' % str(x_train.shape))
print('[info]: training labels has shape: %s' % str(y_train.shape))
print('[info]: validation data has shape: %s' % str(x_valid.shape))
print('[info]: validation labels has shape: %s' % str(y_valid.shape))
print('[info]: tile size: %s' % str(tile_size))
# train model
tic = time.time()
model = create_unet((1, tile_size[0], tile_size[1]))
if params['do_synapse']:
model.compile(optimizer=Adam(lr=1e-4),
loss=pixelwise_crossentropy_loss_w,
metrics=[f1_score])
else:
model.compile(optimizer=Adam(lr=1e-4),
loss=pixelwise_crossentropy_loss,
metrics=[f1_score])
# if weights_file:
# model.load_weights(weights_file)
train_model(x_train, y_train, x_valid, y_valid, model,
params['output_dir'], do_augment=params['do_augment'],
n_epochs=params['n_epochs'], mb_size=params['mb_size'],
n_mb_per_epoch=params['n_mb_per_epoch'],
save_freq=params['save_freq'])
print('[info]: total time to train model: %0.2f min' %
((time.time() - tic)/60.))
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
|
python
|
"Thread safe RLock defined for lru cache."
# https://stackoverflow.com/questions/16567958/when-and-how-to-use-pythons-rlock
def RLock():
"""
Make the container thread safe if running in a threaded context.
"""
import threading
return threading.RLock()
|
python
|
import logging
_LOGGER = logging.getLogger(__name__)
def decode(packet):
"""
https://github.com/telldus/telldus/blob/master/telldus-core/service/ProtocolEverflourish.cpp
"""
data = packet["data"]
house = data & 0xFFFC00
house >>= 10
unit = data & 0x300
unit >>= 8
unit += 1
method = data & 0xF
# _LOGGER.debug("Everflourish (data=%x, house=%d, "
# "unit=%d, method=%d)",
# data, house, unit, method)
if house > 16383 or unit < 1 or unit > 4:
# not everflourish
return
if method == 0:
method = "turnoff"
elif method == 15:
method = "turnon"
elif method == 10:
method = "learn"
else:
# not everflourish
return
return dict(
packet,
_class="command",
model="selflearning",
house=house,
unit=unit,
method=method,
)
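# Worked example (hypothetical packet value): decode({"data": 0x0ABC0F}) extracts
# house = (0x0ABC0F & 0xFFFC00) >> 10 = 687, unit = ((0x0ABC0F & 0x300) >> 8) + 1 = 1
# and method = 0x0ABC0F & 0xF = 15, so it returns a "turnon" command for house 687, unit 1.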
def encode(method):
"""
https://github.com/telldus/telldus/blob/master/telldus-core/service/ProtocolEverflourish.cpp
"""
raise NotImplementedError()
|
python
|
# script to copy history from a FITS table to the FITS header
# FITS images only, works in current directory
# Argument:
# 1) Name of input FITS
# example:
# Python scriptHi2Header.py myImage.fits
import sys, Obit, Image, History, OSystem, OErr
# Init Obit
err=OErr.OErr()
ObitSys=OSystem.OSystem ("Hi2Header", 1, 100, 1, ["None"], 1, ["./"], 1, 0, err)
OErr.printErrMsg(err, "Error with Obit startup")
# Files (FITS)
inFile = sys.argv[1]
inDisk = 0
# Set data
inImage = Image.newPFImage("Input image", inFile, inDisk, 1, err)
OErr.printErrMsg(err, "Error initializing")
# For debugging
#Obit.Bomb()
# Make history
inInfo = Image.PGetList(inImage)
outInfo = Image.PGetList(inImage)
inHistory = History.History("history", inInfo, err)
outHistory = History.History("history", outInfo, err)
OErr.printErrMsg(err, "Error initializing history")
History.PCopy2Header(inHistory, outHistory, err)
OErr.printErrMsg(err, "Error copying history to FITS header")
# Say something
print "Copied History table to FITS header for",inFile
# Shutdown Obit
OErr.printErr(err)
|
python
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'superDigit' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. STRING n
# 2. INTEGER k
#
def superDigit(n, k):
if((len(n) == 1)and(k>1)):
n,k = str(int(n)*k),1
len_n = len(n)
if(len_n==1):
return (int(n))
else:
suma = 0
isImp = len_n%2
for i in range(0,int((len_n/2))+isImp):
pos_f = i
pos_b = len_n-i-1
if(pos_f != pos_b):
suma += (int(n[pos_f])+int(n[pos_b]))
else:
suma += int(n[pos_f])
return superDigit(str(suma),k)
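# Worked example: superDigit("9875", 4) -> the digits of "9875" sum to 29, then "29" -> 11
# and "11" -> 2; once a single digit is reached the pending k is applied (2 * 4 = 8 -> "8"),
# and 8 is returned, matching the super digit of "9875" concatenated 4 times.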
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
first_multiple_input = input().rstrip().split()
n = first_multiple_input[0]
k = int(first_multiple_input[1])
result = superDigit(n, k)
fptr.write(str(result) + '\n')
fptr.close()
|
python
|
#
# Copyright 2015-2020 Andrey Galkin <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function, absolute_import
import unittest
import subprocess
import os
import sys
import stat
import shutil
import json
import platform
from collections import OrderedDict
from futoin.cid.util import executil
CIDTEST_BIN = os.environ.get('CIDTEST_BIN', None)
if CIDTEST_BIN:
CIDTEST_BIN_EXT = False
else :
CIDTEST_BIN_EXT = True
CIDTEST_BIN = os.path.dirname( __file__ ) + '/../bin/cid'
class cid_UTBase ( unittest.TestCase ) :
IS_LINUX = platform.system() == 'Linux'
IS_MACOS = platform.system() == 'Darwin'
NO_COMPILE = os.environ.get('CIDTEST_NO_COMPILE', '0') == '1'
ALLOW_SRC_BUILDS = not NO_COMPILE
CIDTEST_BIN = CIDTEST_BIN
TEST_DIR = 'invalid'
TEST_RUN_DIR = os.environ.get('CIDTEST_RUN_DIR', os.path.realpath(
os.path.join(os.path.dirname(__file__), '..', 'testrun')
))
_create_test_dir = False
__test__ = False
_dev_null = open(os.devnull, 'w')
_stdout_log = open(os.path.join(TEST_RUN_DIR, 'stdout.log'), 'a+')
#_stderr_log = open(os.path.join(TEST_RUN_DIR, 'stderr.log'), 'a+')
_stderr_log = _stdout_log
@classmethod
def setUpClass( cls ):
print('Python: ' + sys.executable)
try:
os.makedirs( cls.TEST_RUN_DIR )
except:
pass
os.chdir( cls.TEST_RUN_DIR )
os.environ['HOME'] = cls.TEST_RUN_DIR
cache_dir = os.path.join(os.environ['HOME'], '.cache', 'futoin-cid')
for cleanup_dir in (cache_dir, cls.TEST_DIR):
if os.path.exists( cleanup_dir ) :
for ( path, dirs, files ) in os.walk( cleanup_dir ) :
for id in dirs + files :
try:
os.chmod( os.path.join( path, id ), stat.S_IRWXU )
except:
pass
shutil.rmtree( cleanup_dir )
if cls._create_test_dir:
os.mkdir(cls.TEST_DIR)
os.chdir(cls.TEST_DIR)
def _goToBase( self ):
os.chdir( self.TEST_DIR )
def setUp( self ):
self._goToBase()
@classmethod
def _call_cid( cls, args, stdin=None, stdout=None, returncode=0, ignore=False, retout=False, merge_stderr=False ) :
cmd = []
if CIDTEST_BIN_EXT:
cmd.append(sys.executable)
if retout:
(r, w) = os.pipe()
stdout = w
cmd.append( CIDTEST_BIN )
cmd += args
if stdout is None:
stdout = cls._stdout_log
stderr = cls._stderr_log
if merge_stderr:
stderr=subprocess.STDOUT
print( 'Test Call: ' + subprocess.list2cmdline(cmd), file=cls._stderr_log )
cls._stderr_log.flush()
p = subprocess.Popen(
cmd,
bufsize=-1,
stdin=subprocess.PIPE,
stdout=stdout,
stderr=stderr
)
if stdin is not None:
p.stdin.write( stdin )
p.wait()
if retout:
os.close(w)
res = os.read(r, 32*1024)
os.close(r)
if ignore:
return p.returncode == returncode
if p.returncode != returncode:
raise RuntimeError( "Failed" )
if retout:
return executil.toString(res)
return True
@classmethod
def _writeFile( cls, file_name, content ):
with open(file_name, 'w') as content_file:
content_file.write( content )
content_file.write( "\n" )
@classmethod
def _writeJSON( cls, file_name, content ):
cls._writeFile( file_name, json.dumps( content ) )
@classmethod
def _readFile( cls, file_name ):
with open(file_name, 'r') as content_file:
content = content_file.read()
return content
@classmethod
def _readJSON( cls, file_name ):
content = cls._readFile(file_name)
object_pairs_hook = lambda pairs: OrderedDict( pairs )
return json.loads( content, object_pairs_hook=object_pairs_hook )
@classmethod
def _redirectAsyncStdIO( cls ):
os.dup2(cls._dev_null.fileno(), 0)
os.dup2(cls._stdout_log.fileno(), 1)
os.dup2(cls._stderr_log.fileno(), 2)
def _firstGet(self, url):
import requests, time
for i in range(15):
try:
res = requests.get(url, timeout=3)
if res.ok:
return res
else:
time.sleep(1)
except:
time.sleep(1)
else:
self.assertTrue(False)
class cid_Tool_UTBase ( cid_UTBase ) :
__test__ = False
TOOL_NAME = 'invalid'
TOOL_ENV = {}
_env_backup = None
@classmethod
def setUpClass( cls ):
cls._env_backup = {}
cls.TEST_DIR = os.path.join(cls.TEST_RUN_DIR, 'tool_'+cls.TOOL_NAME)
super(cid_Tool_UTBase, cls).setUpClass()
os.mkdir( cls.TEST_DIR )
os.chdir( cls.TEST_DIR )
for k, v in cls.TOOL_ENV.items():
cls._env_backup[k] = os.environ.get(k, None)
os.environ[k] = v
@classmethod
def tearDownClass( cls ):
for k, v in cls._env_backup.items():
if v:
os.environ[k] = v
else:
del os.environ[k]
|
python
|
import stringcase
from importlib import import_module
from .metadata import Metadata
from .resource import Resource
from .package import Package
from . import helpers
from . import errors
class Pipeline(Metadata):
"""Pipeline representation
API | Usage
-------- | --------
Public | `from frictionless import Pipeline`
    For now, only the `package` type is supported, where `steps` should
    conform to the `dataflows` processors. The Pipeline class inherits
    all of the metadata functionality from the Metadata class.
```python
pipeline = Pipeline(
{
"type": "package",
"steps": [
{"type": "load", "spec": {"loadSource": "data/table.csv"}},
{"type": "set_type", "spec": {"name": "id", "type": "string"}},
{"type": "dump_to_path", "spec": {"outPath": tmpdir}},
],
}
)
pipeline.run()
```
Parameters:
descriptor (str|dict): pipeline descriptor
name? (str): pipeline name
type? (str): pipeline type
steps? (dict[]): pipeline steps
"""
def __init__(self, descriptor=None, *, name=None, type=None, source=None, steps=None):
self.setinitial("name", name)
self.setinitial("type", type)
self.setinitial("source", source)
self.setinitial("steps", steps)
super().__init__(descriptor)
@Metadata.property
def name(self):
"""
Returns:
str?: pipeline name
"""
return self.get("name")
@Metadata.property
def type(self):
"""
Returns:
str?: pipeline type
"""
return self.get("type", "resource")
@Metadata.property
def source(self):
"""
Returns:
dict[]?: pipeline source
"""
return self.get("source")
@Metadata.property
def steps(self):
"""
Returns:
dict[]?: pipeline steps
"""
return self.get("steps")
# Run
def run(self):
"""Run the pipeline"""
steps = import_module("frictionless.steps")
transforms = import_module("frictionless.transform")
# TODO: it will not work for nested steps like steps.resource_transform
items = []
for step in self.steps:
func = getattr(steps, stringcase.snakecase(step["type"]))
items.append(func(**helpers.create_options(step["spec"])))
if self.type == "resource":
source = Resource(self.source)
return transforms.transform_resource(source, steps=items)
else:
source = Package(self.source)
return transforms.transform_package(source, steps=items)
# Metadata
metadata_Error = errors.PipelineError
metadata_profile = { # type: ignore
"type": "object",
"required": ["type", "source", "steps"],
"properties": {
"name": {"type": "string"},
"type": {"type": "string"},
"source": {"type": "object"},
"steps": {
"type": "array",
"items": {"type": "object", "required": ["type", "spec"]},
},
},
}
|
python
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import re
from contextlib import contextmanager
from dataclasses import dataclass
from textwrap import dedent
from typing import Any
from pants.engine.internals.engine_testutil import (
assert_equal_with_printing,
remove_locations_from_traceback,
)
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.rules import Get, rule
from pants.engine.unions import UnionRule, union
from pants.testutil.rule_runner import QueryRule
from pants.testutil.test_base import TestBase
@dataclass(frozen=True)
class A:
pass
@dataclass(frozen=True)
class B:
pass
def fn_raises(x):
raise Exception(f"An exception for {type(x).__name__}")
@rule(desc="Nested raise")
def nested_raise(b: B) -> A:
fn_raises(b)
return A()
@rule
def consumes_a_and_b(a: A, b: B) -> str:
return str(f"{a} and {b}")
@dataclass(frozen=True)
class C:
pass
@rule
def transitive_b_c(c: C) -> B:
return B()
@dataclass(frozen=True)
class D:
b: B
@rule
async def transitive_coroutine_rule(c: C) -> D:
b = await Get(B, C, c)
return D(b)
@union
class UnionBase:
pass
@union
class UnionWithNonMemberErrorMsg:
@staticmethod
def non_member_error_message(subject):
return f"specific error message for {type(subject).__name__} instance"
class UnionWrapper:
def __init__(self, inner):
self.inner = inner
class UnionA:
@staticmethod
def a() -> A:
return A()
@rule
def select_union_a(union_a: UnionA) -> A:
return union_a.a()
class UnionB:
@staticmethod
def a() -> A:
return A()
@rule
def select_union_b(union_b: UnionB) -> A:
return union_b.a()
# TODO: add MultiGet testing for unions!
@rule
async def a_union_test(union_wrapper: UnionWrapper) -> A:
union_a = await Get(A, UnionBase, union_wrapper.inner)
return union_a
class UnionX:
pass
@rule
async def error_msg_test_rule(union_wrapper: UnionWrapper) -> UnionX:
# NB: We install a UnionRule to make UnionWrapper a member of this union, but then we pass the
# inner value, which is _not_ registered.
_ = await Get(A, UnionWithNonMemberErrorMsg, union_wrapper.inner)
raise AssertionError("The statement above this one should have failed!")
class TypeCheckFailWrapper:
"""This object wraps another object which will be used to demonstrate a type check failure when
the engine processes an `await Get(...)` statement."""
def __init__(self, inner):
self.inner = inner
@rule
async def a_typecheck_fail_test(wrapper: TypeCheckFailWrapper) -> A:
# This `await` would use the `nested_raise` rule, but it won't get to the point of raising since
# the type check will fail at the Get.
_ = await Get(A, B, wrapper.inner) # noqa: F841
return A()
@dataclass(frozen=True)
class CollectionType:
# NB: We pass an unhashable type when we want this to fail at the root, and a hashable type
# when we'd like it to succeed.
items: Any
@rule
async def c_unhashable(_: CollectionType) -> C:
# This `await` would use the `nested_raise` rule, but it won't get to the point of raising since
# the hashability check will fail.
_result = await Get(A, B, list()) # noqa: F841
return C()
@rule
def boolean_and_int(i: int, b: bool) -> A:
return A()
@contextmanager
def assert_execution_error(test_case, expected_msg):
with test_case.assertRaises(ExecutionError) as cm:
yield
test_case.assertIn(expected_msg, remove_locations_from_traceback(str(cm.exception)))
class SchedulerTest(TestBase):
@classmethod
def rules(cls):
return (
*super().rules(),
consumes_a_and_b,
QueryRule(str, (A, B)),
transitive_b_c,
QueryRule(str, (A, C)),
transitive_coroutine_rule,
QueryRule(D, (C,)),
UnionRule(UnionBase, UnionA),
UnionRule(UnionWithNonMemberErrorMsg, UnionWrapper),
select_union_a,
UnionRule(union_base=UnionBase, union_member=UnionB),
select_union_b,
a_union_test,
QueryRule(A, (UnionWrapper,)),
error_msg_test_rule,
QueryRule(UnionX, (UnionWrapper,)),
boolean_and_int,
QueryRule(A, (int, bool)),
)
def test_use_params(self):
# Confirm that we can pass in Params in order to provide multiple inputs to an execution.
a, b = A(), B()
result_str = self.request(str, [a, b])
self.assertEqual(result_str, consumes_a_and_b(a, b))
# And confirm that a superset of Params is also accepted.
result_str = self.request(str, [a, b, self])
self.assertEqual(result_str, consumes_a_and_b(a, b))
# But not a subset.
expected_msg = "No installed QueryRules can compute str given input Params(A), but"
with self.assertRaisesRegex(Exception, re.escape(expected_msg)):
self.request(str, [a])
def test_transitive_params(self):
# Test that C can be provided and implicitly converted into a B with transitive_b_c() to satisfy
# the selectors of consumes_a_and_b().
a, c = A(), C()
result_str = self.request(str, [a, c])
self.assertEqual(
remove_locations_from_traceback(result_str),
remove_locations_from_traceback(consumes_a_and_b(a, transitive_b_c(c))),
)
# Test that an inner Get in transitive_coroutine_rule() is able to resolve B from C due to
# the existence of transitive_b_c().
self.request(D, [c])
def test_consumed_types(self):
assert {A, B, C, str} == set(
self.scheduler.scheduler.rule_graph_consumed_types([A, C], str)
)
def test_strict_equals(self):
# With the default implementation of `__eq__` for boolean and int, `1 == True`. But in the
# engine that behavior would be surprising, and would cause both of these Params to intern
# to the same value, triggering an error. Instead, the engine additionally includes the
# type of a value in equality.
assert A() == self.request(A, [1, True])
@contextmanager
def _assert_execution_error(self, expected_msg):
with assert_execution_error(self, expected_msg):
yield
def test_union_rules(self):
self.request(A, [UnionWrapper(UnionA())])
self.request(A, [UnionWrapper(UnionB())])
# Fails due to no union relationship from A -> UnionBase.
with self._assert_execution_error("Type A is not a member of the UnionBase @union"):
self.request(A, [UnionWrapper(A())])
def test_union_rules_no_docstring(self):
with self._assert_execution_error("specific error message for UnionA instance"):
self.request(UnionX, [UnionWrapper(UnionA())])
class SchedulerWithNestedRaiseTest(TestBase):
@classmethod
def rules(cls):
return (
*super().rules(),
a_typecheck_fail_test,
c_unhashable,
nested_raise,
QueryRule(A, (TypeCheckFailWrapper,)),
QueryRule(A, (B,)),
QueryRule(C, (CollectionType,)),
)
def test_get_type_match_failure(self):
"""Test that Get(...)s are now type-checked during rule execution, to allow for union
types."""
with self.assertRaises(ExecutionError) as cm:
# `a_typecheck_fail_test` above expects `wrapper.inner` to be a `B`.
self.request(A, [TypeCheckFailWrapper(A())])
expected_regex = "WithDeps.*did not declare a dependency on JustGet"
self.assertRegex(str(cm.exception), expected_regex)
def test_unhashable_root_params_failure(self):
"""Test that unhashable root params result in a structured error."""
# This will fail at the rust boundary, before even entering the engine.
with self.assertRaisesRegex(TypeError, "unhashable type: 'list'"):
self.request(C, [CollectionType([1, 2, 3])])
def test_unhashable_get_params_failure(self):
"""Test that unhashable Get(...) params result in a structured error."""
# This will fail inside of `c_unhashable_dataclass`.
with self.assertRaisesRegex(ExecutionError, "unhashable type: 'list'"):
self.request(C, [CollectionType(tuple())])
def test_trace_includes_rule_exception_traceback(self):
# Execute a request that will trigger the nested raise, and then directly inspect its trace.
request = self.scheduler.execution_request([A], [B()])
_, throws = self.scheduler.execute(request)
with self.assertRaises(ExecutionError) as cm:
self.scheduler._raise_on_error([t for _, t in throws])
trace = remove_locations_from_traceback(str(cm.exception))
assert_equal_with_printing(
self,
dedent(
f"""\
1 Exception encountered:
Engine traceback:
in select
in {self.__module__}.{nested_raise.__name__}
Traceback (most recent call last):
File LOCATION-INFO, in nested_raise
fn_raises(b)
File LOCATION-INFO, in fn_raises
raise Exception(f"An exception for {{type(x).__name__}}")
Exception: An exception for B
"""
),
trace,
)
|
python
|
# Copyright 2016 Pavle Jonoski
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from troup.node import Node
import logging
def configure_node_parser():
parser = ArgumentParser(prog='troup', description='Run single node')
# Node
parser.add_argument('--node', help='Node ID')
parser.add_argument('--neighbours', default='', nargs='+', help='Neighbour nodes')
# Async IO server props
parser.add_argument('--host', default='', help='Async IO server hostname')
    parser.add_argument('--port', default=7000, type=int, help='Async IO server port')
# Store
parser.add_argument('--storage-root', default='.data', help='Root path of the storage directory')
# System statistics
    parser.add_argument('--stats-update-interval', default=30000, type=int, help='Statistics update interval in milliseconds')
parser.add_argument('--log-level', '-l', default='info', help='Logging level')
parser.add_argument('--lock', action='store_true', help='Write node info in global lock file')
parser.add_argument('--debug', action='store_true', help='Activate the debug command-line interactive interface')
parser.add_argument('-v', '--version', action='store_true', help='Print version and exit')
return parser
def run_node():
import signal
parser = configure_node_parser()
args = parser.parse_args()
if args.version:
from troup.metadata import __version__
print(__version__)
return
logging.basicConfig(level=getattr(logging, args.log_level.upper()))
config = {
'store': {
'path': args.storage_root
},
'server': {
'hostname': args.host,
'port': args.port
},
'stats': {
'update_interval': args.stats_update_interval
},
'neighbours': args.neighbours,
'lock': args.lock
}
node = Node(node_id=args.node, config=config)
def handle_node_shutdown(signal, frame):
node.stop()
signal.signal(signal.SIGINT, handle_node_shutdown)
if args.debug:
from troup.debug import run_debug_cli
run_debug_cli()
node.start()
return node
|
python
|
f = open('3_input.txt').read().splitlines()
def life_support_rating(o2, co2):
    return o2 * co2
def filter_ratings(values, i=0, co2=False):  # where `i` is the bit position
    if len(values) == 1:
        return values[0]
    count = 0
    for item in values:
        count += int(item[i])
    dom_num = 1 if count >= len(values) / 2 else 0  # prefers 1 in case of ties by default
    pref_num = dom_num if co2 == False else abs(dom_num - 1)  # pref_num is opposite if co2 reading sought
    values = [x for x in values if int(x[i]) == pref_num]
    return filter_ratings(values, i + 1, co2)
o2_rating = filter_ratings(f)  # oxygen generator rating
co2_rating = filter_ratings(f, co2=True)  # co2 scrubber rating
answer = life_support_rating(int(o2_rating, 2), int(co2_rating, 2))
print(answer)
|
python
|
import pytest
import numpy
from thinc.layers import Embed
from ...layers.uniqued import uniqued
from numpy.testing import assert_allclose
from hypothesis import given
from hypothesis.strategies import integers, lists, composite
ROWS = 10
# This test uses a newer hypothesis feature than the skanky flatmap-style
# I used previously. This is much nicer, although it still takes some getting
# used to. The key feature is this composite decorator. It injects a function,
# 'draw'.
@composite
def lists_of_integers(draw, columns=2, lo=0, hi=ROWS - 1):
# We call draw to get example values, which we can manipulate.
# Here we get a list of integers, where each member of the list
# should be between a min and max value.
int_list = draw(lists(integers(min_value=lo, max_value=hi)))
# Now we can use this int list to make an array, and it'll be the arrays
# that our functions receive.
# We trim the list, so we're of length divisible by columns.
int_list = int_list[len(int_list) % columns :]
# And make the array and reshape it.
array = numpy.array(int_list, dtype="uint64")
return array.reshape((-1, columns))
@pytest.fixture
def model(nO=128):
return Embed(nO, ROWS, column=0).initialize()
def test_uniqued_calls_init():
calls = []
embed = Embed(5, 5, column=0)
embed.init = lambda *args, **kwargs: calls.append(True)
embed.initialize()
assert calls == [True]
uembed = uniqued(embed)
uembed.initialize()
assert calls == [True, True]
@given(X=lists_of_integers(lo=0, hi=ROWS - 1))
def test_uniqued_doesnt_change_result(model, X):
umodel = uniqued(model, column=model.attrs["column"]).initialize()
Y, bp_Y = model(X, is_train=True)
Yu, bp_Yu = umodel(X, is_train=True)
assert_allclose(Y, Yu)
dX = bp_Y(Y)
dXu = bp_Yu(Yu)
assert_allclose(dX, dXu)
if X.size:
pass
# TODO: This test is a problem, because we exceed the embedding table.
# Fix it with a better cap.
# Check that different inputs do give different results
# Z, bp_Z = model(X + 1, is_train=True)
# with pytest.raises(AssertionError):
# assert_allclose(Y, Z)
|
python
|
from datasets.SOT.dataset import SingleObjectTrackingDatasetSequence_MemoryMapped
from ._common import _check_bounding_box_validity
class SOTSequenceSequentialSampler:
def __init__(self, sequence: SingleObjectTrackingDatasetSequence_MemoryMapped):
assert len(sequence) > 0
self.sequence = sequence
self.index = 0
def get_name(self):
return self.sequence.get_name()
def move_next(self):
if self.index + 1 >= len(self.sequence):
return False
self.index += 1
return True
def current(self):
frame = self.sequence[self.index]
assert any(v > 0 for v in frame.get_image_size())
image_path = frame.get_image_path()
bounding_box = frame.get_bounding_box()
bounding_box_validity_flag = frame.get_bounding_box_validity_flag()
bounding_box = _check_bounding_box_validity(bounding_box, bounding_box_validity_flag, frame.get_image_size())
return image_path, bounding_box
def reset(self):
self.index = 0
def length(self):
return len(self.sequence)
|
python
|
from django.db import models
from django.urls import reverse
class ImportantDate(models.Model):
date = models.DateField()
desc = models.CharField(max_length=100)
def __str__(self):
return "{} - {}".format(self.date, self.desc)
def get_absolute_url(self):
return reverse('formschapter:impdate_detail', args=[str(self.pk)])
class Meta:
ordering = ('-date',)
|
python
|
from django.contrib import admin
from purchasing.models import PurchasedOrder
class PurchasedOrderAdmin(admin.ModelAdmin):
readonly_fields = ['expiration_date']
admin.site.register(PurchasedOrder, PurchasedOrderAdmin)
|
python
|
from typing import Dict, Optional
from sqlalchemy import column, literal_column, select
from panoramic.cli.husky.core.sql_alchemy_util import (
quote_identifier,
safe_identifier,
sort_columns,
)
from panoramic.cli.husky.service.blending.blending_taxon_manager import (
BlendingTaxonManager,
)
from panoramic.cli.husky.service.blending.dataframe_joins import blend_dataframes
from panoramic.cli.husky.service.blending.dimension_phase_builder import (
DimensionPhaseBuilder,
)
from panoramic.cli.husky.service.blending.features.override_mapping.manager import (
OverrideMappingManager,
)
from panoramic.cli.husky.service.blending.metric_phase_builder import MetricPhaseBuilder
from panoramic.cli.husky.service.blending.tel_planner import TelPlanner
from panoramic.cli.husky.service.context import HuskyQueryContext
from panoramic.cli.husky.service.filter_builder.enums import (
FilterClauseType,
SimpleFilterOperator,
)
from panoramic.cli.husky.service.filter_builder.filter_clauses import (
TaxonValueFilterClause,
)
from panoramic.cli.husky.service.query_builder import QueryBuilder
from panoramic.cli.husky.service.select_builder.exceptions import (
UnsupportedAggregationType,
)
from panoramic.cli.husky.service.types.api_data_request_types import (
ApiDataRequest,
BlendingDataRequest,
ComparisonConfig,
InternalDataRequest,
)
from panoramic.cli.husky.service.types.api_scope_types import ComparisonScopeType
from panoramic.cli.husky.service.types.types import (
BlendingQueryInfo,
Dataframe,
DataframeColumn,
QueryInfo,
)
from panoramic.cli.husky.service.utils.taxon_slug_expression import TaxonExpressionStr
class ComparisonRequestBuilder:
"""
Helper class for building Husky comparison subrequests.
"""
@classmethod
def _build_comparison_subrequest(
cls, original_subrequest: ApiDataRequest, comparison: ComparisonConfig, taxon_manager: BlendingTaxonManager
) -> InternalDataRequest:
subrequest: InternalDataRequest = original_subrequest.to_internal_model()
# Reset all filters. Getting comparison can only be filtered by project filters or company id.
subrequest.preaggregation_filters = None
# Reset limit and order by. Does not make sense for comparison.
subrequest.limit = None
subrequest.order_by = []
# Get taxon slugs we need for comparison subrequest.
subrequest.taxons = sorted(list(taxon_manager.get_comparison_subrequest_raw_taxons(subrequest, comparison)))
if comparison.scope == ComparisonScopeType.company:
# If company scope, we add a filter on the company id and remove project filters and accounts
# Eventually, we could fetch list of all accounts under a company and filter on that, since that will
# probably be faster.
subrequest.scope.preaggregation_filters = TaxonValueFilterClause(
{
'type': FilterClauseType.TAXON_VALUE.value,
'taxon': 'company_id',
'operator': SimpleFilterOperator.EQ.value,
'value': subrequest.scope.company_id,
}
)
return subrequest
@classmethod
def _build_comparison_blend_query(
cls,
ctx: HuskyQueryContext,
config_arg: BlendingDataRequest,
taxon_manager: BlendingTaxonManager,
query_info: BlendingQueryInfo,
) -> Optional[Dataframe]:
"""
Builds comparison query for each subrequest and then blends them all into one comparison dataframe.
"""
dataframes = []
config = BlendingDataRequest(config_arg.to_native()) # Clone, coz we will be modifying subqueries
        assert config.comparison, 'Comparison must be defined when trying to build comparison query.'
comparison: ComparisonConfig = config.comparison
for _subrequest in config.data_subrequests:
subrequest = cls._build_comparison_subrequest(_subrequest, comparison, taxon_manager)
data_source = subrequest.properties.data_source
# if no comparison taxons were found for this subrequest, skip creating comparison query for it as well
if len(subrequest.taxons) == 0:
continue
bm_sub_query_info = QueryInfo.create(subrequest)
query_info.comparison_subrequests_info.append(bm_sub_query_info)
# Build comparison dataframe and add it to a list.
# TODO pass down TelPlan for comparisons
            # ComparisonRequestBuilder might have added filters (typically for company id or project id).
            # We create new filter templates for this comparison subrequest.
filter_templates = TelPlanner.get_preaggregation_filter_templates(
ctx,
[subrequest.preaggregation_filters, subrequest.scope.preaggregation_filters],
taxon_manager.taxon_map,
data_source,
)
dataframes.append(
QueryBuilder.build_query(
ctx,
subrequest,
bm_sub_query_info,
taxon_manager.used_taxons,
dimension_templates=taxon_manager.plan.comparison_data_source_formula_templates[data_source],
filter_templates=filter_templates,
)
)
# if no comparison subrequests were created, there is no need to blend data frames
if len(dataframes) == 0:
return None
# Blend all comparison dataframes into one
# TODO pass down TelPlan for comparisons
data_source_formula_templates = taxon_manager.plan.comparison_data_source_formula_templates
dataframe = blend_dataframes(ctx, dataframes, data_source_formula_templates)
# Prefix all comparison metric columns with 'comparison@' and create comparison taxon for it.
query = dataframe.query
final_columns = []
aliased_taxon_by_slug: Dict[TaxonExpressionStr, DataframeColumn] = dict()
for slug, df_column in dataframe.slug_to_column.items():
            # Alias metrics with comparison@ prefix, and select dimensions.
if df_column.taxon.is_dimension:
new_taxon = df_column.taxon.copy(deep=True)
new_slug = TaxonExpressionStr(f'{slug}')
else:
new_slug, new_taxon = BlendingTaxonManager.create_comparison_taxon(df_column.taxon)
final_columns.append(query.c[safe_identifier(slug)].label(new_taxon.slug_safe_sql_identifier))
aliased_taxon_by_slug[new_slug] = DataframeColumn(new_slug, new_taxon, df_column.quantity_type)
for pre_formulas in data_source_formula_templates.values():
# and also select the dim columns from dim templates.
for pre_formula in pre_formulas:
final_columns.append(literal_column(quote_identifier(pre_formula.label, ctx.dialect)))
renamed_cols_query = select(sort_columns(final_columns)).select_from(dataframe.query)
return Dataframe(renamed_cols_query, aliased_taxon_by_slug, dataframe.used_model_names)
@classmethod
def build_comparison_query(
cls,
ctx: HuskyQueryContext,
config_arg: BlendingDataRequest,
taxon_manager: BlendingTaxonManager,
override_mapping_manager: OverrideMappingManager,
query_info: BlendingQueryInfo,
) -> Optional[Dataframe]:
comp_df = cls._build_comparison_blend_query(ctx, config_arg, taxon_manager, query_info)
if comp_df is None or len(taxon_manager.plan.comparison_dimension_formulas) == 0:
# There are no comparison dim formulas, means the rows are already grouped correctly
return comp_df
comp_df = DimensionPhaseBuilder.calculate_dataframe(
taxon_manager.plan.comparison_dimension_formulas,
override_mapping_manager.comparison_override_mapping_tel_data,
override_mapping_manager.cte_map,
comp_df,
)
# After dimension join, there could have been a merge (coalesce). We need to group them by the merged column
# once more, to keep single row per dimension.. otherwise we will get row fanout when left joining with
# data dataframe
group_by_cols = []
selectors = []
for dim_formula in taxon_manager.plan.comparison_dimension_formulas:
group_by_cols.append(column(dim_formula.label))
for df_column in comp_df.slug_to_column.values():
taxon = df_column.taxon
col = column(df_column.name)
if taxon.is_dimension:
group_by_cols.append(col)
else:
agg_type = taxon.tel_metadata_aggregation_type
agg_fn = None
if agg_type:
agg_fn = MetricPhaseBuilder.AGGREGATION_FUNCTIONS_MAP.get(agg_type)
if agg_fn is None:
raise UnsupportedAggregationType(taxon)
col = agg_fn(col).label(df_column.name)
selectors.append(col)
selectors.extend(group_by_cols)
query = select(sort_columns(selectors)).select_from(comp_df.query).group_by(*group_by_cols)
return Dataframe(query, comp_df.slug_to_column, comp_df.used_model_names)
|
python
|
import theano
import theano.tensor as T
import treeano
from treeano.sandbox.nodes import bttf_mean
fX = theano.config.floatX
@treeano.register_node("bachelor_normalization")
class BachelorNormalizationNode(treeano.NodeImpl):
hyperparameter_names = ("bttf_alpha",
"alpha",
"epsilon",
"normalization_axes",
"update_averages",
"deterministic")
def compute_output(self, network, in_vw):
alpha = network.find_hyperparameter(["bttf_alpha", "alpha"], 0.95)
epsilon = network.find_hyperparameter(["epsilon"], 1e-4)
normalization_axes = network.find_hyperparameter(["normalization_axes"],
(1,))
# HACK: using "deterministic" to mean test time
deterministic = network.find_hyperparameter(["deterministic"], False)
update_averages = network.find_hyperparameter(["update_averages"],
not deterministic)
alpha = treeano.utils.as_fX(alpha)
if update_averages:
backprop_to_the_future_mean = bttf_mean.backprop_to_the_future_mean_with_updates
else:
backprop_to_the_future_mean = bttf_mean.backprop_to_the_future_mean_no_updates
state_shape = tuple([in_vw.shape[axis] for axis in normalization_axes])
state_pattern = ["x"] * in_vw.ndim
for idx, axis in enumerate(normalization_axes):
state_pattern[axis] = idx
def make_state(name, tags, default_inits=None):
if default_inits is None:
default_inits = []
return network.create_vw(
name=name,
is_shared=True,
shape=state_shape,
tags=tags,
default_inits=default_inits,
).variable
gamma = make_state("gamma", {"parameter", "weight"})
beta = make_state("beta", {"parameter", "bias"})
# mean of input
mean = make_state("mean", {"state"})
# gradient of mean of input
mean_grad = make_state("mean_grad", {"state"})
# mean of input^2
squared_mean = make_state("squared_mean", {"state"},
# initializing to 1, so that std = 1
default_inits=[treeano.inits.ConstantInit(1.)])
# gradient of mean of input^2
squared_mean_grad = make_state("squared_mean_grad", {"state"})
in_var = in_vw.variable
mean_axes = tuple([axis for axis in range(in_var.ndim)
if axis not in normalization_axes])
batch_mean = in_var.mean(axis=mean_axes)
squared_batch_mean = T.sqr(in_var).mean(axis=mean_axes)
# expectation of input (x)
E_x = backprop_to_the_future_mean(batch_mean,
mean,
mean_grad,
alpha)
# TODO try mixing batch mean with E_x
# expectation of input squared
E_x_squared = backprop_to_the_future_mean(squared_batch_mean,
squared_mean,
squared_mean_grad,
alpha)
# HACK mixing batch and rolling means
# E_x = 0.5 * E_x + 0.5 * batch_mean
# E_x_squared = 0.5 * E_x_squared + 0.5 * squared_batch_mean
if 1:
mu = E_x
sigma = T.sqrt(E_x_squared - T.sqr(E_x) + epsilon)
mu = mu.dimshuffle(state_pattern)
sigma = sigma.dimshuffle(state_pattern)
gamma = gamma.dimshuffle(state_pattern)
beta = beta.dimshuffle(state_pattern)
else:
# HACK mixing current value
E_x = E_x.dimshuffle(state_pattern)
E_x_squared = E_x_squared.dimshuffle(state_pattern)
gamma = gamma.dimshuffle(state_pattern)
beta = beta.dimshuffle(state_pattern)
E_x = 0.1 * in_var + 0.9 * E_x
E_x_squared = 0.1 * T.sqr(in_var) + 0.9 * E_x_squared
mu = E_x
sigma = T.sqrt(E_x_squared - T.sqr(E_x) + epsilon)
if 0:
# HACK don't backprop through sigma
sigma = T.consider_constant(sigma)
if 1:
# HACK using batch mean
mu = batch_mean
mu = mu.dimshuffle(state_pattern)
if 0:
# HACK using batch variance
sigma = T.sqrt(in_var.var(axis=mean_axes) + epsilon)
sigma = sigma.dimshuffle(state_pattern)
out_var = (in_var - mu) * (T.exp(gamma) / sigma) + beta
network.create_vw(
name="default",
variable=out_var,
shape=in_vw.shape,
tags={"output"},
)
if 1:
# HACK monitoring state
network.create_vw(
name="mu_mean",
variable=mu.mean(),
shape=(),
tags={"monitor"},
)
network.create_vw(
name="sigma_mean",
variable=sigma.mean(),
shape=(),
tags={"monitor"},
)
network.create_vw(
name="gamma_mean",
variable=gamma.mean(),
shape=(),
tags={"monitor"},
)
network.create_vw(
name="beta_mean",
variable=beta.mean(),
shape=(),
tags={"monitor"},
)
@treeano.register_node("bachelor_normalization2")
class BachelorNormalization2Node(treeano.NodeImpl):
hyperparameter_names = ("bttf_alpha",
"alpha",
"epsilon",
"normalization_axes",
"update_averages",
"deterministic")
def compute_output(self, network, in_vw):
alpha = network.find_hyperparameter(["bttf_alpha", "alpha"], 0.95)
epsilon = network.find_hyperparameter(["epsilon"], 1e-4)
normalization_axes = network.find_hyperparameter(["normalization_axes"],
(1,))
# HACK: using "deterministic" to mean test time
deterministic = network.find_hyperparameter(["deterministic"], False)
update_averages = network.find_hyperparameter(["update_averages"],
not deterministic)
alpha = treeano.utils.as_fX(alpha)
if update_averages:
backprop_to_the_future_mean = bttf_mean.backprop_to_the_future_mean_with_updates
else:
backprop_to_the_future_mean = bttf_mean.backprop_to_the_future_mean_no_updates
state_shape = tuple([in_vw.shape[axis] for axis in normalization_axes])
state_pattern = ["x"] * in_vw.ndim
for idx, axis in enumerate(normalization_axes):
state_pattern[axis] = idx
def make_state(name, tags, default_inits=None):
if default_inits is None:
default_inits = []
return network.create_vw(
name=name,
is_shared=True,
shape=state_shape,
tags=tags,
default_inits=default_inits,
).variable
gamma = make_state("gamma", {"parameter", "weight"})
beta = make_state("beta", {"parameter", "bias"})
# mean of input
mean = make_state("mean", {"state"})
# gradient of mean of input
mean_grad = make_state("mean_grad", {"state"})
var_state_mean = make_state("var_state_mean", {"state"},
# initializing to 1, so that std = 1
default_inits=[treeano.inits.ConstantInit(1.)])
var_state_mean_grad = make_state("var_state_mean_grad", {"state"})
in_var = in_vw.variable
mean_axes = tuple([axis for axis in range(in_var.ndim)
if axis not in normalization_axes])
batch_mean = in_var.mean(axis=mean_axes)
# expectation of input (x)
E_x = backprop_to_the_future_mean(batch_mean,
mean,
mean_grad,
alpha)
# TODO try mixing batch mean with E_x
if 1:
batch_var_state = 1. / T.sqrt(in_var.var(axis=mean_axes) + epsilon)
var_state = backprop_to_the_future_mean(batch_var_state,
var_state_mean,
var_state_mean_grad,
alpha)
inv_std = var_state
# HACK mixing batch and rolling means
# E_x = 0.5 * E_x + 0.5 * batch_mean
# E_x_squared = 0.5 * E_x_squared + 0.5 * squared_batch_mean
mu = E_x
mu = mu.dimshuffle(state_pattern)
inv_std = inv_std.dimshuffle(state_pattern)
gamma = gamma.dimshuffle(state_pattern)
beta = beta.dimshuffle(state_pattern)
out_var = (in_var - mu) * (T.exp(gamma) * inv_std) + beta
network.create_vw(
name="default",
variable=out_var,
shape=in_vw.shape,
tags={"output"},
)
if 1:
# HACK monitoring state
network.create_vw(
name="mu_mean",
variable=mu.mean(),
shape=(),
tags={"monitor"},
)
network.create_vw(
name="var_state_effective_mean",
variable=var_state.mean(),
shape=(),
tags={"monitor"},
)
network.create_vw(
name="gamma_mean",
variable=gamma.mean(),
shape=(),
tags={"monitor"},
)
network.create_vw(
name="beta_mean",
variable=beta.mean(),
shape=(),
tags={"monitor"},
)
|
python
|
print("My name is John")
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2013 Simonas Kazlauskas
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation
from os import path, makedirs
from hashlib import sha1
from gi.repository import GObject, GLib
from quodlibet.util.path import escape_filename, xdg_get_cache_home
class CoverSourcePlugin(GObject.Object):
"""
    Plugins that, given a song, should provide cover art.
    The plugin should override the following methods and properties:
    @staticmethod priority()
    @property cover_path(self)
    fetch_cover(self)
    Refer to the default implementations' documentation in order to
    understand their role.
"""
__gsignals__ = {
'fetch-success': (GObject.SignalFlags.RUN_LAST, None, (object,)),
'fetch-failure': (GObject.SignalFlags.RUN_LAST, None, (object,)),
'search-complete': (GObject.SignalFlags.RUN_LAST, None, (object,))
}
def __init__(self, song, cancellable=None):
self.song = song
self.cancellable = cancellable
super(CoverSourcePlugin, self).__init__()
@staticmethod
def priority():
"""
        Should return a float in the range [0.0, 1.0] suggesting the priority
        of the cover source. Whether the value returned by this method is
        respected is not guaranteed.
        As a rule of thumb, a source's reliability and quality should be
        compared with those of other sources, and its score placed between
        the two sources that come closest in quality and reliability.
There's a table of value ranges sources should respect:
* (0.9, 1.0] - user's preferred methods (set by configuration; example:
preferring embed cover art);
* (0.7, 0.9] - local covers;
* (0.4, 0.7] - accurate (> 99%) source of high quality (>= 200x200)
covers;
* (0.2, 0.4] - accurate (> 99%) source of low quality (< 200x200)
covers;
* (0.0, 0.2] - not very accurate (<= 99%) source of covers, even if
they're high quality;
* 0.0 - reserved for the fallback cover source.
"""
return 0.0
@property
def cover_directory(self):
return cover_dir
@property
def cover_filename(self):
"""
Return the filename of the cover which hopefully should not change
between songs in the same album and still be unique enough to
        uniquely identify most (or, even better, all) of the albums.
        The string returned must not contain any characters illegal in
        most common filesystems. These include /, ?, <, >, \, :, *, |, " and ^.
Staying in the bounds of ASCII is highly encouraged.
Perchance the song lacks data to generate the filename of cover for
this provider, None shall be returned.
"""
key = sha1()
# Should be fine as long as the same interpreter is used.
key.update(repr(self.song.album_key))
return escape_filename(key.hexdigest())
@property
def cover_path(self):
"""
Should return the path where cover is expected to be cached. The
location should be based in common cache location available in variable
`cover_dir` of this module.
It doesn't necessarily mean the cover is actually at the returned
location neither that it will be stored there at any later time.
"""
return path.join(self.cover_directory, self.cover_filename)
@property
def cover(self):
"""
Method to get cover file from cover provider for a specific song.
Should always return a file-like object opened as read-only if any
and None otherwise.
"""
cp = self.cover_path
try:
return open(cp, 'rb') if cp and path.isfile(cp) else None
except IOError:
            print_w('Failed reading album art "%s"' % cp)
def search(self):
"""
Start searching for cover art from a source.
After search is completed the `search-complete` event must be emitted
regardless of search outcome with a list of dictionaries containing
`album`, `artist` and `cover` keys as an argument. If search was
unsuccessful, empty list should be returned.
By convention better quality and more accurate covers are expected to
appear first in the list.
"""
self.emit('search-complete', [])
def fetch_cover(self):
"""
Method to ask source fetch the cover from its source into location at
`self.cover_path`.
If this method succeeds in putting the image from its source into
`self.cover_path`, `fetch-success` signal shall be emitted and
`fetch-failure` otherwise.
Return value of this function doesn't have any meaning whatsoever.
"""
self.fail('This source is incapable of fetching covers')
def fail(self, message):
"""
Shorthand method for emitting `fetch-failure` signals.
Most common use pattern would be:
return self.fail("Failure message")
"""
self.emit('fetch-failure', message)
cover_dir = path.join(xdg_get_cache_home(), 'quodlibet', 'covers')
try:
makedirs(cover_dir)
except OSError:
pass
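# A minimal sketch of a concrete source, added here only to illustrate the
# contract documented in CoverSourcePlugin above; it is not part of the
# original module. The name ExampleCoverSource, the 0.8 priority value and
# the "~filename"/"album"/"artist" song accessors are illustrative assumptions.
class ExampleCoverSource(CoverSourcePlugin):
    """Hypothetical source looking for a 'cover.jpg' next to the audio file."""
    @staticmethod
    def priority():
        # Local covers fall into the (0.7, 0.9] range suggested above.
        return 0.8
    def _local_cover(self):
        return path.join(path.dirname(self.song("~filename")), "cover.jpg")
    def search(self):
        # 'search-complete' must be emitted whether or not anything was
        # found, with a (possibly empty) list of candidate dictionaries.
        cover = self._local_cover()
        results = []
        if path.isfile(cover):
            results.append({"album": self.song("album"),
                            "artist": self.song("artist"),
                            "cover": cover})
        self.emit('search-complete', results)
    def fetch_cover(self):
        # Copy the local file into `self.cover_path` and report the outcome,
        # as described in the fetch_cover() docstring of the base class.
        source = self._local_cover()
        if not path.isfile(source):
            return self.fail('no local cover.jpg found')
        with open(source, 'rb') as src:
            with open(self.cover_path, 'wb') as dst:
                dst.write(src.read())
        self.emit('fetch-success', self.cover)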
|
python
|
# Edit by Tianyu Ma
# coding: utf-8
"""
=====
Third step: merge csv files
=====
"""
|
python
|
import json
import string
import csv
def count_lines_and_words(fname):
    """Return the number of lines and words in the text file at `fname`."""
    with open(fname, 'r') as fhand:
        text = fhand.read()
    lines = text.split('\n')
    word_count = 0
    for line in lines:
        word_count += len(line.split())
    return len(lines), word_count
for fname in ['./data/obama_speech.txt',
              './data/michelle_obama_speech.txt',
              './data/donald_speech.txt',
              './data/melina_trump_speech.txt']:
    line_count, word_count = count_lines_and_words(fname)
    print(f"File name: {fname}")
    print(f"Line Count: {line_count}")
    print(f"Word Count: {word_count}")
def most_spoken_languages(fname, n):
fhand = open(fname, 'r')
data = fhand.read()
countries = json.loads(data)
count_dic = {}
output = []
for country in countries:
languages = country["languages"]
for language in languages:
if language not in count_dic:
count_dic[language] = 1
else:
count_dic[language] += 1
for k, v in count_dic.items():
tup = (v, k)
output.append(tup)
output.sort(key=lambda x: x[0], reverse=True)
required_output = []
count = 0
for item in output:
if count == n:
break
required_output.append(item)
count += 1
return required_output
print(most_spoken_languages('./data/countries_data.json', 10))
print(most_spoken_languages('./data/countries_data.json', 3))
def most_populated_countries(fname, n):
fhand = open(fname, 'r')
data = fhand.read()
countries = json.loads(data)
output = []
for country in countries:
new_dic = {}
new_dic['country'] = country['name']
new_dic['population'] = country['population']
output.append(new_dic)
output.sort(key=lambda x: x['population'], reverse=True)
required_output = []
count = 0
for item in output:
if count == n:
break
required_output.append(item)
count += 1
return required_output
print(most_populated_countries('./data/countries_data.json', 10))
print(most_populated_countries('./data/countries_data.json', 3))
fname = './data/email_exchanges_big.txt'
fhand = open(fname, 'r')
data = fhand.read()
lst = data.split('\n')
count = 0
for line in lst:
if line.startswith('From'):
count += 1
print(f"There are {count} incoming email addresses")
def find_most_common_words(fname, n):
fhand = open(fname, 'r')
data = fhand.read()
lines = data.split('\n')
word_dic = {}
output = []
for line in lines:
words = line.split()
for word in words:
if word == ' ':
continue
if word in word_dic:
word_dic[word] += 1
else:
word_dic[word] = 1
for k, v in word_dic.items():
tup = (v, k)
output.append(tup)
output.sort(key=lambda x: x[0], reverse=True)
required_output = []
count = 0
for item in output:
if count == n:
break
required_output.append(item)
count += 1
return required_output
print(
f"10 most frequent words in obama_speech.txt are: \n{find_most_common_words('./data/obama_speech.txt', 10)} ")
print(
f"10 most frequent words in michelle_obama_speech.txt are: \n{find_most_common_words('./data/michelle_obama_speech.txt', 10)} ")
print(
f"10 most frequent words in donald_speech.txt are: \n{find_most_common_words('./data/donald_speech.txt', 10)} ")
print(
f"10 most frequent words in melina_trump_speech.txt are: \n{find_most_common_words('./data/melina_trump_speech.txt', 10)} ")
stop_words = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've", "you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up',
'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn', "mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", 'won', "won't", 'wouldn', "wouldn't"]
def clean_text(fname):
fhand = open(fname, 'r')
data = fhand.read()
lines = data.split('\n')
word_lst = []
for line in lines:
words = line.split()
for word in words:
if word in stop_words or word in string.punctuation:
continue
else:
word_lst.append(word)
return word_lst
def check_text_similarity(lst1, lst2):
output = []
for word in lst1:
if word in lst2:
output.append(word)
print(f"Total number of similar words are {len(output)}")
print(f"Similar words are: \n{output}")
michelle_lst = clean_text('./data/michelle_obama_speech.txt')
melina_lst = clean_text('./data/melina_trump_speech.txt')
check_text_similarity(michelle_lst, melina_lst)
print(
f"10 most frequent words in romeo_and_juliet.txt are: \n{find_most_common_words('./data/romeo_and_juliet.txt', 10)} ")
fname = './data/hacker_news.csv'
fhand = open(fname, 'r')
lines = csv.reader(fhand, delimiter=',')
python_count = 0
js_count = 0
java_count = 0
for line in lines:
for item in line:
words = item.split()
if 'python' in words or 'Python' in words:
python_count += 1
if 'javascript' in words or 'Javascript' in words or 'JavaScript' in words:
js_count += 1
if 'java' in words or 'Java' in words:
java_count += 1
print(f"Number of Lines having python are {python_count}")
print(f"Number of Lines having javascript are {js_count}")
print(f"Number of Lines having java are {java_count}")
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-23 21:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Words',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('color', models.CharField(blank=True, max_length=10)),
('words', models.TextField()),
('countwords', models.IntegerField()),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
),
]
|
python
|
# A for loop is an effective way to traverse a list, but the list should not be
# modified inside a for loop, otherwise Python has trouble keeping track of its
# elements.
# To modify a list while traversing it, use a while loop instead.
# Moving elements between lists
unconfirmed_users = ['alice', 'brian', 'candace'] # users waiting to be verified
confirmed_users = [] # users that have been verified
# Traverse the list and verify each user
while unconfirmed_users: # a non-empty list is truthy; an empty list is falsy
    current_user = unconfirmed_users.pop() # take out a user that needs verification
    print('Verifying user: ' + current_user.title())
    confirmed_users.append(current_user) # move the verified user to the confirmed list
print('\nThe following users have been verified: ')
for user in confirmed_users:
    print('\t' + user)
print('\nUsers still waiting for verification: ')
print(unconfirmed_users)
# Why a for loop cannot replace the while loop
print('\n')
print('Trying the same operation with a for loop: ')
unconfirmed_users = ['alice', 'brian', 'candace'] # users waiting to be verified
confirmed_users = [] # users that have been verified
for unconfirmed_user in unconfirmed_users:
    # current_user = unconfirmed_users.pop() # popping inside a for loop skips elements and breaks traversal
    print('Verifying user: ' + unconfirmed_user.title())
    confirmed_users.append(unconfirmed_user) # move the verified user to the confirmed list
    # unconfirmed_users.remove(unconfirmed_user) # removing inside a for loop also breaks traversal
print('\nThe following users have been verified: ')
for user in confirmed_users:
    print('\t' + user)
print('\nUsers still waiting for verification: ')
print(unconfirmed_users)
# Removing all list elements with a specific value
# A while loop keeps checking whether the value is still in the list and removes it
print('\n')
pets = ['dog', 'cat', 'dog', 'goldfish', 'cat', 'rabbit', 'cat']
print('The initial data is: ')
print(pets)
cat_name = 'cat'
while cat_name in pets:
    pets.remove(cat_name)
print('After removing the pets named ' + cat_name + ', the pet list is: ')
print(pets)
# Filling a dictionary with user input
responses = {}
polling_active = True
while polling_active:
    name = input("\nWhat is your name? ") # get the respondent's name
    response = input("Which mountain would you like to climb someday? ") # get the mountain they want to climb
    responses[name] = response # store the respondent's answer in the dictionary
    repeat = input('Would you like to let another person respond? (yes/ no) ')
    if repeat == 'no': # check whether the poll is finished
        polling_active = False
print('\n---Poll Result---') # print the poll results
for name, response in responses.items():
print(name + ' would like to climb ' + response + '.')
|
python
|
from lib import d, t
def main():
    r = t.t()
    r = r + ' ' + d.d()
    print(f'{r} :)')
    return 1
if __name__ == '__main__':
    main()
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Script to help in managing Usenet hierarchies. It generates control
# articles and handles PGP keys (generation and management).
#
# signcontrol.py -- v. 1.4.0 -- 2014/10/26
#
# Written and maintained by Julien ÉLIE.
#
# This script is distributed under the MIT License. Please see the LICENSE
# section below for more information.
#
# Feel free to use it. I would be glad to know whether you find it useful for
# your hierarchy. Any bug reports, bug fixes, and improvements are very much
# welcome.
#
# Contact:
# <http://www.trigofacile.com/maths/contact/index.htm>
# Issue tracker:
# <https://github.com/Julien-Elie/usenet-signcontrol/issues>
#
# Upstream web site:
# <http://www.trigofacile.com/divers/usenet/clefs/signcontrol.htm>
# Github repository:
# <https://github.com/Julien-Elie/usenet-signcontrol>
# Please also read:
# <http://www.eyrie.org/~eagle/faqs/usenet-hier.html>
#
# History:
#
# v. 1.4.0: 2014/10/26 -- add the --no-tty flag to gpg when --passphrase is
# also used. Otherwise, an error occurs when running signcontrol
# from cron. Thanks to Matija Nalis for the bug report.
# - Add the PGP2_COMPATIBILITY parameter to generate control
# articles compatible with MIT PGP 2.6.2 (or equivalent).
# - When managing PGP keys, their full uid is now expected, instead
# of only a subpart.
# - Listing secret keys now also shows their fingerprint.
# - Improve documentation, along with the creation of a Git
# repository on Github.
#
# v. 1.3.3: 2011/07/11 -- automatically generate an Injection-Date: header
# field, and sign it. It will prevent control articles from being
# maliciously reinjected into Usenet, and replayed by news servers
# compliant with RFC 5537 (that is to say without cutoff on the
# Date: header field when an Injection-Date: header field exists).
#
# v. 1.3.2: 2009/12/23 -- use local time instead of UTC (thanks to Adam
# H. Kerman for the suggestion).
# - Add flags to gpg when called: --emit-version, --no-comments,
# --no-escape-from-lines and --no-throw-keyids. Otherwise, the
# signature may not be valid (thanks to Robert Spier for the
# bug report).
#
# v. 1.3.1: 2009/12/20 -- compliance with RFC 5322 (Internet Message Format):
# use "-0000" instead of "+0000" to indicate a time zone at Universal
# Time ("-0000" means that the time is generated on a system that
# may be in a local time zone other than Universal Time); also remove
# the Sender: header field.
# - When a line in the body of a control article started with
# "Sender", a bug in signcontrol prevented the article from being
# properly signed.
#
# v. 1.3.0: 2009/07/28 -- remove the charset for a multipart/mixed block
# in newgroup articles, change the default serial number from 0 to 1
# in checkgroups articles, allow the user to interactively modify
# his message (thanks to Matija Nalis for the idea).
#
# v. 1.2.1: 2008/12/07 -- ask for confirmation when "(Moderated)" is misplaced
# in a newsgroup description.
#
# v. 1.2.0: 2008/11/17 -- support for USEPRO: checkgroups scope, checkgroups
# serial numbers and accurate Content-Type: header fields.
#
# v. 1.1.0: 2007/05/09 -- fix the newgroups line when creating a newsgroup,
# use a separate config file, possibility to import signcontrol from
# other scripts and use its functions.
#
# v. 1.0.0: 2007/05/01 -- initial release.
# THERE IS NOTHING USEFUL TO PARAMETER IN THIS FILE.
# The file "signcontrol.conf" contains all your parameters
# and it will be parsed.
CONFIGURATION_FILE = 'signcontrol.conf'
import os
import re
import sys, traceback
import time
import shlex
# Current time.
TIME = time.localtime()
def treat_exceptions(type, value, stacktrace):
""" Pretty print stack traces of this script, in case an error occurs.
Arguments: type (the type of the exception)
value (the value of the exception)
stacktrace (the traceback of the exception)
No return value (the script exits with status 2)
"""
print "-----------------------------------------------------------"
print "\n".join(traceback.format_exception(type, value, stacktrace))
print "-----------------------------------------------------------"
raw_input('An error has just occurred.')
sys.exit(2)
sys.excepthook = treat_exceptions
def print_error(error):
""" Pretty print error messages.
Argument: error (the error to print)
No return value
"""
print
print '--> ' + error + ' <--'
print
def pretty_time(localtime):
""" Return the Date: header field.
Argument: localtime (a time value, representing local time)
Return value: a string suitable to be used in a Date: header field
"""
# As "%z" does not work on every platform with strftime(), we compute
# the time zone offset.
# You might want to use UTC with either "+0000" or "-0000", also changing
# time.localtime() to time.gmtime() for the definition of TIME above.
if localtime.tm_isdst > 0 and time.daylight:
offsetMinutes = - int(time.altzone / 60)
else:
offsetMinutes = - int(time.timezone / 60)
offset = "%+03d%02d" % (offsetMinutes / 60.0, offsetMinutes % 60)
return time.strftime('%a, %d %b %Y %H:%M:%S ' + offset, localtime)
def serial_time(localtime):
""" Return a checkgroups serial number.
Argument: localtime (a time value, representing local time)
Return value: a string suitable to be used as a serial number
"""
# Note that there is only one serial per day.
return time.strftime('%Y%m%d', localtime)
def epoch_time(localtime):
""" Return the number of seconds since epoch.
Argument: localtime (a time value, representing local time)
Return value: the number of seconds since epoch, as a string
"""
return str(int(time.mktime(localtime)))
def read_configuration(file):
""" Parse the configuration file.
Argument: file (path to the signcontrol.conf configuration file)
Return value: a dictionary {parameter: value} representing
the contents of the configuration file
"""
TOKENS = ['PROGRAM_GPG', 'PGP2_COMPATIBILITY', 'ID', 'MAIL', 'HOST',
'ADMIN_GROUP', 'NAME',
'CHECKGROUPS_SCOPE', 'URL',
'NEWGROUP_MESSAGE_MODERATED', 'NEWGROUP_MESSAGE_UNMODERATED',
'RMGROUP_MESSAGE', 'PRIVATE_HIERARCHY', 'CHECKGROUPS_FILE',
'ENCODING']
if not os.path.isfile(file):
print 'The configuration file is absent.'
raw_input('Please install it before using this script.')
sys.exit(2)
config_file = shlex.shlex(open(file, 'r'), posix=True)
config = dict()
parameter = None
while True:
token = config_file.get_token()
if not token:
break
if token[0] in '"\'':
token = token[1:-1]
if token in TOKENS:
parameter = token
elif token != '=' and parameter:
if parameter == 'PGP2_COMPATIBILITY':
if token == 'True' or token == 'true':
config[parameter] = [('--pgp2', '-pgp2'), ('', '')]
elif token == 'Only' or token == 'only':
config[parameter] = [('--pgp2', '-pgp2')]
else:
config[parameter] = [('', '')]
elif parameter == 'PRIVATE_HIERARCHY':
if token == 'True' or token == 'true':
config[parameter] = True
else:
config[parameter] = False
else:
config[parameter] = token
parameter = None
for token in TOKENS:
if not config.has_key(token):
print 'You must update the configuration file.'
print 'The parameter ' + token + ' is missing.'
raw_input('Please download the latest version of the configuration file and parameter it before using this script.')
sys.exit(2)
return config
def read_checkgroups(path):
""" Parse a checkgroups file.
Argument: path (path of the checkgroups file)
Return value: a dictionary {newsgroup: description} representing
the contents of the checkgroups
"""
# Usually for the first use of the script.
if not os.path.isfile(path):
print 'No checkgroups file found.'
print 'Creating an empty checkgroups file...'
write_checkgroups(dict(), path)
groups = dict()
for line in file(path):
line2 = line.strip()
while line2.find('\t\t') != -1:
line2 = line2.replace('\t\t', '\t')
try:
group, description = line2.split('\t')
groups[group] = description
except:
print_error('The current checkgroups is badly formed.')
print 'The offending line is:'
print line
print
raw_input('Please correct it before using this script.')
sys.exit(2)
return groups
def write_checkgroups(groups, path):
""" Write the current checkgroups file.
Arguments: groups (a dictionary representing a checkgroups)
path (path of the checkgroups file)
No return value
"""
keys = groups.keys()
keys.sort()
checkgroups_file = file(path, 'wb')
for key in keys:
if len(key) < 8:
checkgroups_file.write(key + '\t\t\t' + groups[key] + '\n')
elif len(key) < 16:
checkgroups_file.write(key + '\t\t' + groups[key] + '\n')
else:
checkgroups_file.write(key + '\t' + groups[key] + '\n')
checkgroups_file.close()
print 'Checkgroups file written.'
print
def choice_menu():
""" Print the initial menu, and waits for the user to make a choice.
Return value: the number representing the user's choice
"""
while True:
print
print 'What do you want to do?'
print '-----------------------'
print '1. Generate a newgroup control article (create or change a newsgroup)'
print '2. Generate an rmgroup control article (remove a newsgroup)'
print '3. Generate a checkgroups control article (list of newsgroups)'
print '4. Manage my PGP keys (generate/import/export/remove/revoke)'
print '5. Quit'
print
try:
choice = int(raw_input('Your choice (1-5): '))
if int(choice) not in range(1,6):
raise ValueError()
print
return choice
except:
print_error('Please enter a number between 1 and 5.')
def manage_menu():
""" Print the menu related to the management of PGP keys, and waits
for the user to make a choice.
Return value: the number representing the user's choice
"""
while True:
print
print 'What do you want to do?'
print '-----------------------'
print '1. See the current installed keys'
print '2. Generate a new pair of secret/public keys'
print '3. Export a public key'
print '4. Export a secret key'
print '5. Import a secret key'
print '6. Remove a pair of secret/public keys'
print '7. Revoke a secret key'
print '8. Quit'
print
try:
choice = int(raw_input('Your choice (1-8): '))
if int(choice) not in range(1,9):
raise ValueError()
print
return choice
except:
print_error('Please enter a number between 1 and 8.')
def generate_signed_message(config, file_message, group, message_id, type, passphrase=None, flag=''):
""" Generate signed control articles.
Arguments: config (the dictionary of parameters from signcontrol.conf)
file_message (the file name of the message to sign)
group (the name of the newsgroup)
message_id (the Message-ID of the message)
type (the type of the control article)
passphrase (if given, the passphrase of the private key)
flag (if given, the additional flag(s) to pass to gpg)
No return value
"""
signatureWritten = False
if passphrase:
os.system(config['PROGRAM_GPG'] + ' --emit-version --no-comments --no-escape-from-lines --no-throw-keyids --armor --detach-sign --local-user "='+ config['ID'] + '" --no-tty --passphrase "' + passphrase + '" --output ' + file_message + '.pgp ' + flag + ' ' + file_message + '.txt')
else:
os.system(config['PROGRAM_GPG'] + ' --emit-version --no-comments --no-escape-from-lines --no-throw-keyids --armor --detach-sign --local-user "='+ config['ID'] + '" --output ' + file_message + '.pgp ' + flag + ' ' + file_message + '.txt')
if not os.path.isfile(file_message + '.pgp'):
print_error('Signature generation failed.')
print 'Please verify the availability of the secret key.'
return
result = file(file_message + '.sig', 'wb')
for line in file(file_message + '.txt', 'rb'):
if signatureWritten:
result.write(line)
continue
if not line.startswith('X-Signed-Headers'):
# From: is the last signed header field.
if not line.startswith('From'):
result.write(line)
else:
# Rewrite the From: line exactly as we already wrote it.
result.write('From: ' + config['NAME'] + ' <' + config['MAIL'] + '>\n')
result.write('Approved: ' + config['MAIL'] + '\n')
if type == 'checkgroups' and not config['PRIVATE_HIERARCHY']:
result.write('Newsgroups: ' + group + ',news.admin.hierarchies\n')
result.write('Followup-To: ' + group + '\n')
else:
result.write('Newsgroups: ' + group + '\n')
result.write('Path: not-for-mail\n')
result.write('X-Info: ' + config['URL'] + '\n')
result.write('\tftp://ftp.isc.org/pub/pgpcontrol/README.html\n')
result.write('MIME-Version: 1.0\n')
if type == 'newgroup':
result.write('Content-Type: multipart/mixed; boundary="signcontrol"\n')
elif type == 'checkgroups':
result.write('Content-Type: application/news-checkgroups; charset=' + config['ENCODING'] + '\n')
else: # if type == 'rmgroup':
result.write('Content-Type: text/plain; charset=' + config['ENCODING'] + '\n')
result.write('Content-Transfer-Encoding: 8bit\n')
for line2 in file(file_message + '.pgp', 'r'):
if line2.startswith('-'):
continue
if line2.startswith('Version:'):
version = line2.replace('Version: ', '')
version = version.replace(' ', '_')
result.write('X-PGP-Sig: ' + version.rstrip() + ' Subject,Control,Message-ID,Date,Injection-Date,From\n')
elif len(line2) > 2:
result.write('\t' + line2.rstrip() + '\n')
signatureWritten = True
result.close()
os.remove(file_message + '.pgp')
print
if flag:
print 'Do not worry if the program complains about detached signatures or MD5.'
print 'You can now post the file ' + file_message + '.sig using rnews'
print 'or a similar tool.'
print
#print 'Or you can also try to send it with IHAVE. If it fails, it means that the article'
#print 'has not been sent. You will then have to manually use rnews or a similar program.'
#if raw_input('Do you want to try? (y/n) ') == 'y':
# import nntplib
# news_server = nntplib.NNTP(HOST, PORT, USER, PASSWORD)
# news_server.ihave(message_id, file_message + '.sig')
# news_server.quit()
# print 'The control article has just been sent!'
def sign_message(config, file_message, group, message_id, type, passphrase=None):
""" Sign a control article.
Arguments: config (the dictionary of parameters from signcontrol.conf)
file_message (the file name of the message to sign)
group (the name of the newsgroup)
message_id (the Message-ID of the message)
type (the type of the control article)
passphrase (if given, the passphrase of the private key)
No return value
"""
articles_to_generate = len(config['PGP2_COMPATIBILITY'])
i = 1
for (flag, suffix) in config['PGP2_COMPATIBILITY']:
if articles_to_generate > 1:
print
print 'Generation of control article ' + str(i) + '/' + str(articles_to_generate)
i += 1
if suffix:
additional_file = file(file_message + suffix + '.txt', 'wb')
additional_message_id = message_id.replace('@', suffix + '@', 1)
for line in file(file_message + '.txt', 'rb'):
if line == 'Message-ID: ' + message_id + '\n':
line = 'Message-ID: ' + additional_message_id + '\n'
additional_file.write(line)
additional_file.close()
generate_signed_message(config, file_message + suffix, group, additional_message_id, type, passphrase, flag)
os.remove(file_message + suffix + '.txt')
else:
generate_signed_message(config, file_message, group, message_id, type, passphrase, flag)
def generate_newgroup(groups, config, group=None, moderated=None, description=None, message=None, passphrase=None):
""" Create a new group.
Arguments: groups (the dictionary representing the checkgroups)
config (the dictionary of parameters from signcontrol.conf)
group (if given, the name of the newsgroup)
moderated (if given, whether the newsgroup is moderated)
description (if given, the description of the newsgroup)
message (if given, the text to write in the control article)
passphrase (if given, the passphrase of the private key)
No return value
"""
while not group:
group = raw_input('Name of the newsgroup to create: ').lower()
components = group.split('.')
if len(components) < 2:
group = None
print_error('The group must have at least two components.')
elif not components[0][0:1].isalpha():
group = None
print_error('The first component must start with a letter.')
elif components[0] in ['control', 'example', 'to']:
group = None
print_error('The first component must not be "control", "example" or "to".')
elif re.search('[^a-z0-9+_.-]', group):
group = None
print_error('The group must not contain characters other than [a-z0-9+_.-].')
for component in components:
if component in ['all', 'ctl']:
group = None
print_error('Sequences "all" and "ctl" must not be used as components.')
elif not component[0:1].isalnum():
group = None
print_error('Each component must start with a letter or a digit.')
elif component.isdigit():
group = None
print_error('Each component must contain at least one non-digit character.')
if groups.has_key(group):
print
print 'The newsgroup ' + group + ' already exists.'
print 'These new settings (status and description) will override the current ones.'
print
if moderated is None:
if raw_input('Is ' + group + ' a moderated newsgroup? (y/n) ' ) == 'y':
moderated = True
print
print 'There is no need to add " (Moderated)" at the very end of the description.'
print 'It will be automatically added, if not already present.'
print
else:
moderated = False
while not description:
print
print 'The description should start with a capital and end in a period.'
description = raw_input("Description of " + group + ": ")
if len(description) > 56:
print_error('The description is too long. You should shorten it.')
if raw_input('Do you want to continue despite this recommendation? (y/n) ') != 'y':
description = None
continue
moderated_count = description.count('(Moderated)')
if moderated_count > 0:
if not moderated:
if description.endswith(' (Moderated)'):
description = None
print_error('The description must not end with " (Moderated)".')
continue
else:
print_error('The description must not contain "(Moderated)".')
if raw_input('Do you want to continue despite this recommendation? (y/n) ') != 'y':
description = None
continue
elif moderated_count > 1 or not description.endswith(' (Moderated)'):
print_error('The description must not contain "(Moderated)".')
if raw_input('Do you want to continue despite this recommendation? (y/n) ') != 'y':
description = None
continue
if not message:
print
print 'The current message which will be sent is:'
print
if moderated:
message = config['NEWGROUP_MESSAGE_MODERATED'].replace('$GROUP$', group)
else:
message = config['NEWGROUP_MESSAGE_UNMODERATED'].replace('$GROUP$', group)
print message
print
if raw_input('Do you want to change it? (y/n) ') == 'y':
print
print 'Please enter the message you want to send.'
print 'End it with a line containing only "." (a dot).'
print
message = ''
buffer = raw_input('Message: ') + '\n'
while buffer != '.\n':
message += buffer.rstrip() + '\n'
buffer = raw_input('Message: ') + '\n'
print
print
print 'Here is the information about the newsgroup:'
print 'Name: ' + group
if moderated:
print 'Status: moderated'
if not description.endswith(' (Moderated)'):
description += ' (Moderated)'
else:
print 'Status: unmoderated'
print 'Description: ' + description
print 'Message: '
print
print message
print
if raw_input('Do you want to generate a control article for ' + group + '? (y/n) ') == 'y':
print
file_newgroup = group + '-' + epoch_time(TIME)
result = file(file_newgroup + '.txt', 'wb')
result.write('X-Signed-Headers: Subject,Control,Message-ID,Date,Injection-Date,From\n')
if moderated:
result.write('Subject: cmsg newgroup ' + group + ' moderated\n')
result.write('Control: newgroup ' + group + ' moderated\n')
else:
result.write('Subject: cmsg newgroup ' + group + '\n')
result.write('Control: newgroup ' + group + '\n')
message_id = '<newgroup-' + group + '-' + epoch_time(TIME) + '@' + config['HOST'] + '>'
result.write('Message-ID: ' + message_id + '\n')
result.write('Date: ' + pretty_time(TIME) + '\n')
result.write('Injection-Date: ' + pretty_time(TIME) + '\n')
result.write('From: ' + config['NAME'] + ' <' + config['MAIL'] + '>\n\n')
result.write('This is a MIME NetNews control message.\n')
result.write('--signcontrol\n')
result.write('Content-Type: text/plain; charset=' + config['ENCODING'] + '\n\n')
result.write(message + '\n')
result.write('\n\n--signcontrol\n')
result.write('Content-Type: application/news-groupinfo; charset=' + config['ENCODING'] + '\n\n')
result.write('For your newsgroups file:\n')
if len(group) < 8:
result.write(group + '\t\t\t' + description + '\n')
elif len(group) < 16:
result.write(group + '\t\t' + description + '\n')
else:
result.write(group + '\t' + description + '\n')
result.write('\n--signcontrol--\n')
result.close()
sign_message(config, file_newgroup, group, message_id, 'newgroup', passphrase)
os.remove(file_newgroup + '.txt')
if raw_input('Do you want to update the current checkgroups file? (y/n) ') == 'y':
groups[group] = description
write_checkgroups(groups, config['CHECKGROUPS_FILE'])
def generate_rmgroup(groups, config, group=None, message=None, passphrase=None):
""" Remove a group.
Arguments: groups (the dictionary representing the checkgroups)
config (the dictionary of parameters from signcontrol.conf)
group (if given, the name of the newsgroup)
message (if given, the text to write in the control article)
passphrase (if given, the passphrase of the private key)
No return value
"""
while not group:
group = raw_input('Name of the newsgroup to remove: ' ).lower()
if not groups.has_key(group):
print
print 'The newsgroup ' + group + ' does not exist.'
print 'Yet, you can send an rmgroup message for it if you want.'
print
if raw_input('Do you want to generate a control article to *remove* ' + group + '? (y/n) ') == 'y':
print
if not message:
print 'The current message which will be sent is:'
print
message = config['RMGROUP_MESSAGE'].replace('$GROUP$', group)
print message
print
if raw_input('Do you want to change it? (y/n) ') == 'y':
print
print 'Please enter the message you want to send.'
print 'End it with a line containing only "." (a dot).'
print
message = ''
buffer = raw_input('Message: ') + '\n'
while buffer != '.\n':
message += buffer.rstrip() + '\n'
buffer = raw_input('Message: ') + '\n'
print
file_rmgroup = group + '-' + epoch_time(TIME)
result = file(file_rmgroup + '.txt', 'wb')
result.write('X-Signed-Headers: Subject,Control,Message-ID,Date,Injection-Date,From\n')
result.write('Subject: cmsg rmgroup ' + group + '\n')
result.write('Control: rmgroup ' + group + '\n')
message_id = '<rmgroup-' + group + '-' + epoch_time(TIME) + '@' + config['HOST'] + '>'
result.write('Message-ID: ' + message_id + '\n')
result.write('Date: ' + pretty_time(TIME) + '\n')
result.write('Injection-Date: ' + pretty_time(TIME) + '\n')
result.write('From: ' + config['NAME'] + ' <' + config['MAIL'] + '>\n\n')
result.write(message + '\n')
result.close()
sign_message(config, file_rmgroup, group, message_id, 'rmgroup', passphrase)
os.remove(file_rmgroup + '.txt')
if groups.has_key(group):
if raw_input('Do you want to update the current checkgroups file? (y/n) ') == 'y':
del groups[group]
write_checkgroups(groups, config['CHECKGROUPS_FILE'])
def generate_checkgroups(config, passphrase=None, serial=None):
""" List the groups of the hierarchy.
Arguments: config (the dictionary of parameters from signcontrol.conf)
passphrase (if given, the passphrase of the private key)
serial (if given, the serial value to use)
No return value
"""
while serial not in range(0,100):
try:
print 'If it is your first checkgroups for today, leave it blank (default is 1).'
print 'Otherwise, increment this revision number by one.'
serial = int(raw_input('Revision to use (1-99): '))
print
except:
serial = 1
serial = '%02d' % serial
file_checkgroups = 'checkgroups-' + epoch_time(TIME)
result = file(file_checkgroups + '.txt', 'wb')
result.write('X-Signed-Headers: Subject,Control,Message-ID,Date,Injection-Date,From\n')
result.write('Subject: cmsg checkgroups ' + config['CHECKGROUPS_SCOPE'] + ' #' + serial_time(TIME) + serial + '\n')
result.write('Control: checkgroups ' + config['CHECKGROUPS_SCOPE'] + ' #' + serial_time(TIME) + serial + '\n')
message_id = '<checkgroups-' + epoch_time(TIME) + '@' + config['HOST'] + '>'
result.write('Message-ID: ' + message_id + '\n')
result.write('Date: ' + pretty_time(TIME) + '\n')
result.write('Injection-Date: ' + pretty_time(TIME) + '\n')
result.write('From: ' + config['NAME'] + ' <' + config['MAIL'] + '>\n\n')
for line in file(config['CHECKGROUPS_FILE'], 'r'):
result.write(line.rstrip() + '\n')
result.close()
sign_message(config, file_checkgroups, config['ADMIN_GROUP'], message_id, 'checkgroups', passphrase)
os.remove(file_checkgroups + '.txt')
def manage_keys(config):
""" Useful wrappers around the gpg program to manage PGP keys
(generate, import, export, remove, and revoke).
Argument: config (the dictionary of parameters from signcontrol.conf)
No return value
"""
choice = 0
while choice != 8:
choice = manage_menu()
if choice == 1:
print 'You currently have the following secret keys installed:'
print
os.system(config['PROGRAM_GPG'] + ' --list-secret-keys --with-fingerprint')
print 'Please note that the uid of your secret key and the value of'
print 'the ID parameter set in signcontrol.conf should be the same.'
elif choice == 2:
print
print '-----------------------------------------------------------------------'
print 'Please put the e-mail address from which you will send control articles'
print 'in the key ID (the real name field). And leave the other fields blank,'
print 'for better compatibility with Usenet software.'
print 'Choose a 2048-bit RSA key which never expires.'
print 'You should also provide a passphrase, for security reasons.'
print 'There is no need to edit the key after it has been generated.'
print
print 'Please note that the key generation may not finish if it is launched'
            print 'on a remote server, owing to a lack of entropy. Use your own'
print 'computer instead and import the key on the remote one afterwards.'
print '-----------------------------------------------------------------------'
print
os.system(config['PROGRAM_GPG'] + ' --gen-key --allow-freeform-uid')
print
print 'After having generated these keys, you should export your PUBLIC key'
print 'and make it public (in the web site of your hierarchy, along with'
print 'a current checkgroups, and also announce it in news.admin.hierarchies).'
print 'You can also export your PRIVATE key for backup only.'
elif choice == 3:
print 'The key will be written to the file public-key.asc.'
key_name = raw_input('Please enter the uid of the public key to export: ')
os.system(config['PROGRAM_GPG'] + ' --armor --output public-key.asc --export "=' + key_name + '"')
elif choice == 4:
print 'The key will be written to the file private-key.asc.'
key_name = raw_input('Please enter the uid of the secret key to export: ')
os.system(config['PROGRAM_GPG'] + ' --armor --output private-key.asc --export-secret-keys "=' + key_name + '"')
if os.path.isfile('private-key.asc'):
os.chmod('private-key.asc', 0400)
print
print 'Be careful: it is a security risk to export your private key.'
print 'Please make sure that nobody has access to it.'
elif choice == 5:
raw_input('Please put it in a file named secret-key.asc and press enter.')
os.system(config['PROGRAM_GPG'] + ' --import secret-key.asc')
print
print 'Make sure that both the secret and public keys have properly been imported.'
print 'Their uid should be put as the value of the ID parameter set in signcontrol.conf.'
elif choice == 6:
key_name = raw_input('Please enter the uid of the key to *remove*: ')
os.system(config['PROGRAM_GPG'] + ' --delete-secret-and-public-key "=' + key_name + '"')
elif choice == 7:
key_name = raw_input('Please enter the uid of the secret key to revoke: ')
            os.system(config['PROGRAM_GPG'] + ' --gen-revoke "=' + key_name + '"')
print
if __name__ == "__main__":
""" The main function.
"""
config = read_configuration(CONFIGURATION_FILE)
if not os.path.isfile(config['PROGRAM_GPG']):
print 'You must install GnuPG <http://www.gnupg.org/> and edit this script to put'
print 'the path to the gpg binary.'
raw_input('Please install it before using this script.')
sys.exit(2)
choice = 0
while choice != 5:
groups = read_checkgroups(config['CHECKGROUPS_FILE'])
# Update time whenever we come back to the main menu.
TIME = time.localtime()
choice = choice_menu()
if choice == 1:
generate_newgroup(groups, config)
elif choice == 2:
generate_rmgroup(groups, config)
elif choice == 3:
generate_checkgroups(config)
elif choice == 4:
manage_keys(config)
# Embedded documentation.
POD = """
=head1 NAME
signcontrol.py - Generate PGP-signed control articles for Usenet hierarchies
=head1 SYNOPSIS
B<python signcontrol.py>
=head1 DESCRIPTION
B<signcontrol.py> is a Python script aimed at Usenet hierarchy
administrators so as to help them in maintaining the canonical lists
of newsgroups in the hierarchies they administer.
This script is also useful to manage PGP keys: generation, import,
export, removal, and revocation. It works on every platform on which
Python and GnuPG are available (Windows, Linux, etc.).
It enforces best practices regarding the syntax of Usenet control
articles.
Getting started is as simple as:
=over 4
=item 1.
Downloading and installing Python (L<http://www.python.org/>). However,
make sure to use S<Python 2.x> because B<signcontrol.py> is not compatible
yet with S<Python 3.x>.
=item 2.
Downloading and installing GnuPG (L<http://www.gnupg.org/>).
=item 3.
Downloading both the B<signcontrol.py> script and its F<signcontrol.conf>
configuration file.
=item 4.
Editing the F<signcontrol.conf> configuration file so that the parameters
it defines properly fit your installation.
=item 5.
Running C<python signcontrol.py>.
=back
=head1 SUPPORT
The B<signcontrol.py> home page is:
http://www.trigofacile.com/divers/usenet/clefs/signcontrol.htm
It will always point to the current version of the script, and contains
instructions written in French.
For bug tracking, please use the issue tracker provided by Github:
https://github.com/Julien-Elie/usenet-signcontrol
=head1 SOURCE REPOSITORY
B<signcontrol.py> is maintained using Git. You can access the current
source by cloning the repository at:
https://github.com/Julien-Elie/usenet-signcontrol.git
or access it via the web at:
https://github.com/Julien-Elie/usenet-signcontrol
When contributing modifications, either patches or Git pull requests
are welcome.
=head1 CONFIGURATION FILE
The following parameters can be modified in the F<signcontrol.conf>
configuration file:
=over 4
=item B<PROGRAM_GPG>
The path to the GPG executable. It is usually
C<C:\Progra~1\GNU\GnuPG\gpg.exe> or C</usr/bin/gpg>.
=item B<PGP2_COMPATIBILITY>
Whether compatibility with MIT S<PGP 2.6.2> (or equivalent) should
be kept. Though this is now fairly obsolete, a few news servers still
haven't been updated to be able to process newer and more secure signing
algorithms. Such servers do not recognize recent signing algorithms;
however, current news servers may refuse to process messages signed
with the insecure MD5 algorithm.
Possible values are C<True>, C<False> or C<Only> (default is C<False>).
When set to C<True>, B<signcontrol> will generate two control articles:
one in a format compatible with MIT S<PGP 2.6.2> (or equivalent) and
another with a newer and more secure format. Sending these two control
articles will then ensure the widest possible processing.
When set to C<False>, B<signcontrol> will generate control articles in
only a newer and more secure format.
When set to C<Only>, B<signcontrol> will generate control articles in
only a format compatible with MIT S<PGP 2.6.2> (or equivalent).
=item B<ID>
The ID of the PGP key used to sign control articles. Note that if you
do not already have a PGP key, it can be generated by B<signcontrol.py>.
As far as Usenet hierarchy management is concerned, the ID is usually
a mere e-mail address.
=item B<MAIL>
The e-mail from which control articles are sent. It is usually the ID
of the PGP key used to sign them.
=item B<HOST>
The host which appears in the second part of the Message-ID of control
articles generated. It is usually the name of a news server.
=item B<ADMIN_GROUP>
An existing newsgroup of the hierarchy (where checkgroups control
articles will be fed). If an administrative newsgroup exists, put it.
Otherwise, any other newsgroup of the hierarchy will be fine.
=item B<NAME>
The name which appears in the From: header field. You should only use
ASCII characters. Otherwise, you have to MIME-encode it (for instance:
C<=?ISO-8859-15?Q?Julien_=C9LIE?=>).
=item B<CHECKGROUPS_SCOPE>
The scope of the hierarchy according to Section
5.2.3 of RFC 5537 (also known as USEPRO, available at
L<https://tools.ietf.org/html/rfc5537#section-5.2.3>). For instance:
C<fr> (for fr.*), C<de !de.alt> (for de.* excepting de.alt.*) or
C<de.alt> (for de.alt.*).
=item B<URL>
The URL where the public PGP key can be found. If you do not have any,
leave C<ftp://ftp.isc.org/pub/pgpcontrol/README>. If you want to add
more URLs (like the home page of the hierarchy), use a multi-line text
where each line, except for the first, begins with a tabulation.
=item B<NEWGROUP_MESSAGE_MODERATED>, B<NEWGROUP_MESSAGE_UNMODERATED>,
B<RMGROUP_MESSAGE>
The message which will be written in the corresponding control article.
All occurrences of C<$GROUP$> will be replaced by the name of the
newsgroup.
=item B<PRIVATE_HIERARCHY>
Whether the hierarchy is public or private. If it is private (that is
to say if it is intended to remain in a local server with private access
and if it is not fed to other Usenet news servers), the value should
be C<True>, so that checkgroups control articles are not crossposted
to the news.admin.hierarchies newsgroup. Possible values are C<True>
or C<False> (default is C<False>).
=item B<CHECKGROUPS_FILE>
The file which contains the current checkgroups.
=item B<ENCODING>
The encoding of control articles. The default value is C<ISO-8859-1>.
=back
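As a purely illustrative sketch (the values below are placeholders, not
recommended defaults), the configuration file consists of simple
C<PARAMETER = "value"> lines, for instance:
    PROGRAM_GPG = "/usr/bin/gpg"
    ID = "control@hierarchy.example.org"
    MAIL = "control@hierarchy.example.org"
    HOST = "news.hierarchy.example.org"
    ADMIN_GROUP = "example.admin"
    CHECKGROUPS_SCOPE = "example"
    CHECKGROUPS_FILE = "checkgroups.txt"
    ENCODING = "ISO-8859-1"
    PGP2_COMPATIBILITY = "False"
    PRIVATE_HIERARCHY = "False"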
=head1 USEFUL RESOURCES
Here are some resources that can be useful to be aware of:
=over 4
=item Usenet Hierarchy Administration FAQ
L<http://www.eyrie.org/~eagle/faqs/usenet-hier.html>
=item Usenet hierarchy information
L<http://usenet.trigofacile.com/hierarchies/>
=item Hosting service for hierarchy administrators
L<http://www.news-admin.org/>
=back
=head1 LICENSE
The B<signcontrol.py> package as a whole is covered by the following
copyright statement and license:
Copyright (c) 2007, 2008, 2009, 2011, 2014 Julien ÉLIE
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
=head1 HISTORY
B<signcontrol.py> was written by Julien ÉLIE.
=head1 SEE ALSO
gpg(1).
=cut
"""
|
python
|
with open('p11_grid.txt', 'r') as file:
lines = file.readlines()
n = []
for line in lines:
a = line.split(' ')
b = []
for i in a:
b.append(int(i))
n.append(b)
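# Scan every run of four adjacent cells (right, down, and the two diagonals) and keep the largest product in N.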
N = 0
for i in range(20):
for j in range(20):
horizontal, vertical, diag1, diag2 = 0, 0, 0, 0
if j < 17:
horizontal = n[i][j]*n[i][j+1]*n[i][j+2]*n[i][j+3]
if horizontal > N:
N = horizontal
if i < 17:
vertical = n[i][j]*n[i+1][j]*n[i+2][j]*n[i+3][j]
if vertical > N:
N = vertical
if i < 17 and j < 17:
diag1 = n[i][j]*n[i+1][j+1]*n[i+2][j+2]*n[i+3][j+3]
if diag1 > N:
N = diag1
        if i >= 3 and j < 17:
            diag2 = n[i][j]*n[i-1][j+1]*n[i-2][j+2]*n[i-3][j+3]
if diag2 > N:
N = diag2
print(N)
|
python
|
# -*- coding: utf-8 -*-
#
# COMMON
#
page_action_basket = "ะะพัะทะธะฝะฐ"
page_action_enter = "ะะพะนัะธ"
page_action_add = "ะะพะฑะฐะฒะธัั"
page_action_cancel = "ะัะผะตะฝะฐ"
page_action_yes = "ะะฐ"
page_action_save = "Сохранить"
page_action_action = "ะะตะนััะฒะธะต"
page_action_modify = "ะธะทะผะตะฝะธัั"
page_action_remove = "ัะดะฐะปะธัั"
page_message_error = "ะัะธะฑะบะฐ!"
page_remove_question = "Вы действительно хотите удалить"
admin_options_manage_category = "ะ ะตะดะฐะบัะธัะพะฒะฐัั ะบะฐัะตะณะพัะธะธ ะบะฝะธะณ"
admin_options_manage_cover = "ะ ะตะดะฐะบัะธัะพะฒะฐัั ะฟะตัะตะฟะปัั"
admin_options_manage_quality = "ะ ะตะดะฐะบัะธัะพะฒะฐัั ะบะฐัะตััะฒะพ"
admin_options_manage_language = "ะ ะตะดะฐะบัะธัะพะฒะฐัั ัะทัะบ ะธะทะดะฐะฝะธั"
admin_options_manage_books = "ะ ะตะดะฐะบัะธัะพะฒะฐัั ะบะฝะธะณะธ"
admin_options_statistics = "ะกัะฐัะธััะธัะตัะบะธะต ะดะฐะฝะฝัะต"
#
# CATEGORY PAGE
#
page_manage_category_title = "ะะฐะทะฒะฐะฝะธะต ัะฒะพะตะณะพ ัะฐะนัะฐ: ะ ะตะดะฐะบัะธัะพะฒะฐะฝะธะต ะบะฐัะตะณะพัะธะน ะบะฝะธะณ"
page_manage_category_banner = "ะะฐะทะฒะฐะฝะธะต ัะฐะนัะฐ"
page_manage_category_sub_title = "ะ ะตะดะฐะบัะธัะพะฒะฐะฝะธะต ะบะฐัะตะณะพัะธะน ะบะฝะธะณ"
page_manage_category_modal_title_add = "ะะพะฑะฐะฒะธัั ะฝะพะฒัั ะบะฐัะตะณะพัะธั"
page_manage_category_modal_title_edit = "ะะทะผะตะฝะธัั ะบะฐัะตะณะพัะธั"
page_manage_category_name = "ะะฐัะตะณะพัะธั"
page_manage_category_desc = "ะะฟะธัะฐะฝะธะต ะบะฐัะตะณะพัะธะธ"
page_manage_category_super_category = "ะะปะฐะฒะฝะฐั ะบะฐัะตะณะพัะธั"
page_manage_category_it_is_main = "ััะพ ะณะปะฐะฒะฝะฐั ะบะฐัะตะณะพัะธั"
page_manage_category_remove_object_name = "ะบะฐัะตะณะพัะธั"
page_manage_category_remove_success = "ะะฐัะตะณะพัะธั ััะฟะตัะฝะพ ัะดะฐะปะตะฝะฐ."
page_manage_category_remove_error = "ะะต ัะดะฐะปะพัั ัะดะฐะปะธัั ะบะฐัะตะณะพัะธั."
page_manage_category_add_exists_alert = "ะะฐัะตะณะพัะธั ั ัะฐะบะธะผ ะธะผะตะฝะตะผ ัะถะต ัััะตััะฒัะตั!"
page_manage_category_add_name_input = "ะะฐะทะฒะฐะฝะธะต ะบะฐัะตะณะพัะธะธ"
page_manage_category_add_desc_input = "ะัะฐัะบะพะต ะพะฟะธัะฐะฝะธะต ะบะฐัะตะณะพัะธะธ"
page_manage_category_add_name_chose_super_cat = "ะัะฑะตัะตัะต ะณะปะฐะฒะฝัั ะบะฐัะตะณะพัะธั"
page_manage_category_add_note_1 = "ะัะปะธ ะดะฐะฝะฝะฐั ะบะฐัะตะณะพัะธั ัะฒะปัะตััั ะณะปะฐะฒะฝะพะน, ัะพ ะพััะฐะฒััะต ััะพ ะฟะพะปะต ะฑะตะปัะผ."
#
# COVER PAGE
#
page_manage_cover_title = "ะะฐะทะฒะฐะฝะธะต ัะฒะพะตะณะพ ัะฐะนัะฐ: ะ ะตะดะฐะบัะธัะพะฒะฐะฝะธะต ัะธะฟะพะฒ ะฟะตัะตะฟะปััะฐ"
page_manage_cover_banner = "ะะฐะทะฒะฐะฝะธะต ัะฐะนัะฐ"
page_manage_cover_sub_title = "ะ ะตะดะฐะบัะธัะพะฒะฐะฝะธะต ัะธะฟะพะฒ ะฟะตัะตะฟะปััะฐ"
page_manage_cover_modal_title_add = "ะะพะฑะฐะฒะธัั ะฝะพะฒัะน ะฟะตัะตะฟะปัั"
page_manage_cover_modal_title_edit = "ะะทะผะตะฝะธัั ะฟะตัะตะฟะปัั"
page_manage_cover_name = "ะะตัะตะฟะปัั"
page_manage_cover_remove_object_name = "ะฟะตัะตะฟะปัั"
page_manage_cover_remove_success = "ะะตัะตะฟะปัั ััะฟะตัะฝะพ ัะดะฐะปัะฝ."
page_manage_cover_remove_error = "ะะต ัะดะฐะปะพัั ัะดะฐะปะธัั ะฟะตัะตะฟะปัั."
page_manage_cover_add_exists_alert = "ะะตัะตะฟะปัั ั ัะฐะบะธะผ ะธะผะตะฝะตะผ ัะถะต ัััะตััะฒัะตั!"
page_manage_cover_add_name_input = "ะะตัะตะฟะปัั"
#
# QUALITY PAGE
#
page_manage_quality_title = "ะะฐะทะฒะฐะฝะธะต ัะฒะพะตะณะพ ัะฐะนัะฐ: ะ ะตะดะฐะบัะธัะพะฒะฐะฝะธะต ะบะฐัะตััะฒะฐ"
page_manage_quality_banner = "ะะฐะทะฒะฐะฝะธะต ัะฐะนัะฐ"
page_manage_quality_sub_title = "ะ ะตะดะฐะบัะธัะพะฒะฐะฝะธะต ะบะฐัะตััะฒะฐ"
page_manage_quality_modal_title_add = "ะะพะฑะฐะฒะธัั ะฝะพะฒะพะต ะบะฐัะตััะฒะพ"
page_manage_quality_modal_title_edit = "ะะทะผะตะฝะธัั ะบะฐัะตััะฒะพ"
page_manage_quality_name = "ะะฐัะตััะฒะพ"
page_manage_quality_desc = "ะะฟะธัะฐะฝะธะต ะบะฐัะตััะฒะฐ"
page_manage_quality_remove_object_name = "ะบะฐัะตััะฒะพ"
page_manage_quality_remove_success = "ะะฐัะตััะฒะพ ััะฟะตัะฝะพ ัะดะฐะปะตะฝะพ."
page_manage_quality_remove_error = "ะะต ัะดะฐะปะพัั ัะดะฐะปะธัั ะบะฐัะตััะฒะพ."
page_manage_quality_add_exists_alert = "ะะฐัะตััะฒะพ ั ัะฐะบะธะผ ะธะผะตะฝะตะผ ัะถะต ัััะตััะฒัะตั!"
page_manage_quality_add_name_input = "ะะฐัะตััะฒะพ"
page_manage_quality_add_desc_input = "ะัะฐัะบะพะต ะพะฟะธัะฐะฝะธะต ะบะฐัะตััะฒะฐ"
#
# LANGUAGE PAGE
#
page_manage_language_title = "ะะฐะทะฒะฐะฝะธะต ัะฒะพะตะณะพ ัะฐะนัะฐ: ะ ะตะดะฐะบัะธัะพะฒะฐะฝะธะต ัะทัะบะฐ"
page_manage_language_banner = "ะะฐะทะฒะฐะฝะธะต ัะฐะนัะฐ"
page_manage_language_sub_title = "ะ ะตะดะฐะบัะธัะพะฒะฐะฝะธะต ัะทัะบะฐ"
page_manage_language_modal_title_add = "ะะพะฑะฐะฒะธัั ะฝะพะฒัะน ัะทัะบ"
page_manage_language_modal_title_edit = "ะะทะผะตะฝะธัั ัะทัะบ"
page_manage_language_name = "ะฏะทัะบ"
page_manage_language_remove_object_name = "ัะทัะบ"
page_manage_language_remove_success = "ะฏะทัะบ ััะฟะตัะฝะพ ัะดะฐะปัะฝ."
page_manage_language_remove_error = "ะะต ัะดะฐะปะพัั ัะดะฐะปะธัั ัะทัะบ."
page_manage_language_add_exists_alert = "ะฏะทัะบ ั ัะฐะบะธะผ ะธะผะตะฝะตะผ ัะถะต ัััะตััะฒัะตั!"
page_manage_language_add_name_input = "ะฏะทัะบ"
#
# BOOKS PAGE
#
page_manage_book_title = "ะะฐะทะฒะฐะฝะธะต ัะฒะพะตะณะพ ัะฐะนัะฐ: ะ ะตะดะฐะบัะธัะพะฒะฐะฝะธะต ะบะฝะธะณ"
page_manage_book_banner = "ะะฐะทะฒะฐะฝะธะต ัะฐะนัะฐ"
page_manage_book_sub_title = "ะ ะตะดะฐะบัะธัะพะฒะฐะฝะธะต ะบะฝะธะณ"
page_manage_book_title_add = "ะะพะฑะฐะฒะธัั ะฝะพะฒัั ะบะฝะธะณั"
page_manage_book_title_edit = "ะะทะผะตะฝะธัั ะดะฐะฝะฝัะต ะบะฝะธะณะธ"
page_manage_book_add_name_input = "ะะฐะทะฒะฐะฝะธะต ะบะฝะธะณะธ"
page_manage_book_add_author_input = "ะะฒัะพั"
page_manage_book_add_desc_input = "ะะฟะธัะฐะฝะธะต ะบะฝะธะณะธ"
page_manage_book_add_name_chose_category = "ะะฐัะตะณะพัะธั"
page_manage_book_add_name_chose_cover = "ะะตัะตะฟะปัั"
page_manage_book_add_name_chose_quality = "ะะฐัะตััะฒะพ"
page_manage_book_add_name_chose_language = "ะฏะทัะบ"
page_manage_book_add_price_input = "ะฆะตะฝะฐ ะบะฝะธะณะธ ะฒ ัะพัะผะฐัะต ####.##"
page_manage_book_add_price_label = "ะฆะตะฝะฐ"
page_manage_book_add_discount_input = "ะกะบะธะดะบะฐ ะฝะฐ ะบะฝะธะณั ะฒ ัะพัะผะฐัะต ####.##"
page_manage_book_add_currency_input = "ะณัะฝ."
page_manage_book_add_priory_check = "ะัะดะตะปะธัั ััั ะบะฝะธะณั"
page_manage_book_add_upload_files = "ะะฐะณััะทะธัั ัะพัะพะณัะฐัะธะธ ะบะฝะธะณ"
page_manage_book_name = "ะกะฟะธัะพะบ ะบะฝะธะณ"
page_manage_book_remove_object_name = "ะบะฝะธะณั"
page_manage_book_remove_success = "ะะฝะธะณะฐ ััะฟะตัะฝะพ ัะดะฐะปะตะฝะฐ."
page_manage_book_remove_error = "ะะต ัะดะฐะปะพัั ัะดะฐะปะธัั ะบะฝะธะณั."
page_manage_book_reference_num_label = "ะะพะผะตั ัััะปะบะธ"
|
python
|
#!/usr/bin/env python3
# -*-coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on October 14, 2014
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
"""
import os
from veles.config import root
from veles.tests import timeout
from veles.znicz.tests.functional import StandardTest
import veles.znicz.tests.research.SpamKohonen.spam_kohonen as spam_kohonen
# FIXME(v.markovtsev): remove this when Kohonen is ported to CUDA
root.common.engine.backend = "ocl"
class TestSpamKohonen(StandardTest):
@classmethod
def setUpClass(cls):
root.spam_kohonen.loader.validation_ratio = 0.0
root.spam_kohonen.update({
"forward": {"shape": (8, 8),
"weights_stddev": 0.05,
"weights_filling": "uniform"},
"decision": {"epochs": 5},
"downloader": {
"url":
"https://s3-eu-west-1.amazonaws.com/veles.forge/"
"SpamKohonen/spam.tar",
"directory": root.common.dirs.datasets,
"files": [os.path.join("spam", "spam.txt.xz")]},
"loader": {"minibatch_size": 80,
"force_numpy": True,
"ids": True,
"classes": False,
"file":
os.path.join(root.common.dirs.datasets,
"spam", "spam.txt.xz")},
"train": {"gradient_decay": lambda t: 0.001 / (1.0 + t * 0.0002),
"radius_decay": lambda t: 1.0 / (1.0 + t * 0.0002)},
"exporter": {"file": "classified_fast4.txt"}})
@timeout(700)
def test_spamkohonen(self):
self.info("Will test spam kohonen workflow")
workflow = spam_kohonen.SpamKohonenWorkflow(self.parent)
workflow.initialize(device=self.device)
workflow.run()
self.assertIsNone(workflow.thread_pool.failure)
diff = workflow.decision.weights_diff
self.assertAlmostEqual(diff, 3.577783, places=6)
self.assertEqual(5, workflow.loader.epoch_number)
self.info("All Ok")
if __name__ == "__main__":
StandardTest.main()
|
python
|
from typing import List
# ------------------------------- solution begin -------------------------------
class Solution:
def canWinNim(self, n: int) -> bool:
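        # Nim strategy: with 1-3 stones removed per turn, the first player loses
        # exactly when n is a multiple of 4, so the winning positions are n % 4 != 0.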
        return n % 4 != 0
# ------------------------------- solution end ---------------------------------
if __name__ == '__main__':
input = 4
print("Input: {}".format(input))
solution = Solution()
print("Output: {}".format(solution.canWinNim(input)))
|
python
|
bat = int(input('bateria = '))
def batery (bat):
if bat == 0:
print('morri')
elif bat > 0 and bat < 21:
        print('conecte o carregador')
elif bat > 20 and bat < 80:
print('carregando...')
elif bat > 79 and bat < 100:
print('estou de boa')
elif bat == 100:
print('pode tirar o carregador')
elif bat > 100:
print('estou ligadasso')
return bat
batery(bat)
|
python
|
#coding:utf8
'''
Created on April 20, 2016
@author: wb-zhaohaibo
'''
import MySQLdb
print MySQLdb
conn = MySQLdb.Connect(
host="127.0.0.1",
port=3306,
user="root",
passwd="admin",
db="testsql",
charset="utf8"
)
cursor = conn.cursor()
sql = "select * from student"
cursor.execute(sql)
print cursor.rowcount
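# fetchone/fetchmany/fetchall all advance the same cursor, so each call below
# returns only rows that the previous calls have not yet consumed.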
rs = cursor.fetchone()
print rs
rs = cursor.fetchmany(3)
print rs
rs = cursor.fetchall()
print rs
cursor.close()
conn.close()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script for updating pymorphy2 dictionaries (Russian and Ukrainian).
Please note that it is resource-heavy: it requires > 3GB free RAM and about
1GB on HDD for temporary files.
Usage:
update.py (ru|uk) (download|compile|package|cleanup) ...
update.py (ru|uk) all
update.py -h | --help
"""
from __future__ import print_function
import os
import time
import shutil
import subprocess
from docopt import docopt
from cookiecutter.main import cookiecutter
from pymorphy2 import opencorpora_dict
OUT_PATH = "compiled-dicts"
RU_DICT_URL = "http://opencorpora.org/files/export/dict/dict.opcorpora.xml.bz2"
RU_CORPORA_URL = "http://opencorpora.org/files/export/annot/annot.opcorpora.xml.bz2"
RU_DICT_XML = "dict.opcorpora.xml"
RU_CORPORA_XML = "annot.corpus.xml"
UK_DICT_URL = "https://drive.google.com/uc?id=0B4mUAylazDVbUXFIRGJ2S01ibGM&export=download"
UK_DICT_XML = "full-uk.xml"
def _download_bz2(url, out_name):
subprocess.check_call("curl --progress-bar '%s' | bunzip2 > '%s'" % (url, out_name), shell=True)
class RussianBuilder(object):
def download(self):
print("Downloading OpenCorpora dictionary...")
_download_bz2(RU_DICT_URL, RU_DICT_XML)
print("Downloading OpenCorpora corpus...")
_download_bz2(RU_CORPORA_URL, RU_CORPORA_XML)
print("")
def compile(self):
print("Compiling the dictionary")
subprocess.check_call(["./build-dict.py", RU_DICT_XML, OUT_PATH,
"--lang", "ru",
"--corpus", RU_CORPORA_XML,
"--clear"])
print("")
def package(self):
print("Creating Python package")
cookiecutter(
template="cookiecutter-pymorphy2-dicts",
no_input=True,
overwrite_if_exists=True,
extra_context={
'lang': 'ru',
'lang_full': 'Russian',
'version': get_version(corpus=True, timestamp=False),
}
)
def cleanup(self):
shutil.rmtree(OUT_PATH, ignore_errors=True)
if os.path.exists(RU_DICT_XML):
os.unlink(RU_DICT_XML)
if os.path.exists(RU_CORPORA_XML):
os.unlink(RU_CORPORA_XML)
class UkrainianBuilder(object):
def download(self):
print("Downloading and converting LanguageTool dictionary...")
subprocess.check_call(['lt_convert.py', UK_DICT_URL, UK_DICT_XML])
print("")
def compile(self):
print("Compiling the dictionary")
subprocess.check_call(["./build-dict.py", UK_DICT_XML, OUT_PATH,
"--lang", "uk",
"--clear"])
print("")
def package(self):
print("Creating Python package")
cookiecutter("cookiecutter-pymorphy2-dicts", no_input=True, extra_context={
'lang': 'uk',
'lang_full': 'Ukrainian',
'version': get_version(corpus=False, timestamp=True),
})
def cleanup(self):
shutil.rmtree(OUT_PATH, ignore_errors=True)
        if os.path.exists(UK_DICT_XML):
            os.unlink(UK_DICT_XML)
def get_version(corpus=False, timestamp=False):
meta = dict(opencorpora_dict.load(OUT_PATH).meta)
if corpus:
tpl = "{format_version}.{source_revision}.{corpus_revision}"
else:
tpl = "{format_version}.{source_revision}.1"
if timestamp:
tpl += ".%s" % (int(time.time()))
return tpl.format(**meta)
if __name__ == '__main__':
args = docopt(__doc__)
if args['all']:
args['download'] = args['compile'] = args['package'] = True
if args['ru']:
builder = RussianBuilder()
elif args['uk']:
builder = UkrainianBuilder()
else:
raise ValueError("Language is not known")
if args['download']:
builder.download()
if args['compile']:
builder.compile()
if args['package']:
builder.package()
if args['cleanup']:
builder.cleanup()
|
python
|
import Selenium_module as zm
print("Zillow Downloader")
url = input("URL: ")
image_links, title = zm.get_links(url)
zm.get_images(image_links, title)
zm.cleanup_exit()
|
python
|
from flask_wtf import FlaskForm
from datetime import datetime
from wtforms import BooleanField, DateTimeField, HiddenField, SelectField, StringField, SubmitField, ValidationError
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import Required, Optional
from .. models import Element, EventFrame
class EventFrameForm(FlaskForm):
element = QuerySelectField("Element", validators = [Required()], get_label = "Name")
eventFrameTemplate = QuerySelectField("Event Frame Template", validators = [Required()], get_label = "Name")
sourceEventFrameTemplate = SelectField("Source Event Frame Template Filter", validators = [Optional()], coerce = int)
activeSourceEventFramesOnly = BooleanField("Active Event Frames Sources Only")
sourceEventFrame = SelectField("Source Event Frame", validators = [Optional()], coerce = int)
startTimestamp = DateTimeField("Start Timestamp", default = datetime.utcnow, validators = [Required()])
startUtcTimestamp = HiddenField()
endTimestamp = DateTimeField("End Timestamp", validators = [Optional()])
endUtcTimestamp = HiddenField()
name = StringField("Name", default = lambda : int(datetime.utcnow().timestamp()), validators = [Required()])
eventFrameId = HiddenField()
eventFrameTemplateId = HiddenField()
parentEventFrameId = HiddenField()
requestReferrer = HiddenField()
submit = SubmitField("Save")
def validate_endTimestamp(self, field):
if self.startTimestamp.data is not None and self.endTimestamp.data is not None:
if self.endTimestamp.data < self.startTimestamp.data:
raise ValidationError("The End Timestamp must occur after the Start Timestamp.")
if self.parentEventFrameId.data:
parentEventFrame = EventFrame.query.get_or_404(self.parentEventFrameId.data)
if parentEventFrame.EndTimestamp:
endUtcTimestamp = datetime.strptime(self.endUtcTimestamp.data, "%Y-%m-%d %H:%M:%S")
if endUtcTimestamp > parentEventFrame.EndTimestamp:
raise ValidationError("This timestamp is outside of the parent event frame.")
def validate_startTimestamp(self, field):
if self.startTimestamp.data is not None:
startUtcTimestamp = datetime.strptime(self.startUtcTimestamp.data, "%Y-%m-%d %H:%M:%S")
if self.parentEventFrameId.data:
parentEventFrame = EventFrame.query.get_or_404(self.parentEventFrameId.data)
error = False
if parentEventFrame.EndTimestamp:
if startUtcTimestamp < parentEventFrame.StartTimestamp or startUtcTimestamp > parentEventFrame.EndTimestamp:
error = True
else:
if startUtcTimestamp < parentEventFrame.StartTimestamp:
error = True
if error:
raise ValidationError("This timestamp is outside of the parent event frame.")
else:
validationError = False
eventFrame = EventFrame.query.filter_by(ElementId = self.element.data.ElementId,
EventFrameTemplateId = self.eventFrameTemplateId.data, StartTimestamp = self.startUtcTimestamp.data).first()
if eventFrame:
if self.eventFrameId.data == "":
# Trying to add a new eventFrame using a startTimestamp that already exists.
validationError = True
else:
if int(self.eventFrameId.data) != eventFrame.EventFrameId:
# Trying to change the startTimestamp of an eventFrame to a startTimestamp that already exists.
validationError = True
if validationError:
raise ValidationError('The start timestamp "{}" already exists.'.format(field.data))
|
python
|
import numpy as np
import os
import cv2
import sys
import time
import dlib
import glob
import argparse
import voronoi as v
def checkDeepFake(regions):
return True
def initialize_predictor():
# Predictor
ap = argparse.ArgumentParser()
if len(sys.argv) > 1:
predictor_path = sys.argv[1]
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
return predictor,detector
else:
print("ERROR : Please give the model as argument.")
return None,None
def extract_features(fileDirectory,videos,labels,show_results = False,frame_rate = 50):
predictor,detector = initialize_predictor()
if predictor is None:
return
for filename in videos:
currentfile = os.path.join(fileDirectory,filename)
if currentfile:
print('Opening the file with name ' + currentfile)
cap = cv2.VideoCapture(currentfile)
face_id = 0
while(cap.isOpened() and not(cv2.waitKey(1) & 0xFF == ord('q'))):
prev_features = []
ret, frame = cap.read()
features = []
if frame is None:
break
img = v.preprocessing(frame)
regions = detector(img, 0)
if regions:
# loop over the face detections
for (i, rect) in enumerate(regions):
# determine the facial landmarks for the face region, then
# convert the facial landmark (x, y)-coordinates to a NumPy
# array
vor_features = v.createVoronoi(img,predictor,rect,face_id + i,show_results=show_results)
features.append(vor_features)
if show_results and cv2.waitKey(1) & 0xFF == ord('q'):
break
                    face_id += 1
if show_results:
cv2.imshow("Frame",img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if show_results:
cap.release()
cv2.destroyAllWindows()
else:
print("Could not find the directory")
def pad_images(fileDirectory):
max_w = 80
max_h = 100
# for filename in os.listdir(fileDirectory):
# currentfile = os.path.join(fileDirectory,filename)
# if currentfile:
# img = cv2.imread(currentfile)
# ht, wd, cc = img.shape
# if ht > max_h:
# max_h = ht
# if wd > max_w:
# max_w = wd
print("Max_w #{} Max_h #{}",max_w, max_h)
for filename in os.listdir(fileDirectory):
currentfile = os.path.join(fileDirectory,filename)
if currentfile:
img = cv2.imread(currentfile)
ht, wd, cc= img.shape
result = np.full((max_h,max_w,cc), (0,0,0), dtype=np.uint8)
# compute center offset
xx = (max_w - wd) // 2
yy = (max_h - ht) // 2
# copy img image into center of result image
result[yy:yy+ht, xx:xx+wd] = img
cv2.imwrite("features/"+ filename, result)
|
python
|
r""" FSS-1000 few-shot semantic segmentation dataset """
import os
import glob
from torch.utils.data import Dataset
import torch.nn.functional as F
import torch
import PIL.Image as Image
import numpy as np
class DatasetFSS(Dataset):
def __init__(self, datapath, fold, transform, split, shot, use_original_imgsize):
self.split = split
self.benchmark = 'fss'
self.shot = shot
self.base_path = os.path.join(datapath, 'FSS-1000')
# Given predefined test split, load randomly generated training/val splits:
# (reference regarding trn/val/test splits: https://github.com/HKUSTCV/FSS-1000/issues/7))
with open('./data/splits/fss/%s.txt' % split, 'r') as f:
self.categories = f.read().split('\n')[:-1]
self.categories = sorted(self.categories)
self.class_ids = self.build_class_ids()
self.img_metadata = self.build_img_metadata()
self.transform = transform
def __len__(self):
return len(self.img_metadata)
def __getitem__(self, idx):
query_name, support_names, class_sample = self.sample_episode(idx)
query_img, query_mask, support_imgs, support_masks = self.load_frame(query_name, support_names)
query_img, query_mask = self.transform(query_img, query_mask)
query_mask = F.interpolate(query_mask.unsqueeze(0).unsqueeze(0).float(), query_img.size()[-2:], mode='nearest').squeeze()
support_transformed = [self.transform(support_img, support_cmask) for support_img, support_cmask in zip(support_imgs, support_masks)]
support_masks = [x[1] for x in support_transformed]
support_imgs = torch.stack([x[0] for x in support_transformed])
support_masks_tmp = []
for smask in support_masks:
smask = F.interpolate(smask.unsqueeze(0).unsqueeze(0).float(), support_imgs.size()[-2:], mode='nearest').squeeze()
support_masks_tmp.append(smask)
support_masks = torch.stack(support_masks_tmp)
batch = {'query_img': query_img,
'query_mask': query_mask,
'query_name': query_name,
'support_imgs': support_imgs,
'support_masks': support_masks,
'support_names': support_names,
'class_id': torch.tensor(class_sample)}
return batch
def load_frame(self, query_name, support_names):
query_img = Image.open(query_name).convert('RGB')
support_imgs = [Image.open(name).convert('RGB') for name in support_names]
query_id = query_name.split('/')[-1].split('.')[0]
query_name = os.path.join(os.path.dirname(query_name), query_id) + '.png'
support_ids = [name.split('/')[-1].split('.')[0] for name in support_names]
support_names = [os.path.join(os.path.dirname(name), sid) + '.png' for name, sid in zip(support_names, support_ids)]
query_mask = self.read_mask(query_name)
support_masks = [self.read_mask(name) for name in support_names]
return query_img, query_mask, support_imgs, support_masks
def read_mask(self, img_name):
mask = torch.tensor(np.array(Image.open(img_name).convert('L')))
mask[mask < 128] = 0
mask[mask >= 128] = 1
return mask
def sample_episode(self, idx):
query_name = self.img_metadata[idx]
class_sample = self.categories.index(query_name.split('/')[-2])
if self.split == 'val':
class_sample += 520
elif self.split == 'test':
class_sample += 760
support_names = []
while True: # keep sampling support set if query == support
support_name = np.random.choice(range(1, 11), 1, replace=False)[0]
support_name = os.path.join(os.path.dirname(query_name), str(support_name)) + '.jpg'
if query_name != support_name: support_names.append(support_name)
if len(support_names) == self.shot: break
return query_name, support_names, class_sample
def build_class_ids(self):
if self.split == 'trn':
class_ids = range(0, 520)
elif self.split == 'val':
class_ids = range(520, 760)
elif self.split == 'test':
class_ids = range(760, 1000)
return class_ids
def build_img_metadata(self):
img_metadata = []
for cat in self.categories:
img_paths = sorted([path for path in glob.glob('%s/*' % os.path.join(self.base_path, cat))])
for img_path in img_paths:
if os.path.basename(img_path).split('.')[1] == 'jpg':
img_metadata.append(img_path)
return img_metadata
|
python
|
# The Python print statement is often used to output variables.
# To combine both text and a variable, Python uses the '+' character:
x = "awesome"
print("Python is " + x)
# You can also use the '+' character to add a variable to another variable:
x = "Python is "
y = "awesome"
z = x + y
print(z)
# For numbers, the '+' character works as a mathematical operator:
x = 5
y = 10
print(x + y)
# If you try to combine a string and a number, Python will give you an error:
x = 5
y = "John"
print(x + y)
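# One possible fix is to convert the number to a string first with str():
x = 5
y = "John"
print(str(x) + y)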
|
python
|
from keras.applications.resnet50 import ResNet50 as RN50
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Flatten
from keras.layers import Dense, GlobalAveragePooling2D
from keras import backend as K
from keras.utils import plot_model
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard
from keras import optimizers
from keras import backend
import matplotlib.pyplot as plt
import os
end='activation_37'
#end='activation_'+str(idx)
BOARD_PATH = 'boards/'
EXPERIMENT_NAME = f'training_50epoch_LRFull'
MODEL_FNAME = f'models/modelRN50_{EXPERIMENT_NAME}.h5'
EPOCH_ARR=[50, 100, 200]
train_data_dir='../datasets/MIT_split/train'
val_data_dir='../datasets/MIT_split/test'
test_data_dir='../datasets/MIT_split/test'
img_width = 224
img_height=224
batch_size=32
validation_samples=807
def preprocess_input(x, dim_ordering='default'):
if dim_ordering == 'default':
dim_ordering = K.image_data_format()
assert dim_ordering in {'channels_first', 'channels_last'}
if dim_ordering == 'channels_first':
# 'RGB'->'BGR'
x = x[ ::-1, :, :]
# Zero-center by mean pixel
x[ 0, :, :] -= 103.939
x[ 1, :, :] -= 116.779
x[ 2, :, :] -= 123.68
else:
# 'RGB'->'BGR'
x = x[:, :, ::-1]
# Zero-center by mean pixel
x[:, :, 0] -= 103.939
x[:, :, 1] -= 116.779
x[:, :, 2] -= 123.68
return x
LR_list = [0.1, 0.01, 0.0001]
# LR_list = [0.1]
LR_results_dict = {}
d = {}
for EPOCHS in EPOCH_ARR:
for LR in LR_list:
results_dir=f'learningRateDiffs/epochs_{EPOCHS}_LR_{LR}'
results_txt_file = f"{results_dir}/results_{EPOCHS}_LR_{LR}"
if not os.path.exists(results_dir):
os.makedirs(results_dir)
with open(f"{results_txt_file}.txt", "a") as fi:
fi.write("Epochs\tLearning_Rate\tAccuracy\tValidation_accuracy\tLoss\tValidation_loss\n")
# create the base pre-trained model
base_model = RN50(weights='imagenet')
plot_model(base_model, to_file=f'{results_dir}/RN50_base.png', show_shapes=True, show_layer_names=True)
# base_model.summary()
#cropping the model
x = base_model.layers[-2].output
intermediate = 'inter'
x = Dense(8, activation='softmax',name=intermediate)(x)
model = Model(base_model.input, x)
plot_model(model, to_file=f'{results_dir}/modelRN50_{EXPERIMENT_NAME}.png', show_shapes=True, show_layer_names=True)
#Freezing layers
#for layer in base_model.layers:
# layer.trainable = False
#Unfreezeing layers
#for idx in range(-2,end,-1):
# base_model.layers[idx].trainable=True
new_opt = optimizers.Adadelta(learning_rate= LR)
model.compile(loss='categorical_crossentropy',optimizer=new_opt, metrics=['accuracy'])
for layer in model.layers:
print(layer.name, layer.trainable)
#preprocessing_function=preprocess_input,
datagen = ImageDataGenerator(featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
preprocessing_function=preprocess_input,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None)
train_generator = datagen.flow_from_directory(train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
test_generator = datagen.flow_from_directory(test_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
validation_generator = datagen.flow_from_directory(val_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
tbCallBack = TensorBoard(log_dir=BOARD_PATH+EXPERIMENT_NAME, histogram_freq=0, write_graph=True)
history=model.fit_generator(train_generator,
steps_per_epoch=(int(1881//batch_size)+1),
epochs=EPOCHS,
validation_data=validation_generator,
validation_steps= (int(validation_samples//batch_size)+1), callbacks=[tbCallBack])
result = model.evaluate_generator(test_generator, validation_samples)
print( result)
#saving model
model.save(f'{results_dir}/modelRN50_{EXPERIMENT_NAME}.h5')
# list all data in history
if True:
# summarize history for accuracy
print(history.history.keys())
accuracy = history.history['accuracy']
validation_accuracy = history.history['val_accuracy']
loss = history.history['loss']
validation_loss = history.history['val_loss']
LR_results_structured = [accuracy, validation_accuracy, loss, validation_loss]
LR_results_dict[f'{LR}'] = LR_results_structured
print(LR_results_dict)
with open(f"{results_txt_file}.txt", "a") as fi:
fi.write(f'{EPOCHS}\t{LR}\t{accuracy[-1]}\t{validation_accuracy[-1]}\t{loss[-1]}\t{validation_loss[-1]}\n')
with open(f"{results_txt_file}_raw.txt", "a") as fi:
fi.write(f'accuracy\tvalidation_accuracy\tloss\tvalidation_loss\n')
for a, va, l, vl in zip(accuracy, validation_accuracy, loss, validation_loss):
fi.write(f'{a}\t{va}\t{l}\t{vl}\n')
plt.plot(accuracy)
plt.plot(validation_accuracy)
plt.title(f'Learning_rate = {LR} accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig(f'{results_dir}/acc_{LR}.jpg')
plt.close()
# summarize history for loss
plt.plot(loss)
plt.plot(validation_loss)
plt.title(f'Learning_rate = {LR} model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig(f'{results_dir}/loss_{LR}.jpg')
plt.close()
backend.clear_session()
for tmpLR in LR_list:
plt.plot(LR_results_dict[f'{tmpLR}'][0])
plt.plot(LR_results_dict[f'{tmpLR}'][1])
plt.title(f'{EPOCHS} Epochs Accuracy Aggregate')
plt.ylabel('accuracy')
plt.xlabel('epoch')
    plt.legend(['train_0.1', 'validation_0.1', 'train_0.01', 'validation_0.01',
                'train_0.0001', 'validation_0.0001'], loc='upper left')
plt.savefig(f'learningRateDiffs/graph_{EPOCHS}.jpg')
plt.close()
|
python
|
import ctypes
# Implements the Array ADT using array capabilities of the ctypes module.
class Array :
# Creates an array with size elements.
def __init__( self, size ):
assert size > 0, "Array size must be > 0"
self._size = size
# Create the array structure using the ctypes module.
PyArrayType = ctypes.py_object * size
self._elements = PyArrayType()
# Initialize each element.
self.clear(None)
# Returns the size of the array.
def __len__( self ):
return self._size
# Gets the contents of the index element.
def __getitem__( self, index ):
assert index >= 0 and index < len(self), "Array subscript out of range"
return self._elements[ index ]
# Puts the value in the array element at index position.
def __setitem__( self, index, value ):
assert index >= 0 and index < len(self), "Array subscript out of range"
self._elements[ index ] = value
# Clears the array by setting each element to the given value.
def clear( self, value ):
for i in range( len(self) ) :
self._elements[i] = value
# Returns the array's iterator for traversing the elements.
def __iter__( self ):
return _ArrayIterator( self. _elements )
# An iterator for the Array ADT.
class _ArrayIterator :
def __init__( self, the_array ):
self._array_ref = the_array
self._cur_index = 0
def __iter__( self ):
return self
def __next__( self ):
if self._cur_index < len( self._array_ref ) :
entry = self._array_ref[ self._cur_index ]
self._cur_index += 1
return entry
else:
raise StopIteration
# Implementation of the Array2D ADT using an array of arrays.
class Array2D :
# Creates a 2 -D array of size numRows x numCols.
def __init__( self, num_rows, num_cols ):
# Create a 1 -D array to store an array reference for each row.
self.rows = Array( num_rows )
# Create the 1 -D arrays for each row of the 2 -D array.
for i in range( num_rows ) :
self.rows[i] = Array( num_cols )
# Returns the number of rows in the 2 -D array.
def num_rows( self ):
return len( self.rows )
# Returns the number of columns in the 2 -D array.
def num_cols( self ):
return len( self.rows[0] )
# Clears the array by setting every element to the given value.
def clear( self, value ):
for row in range( self.num_rows() ):
            self.rows[row].clear( value )
# Gets the contents of the element at position [i, j]
def __getitem__( self, index_tuple ):
assert len(index_tuple) == 2, "Invalid number of array subscripts."
row = index_tuple[0]
col = index_tuple[1]
assert row >= 0 and row < self.num_rows() \
and col >= 0 and col < self.num_cols(), \
"Array subscript out of range."
array_1d = self.rows[row]
return array_1d[col]
# Sets the contents of the element at position [i,j] to value.
def __setitem__( self, index_tuple, value ):
assert len(index_tuple) == 2, "Invalid number of array subscripts."
row = index_tuple[0]
col = index_tuple[1]
assert row >= 0 and row < self.num_rows() \
and col >= 0 and col < self.num_cols(), \
"Array subscript out of range."
array_1d = self.rows[row]
array_1d[col] = value
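# Illustrative usage sketch for Array2D (not part of the original module):
#   grid = Array2D(2, 3)
#   grid.clear(0)
#   grid[0, 1] = 7
#   print(grid[0, 1])                        # -> 7
#   print(grid.num_rows(), grid.num_cols())  # -> 2 3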
class DynamicArray:
"""A dynamic array class akin to a simplified Python list."""
def __init__(self):
"""Create an empty array."""
self._n = 0 # count actual elements
self._capacity = 1 # default array capacity
self._A = self._make_array(self._capacity) # low-level array
def __len__(self):
"""Return number of elements stored in the array."""
return self._n
def __getitem__(self, k):
"""Return element at index k."""
        if not 0 <= k < self._n:
raise IndexError( 'invalid index' )
return self._A[k] # retrieve from array
def append(self, obj):
"""Add object to end of the array."""
if self._n == self._capacity: # not enough room
self._resize(2 * self._capacity) # so double capacity
self._A[self._n] = obj
self._n += 1
    def _resize(self, c): # nonpublic utility
"""Resize internal array to capacity c."""
B = self._make_array(c) # new (bigger) array
for k in range(self._n): # for each existing value
B[k] = self._A[k]
self._A = B # use the bigger array
self._capacity = c
    def _make_array(self, c): # nonpublic utility
"""Return new array with capacity c."""
return (c * ctypes.py_object)( ) # see ctypes documentation
def insert(self, k, value):
"""Insert value at index k, shifting subsequent values rightward."""
        # (for simplicity, we assume 0 <= k <= n in this version)
        if self._n == self._capacity: # not enough room
self._resize(2 * self._capacity) # so double capacity
for j in range(self._n, k, -1): # shift rightmost first
self._A[j] = self._A[j - 1]
self._A[k] = value # store newest element
self._n += 1
def remove(self, value):
"""Remove first occurrence of value( or raise ValueError)."""
# note: we do not consider shrinking the dynamic array in this version
for k in range(self._n):
if self._A[k] == value: # found a match!
for j in range(k, self._n - 1): # shift others to fill gap
self._A[j] = self._A[j + 1]
self._A[self._n - 1] = None # help garbage collection
self._n -= 1 # we have one less item
return # exit immediately
raise ValueError( "value not found" ) # only reached if no match
|
python
|
#!/usr/bin/env python3
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""format manifest with more metadata."""
import argparse
import functools
import json
import jsonlines
from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer
from paddlespeech.s2t.frontend.utility import load_cmvn
from paddlespeech.s2t.io.utility import feat_type
from paddlespeech.s2t.utils.utility import add_arguments
from paddlespeech.s2t.utils.utility import print_arguments
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('cmvn_path', str,
'examples/librispeech/data/mean_std.json',
"Filepath of cmvn.")
add_arg('unit_type', str, "char", "Unit type, e.g. char, word, spm")
add_arg('vocab_path', str,
'examples/librispeech/data/vocab.txt',
"Filepath of the vocabulary.")
add_arg('manifest_paths', str,
None,
"Filepaths of manifests for building vocabulary. "
"You can provide multiple manifest files.",
nargs='+',
required=True)
# bpe
add_arg('spm_model_prefix', str, None,
"spm model prefix, spm_model_%(bpe_mode)_%(count_threshold), only need when `unit_type` is spm")
add_arg('output_path', str, None, "filepath of formated manifest.", required=True)
# yapf: disable
args = parser.parse_args()
def main():
print_arguments(args, globals())
fout = open(args.output_path, 'w', encoding='utf-8')
# get feat dim
filetype = args.cmvn_path.split(".")[-1]
mean, istd = load_cmvn(args.cmvn_path, filetype=filetype)
feat_dim = mean.shape[0] #(D)
print(f"Feature dim: {feat_dim}")
text_feature = TextFeaturizer(args.unit_type, args.vocab_path, args.spm_model_prefix)
vocab_size = text_feature.vocab_size
print(f"Vocab size: {vocab_size}")
    # each jsonlines record looks like this
# {
# "input": [{"name": "input1", "shape": (100, 83), "feat": "xxx.ark:123"}],
# "output": [{"name":"target1", "shape": (40, 5002), "text": "a b c de"}],
# "utt2spk": "111-2222",
# "utt": "111-2222-333"
# }
count = 0
for manifest_path in args.manifest_paths:
with jsonlines.open(str(manifest_path), 'r') as reader:
manifest_jsons = list(reader)
for line_json in manifest_jsons:
output_json = {
"input": [],
"output": [],
'utt': line_json['utt'],
'utt2spk': line_json.get('utt2spk', 'global'),
}
# output
line = line_json['text']
if isinstance(line, str):
# only one target
tokens = text_feature.tokenize(line)
tokenids = text_feature.featurize(line)
output_json['output'].append({
'name': 'target1',
'shape': (len(tokenids), vocab_size),
'text': line,
'token': ' '.join(tokens),
'tokenid': ' '.join(map(str, tokenids)),
})
else:
# isinstance(line, list), multi target in one vocab
for i, item in enumerate(line, 1):
tokens = text_feature.tokenize(item)
tokenids = text_feature.featurize(item)
output_json['output'].append({
'name': f'target{i}',
'shape': (len(tokenids), vocab_size),
'text': item,
'token': ' '.join(tokens),
'tokenid': ' '.join(map(str, tokenids)),
})
# input
line = line_json['feat']
if isinstance(line, str):
# only one input
feat_shape = line_json['feat_shape']
assert isinstance(feat_shape, (list, tuple)), type(feat_shape)
filetype = feat_type(line)
if filetype == 'sound':
feat_shape.append(feat_dim)
else: # kaldi
raise NotImplementedError('no support kaldi feat now!')
output_json['input'].append({
"name": "input1",
"shape": feat_shape,
"feat": line,
"filetype": filetype,
})
else:
# isinstance(line, list), multi input
raise NotImplementedError("not support multi input now!")
fout.write(json.dumps(output_json) + '\n')
count += 1
print(f"{args.manifest_paths} Examples number: {count}")
fout.close()
if __name__ == '__main__':
main()
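# Illustrative invocation sketch (the script name and data paths below are placeholders,
# not taken from the original repository):
#   python format_manifest.py --cmvn_path data/mean_std.json --unit_type char \
#       --vocab_path data/vocab.txt --manifest_paths data/manifest.train \
#       --output_path data/manifest.train.fmt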
|
python
|
# Unit test predict_interval ForecasterAutoreg
# ==============================================================================
import numpy as np
import pandas as pd
from skforecast.ForecasterAutoreg import ForecasterAutoreg
from sklearn.linear_model import LinearRegression
def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_1_in_sample_residuals_is_True():
'''
Test output when regressor is LinearRegression and one step ahead is predicted
using in sample residuals.
'''
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=pd.Series(np.arange(10)))
forecaster.in_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10)
expected = pd.DataFrame(
np.array([[10., 20., 20.]]),
columns = ['pred', 'lower_bound', 'upper_bound'],
index = pd.RangeIndex(start=10, stop=11, step=1)
)
results = forecaster.predict_interval(steps=1, in_sample_residuals=True)
pd.testing.assert_frame_equal(results, expected)
def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_2_in_sample_residuals_is_True():
'''
    Test output when regressor is LinearRegression and two steps ahead are predicted
using in sample residuals.
'''
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=pd.Series(np.arange(10)))
forecaster.in_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10)
expected = pd.DataFrame(
np.array([[10. ,20., 20.],
[11., 24.33333333, 24.33333333]
]),
columns = ['pred', 'lower_bound', 'upper_bound'],
index = pd.RangeIndex(start=10, stop=12, step=1)
)
results = forecaster.predict_interval(steps=2, in_sample_residuals=True)
pd.testing.assert_frame_equal(results, expected)
def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_1_in_sample_residuals_is_False():
'''
Test output when regressor is LinearRegression and one step ahead is predicted
using out sample residuals.
'''
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=pd.Series(np.arange(10)))
forecaster.out_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10)
expected = pd.DataFrame(
np.array([[10., 20., 20.]]),
columns = ['pred', 'lower_bound', 'upper_bound'],
index = pd.RangeIndex(start=10, stop=11, step=1)
)
results = forecaster.predict_interval(steps=1, in_sample_residuals=False)
pd.testing.assert_frame_equal(results, expected)
def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_2_in_sample_residuals_is_False():
'''
    Test output when regressor is LinearRegression and two steps ahead are predicted
using out sample residuals.
'''
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=pd.Series(np.arange(10)))
forecaster.out_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10)
expected = pd.DataFrame(
np.array([[10. ,20., 20.],
[11., 24.33333333, 24.33333333]
]),
columns = ['pred', 'lower_bound', 'upper_bound'],
index = pd.RangeIndex(start=10, stop=12, step=1)
)
results = forecaster.predict_interval(steps=2, in_sample_residuals=False)
pd.testing.assert_frame_equal(results, expected)
|
python
|
import os
import gzip
import cPickle
from config import config
for fold in range(5):
filename = os.path.join(config.data_dir, 'atis.fold' + str(fold) + '.pkl.gz')
with gzip.open(filename, 'rb') as f:
train_set, valid_set, test_set, dicts = cPickle.load(f)
labels2idx_, tables2idx_, words2idx_ = dicts['labels2idx'], dicts['tables2idx'], dicts['words2idx']
idx2labels = {v: k for k, v in labels2idx_.items()}
idx2tables = {v: k for k, v in tables2idx_.items()}
idx2words = {v: k for k, v in words2idx_.items()}
train_x, train_ne, train_label = train_set
for sentence, ne, label in zip(train_x, train_ne, train_label):
print(sentence, ne, label)
print (' '.join([idx2labels[i] for i in label])); print ('\n')
print (' '.join([idx2tables[i] for i in ne])); print ('\n')
print (' '.join([idx2words[i] for i in sentence])); print ('\n')
exit()
|
python
|
import unittest
from bsim.connection import *
class TestConnectionMethods(unittest.TestCase):
def test_data(self):
c = Connection(debug=False)
c.delay_start = [0, 0, 3, 0, 1, 0, 0, 0, 0, 2, 0, 0]
c.delay_num = [1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0]
c.rev_delay_start = [0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 3]
c.rev_delay_num = [0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1]
c.rev_map2sid = [0, 3, 1, 2]
gpu = c.to_gpu()
cpu = c.from_gpu(gpu, only_struct=False)
self.assertListEqual(c.delay_start, list(cast(cpu.delay_start, POINTER(c_int*cpu.n_len)).contents))
self.assertListEqual(c.delay_num, list(cast(cpu.delay_num, POINTER(c_int*cpu.n_len)).contents))
self.assertListEqual(c.rev_delay_start, list(cast(cpu.rev_delay_start, POINTER(c_int*cpu.r_n_len)).contents))
self.assertListEqual(c.rev_delay_num, list(cast(cpu.rev_delay_num, POINTER(c_int*cpu.r_n_len)).contents))
self.assertListEqual(c.rev_map2sid, list(cast(cpu.rev_map2sid, POINTER(c_int*cpu.s_len)).contents))
if __name__ == '__main__':
print('Testing {}: '.format(__file__[:-3]))
unittest.main()
print('\n')
|
python
|
class History(object):
def __init__(self, name, userID):
self.name = name
self.userID = userID
self.history = []
def logMessage(self, lastMessage):
        if len(self.history) > 10:
            self.history.pop(0)  # drop the oldest entry so only recent messages are kept
        self.history.append(lastMessage)
def getLastMessages(self, num):
historyWanted = int(num)
if historyWanted > len(self.history):
historyWanted = len(self.history)
        lastMessages = self.history[-historyWanted:]
return lastMessages
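# Illustrative usage sketch (not part of the original module):
#   h = History('general', userID=42)
#   for i in range(12):
#       h.logMessage('message %d' % i)
#   print(h.getLastMessages(3))   # -> the three most recent messages kept in the window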
|
python
|
import logging
__title__ = 'django_nine.tests.base'
__author__ = 'Artur Barseghyan'
__copyright__ = '2015-2019 Artur Barseghyan'
__license__ = 'GPL-2.0-only OR LGPL-2.1-or-later'
__all__ = (
'LOG_INFO',
'log_info',
)
logger = logging.getLogger(__name__)
LOG_INFO = True
def log_info(func):
"""Logs some useful info."""
if not LOG_INFO:
return func
def inner(self, *args, **kwargs):
result = func(self, *args, **kwargs)
logger.info('\n\n%s' % func.__name__)
logger.info('============================')
if func.__doc__:
logger.info('""" %s """' % func.__doc__.strip())
logger.info('----------------------------')
if result is not None:
logger.info(result)
logger.info('\n++++++++++++++++++++++++++++')
return result
return inner
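# Illustrative usage sketch (not part of the original module):
#
#   class MyTestCase(unittest.TestCase):
#       @log_info
#       def test_something(self):
#           """Check that something works."""
#           return 'some useful result'
#
# When LOG_INFO is True, the decorated method logs its name, docstring and
# return value via the module-level logger; otherwise it runs unchanged.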
|
python
|
import os
import datetime
import MySQLdb
con = MySQLdb.connect(host='DBSERVER', user='DBUSER', passwd='DBPASSWD', db='DB')
cur = con.cursor()
cur.execute("SHOW TABLES")
data = "SET FOREIGN_KEY_CHECKS = 0; \n"
tables = []
for table in cur.fetchall():
tables.append(table[0])
for table in tables:
if table != "fos_user" and table != 'udala':
        # Check whether the udala_id column exists in this table
cur.execute("SHOW columns from `" + str(table) + "` where field='udala_id' \n")
badu = cur.rowcount
if badu == 1:
data += "-- BADU!! \n \n \n"
data += "DELETE FROM `" + str(table) + "` WHERE udala_id=64; \n"
cur.execute("SELECT * FROM `" + str(table) + "` WHERE udala_id=64;")
else:
data += "-- EZ DU !! \n \n"
data += "DELETE FROM `" + str(table) + "`; \n"
cur.execute("SELECT * FROM `" + str(table) + "`;")
for row in cur.fetchall():
data += "INSERT INTO `" + str(table) + "` VALUES("
first = True
for field in row:
if not first:
data += ', '
if (type(field) is long) or (type(field) is int) or (type(field) is float):
data += str(field)
first = False
elif field is None:
data += str('NULL')
first = False
else:
data += '"' + str(field).replace("\"", "\'") + '"'
first = False
data += ");\n"
data += "\n\n"
data += "SET FOREIGN_KEY_CHECKS = 1; \n"
FILE = open("export_zerbikat.sql","w")
FILE.writelines(data)
FILE.close()
|
python
|
from .data_parallel import CustomDetDataParallel
from .sync_batchnorm import convert_model
|
python
|
import os
from setuptools import setup
setup(name='NNApp01',
version='0.1.0',
description='NN Programming Assignment ',
author='Dzmitry Buhryk',
author_email='[email protected]',
license='MIT',
install_requires=['flask', 'werkzeug'],
tests_require=['requests', 'flask', 'werkzeug', 'urllib3'],
packages=['app01', 'test'],
include_package_data=True,
package_data={
'app01': ['static/index_t.html', 'resources/Keyword.txt'],
'test': ['resources/*']
},
package_dir={
'app01': 'app01',
'test': 'test'
},
zip_safe=False)
|
python
|
from multiprocessing import Process
import envServer
from distutils.dir_util import copy_tree
from random import shuffle
import sys
sys.path.append("../pyutil")
sys.path.append("..")
import signal
import parseNNArgs
import traceback
import threading
import pickle
import shutil
import glob
import os
import random
import time
import json
import math
import numpy as np
import scipy.ndimage
from dqnQNN import DQN
from replay_buffer import ReplayBuffer
from environment import Environment
import logDqn
import outDir
import tensorflow as tf
from tensorflow.python.framework import ops
# from tensorflow.python import debug as tf_debug
def printT(s):
sys.stdout.write(s + '\n')
class dqnRunner():
def __init__(self, sess, params, out_dir=None, agentB_sess= None):
self.params = params
self.sess = sess
self.agentB_sess = agentB_sess
self.lock = threading.Lock()
self.modelStoreIntv = 150
self.bufferStoreIntv = 150
self.annealSteps = params['annealSteps']
self.state_dim = params['pxRes']
if self.params['verbose']:
printT("tensorflow version: {}".format(tf.__version__))
# create environment
self.env = Environment(sess, params, self)
self.numActions = self.env.numActions
# load classifier for reward calculation
if self.params['classNN'] is not None:
with tf.device("/device:CPU:0"):
self.rewardClassNet = ClassConvNetEval(self.sess, params)
self.env.rewardClassNet = self.rewardClassNet
# just gets or resets global_step
self.global_step = None
variables = tf.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES)
for v in variables:
if "global_step" in v.name:
self.global_step = v
if self.global_step is None:
self.global_step = tf.Variable(0, name='global_step',
trainable=False)
self.resetGlStep = tf.assign(self.global_step, 0)
# load actual dqn
self.q = DQN(self.sess, self.params['out_dir'],
self.global_step, self.params, self.numActions)
self.evalMethods= ["agent","random"]
self.evalMethod="agent"
self.qAgentB=None
if (not self.params['agentB'] is None) and self.params['interEval']:
self.qAgentB = DQN(self.agentB_sess, self.params['out_dir'],
self.global_step, self.params, self.numActions,agentB=True)
self.evalMethod="agentA"
self.evalMethods= ["agentA","random", "fixed","agentB"]
self.sess.as_default()
# replay buffer (size and type)
if self.params['replaySz'] is None:
self.replayBufferSize = 1000000
else:
self.replayBufferSize = self.params['replaySz']
self.replay = ReplayBuffer(self.replayBufferSize)
# variables for exploration decay
self.action_step = tf.Variable(0, name='action_step',
trainable=False, dtype=tf.int32)
self.increment_ac_step_op = tf.assign(self.action_step,
self.action_step+1)
self.global_action_step = tf.Variable(0, name='global_action_step',
trainable=False, dtype=tf.int32)
self.increment_gac_step_op = tf.assign(self.global_action_step,
self.global_action_step+1)
self.episode_step = tf.Variable(0, name='episode_step',
trainable=False, dtype=tf.int32)
self.increment_ep_step_op = tf.assign(self.episode_step,
self.episode_step+1)
self.resetEpStep = tf.assign(self.episode_step, 0)
self.resetAcStep = tf.assign(self.action_step, 0)
self.resetGAcStep = tf.assign(self.global_action_step, 0)
# save state
self.saver = tf.train.Saver(max_to_keep=self.params['keepNewestModels'] )
fn = os.path.join(self.params['out_dir'], "mainLoopTime.txt")
self.mainLoopTimeFile = open(fn, "a")
fn_ = os.path.join(self.params['out_dir'], "learnLoopTime.txt")
self.learnLoopTimeFile = open(fn_, "a")
# main function, runs the learning process
def run(self):
# debugging variables, for tensorboard
if self.params['evaluation']:
# evaluation episodes, no exploration
eval_reward = tf.Variable(0., name="evalReward")
eval_reward_op = tf.summary.scalar("Eval-Reward", eval_reward)
eval_disc_reward = tf.Variable(0., name="evalDiscReward")
eval_disc_reward_op = tf.summary.scalar("Eval-Reward_discounted",
eval_disc_reward)
eval_stepCount = tf.Variable(0., name="evalStepCount")
eval_stepCount_op = tf.summary.scalar("Eval-StepCount", eval_stepCount)
eval_sum_vars = [eval_reward, eval_disc_reward, eval_stepCount]
eval_sum_op = tf.summary.merge([eval_reward_op,
eval_disc_reward_op,
eval_stepCount_op])
# (discounted) reward per episode
episode_reward = tf.Variable(0., name="episodeReward")
episode_reward_op = tf.summary.scalar("Reward", episode_reward)
episode_disc_reward = tf.Variable(0., name="episodeDiscReward")
episode_disc_reward_op = tf.summary.scalar("Reward_discounted",
episode_disc_reward)
# average (max q)
episode_ave_max_q = tf.Variable(0., name='epsideAvgMaxQ')
episode_ave_max_q_op = tf.summary.scalar("Qmax_Value",
episode_ave_max_q)
# number of steps for episode
stepCount = tf.Variable(0., name="stepCount")
stepCount_op = tf.summary.scalar("StepCount", stepCount)
# number of learning iterations(total number of mini batches so far)
global_step_op = tf.summary.scalar("GlobalStep", self.global_step)
# current exploration epsilon
epsilonVar = tf.Variable(0., name="epsilon")
epsilonVar_op = tf.summary.scalar("Epsilon", epsilonVar)
summary_vars = [episode_reward, episode_disc_reward, episode_ave_max_q,
stepCount, epsilonVar]
summary_ops = tf.summary.merge([episode_reward_op,
episode_disc_reward_op,
episode_ave_max_q_op,
stepCount_op, epsilonVar_op])
self.writer = tf.summary.FileWriter(os.path.join(self.params['out_dir'], "train"),
self.sess.graph)
self.action_vars = []
self.action_ops = []
for a in range(self.numActions):
action = tf.Variable(0., name="qval_action_" + str(a))
action_op = tf.summary.scalar("Q-Value_Action_"+str(a), action)
self.action_vars.append(action)
self.action_ops.append(action_op)
self.action_ops = tf.summary.merge(self.action_ops)
# initialize all tensorflow variables
# and finalize graph (cannot be modified anymore)
self.sess.run(tf.initialize_all_variables())
self.sess.graph.finalize()
# for debugging, variable values before and after
if self.params['veryveryverbose']:
variables = tf.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES,
scope="DQN")
for v in variables:
if v.name.endswith("conv1_2/weights:0"):
print(v.name, self.sess.run(v))
# do we want to use pretrained weights for the dqn
# from the classifier or a pretrained agent?
if self.params['resume']:
pass
elif self.params['useClassNN']:
print("restoring dqn net from classNN: {}".format(
self.params['classNN']))
if "ckpt" in self.params['classNN']:
self.q.saver.restore(
self.sess,
self.params['classNN'])
else:
self.q.saver.restore(
self.sess,
tf.train.latest_checkpoint(self.params['classNN']))
elif self.params['dqnNN'] is not None:
print("restoring dqn net from dqnNN: {}".format(
self.params['dqnNN']))
if "ckpt" in self.params['dqnNN']:
self.q.saver.restore(
self.sess,
self.params['dqnNN'])
else:
self.q.saver.restore(
self.sess,
tf.train.latest_checkpoint(self.params['dqnNN']))
# main network weights are set, now run target init op
self.sess.run(self.q.target_nn_init_op)
if (self.params['agentB'] is not None) and self.params['interEval']:
print("restoring agentB net from {}".format(
self.params['agentB']))
if "ckpt" in self.params['agentB']:
self.qAgentB.saver.restore(
self.agentB_sess,
self.params['agentB'])
else:
self.qAgentB.saver.restore(
self.agentB_sess,
tf.train.latest_checkpoint(self.params['agentB']))
# for debugging, variable values before and after
if self.params['veryveryverbose']:
variables = tf.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES,
scope="DQN")
for v in variables:
if v.name.endswith("conv1_2/weights:0"):
print(v.name, self.sess.run(v))
print("initialize classifier network")
if self.params['classNN'] is not None:
print("restoring reward class net from classNN: {}".format(
self.params['classNN']))
if "ckpt" in self.params['classNN']:
self.rewardClassNet.saver.restore(
self.sess,
self.params['classNN'])
else:
self.rewardClassNet.saver.restore(
self.sess,
tf.train.latest_checkpoint(self.params['classNN']))
# load previously trained model
if not self.params['resume'] and self.params['loadModel']:
if "ckpt" in self.params['loadModel']:
self.saver.restore(
self.sess,
self.params['loadModel'])
else:
self.saver.restore(
self.sess,
tf.train.latest_checkpoint(self.params['loadModel']))
printT("Model {} restored.".format(self.params['loadModel']))
# load previously filled replay buffer
if not self.params['resume'] and self.params['loadReplay'] is not None:
self.replay.load(self.params['loadReplay'])
printT("Buffer {} restored.".format(self.params['loadReplay']))
# resume old run
if self.params['resume']:
self.saver.restore(sess, tf.train.latest_checkpoint(
os.path.join(self.params['out_dir'], "models")))
printT("Model {} restored.".format(tf.train.latest_checkpoint(
os.path.join(self.params['out_dir'], "models"))))
# if not self.params['interEval'] :
self.replay.load(os.path.join(self.params['out_dir'],
"replayBuffer"))
printT("Buffer {} restored.".format(self.params['out_dir']))
else:
self.sess.run(self.resetGlStep)
# start immediately for interactive test runs
try:
if os.environ['IS_INTERACTIVE'] == 'true' \
and \
not self.params['sleep']:
self.params['startLearning'] = 1
except KeyError:
pass
# exploration variables
self.startEpsilon = self.params['epsilonStart']
self.endEpsilon = self.params['epsilonStop']
self.epsilon = sess.run(epsilonVar)
# evaluation/learning/exploration
self.evalEp = False
self.learning = True
self.pauseLearning = False
self.pauseExploring = False
self.stopLearning = False
self.stopExploring = False
self.qValFileExpl = open(os.path.join(self.params['out_dir'], "qValExpl.txt"), "a")
self.qValFileEval = open(os.path.join(self.params['out_dir'], "qValEval.txt"), "a")
self.actionLogFile = open(os.path.join(self.params['out_dir'], "actionLog.txt"), "a")
self.episodeLogFile = open(os.path.join(self.params['out_dir'], "episodeLog.txt"), "a")
self.episodeEvalLogFile = open(os.path.join(self.params['out_dir'], "episodeEvalLog.txt"), "a")
# remove stop/termination file
if os.path.exists("stop"):
os.remove(os.path.join(params['out_dir'], "stop"))
# reset
if self.params['onlyLearn']:
sess.run(self.resetEpStep)
sess.run(self.resetAcStep)
if self.params['onlyLearn']:
self.learn()
exit()
# multi-threaded
# learning and exploration threads act independently?
if self.params['async']:
t = threading.Thread(target=self.learnWrap)
t.daemon = True
t.start()
if self.params['evaluation']:
# evaluate this often
evalEpReward = 0
evalEpDiscReward = 0
evalEpStepCount = 0
evalIntv = 25
evalCnt = 40
evalOc = 0
# start exploration
self.episode = sess.run(self.episode_step)
if self.params['verbose']:
printT("start Episode: {}".format(self.episode))
acs = sess.run(self.action_step)
if self.params['verbose']:
printT("start action step: {}".format(acs))
self.globActStep = acs
gacs = sess.run(self.global_action_step)
if self.params['verbose']:
printT("start global action step: {}".format(gacs))
self.gac = gacs
while self.episode<self.params['numEpisodes']:
self.episode = sess.run(self.episode_step)
sess.run(self.increment_ep_step_op)
if self.params['verbose']:
print ("STARTING NEW EPISODE:"+ str(self.episode))
# do we want to explore/gather samples?
while self.stopExploring:
time.sleep(1)
# evaluation episode (no exploration?)
if self.params['evaluation'] and self.episode % (evalIntv+evalCnt) < evalCnt:
self.evalEp = True
if self.episode % (evalIntv+evalCnt) == 0:
if self.params['verbose']:
printT("Start Eval Episodes!")
evalOc += 1
elif self.params['onlyLearn'] or \
(self.params['limitExploring'] is not None \
and self.replay.size() >= self.params['limitExploring']):
self.pauseExploring = True
self.evalEp = False
else:
self.evalEp = False
# reset simulation/episode state
terminal = False
ep_reward = 0
ep_disc_reward = 0
ep_ave_max_q = 0
self.inEpStep = 0
if self.params['interEval']:
self.evalMethod = self.evalMethods[self.episode % (len(self.evalMethods))]
# reset environment
# set start state and allowed actions
nextState, allowedActions, terminal = self.env.reset(self.episode, self.evalEp, globActStep=self.globActStep)
allowedV=self.calcAllowedActionsVector(allowedActions)
if nextState is None:
# unable to get state
# restart with new episode
continue
lastTime=time.time()
# step forward until terminal
while not terminal:
if os.path.exists(os.path.join(params['out_dir'], "stop")):
self.terminate()
if self.params['async']:
if not t.isAlive():
printT("alive {}".format(t.isAlive()))
printT("Exception in user code:")
printT('-'*60)
traceback.print_exc(file=sys.stdout)
printT('-'*60)
sys.stdout.flush()
t.join(timeout=None)
os._exit(-1)
# state <- nextstate
state = nextState
# choose action
# random or according to dqn (depending on epsilon)
self.inEpStep += 1
if not self.evalEp:
sess.run(self.increment_ac_step_op)
self.globActStep += 1
sess.run(self.increment_gac_step_op)
self.gac += 1
epsStep=max(0,self.globActStep-(self.params['startLearning'] /4.0) )
tmp_step = min(epsStep, self.annealSteps)
self.epsilon = (self.startEpsilon - self.endEpsilon) * \
(1 - tmp_step / self.annealSteps) + \
self.endEpsilon
action = self.getActionID(state, allowedV)
if self.evalMethod=="fixed":
action=self.params['fixedAction']
# We choose a random action in these cases
rnm=np.random.rand()
if self.params['veryveryverbose']:
printT("rnm:"+str(rnm)+ " self.epsilon:"+ str(self.epsilon)+" |self.params['randomEps']:"+str(self.params['randomEps'])+" e:"+str(self.episode))
if (self.evalMethod == "random") or (not self.pauseExploring) and (not self.evalEp) and (self.episode < self.params['randomEps'] or rnm < self.epsilon):
if self.params['verbose']:
printT("randomly selecting action")
action = np.random.choice(allowedActions)
if self.params['verbose']:
printT("\nEpisode: {}, Step: {}, Time:{}, Next action (e-greedy {}): {}".format(
self.episode,
self.globActStep,
time.ctime(),
self.epsilon,
action))
else: # We let the DQN choose the action
if self.params['verbose']:
printT("Greedyly selecting action:")
if self.params['verbose']:
printT("\nEpisode: {}, Step: {}, Time:{}, Next action: {}".format(
self.episode, self.globActStep, time.ctime(), action))
# perform selected action and
# get new state, reward, and termination-info
nextState, reward, terminal, terminalP, allowedActions = self.env.act(action, self.episode, self.inEpStep , self.globActStep, self.evalEp)
if self.params['veryveryverbose']:
print('ACTIONLOG:',str(self.globActStep),str(self.episode), str(self.inEpStep), action, self.evalEp, terminal, terminalP, reward, self.epsilon, self.evalMethod)
self.actionLogFile.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(time.time(), str(self.globActStep),str(self.episode), str(self.inEpStep),
action, self.evalEp, terminal, terminalP, reward, self.epsilon, self.evalMethod))
self.actionLogFile.flush()
allowedV=self.calcAllowedActionsVector(allowedActions)
# accumulate episode reward
ep_disc_reward += pow(self.params['gamma'], self.inEpStep-1) * reward
ep_reward += reward
if (self.evalMethod == "agent") and not self.evalEp and not self.pauseExploring:
self.insertSamples(np.copy(state),
action, reward, terminal,
np.copy(nextState),
np.copy(allowedV))
# do logging inside of one episode
# we do not want to lose any data
if self.params['storeModel'] and \
((self.globActStep+1) % self.modelStoreIntv) == 0:
logDqn.logModel(self)
if self.params['storeBuffer'] and \
((self.globActStep+1) % self.bufferStoreIntv) == 0:
logDqn.logBuffer(self)
# if training/exploration not decoupled, do one learning step
if not self.params['async']:
for i in range(8):
self.learn()
sys.stdout.flush()
cTime=time.time()
usedTime=cTime-lastTime
# do we want to pause exploration thread?
# (to simulate slower stm)
if not self.pauseExploring and \
not self.evalEp and \
self.params['sleep'] and \
self.params['async'] and \
(self.replay.size() >= self.params['startLearning']) and \
(self.replay.size() >= self.params['miniBatchSize']):
if self.params['sleepA'] is not None:
sleepingTime=self.params['sleepA'] - usedTime
if sleepingTime >0:
time.sleep(sleepingTime)
else:
time.sleep(60)
cTime=time.time()
usedTime=cTime-lastTime
lastTime=cTime
self.mainLoopTimeFile.write(str(cTime)+" "+str(usedTime)+ "\n")
self.mainLoopTimeFile.flush()
# terminate episode after x steps
# even if no good state has been reached
if self.inEpStep == self.params['stepsTillTerm']:
self.env.switchApproachArea()
break
# end episode
# otherwise store episode summaries and print log
if self.evalEp:
evalEpReward += ep_reward
evalEpDiscReward += ep_disc_reward
evalEpStepCount += self.inEpStep
if self.episode % (evalIntv+evalCnt) == (evalCnt-1):
summary_str = self.sess.run(eval_sum_op, feed_dict={
eval_sum_vars[0]: evalEpReward/float(evalCnt),
eval_sum_vars[1]: evalEpDiscReward/float(evalCnt),
eval_sum_vars[2]: evalEpStepCount/float(evalCnt)
})
self.writer.add_summary(summary_str, evalOc-1)
evalEpReward = 0.0
evalEpDiscReward = 0.0
evalEpStepCount = 0.0
if self.params['veryveryverbose']:
printT("step count-eval: {}".format(self.inEpStep))
if self.params['veryverbose']:
printT('Time: {} | Reward: {} | Discounted Reward: {} | Eval-Episode {}'.
format(time.ctime(), ep_reward, ep_disc_reward, self.episode))
self.episodeEvalLogFile.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(time.time(), self.episode,
ep_reward, ep_disc_reward, self.inEpStep, self.epsilon))
self.episodeEvalLogFile.flush()
else:
if self.params['evaluation']:
et = self.episode - (evalOc * evalCnt)
else:
et = self.episode
summary_str = self.sess.run(summary_ops, feed_dict={
summary_vars[0]: ep_reward,
summary_vars[1]: ep_disc_reward,
summary_vars[2]: ep_ave_max_q / float(max(self.inEpStep,1)),
summary_vars[3]: self.inEpStep,
summary_vars[4]: self.epsilon
})
self.writer.add_summary(summary_str, et)
self.writer.flush()
if self.params['veryveryverbose']:
printT("step count: {}".format(self.inEpStep))
if self.params['veryveryverbose']:
printT('Time: {} | Reward: {} | Discounted Reward: {} | Episode {} | Buffersize: {}'.
format(time.ctime(), ep_reward, ep_disc_reward, self.episode,
self.replay.size()))
self.episodeLogFile.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(time.time(), self.episode,
ep_reward, ep_disc_reward, self.inEpStep, self.epsilon, self.evalMethod))
self.episodeLogFile.flush()
# log some stuff
if self.params['storeModel'] and \
((self.episode+1) % self.modelStoreIntv) == 0:
logDqn.logModel(self)
if self.params['storeBuffer'] and \
((self.episode+1) % self.bufferStoreIntv) == 0:
logDqn.logBuffer(self)
statsIntv = 100
sys.stdout.flush()
# stop learning after last episode
self.learning = False
sys.stdout.flush()
def terminate(self):
printT("terminating...........")
sys.stdout.flush()
self.logStuff()
sys.stdout.flush()
printT("EXIT NOW!")
sys.stdout.flush()
exit(0)
def learnWrap(self):
try:
self.learn()
except:
printT("learn wrap failed")
printT("Exception in user code:")
printT('-'*60)
traceback.print_exc(file=sys.stdout)
printT('-'*60)
sys.stdout.flush()
os._exit(-1)
def learn(self):
y_batch = np.zeros((self.params['miniBatchSize'], 1))
tmp = np.zeros((self.params['miniBatchSize'], self.numActions))
lastTime=time.time()
count=0
while self.learning:
            # Throttling to give the other thread a chance
count+=1
cTime=time.time()
loopTime=cTime-lastTime
lastTime=cTime
self.learnLoopTimeFile.write(str(cTime)+" "+str(loopTime)+ "\n")
self.learnLoopTimeFile.flush()
if self.stopLearning:
time.sleep(5.0)
continue
if self.replay.size() < self.params['startLearning'] or \
self.replay.size() < self.params['miniBatchSize'] or \
self.evalEp:
if self.params['async']:
time.sleep(5.0)
continue
else:
return
s_batch, a_batch, r_batch, t_batch, ns_batch, allowed_batch = \
self.replay.sample_batch(self.params['miniBatchSize'])
if self.params['doubleDQN']:
qValsNewState = self.estimate_ddqn(ns_batch, allowed_batch, p=False, mem=tmp)
else:
qValsNewState = self.predict_target_nn(ns_batch)
for i in range(self.params['miniBatchSize']):
if t_batch[i]:
y_batch[i] = r_batch[i]
else:
y_batch[i] = r_batch[i] + self.params['gamma'] * qValsNewState[i]
gS, qs, delta = self.update(s_batch, a_batch, y_batch)
if self.params['noHardResetDQN']:
self.update_targets()
elif (gS+1) % self.params['resetFreq'] == 0:
self.update_targets()
if not self.params['async']:
return
if self.params['onlyLearn']:
if (gS+1) % 1000 == 0:
logDqn.logModel(self)
# Returns vector of length 'self.numActions' containing
# Zeros for allowed actions
# '-inf' for forbidden actions
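    # Example (illustrative): with numActions == 4 and allowedActions == [0, 2]
    # the result is [0., -inf, 0., -inf]; adding it to a q-value vector leaves
    # allowed actions untouched and makes forbidden ones unselectable by argmax.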
def calcAllowedActionsVector(self, allowedActions):
allowedV=np.zeros(shape=(self.numActions))
allowedV[:]=float("-inf") # init all actions as fobidden
for i in allowedActions:
allowedV[i]=0 # mark actions as allowed
return allowedV
# get action id for max q
def getActionID(self, state, allowedActionsV):
if self.params['interEval'] and self.evalMethod == 'agentB':
if self.params['verbose']:
print("PREDICTING WITH AGENTB:")
qs = self.qAgentB.run_predict(state)
print(qs)
else:
if self.params['verbose']:
print("PREDICTING WITH AGENT:")
qs = self.q.run_predict(state)
if self.evalEp:
self.qValFileEval.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(time.time(), str(self.globActStep),str(self.episode), str(self.inEpStep), qs[0], allowedActionsV))
self.qValFileEval.flush()
else:
self.qValFileExpl.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(time.time(), str(self.globActStep),str(self.episode), str(self.inEpStep), qs[0], allowedActionsV))
self.qValFileExpl.flush()
var_dict = {}
for a in range(self.numActions):
var_dict[self.action_vars[a]] = qs[0][a]
summary_str = self.sess.run(self.action_ops, feed_dict=var_dict)
self.writer.add_summary(summary_str, self.gac)
self.writer.flush()
printT("Q-values:" + str(qs))
qs = qs + allowedActionsV
return np.argmax(qs, axis=1)[0]
# update dqn main network
def update(self, states, actionIDs, targets):
step, out, delta, loss = self.q.run_train(states, actionIDs, targets)
# network diverged?
if np.isnan(loss):
printT("ABORT: NaN")
sys.stdout.flush()
os._exit(-1)
return step, out, delta
# update dqn target network
def update_targets(self):
self.q.run_update_target_nn()
# estimate q values using double dqn
# get values of target network for actions where main network is max
def estimate_ddqn(self, states, allowedActionsV, p=False, mem=None):
qs = self.q.run_predict(states)
if p:
if self.params['veryveryverbose']:
print("allowedActionsV.shape"+ str(allowedActionsV.shape))
print("qs.shape"+ str(qs.shape))
qs+=allowedActionsV # add '-inf' to the q values of forbidden actions
if p:
if self.params['veryveryverbose']:
print(states)
print(qs.shape)
print(states.shape)
printT("qs: {}".format(qs))
maxA = np.argmax(qs, axis=1)
qs = self.q.run_predict_target(states)
mem.fill(0)
mem[np.arange(maxA.size), maxA] = 1
mem = mem * qs
mem = np.sum(mem, axis=1)
return mem
# predict dqns
def predict_target_nn(self, states):
qs = self.q.run_predict_target(states)
return np.max(qs, axis=1)
def predict_nn(self, states):
qs = self.q.run_predict(states)
return np.max(qs, axis=1)
# insert samples into replay buffer
def insertSamples(self, stateScaled, action, reward, terminal,
newStateScaled, allowedActionsV):
stateScaled.shape = (stateScaled.shape[1],
stateScaled.shape[2],
stateScaled.shape[3])
newStateScaled.shape = (newStateScaled.shape[1],
newStateScaled.shape[2],
newStateScaled.shape[3])
states=(stateScaled,np.rot90(stateScaled, 2),np.fliplr(stateScaled), np.flipud(stateScaled) )
newStates=(newStateScaled,np.rot90(newStateScaled, 2),np.fliplr(newStateScaled), np.flipud(newStateScaled) )
if(self.params['fullAugmentation']):
self.lock.acquire()
for i in range(4):
for j in range(4):
self.replay.add(states[i], action, reward, terminal, allowedActionsV,
newStates[j])
self.lock.release()
else:
self.lock.acquire()
self.replay.add(stateScaled, action, reward, terminal, allowedActionsV,
newStateScaled)
self.replay.add(
np.ascontiguousarray(np.rot90(stateScaled, 2)),
action, reward, terminal, allowedActionsV,
np.ascontiguousarray(np.rot90(newStateScaled, 2)))
self.replay.add(
np.ascontiguousarray(np.fliplr(stateScaled)),
action, reward, terminal, allowedActionsV,
np.ascontiguousarray(np.fliplr(newStateScaled)))
self.replay.add(
np.ascontiguousarray(np.flipud(stateScaled)),
action, reward, terminal, allowedActionsV,
np.ascontiguousarray(np.flipud(newStateScaled)))
self.lock.release()
# if we want to stop if buffer is full
# or limit exploration
if self.pauseExploring == False and \
self.replay.size() == self.replayBufferSize:
if self.params['termAtFull']:
printT("Buffer FULL!")
self.logStuff()
self.pauseExploring = True
# exit()
elif self.pauseExploring == False and \
self.params['limitExploring'] is not None and \
self.replay.size() >= self.params['limitExploring']:
if self.params['termAtFull']:
printT("Buffer FULL!")
self.logStuff()
self.pauseExploring = True
def logStuff(self):
logDqn.logModel(self)
logDqn.logBuffer(self)
if __name__ == "__main__":
np.set_printoptions(linewidth=np.inf)
# load parameters from command line and config file
params = parseNNArgs.parseArgs()
if params['onlyLearn'] and \
not params['loadReplay'] and \
not params['loadModel']:
print("invalid parameters! onlyLearn only avaiable in combination with loadReplay and loadModel")
exit(-232)
params['type'] = "agent"
# resuming previous run?
if params['resume']:
out_dir = os.getcwd()
print("resuming... {}".format(out_dir))
newRun = False
else:
out_dir = outDir.setOutDir(params)
# copy all scripts to out_dir (for potential later reuse)
copy_tree(os.getcwd(), out_dir)
os.makedirs(os.path.join(out_dir, "models"))
os.makedirs(os.path.join(out_dir, "imgs"))
os.makedirs(os.path.join(out_dir, "imgsCollect"))
print("new start... {}".format(out_dir))
config = json.dumps(params)
with open(os.path.join(out_dir, "config"), 'w') as f:
f.write(config)
newRun = True
params['out_dir'] = out_dir
print("Results/Summaries/Logs will be written to: {}\n".format(out_dir))
#pipe log to file if not in interactive mode
interactive=False
try:
if os.environ['IS_INTERACTIVE'] == 'true':
interactive=True
except KeyError:
pass
if not interactive:
print("LogFile="+ os.path.join(out_dir, "log"))
sys.stdout.flush()
logFile = open(os.path.join(out_dir, "log"), 'a')
sys.stdout = sys.stderr = logFile
if params['startServer']:
p = Process(target=envServer.main, args=(params,))
p.start()
time.sleep(15)
# add paths to load classifier later on (reward calculation)
if params['classNN']:
if "ckpt" not in params['classNN']:
sys.path.insert(1, params['classNN'])
else:
sys.path.insert(1, os.path.dirname(params['classNN']))
try:
from classifierEval import ClassConvNetEval
except:
print("Failed to import form 'classifierEval.'")
print("Maybe the path to your classifier net is specified wrong?")
print(str(os.path.dirname(params['classNN'])))
exit(-1)
# start tensorflow session and start learning
if params['noGPU']:
tfconfig = tf.ConfigProto(
device_count = {'GPU': 0}
)
else:
tfconfig = None
if params['agentB'] is not None:
agentB_sess_ = tf.Session()
else:
agentB_sess_= None
with tf.Session(config=tfconfig) as sess:
rl = dqnRunner(sess, params, out_dir=out_dir, agentB_sess = agentB_sess_)
rl.run()
|
python
|
#!/usr/bin/env python3
#-*- encoding: UTF-8 -*-
def main():
try:
nota1 = float(input("1ยช nota: "))
nota2 = float(input("2ยช nota: "))
    except ValueError:
        print("Apenas valores numéricos devem ser informados!")
        return
if(nota1 < 0 or nota1 > 10 or nota2 < 0 or nota2 > 10):
print("Notas invรกlidas!")
else:
print(f"1ยช Nota: {nota1}\n2ยช Nota: {nota2}\nMรฉdia aritmรฉtica simples: {(nota1 + nota2)/2}")
if(__name__ == "__main__"):
main()
|
python
|
from . import util
ut = util.Util()
reload(util)
class ViewerMarlin():
def open_file(self, path):
with open(path) as f:
l = f.readlines()
# print(type(l))
# print(len(l))
# print(l)
return l
def get_value_move(self, str_):
### Split Elements
        ### Remove newline
        str_ = str_.replace("\n", "")
### Remove Comments
if ";" in str_:
str_rm_comment = str_.split(";")
new_str = str_rm_comment[0]
else:
new_str = str_
### Gcode (per Line)
# print(new_str)
### Split Space
elements = new_str.split()
### init
xx = None
yy = None
zz = None
ee = None
for i in xrange(len(elements)):
elm = elements[i]
### Get Value
if ("X" in elm):
tmp_x = elm.split("X")
xx = float(tmp_x[1])
elif ("Y" in elm):
tmp_y = elm.split("Y")
yy = float(tmp_y[1])
elif ("Z" in elm):
tmp_z = elm.split("Z")
zz = float(tmp_z[1])
elif ("E" in elm):
tmp_e = elm.split("E")
ee = float(tmp_e[1])
return [xx, yy, zz, ee]
def gcode_operate_move(self, gcode_line):
none_list = [None, None, None, None]
### Move
if ("G0" in gcode_line) or \
("G1" in gcode_line) or \
("G00" in gcode_line) or \
("G01" in gcode_line) or \
("G92 E0" in gcode_line):
### get position
return self.get_value_move(gcode_line)
        ### Comment Out
elif (";" in gcode_line[0]) or (gcode_line == "\n"):
return none_list
### Setting G
elif ("G4" in gcode_line) or \
("G04" in gcode_line) or \
("G21" in gcode_line) or \
("G28" in gcode_line) or \
("G90" in gcode_line) or \
("G91" in gcode_line) or \
("G92" in gcode_line):
return none_list
### Setting M
elif ("M82" in gcode_line) or \
("M84" in gcode_line) or \
("M104" in gcode_line) or \
("M106" in gcode_line) or \
("M107" in gcode_line) or \
("M109" in gcode_line) or \
("M140" in gcode_line) or \
("M190" in gcode_line) or \
("M204" in gcode_line) or \
("M205" in gcode_line):
return none_list
### Setting T
elif ("T0" in gcode_line) or \
("T1" in gcode_line):
return none_list
else:
# return none_list
return "bug!"
def gcode_to_array(self, path):
### open gcode
gcode = self.open_file(path)
        ### Get Values from gcode
values = []
for i in xrange(len(gcode)):
gcode_line = gcode[i]
### XYZE
elements = self.gcode_operate_move(gcode_line)
## DEBUG ALL
# print(i, gcode_line)
### DEBUG bug
if (elements == "bug!"):
print(i, gcode_line)
## DEBUG
values.append(elements)
### Padding Previous Value(None)
values_zip = ut.zip_matrix(values)
# print(len(values_zip))
new_values = []
for j in xrange(len(values_zip)):
list_ = values_zip[j]
list_pad = ut.padding_previous_value(list_)
new_values.append(list_pad)
gcode_values = ut.zip_matrix(new_values)
# print(len(values))
# print(len(gcode_values), len(gcode_values[0]))
return gcode_values
def segment_extrude(self, xyze):
### Segment Print / Travel
### https://docs.google.com/spreadsheets/d/1S4SQ-NT09Nh8sb3Lg6FSauKB1rZPMwLSDjvnrKerXFs/edit?usp=sharing
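        ### In words (illustrative summary of the rules below): a point is kept when the
        ### extruder value E increases into it or out of it; runs of such extruding points
        ### form one polyline in array_seg, while pure travel moves are skipped.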
array_seg = []
list_seg = []
for j in xrange(len(xyze)):
xxx, yyy, zzz, eee = xyze[j]
item = [xxx, yyy, zzz]
# print(j)
# print(j, xyze[j])
### Index[0]
if (j == 0):
x1, y1, z1, e1 = xyze[j]
x2, y2, z2, e2 = xyze[j + 1]
bool_b = e1 < e2
if (bool_b == True):
list_seg = []
list_seg.append(item)
### Index[0] - Index[Last - 1]
elif (j > 0) and (j < (len(xyze) - 1)):
x0, y0, z0, e0 = xyze[j - 1]
x1, y1, z1, e1 = xyze[j]
x2, y2, z2, e2 = xyze[j + 1]
bool_a = e0 < e1
bool_b = e1 < e2
if (bool_a == False) and (bool_b == True):
list_seg = []
list_seg.append(item)
elif (bool_a == True) and (bool_b == True):
list_seg.append(item)
elif (bool_a == True) and (bool_b == False):
list_seg.append(item)
array_seg.append(list_seg)
elif (bool_a == False) and (bool_b == False):
pass
else:
print("Error!!")
### Index[Last]
elif (j == (len(xyze) - 1)):
x0, y0, z0, e0 = xyze[j - 1]
x1, y1, z1, e1 = xyze[j]
bool_a = e0 < e1
if (bool_a == True):
list_seg.append(item)
array_seg.append(list_seg)
# print(array_out)
return array_seg
def remove_invalid_polylines(self, array_seg):
### Remove Invalid Polylines (Remove Same Element as the Previous One)
layers = []
for k in xrange(len(array_seg)):
tmp_layer = array_seg[k]
tmp_removed = ut.remove_previous_elements(tmp_layer)
if len(tmp_removed) != 1:
layers.append(tmp_removed)
return layers
def draw_path(self, values_4):
### Remove Same Element as the Previous One
xyze = ut.remove_previous_elements(values_4)
### print(len(values_4), len(xyze))
### Segment Print / Travel
array_seg = self.segment_extrude(xyze)
### Remove Invalid Polylines (Remove Same Element as the Previous One)
layers = self.remove_invalid_polylines(array_seg)
"""
### Draw All Path
pts = []
for i in xrange(len(xyze)):
x, y, z, e = values_4[i]
pt = [x, y, z]
pts.append(pt)
"""
return layers
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of tofbot, a friendly IRC bot.
# You may redistribute it under the Simplified BSD License.
# If we meet some day, and you think this stuff is worth it,
# you can buy us a beer in return.
#
# Copyright (c) 2011,2015 Etienne Millon <[email protected]>
# Martin Kirchgessner <[email protected]>
# Nicolas Dumazet <[email protected]>
# Quentin Sabah <[email protected]>
# Christophe-Marie Duquesne <[email protected]>
"""
./bot.py [options] [legacy-arguments]
Legacy-arguments:
NICK CHANNEL [CHANNEL...]
Don't prepend a # to chan names
Tofbot will connect to freenode.net
"""
from datetime import datetime
from irc import Bot
import time
import random
import sys
import os
import plugins
import types
from toflib import *
from toflib import _simple_dispatch, _simple_conf_dispatch, urls_in
import re
from optparse import OptionParser
import json
import atexit
import socket
import traceback
import plugins.euler
import plugins.lolrate
import plugins.donnezmoi
import plugins.jokes
import plugins.twitter
import plugins.dassin
import plugins.eightball
import plugins.sed
import plugins.rick
import plugins.expand
import plugins.like
import plugins.ponce
import plugins.lag
random.seed()
class AutosaveEvent(CronEvent):
def __init__(self, bot, filename):
CronEvent.__init__(self, None)
self.filename = filename
self.bot = bot
def fire(self):
self.bot.save(self.filename)
class Tofbot(Bot):
# Those attributes are published and can be changed by irc users
# value is a str to object converter. It could do sanitization:
# if value is incorrect, raise ValueError
_mutable_attributes = {
"TGtime":int,
"memoryDepth":int
}
def __init__(self, nick=None, name=None, channels=None, password=None, debug=True):
Bot.__init__(self, nick, name, channels, password)
self.joined = False
self.autoTofadeThreshold = 98
self.riddleMaxDist = 2
self.debug = debug
self.TGtime = 5
self.pings = {}
self.memoryDepth = 20
self.lolRateDepth = 8
self.msgMemory = []
self.cron = Cron()
self.plugins = self.load_plugins()
self.startMsgs = []
self.msgHandled = False
def run(self, host=None):
if host == None and not hasattr(self,'host'):
raise Exception("run: no host set or given")
if self.nick == None:
raise Exception("run: no nick set")
if self.name == None:
raise Exception("run: no name set")
self.host = host or self.host
Bot.run(self, self.host)
def load_plugins(self):
d = os.path.dirname(__file__)
plugindir = os.path.join(d, 'plugins')
plugin_instances = {}
for m in dir(plugins):
if type(getattr(plugins,m)) != types.ModuleType:
continue
plugin = getattr(plugins, m)
for n in dir(plugin):
c = getattr(plugin, n)
if type(c) not in [types.ClassType, types.TypeType]:
continue
name = c.__name__
if name.startswith('Plugin'):
instance = c(self)
plugin_name = name[6:].lower()
plugin_instances[plugin_name] = instance
return plugin_instances
# line-feed-safe
def msg(self, chan, msg):
self.msgHandled = True
for m in msg.split("\n"):
Bot.msg(self, chan, m)
def log(self, msg):
if self.debug:
print(msg)
def try_join(self, args):
if (args[0] in ['End of /MOTD command.',
"This server was created ... I don't know"]
):
for chan in self.channels:
self.write(('JOIN', chan))
self.joined = True
def dispatch(self, origin, args):
self.log("o=%s n=%s a=%s" % (origin.sender, origin.nick, args))
is_config = False
senderNick = origin.nick
commandType = args[1]
# if command type is 'BOTCONFIG', bypass the try_join
# because we are configuring the bot before any
# connection.
if commandType != 'BOTCONFIG':
if not self.joined:
self.try_join(args)
return
else:
is_config = 1
args.remove('BOTCONFIG')
commandType = args[1]
if commandType == 'JOIN':
for m in self.startMsgs:
self.msg(self.channels[0], m)
self.startMsgs = []
for p in self.plugins.values():
p.on_join(args[0], senderNick)
elif commandType == 'KICK' and args[3] == self.nick:
reason = args[0]
chan = args[2]
self.write(('JOIN', chan))
for p in self.plugins.values():
p.on_kick(chan, reason)
elif commandType == 'PRIVMSG':
msg_text = args[0]
msg = msg_text.split(" ")
cmd = msg[0]
chan = args[2]
self.pings[senderNick] = datetime.now()
if is_config == False:
self.cron.tick()
if len(cmd) == 0:
return
urls = urls_in(msg_text)
self.msgHandled = False
# We only allow one plugin to answer, so we trigger them
# in random order
for p in self.plugins.values():
if not self.msgHandled:
p.handle_msg(msg_text, chan, senderNick)
for url in urls:
p.on_url(url)
if chan == self.channels[0] and cmd[0] != '!':
self.msgMemory.append("<" + senderNick + "> " + msg_text)
if len(self.msgMemory) > self.memoryDepth:
del self.msgMemory[0]
if len(cmd) == 0 or cmd[0] != '!':
return
cmd = cmd[1:]
chan = None
if len(self.channels) == 0:
chan = 'config'
else:
chan = self.channels[0]
if cmd in _simple_dispatch:
act = self.find_cmd_action("cmd_" + cmd)
act(chan, msg[1:], senderNick)
elif is_config and (cmd in _simple_conf_dispatch):
act = self.find_cmd_action("confcmd_" + cmd)
act(chan, msg[1:], senderNick)
elif cmd == 'context':
self.send_context(senderNick)
elif cmd == 'help':
self.send_help(senderNick)
elif commandType == 'PING':
self.log('PING received in bot.py')
elif commandType == 'ERROR':
traceback.print_exc(file=sys.stdout)
else: # Unknown command type
self.log('Unknown command type : %s' % commandType)
def find_cmd_action(self, cmd_name):
targets = self.plugins.values()
targets.insert(0, self)
for t in targets:
if (hasattr(t, cmd_name)):
action = getattr(t, cmd_name)
return action
def nop(self, chan, args):
pass
return nop
def safe_getattr(self, key):
if key not in self._mutable_attributes:
return None
if not hasattr(self, key):
return "(None)"
else:
return str(getattr(self, key))
def safe_setattr(self, key, value):
try:
converter = self._mutable_attributes.get(key)
if converter is None:
return False
value = converter(value)
setattr(self, key, value)
return True
except ValueError:
pass
@confcmd(1)
def confcmd_chan(self, chan, args):
new_chan = args[0]
if self.channels.count(new_chan) == 0:
self.channels.append(new_chan)
@confcmd(1)
def confcmd_server(self, chan, args):
host = args[0].strip()
self.host = host
@confcmd(1)
def confcmd_port(self, chan, args):
port = int(args[0].strip())
self.port = port
@confcmd(1)
def confcmd_nick(self, chan, args):
nick = args[0].strip()
self.nick = nick
self.user = nick
@confcmd(1)
def confcmd_name(self, chan, args):
name = args[0].strip()
self.name = name
@confcmd(1)
def confcmd_loadchanges(self, chan, args):
filename = args[0].strip()
if not os.path.exists(filename):
return
with open(filename) as f:
changes = f.readlines()
self.startMsgs += changes
@cmd(1)
def cmd_ping(self, chan, args):
"Find when X was last online"
who = args[0]
if who in self.pings:
self.msg(chan,
"Last message from %s was on %s (btw my local time is %s)" %
(who, self.pings[who].__str__(), datetime.now().__str__() ))
else:
self.msg(chan, "I havn't seen any message from " + who)
@cmd(1)
def cmd_get(self, chan, args):
"Retrieve a configuration variable's value"
key = args[0]
value = self.safe_getattr(key)
if value is None:
self.msg(chan, "Ne touche pas ร mes parties privรฉes !")
else:
self.msg(chan, "%s = %s" % (key, value))
@cmd(2)
def cmd_set(self, chan, args):
"Set a configuration variable's value"
key = args[0]
value = args[1]
ok = self.safe_setattr(key, value)
if not ok:
self.msg(chan, "N'รฉcris pas sur mes parties privรฉes !")
def send_context(self, to):
"Gives you last messages from the channel"
intro = "Last " + str(len(self.msgMemory)) + " messages sent on " + self.channels[0] + " :"
self.msg(to, intro)
for msg in self.msgMemory:
self.msg(to, msg)
def send_help(self, to):
"Show this help message"
maxlen = 1 + max(map(len, _simple_dispatch))
self.msg(to, "Commands should be entered in the channel or by private message")
self.msg(to, '%*s - %s' % (maxlen, "!help", self.send_help.__doc__))
self.msg(to, '%*s - %s' % (maxlen, "!context", self.send_context.__doc__))
for cmd in _simple_dispatch:
f = self.find_cmd_action("cmd_" + cmd)
self.msg(to, '%*s - %s' % (maxlen, "!"+cmd, f.__doc__))
self.msg(to, "you can also !get or !set " + ", ".join(self._mutable_attributes.keys()))
self.msg(to, "If random-tofades are boring you, enter 'TG " + self.nick + "' (but can be cancelled by GG " + self.nick + ")")
def load(self, filename):
try:
with open(filename) as f:
state = json.load(f)
if state['version'] != 1:
return False
for name, plugin_state in state['plugins'].items():
try:
plugin = self.plugins[name]
plugin.load(plugin_state)
except KeyError:
pass
except IOError as e:
print "Can't load state. Error: ", e
def save(self, filename):
try:
with open(filename, 'w') as f:
state = { 'version': 1
, 'plugins': {}
}
for name, plugin in self.plugins.items():
plugin_state = plugin.save()
state['plugins'][name] = plugin_state
json.dump(state, indent=4, fp=f)
except IOError as e:
print "Can't save state. Error: ", e
def __main():
class FakeOrigin:
pass
def bot_config(b, cmd):
o = FakeOrigin
o.sender = 'bot_config'
o.nick = 'bot_config'
b.dispatch(o, [cmd.strip(), 'BOTCONFIG','PRIVMSG','#bot_config'])
# default timeout for urllib2, in seconds
socket.setdefaulttimeout(15)
# option parser
parser = OptionParser(__doc__)
parser.add_option("-x","--execute", dest="cmds",action="append",help="File to execute prior connection. Can be used several times.")
parser.add_option("-s","--host", dest="host",help="IRC server hostname")
parser.add_option("-p","--port", dest="port",help="IRC server port")
parser.add_option("-k","--nick", dest="nick",help="Bot nickname",default='Tofbot')
parser.add_option("-n","--name", dest="name",help="Bot name",default='Tofbot')
parser.add_option("-c","--channel",dest="channel",action="append",help="Channel to join (without # prefix). Can be used several times.")
parser.add_option("--password", dest="password")
parser.add_option("-d","--debug", action="store_true", dest="debug", default=False)
(options,args) = parser.parse_args();
# legacy arguments handled first
# (new-style arguments prevail)
if len(args) > 0:
options.nick = options.nick or args[0]
options.channel = options.channel or []
for chan in args[1:]:
if options.channel.count(chan) == 0:
options.channel.append(chan)
# initialize Tofbot
# using command-line arguments
b = Tofbot(options.nick, options.name, options.channel, options.password, options.debug)
# execute command files
# these commands may override command-line arguments
options.cmds = options.cmds or []
for filename in options.cmds:
cmdsfile = open(filename,'r')
for line in cmdsfile:
bot_config(b, line)
# Restore serialized data
state_file = "state.json"
b.load(state_file)
# Perform auto-save periodically
autosaveEvent = AutosaveEvent(b, state_file)
b.cron.schedule(autosaveEvent)
# ... and save at exit
@atexit.register
def save_atexit():
print("Exiting, saving state...")
b.save(state_file)
print("Done !")
# default host when legacy-mode
if options.host == None and len(options.cmds) == 0 and len(args) > 0:
options.host = 'irc.freenode.net'
b.run(options.host)
if __name__ == "__main__":
try:
__main()
except Exception, ex:
import traceback
dumpFile = open("_TOFDUMP.txt","w")
traceback.print_exc(None, dumpFile)
dumpFile.close()
raise ex
|
python
|
# Copyright 2016 Peter Dymkar Brandt All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
PortfolioReport generates visualizations of past performance of a portfolio of
financial instruments.
Example:
# See historical_data documentation for more info.
data = historical_data.HistoricalData(historical_data_config,
tor_scraper_config)
daily = data.get_daily()
if daily is None:
return
print portfolio_report.PortfolioReport({
'subject_format': 'Portfolio Report -- {}',
}, daily).get_report()
"""
import io
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import PIL
import plot_utils
class PortfolioReport(object):
"""Contains all functionality for the portfolio_report module.
"""
_FILENAME = 'report.png'
_STYLE_SHEET = 'ggplot'
_TEXT_COLOR = (.3, .3, .3, 1.0)
_BAR_ALPHA = .67
_TITLE_DOLLAR_FORMAT = '${:,.2f}'
_REPORT_COLS = 2
def __init__(self, portfolio_report_config, daily):
"""PortfolioReport must be initialized with args similar to those shown
in the example at the top of this file.
Args:
portfolio_report_config: Determines the behavior of this instance.
daily: pandas.DataFrame of prices of the same type returned by
historical_data.get_daily(). Rows represent dates in ascending
order, and columns represent financial instruments.
"""
self._config = portfolio_report_config
self._daily = daily
def _get_percent_returns(self, cumulative=False):
"""Calculate percent returns for the entire time period, either
cumulative from the beginning or separately for each day.
"""
if cumulative is True:
return self._daily['adj_close'] / (
self._daily['adj_close'].ix[0, :]) - 1.0
else:
return self._daily['adj_close'].pct_change()
def _get_dollar_values(self, group=False):
"""Calculate the value of portfolio holdings using closing prices.
Optionally aggregate the values into groups provided in config.
"""
dates = sorted(self._config['dates'])
# Copy dataframe and zero data before earliest portfolio date.
dollar_values = self._daily['close'].copy()
dollar_values.ix[
dollar_values.index < pd.to_datetime(str(dates[0])), :] = 0.0
# Loop thru dates and calculate each date range using bitmask index.
for i, item in enumerate(dates):
index = dollar_values.index >= pd.to_datetime(str(item))
if i < (len(dates) - 1):
index = index & (
dollar_values.index < pd.to_datetime(str(dates[i + 1])))
for key in list(dollar_values.columns.values):
value = self._config['dates'][item]['symbols'].get(key)
if value is None:
dollar_values.ix[index, key] = 0.0
else:
dollar_values.ix[index, key] *= value * self._config[
'value_ratio']
if group is True:
dollar_values = self._sum_symbol_groups(dollar_values)
return dollar_values
def _get_dollar_returns(self, group=False):
"""Calculate the dollar returns for portfolio holdings. Optionally
aggregate the returns into groups provided in config.
"""
dollar_values = self._get_dollar_values()
percent_returns = self._get_percent_returns()
dollar_returns = dollar_values * percent_returns
if group is True:
dollar_returns = self._sum_symbol_groups(dollar_returns)
return dollar_returns
def _get_profit_and_loss(self):
"""Calculate the profit and loss of the portfolio over time.
"""
profit_and_loss = self._get_dollar_values().sum(1)
dates = sorted(self._config['dates'])
# Correct spike on first portfolio date.
first_date = np.argmax(
profit_and_loss.index >= pd.to_datetime(str(dates[0])))
profit_and_loss.ix[first_date:] -= profit_and_loss.ix[first_date]
# Adjust for capital changes.
for i, item in enumerate(dates):
if i > 0:
index = profit_and_loss.index >= pd.to_datetime(str(item))
profit_and_loss.ix[index] -= self._config[
'dates'][item]['capital_change'] * self._config[
'value_ratio']
return profit_and_loss
def _sum_symbol_groups(self, data_frame):
"""Sum columns of dataframe using symbol_groups in config.
"""
sum_data_frame = pd.DataFrame()
for key, value in sorted(self._config['symbol_groups'].iteritems()):
sum_data_frame[key] = data_frame[value].sum(1)
return sum_data_frame
def plot_dollar_change_bars(self, group=False):
"""Plot the change in dollars for the most recent day as a bar plot.
Args:
group: Whether to aggregate based on symbol_groups in config.
"""
dollar_values = self._get_dollar_values(group).ix[-1, :]
dollar_returns = self._get_dollar_returns(group).ix[-1, :]
percent_returns = dollar_returns / dollar_values
labels = plot_utils.get_percent_strings(percent_returns)
bar_colors = plot_utils.get_conditional_colors(
percent_returns, self._BAR_ALPHA)
title = ('1-Day Change | ' + self._TITLE_DOLLAR_FORMAT + (
'\n')).format(np.sum(dollar_returns))
plot = dollar_returns.plot(kind='bar', color=bar_colors)
plot.set_title(title, color=self._TEXT_COLOR)
plot.set_xticklabels(dollar_returns.index, rotation=0)
plot_utils.format_y_ticks_as_dollars(plot)
plot_utils.add_bar_labels(plot, labels, self._TEXT_COLOR)
return plot
def plot_percent_return_lines(self):
"""Plot percent returns for each symbol for the entire time period as a
line plot.
"""
percent_returns = self._get_percent_returns(True)
title = 'Symbol Returns\n'
plot = percent_returns.plot(kind='line', ax=plt.gca())
plot.set_title(title, color=self._TEXT_COLOR)
plot_utils.format_x_ticks_as_dates(plot)
plot_utils.format_y_ticks_as_percents(plot)
plot_utils.format_legend(plot, self._TEXT_COLOR)
return plot
def plot_dollar_value_bars(self, group=False):
"""Plot the dollar value of portfolio holdings for the most recent day
as a bar plot.
Args:
group: Whether to aggregate based on symbol_groups in config.
"""
dollar_values = self._get_dollar_values(group).ix[-1, :]
percents = dollar_values / np.sum(dollar_values)
labels = plot_utils.get_percent_strings(percents)
title = 'Portfolio Weights\n'
plot = dollar_values.plot(kind='bar', alpha=self._BAR_ALPHA)
plot.set_title(title, color=self._TEXT_COLOR)
plot.set_xticklabels(dollar_values.index, rotation=0)
plot_utils.format_y_ticks_as_dollars(plot)
plot_utils.add_bar_labels(plot, labels, self._TEXT_COLOR)
return plot
def plot_dollar_value_lines(self, group=False):
"""Plot the dollar value of portfolio holdings for the entire time
period as a line plot.
Args:
group: Whether to aggregate based on symbol_groups in config.
"""
dollar_values = self._get_dollar_values(group)
dollar_values['TOTAL'] = dollar_values.sum(1)
title = ('Portfolio Value | ' + self._TITLE_DOLLAR_FORMAT + (
'\n')).format(dollar_values['TOTAL'].ix[-1])
plot = dollar_values.plot(kind='line', ax=plt.gca())
plot.set_title(title, color=self._TEXT_COLOR)
plot_utils.format_x_ticks_as_dates(plot)
plot_utils.format_y_ticks_as_dollars(plot)
plot_utils.format_legend(plot, self._TEXT_COLOR)
return plot
def plot_profit_and_loss_lines(self):
"""Plot the profit and loss of the portfolio for the entire time period
as a line plot.
"""
profit_and_loss = self._get_profit_and_loss()
title = ('Cumulative P&L | ' + self._TITLE_DOLLAR_FORMAT + (
'\n')).format(profit_and_loss[-1])
plot = profit_and_loss.plot(kind='line', ax=plt.gca())
plot.set_title(title, color=self._TEXT_COLOR)
plot_utils.format_x_ticks_as_dates(plot)
plot_utils.format_y_ticks_as_dollars(plot)
return plot
def get_report(self):
"""Creates the entire report composed of individual plots.
"""
subject = self._config['subject_format'].format(str(
self._daily['adj_close'].index[-1].date()))
plain_body = ''
plt.style.use(self._STYLE_SHEET)
# Create list of plot images to include in the report image.
plot_images = []
plot_images.append(plot_utils.get_plot_image(
self.plot_dollar_change_bars, group=True))
plot_images.append(plot_utils.get_plot_image(
self.plot_dollar_change_bars))
plot_images.append(plot_utils.get_plot_image(
self.plot_dollar_value_bars, group=True))
plot_images.append(plot_utils.get_plot_image(
self.plot_dollar_value_bars))
plot_images.append(plot_utils.get_plot_image(
self.plot_dollar_value_lines, group=True))
plot_images.append(plot_utils.get_plot_image(
self.plot_dollar_value_lines))
plot_images.append(plot_utils.get_plot_image(
self.plot_profit_and_loss_lines))
plot_images.append(plot_utils.get_plot_image(
self.plot_percent_return_lines))
plot_images = [PIL.Image.open(x) for x in plot_images]
# Arrange plot images in a grid in the report image.
plot_width = plot_images[0].size[0]
plot_height = plot_images[0].size[1]
report_image = PIL.Image.new('RGB', (
plot_width * self._REPORT_COLS, plot_height * int(
np.ceil(len(plot_images) / self._REPORT_COLS))), 'white')
for i, item in enumerate(plot_images):
report_image.paste(item, ((i % self._REPORT_COLS) * plot_width, int(
np.floor(i / self._REPORT_COLS)) * plot_height))
# Convert report image to bytes in PNG format.
report_image_bytes = io.BytesIO()
report_image.save(report_image_bytes, format='png')
report_image_bytes.seek(0)
return {'subject': subject,
'plain_body': plain_body,
'files': {self._FILENAME: report_image_bytes}}
|
python
|
from numpy import absolute, isnan, where
from scipy.spatial.distance import correlation
def compute_correlation_distance(x, y):
correlation_distance = correlation(x, y)
if isnan(correlation_distance):
return 2
else:
return where(absolute(correlation_distance) < 1e-8, 0, correlation_distance)
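# A minimal usage sketch (illustrative values, not from the original source):
# perfectly correlated vectors give a correlation distance of ~0, while a
# constant vector makes scipy return NaN, which this helper maps to 2.
if __name__ == "__main__":
    print(compute_correlation_distance([1.0, 2.0, 3.0], [2.0, 4.0, 6.0]))  # 0
    print(compute_correlation_distance([1.0, 1.0, 1.0], [1.0, 2.0, 3.0]))  # 2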
|
python
|
# This sample tests the case where a subclass of Dict uses
# a dictionary literal as an argument to the constructor call.
from collections import Counter, defaultdict
from typing import Callable, Generic, Mapping, Optional, TypeVar
c1 = Counter({0, 1})
reveal_type(c1, expected_text="Counter[int]")
for i in range(256):
c1 = Counter({0: c1[1]})
reveal_type(c1, expected_text="Counter[int]")
reveal_type(c1, expected_text="Counter[int]")
K = TypeVar("K")
V = TypeVar("V")
MyFuncType = Callable[[Callable[[K], V]], V]
class MyFunc(Generic[K, V]):
def __init__(self, g: MyFuncType[K, V]) -> None:
self.g = g
MyFuncMapping = Mapping[K, Optional[MyFunc[K, V]]]
my_func_defaultdict: MyFuncMapping[str, int] = defaultdict(
lambda: None, {"x": MyFunc(lambda f: f("a"))}
)
|
python
|
# Sequence of the numeric terms of an arbitrary function.
# Prints the sequence of terms of the function X^2 up to a chosen term.
n = int(input())
for i in range(0,n):
print(i*i)
i = i+1
# Prints the sequence of terms of the function X^3 up to a chosen term
y = int(input())
for i in range (0,y):
print (i*i*i)
i = i+1
# This code could be repeated generically for every possible exponent in the print call.
# How do we choose the number of times the print call should exponentiate the argument X^k?
# Here X is the base and K is an integer exponent. A generic sketch follows below.
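# A minimal sketch answering the question above (hypothetical helper name,
# assuming Python 3): print the first n terms of x**k for any integer exponent k.
def print_power_sequence(n, k):
    for x in range(n):
        print(x ** k)

# Example: the same two sequences as above.
# print_power_sequence(5, 2)  # 0 1 4 9 16
# print_power_sequence(5, 3)  # 0 1 8 27 64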
|
python
|
from bisect import bisect
from contextlib import closing, contextmanager
from itertools import accumulate, chain, islice, zip_longest
from multiprocessing import Lock, RawValue, Process
from os import cpu_count
from re import sub
from sys import argv, stdout
output_file = open("bench_output-fasta_bg.txt", mode="wb", buffering=0)
write = output_file.write
def acquired_lock():
lock = Lock()
lock.acquire()
return lock
def started_process(target, args):
process = Process(target=target, args=args)
process.start()
return process
@contextmanager
def lock_pair(pre_lock=None, post_lock=None, locks=None):
pre, post = locks if locks else (pre_lock, post_lock)
if pre:
pre.acquire()
yield
if post:
post.release()
def write_lines(
sequence, n, width, lines_per_block=10000, newline=b'\n', table=None):
i = 0
blocks = (n - width) // width // lines_per_block
if blocks:
for _ in range(blocks):
output = bytearray()
for i in range(i, i + width * lines_per_block, width):
output += sequence[i:i + width] + newline
else:
i += width
if table:
write(output.translate(table))
else:
write(output)
output = bytearray()
if i < n - width:
for i in range(i, n - width, width):
output += sequence[i:i + width] + newline
else:
i += width
output += sequence[i:n] + newline
if table:
write(output.translate(table))
else:
write(output)
stdout.buffer.flush()
def cumulative_probabilities(alphabet, factor=1.0):
probabilities = tuple(accumulate(p * factor for _, p in alphabet))
table = bytearray.maketrans(
bytes(chain(range(len(alphabet)), [255])),
bytes(chain((ord(c) for c, _ in alphabet), [10]))
)
return probabilities, table
def copy_from_sequence(header, sequence, n, width, locks=None):
sequence = bytearray(sequence, encoding='utf8')
while len(sequence) < n:
sequence.extend(sequence)
with lock_pair(locks=locks):
write(header)
write_lines(sequence, n, width)
def lcg(seed, im, ia, ic):
local_seed = seed.value
try:
while True:
local_seed = (local_seed * ia + ic) % im
yield local_seed
finally:
seed.value = local_seed
def lookup(probabilities, values):
for value in values:
yield bisect(probabilities, value)
def lcg_lookup_slow(probabilities, seed, im, ia, ic):
with closing(lcg(seed, im, ia, ic)) as prng:
yield from lookup(probabilities, prng)
def lcg_lookup_fast(probabilities, seed, im, ia, ic):
local_seed = seed.value
try:
while True:
local_seed = (local_seed * ia + ic) % im
yield bisect(probabilities, local_seed)
finally:
seed.value = local_seed
def lookup_and_write(
header, probabilities, table, values, start, stop, width, locks=None):
if isinstance(values, bytearray):
output = values
else:
output = bytearray()
output[:stop - start] = lookup(probabilities, values)
with lock_pair(locks=locks):
if start == 0:
write(header)
write_lines(output, len(output), width, newline=b'\xff', table=table)
def random_selection(header, alphabet, n, width, seed, locks=None):
im = 139968.0
ia = 3877.0
ic = 29573.0
probabilities, table = cumulative_probabilities(alphabet, im)
if not locks:
with closing(lcg_lookup_fast(probabilities, seed, im, ia, ic)) as prng:
output = bytearray(islice(prng, n))
lookup_and_write(header, probabilities, table, output, 0, n, width)
else:
pre_seed, post_seed, pre_write, post_write = locks
m = cpu_count() * 3 if n > width * 15 else 1
partitions = [n // (width * m) * width * i for i in range(1, m)]
processes = []
pre = pre_write
with lock_pair(locks=(pre_seed, post_seed)):
with closing(lcg(seed, im, ia, ic)) as prng:
for start, stop in zip([0] + partitions, partitions + [n]):
values = list(islice(prng, stop - start))
post = acquired_lock() if stop < n else post_write
processes.append(started_process(
lookup_and_write,
(header, probabilities, table, values,
start, stop, width, (pre, post))
))
pre = post
for p in processes:
p.join()
def fasta(n):
alu = sub(r'\s+', '', """
GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA
TCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACT
AAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAG
GCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCG
CCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA
""")
iub = list(zip_longest('acgtBDHKMNRSVWY',
(.27, .12, .12, .27), fillvalue=.02))
homosapiens = list(zip('acgt', (0.3029549426680, 0.1979883004921,
0.1975473066391, 0.3015094502008)))
seed = RawValue('f', 42)
width = 60
tasks = [
(copy_from_sequence,
[b'>ONE Homo sapiens alu\n', alu, n * 2, width]),
(random_selection,
[b'>TWO IUB ambiguity codes\n', iub, n * 3, width, seed]),
(random_selection,
[b'>THREE Homo sapiens frequency\n', homosapiens, n * 5, width, seed]),
]
if cpu_count() < 2:
for func, args in tasks:
func(*args)
else:
written_1 = acquired_lock()
seeded_2 = acquired_lock()
written_2 = acquired_lock()
locks_sets = [
(None, written_1),
(None, seeded_2, written_1, written_2),
(seeded_2, None, written_2, None),
]
processes = [
started_process(target, args + [locks_sets[i]])
for i, (target, args) in enumerate(tasks)
]
for p in processes:
p.join()
output_file.close()
if __name__ == "__main__":
if len(argv) > 1:
fasta(int(argv[1]))
else:
fasta(1000000)
|
python
|
# ______ _ _ _ _ _ _ _
# | ___ \ | | | | (_) (_) | | (_)
# | |_/ / __ ___ | |__ __ _| |__ _| |_ ___| |_ _ ___
# | __/ '__/ _ \| '_ \ / _` | '_ \| | | / __| __| |/ __|
# | | | | | (_) | |_) | (_| | |_) | | | \__ \ |_| | (__
# \_| |_| \___/|_.__/ \__,_|_.__/|_|_|_|___/\__|_|\___|
# ___ ___ _ _
# | \/ | | | (_)
# | . . | ___ ___| |__ __ _ _ __ _ ___ ___
# | |\/| |/ _ \/ __| '_ \ / _` | '_ \| |/ __/ __|
# | | | | __/ (__| | | | (_| | | | | | (__\__ \
# \_| |_/\___|\___|_| |_|\__,_|_| |_|_|\___|___/
# _ _ _
# | | | | | |
# | | __ _| |__ ___ _ __ __ _| |_ ___ _ __ _ _
# | | / _` | '_ \ / _ \| '__/ _` | __/ _ \| '__| | | |
# | |___| (_| | |_) | (_) | | | (_| | || (_) | | | |_| |
# \_____/\__,_|_.__/ \___/|_| \__,_|\__\___/|_| \__, |
# __/ |
# |___/
#
# MIT License
#
# Copyright (c) 2019 Probabilistic Mechanics Laboratory
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
""" Custom layers """
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.keras.constraints import MinMaxNorm
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras import constraints
from tensorflow.python.framework import tensor_shape
class DOrC(Layer):
""" Discrete ordinal classifier layer.
Maps a continuous input to an approximately integer ordinal class in [1, 5]
by summing four steep sigmoid steps with trainable thresholds.
"""
def __init__(self,
kernel_initializer = 'glorot_uniform',
kernel_regularizer=None,
kernel_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(DOrC, self).__init__(**kwargs)
self.kernel_initializer = initializers.get(kernel_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
def build(self, input_shape, **kwargs):
self.threshold1 = self.add_weight("threshold1",
shape = [1],
initializer = self.kernel_initializer,
constraint = MinMaxNorm(min_value=0.0, max_value=0.3, rate=1.0),
dtype = self.dtype,
trainable = self.trainable,
**kwargs)
self.threshold2 = self.add_weight("threshold2",
shape = [1],
initializer = self.kernel_initializer,
constraint = MinMaxNorm(min_value=0.2, max_value=0.5, rate=1.0),
dtype = self.dtype,
trainable = self.trainable,
**kwargs)
self.threshold3 = self.add_weight("threshold3",
shape = [1],
initializer = self.kernel_initializer,
constraint = MinMaxNorm(min_value=0.4, max_value=0.8, rate=1.0),
dtype = self.dtype,
trainable = self.trainable,
**kwargs)
self.threshold4 = self.add_weight("threshold4",
shape = [1],
initializer = self.kernel_initializer,
constraint = MinMaxNorm(min_value=0.8, max_value=2.0, rate=1.0),
dtype = self.dtype,
trainable = self.trainable,
**kwargs)
self.built = True
def call(self, inputs):
first_threshold = 1/(1 + gen_math_ops.exp(-5e1*(inputs-self.threshold1)))
second_threshold = 1/(1 + gen_math_ops.exp(-5e1*(inputs*first_threshold-self.threshold2)))
third_threshold = 1/(1 + gen_math_ops.exp(-5e1*(inputs*second_threshold-self.threshold3)))
fourth_threshold = 1/(1 + gen_math_ops.exp(-5e1*(inputs*third_threshold-self.threshold4)))
output = 1 + first_threshold +second_threshold + third_threshold + fourth_threshold
return output
def compute_output_shape(self, input_shape):
aux_shape = tensor_shape.TensorShape((None,1))
return aux_shape[:-1].concatenate(1)
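# A minimal usage sketch (hypothetical shapes, assuming TensorFlow 2.x / Keras):
# the layer turns a non-negative scalar activation into an approximately
# integer ordinal class in [1, 5].
#
# import tensorflow as tf
# inputs = tf.keras.Input(shape=(1,))
# damage = tf.keras.layers.Dense(1, activation='relu')(inputs)
# ordinal_class = DOrC()(damage)
# model = tf.keras.Model(inputs, ordinal_class)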
|
python
|
# -*- coding: utf-8 -*-
"""
drift - Logging setup code
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Set up logging based on config dict.
"""
from __future__ import absolute_import
import os
import logging
from logging.handlers import SysLogHandler
import logging.config
import json
import datetime
import sys
import time
import uuid
from socket import gethostname
from collections import OrderedDict
from functools import wraps
from logstash_formatter import LogstashFormatterV1
import six
from six.moves.urllib.parse import urlsplit
from flask import g, request
from drift.core.extensions.jwt import current_user
from drift.utils import get_tier_name
def get_stream_handler():
"""returns a stream handler with standard formatting for use in local development"""
stream_handler = logging.StreamHandler()
stream_formatter = logging.Formatter(
fmt="%(asctime)s %(levelname)-8s %(name)-15s %(message)s"
)
stream_handler.setFormatter(stream_formatter)
return stream_handler
def get_caller():
"""returns a nice string representing caller for logs
Note: This is heavy"""
import inspect
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
caller = "{} ({}#{})".format(calframe[2][3], calframe[2][1], calframe[2][2])
return caller
def get_clean_path_from_url(url):
"""Extract the endpoint path from the passed-in URL and remove
service information and any ids so that the endpoint path
can easily be used for grouping.
"""
clean_path = None
try:
lst = urlsplit(url)
path = lst.path
lst = path.split("/")
for i, l in enumerate(lst):
try:
int(l)
except ValueError:
pass
else:
lst[i] = "<int>"
# assume that the service name is the first part so we skip it
clean_path = "/" + "/".join(lst[2:])
except Exception:
# Todo: should report these errors
pass
return clean_path
def get_log_details():
details = OrderedDict()
tenant_name = None
tier_name = get_tier_name()
remote_addr = None
try:
remote_addr = request.remote_addr
except Exception:
pass
try:
if hasattr(g, "conf"):
tenant_name = (
g.conf.tenant_name["tenant_name"] if g.conf.tenant_name else "(none)"
)
except RuntimeError as e:
if "Working outside of application context" in repr(e):
pass
else:
raise
log_context = {}
log_context["created"] = datetime.datetime.utcnow().isoformat() + "Z"
log_context["tenant"] = tenant_name
log_context["tier"] = tier_name
log_context["remote_addr"] = remote_addr
details["logger"] = log_context
jwt_context = {}
try:
fields = set(
[
"user_id",
"player_id",
"roles",
"jti",
"user_name",
"player_name",
"client_id",
"identity_id",
]
)
for k, v in current_user.items():
if k in fields:
key = "{}".format(k)
jwt_context[key] = v
if k == "roles" and v:
jwt_context[k] = ",".join(v)
except Exception as e:
pass
if jwt_context:
details["user"] = jwt_context
# add "Drift-Log-Context" request headers to the logs
try:
details["client"] = json.loads(request.headers.get("Drift-Log-Context"))
except Exception:
pass
return details
# Custom log record
_logRecordFactory = logging.getLogRecordFactory()
def drift_log_record_factory(*args, **kw):
global _logRecordFactory
logrec = _logRecordFactory(*args, **kw)
log_details = get_log_details()
for k, v in log_details.items():
setattr(logrec, k, v)
logger_fields = (
"levelname",
"levelno",
"process",
"thread",
"name",
"filename",
"module",
"funcName",
"lineno",
)
for f in logger_fields:
log_details["logger"][f] = getattr(logrec, f, None)
try:
correlation_id = request.correlation_id
except Exception:
correlation_id = None
log_details["logger"]["correlation_id"] = correlation_id
log_details["logger"]["created"] = datetime.datetime.utcnow().isoformat() + "Z"
for k, v in log_details.items():
setattr(logrec, k, v)
return logrec
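# A minimal wiring sketch (an assumption, not shown in this module; requires
# Python 3): installing the factory globally makes every LogRecord carry the
# drift context fields added above.
#
# logging.setLogRecordFactory(drift_log_record_factory)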
class JSONFormatter(logging.Formatter):
"""
Format log message as JSON.
"""
source_host = gethostname()
log_tag = None
def __init__(self):
super(JSONFormatter, self).__init__()
def formatTime(self, record, datefmt=None):
dt = datetime.datetime.fromtimestamp(record.created)
return dt.isoformat() + "Z"
def get_formatted_data(self, record):
data = OrderedDict()
# put the timestamp first for splunk timestamp indexing
data["timestamp"] = self.formatTime(record)
if hasattr(record, "logger") and "tier" in record.logger:
data["tenant"] = "{}.{}".format(
record.logger.get("tier", None), record.logger.get("tenant", None)
)
field_names = "logger", "client", "user"
data.update(
{key: getattr(record, key) for key in field_names if hasattr(record, key)}
)
return data
def format(self, record):
data = self.get_formatted_data(record)
json_text = json.dumps(data, default=self._json_default)
return json_text
def json_format(self, data):
json_text = json.dumps(data, default=self._json_default)
return "drift.%s: @cee: %s" % (self.log_tag, json_text)
@staticmethod
def _json_default(obj):
"""
Coerce everything to strings.
All objects representing time get output as ISO8601.
"""
if (
isinstance(obj, datetime.datetime)
or isinstance(obj, datetime.date)
or isinstance(obj, datetime.time)
):
return obj.isoformat()
else:
return str(obj)
class ServerLogFormatter(JSONFormatter):
log_tag = "server"
def format(self, record):
data = self.get_formatted_data(record)
data["message"] = super(JSONFormatter, self).format(record)
data["level"] = record.levelname
try:
data["request"] = "{} {}".format(request.method, request.url)
except Exception:
pass
return self.json_format(data)
class EventLogFormatter(JSONFormatter):
log_tag = "events"
def format(self, record):
data = self.get_formatted_data(record)
data["event_name"] = super(JSONFormatter, self).format(record)
data.update(getattr(record, "extra", {}))
return self.json_format(data)
class ClientLogFormatter(JSONFormatter):
log_tag = "client"
def format(self, record):
data = self.get_formatted_data(record)
data.update(getattr(record, "extra", {}))
return self.json_format(data)
def trim_logger(data):
# remove unnecessary logger fields
for k, v in data["logger"].copy().items():
if k not in ["name", "tier", "tenant", "correlation_id"]:
del data["logger"][k]
def format_request_body(key, value):
if key == "password":
return "*"
else:
# constrain the body to 64 characters per key and convert to string
return str(value)[:64]
class RequestLogFormatter(JSONFormatter):
log_tag = "request"
def format(self, record):
data = self.get_formatted_data(record)
trim_logger(data)
try:
data["method"] = request.method
data["url"] = request.url
data["remote_addr"] = request.remote_addr
except Exception:
pass
data["endpoint"] = get_clean_path_from_url(request.url)
request_body = None
try:
if request.json:
request_body = {
key: format_request_body(key, value)
for key, value in request.json.items()
}
else:
request_body = request.data
except Exception:
pass
if request_body:
data["request_body"] = request_body
try:
data.update(getattr(record, "extra", {}))
except Exception:
pass
if data.get("log_level") == 1:
data = {
"timestamp": data["timestamp"],
"tenant": data["tenant"],
"method": data["method"],
"endpoint": data["endpoint"],
}
return self.json_format(data)
# Calling 'logsetup' more than once may result in multiple handlers emitting
# multiple log events for a single log call. Flagging it is a simple fix.
_setup_done = False
class StreamFormatter(logging.Formatter):
"""
The stream formatter automatically grabs the record's extra field
and appends its content to the log message.
"""
def format(self, record):
message = super(StreamFormatter, self).format(record)
if hasattr(record, "extra"):
message += " | {}".format(record.extra)
return message
def logsetup(app):
global _setup_done
if _setup_done:
return
_setup_done = True
app.log_formatter = None
output_format = app.config.get("LOG_FORMAT", "json").lower()
log_level = app.config.get("LOG_LEVEL", "INFO").upper()
if output_format == "json":
logger = logging.getLogger()
logger.setLevel(log_level)
formatter = LogstashFormatterV1()
app.log_formatter = formatter
# make sure this is our only stream handler
logger.handlers = []
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
else:
logging.basicConfig(
level=log_level, format='%(asctime)s - %(name)-14s %(levelname)-5s: %(message)s'
)
# if output_format == 'text':
# logging.basicConfig(level=log_level)
# else:
# handler = logging.StreamHandler()
# formatter = LogstashFormatterV1()
# handler.setFormatter(formatter)
# logging.basicConfig(handlers=[handler], level=log_level)
# if 'logging' in app.config:
# logging.config.dictConfig(app.config['logging'])
@app.before_request
def _setup_logging():
return setup_logging(app)
def setup_logging(app):
"""Inject a tracking identifier into the request and set up context-info
for all debug logs
"""
g.log_defaults = None
request_id = request.headers.get("Request-ID", None)
if not request_id:
default_request_id = str(uuid.uuid4())
request_id = request.headers.get("X-Request-ID", default_request_id)
request.request_id = request_id
g.log_defaults = get_log_defaults()
if app.log_formatter:
app.log_formatter.defaults = g.log_defaults
def get_log_defaults():
defaults = {}
tenant_name = None
tier_name = get_tier_name()
remote_addr = None
try:
remote_addr = request.remote_addr
except Exception:
pass
try:
if hasattr(g, 'conf'):
tenant_name = g.conf.tenant_name['tenant_name'] if g.conf.tenant_name else '(none)'
except RuntimeError as e:
if "Working outside of application context" in repr(e):
pass
else:
raise
defaults["tenant"] = tenant_name
defaults["tier"] = tier_name
defaults["remote_addr"] = remote_addr
jwt_context = get_user_context()
if jwt_context:
defaults["user"] = jwt_context
# add "Client-Log-Context" request headers to the logs
client = None
try:
client = request.headers.get("Client-Log-Context", None)
defaults["client"] = json.loads(client)
except Exception:
defaults["client"] = client
defaults["request"] = {
"request_id": request.request_id,
"url": request.url,
"method": request.method,
"remote_addr": request.remote_addr,
"path": request.path,
"user_agent": request.headers.get('User-Agent'),
"endpoint": get_clean_path_from_url(request.url)
}
defaults["request"].update(request.view_args or {})
return defaults
def get_user_context():
jwt_context = {}
try:
fields = set(["user_id", "player_id", "roles", "jti", "user_name",
"player_name", "client_id", "identity_id"])
for k, v in current_user.items():
if k in fields:
key = "{}".format(k)
jwt_context[key] = v
if k == "roles" and v:
jwt_context[k] = ",".join(v)
except Exception:
pass
return jwt_context
def drift_init_extension(app, **kwargs):
logsetup(app)
def request_log_level(level):
def wrapper(fn):
@wraps(fn)
def decorated(*args, **kwargs):
g.request_log_level = int(level)
return fn(*args, **kwargs)
return decorated
return wrapper
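# A minimal usage sketch (hypothetical view, assuming a Flask app set up via
# drift_init_extension): decorating a view stores the requested log level in
# flask.g so request logging can be trimmed for chatty endpoints.
#
# @app.route("/ping")
# @request_log_level(1)
# def ping():
#     return "pong"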
|
python
|
# -*- coding: utf-8 -*-
'''
Created on 2017/09/14
@author: yuyang
'''
import os
import urllib
import uuid
from docx.shared import Pt
from docx.shared import RGBColor
from docx.shared import Inches
JPEG_EXTENSION = '.jpg'
PNG_EXTENSION = '.png'
GIF_EXTENSION = '.gif'
SPLIT_STRING = '///'
def add_author(document, author):
para = document.add_paragraph()
run = para.add_run(author)
font = run.font
#font.name = 'Microsoft YaHei'
font.size = Pt(12)
font.color.rgb = RGBColor(0x43, 0x6E, 0xEE)
def add_content(document, content, para = None, font_size = 16):
if not para:
para = document.add_paragraph()
run = para.add_run(content)
font = run.font
font.bold = False
font.size = Pt(font_size)
font.color.rgb = RGBColor(0x08, 0x08, 0x08)
def add_picture(document, story):
filenames = analyze_pic(story)
for filename in filenames:
try:
document.add_picture(filename, width=Inches(5))
except:
print 'ๆๅ
ฅๅพ็ๅบ้๏ผ' + filename
def add_time(document, time):
para = document.add_paragraph()
run = para.add_run(time)
font = run.font
font.italic = True
#font.name = 'Microsoft YaHei'
font.size = Pt(10)
font.color.rgb = RGBColor(0x7A, 0x7A, 0x7A)
def download_pic(url, extension):
try:
if not os.path.exists('.//pics'):
os.mkdir('.//pics')
filename = '.\\pics\\' + str(uuid.uuid4()) + extension
urllib.urlretrieve(url, filename)
except Exception:
print 'ไธ่ฝฝๅพ็ๅบ้๏ผ ' + url
return filename
def analyze_pic(story):
filenames = []
picBox = None
imgGroup = None
try:
picBox = story.find_element_by_class_name('picBox')
except:
pass
try:
imgGroup = story.find_element_by_class_name('tl_imgGroup')
except:
pass
if picBox:# one picture
img_url = picBox.find_element_by_tag_name('a').get_attribute('href')
print 'ๅพ็๏ผ', img_url
filename = download_pic(img_url, JPEG_EXTENSION)
filenames.append(filename)
elif imgGroup:# multi picture
a_tags = imgGroup.find_elements_by_tag_name('a')
for a_tag in a_tags:
img_url = a_tag.get_attribute('href')
print 'ๅพ็๏ผ', img_url
filename = download_pic(img_url, JPEG_EXTENSION)
filenames.append(filename)
return filenames
|
python
|
"""
Views for the app
"""
from __future__ import absolute_import
from __future__ import division
import os
import uuid
from auth import constants
from auth.forms import \
CategoryForm, \
CountryForm, \
CurrencyForm, \
GatewayForm, \
LoginVoucherForm, \
MyUserForm, \
NetworkForm, \
NewVoucherForm, \
ProductForm, \
UserForm
from auth.models import Auth, Category, Country, Currency, Gateway, Network, Product, User, Voucher, db
# from auth.payu import get_transaction, set_transaction, capture
from auth.resources import logos
from auth.services import \
environment_dump, \
healthcheck as healthcheck_service
from auth.utils import is_logged_in, has_role
from flask import \
Blueprint, \
abort, \
current_app, \
flash, \
redirect, \
request, \
render_template, \
send_from_directory, \
session, \
url_for
from flask_menu import register_menu
from flask_potion.exceptions import ItemNotFound
from flask_security import \
auth_token_required, \
current_user, \
login_required, \
roles_accepted
from PIL import Image
bp = Blueprint('auth', __name__)
RESOURCE_MODELS = {
'categories': Category,
'countries': Country,
'currencies': Currency,
'gateways': Gateway,
'networks': Network,
'products': Product,
'users': User,
'vouchers': Voucher,
}
def generate_token():
"""Generate token for the voucher session"""
return uuid.uuid4().hex
def resource_query(resource):
"""Generate a filtered query for a resource"""
model = RESOURCE_MODELS[resource]
query = model.query
if current_user.has_role('network-admin') or current_user.has_role('gateway-admin'):
if model == Network:
query = query.filter_by(id=current_user.network_id)
elif model in [ Gateway, User ]:
query = query.filter_by(network_id=current_user.network_id)
if current_user.has_role('network-admin'):
if model == Voucher:
query = query.join(Voucher.gateway).join(Gateway.network).filter(Network.id == current_user.network_id)
if current_user.has_role('gateway-admin'):
if model == Gateway:
query = query.filter_by(id=current_user.gateway_id)
elif model in [ User, Voucher ]:
query = query.filter_by(gateway_id=current_user.gateway_id)
return query
def resource_instance(resource, id):
"""Return instances"""
model = RESOURCE_MODELS[resource]
return resource_query(resource).filter(model.id == id).first_or_404()
def resource_instances(resource):
"""Return instances"""
query = resource_query(resource)
if resource == 'vouchers':
return (query.filter(Voucher.status != 'archived')
.order_by(Voucher.status, Voucher.created_at.desc())
.all())
else:
return query.all()
def resource_index(resource, form=None):
"""Handle a resource index request"""
instances = resource_instances(resource)
return render_template('%s/index.html' % resource,
form=form,
instances=instances)
def resource_new(resource, form):
"""Handle a new resource request"""
if form.validate_on_submit():
instance = RESOURCE_MODELS[resource]()
form.populate_obj(instance)
db.session.add(instance)
db.session.commit()
flash('Create %s successful' % instance)
return redirect(url_for('.%s_index' % resource))
return render_template('%s/new.html' % resource, form=form)
def resource_edit(resource, id, form_class):
instance = resource_instance(resource, id)
form = form_class(obj=instance)
if form.validate_on_submit():
form.populate_obj(instance)
db.session.commit()
flash('Update %s successful' % instance)
return redirect(url_for('.%s_index' % resource))
return render_template('%s/edit.html' % resource,
form=form,
instance=instance)
def resource_delete(resource, id):
instance = resource_instance(resource, id)
if request.method == 'POST':
db.session.delete(instance)
db.session.commit()
flash('Delete %s successful' % instance)
return redirect(url_for('.%s_index' % resource))
return render_template('shared/delete.html',
instance=instance,
resource=resource)
def resource_action(resource, id, action):
instance = resource_instance(resource, id)
if request.method == 'POST':
if action in constants.ACTIONS[resource]:
getattr(instance, action)()
db.session.commit()
flash('%s %s successful' % (instance, action))
return redirect(url_for('.%s_index' % resource))
else:
abort(404)
return render_template('shared/action.html',
instance=instance,
action=action,
resource=resource)
@bp.route('/network', methods=['GET', 'POST'])
@login_required
@roles_accepted('network-admin')
@register_menu(
bp,
'.network',
'My Network',
visible_when=has_role('network-admin'),
order=997
)
def my_network():
form = NetworkForm(obj=current_user.network)
if form.validate_on_submit():
form.populate_obj(current_user.network)
db.session.commit()
flash('Update successful')
return redirect('/')
return render_template('networks/current.html',
form=form,
instance=current_user.network)
@bp.route('/gateway', methods=['GET', 'POST'])
@login_required
@roles_accepted('gateway-admin')
@register_menu(
bp,
'.gateway',
'My Gateway',
visible_when=has_role('gateway-admin'),
order=998
)
def my_gateway():
gateway = current_user.gateway
return _gateways_edit(
gateway,
'My Gateway',
url_for('.my_gateway'),
url_for('.home')
)
@bp.route('/user', methods=['GET', 'POST'])
@login_required
@register_menu(
bp,
'.account',
'My Account',
visible_when=is_logged_in,
order=999
)
def my_account():
form = MyUserForm(obj=current_user)
if form.validate_on_submit():
if form.password.data == '':
del form.password
form.populate_obj(current_user)
db.session.commit()
flash('Update successful')
return redirect('/')
return render_template('users/current.html',
form=form,
instance=current_user)
@bp.route('/networks')
@login_required
@roles_accepted('super-admin')
@register_menu(
bp,
'.networks',
'Networks',
visible_when=has_role('super-admin'),
order=10
)
def networks_index():
return resource_index('networks')
@bp.route('/networks/new', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin')
def networks_new():
form = NetworkForm()
return resource_new('networks', form)
@bp.route('/networks/<id>', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin')
def networks_edit(id):
return resource_edit('networks', id, NetworkForm)
@bp.route('/networks/<id>/delete', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin')
def networks_delete(id):
return resource_delete('networks', id)
@bp.route('/gateways')
@login_required
@roles_accepted('super-admin', 'network-admin')
@register_menu(
bp,
'.gateways',
'Gateways',
visible_when=has_role('super-admin', 'network-admin'),
order=20)
def gateways_index():
return resource_index('gateways')
def handle_logo(form):
if request.files['logo']:
filename = form.logo.data = logos.save(request.files['logo'], name='%s.' % form.id.data)
im = Image.open(logos.path(filename))
im.thumbnail((300, 300), Image.ANTIALIAS)
im.save(logos.path(filename))
else:
del form.logo
@bp.route('/gateways/new', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin')
def gateways_new():
form = GatewayForm()
if form.validate_on_submit():
handle_logo(form)
gateway = Gateway()
form.populate_obj(gateway)
db.session.add(gateway)
db.session.commit()
flash('Create %s successful' % gateway)
return redirect(url_for('.gateways_index'))
return render_template('gateways/new.html', form=form)
def _gateways_edit(gateway, page_title, action_url, redirect_url):
form = GatewayForm(obj=gateway)
if form.validate_on_submit():
handle_logo(form)
form.populate_obj(gateway)
db.session.commit()
flash('Update %s successful' % gateway)
return redirect(redirect_url)
return render_template('gateways/edit.html',
action_url=action_url,
form=form,
instance=gateway,
logos=logos,
page_title=page_title)
@bp.route('/gateways/<id>', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin')
def gateways_edit(id):
gateway = Gateway.query.filter_by(id=id).first_or_404()
return _gateways_edit(
gateway,
'Edit Gateway',
url_for('.gateways_edit', id=id),
url_for('.gateways_index')
)
@bp.route('/gateways/<id>/delete', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin')
def gateways_delete(id):
return resource_delete('gateways', id)
@bp.route('/users')
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
@register_menu(
bp,
'.users',
'Users',
visible_when=has_role('super-admin', 'network-admin', 'gateway-admin'),
order=40
)
def users_index():
form = UserForm()
return resource_index('users', form=form)
@bp.route('/users/new', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def users_new():
form = UserForm()
if current_user.has_role('gateway-admin'):
del form.roles
return resource_new('users', form)
@bp.route('/users/<id>', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def users_edit(id):
instance = resource_instance('users', id)
if (current_user.has_role('network-admin')
and instance.network != current_user.network):
abort(403)
if (current_user.has_role('gateway-admin')
and (instance.network != current_user.network
or instance.gateway != current_user.gateway)):
abort(403)
form = UserForm(obj=instance)
if current_user.has_role('network-admin'):
del form.gateway
if current_user == instance:
del form.active
del form.roles
if form.validate_on_submit():
if form.password.data == '':
del form.password
form.populate_obj(instance)
db.session.commit()
flash('Update %s successful' % instance)
return redirect(url_for('.users_index'))
return render_template('users/edit.html', form=form, instance=instance)
@bp.route('/users/<id>/delete', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def users_delete(id):
return resource_delete('users', id)
@bp.route('/vouchers')
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
@register_menu(
bp,
'.vouchers',
'Vouchers',
visible_when=has_role('super-admin', 'network-admin', 'gateway-admin'),
order=5
)
def vouchers_index():
return resource_index('vouchers')
@bp.route('/vouchers/<id>/<action>', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def vouchers_action(id, action):
return resource_action('vouchers', id, action)
@bp.route('/categories')
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
@register_menu(
bp,
'.categories',
'Categories',
visible_when=has_role('super-admin', 'network-admin', 'gateway-admin'),
order=99
)
def categories_index():
return resource_index('categories')
@bp.route('/categories/new', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def categories_new():
form = CategoryForm()
return resource_new('categories', form)
@bp.route('/categories/<id>/delete', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def categories_delete(id):
return resource_delete('categories', id)
@bp.route('/categories/<id>', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def categories_edit(id):
return resource_edit('categories', id, CategoryForm)
@bp.route('/products')
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
@register_menu(
bp,
'.products',
'Products',
visible_when=has_role('super-admin', 'network-admin', 'gateway-admin'),
order=99
)
def products_index():
return resource_index('products')
@bp.route('/products/new', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def products_new():
form = ProductForm()
return resource_new('products', form)
@bp.route('/products/<id>/delete', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def products_delete(id):
return resource_delete('products', id)
@bp.route('/products/<id>', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def products_edit(id):
return resource_edit('products', id, ProductForm)
@bp.route('/countries')
@login_required
@roles_accepted('super-admin')
@register_menu(
bp,
'.countries',
'Countries',
visible_when=has_role('super-admin'),
order=99
)
def countries_index():
return resource_index('countries')
@bp.route('/countries/new', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin')
def countries_new():
form = CountryForm()
return resource_new('countries', form)
@bp.route('/countries/<id>/delete', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin')
def countries_delete(id):
return resource_delete('countries', id)
@bp.route('/countries/<id>', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin')
def countries_edit(id):
return resource_edit('countries', id, CountryForm)
@bp.route('/currencies')
@login_required
@roles_accepted('super-admin')
@register_menu(
bp,
'.currencies',
'Currencies',
visible_when=has_role('super-admin'),
order=99
)
def currencies_index():
return resource_index('currencies')
@bp.route('/currencies/new', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def currencies_new():
form = CurrencyForm()
return resource_new('currencies', form)
@bp.route('/currencies/<id>/delete', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def currencies_delete(id):
return resource_delete('currencies', id)
@bp.route('/currencies/<id>', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
def currencies_edit(id):
return resource_edit('currencies', id, CurrencyForm)
@bp.route('/new-voucher', methods=['GET', 'POST'])
@login_required
@roles_accepted('super-admin', 'network-admin', 'gateway-admin')
@register_menu(
bp,
'.new-voucher',
'New Voucher',
visible_when=has_role('super-admin', 'network-admin', 'gateway-admin'),
order=0
)
def vouchers_new():
form = NewVoucherForm()
choices = []
defaults = {}
if current_user.has_role('gateway-admin'):
choices = [
[
current_user.gateway_id,
'%s - %s' % (current_user.gateway.network.title,
current_user.gateway.title)
]
]
defaults[current_user.gateway_id] = {
'minutes': current_user.gateway.default_minutes,
'megabytes': current_user.gateway.default_megabytes,
}
else:
if current_user.has_role('network-admin'):
networks = [current_user.network]
else:
networks = Network.query.all()
for network in networks:
for gateway in network.gateways:
choices.append([
gateway.id,
'%s - %s' % (network.title,
gateway.title)
])
defaults[gateway.id] = {
'minutes': gateway.default_minutes,
'megabytes': gateway.default_megabytes,
}
if choices == []:
flash('Define a network and gateway first.')
return redirect(request.referrer)
form.gateway_id.choices = choices
item = defaults[choices[0][0]]
if request.method == 'GET':
form.minutes.data = item['minutes']
form.megabytes.data = item['megabytes']
if form.validate_on_submit():
voucher = Voucher()
form.populate_obj(voucher)
db.session.add(voucher)
db.session.commit()
return redirect(url_for('.vouchers_new', code=voucher.code))
return render_template('vouchers/new.html', form=form, defaults=defaults)
@bp.route('/wifidog/login/', methods=['GET', 'POST'])
def wifidog_login():
form = LoginVoucherForm(request.form)
if form.validate_on_submit():
voucher_code = form.voucher_code.data.upper()
voucher = Voucher.query.filter_by(code=voucher_code, status='new').first()
if voucher is None:
flash(
'Voucher not found, did you type the code correctly?',
'error'
)
return redirect(request.referrer)
form.populate_obj(voucher)
voucher.token = generate_token()
db.session.commit()
session['voucher_token'] = voucher.token
url = ('http://%s:%s/wifidog/auth?token=%s' %
(voucher.gw_address,
voucher.gw_port,
voucher.token))
return redirect(url)
if request.method == 'GET':
gateway_id = request.args.get('gw_id')
else:
gateway_id = form.gateway_id.data
if gateway_id is None:
abort(404)
gateway = Gateway.query.filter_by(id=gateway_id).first_or_404()
return render_template('wifidog/login.html', form=form, gateway=gateway)
@bp.route('/wifidog/ping/')
def wifidog_ping():
return ('Pong', 200)
@bp.route('/wifidog/auth/')
def wifidog_auth():
auth = Auth(
user_agent=request.user_agent.string,
stage=request.args.get('stage'),
ip=request.args.get('ip'),
mac=request.args.get('mac'),
token=request.args.get('token'),
incoming=int(request.args.get('incoming')),
outgoing=int(request.args.get('outgoing')),
gateway_id=request.args.get('gw_id')
)
(auth.status, auth.messages) = auth.process_request()
db.session.add(auth)
db.session.commit()
def generate_point(measurement):
return {
"measurement": 'auth_%s' % measurement,
"tags": {
"source": "auth",
"network_id": auth.gateway.network_id,
"gateway_id": auth.gateway_id,
"user_agent": auth.user_agent,
"stage": auth.stage,
"ip": auth.ip,
"mac": auth.mac,
"token": auth.token,
},
"time": auth.created_at,
"fields": {
"value": getattr(auth, measurement),
}
}
# points = [generate_point(m) for m in [ 'incoming', 'outgoing' ]]
# influx_db.connection.write_points(points)
return ("Auth: %s\nMessages: %s\n" % (auth.status, auth.messages), 200)
@bp.route('/wifidog/portal/')
def wifidog_portal():
voucher_token = session.get('voucher_token')
if voucher_token:
voucher = Voucher.query.filter_by(token=voucher_token).first()
else:
voucher = None
gateway_id = request.args.get('gw_id')
if gateway_id is None:
abort(404)
gateway = Gateway.query.filter_by(id=gateway_id).first_or_404()
logo_url = None
if gateway.logo:
logo_url = logos.url(gateway.logo)
return render_template('wifidog/portal.html',
gateway=gateway,
logo_url=logo_url,
voucher=voucher)
@bp.route('/pay')
def pay():
return_url = url_for('.pay_return', _external=True)
cancel_url = url_for('.pay_cancel', _external=True)
response = set_transaction('ZAR',
1000,
'Something',
return_url,
cancel_url)
return redirect('%s?PayUReference=%s' % (capture, response.payUReference))
@bp.route('/pay/return')
def pay_return():
response = get_transaction(request.args.get('PayUReference'))
basketAmount = '{:.2f}'.format(int(response.basket.amountInCents) / 100)
category = 'success' if response.successful else 'error'
flash(response.displayMessage, category)
return render_template('payu/transaction.html',
response=response,
basketAmount=basketAmount)
@bp.route('/pay/cancel')
def pay_cancel():
response = get_transaction(request.args.get('payUReference'))
basketAmount = '{:.2f}'.format(int(response.basket.amountInCents) / 100)
flash(response.displayMessage, 'warning')
return render_template('payu/transaction.html',
response=response,
basketAmount=basketAmount)
@bp.route('/favicon.ico')
def favicon():
directory = os.path.join(current_app.root_path, 'static')
return send_from_directory(directory,
'favicon.ico',
mimetype='image/vnd.microsoft.icon')
@bp.route('/auth-token')
@login_required
def auth_token():
return current_user.get_auth_token()
@bp.route('/healthcheck')
@auth_token_required
def healthcheck():
return healthcheck_service.check()
@bp.route('/environment')
@auth_token_required
def environment():
return environment_dump.dump_environment()
@bp.route('/')
def home():
return redirect(url_for('security.login'))
|
python
|
# Copyright (c) Naas Development Team.
# Distributed under the terms of the Modified BSD License.
import os
c = get_config()
c.NotebookApp.ResourceUseDisplay.track_cpu_percent = True
c.NotebookApp.ResourceUseDisplay.mem_warning_threshold = 0.1
c.NotebookApp.ResourceUseDisplay.cpu_warning_threshold = 0.1
# We rely on environment variables to configure JupyterHub so that we
# avoid having to rebuild the JupyterHub container every time we change a
# configuration parameter.
# Spawn single-user servers as Kubernetes pods via KubeSpawner
c.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'
c.JupyterHub.logo_file = "/srv/jupyterhub/naas_logo.svg"
c.JupyterHub.service_tokens = {
'secret-token': os.environ.get('ADMIN_API_TOKEN', 'SHOULD_BE_CHANGED'),
}
c.KubeSpawner.image = os.environ['DOCKER_NOTEBOOK_IMAGE']
c.KubeSpawner.image_pull_policy = 'Always'
# JupyterHub requires a single-user instance of the Notebook server, so we
# default to using the `start-singleuser.sh` script included in the
# jupyter/docker-stacks *-notebook images as the Docker run command when
# spawning containers. Optionally, you can override the Docker run command
# using the DOCKER_SPAWN_CMD environment variable.
c.KubeSpawner.environment = {
'JUPYTERHUB_URL': os.environ.get('JUPYTERHUB_URL', ''),
'PUBLIC_DK_API': os.environ.get('PUBLIC_DK_API', ''),
'TC_API_SCREENSHOT': os.environ.get('TC_API_SCREENSHOT', ''),
'ALLOWED_IFRAME': os.environ.get('ALLOWED_IFRAME', ''),
'TZ': os.environ.get('TZ', 'Europe/Paris')
}
c.KubeSpawner.cpu_guarantee = os.environ.get('KUBE_CPU_GUAR', 0.3)
c.KubeSpawner.cpu_limit = os.environ.get('KUBE_CPU_LIMIT', 1.0)
c.KubeSpawner.mem_limit = os.environ.get('KUBE_MEM_LIMIT', '4G')
c.KubeSpawner.mem_guarantee = os.environ.get('KUBE_MEM_GUAR', '500M')
# Explicitly set notebook directory because we'll be mounting a host volume to
# it. Most jupyter/docker-stacks *-notebook images run the Notebook server as
# user `jovyan`, and set the notebook directory to `/home/jovyan/work`.
# We follow the same convention.
notebook_dir = os.environ.get('DOCKER_NOTEBOOK_DIR') or '/home/ftp'
c.KubeSpawner.notebook_dir = notebook_dir
# Mount the shared NFS volume; each user gets a subPath under ftpusers/{username}
c.KubeSpawner.volumes = [
{
'name': 'nfs-root',
'nfs': {
'server': os.environ.get('VOLUME_SERVER', 'fs-b87bd009.efs.eu-west-3.amazonaws.com'),
'path': '/'
}
}
]
c.KubeSpawner.volume_mounts = [
{
'name': 'nfs-root',
'mountPath': os.environ.get('DOCKER_NOTEBOOK_DIR'),
'subPath': os.environ.get('KUBE_NAMESPACE', 'prod') + '/ftpusers/{username}'
}
]
# This is used to set proper rights on NFS mount point.
c.KubeSpawner.lifecycle_hooks = {
"postStart": {
"exec": {
"command": ["/bin/sh", "-c", f"chown -R 21:21 {os.environ.get('DOCKER_NOTEBOOK_DIR')}"]
}
}
}
c.KubeSpawner.extra_pod_config = {
"subdomain": "jupyter-single-user",
"hostname": "jupyter-{username}",
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{
"key": "jupyterNodeGroup",
"operator": "In",
"values": [
"true"
]
}
]
}
]
}
}
},
"tolerations": [
{
"key": "jupyter",
"operator": "Equal",
"value": "true",
"effect": "NoSchedule"
}
]
}
c.KubeSpawner.extra_labels = {
"name": "jupyter-single-user"
}
# For debugging arguments passed to spawned containers
c.KubeSpawner.debug = True
c.KubeSpawner.start_timeout = 120
# Spawned user pods reach the hub through the 'hub' service name on the cluster network
c.JupyterHub.hub_ip = os.environ.get('HOST', '0.0.0.0')
c.JupyterHub.hub_port = int(os.environ.get('PORT', 8081))
c.KubeSpawner.hub_connect_ip = 'hub'
# Authenticate users with the local Naas authenticator
c.JupyterHub.authenticator_class = 'naasauthenticator.NaasAuthenticator'
c.Authenticator.check_common_password = True
c.Authenticator.minimum_password_length = 10
c.Authenticator.allowed_failed_logins = 10
# Persist hub data on volume mounted inside container
data_dir = os.environ.get('DATA_VOLUME_CONTAINER', '/data')
c.JupyterHub.cookie_secret_file = os.path.join(data_dir,
'jupyterhub_cookie_secret')
c.JupyterHub.db_url = 'postgresql://postgres:{password}@{host}/{db}'.format(
host=os.environ['POSTGRES_HOST'],
password=os.environ['POSTGRES_PASSWORD'],
db=os.environ['POSTGRES_DB'],
)
c.JupyterHub.tornado_settings = {
'headers': {
'Content-Security-Policy': 'frame-ancestors self ' + os.environ.get('ALLOWED_IFRAME', '')
}
}
# Whitelist users and admins
c.Authenticator.whitelist = whitelist = set()
c.Authenticator.admin_users = admin = set()
c.JupyterHub.admin_access = True
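# The whitelist and admin sets above start empty; a minimal sketch (an
# assumption, not part of the original config) of populating them from a
# hypothetical comma-separated ADMIN_USERS environment variable:
for name in filter(None, os.environ.get('ADMIN_USERS', '').split(',')):
    whitelist.add(name.strip())
    admin.add(name.strip())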
|
python
|
#FUNรรES (FUNCTION)
#EXEMPLO SEM O USO DE FUNรรO :(
rappers_choice = ["L7NNON", "KB", "Trip Lee", "Travis Scott", ["Lecrae", "Projota", "Tupac"], "Don Omar"]
rappers_country = {"BR":["Hungria", "Kamau", "Projota", "Mano Brown", "Luo", "L7NNON"],
"US":["Tupac", "Drake", "Eminem", "KB", "Kanye West", "Lecrae", "Travis Scott", "Trip Lee"]}
for rp in rappers_choice:
if isinstance(rp, list):
for rp_one in rp:
if rp_one in rappers_country["BR"]:
print(f"Rapper BR: {rp_one}")
elif rp_one in rappers_country["US"]:
print(f"Rapper US: {rp_one}")
else:
print(f"Rapper not found in lists: {rp_one}")
else:
if rp in rappers_country["BR"]:
print(f"Rapper BR: {rp}")
elif rp in rappers_country["US"]:
print(f"Rapper US: {rp}")
else:
print(f"Rapper not found in lists: {rp}")
|
python
|
from __future__ import print_function
import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import color, feature, filters, io, measure, morphology, segmentation, img_as_ubyte, transform
import warnings
import math
import pandas as pd
import argparse
import subprocess
import re
import glob
from skimage.segmentation import clear_border
from ortools.graph import pywrapgraph
def buildFeatureFrame(filename,timepoint):
temp = np.asarray(np.load(filename,allow_pickle=True)).item()
imfilename = temp['filename']
img = io.imread(imfilename);
masks = clear_border(temp['masks'])
image_props = measure.regionprops_table(masks,
intensity_image=img,
properties=('label','area','filled_area', 'centroid',
'eccentricity','mean_intensity'))
im_df = pd.DataFrame(image_props)
im_df['time'] = timepoint
return(im_df)
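# buildFeatureFrame assumes the .npy file stores a dict with a 'filename' key
# (path to the intensity image) and a 'masks' key (labelled segmentation),
# read back via np.load(..., allow_pickle=True). A hedged usage sketch with a
# hypothetical path:
# frame0 = buildFeatureFrame("masks/timepoint_000_seg.npy", 0)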
def generateCandidates(image1, image2, im1_select, dist_multiplier=2):
delX = np.sqrt((image1['centroid-0'][im1_select]-image2['centroid-0'])**2+
(image1['centroid-1'][im1_select]-image2['centroid-1'])**2)
max_dist = dist_multiplier*min(delX)
candidates = np.array(delX[delX < max_dist].index)
return(candidates)
def generateLinks(filename_t0, filename_t1,timepoint, nnDist = 10,costMax=35, mN_Int = 10, mN_Ecc=4, mN_Area=25, mN_Disp=1):
ip0 = buildFeatureFrame(filename_t0,timepoint)
ip1 = buildFeatureFrame(filename_t1,timepoint+1)
arr = pd.DataFrame()
for i in np.array(ip0.index):
candidates = generateCandidates(ip0, ip1, i, dist_multiplier=nnDist)
canFRAME = pd.DataFrame(candidates)
canFRAME["1"] = i
arr = arr.append(canFRAME)
arr = arr.rename(columns={0: "t1", "1": "t0"})
arr = arr.reset_index(drop=True)
properties = pd.DataFrame()
mInt_0 = float(np.median(ip0.loc[:,['mean_intensity']]))
mInt_1 = float(np.median(ip1.loc[:,['mean_intensity']]))
for link in np.array(arr.index):
tmp_props_0 = (ip0.loc[arr.loc[link,["t0"]],:])
tmp_props_1 = (ip1.loc[arr.loc[link,["t1"]],:])
deltaInt = (np.abs((int(tmp_props_0["mean_intensity"])/mInt_0)-(int(tmp_props_1["mean_intensity"])/mInt_1))/
np.mean([(int(tmp_props_0["mean_intensity"])/mInt_0),(int(tmp_props_1["mean_intensity"])/mInt_1)]))
deltaArea = (np.abs(int(tmp_props_0['area']) - int(tmp_props_1['area']))/
np.mean([int(tmp_props_0["area"]),int(tmp_props_1["area"])]))
deltaEcc = np.absolute(float(tmp_props_0['eccentricity']) - float(tmp_props_1['eccentricity']))
deltaX = np.sqrt((int(tmp_props_0['centroid-0'])-int(tmp_props_1['centroid-0']))**2+
(int(tmp_props_0['centroid-1'])-int(tmp_props_1['centroid-1']))**2)
properties = properties.append(pd.DataFrame([int(tmp_props_0['label']),int(tmp_props_1['label']),
deltaInt ,deltaArea,deltaEcc,deltaX]).T)
properties = properties.rename(columns={0: "label_t0", 1: "label_t1", 2: "deltaInt",
3: "deltaArea", 4: "deltaEcc", 5: "deltaX"})
properties = properties.reset_index(drop=True)
properties["Cost"]=(properties.loc[:,"deltaInt"]*mN_Int)+(properties.loc[:,"deltaEcc"]*mN_Ecc)+(properties.loc[:,"deltaArea"]*mN_Area)+(properties.loc[:,"deltaX"]*mN_Disp)
properties["TransitionCapacity"]=1
properties = properties.loc[properties["Cost"]<costMax]
properties = properties.reset_index(drop=True)
return(properties)
def DivSimScore(daughterCell_1, daughterCell_2, FrameNext):
daughterStats_1 = FrameNext[(FrameNext['label'] == daughterCell_1)]
daughterStats_2 = FrameNext[(FrameNext['label'] == daughterCell_2)]
deltaInt = (np.abs((int(daughterStats_1["mean_intensity"]))-(int(daughterStats_2["mean_intensity"])))/
np.mean([(int(daughterStats_1["mean_intensity"])),(int(daughterStats_2["mean_intensity"]))]))
deltaArea = (np.abs(int(daughterStats_1['area']) - int(daughterStats_2['area']))/
np.mean([int(daughterStats_1["area"]),int(daughterStats_2["area"])]))
deltaEcc = np.absolute(float(daughterStats_1['eccentricity']) - float(daughterStats_2['eccentricity']))
deltaX = np.sqrt((int(daughterStats_1['centroid-0'])-int(daughterStats_2['centroid-0']))**2+
(int(daughterStats_1['centroid-1'])-int(daughterStats_2['centroid-1']))**2)
sims = pd.DataFrame([int(daughterCell_1),int(daughterCell_2),
deltaInt ,deltaArea,deltaEcc,deltaX]).T
sims = sims.rename(columns={0: "label_D1", 1: "label_D2", 2: "D2deltaInt",
3: "D2deltaArea", 4: "D2deltaEcc", 5: "D2deltaX"})
return(sims)
def DivSetupScore(motherCell, daughterCell_1, daughterCell_2, FrameCurr, FrameNext):
#determine similarities between mother and daughters
simDF = DivSimScore(daughterCell_1, daughterCell_2, FrameNext)
#determine relative area of mother compared to daughters
MotherArea = int(FrameCurr[(FrameCurr['label'] == motherCell)]['area'])
daughterArea_1 = int(FrameNext[(FrameNext['label'] == daughterCell_1)]['area'])
daughterArea_2 = int(FrameNext[(FrameNext['label'] == daughterCell_2)]['area'])
areaChange = MotherArea/(daughterArea_1 + daughterArea_2)
simDF["MDDeltaArea"] = areaChange
return(simDF)
def DivisionCanditates(propMtx, filename_t0,filename_t1,timepoint,mS_Area = 10, mS_Ecc = 2, mS_Int = 2, mS_Disp = 1, MDAR_thresh = 0.75, SDis_thresh = 20.0):
ip0 = buildFeatureFrame(filename_t0,timepoint)
ip1 = buildFeatureFrame(filename_t1,timepoint+1)
Mothers = np.unique(propMtx.loc[:,['label_t0']])
DivCandidacy = pd.DataFrame()
for cell in Mothers:
DaughtersPossible = (propMtx[(propMtx['label_t0'] == cell)].loc[:,'label_t1'])
DaughtersPairs = np.array(np.meshgrid(DaughtersPossible, DaughtersPossible)).T.reshape(-1,2)
Sisters = np.unique(np.sort(DaughtersPairs),axis=0)
for pair in range(Sisters.shape[0]):
if (Sisters[pair,0] != Sisters[pair,1]):
tmpScoreSetup = (DivSetupScore(cell,Sisters[pair,0], Sisters[pair,1], ip0,ip1))
LogicMDAR = (tmpScoreSetup["MDDeltaArea"]>MDAR_thresh)
ScoreSDis = (mS_Int*tmpScoreSetup["D2deltaInt"]) + (mS_Area*tmpScoreSetup["D2deltaArea"]) + (mS_Ecc*tmpScoreSetup["D2deltaEcc"]) + (mS_Disp*tmpScoreSetup["D2deltaX"])
LogicSDis = (ScoreSDis<SDis_thresh)
tmpCandidacy = pd.DataFrame([cell,Sisters[pair,0],Sisters[pair,1],(LogicSDis&LogicMDAR).bool()]).T
DivCandidacy = DivCandidacy.append(tmpCandidacy)
DivCandidacy = DivCandidacy.rename(columns={0: "Mother", 1: "Daughter1", 2: "Daughter2",3: "Div"})
DivCandidacy = DivCandidacy.reset_index(drop=True)
# select true values
DivSelect = DivCandidacy[(DivCandidacy['Div'] == True)]
DivConnects_1 = DivSelect[['Mother','Daughter1','Div']]
DivConnects_2 = DivSelect[['Mother','Daughter2','Div']]
DivConnects_1 = DivConnects_1.rename(columns={'Mother': "label_t0", 'Daughter1': "label_t1"})
DivConnects_2 = DivConnects_2.rename(columns={'Mother': "label_t0", 'Daughter2': "label_t1"})
DivConnects = pd.concat([DivConnects_1,DivConnects_2])
DivConnects = DivConnects.reset_index(drop=True)
return(DivConnects)
def UpdateConnectionsDiv(propMtx,DivCandidatesMtx):
propMtx.loc[propMtx['label_t0'].isin(np.unique(DivCandidatesMtx['label_t0'])),['TransitionCapacity']] = 2
for div in range(DivCandidatesMtx.shape[0]):
tmp_prop = propMtx.loc[(DivCandidatesMtx.loc[div,'label_t0'] ==propMtx['label_t0'])&(DivCandidatesMtx.loc[div,'label_t1'] ==propMtx['label_t1']),]
old_score = float(tmp_prop.loc[:,'Cost'])
new_score = (old_score/2)
propMtx.loc[(DivCandidatesMtx.loc[div,'label_t0'] ==propMtx['label_t0'])&(DivCandidatesMtx.loc[div,'label_t1'] ==propMtx['label_t1']),'Cost'] = new_score
return(propMtx)
def SolveMinCostTable(filename_t0, filename_t1, DivisionTable,timepoint, OpeningCost = 30, ClosingCost = 30):
#rename
ip0 = buildFeatureFrame(filename_t0,timepoint)
ip0 = ip0.rename(columns={"label" : "label_t0"})
ip1 = buildFeatureFrame(filename_t1,timepoint+1)
ip1 = ip1.rename(columns={"label" : "label_t1"})
ip0["slabel_t0"] = np.array(range(ip0.label_t0.shape[0]))+1
i0max = np.max(np.asarray(ip0["slabel_t0"]))
ip1["slabel_t1"] = np.array(range(i0max,i0max+ip1.label_t1.shape[0]))+1
i1max = np.max(np.asarray(ip1["slabel_t1"]))
i0_translation = ip0[["label_t0","slabel_t0"]]
i1_translation = ip1[["label_t1","slabel_t1"]]
result_tmp = pd.merge(DivisionTable, i0_translation, on=['label_t0'])
result = pd.merge(result_tmp, i1_translation, on=['label_t1'])
result_shorthand = result[['slabel_t0','slabel_t1','Cost','TransitionCapacity']]
transNodes0 = np.array(result_shorthand['slabel_t0']) ;
transNodes1 = np.array(result_shorthand['slabel_t1']) ;
transCosts = np.array(result_shorthand['Cost']) ;
transCaps = np.repeat(1,transNodes0.size) ;
sourceNodes0 = np.repeat([0],i1max)
sourceNodes1 = np.array(range(i1max))+1
sourceCosts = np.concatenate((np.repeat(1,ip0.shape[0]),np.repeat(OpeningCost,ip1.shape[0])), axis=None)
#Source capacities are dictated by which node could be splitting. Source capacity = 2 if there was a division candidate
tmpUnique0 = result_shorthand[["slabel_t0","TransitionCapacity"]].drop_duplicates()
HighCaps = tmpUnique0.loc[tmpUnique0["TransitionCapacity"]==2,]
LowCaps = pd.DataFrame(i0_translation).copy(deep=True)
LowCaps['Cap'] = 1
LowCaps.loc[LowCaps['slabel_t0'].isin(np.array(HighCaps['slabel_t0'])),'Cap'] = 2
sourceCaps = np.concatenate((np.array(LowCaps['Cap']),np.repeat(1,ip1.shape[0])), axis=None)
sinkNodes0 = np.array(range(i1max))+1
sinkNodes1 = np.repeat([i1max+1],i1max)
sinkCosts = np.concatenate((np.repeat(ClosingCost,ip0.shape[0]),np.repeat(1,ip1.shape[0])), axis=None)
sinkCaps = np.repeat(1,i1max)
# Define the directed graph for the flow.
min_cost_flow = pywrapgraph.SimpleMinCostFlow()
start_nodes = np.concatenate((sourceNodes0, transNodes0, sinkNodes0)).tolist()
end_nodes = np.concatenate((sourceNodes1, transNodes1, sinkNodes1)).tolist()
capacities = np.concatenate((sourceCaps, transCaps, sinkCaps)).tolist()
costs = np.concatenate((sourceCosts, transCosts, sinkCosts)).tolist()
source = 0
sink = i1max+1
supply_amount = np.max([i0max,i1max-i0max])
supplies = np.concatenate(([supply_amount],np.repeat(0,i1max),[-1*supply_amount])).tolist()
# Add each arc.
for i in range(len(start_nodes)):
min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i], end_nodes[i],capacities[i], int(costs[i]))
# Add node supplies.
for i in range(len(supplies)):
min_cost_flow.SetNodeSupply(i, supplies[i])
ArcFrame = pd.DataFrame()
# Find the minimum cost flow between the source node and the sink node.
if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:
print('Minimum cost:', min_cost_flow.OptimalCost())
for i in range(min_cost_flow.NumArcs()):
cost = min_cost_flow.Flow(i) * min_cost_flow.UnitCost(i)
ArcFrame = ArcFrame.append(pd.DataFrame([min_cost_flow.Tail(i),
min_cost_flow.Head(i),
min_cost_flow.Flow(i),
min_cost_flow.Capacity(i),
cost]).T)
else:
print('There was an issue with the min cost flow input.')
ArcFrame = ArcFrame.rename(columns={0:'start',1:'end',2:"Flow",3:"Capacity",4:"Cost"})
#ArcFrame = ArcFrame.reset_index(drop=True)
FinalFrame = ArcFrame.loc[ArcFrame["Flow"]!=0,]
FinalFrame = FinalFrame.reset_index(drop=True)
return(FinalFrame)
def ReviewCostTable(minCostFlowtable, timepoint, OpeningCost=30,ClosingCost=30):
sink = max(minCostFlowtable["end"])
Transitions = minCostFlowtable.loc[(minCostFlowtable["start"]!=0)&(minCostFlowtable["end"]!=sink),]
trans_start_nodes = np.unique(Transitions["start"])
trans_end_nodes = np.unique(Transitions["end"])
#find nodes that either appear (no start) or disappear (no end)
appearing = minCostFlowtable[(~minCostFlowtable.start.isin(trans_start_nodes))&
(~minCostFlowtable.end.isin(trans_start_nodes))&
(~minCostFlowtable.start.isin(trans_end_nodes))&
(~minCostFlowtable.end.isin(trans_end_nodes))]
appearing = appearing.loc[(appearing["Cost"] == OpeningCost)|(appearing["Cost"] == ClosingCost)]
appearing = appearing.reset_index(drop=True)
appearFrame = pd.DataFrame()
for i in range(appearing.shape[0]):
if(appearing.loc[i,"start"] == 0):
appearFrame = appearFrame.append(pd.DataFrame([-1,appearing.loc[i,"end"]]).T)
elif(appearing.loc[i,"end"] == sink):
appearFrame = appearFrame.append(pd.DataFrame([appearing.loc[i,"end"],-1]).T)
appearFrame = appearFrame.rename(columns={0:"slabel_t0",1:"slabel_t1"})
appearFrame = appearFrame.reset_index(drop=True)
#Assemble
transFrame = Transitions.loc[:,["start","end"]]
transFrame = transFrame.rename(columns={"start":"slabel_t0","end":"slabel_t1"})
totalFrame = pd.concat([appearFrame,transFrame])
totalFrame = totalFrame.reset_index(drop=True)
totalFrame["timepoint"] = timepoint
return(totalFrame)
def TranslationTable(filename_t0, filename_t1, DivisionTable,timepoint):
#rename
ip0 = buildFeatureFrame(filename_t0,timepoint)
ip0 = ip0.rename(columns={"label" : "label_t0"})
ip1 = buildFeatureFrame(filename_t1,timepoint+1)
ip1 = ip1.rename(columns={"label" : "label_t1"})
ip0["slabel_t0"] = np.array(range(ip0.label_t0.shape[0]))+1
i0max = np.max(np.asarray(ip0["slabel_t0"]))
ip1["slabel_t1"] = np.array(range(i0max,i0max+ip1.label_t1.shape[0]))+1
i1max = np.max(np.asarray(ip1["slabel_t1"]))
i0_translation = ip0[["label_t0","slabel_t0"]]
i1_translation = ip1[["label_t1","slabel_t1"]]
dvtabDF = DivisionTable
result_tmp = pd.merge(dvtabDF, i0_translation, on=['label_t0'])
translation_table = pd.merge(result_tmp, i1_translation, on=['label_t1'])
#result_shorthand = result[['slabel_t0','slabel_t1','Cost','TransitionCapacity']]
startLabels = translation_table.loc[:,["label_t0","slabel_t0"]]
startLabels["timepoint"] = timepoint
startLabels["frame"] = timepoint+1
endLabels = translation_table.loc[:,["label_t1","slabel_t1"]]
endLabels["timepoint"] = timepoint+1
endLabels["frame"] = timepoint+2
startLabels = startLabels.rename(columns={"label_t0":"label","slabel_t0":"slabel"})
endLabels = endLabels.rename(columns={"label_t1":"label","slabel_t1":"slabel"})
allLabels = pd.concat([startLabels,endLabels])
allLabels = allLabels.reset_index(drop=True)
allLabels = allLabels.astype( 'int64')
allLabels["Master_ID"] = allLabels["timepoint"].astype('str')+"_"+allLabels["label"].astype('str')
allLabels = allLabels.astype({"Master_ID":'str'})
allLabels["RajTLG_ID"] = allLabels["frame"]*int(10**(np.ceil(np.log10(max(allLabels['slabel'])))+2))+allLabels["label"]
allLabels = allLabels.drop_duplicates()
allLabels = allLabels.reset_index(drop=True)
return(allLabels)
def TranslateConnections(ConnectionTable, TranslationTable, timepoint, preference = "Master_ID"):
subTranslationTable_0 = TranslationTable.loc[:,[preference,"slabel"]]
subTranslationTable_0['slabel_t0'] = subTranslationTable_0['slabel']
subTranslationTable_1 = TranslationTable.loc[:,[preference,"slabel"]]
subTranslationTable_1['slabel_t1'] = subTranslationTable_1['slabel']
merge_0 = pd.merge(ConnectionTable, subTranslationTable_0, on="slabel_t0")
merge = pd.merge(merge_0, subTranslationTable_1, on="slabel_t1")
pref = str(preference)
result = merge.loc[:,[pref+"_x",pref+"_y"]]
result = result.drop_duplicates()
result = result.dropna(thresh=1)
result = result.reset_index(drop=True)
result = result.rename(columns = {(pref+"_x") : (pref+"_"+str(timepoint)), (pref+"_y") : (pref+"_"+str(timepoint+1))})
return(result)
def RajTLG_wrap(filename_t0, filename_t1,timepoint,ConnectionTable,TranslationTable):
frame0 = buildFeatureFrame(filename_t0,timepoint);
frame1 = buildFeatureFrame(filename_t1,timepoint+1);
frames = pd.concat([frame0,frame1])
frames["timepoint"] = frames["time"]
InfoDF = pd.merge(frames,TranslationTable, on=['label','timepoint'])
RajTLG_translation = TranslateConnections(ConnectionTable=ConnectionTable, TranslationTable=TranslationTable, timepoint=timepoint, preference="RajTLG_ID")
RajTLGFrame = pd.DataFrame()
if (timepoint == 0):
for i in range(RajTLG_translation.shape[0]):
tmpID = RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)]
tmpFrame = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)],"frame"])
tmpX = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)],"centroid-1"])
tmpY = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint)],"centroid-0"])
tmpParent = "NaN"
RajTLGFrame = RajTLGFrame.append(pd.DataFrame([tmpID,tmpFrame,tmpX,tmpY,tmpParent]).T)
for i in range(RajTLG_translation.shape[0]):
tmpID = RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)]
tmpFrame = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)],"frame"])
tmpX = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)],"centroid-1"])
tmpY = int(InfoDF.loc[InfoDF["RajTLG_ID"] == RajTLG_translation.loc[i,"RajTLG_ID"+"_"+str(timepoint+1)],"centroid-0"])
tmpParent = int(RajTLG_translation.loc[RajTLG_translation["RajTLG_ID"+"_"+str(timepoint+1)] == tmpID,
"RajTLG_ID"+"_"+str(timepoint)])
RajTLGFrame = RajTLGFrame.append(pd.DataFrame([tmpID,tmpFrame,tmpX,tmpY,tmpParent]).T)
RajTLGFrame = RajTLGFrame.reset_index(drop=True)
RajTLGFrame = RajTLGFrame.rename(columns={0:"pointID", 1:"frameNumber",
2:"xCoord",3:"yCoord",4:"parentID"})
RajTLGFrame["annotation"] = "none"
#RajTLGFrame.to_csv(outfilename,index=False)
return(RajTLGFrame)
def HCR_connect(sampleName, TLlast_mask, HCR_mask, timepoint, nnDist=3, costMax=35, mN_Int=10, mN_Ecc=4, mN_Area=25, mN_Disp=1, mS_Area = 10, mS_Ecc = 2, mS_Int = 2, mS_Disp = 1, MDAR_thresh = 0.75, SDis_thresh = 20.0, openingCost = 30, closingCost = 30):
propies = generateLinks(filename_t0 = TLlast_mask, filename_t1 = HCR_mask,
timepoint = timepoint, nnDist = nnDist,
costMax = costMax, mN_Int = mN_Int,
mN_Ecc = mN_Ecc, mN_Area = mN_Area,
mN_Disp = mN_Disp)
tmpdivs = DivisionCanditates(propMtx = propies,
filename_t0 = TLlast_mask, filename_t1 = HCR_mask,
MDAR_thresh = MDAR_thresh, SDis_thresh = SDis_thresh,
mS_Disp = mS_Disp, mS_Area = mS_Area,
mS_Ecc = mS_Ecc, mS_Int = mS_Int,
timepoint = timepoint)
finaldivs = UpdateConnectionsDiv(propies, tmpdivs)
minCost_table = SolveMinCostTable(TLlast_mask, HCR_mask,
DivisionTable=finaldivs,
timepoint=timepoint,
OpeningCost = openingCost,
ClosingCost = closingCost)
finTable = ReviewCostTable(minCostFlowtable = minCost_table, timepoint=timepoint)
translation_table = TranslationTable(TLlast_mask, HCR_mask, DivisionTable=finaldivs,
timepoint=timepoint)
masterConnects_Raj = TranslateConnections(finTable, translation_table, timepoint=timepoint, preference="RajTLG_ID")
masterConnects_Master = TranslateConnections(finTable, translation_table, timepoint=timepoint, preference="Master_ID")
col_df = finTable[(finTable['slabel_t0']!=-1)&(finTable['slabel_t1']!=-1)]
col_df.to_csv('results/'+sampleName+'/HCR/'+sampleName+'_HCR_connect.csv', index=False)
translation_table.to_csv('results/'+sampleName+'/HCR/'+sampleName+'_HCR_translation.csv', index=False)
masterConnects_Raj.to_csv('results/'+sampleName+'/HCR/'+sampleName+'_HCR_connections_RajLab.csv', index=False)
masterConnects_Master.to_csv('results/'+sampleName+'/HCR/'+sampleName+'_HCR_connections_MasterID.csv', index=False)
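# A minimal usage sketch of the pipeline above (an assumption, not part of the
# original script): the sample name and mask paths are hypothetical, and the
# results/<sampleName>/HCR/ directory must already exist because HCR_connect
# writes its CSV outputs there.
if __name__ == "__main__":
    HCR_connect(sampleName="sampleA",
                TLlast_mask="masks/sampleA_TL_final_seg.npy",
                HCR_mask="masks/sampleA_HCR_seg.npy",
                timepoint=10)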
|
python
|