#!/usr/bin/env python3
# If imports fail, try installing scispacy and the model:
# pip3 install scispacy
# pip3 install https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.2.0/en_core_sci_md-0.2.0.tar.gz
import sys
import os
import re
import scispacy
import spacy
from sspostproc import refine_split
DOC_ID_RE = re.compile(r'^-+ DOC-ID: ".*" -+$')
pipeline = spacy.load('en_core_sci_md', disable=['parser', 'tagger', 'ner'])
# spaCy 2.x API, matching the pinned scispacy model above; spaCy 3.x would use
# pipeline.add_pipe('sentencizer') instead.
pipeline.add_pipe(pipeline.create_pipe('sentencizer'))
def argparser():
from argparse import ArgumentParser
ap = ArgumentParser()
ap.add_argument('text', nargs='+')
return ap
def sentences(passage, refine=True):
split = []
if not passage.endswith('\n'):
passage += '\n' # spacy needs the newline
analyzed = pipeline(passage)
for sentence in analyzed.sents:
text = str(sentence)
if text and not text.isspace():
split.append(text.rstrip('\n'))
if refine:
split = refine_split('\n'.join(split)).split('\n')
return split
def main(argv):
args = argparser().parse_args(argv[1:])
for path in args.text:
with open(path) as f:
for ln, l in enumerate(f, start=1):
l = l.rstrip()
if DOC_ID_RE.match(l):
print(l)
                elif not l:  # l was rstripped above, so whitespace-only lines are already empty
print(l)
else:
for s in sentences(l):
print(s)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
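# Hedged usage sketch (the script file name is illustrative, not from the source):
#   $ python3 sentence_split.py abstracts.txt > abstracts.sentences.txt
# Each non-empty input line is segmented independently; DOC-ID marker lines and
# blank lines pass through unchanged, so document boundaries survive the split.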
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 18:17:10 2020
@author: Manuel Camargo
"""
import tests.read_log as rl
import tests.analyzer_test as ats
import tests.timeit_tests as tit
import tests.timeline_split_tests as tst
if __name__ == "__main__":
ats.timeseries_test()
ats.log_test()
tit.execute_tests()
tst.split_log_test()
rl.read_log_test()
|
python
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
###########################################################################
# Copyright (C) 2013-2017 by Caspar. All rights reserved.
# File Name: fzcmeans.py
# Author: Shankai Yan
# E-mail: [email protected]
# Created Time: 2017-01-23 12:52:12
###########################################################################
''' Latent Dirichlet Allocation Clustering Wrapper '''
from sklearn.decomposition import LatentDirichletAllocation
class LDACluster(LatentDirichletAllocation):
    def __init__(self, n_clusters=10, **kwargs):
        self.n_clusters = n_clusters
        # `n_topics` was renamed `n_components` in scikit-learn 0.19 and removed
        # in 0.21; this wrapper targets the older API.
        super(LDACluster, self).__init__(n_topics=n_clusters, **kwargs)
def fit_predict(self, X, y=None, fuzzy=False):
        '''Compute cluster centers and predict cluster index for each sample.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.
        Returns
        -------
        u : array, shape [n_samples, n_clusters] or [n_samples,]
            Predicted fuzzy c-partitioned matrix or most likely cluster labels.
        '''
X[X < 0] = 0
self.fit_u_ = self.u_ = super(LDACluster, self).fit_transform(X)
self.fit_labels_ = self.labels_ = self.fit_u_.argmax(axis=1)
        if fuzzy:
return self.fit_u_
return self.fit_labels_
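# Hedged usage sketch (toy corpus; assumes a non-negative document-term matrix
# such as CountVectorizer output, as LDA requires):
# from sklearn.feature_extraction.text import CountVectorizer
# X = CountVectorizer().fit_transform(['a b c', 'c d e', 'a e f'])
# hard_labels = LDACluster(n_clusters=2).fit_predict(X)              # [n_samples]
# memberships = LDACluster(n_clusters=2).fit_predict(X, fuzzy=True)  # [n_samples, n_clusters]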
|
python
|
# Exercise: read a person's full name, then print the first and last names separately.
n = str(input('\nType your full name:\n>>> ')).strip().title()
n1 = n.split()
print('\nYour first name is {} and your last name is {}.\n'.format(n1[0], n1[-1]))
|
python
|
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
from page_objects.login_page import LoginPage
from tools.webdriver_factory import WebdriverFactory
class TestLogin:
def setup_method(self, method):
self.driver = WebdriverFactory().get_webdriver()
self.driver.get("https://test.amberum.com")
LoginPage(driver=self.driver).login()
def teardown_method(self, method):
self.driver.quit()
    def test_ustawienia(self):
        self.driver.find_element(By.XPATH, "//*[@id='settings']/span[1]").click()
        nazwa = self.driver.find_element(By.XPATH, "//h2").text
        assert nazwa == "Lokacje"
    def test_ustawienia_lokacje(self):
        self.driver.find_element(By.XPATH, "//*[@id='settings']/span[1]").click()
        WebDriverWait(self.driver, 10).until(
            ec.element_to_be_clickable((By.XPATH, "//*[@id='location']")))
        self.driver.find_element(By.XPATH, "//*[@id='location']").click()
        nazwa = self.driver.find_element(By.XPATH, "//h2").text
        assert nazwa == "Lokacje"
    def test_ustawienia_produkcja(self):
        self.driver.find_element(By.XPATH, "//*[@id='settings']/span[1]").click()
        WebDriverWait(self.driver, 10).until(
            ec.element_to_be_clickable((By.XPATH, "//*[@id='settings-production']")))
        self.driver.find_element(By.XPATH, "//*[@id='settings-production']").click()
        nazwa = self.driver.find_element(By.XPATH, "//h2").text
        assert nazwa == "Produkcja"
    def test_ustawienia_stawki(self):
        self.driver.find_element(By.XPATH, "//*[@id='settings']/span[1]").click()
        WebDriverWait(self.driver, 10).until(
            ec.element_to_be_clickable((By.XPATH, "//*[@id='settings-rates']")))
        self.driver.find_element(By.XPATH, "//*[@id='settings-rates']").click()
        nazwa = self.driver.find_element(By.XPATH, "//h2").text
        assert nazwa == "Stawki pracy"
|
python
|
class Config(object):
DEBUG = False
SQLALCHEMY_DATABASE_URI = ''
SECRET_KEY = 'dbbc933946e04a729b17c0625b72e3db'
MAIL_SERVER = 'smtp.gmail.com'
MAIL_USERNAME = ''
MAIL_PASSWORD = ''
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USE_TLS = False
MAIL_SUPPRESS_SEND = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
    CELERY_BROKER_URL = ''  # get this from CloudAMQP
    YT_API_KEY = ""  # YouTube API key
HOST_PLAYLISTS = ["PLiQ766zSC5jPIKibTa5qtXpwgwEBalDV4","PLMCXHnjXnTnvo6alSjVkgxV-VH6EPyvoX","PLl0KD3g-oDOHElCF7S7q6CRGz1qG8vQkS"]
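# Hedged wiring sketch (the 'config' module path is illustrative): a Flask app
# would typically load this class via app.config.from_object, e.g.
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object('config.Config')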
|
python
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Nicole Thomas <[email protected]>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON,
)
ensure_in_syspath('../')
# Import Salt libs
from salt import state
@skipIf(NO_MOCK, NO_MOCK_REASON)
class StateCompilerTestCase(TestCase):
'''
TestCase for the state compiler.
'''
def test_format_log_non_ascii_character(self):
'''
Tests running a non-ascii character through the state.format_log
function. See Issue #33605.
'''
# There is no return to test against as the format_log
# function doesn't return anything. However, we do want
# to make sure that the function doesn't stacktrace when
# called.
ret = {'changes': {u'Français': {'old': 'something old',
'new': 'something new'}},
'result': True}
state.format_log(ret)
if __name__ == '__main__':
from integration import run_tests
run_tests(StateCompilerTestCase, needs_daemon=False)
|
python
|
import os
from distutils.version import StrictVersion
from pathlib import Path
from warnings import warn
from ..utils.translations import trans
try:
from qtpy import API_NAME, QtCore
except Exception as e:
if 'No Qt bindings could be found' in str(e):
raise type(e)(
trans._(
"No Qt bindings could be found.\n\nnapari requires either PyQt5 or PySide2 to be installed in the environment.\nTo install the default backend (currently PyQt5), run \"pip install napari[all]\" \nYou may also use \"pip install napari[pyside2]\"for Pyside2, or \"pip install napari[pyqt5]\" for PyQt5",
deferred=True,
)
) from e
raise
if API_NAME == 'PySide2':
    # Set the Qt plugin path appropriately when using PySide2. This works
    # around conflicts when both PyQt5 and PySide2 are installed.
import PySide2
os.environ['QT_PLUGIN_PATH'] = str(
Path(PySide2.__file__).parent / 'Qt' / 'plugins'
)
# If the installed Qt is older than the version napari was tested with, warn:
if StrictVersion(QtCore.__version__) < StrictVersion('5.12.3'):
warn_message = trans._(
"napari was tested with QT library `>=5.12.3`.\nThe version installed is {version}. Please report any issues with this specific QT version at https://github.com/Napari/napari/issues.",
deferred=True,
version=QtCore.__version__,
)
warn(message=warn_message)
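# Note: distutils.version.StrictVersion is deprecated on recent Pythons; an
# equivalent sketch would compare packaging.version.parse(QtCore.__version__)
# against packaging.version.parse('5.12.3').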
from .qt_event_loop import get_app, gui_qt, quit_app, run
from .qt_main_window import Window
from .widgets.qt_range_slider import QHRangeSlider, QVRangeSlider
|
python
|
__version__ = '0.1.1'
from .tools import underline2hump, hump2underline, json_hump2underline
|
python
|
import pywikibot
import redis
from api.translation_v2.core import Translation
from page_lister import get_pages_from_category
from redis_wikicache import RedisPage, RedisSite
if __name__ == '__main__':
# print(translate_using_postgrest_json_dictionary('mat', '[[mine|Mine]]', 'en', 'mg'))
# print(translate_using_postgrest_json_dictionary('solo-ana', '[[mine|Mine]]', 'en', 'mg'))
# print(translate_using_bridge_language('mat', 'castigar', 'es', 'mg'))
# print(translate_using_bridge_language('ana', 'Schau', 'de', 'mg'))
# print(translate_using_bridge_language('mat', 'schweben', 'de', 'mg'))
# print(translate_using_convergent_definition('ana', 'hover', 'en', 'mg'))
# print(translate_using_postgrest_json_dictionary('mat', 'flood', 'en', 'fr'))
t = Translation()
site = RedisSite('en', 'wiktionary')
# wp = RedisPage(site, '')
errored = []
errors = 0
k = 100
entries = 0
# for v in '잠자다,자다,앉다,睡眠,眠る,眠,寝る,眠,微睡む,座る,居,やすむ,睡目'.split(','):
# entries += t.process_wiktionary_wiki_page(RedisPage(RedisSite('en', 'wiktionary'), v))
# for v in '平均'.split(','):
# entries += t.process_wiktionary_wiki_page(RedisPage(RedisSite('en', 'wiktionary'), v, offline=False))
for wp in get_pages_from_category('en', 'Chinese verbs'):
try:
            t.process_wiktionary_wiki_page(
                RedisPage(RedisSite('en', 'wiktionary'), wp.title(), offline=False))
except (pywikibot.Error, redis.exceptions.TimeoutError):
pass
# for i in range(k):
# try:
# wp = site.random_page()
# entries += t.process_wiktionary_wiki_page(wp)
# except Exception as exception:
# errors += 1
# print(exception)
# errored.append((wp, exception))
# else:
# if not i % 200:
# print(i, 'entries', entries, '/ process error rate:', errors*100. / (i+1))
    # Note: `errors` and `entries` are only updated by the commented-out
    # random-page experiment above, so these summaries report zeros for the
    # category run.
    print('process error rate:', errors * 100. / k)
    print('entries created:', entries)
print(errored)
|
python
|
keysDict = {
'YawLeftButton': 0x1E, # Key A
'YawRightButton': 0x20, # Key D
'PitchUpButton': 0xB5, # NUM /
'PitchDownButton': 0x37, # NUM *
'RollLeftButton': 0x4E, # NUM +
'RollRightButton': 0x9C, # NUM ENTER
'EnableFSD': 0x24, # Key J
'EngineBoost': 0x0F, # Key Tab
'Speed100': 0x47, # NUM 7
'SpeedZero': 0x2D, # Key X
'Speed50': 0x48, # NUM 8
'ThrustUp': 0x13, # Key R
    'space': 0x39,  # Key SPACE
'TargetAhead': 0x14, # Key T
'UI_OpenGalaxyMap': 0x4A, # NUM -
'UI_NextTab': 0x12, # Key E
'UI_PrevTab': 0x10, # Key Q
'UI_Up': 0x11, # Key W
'UI_Down': 0x1F, # Key S
'UI_Left': 0x1E, # Key A
'UI_Right': 0x20, # Key D
'UI_1': 0x02, # Key 1
'UI_2': 0x03, # Key 2
'UI_3': 0x04, # Key 3
'UI_4': 0x05, # Key 4
'enter': 0x1C, # Key RETURN(ENTER)
'esc': 0x01, # Key ESC
'pause': 0x19 # Key P
}
# Scancode Reference
# SCANCODE = {
# 'DIK_ESCAPE' : 0x01,
# 'DIK_1' : 0x02,
# 'DIK_2' : 0x03,
# 'DIK_3' : 0x04,
# 'DIK_4' : 0x05,
# 'DIK_5' : 0x06,
# 'DIK_6' : 0x07,
# 'DIK_7' : 0x08,
# 'DIK_8' : 0x09,
# 'DIK_9' : 0x0A,
# 'DIK_0' : 0x0B,
# 'DIK_MINUS' : 0x0C, # - on main keyboard */
# 'DIK_EQUALS' : 0x0D,
# 'DIK_BACK' : 0x0E, # backspace */
# 'DIK_TAB' : 0x0F,
# 'DIK_Q' : 0x10,
# 'DIK_W' : 0x11,
# 'DIK_E' : 0x12,
# 'DIK_R' : 0x13,
# 'DIK_T' : 0x14,
# 'DIK_Y' : 0x15,
# 'DIK_U' : 0x16,
# 'DIK_I' : 0x17,
# 'DIK_O' : 0x18,
# 'DIK_P' : 0x19,
# 'DIK_LBRACKET' : 0x1A,
# 'DIK_RBRACKET' : 0x1B,
# 'DIK_RETURN' : 0x1C, # Enter on main keyboard */
# 'DIK_LCONTROL' : 0x1D,
# 'DIK_A' : 0x1E,
# 'DIK_S' : 0x1F,
# 'DIK_D' : 0x20,
# 'DIK_F' : 0x21,
# 'DIK_G' : 0x22,
# 'DIK_H' : 0x23,
# 'DIK_J' : 0x24,
# 'DIK_K' : 0x25,
# 'DIK_L' : 0x26,
# 'DIK_SEMICOLON' : 0x27,
# 'DIK_APOSTROPHE' : 0x28,
# 'DIK_GRAVE' : 0x29, # accent grave */
# 'DIK_LSHIFT' : 0x2A,
# 'DIK_BACKSLASH' : 0x2B,
# 'DIK_Z' : 0x2C,
# 'DIK_X' : 0x2D,
# 'DIK_C' : 0x2E,
# 'DIK_V' : 0x2F,
# 'DIK_B' : 0x30,
# 'DIK_N' : 0x31,
# 'DIK_M' : 0x32,
# 'DIK_COMMA' : 0x33,
# 'DIK_PERIOD' : 0x34, # . on main keyboard */
# 'DIK_SLASH' : 0x35, # / on main keyboard */
# 'DIK_RSHIFT' : 0x36,
# 'DIK_NUMPAD_MULTIPLY' : 0x37, # * on numeric keypad */
# 'DIK_LMENU' : 0x38, # left Alt */
# 'DIK_SPACE' : 0x39,
# 'DIK_CAPITAL' : 0x3A,
# 'DIK_F1' : 0x3B,
# 'DIK_F2' : 0x3C,
# 'DIK_F3' : 0x3D,
# 'DIK_F4' : 0x3E,
# 'DIK_F5' : 0x3F,
# 'DIK_F6' : 0x40,
# 'DIK_F7' : 0x41,
# 'DIK_F8' : 0x42,
# 'DIK_F9' : 0x43,
# 'DIK_F10' : 0x44,
# 'DIK_NUMLOCK' : 0x45,
# 'DIK_SCROLL' : 0x46, # Scroll Lock */
# 'DIK_NUMPAD7' : 0x47,
# 'DIK_NUMPAD8' : 0x48,
# 'DIK_NUMPAD9' : 0x49,
# 'DIK_SUBTRACT' : 0x4A, # - on numeric keypad */
# 'DIK_NUMPAD4' : 0x4B,
# 'DIK_NUMPAD5' : 0x4C,
# 'DIK_NUMPAD6' : 0x4D,
# 'DIK_NUMPAD_ADD' : 0x4E, # + on numeric keypad */
# 'DIK_NUMPAD1' : 0x4F,
# 'DIK_NUMPAD2' : 0x50,
# 'DIK_NUMPAD3' : 0x51,
# 'DIK_NUMPAD0' : 0x52,
# 'DIK_DECIMAL' : 0x53, # . on numeric keypad */
# 'DIK_F11' : 0x57,
# 'DIK_F12' : 0x58,
# 'DIK_F13' : 0x64, # (NEC PC98) */
# 'DIK_F14' : 0x65, # (NEC PC98) */
# 'DIK_F15' : 0x66, # (NEC PC98) */
# 'DIK_KANA' : 0x70, # (Japanese keyboard) */
# 'DIK_CONVERT' : 0x79, # (Japanese keyboard) */
# 'DIK_NOCONVERT' : 0x7B, # (Japanese keyboard) */
# 'DIK_YEN' : 0x7D, # (Japanese keyboard) */
# 'DIK_NUMPADEQUALS' : 0x8D, # : on numeric keypad (NEC PC98) */
# 'DIK_CIRCUMFLEX' : 0x90, # (Japanese keyboard) */
# 'DIK_AT' : 0x91, # (NEC PC98) */
# 'DIK_COLON' : 0x92, # (NEC PC98) */
# 'DIK_UNDERLINE' : 0x93, # (NEC PC98) */
# 'DIK_KANJI' : 0x94, # (Japanese keyboard) */
# 'DIK_STOP' : 0x95, # (NEC PC98) */
# 'DIK_AX' : 0x96, # (Japan AX) */
# 'DIK_UNLABELED' : 0x97, # (J3100) */
# 'DIK_NUMPADENTER' : 0x9C, # Enter on numeric keypad */
# 'DIK_RCONTROL' : 0x9D,
# 'DIK_NUMPADCOMMA' : 0xB3, # , on numeric keypad (NEC PC98) */
# 'DIK_NUMPAD_DIVIDE' : 0xB5, # / on numeric keypad */
# 'DIK_SYSRQ' : 0xB7,
# 'DIK_RMENU' : 0xB8, # right Alt */
# 'DIK_HOME' : 0xC7, # Home on arrow keypad */
# 'DIK_UP' : 0xC8, # UpArrow on arrow keypad */
# 'DIK_PRIOR' : 0xC9, # PgUp on arrow keypad */
# 'DIK_LEFT' : 0xCB, # LeftArrow on arrow keypad */
# 'DIK_RIGHT' : 0xCD, # RightArrow on arrow keypad */
# 'DIK_END' : 0xCF, # End on arrow keypad */
# 'DIK_DOWN' : 0xD0, # DownArrow on arrow keypad */
# 'DIK_NEXT' : 0xD1, # PgDn on arrow keypad */
# 'DIK_INSERT' : 0xD2, # Insert on arrow keypad */
# 'DIK_DELETE' : 0xD3, # Delete on arrow keypad */
# 'DIK_LWIN' : 0xDB, # Left Windows key */
# 'DIK_RWIN' : 0xDC, # Right Windows key */
# 'DIK_APPS' : 0xDD, # AppMenu key */
# 'DIK_BACKSPACE' : 0x0E, # backspace */
# 'DIK_NUMPADSTAR' : 0x37, # * on numeric keypad */
# 'DIK_LALT' : 0x38, # left Alt */
# 'DIK_CAPSLOCK' : 0x3A, # CapsLock */
# 'DIK_NUMPADMINUS' : 0x4A, # - on numeric keypad */
# 'DIK_NUMPADPLUS' : 0x4E, # + on numeric keypad */
# 'DIK_NUMPADPERIOD' : 0x53, # . on numeric keypad */
# 'DIK_NUMPADSLASH' : 0xB5, # / on numeric keypad */
# 'DIK_RALT' : 0xB8, # right Alt */
# 'DIK_UPARROW' : 0xC8, # UpArrow on arrow keypad */
# 'DIK_PGUP' : 0xC9, # PgUp on arrow keypad */
# 'DIK_LEFTARROW' : 0xCB, # LeftArrow on arrow keypad */
# 'DIK_RIGHTARROW' : 0xCD, # RightArrow on arrow keypad */
# 'DIK_DOWNARROW' : 0xD0, # DownArrow on arrow keypad */
# 'DIK_PGDN' : 0xD1 # PgDn on arrow keypad */
# }
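# Hedged sketch: one common way to inject these DirectInput scancodes on
# Windows is the real user32.keybd_event API with the KEYEVENTF_SCANCODE flag.
# The helper below is illustrative, not part of this project; keysDict codes
# >= 0x80 (NUM / and NUM ENTER above) are E0-prefixed "extended" keys.
import ctypes
import time

KEYEVENTF_EXTENDEDKEY = 0x0001
KEYEVENTF_KEYUP = 0x0002
KEYEVENTF_SCANCODE = 0x0008

def tap_key(name, hold=0.05):
    """Press and release the key bound to `name` in keysDict (Windows only)."""
    code = keysDict[name]
    flags = KEYEVENTF_SCANCODE
    if code & 0x80:  # extended key: strip the marker bit, add the E0 flag
        code &= 0x7F
        flags |= KEYEVENTF_EXTENDEDKEY
    ctypes.windll.user32.keybd_event(0, code, flags, 0)  # key down
    time.sleep(hold)
    ctypes.windll.user32.keybd_event(0, code, flags | KEYEVENTF_KEYUP, 0)  # key up

# Example: tap_key('UI_Up')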
|
python
|
import torch
from .stft import STFT
from librosa.filters import mel as librosa_mel_fn
from .functional import dynamic_range_compression, dynamic_range_decompression
import numpy as np
class MelSTFT(torch.nn.Module):
def __init__(self, filter_length=1024, hop_length=16 * 8, win_length=1024,
n_mel_channels=128, sampling_rate=16000, mel_fmin=0.0,
mel_fmax=8000.0):
super(MelSTFT, self).__init__()
self.n_mel_channels = n_mel_channels
self.sampling_rate = sampling_rate
self.stft_fn = STFT(filter_length, hop_length, win_length)
        # Positional call matches librosa < 0.10; newer librosa requires
        # keyword arguments (sr=, n_fft=, n_mels=, fmin=, fmax=).
        mel_basis = librosa_mel_fn(
            sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax)
mel_basis_inv = torch.from_numpy(np.linalg.pinv(mel_basis)).float()
mel_basis = torch.from_numpy(mel_basis).float()
self.register_buffer('mel_basis', mel_basis)
self.register_buffer('mel_basis_inv', mel_basis_inv)
def spectral_normalize(self, magnitudes):
output = dynamic_range_compression(magnitudes)
output += 11.5
output /= 3
return output
    def spectral_de_normalize(self, magnitudes):
        # Undo spectral_normalize without mutating the caller's tensor in place.
        magnitudes = magnitudes * 3 - 11.5
        return dynamic_range_decompression(magnitudes)
def transform(self, x):
"""
Forward Transform to generate Mel Scaled Magnitude Spectrogram
:param x: input signal [Batch, Samples]
:return: Mel Spectrogram [Batch, Mel Filters, Frames]
"""
linear, phases = self.stft_fn.transform(x)
mel_output = torch.matmul(self.mel_basis, linear)
mel_output = self.spectral_normalize(mel_output)
linear = self.spectral_normalize(linear)
return mel_output, linear
def inverse_mel(self, y, iteration=40):
"""
Backward Transform to generate Estimated Audio
:param spec: [Batch, Mel Filters, Frames]
:return: Estimated Audio [Batch, Samples]
"""
y = self.spectral_de_normalize(y)
magnitudes = torch.matmul(self.mel_basis_inv, y)
return self.stft_fn.griffin_lim(magnitudes, iteration)
def inverse_linear(self, y, iteration=40):
"""
Backward Transform to generate Estimated Audio
:param spec: [Batch, NFFT, Frames]
:return: Estimated Audio [Batch, Samples]
"""
y = self.spectral_de_normalize(y)
return self.stft_fn.griffin_lim(y, iteration)
def sample_to_frame(self, n):
return self.stft_fn.sample_to_frame(n)
def frame_to_sample(self, f):
return self.stft_fn.frame_to_sample(f)
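# Hedged round-trip sketch (assumes the local STFT module behaves as the
# docstrings above describe; shapes are illustrative for the default config):
# mel_stft = MelSTFT()
# signal = torch.randn(1, 16000)                  # [Batch, Samples], 1 s @ 16 kHz
# mel, linear = mel_stft.transform(signal)        # [1, 128, F], [1, 513, F]
# audio_hat = mel_stft.inverse_mel(mel, iteration=40)  # Griffin-Lim estimate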
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on 26/04/2016
Version 1.0
@author: Ricieri (ELP)
Python 3.4.4
"""
"""
Reviewed on 15/10/2020
Version 1.0 rev.A - rounded printed values to 3 decimal places and display '°C' instead of 'ºC'.
@author: Marcelo (ELP)
Python 3.8.6
"""
"""
Reviewed on 06/05/2021
Version 1.0 rev.B - Added FAC_DCDC_EMA variables.
@author: Marcelo (ELT)
Python 3.9.5
"""
import struct
import glob
import serial
import time
import csv
import math
import numpy as np
import matplotlib.pyplot as plt
import os
from datetime import datetime
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
BSMP entity lists
An entity's position in a list corresponds to its BSMP ID
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
UDC_FIRMWARE_VERSION = "0.42 2021-05-06"
ListVar = ['iLoad1','iLoad2','iMod1','iMod2','iMod3','iMod4','vLoad',
'vDCMod1','vDCMod2','vDCMod3','vDCMod4','vOutMod1','vOutMod2',
'vOutMod3','vOutMod4','temp1','temp2','temp3','temp4','ps_OnOff',
'ps_OpMode','ps_Remote','ps_OpenLoop','ps_SoftInterlocks',
'ps_HardInterlocks','iRef','wfmRef_Gain','wfmRef_Offset','sigGen_Enable','sigGen_Type',
'sigGen_Ncycles','sigGenPhaseStart','sigGen_PhaseEnd','sigGen_Freq',
'sigGen_Amplitude','sigGen_Offset','sigGen_Aux','dp_ID','dp_Class','dp_Coeffs','ps_Model',
'wfmRef_PtrBufferStart','wfmRef_PtrBufferEnd','wfmRef_PtrBufferK','wfmRef_SyncMode']
ListCurv = ['wfmRef_Curve','sigGen_SweepAmp','samplesBuffer','fullwfmRef_Curve','wfmRef_Blocks','samplesBuffer_blocks']
ListFunc = ['TurnOn','TurnOff','OpenLoop','ClosedLoop','OpMode','RemoteInterface',
'SetISlowRef','ConfigWfmRef','ConfigSigGen', 'EnableSigGen',
'DisableSigGen','ConfigDPModule','WfmRefUpdate','ResetInterlocks','ConfigPSModel',
'ConfigHRADC','ConfigHRADCOpMode','EnableHRADCSampling','DisableHRADCSampling','ResetWfmRef',
'SetRSAddress','EnableSamplesBuffer','DisableSamplesBuffer','SetISlowRefx4','SelectHRADCBoard','SelectTestSource',
'ResetHRADCBoards','Config_nHRADC','ReadHRADC_UFM','WriteHRADC_UFM','EraseHRADC_UFM','ReadHRADC_BoardData']
ListTestFunc = ['UdcIoExpanderTest', 'UdcLedTest', 'UdcBuzzerTest', 'UdcEepromTest', 'UdcFlashTest', 'UdcRamTest',
'UdcRtcTest', 'UdcSensorTempTest', 'UdcIsoPlaneTest', 'UdcAdcTest', 'UdcUartTest', 'UdcLoopBackTest',
'UdcComTest', 'UdcI2cIsoTest']
ListHRADCInputType = ['Vin_bipolar','Vin_unipolar_p','Vin_unipolar_n','Iin_bipolar','Iin_unipolar_p',
'Iin_unipolar_n','Vref_bipolar_p','Vref_bipolar_n','GND','Vref_unipolar_p',
'Vref_unipolar_n','GND_unipolar','Temp','Reserved0','Reserved1','Reserved2']
ListPSModels = ['FBP_100kHz', 'FBP_Parallel_100kHz', 'FAC_ACDC_10kHz', 'FAC_DCDC_20kHz',
'FAC_Full_ACDC_10kHz', 'FAC_Full_DCDC_20kHz', 'FAP_ACDC',
'FAP_DCDC_20kHz', 'TEST_HRPWM', 'TEST_HRADC', 'JIGA_HRADC',
'FAP_DCDC_15kHz_225A', 'FBPx4_100kHz', 'FAP_6U_DCDC_20kHz',
'JIGA_BASTIDOR']
ListPSModels_v2_1 = ['Empty','FBP','FBP_DCLink','FAC_ACDC','FAC_DCDC',
'FAC_2S_ACDC','FAC_2S_DCDC','FAC_2P4S_ACDC','FAC_2P4S_DCDC',
'FAP','FAP_4P','FAC_DCDC_EMA','FAP_2P2S','FAP_IMAS',
'FAC_2P_ACDC_IMAS','FAC_2P_DCDC_IMAS','Invalid','Invalid',
'Invalid','Invalid','Invalid','Invalid','Invalid','Invalid',
'Invalid','Invalid','Invalid','Invalid','Invalid','Invalid',
'Invalid','Uninitialized']
ListVar_v2_1 = ['ps_status','ps_setpoint','ps_reference','firmware_version',
'counter_set_slowref','counter_sync_pulse','siggen_enable',
'siggen_type','siggen_num_cycles','siggen_n','siggen_freq',
'siggen_amplitude','siggen_offset','siggen_aux_param',
'wfmref_selected','wfmref_sync_mode','wfmref_gain',
'wfmref_offset','p_wfmref_start','p_wfmref_end','p_wfmref_idx']
#ListCurv_v2_1 = ['wfmref','buf_samples_ctom','buf_samples_mtoc']
ListCurv_v2_1 = ['wfmref_data_0','wfmref_data_1','buf_samples_ctom']
ListFunc_v2_1 = ['turn_on','turn_off','open_loop','closed_loop','select_op_mode',
'reset_interlocks','set_command_interface',
'set_serial_termination','unlock_udc','lock_udc',
'cfg_source_scope','cfg_freq_scope','cfg_duration_scope',
'enable_scope','disable_scope','sync_pulse','set_slowref',
'set_slowref_fbp','set_slowref_readback_mon',
'set_slowref_fbp_readback_mon','set_slowref_readback_ref',
'set_slowref_fbp_readback_ref','reset_counters','cfg_wfmref',
'select_wfmref','get_wfmref_size','reset_wfmref','cfg_siggen',
'set_siggen','enable_siggen','disable_siggen','set_param','get_param',
'save_param_eeprom','load_param_eeprom', 'save_param_bank',
'load_param_bank','set_dsp_coeffs','get_dsp_coeff',
'save_dsp_coeffs_eeprom', 'load_dsp_coeffs_eeprom',
'save_dsp_modules_eeprom', 'load_dsp_modules_eeprom','reset_udc']
ListOpMode_v2_1 = ['Off','Interlock','Initializing','SlowRef','SlowRefSync',
'Cycle','RmpWfm','MigWfm','FastRef']
ListSigGenTypes_v2_1 = ['Sine','DampedSine','Trapezoidal','DampedSquaredSine',
'Square']
ListParameters = ['PS_Name','PS_Model','Num_PS_Modules','Command_Interface',
'RS485_Baudrate','RS485_Address','RS485_Termination',
'UDCNet_Address','Ethernet_IP','Ethernet_Subnet_Mask',
'Buzzer_Volume','Freq_ISR_Controller','Freq_TimeSlicer',
'Control_Loop_State','Max_Ref','Min_Ref','Max_Ref_OpenLoop',
'Min_Ref_OpenLoop',
'PWM_Freq','PWM_DeadTime','PWM_Max_Duty','PWM_Min_Duty',
'PWM_Max_Duty_OpenLoop','PWM_Min_Duty_OpenLoop',
'PWM_Lim_Duty_Share','HRADC_Num_Boards','HRADC_Freq_SPICLK',
'HRADC_Freq_Sampling','HRADC_Enable_Heater',
'HRADC_Enable_Monitor','HRADC_Type_Transducer',
'HRADC_Gain_Transducer','HRADC_Offset_Transducer','SigGen_Type',
'SigGen_Num_Cycles','SigGen_Freq','SigGen_Amplitude',
'SigGen_Offset','SigGen_Aux_Param','WfmRef_ID_WfmRef',
'WfmRef_SyncMode','WfmRef_Frequency','WfmRef_Gain',
'WfmRef_Offset','Analog_Var_Max','Analog_Var_Min',
'Hard_Interlocks_Debounce_Time','Hard_Interlocks_Reset_Time',
'Soft_Interlocks_Debounce_Time','Soft_Interlocks_Reset_Time',
'Scope_Sampling_Frequency','Scope_Source','','','','','','',
'','','','','Password','Enable_Onboard_EEPROM']
ListBCBFunc = ['ClearPof', 'SetPof', 'ReadPof', 'EnableBuzzer', 'DisableBuzzer',
'SendUartData', 'GetUartData', 'SendCanData', 'GetCanData',
'GetI2cData']
typeFormat = {'uint8_t': 'BBHBB', 'uint16_t': 'BBHHB', 'uint32_t': 'BBHIB',
'float': 'BBHfB'}
bytesFormat = {'Uint16': 'H', 'Uint32': 'L', 'Uint64': 'Q', 'float': 'f'}
typeSize = {'uint8_t': 6, 'uint16_t': 7, 'uint32_t': 9, 'float': 9}
num_blocks_curves_fbp = [4, 4, 4]
num_blocks_curves_fax = [16, 16, 16]
size_curve_block = [1024, 1024, 1024]
ufmOffset = {'serial': 0, 'calibdate': 4, 'variant': 9, 'rburden': 10,
'calibtemp': 12, 'vin_gain': 14, 'vin_offset': 16,
'iin_gain': 18, 'iin_offset': 20, 'vref_p': 22, 'vref_n': 24,
'gnd': 26}
hradcVariant = ['HRADC-FBP','HRADC-FAX-A','HRADC-FAX-B','HRADC-FAX-C','HRADC-FAX-D']
hradcInputTypes = ['GND', 'Vref_bipolar_p', 'Vref_bipolar_n', 'Temp',
'Vin_bipolar_p', 'Vin_bipolar_n', 'Iin_bipolar_p','Iin_bipolar_n']
NUM_MAX_COEFFS_DSP = 12
num_dsp_classes = 7
num_dsp_modules = [4, 4, 4, 6, 8, 4, 2, 2]
num_coeffs_dsp_modules = [0, 1, 1, 4, 8, 16, 2]
dsp_classes_names = ["DSP_Error", "DSP_SRLim", "DSP_LPF","DSP_PI",
"DSP_IIR_2P2Z", "DSP_IIR_3P3Z", "DSP_VdcLink_FeedForward",
"DSP_Vect_Product"]
# FBP
list_fbp_soft_interlocks = ['Heat-Sink Overtemperature']
list_fbp_hard_interlocks = ['Load Overcurrent',
'Load Overvoltage',
'DCLink Overvoltage',
'DCLink Undervoltage',
'DCLink Relay Fault',
'DCLink Fuse Fault',
'MOSFETs Driver Fault',
'Welded Relay Fault']
# FBP DC-Link
list_fbp_dclink_hard_interlocks = ['Power_Module_1_Fault',
'Power_Module_2_Fault',
'Power_Module_3_Fault',
'Total_Output_Overvoltage',
'Power_Module_1_Overvoltage',
'Power_Module_2_Overvoltage',
'Power_Module_3_Overvoltage',
'Total_Output_Undervoltage',
'Power_Module_1_Undervoltage',
'Power_Module_2_Undervoltage',
'Power_Module_3_Undervoltage',
'Smoke_Detector','External_Interlock']
# FAC ACDC
list_fac_acdc_soft_interlocks = []
list_fac_acdc_hard_interlocks = ['CapBank Overvoltage',
'Rectifier Overvoltage',
'Rectifier Undervoltage',
'Rectifier Overcurrent',
'Welded Contactor Fault',
'Opened Contactor Fault',
'IIB Input Stage Interlock',
'IIB Command Interlock']
list_fac_acdc_iib_is_interlocks = ['Rectifier Overvoltage',
'Input Overcurrent',
'IGBT Overtemperature',
'IGBT Overtemperature HW',
'Driver Overvoltage',
'Driver Overcurrent',
'Top Driver Error',
'Bottom Driver Error',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'Board IIB Overtemperature',
'Module Overhumidity']
list_fac_acdc_iib_is_alarms = ['Rectifier Overvoltage',
'Input Overcurrent',
'IGBT Overtemperature',
'Driver Overvoltage',
'Driver Overcurrent',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'Board IIB Overtemperature',
'Module Overhumidity']
list_fac_acdc_iib_cmd_interlocks = ['Capbank Overvoltage',
'Output Overvoltage',
'External Boards Overvoltage',
'Auxiliary Board Overcurrent',
'IDB Board Overcurrent',
'Rectifier Inductor Overtemperature',
'Rectifier Heat-Sink Overtemperature',
'AC Mains Overcurrent',
'Emergency Button',
'AC Mains Undervoltage',
'AC Mains Overvoltage',
'Ground Leakage Overcurrent',
'Board IIB Overtemperature',
'Module Overhumidity']
list_fac_acdc_iib_cmd_alarms = ['Capbank Overvoltage',
'Output Overvoltage',
'External Boards Overvoltage',
'Auxiliary Board Overcurrent',
'IDB Board Overcurrent',
'Rectifier Inductor Overtemperature',
'Rectifier Heat-Sink Overtemperature',
'Ground Leakage Overcurrent',
'Board IIB Overtemperature',
'Module Overhumidity']
# FAC DCDC
list_fac_dcdc_soft_interlocks = ['DCCT 1 Fault',
'DCCT 2 Fault',
'DCCT High Difference',
'Load Feedback 1 Fault',
'Load Feedback 2 Fault']
list_fac_dcdc_hard_interlocks = ['Load Overcurrent',
'CapBank Overvoltage',
'CapBank Undervoltage',
'IIB Interlock',
'External Interlock',
'Rack Interlock']
list_fac_dcdc_iib_interlocks = ['Input Overvoltage',
'Input Overcurrent',
'Output Overcurrent',
'IGBT 1 Overtemperature',
'IGBT 1 Overtemperature HW',
'IGBT 2 Overtemperature',
'IGBT 2 Overtemperature HW',
'Driver Overvoltage',
'Driver 1 Overcurrent',
'Driver 2 Overcurrent',
'Top Driver 1 Error',
'Bottom Driver 1 Error',
'Top Driver 2 Error',
'Bottom Driver 2 Error',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'Ground Leakage Overcurrent',
'Board IIB Overtemperature',
'Module Overhumidity']
list_fac_dcdc_iib_alarms = ['Input Overvoltage',
'Input Overcurrent',
'Output Overcurrent',
'IGBT 1 Overtemperature',
'IGBT 2 Overtemperature',
'Driver Overvoltage',
'Driver 1 Overcurrent',
'Driver 2 Overcurrent',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'Ground Leakage Overcurrent',
'Board IIB Overtemperature',
'Module Overhumidity']
# FAC-2S AC/DC
list_fac_2s_acdc_hard_interlocks = ['CapBank Overvoltage',
'Rectifier Overvoltage',
'Rectifier Undervoltage',
'Rectifier Overcurrent',
'Welded Contactor Fault',
'Opened Contactor Fault',
'IIB Input Stage Interlock',
'IIB Command Interlock']
list_fac_2s_acdc_iib_is_interlocks = list_fac_acdc_iib_is_interlocks
list_fac_2s_acdc_iib_cmd_interlocks = list_fac_acdc_iib_cmd_interlocks
list_fac_2s_acdc_iib_is_alarms = list_fac_acdc_iib_is_alarms
list_fac_2s_acdc_iib_cmd_alarms = list_fac_acdc_iib_cmd_alarms
# FAC-2S DC/DC
list_fac_2s_dcdc_soft_interlocks = ['DCCT 1 Fault',
'DCCT 2 Fault',
'DCCT High Difference',
'Load Feedback 1 Fault',
'Load Feedback 2 Fault']
list_fac_2s_dcdc_hard_interlocks = ['Load Overcurrent',
'Module 1 CapBank Overvoltage',
'Module 2 CapBank Overvoltage',
'Module 1 CapBank Undervoltage',
'Module 2 CapBank Undervoltage',
'IIB Mod 1 Itlk',
'IIB Mod 2 Itlk',
'External Interlock',
'Rack Interlock']
list_fac_2s_dcdc_iib_interlocks = list_fac_dcdc_iib_interlocks
list_fac_2s_dcdc_iib_alarms = list_fac_dcdc_iib_alarms
# FAC-2P4S AC/DC
list_fac_2p4s_acdc_hard_interlocks = ['CapBank Overvoltage',
'Rectifier Overvoltage',
'Rectifier Undervoltage',
'Rectifier Overcurrent',
'Welded Contactor Fault',
'Opened Contactor Fault',
'IIB Input Stage Interlock',
'IIB Command Interlock']
list_fac_2p4s_acdc_iib_is_interlocks = list_fac_acdc_iib_is_interlocks
list_fac_2p4s_acdc_iib_cmd_interlocks = list_fac_acdc_iib_cmd_interlocks
list_fac_2p4s_acdc_iib_is_alarms = list_fac_acdc_iib_is_alarms
list_fac_2p4s_acdc_iib_cmd_alarms = list_fac_acdc_iib_cmd_alarms
# FAC-2P4S DC/DC
list_fac_2p4s_dcdc_soft_interlocks = ['DCCT 1 Fault',
'DCCT 2 Fault',
'DCCT High Difference',
'Load Feedback 1 Fault',
'Load Feedback 2 Fault',
'Arm 1 Overcurrent',
'Arm 2 Overcurrent',
'Arms High Difference',
'Complementary PS Interlock']
list_fac_2p4s_dcdc_hard_interlocks = ['Load Overcurrent',
'Module 1 CapBank Overvoltage',
'Module 2 CapBank Overvoltage',
'Module 3 CapBank Overvoltage',
'Module 4 CapBank Overvoltage',
'Module 5 CapBank Overvoltage',
'Module 6 CapBank Overvoltage',
'Module 7 CapBank Overvoltage',
'Module 8 CapBank Overvoltage',
'Module 1 CapBank Undervoltage',
'Module 2 CapBank Undervoltage',
'Module 3 CapBank Undervoltage',
'Module 4 CapBank Undervoltage',
'Module 5 CapBank Undervoltage',
'Module 6 CapBank Undervoltage',
'Module 7 CapBank Undervoltage',
'Module 8 CapBank Undervoltage',
'IIB 1 Itlk',
'IIB 2 Itlk',
'IIB 3 Itlk',
'IIB 4 Itlk',
'IIB 5 Itlk',
'IIB 6 Itlk',
'IIB 7 Itlk',
'IIB 8 Itlk']
list_fac_2p4s_dcdc_iib_interlocks = list_fac_dcdc_iib_interlocks
list_fac_2p4s_dcdc_iib_alarms = list_fac_dcdc_iib_alarms
# FAP
list_fap_soft_interlocks = ['DCCT 1 Fault',
'DCCT 2 Fault',
'DCCT High Difference',
'Load Feedback 1 Fault',
'Load Feedback 2 Fault',
'IGBTs Current High Difference']
list_fap_hard_interlocks = ['Load Overcurrent',
'Load Overvoltage',
'DCLink Overvoltage',
'DCLink Undervoltage',
'Welded Contactor Fault',
'Opened Contactor Fault',
'IGBT 1 Overcurrent',
'IGBT 2 Overcurrent',
'IIB Itlk']
list_fap_iib_interlocks = ['Input Overvoltage',
'Output Overvoltage',
'IGBT 1 Overcurrent',
'IGBT 2 Overcurrent',
'IGBT 1 Overtemperature',
'IGBT 2 Overtemperature',
'Driver Overvoltage',
'Driver 1 Overcurrent',
'Driver 2 Overcurrent',
'Driver 1 Error',
'Driver 2 Error',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'DCLink Contactor Fault',
'Contact Sticking of Contactor',
'External Interlock',
'Rack Interlock',
'High Leakage Current',
'Board IIB Overtemperature',
'Module Overhumidity']
list_fap_iib_alarms = ['Input Overvoltage',
'Output Overvoltage',
'IGBT 1 Overcurrent',
'IGBT 2 Overcurrent',
'IGBT 1 Overtemperature',
'IGBT 2 Overtemperature',
'Driver Overvoltage',
'Driver 1 Overcurrent',
'Driver 2 Overcurrent',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'High Leakage Current',
'Board IIB Overtemperature',
'Module Overhumidity']
# FAP-4P
list_fap_4p_soft_interlocks = ['DCCT 1 Fault',
'DCCT 2 Fault',
'DCCT High Difference',
'Load Feedback 1 Fault',
'Load Feedback 2 Fault',
'IGBTs Current High Difference']
list_fap_4p_hard_interlocks = ['Load Overcurrent',
'Load Overvoltage',
'IGBT 1 Mod 1 Overcurrent',
'IGBT 2 Mod 1 Overcurrent',
'IGBT 1 Mod 2 Overcurrent',
'IGBT 2 Mod 2 Overcurrent',
'IGBT 1 Mod 3 Overcurrent',
'IGBT 2 Mod 3 Overcurrent',
'IGBT 1 Mod 4 Overcurrent',
'IGBT 2 Mod 4 Overcurrent',
'Welded Contactor Mod 1 Fault',
'Welded Contactor Mod 2 Fault',
'Welded Contactor Mod 3 Fault',
'Welded Contactor Mod 4 Fault',
'Opened Contactor Mod 1 Fault',
'Opened Contactor Mod 2 Fault',
'Opened Contactor Mod 3 Fault',
'Opened Contactor Mod 4 Fault',
'DCLink Mod 1 Overvoltage',
'DCLink Mod 2 Overvoltage',
'DCLink Mod 3 Overvoltage',
'DCLink Mod 4 Overvoltage',
'DCLink Mod 1 Undervoltage',
'DCLink Mod 2 Undervoltage',
'DCLink Mod 3 Undervoltage',
'DCLink Mod 4 Undervoltage',
'IIB Mod 1 Itlk',
'IIB Mod 2 Itlk',
'IIB Mod 3 Itlk',
'IIB Mod 4 Itlk']
list_fap_4p_iib_interlocks = list_fap_iib_interlocks
list_fap_4p_iib_alarms = list_fap_iib_alarms
# FAC DCDC EMA
list_fac_dcdc_ema_soft_interlocks = ['DCCT Fault',
'Load Feedback Fault']
list_fac_dcdc_ema_hard_interlocks = ['Load Overcurrent',
'DCLink Overvoltage',
'DCLink Undervoltage',
'Emergency Button',
'Load Waterflow',
'Load Overtemperature',
'IIB Itlk']
list_fac_dcdc_ema_iib_interlocks = ['Input Overvoltage',
'Input Overcurrent',
'Output Overcurrent',
'IGBT 1 Overtemperature',
'IGBT 1 Overtemperature HW',
'IGBT 2 Overtemperature',
'IGBT 2 Overtemperature HW',
'Driver Overvoltage',
'Driver 1 Overcurrent',
'Driver 2 Overcurrent',
'Top Driver 1 Error',
'Bottom Driver 1 Error',
'Top Driver 2 Error',
'Bottom Driver 2 Error',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'Ground Leakage Overcurrent',
'Board IIB Overtemperature',
'Module Overhumidity']
list_fac_dcdc_ema_iib_alarms = ['Input Overvoltage',
'Input Overcurrent',
'Output Overcurrent',
'IGBT 1 Overtemperature',
'IGBT 2 Overtemperature',
'Driver Overvoltage',
'Driver 1 Overcurrent',
'Driver 2 Overcurrent',
'Inductors Overtemperature',
'Heat-Sink Overtemperature',
'Ground Leakage Overcurrent',
'Board IIB Overtemperature',
'Module Overhumidity']
# FAP-2P2S
list_fap_2p2s_soft_interlocks = ['DCCT 1 Fault',
'DCCT 2 Fault',
'DCCT High Difference',
'Load Feedback 1 Fault',
'Load Feedback 2 Fault',
'Arms High Difference',
'IGBTs Current High Difference',
'Complementary PS Interlock']
list_fap_2p2s_hard_interlocks = ['Load Overcurrent',
'IGBT 1 Mod 1 Overcurrent',
'IGBT 2 Mod 1 Overcurrent',
'IGBT 1 Mod 2 Overcurrent',
'IGBT 2 Mod 2 Overcurrent',
'IGBT 1 Mod 3 Overcurrent',
'IGBT 2 Mod 3 Overcurrent',
'IGBT 1 Mod 4 Overcurrent',
'IGBT 2 Mod 4 Overcurrent',
'Welded Contactor Mod 1 Fault',
'Welded Contactor Mod 2 Fault',
'Welded Contactor Mod 3 Fault',
'Welded Contactor Mod 4 Fault',
'Opened Contactor Mod 1 Fault',
'Opened Contactor Mod 2 Fault',
'Opened Contactor Mod 3 Fault',
'Opened Contactor Mod 4 Fault',
'DCLink Mod 1 Overvoltage',
'DCLink Mod 2 Overvoltage',
'DCLink Mod 3 Overvoltage',
'DCLink Mod 4 Overvoltage',
'DCLink Mod 1 Undervoltage',
'DCLink Mod 2 Undervoltage',
'DCLink Mod 3 Undervoltage',
'DCLink Mod 4 Undervoltage',
'IIB Mod 1 Itlk',
'IIB Mod 2 Itlk',
'IIB Mod 3 Itlk',
'IIB Mod 4 Itlk',
'Arm 1 Overcurrent',
'Arm 2 Overcurrent']
list_fap_2p2s_iib_interlocks = list_fap_iib_interlocks
list_fap_2p2s_iib_alarms = list_fap_iib_alarms
# FAP 225A
list_fap_225A_soft_interlocks = ['IGBTs Current High Difference']
list_fap_225A_hard_interlocks = ['Load Overcurrent',
'DCLink Contactor Fault',
'IGBT 1 Overcurrent',
'IGBT 2 Overcurrent']
# FAC-2P ACDC
list_fac_2p_acdc_imas_soft_interlocks = []
list_fac_2p_acdc_imas_hard_interlocks = ['CapBank Overvoltage',
'Rectifier Overcurrent',
'AC Mains Contactor Fault',
'Module A Interlock',
'Module B Interlock',
'DCDC Interlock']
# FAC-2P DCDC
list_fac_2p_dcdc_imas_soft_interlocks = []
list_fac_2p_dcdc_imas_hard_interlocks = ['Load Overcurrent',
'Module 1 CapBank_Overvoltage',
'Module 2 CapBank_Overvoltage',
'Module 1 CapBank_Undervoltage',
'Module 2 CapBank_Undervoltage',
'Arm 1 Overcurrent',
'Arm 2 Overcurrent',
'Arms High_Difference',
'ACDC Interlock']
class SerialDRS(object):
ser = serial.Serial()
def __init__(self):
#self.ser=serial.Serial()
self.MasterAdd = '\x00'
self.SlaveAdd = '\x01'
self.BCastAdd = '\xFF'
self.ComWriteVar = '\x20'
self.WriteFloatSizePayload = '\x00\x05'
self.WriteDoubleSizePayload = '\x00\x03'
self.ComReadVar = '\x10\x00\x01'
self.ComRequestCurve = '\x40'
self.ComSendWfmRef = '\x41'
self.ComFunction = '\x50'
self.DP_MODULE_MAX_COEFF = 16
self.ListDPClass = ['ELP_Error','ELP_SRLim','ELP_LPF','ELP_PI_dawu','ELP_IIR_2P2Z','ELP_IIR_3P3Z',
'DCL_PID','DCL_PI','DCL_DF13','DCL_DF22','DCL_23']
        self.ListHardInterlocks = ['Overcurrent', 'External Interlock', 'AC Fault',
            'ACDC Fault', 'DCDC Fault', 'Overvoltage', 'Precharge Resistor Fault',
            'Output Capacitor Charging Fault', 'Emergency Button', 'OUT_OVERVOLTAGE',
            'IN_OVERVOLTAGE', 'ARM1_OVERCURRENT', 'ARM2_OVERCURRENT',
            'IN_OVERCURRENT', 'DRIVER1_FAULT', 'DRIVER2_FAULT', 'OUT1_OVERCURRENT',
            'OUT2_OVERCURRENT', 'OUT1_OVERVOLTAGE',
            'OUT2_OVERVOLTAGE', 'LEAKAGE_OVERCURRENT', 'AC_OVERCURRENT']
self.ListSoftInterlocks = ['IGBT1_OVERTEMP','IGBT2_OVERTEMP','L1_OVERTEMP','L2_OVERTEMP','HEATSINK_OVERTEMP','WATER_OVERTEMP',
'RECTFIER1_OVERTEMP','RECTFIER2_OVERTEMP','AC_TRANSF_OVERTEMP','WATER_FLUX_FAULT','OVER_HUMIDITY_FAULT']
print("\n pyDRS - compatible UDC firmware version: " + UDC_FIRMWARE_VERSION + "\n")
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
Internal class helper methods
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
    # Convert a float to its packed-byte string
def float_to_hex(self, value):
hex_value = struct.pack('f', value)
return hex_value.decode('ISO-8859-1')
    # Convert a list of floats to a packed-byte string
def float_list_to_hex(self, value_list):
hex_list = b''
for value in value_list:
hex_list = hex_list + struct.pack('f', value)
return hex_list.decode('ISO-8859-1')
def format_list_size(self, in_list, max_size):
out_list = in_list[0:max_size]
if max_size > len(in_list):
for i in range(max_size - len(in_list)):
out_list.append(0)
return out_list
    # Convert a 16-bit unsigned value to a packed-byte string (despite the
    # name, this packs 'H', not a C double)
def double_to_hex(self,value):
hex_value = struct.pack('H',value)
return hex_value.decode('ISO-8859-1')
    # Convert a 32-bit unsigned int to a packed-byte string
def uint32_to_hex(self,value):
hex_value = struct.pack('I',value)
return hex_value.decode('ISO-8859-1')
    # Convert an index (single byte) to a packed-byte string
def index_to_hex(self,value):
hex_value = struct.pack('B',value)
return hex_value.decode('ISO-8859-1')
    # Convert payload_size to a big-endian 2-byte string
def size_to_hex(self,value):
hex_value = struct.pack('>H',value)
return hex_value.decode('ISO-8859-1')
    # Checksum: append the byte that makes the message sum to 0 mod 256
def checksum(self, packet):
b=bytearray(packet.encode('ISO-8859-1'))
csum =(256-sum(b))%256
hcsum = struct.pack('B',csum)
send_msg = packet + hcsum.decode(encoding='ISO-8859-1')
return send_msg
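    # Worked example (hypothetical bytes): if a packet's ISO-8859-1 byte values
    # sum to 499 (0x1F3), the appended checksum byte is (256 - 499) % 256 = 13
    # (0x0D); the full message then sums to 512, a multiple of 256, which the
    # receiver can verify with a single modular sum.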
    # Send a read request for the variable with the given ID
def read_var(self,var_id):
send_msg = self.checksum(self.SlaveAdd+self.ComReadVar+var_id)
self.ser.reset_input_buffer()
self.ser.write(send_msg.encode('ISO-8859-1'))
def is_open(self):
return self.ser.isOpen()
def _convertToUint16List(self, val, format):
val_16 = []
val_b = struct.pack(bytesFormat[format],val)
print(val_b)
for i in range(0,len(val_b),2):
val_16.append(struct.unpack('H',val_b[i:i+2])[0])
print(val_16)
return val_16
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
BSMP function-entity call methods
Each method returns the raw response bytes of the message
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def TurnOn_FAx(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('TurnOn'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def TurnOn(self,ps_modules):
payload_size = self.size_to_hex(1+2) #Payload: ID + ps_modules
hex_modules = self.double_to_hex(ps_modules)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('TurnOn'))+hex_modules
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def turn_on(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('turn_on'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def TurnOff_FAx(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('TurnOff'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def TurnOff(self,ps_modules):
payload_size = self.size_to_hex(1+2) #Payload: ID + ps_modules
hex_modules = self.double_to_hex(ps_modules)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('TurnOff'))+hex_modules
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def turn_off(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('turn_off'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def open_loop(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('open_loop'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def closed_loop(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('closed_loop'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def OpenLoop(self,ps_modules):
payload_size = self.size_to_hex(1+2) #Payload: ID + ps_modules
hex_modules = self.double_to_hex(ps_modules)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('OpenLoop'))+hex_modules
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ClosedLoop(self,ps_modules):
payload_size = self.size_to_hex(1+2) #Payload: ID + ps_modules
hex_modules = self.double_to_hex(ps_modules)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ClosedLoop'))+hex_modules
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def OpenLoop_FAx(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('OpenLoop'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ClosedLoop_FAx(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ClosedLoop'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def OpMode(self,op_mode):
payload_size = self.size_to_hex(1+2) #Payload: ID + ps_opmode
hex_opmode = self.double_to_hex(op_mode)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('OpMode'))+hex_opmode
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def RemoteInterface(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('RemoteInterface'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SetISlowRef(self,setpoint):
payload_size = self.size_to_hex(1+4) #Payload: ID + iSlowRef
hex_setpoint = self.float_to_hex(setpoint)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('SetISlowRef'))+hex_setpoint
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigWfmRef(self,gain,offset):
payload_size = self.size_to_hex(1+4+4) #Payload: ID + gain + offset
hex_gain = self.float_to_hex(gain)
hex_offset = self.float_to_hex(offset)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ConfigWfmRef'))+hex_gain+hex_offset
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigSigGen(self,sigType,nCycles,phaseStart,phaseEnd):
payload_size = self.size_to_hex(1+2+2+4+4) #Payload: ID + type + nCycles + phaseStart + phaseEnd
hex_sigType = self.double_to_hex(sigType)
hex_nCycles = self.double_to_hex(nCycles)
hex_phaseStart = self.float_to_hex(phaseStart)
hex_phaseEnd = self.float_to_hex(phaseEnd)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ConfigSigGen'))+hex_sigType+hex_nCycles+hex_phaseStart+hex_phaseEnd
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def EnableSigGen(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('EnableSigGen'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def DisableSigGen(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('DisableSigGen'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigDPModule(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ConfigDPModule'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigDPModuleFull(self,dp_id,dp_class,dp_coeffs):
self.Write_dp_ID(dp_id)
self.Write_dp_Class(dp_class)
self.Write_dp_Coeffs(dp_coeffs)
self.ConfigDPModule()
def WfmRefUpdate(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('WfmRefUpdate'))
send_msg = self.checksum(self.BCastAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
def ResetInterlocks(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ResetInterlocks'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def reset_interlocks(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('reset_interlocks'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigPSModel(self,ps_model):
payload_size = self.size_to_hex(1+2) #Payload: ID + ps_Model
hex_model = self.double_to_hex(ps_model)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ConfigPSModel'))+hex_model
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigHRADC(self,hradcID,freqSampling,inputType,enableHeater,enableMonitor):
payload_size = self.size_to_hex(1+2+4+2+2+2) #Payload: ID + hradcID + freqSampling + inputType + enableHeater + enableMonitor
hex_hradcID = self.double_to_hex(hradcID)
hex_freq = self.float_to_hex(freqSampling)
hex_type = self.double_to_hex(ListHRADCInputType.index(inputType))
hex_enHeater = self.double_to_hex(enableHeater)
hex_enMonitor = self.double_to_hex(enableMonitor)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ConfigHRADC'))+hex_hradcID+hex_freq+hex_type+hex_enHeater+hex_enMonitor
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ConfigHRADCOpMode(self,hradcID,opMode):
payload_size = self.size_to_hex(1+2+2) #Payload: ID + hradcID + opMode
hex_hradcID = self.double_to_hex(hradcID)
hex_opMode = self.double_to_hex(opMode)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ConfigHRADCOpMode'))+hex_hradcID+hex_opMode
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def EnableHRADCSampling(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('EnableHRADCSampling'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def DisableHRADCSampling(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('DisableHRADCSampling'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ResetWfmRef(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ResetWfmRef'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SetRSAddress(self,rs_address):
payload_size = self.size_to_hex(1+2) #Payload: ID + rs_address
hex_add = self.double_to_hex(rs_address)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('SetRSAddress'))+hex_add
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def EnableSamplesBuffer(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('EnableSamplesBuffer'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def DisableSamplesBuffer(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('DisableSamplesBuffer'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SelectHRADCBoard(self,hradcID):
payload_size = self.size_to_hex(1+2) #Payload: ID
hex_hradcID = self.double_to_hex(hradcID)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('SelectHRADCBoard'))+hex_hradcID
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SelectTestSource(self,inputType):
payload_size = self.size_to_hex(1+2) #Payload: inputType
hex_type = self.double_to_hex(ListHRADCInputType.index(inputType))
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('SelectTestSource'))+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ResetHRADCBoards(self, enable):
payload_size = self.size_to_hex(1+2) #Payload: ID+enable(2)
hex_enable = self.double_to_hex(enable)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ResetHRADCBoards'))+hex_enable
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def Config_nHRADC(self,nHRADC):
payload_size = self.size_to_hex(1+2) #Payload: nHRADC
hex_nhradc = self.double_to_hex(nHRADC)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('Config_nHRADC'))+hex_nhradc
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ReadHRADC_UFM(self,hradcID,ufmadd):
payload_size = self.size_to_hex(1+2+2) #Payload: ID + hradcID + ufmadd
hex_hradcID = self.double_to_hex(hradcID)
hex_ufmadd = self.double_to_hex(ufmadd)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ReadHRADC_UFM'))+hex_hradcID+hex_ufmadd
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def WriteHRADC_UFM(self,hradcID,ufmadd,ufmdata):
payload_size = self.size_to_hex(1+2+2+2) #Payload: ID + hradcID + ufmadd + ufmdata
hex_hradcID = self.double_to_hex(hradcID)
hex_ufmadd = self.double_to_hex(ufmadd)
hex_ufmdata = self.double_to_hex(ufmdata)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('WriteHRADC_UFM'))+hex_hradcID+hex_ufmadd+hex_ufmdata
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def EraseHRADC_UFM(self,hradcID):
payload_size = self.size_to_hex(1+2) #Payload: ID + hradcID
hex_hradcID = self.double_to_hex(hradcID)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('EraseHRADC_UFM'))+hex_hradcID
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def InitHRADC_BoardData(self, serial = 12345678, day = 1, mon = 1,
year = 2017, hour = 12, minutes = 30,
variant = 'HRADC-FBP', rburden = 20, calibtemp = 40,
vin_gain = 1, vin_offset = 0, iin_gain = 1,
iin_offset = 0, vref_p = 5, vref_n = -5, gnd = 0):
boardData = {'serial': serial, 'variant': variant, 'rburden': rburden,
'tm_mday': day, 'tm_mon': mon, 'tm_year': year,
'tm_hour': hour, 'tm_min': minutes, 'calibtemp': calibtemp,
'vin_gain': vin_gain, 'vin_offset': vin_offset,
'iin_gain': iin_gain, 'iin_offset': iin_offset,
'vref_p': vref_p, 'vref_n': vref_n, 'gnd': gnd}
return boardData
def WriteHRADC_BoardData(self,hradcID,boardData):
        print('Configuring board in UFM mode...')
self.ConfigHRADCOpMode(hradcID,1)
time.sleep(0.5)
        print('\nSending serial number...')
ufmdata_16 = self._convertToUint16List(boardData['serial'],'Uint64')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['serial'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending variant...')
ufmdata_16 = self._convertToUint16List(hradcVariant.index(boardData['variant']),'Uint16')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['variant'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending rburden...')
ufmdata_16 = self._convertToUint16List(boardData['rburden'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['rburden'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending calibdate...')
ufmdata_16 = self._convertToUint16List(boardData['tm_mday'],'Uint16')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['calibdate'],ufmdata_16[i])
time.sleep(0.1)
# Month
ufmdata_16 = self._convertToUint16List(boardData['tm_mon'],'Uint16')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['calibdate']+1,ufmdata_16[i])
time.sleep(0.1)
# Year
ufmdata_16 = self._convertToUint16List(boardData['tm_year'],'Uint16')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['calibdate']+2,ufmdata_16[i])
time.sleep(0.1)
# Hour
ufmdata_16 = self._convertToUint16List(boardData['tm_hour'],'Uint16')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['calibdate']+3,ufmdata_16[i])
time.sleep(0.1)
# Minutes
ufmdata_16 = self._convertToUint16List(boardData['tm_min'],'Uint16')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['calibdate']+4,ufmdata_16[i])
time.sleep(0.1)
        print('\nSending calibtemp...')
ufmdata_16 = self._convertToUint16List(boardData['calibtemp'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['calibtemp'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending vin_gain...')
ufmdata_16 = self._convertToUint16List(boardData['vin_gain'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['vin_gain'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending vin_offset...')
ufmdata_16 = self._convertToUint16List(boardData['vin_offset'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['vin_offset'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending iin_gain...')
ufmdata_16 = self._convertToUint16List(boardData['iin_gain'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['iin_gain'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending iin_offset...')
ufmdata_16 = self._convertToUint16List(boardData['iin_offset'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['iin_offset'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending vref_p...')
ufmdata_16 = self._convertToUint16List(boardData['vref_p'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['vref_p'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending vref_n...')
ufmdata_16 = self._convertToUint16List(boardData['vref_n'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['vref_n'],ufmdata_16[i])
time.sleep(0.1)
        print('\nSending gnd...')
ufmdata_16 = self._convertToUint16List(boardData['gnd'],'float')
for i in range(len(ufmdata_16)):
self.WriteHRADC_UFM(hradcID,i+ufmOffset['gnd'],ufmdata_16[i])
time.sleep(0.1)
        print('Putting the board back in Sampling mode...')
self.ConfigHRADCOpMode(hradcID,0)
    def ReadHRADC_BoardData(self,hradcID):
        print('Configuring board in UFM mode...')
print('Configurando placa em UFM mode...')
print(self.ConfigHRADCOpMode(hradcID,1))
time.sleep(0.5)
        print('Extracting board data...')
payload_size = self.size_to_hex(1+2) #Payload: ID + hradcID
hex_hradcID = self.double_to_hex(hradcID)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('ReadHRADC_BoardData'))+hex_hradcID
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
print(self.ser.read(6))
        print('Reading board data...')
self.read_var(self.index_to_hex(50+hradcID))
reply_msg = self.ser.read(1+1+2+56+1)
print(reply_msg)
print(len(reply_msg))
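        # Expected reply: 61 bytes = address (B) + command (B) + payload size
        # (H) + 56-byte board-data payload + checksum (B). Note that 'L' in
        # the format below is platform-dependent in native mode; this layout
        # assumes a 4-byte unsigned long.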
val = struct.unpack('BBHLLHHHHHHfffffffffB',reply_msg)
try:
boardData = self.InitHRADC_BoardData(val[3]+val[4]*pow(2,32),val[5],
val[6],val[7],val[8],val[9],
hradcVariant[val[10]],val[11],
val[12],val[13],val[14],val[15],
val[16],val[17],val[18],val[19])
        except Exception:
            print('\n### Board not initialized ###\n')
            boardData = self.InitHRADC_BoardData(serial = int(input('Enter the S/N: ')))
print('\n')
        print('Putting the board back in Sampling mode...')
print(self.ConfigHRADCOpMode(hradcID,0))
time.sleep(0.5)
return boardData
def UpdateHRADC_BoardData(self,hradcID):
variant = len(hradcVariant)
while variant >= len(hradcVariant) or variant < 0:
variant = int(input("Enter HRADC variant number:\n 0: HRADC-FBP\n 1: HRADC-FAX-A\n 2: HRADC-FAX-B\n 3: HRADC-FAX-C\n 4: HRADC-FAX-D\n\n>>> "))
variant = hradcVariant[variant]
boardData = self.ReadHRADC_BoardData(hradcID)
boardData['variant'] = variant
boardData['vin_offset'] = np.float32(0)
boardData['iin_offset'] = np.float32(0)
if variant == 'HRADC-FBP':
boardData['rburden'] = np.float32(20)
boardData['vin_gain'] = np.float32(1)
boardData['iin_gain'] = np.float32(1)
print(boardData['vin_gain'])
print(boardData['variant'])
elif variant == 'HRADC-FAX-A':
boardData['rburden'] = np.float32(0)
boardData['vin_gain'] = np.float32(6.0/5.0)
boardData['iin_gain'] = np.float32(6.0/5.0)
print(boardData['vin_gain'])
print(boardData['variant'])
elif variant == 'HRADC-FAX-B':
boardData['rburden'] = np.float32(0)
boardData['vin_gain'] = np.float32(1)
boardData['iin_gain'] = np.float32(1)
print(boardData['vin_gain'])
print(boardData['variant'])
elif variant == 'HRADC-FAX-C':
boardData['rburden'] = np.float32(5)
boardData['vin_gain'] = np.float32(1)
boardData['iin_gain'] = np.float32(1)
print(boardData['vin_gain'])
print(boardData['variant'])
elif variant == 'HRADC-FAX-D':
boardData['rburden'] = np.float32(1)
boardData['vin_gain'] = np.float32(1)
boardData['iin_gain'] = np.float32(1)
print(boardData['vin_gain'])
print(boardData['variant'])
print('\n\nBoard data from HRADC of slot #' + str(hradcID) + ' is about to be overwritten by the following data:')
print(boardData)
i = input('\n Do you want to proceed? [y/n]: ')
        if i == 'Y' or i == 'y':
self.ConfigHRADCOpMode(hradcID,1)
time.sleep(0.1)
self.EraseHRADC_UFM(hradcID)
time.sleep(0.5)
self.ResetHRADCBoards(1)
time.sleep(0.5)
self.ResetHRADCBoards(0)
time.sleep(1.5)
self.WriteHRADC_BoardData(hradcID,boardData)
boardData_new = self.ReadHRADC_BoardData(hradcID)
print(boardData_new)
print(boardData)
if boardData_new == boardData:
print('\n\n ### Operation was successful !!! ### \n\n')
else:
print('\n\n ### Operation failed !!! ### \n\n')
return [boardData, boardData_new]
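    # Sketch of a typical recalibration flow for the HRADC in slot 0,
    # returning the board data before and after the rewrite:
    #
    #   old_data, new_data = drs.UpdateHRADC_BoardData(0)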
def GetHRADCs_BoardData(self,numHRADC):
boardData_list = []
for i in range(numHRADC):
boardData_list.append(self.ReadHRADC_BoardData(i))
return boardData_list
def UdcEepromTest(self, rw, data=None):
if data is not None:
payload_size = self.size_to_hex(12)
hex_rw = self.double_to_hex(rw)
hex_byte_0 = self.double_to_hex(data[0])
hex_byte_1 = self.double_to_hex(data[1])
hex_byte_2 = self.double_to_hex(data[2])
hex_byte_3 = self.double_to_hex(data[3])
hex_byte_4 = self.double_to_hex(data[4])
hex_byte_5 = self.double_to_hex(data[5])
hex_byte_6 = self.double_to_hex(data[6])
hex_byte_7 = self.double_to_hex(data[7])
hex_byte_8 = self.double_to_hex(data[8])
hex_byte_9 = self.double_to_hex(data[9])
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcEepromTest'))+hex_rw[0]+ \
hex_byte_0[0] + hex_byte_1[0] + hex_byte_2[0] + hex_byte_3[0] + hex_byte_4[0] + hex_byte_5[0]+ \
hex_byte_6[0] + hex_byte_7[0] + hex_byte_8[0] + hex_byte_9[0]
print(send_packet.encode('ISO-8859-1'))
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(15)
def UdcFlashTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcFlashTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcRamTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcRamTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcAdcTest(self, rw, channel):
payload_size = self.size_to_hex(3)
hex_rw = self.double_to_hex(rw)
hex_channel = self.double_to_hex(channel)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcAdcTest'))+hex_rw[0]+hex_channel[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcSensorTempTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcSensorTempTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcRtcTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcRtcTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcUartTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcUartTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcIoExpanderTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcIoExpanderTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
# def UdcEthernetTest(self, rw):
# payload_size = self.size_to_hex(2)
# hex_rw = self.double_to_hex(rw)
# send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcEthernetTest'))+hex_rw
# self.ser.write(send_packet.encode('ISO-8859-1'))
# return self.ser.read()
def UdcIsoPlaneTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcIsoPlaneTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcLoopBackTest(self, rw, channel):
payload_size = self.size_to_hex(3)
hex_rw = self.double_to_hex(rw)
hex_channel = self.double_to_hex(channel)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcLoopBackTest'))+hex_rw[0]+hex_channel[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcLedTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcLedTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcBuzzerTest(self, rw):
payload_size = self.size_to_hex(2)
hex_rw = self.double_to_hex(rw)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcBuzzerTest'))+hex_rw[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def UdcComTest(self, rw, val):
payload_size = self.size_to_hex(3)
hex_rw = self.double_to_hex(rw)
hex_value = self.double_to_hex(val)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcComTest'))+hex_rw[0]+hex_value[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
time.sleep(0.2)
return self.ser.read(6)
def UdcI2cIsoTest(self, rw, val):
payload_size = self.size_to_hex(3)
hex_rw = self.double_to_hex(rw)
hex_value = self.double_to_hex(val)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListTestFunc.index('UdcI2cIsoTest'))+hex_rw[0]+hex_value[0]
self.ser.write(send_packet.encode('ISO-8859-1'))
return self.ser.read(6)
def SetISlowRefx4(self, iRef1 = 0, iRef2 = 0, iRef3 = 0, iRef4 = 0):
payload_size = self.size_to_hex(1+4*4) #Payload: ID + 4*iRef
hex_iRef1 = self.float_to_hex(iRef1)
hex_iRef2 = self.float_to_hex(iRef2)
hex_iRef3 = self.float_to_hex(iRef3)
hex_iRef4 = self.float_to_hex(iRef4)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc.index('SetISlowRefx4'))+hex_iRef1+hex_iRef2+hex_iRef3+hex_iRef4
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SetPof(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('SetPof'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ClearPof(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('ClearPof'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def ReadPof(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('ReadPof'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def EnableBuzzer(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('EnableBuzzer'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def DisableBuzzer(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('DisableBuzzer'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SendUartData(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('SendUartData'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def GetUartData(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('GetUartData'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def SendCanData(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('SendCanData'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def GetCanData(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('GetCanData'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def GetI2cData(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListBCBFunc.index('GetI2cData'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def read_ps_status(self):
self.read_var(self.index_to_hex(ListVar_v2_1.index('ps_status')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
status = {}
status['state'] = ListOpMode_v2_1[(val[3] & 0b0000000000001111)]
status['open_loop'] = (val[3] & 0b0000000000010000) >> 4
status['interface'] = (val[3] & 0b0000000001100000) >> 5
status['active'] = (val[3] & 0b0000000010000000) >> 7
status['model'] = ListPSModels_v2_1[(val[3] & 0b0001111100000000) >> 8]
status['unlocked'] = (val[3] & 0b0010000000000000) >> 13
#print(status)
return status
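    # Shape of the decoded status dict (entries are illustrative; the actual
    # strings come from ListOpMode_v2_1 and ListPSModels_v2_1, defined
    # elsewhere in this module):
    #
    #   {'state': 'SlowRef', 'open_loop': 0, 'interface': 0, 'active': 1,
    #    'model': 'FBP', 'unlocked': 0}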
def set_ps_name(self,ps_name):
if type(ps_name) == str:
for n in range(len(ps_name)):
self.set_param('PS_Name', n, float(ord(ps_name[n])))
for i in range(n+1,64):
self.set_param('PS_Name', i, float(ord(" ")))
def get_ps_name(self):
ps_name = ""
for n in range(64):
ps_name = ps_name + chr(int(self.get_param('PS_Name', n)))
            if ps_name[-3:] == '   ':
                ps_name = ps_name[:n-2]
break
return ps_name
def set_slowref(self,setpoint):
payload_size = self.size_to_hex(1+4) #Payload: ID + iSlowRef
hex_setpoint = self.float_to_hex(setpoint)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_slowref'))+hex_setpoint
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def set_slowref_fbp(self, iRef1 = 0, iRef2 = 0, iRef3 = 0, iRef4 = 0):
payload_size = self.size_to_hex(1+4*4) #Payload: ID + 4*iRef
hex_iRef1 = self.float_to_hex(iRef1)
hex_iRef2 = self.float_to_hex(iRef2)
hex_iRef3 = self.float_to_hex(iRef3)
hex_iRef4 = self.float_to_hex(iRef4)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_slowref_fbp'))+hex_iRef1+hex_iRef2+hex_iRef3+hex_iRef4
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def set_slowref_readback_mon(self,setpoint):
payload_size = self.size_to_hex(1+4) #Payload: ID + iSlowRef
hex_setpoint = self.float_to_hex(setpoint)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_slowref_readback_mon'))+hex_setpoint
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def set_slowref_fbp_readback_mon(self, iRef1 = 0, iRef2 = 0, iRef3 = 0, iRef4 = 0):
payload_size = self.size_to_hex(1+4*4) #Payload: ID + 4*iRef
hex_iRef1 = self.float_to_hex(iRef1)
hex_iRef2 = self.float_to_hex(iRef2)
hex_iRef3 = self.float_to_hex(iRef3)
hex_iRef4 = self.float_to_hex(iRef4)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_slowref_fbp_readback_mon'))+hex_iRef1+hex_iRef2+hex_iRef3+hex_iRef4
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(21)
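        # A full readback is 21 bytes (4 floats plus framing); a 6-byte reply
        # is the short acknowledge/error frame, so it is returned as-is.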
if(len(reply_msg) == 6):
return reply_msg
else:
val = struct.unpack('BBHffffB',reply_msg)
return [val[3],val[4],val[5],val[6]]
def set_slowref_readback_ref(self,setpoint):
payload_size = self.size_to_hex(1+4) #Payload: ID + iSlowRef
hex_setpoint = self.float_to_hex(setpoint)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_slowref_readback_ref'))+hex_setpoint
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def set_slowref_fbp_readback_ref(self, iRef1 = 0, iRef2 = 0, iRef3 = 0, iRef4 = 0):
payload_size = self.size_to_hex(1+4*4) #Payload: ID + 4*iRef
hex_iRef1 = self.float_to_hex(iRef1)
hex_iRef2 = self.float_to_hex(iRef2)
hex_iRef3 = self.float_to_hex(iRef3)
hex_iRef4 = self.float_to_hex(iRef4)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_slowref_fbp_readback_ref'))+hex_iRef1+hex_iRef2+hex_iRef3+hex_iRef4
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(21)
if(len(reply_msg) == 6):
return reply_msg
else:
val = struct.unpack('BBHffffB',reply_msg)
return [val[3],val[4],val[5],val[6]]
def set_param(self, param_id, n, value):
payload_size = self.size_to_hex(1+2+2+4) #Payload: ID + param id + [n] + value
if type(param_id) == str:
hex_id = self.double_to_hex(ListParameters.index(param_id))
if type(param_id) == int:
hex_id = self.double_to_hex(param_id)
hex_n = self.double_to_hex(n)
hex_value = self.float_to_hex(value)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_param'))+hex_id+hex_n+hex_value
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(6)
if reply_msg[4] == 8:
print('Invalid parameter')
return reply_msg
def get_param(self, param_id, n = 0):
payload_size = self.size_to_hex(1+2+2) #Payload: ID + param id + [n]
if type(param_id) == str:
hex_id = self.double_to_hex(ListParameters.index(param_id))
if type(param_id) == int:
hex_id = self.double_to_hex(param_id)
hex_n = self.double_to_hex(n)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('get_param'))+hex_id+hex_n
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.reset_input_buffer()
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(9)
if len(reply_msg) == 9:
val = struct.unpack('BBHfB',reply_msg)
return val[3]
else:
#print('Invalid parameter')
return float('nan')
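    # get_param() returns NaN on a short or invalid reply; get_param_bank()
    # below relies on this sentinel (via math.isnan) to detect the end of an
    # array parameter.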
def save_param_eeprom(self, param_id, n = 0, type_memory = 2):
payload_size = self.size_to_hex(1+2+2+2) #Payload: ID + param id + [n] + memory type
if type(param_id) == str:
hex_id = self.double_to_hex(ListParameters.index(param_id))
if type(param_id) == int:
hex_id = self.double_to_hex(param_id)
hex_n = self.double_to_hex(n)
hex_type = self.double_to_hex(type_memory)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('save_param_eeprom'))+hex_id+hex_n+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(6)
if reply_msg[4] == 8:
print('Invalid parameter')
return reply_msg
def load_param_eeprom(self, param_id, n = 0, type_memory = 2):
payload_size = self.size_to_hex(1+2+2+2) #Payload: ID + param id + [n] + memory type
if type(param_id) == str:
hex_id = self.double_to_hex(ListParameters.index(param_id))
if type(param_id) == int:
hex_id = self.double_to_hex(param_id)
hex_n = self.double_to_hex(n)
hex_type = self.double_to_hex(type_memory)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('load_param_eeprom'))+hex_id+hex_n+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(6)
if reply_msg[4] == 8:
print('Invalid parameter')
return reply_msg
def save_param_bank(self, type_memory = 2):
payload_size = self.size_to_hex(1+2) #Payload: ID + memory type
hex_type = self.double_to_hex(type_memory)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('save_param_bank'))+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def load_param_bank(self, type_memory = 2):
payload_size = self.size_to_hex(1+2) #Payload: ID + memory type
hex_type = self.double_to_hex(type_memory)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('load_param_bank'))+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def set_param_bank(self, param_file):
fbp_param_list = []
with open(param_file,newline='') as f:
reader = csv.reader(f)
for row in reader:
fbp_param_list.append(row)
for param in fbp_param_list:
if str(param[0]) == 'PS_Name':
print(str(param[0]) + "[0]: " + str(param[1]))
print(self.set_ps_name(str(param[1])))
else:
for n in range(64):
try:
print(str(param[0]) + "["+ str(n) + "]: " + str(param[n+1]))
print(self.set_param(str(param[0]),n,float(param[n+1])))
except:
break
#self.save_param_bank()
def get_param_bank(self, list_param = ListParameters, timeout = 0.5, print_modules = True):
timeout_old = self.ser.timeout
#self.ser.timeout = 0.05
param_bank = []
for param_name in list_param:
param_row = [param_name]
for n in range(64):
if param_name == 'PS_Name':
p = self.get_ps_name()
param_row.append(p)
#if(print_modules):
#print('PS_Name: ' + p)
self.ser.timeout = timeout
break
else:
p = self.get_param(param_name,n)
if math.isnan(p):
break
param_row.append(p)
#if(print_modules):
#print(param_name + "[" + str(n) + "]: " + str(p))
if(print_modules):
print(param_row)
param_bank.append(param_row)
self.ser.timeout = timeout_old
return param_bank
def store_param_bank_csv(self, bank):
        filename = input('Enter the file name: ')
with open( filename + '.csv', 'w', newline='') as f:
writer = csv.writer(f, delimiter=',')
for param_row in bank:
writer.writerow(param_row)
def enable_onboard_eeprom(self):
self.set_param('Enable_Onboard_EEPROM',0,0)
self.save_param_eeprom('Enable_Onboard_EEPROM',0,2)
def disable_onboard_eeprom(self):
self.set_param('Enable_Onboard_EEPROM',0,1)
self.save_param_eeprom('Enable_Onboard_EEPROM',0,2)
def set_dsp_coeffs(self, dsp_class, dsp_id, coeffs_list = [0,0,0,0,0,0,0,0,0,0,0,0]):
coeffs_list_full = self.format_list_size(coeffs_list, NUM_MAX_COEFFS_DSP)
payload_size = self.size_to_hex(1+2+2+4*NUM_MAX_COEFFS_DSP)
hex_dsp_class= self.double_to_hex(dsp_class)
hex_dsp_id = self.double_to_hex(dsp_id)
hex_coeffs = self.float_list_to_hex(coeffs_list_full)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_dsp_coeffs'))+hex_dsp_class+hex_dsp_id+hex_coeffs
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def get_dsp_coeff(self, dsp_class, dsp_id, coeff):
payload_size = self.size_to_hex(1+2+2+2)
hex_dsp_class= self.double_to_hex(dsp_class)
hex_dsp_id = self.double_to_hex(dsp_id)
hex_coeff = self.double_to_hex(coeff)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('get_dsp_coeff'))+hex_dsp_class+hex_dsp_id+hex_coeff
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.reset_input_buffer()
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(9)
#print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def save_dsp_coeffs_eeprom(self, dsp_class, dsp_id, type_memory = 2):
payload_size = self.size_to_hex(1+2+2+2)
hex_dsp_class= self.double_to_hex(dsp_class)
hex_dsp_id = self.double_to_hex(dsp_id)
hex_type = self.double_to_hex(type_memory)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('save_dsp_coeffs_eeprom'))+hex_dsp_class+hex_dsp_id+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def load_dsp_coeffs_eeprom(self, dsp_class, dsp_id, type_memory = 2):
payload_size = self.size_to_hex(1+2+2+2)
hex_dsp_class= self.double_to_hex(dsp_class)
hex_dsp_id = self.double_to_hex(dsp_id)
hex_type = self.double_to_hex(type_memory)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('load_dsp_coeffs_eeprom'))+hex_dsp_class+hex_dsp_id+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def save_dsp_modules_eeprom(self, type_memory = 2):
payload_size = self.size_to_hex(1+2) #Payload: ID + memory type
hex_type = self.double_to_hex(type_memory)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('save_dsp_modules_eeprom'))+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def load_dsp_modules_eeprom(self, type_memory = 2):
payload_size = self.size_to_hex(1+2) #Payload: ID + memory type
hex_type = self.double_to_hex(type_memory)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('load_dsp_modules_eeprom'))+hex_type
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def reset_udc(self):
        reply = input('\nThis command resets the UDC firmware and is therefore only executed while the power supply is turned off. \nIf you only want to reset interlocks, use the reset_interlocks() command instead. \n\nAre you sure you want to proceed? [Y/N]: ')
if reply == 'Y' or reply == 'y':
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('reset_udc'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
def run_bsmp_func(self,id_func,print_msg = 0):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(id_func)
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
reply_msg = self.ser.read(6)
if print_msg:
print(reply_msg)
return reply_msg
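    # Generic dispatcher for argument-less BSMP functions: the payload
    # carries only the function ID. Sketch (function name is illustrative):
    #
    #   drs.run_bsmp_func(ListFunc_v2_1.index('reset_counters'), print_msg = 1)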
def run_bsmp_func_all_ps(self,p_func,add_list,arg = None,delay = 0.5, print_reply = 1):
old_add = self.GetSlaveAdd()
for add in add_list:
self.SetSlaveAdd(add)
if arg == None:
r = p_func()
else:
r = p_func(arg)
if print_reply:
print('\n Add ' + str(add))
print(r)
time.sleep(delay)
self.SetSlaveAdd(old_add)
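    # Broadcast sketch: run reset_counters on the supplies at addresses 1-4;
    # the original slave address is restored afterwards:
    #
    #   drs.run_bsmp_func_all_ps(drs.reset_counters, [1, 2, 3, 4])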
def cfg_source_scope(self,p_source):
payload_size = self.size_to_hex(1+4) #Payload: ID + p_source
hex_op_mode = self.uint32_to_hex(p_source)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('cfg_source_scope'))+hex_op_mode
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def cfg_freq_scope(self,freq):
payload_size = self.size_to_hex(1+4) #Payload: ID + freq
hex_op_mode = self.float_to_hex(freq)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('cfg_freq_scope'))+hex_op_mode
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def cfg_duration_scope(self,duration):
payload_size = self.size_to_hex(1+4) #Payload: ID + duration
hex_op_mode = self.float_to_hex(duration)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('cfg_duration_scope'))+hex_op_mode
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def enable_scope(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('enable_scope'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def disable_scope(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('disable_scope'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def get_scope_vars(self):
print('\n### Scope Variables ###\n')
print('Frequency: ' + str((round(self.read_bsmp_variable(25,'float'),3))))
print('Duration: ' + str((round(self.read_bsmp_variable(26,'float'),3))))
print('Source Data: ' + str((round(self.read_bsmp_variable(27,'uint32_t'),3))))
def sync_pulse(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('sync_pulse'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def select_op_mode(self,op_mode):
payload_size = self.size_to_hex(1+2) #Payload: ID + enable
hex_op_mode = self.double_to_hex(ListOpMode_v2_1.index(op_mode))
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('select_op_mode'))+hex_op_mode
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def set_serial_termination(self,term_enable):
payload_size = self.size_to_hex(1+2) #Payload: ID + enable
hex_enable = self.double_to_hex(term_enable)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_serial_termination'))+hex_enable
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def set_command_interface(self,interface):
payload_size = self.size_to_hex(1+2) #Payload: ID + enable
hex_interface = self.double_to_hex(interface)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('set_command_interface'))+hex_interface
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def unlock_udc(self,password):
payload_size = self.size_to_hex(1+2) #Payload: ID + password
hex_password = self.double_to_hex(password)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('unlock_udc'))+hex_password
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def lock_udc(self,password):
payload_size = self.size_to_hex(1+2) #Payload: ID + password
hex_password = self.double_to_hex(password)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('lock_udc'))+hex_password
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def reset_counters(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('reset_counters'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def cfg_siggen(self,sig_type,num_cycles,freq,amplitude,offset,aux0,aux1,aux2,aux3):
payload_size = self.size_to_hex(1+2+2+4+4+4+4*4)
hex_sig_type = self.double_to_hex(ListSigGenTypes_v2_1.index(sig_type))
hex_num_cycles = self.double_to_hex(num_cycles)
hex_freq = self.float_to_hex(freq)
hex_amplitude = self.float_to_hex(amplitude)
hex_offset = self.float_to_hex(offset)
hex_aux0 = self.float_to_hex(aux0)
hex_aux1 = self.float_to_hex(aux1)
hex_aux2 = self.float_to_hex(aux2)
hex_aux3 = self.float_to_hex(aux3)
send_packet = self.ComFunction + payload_size + self.index_to_hex(ListFunc_v2_1.index('cfg_siggen')) + hex_sig_type + hex_num_cycles + hex_freq + hex_amplitude + hex_offset + hex_aux0 + hex_aux1 + hex_aux2 + hex_aux3
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def set_siggen(self,freq,amplitude,offset):
payload_size = self.size_to_hex(1+4+4+4)
hex_freq = self.float_to_hex(freq)
hex_amplitude = self.float_to_hex(amplitude)
hex_offset = self.float_to_hex(offset)
send_packet = self.ComFunction + payload_size + self.index_to_hex(ListFunc_v2_1.index('set_siggen')) + hex_freq + hex_amplitude + hex_offset
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def enable_siggen(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size + self.index_to_hex(ListFunc_v2_1.index('enable_siggen'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def disable_siggen(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size + self.index_to_hex(ListFunc_v2_1.index('disable_siggen'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def cfg_wfmref(self, idx, sync_mode, frequency, gain = 1, offset = 0):
payload_size = self.size_to_hex(1+2+2+4+4+4) #Payload: ID + idx + sync_mode + frequency + gain + offset
hex_idx = self.double_to_hex(idx)
hex_mode = self.double_to_hex(sync_mode)
hex_freq = self.float_to_hex(frequency)
hex_gain = self.float_to_hex(gain)
hex_offset = self.float_to_hex(offset)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('cfg_wfmref'))+hex_idx+hex_mode+hex_freq+hex_gain+hex_offset
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def select_wfmref(self,idx):
payload_size = self.size_to_hex(1+2) #Payload: ID + idx
hex_idx = self.double_to_hex(idx)
send_packet = self.ComFunction+payload_size+self.index_to_hex(ListFunc_v2_1.index('select_wfmref'))+hex_idx
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def reset_wfmref(self):
payload_size = self.size_to_hex(1) #Payload: ID
send_packet = self.ComFunction+payload_size + self.index_to_hex(ListFunc_v2_1.index('reset_wfmref'))
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(6)
def get_wfmref_vars(self,curve_id):
print('\n### WfmRef ' + str(curve_id) + ' Variables ###\n')
print('Length: ' + str((round(self.read_bsmp_variable(20+curve_id*3,'uint32_t'),3)-round(self.read_bsmp_variable(19+curve_id*3,'uint32_t'),3))/2+1))
print('Index: ' + str((round(self.read_bsmp_variable(21+curve_id*3,'uint32_t'),3)-round(self.read_bsmp_variable(19+curve_id*3,'uint32_t'),3))/2+1))
print('WfmRef Selected: ' + str(round(self.read_bsmp_variable(14,'uint16_t'),3)))
print('Sync Mode: ' + str(round(self.read_bsmp_variable(15,'uint16_t'),3)))
print('Frequency: ' + str(round(self.read_bsmp_variable(16,'float'),3)) + " Hz")
print('Gain: ' + str(round(self.read_bsmp_variable(17,'float'),3)))
print('Offset: ' + str(round(self.read_bsmp_variable(18,'float'),3)))
def read_csv_file(self,filename, type = 'float'):
csv_list = []
with open(filename, newline = '') as f:
reader = csv.reader(f)
for row in reader:
if type == 'float':
row_converted = float(row[0])
elif type == 'string' or type == 'str':
row_converted = str(row[0])
csv_list.append(row_converted)
print('Length of list: ' + str(len(csv_list)) + '\n')
return csv_list
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
    Methods for reading BSMP variable values
    These methods return the double/float value of the corresponding variable
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
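    # All fixed-size reads below share the same BSMP reply framing:
    # address (B) + command (B) + payload size (H) + value + checksum (B).
    # Hence struct formats such as 'BBHfB' (9 bytes for a float) and 'BBHHB'
    # (7 bytes for a uint16), with the value at index 3 of the unpacked tuple.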
def read_bsmp_variable(self,id_var,type_var,print_msg = 0):
self.read_var(self.index_to_hex(id_var))
reply_msg = self.ser.read(typeSize[type_var])
if print_msg:
print(reply_msg)
val = struct.unpack(typeFormat[type_var],reply_msg)
return val[3]
def read_bsmp_variable_gen(self,id_var,size_bytes,print_msg = 0):
self.read_var(self.index_to_hex(id_var))
reply_msg = self.ser.read(size_bytes+5)
if print_msg:
print(reply_msg)
return reply_msg
def read_udc_arm_version(self):
self.read_var(self.index_to_hex(3))
reply_msg = self.ser.read(133)
val = struct.unpack('16s',reply_msg[4:20])
return val[0].decode('utf-8')
def read_udc_c28_version(self):
self.read_var(self.index_to_hex(3))
reply_msg = self.ser.read(133)
val = struct.unpack('16s',reply_msg[20:36])
return val[0].decode('utf-8')
def read_udc_version(self):
print('\n ARM: ' + self.read_udc_arm_version())
print(' C28: ' + self.read_udc_c28_version())
def Read_iLoad1(self):
self.read_var(self.index_to_hex(ListVar.index('iLoad1')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_iLoad2(self):
self.read_var(self.index_to_hex(ListVar.index('iLoad2')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_iMod1(self):
self.read_var(self.index_to_hex(ListVar.index('iMod1')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_iMod2(self):
self.read_var(self.index_to_hex(ListVar.index('iMod2')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_iMod3(self):
self.read_var(self.index_to_hex(ListVar.index('iMod3')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_iMod4(self):
self.read_var(self.index_to_hex(ListVar.index('iMod4')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_vLoad(self):
self.read_var(self.index_to_hex(ListVar.index('vLoad')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_vDCMod1(self):
self.read_var(self.index_to_hex(ListVar.index('vDCMod1')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_vDCMod2(self):
self.read_var(self.index_to_hex(ListVar.index('vDCMod2')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_vDCMod3(self):
self.read_var(self.index_to_hex(ListVar.index('vDCMod3')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_vDCMod4(self):
self.read_var(self.index_to_hex(ListVar.index('vDCMod4')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_vOutMod1(self):
self.read_var(self.index_to_hex(ListVar.index('vOutMod1')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_vOutMod2(self):
self.read_var(self.index_to_hex(ListVar.index('vOutMod2')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_vOutMod3(self):
self.read_var(self.index_to_hex(ListVar.index('vOutMod3')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_vOutMod4(self):
self.read_var(self.index_to_hex(ListVar.index('vOutMod4')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_temp1(self):
self.read_var(self.index_to_hex(ListVar.index('temp1')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_temp2(self):
self.read_var(self.index_to_hex(ListVar.index('temp2')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_temp3(self):
self.read_var(self.index_to_hex(ListVar.index('temp3')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_temp4(self):
self.read_var(self.index_to_hex(ListVar.index('temp4')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_ps_OnOff(self):
self.read_var(self.index_to_hex(ListVar.index('ps_OnOff')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_ps_OpMode(self):
self.read_var(self.index_to_hex(ListVar.index('ps_OpMode')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_ps_Remote(self):
self.read_var(self.index_to_hex(ListVar.index('ps_Remote')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_ps_OpenLoop(self):
self.read_var(self.index_to_hex(ListVar.index('ps_OpenLoop')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_ps_SoftInterlocks(self):
op_bin = 1
ActiveSoftInterlocks = []
        SoftInterlocksList = ['N/A', 'Load 1 overvoltage', 'N/A',\
'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A',\
                              'Load 2 overvoltage', 'N/A', \
'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A',\
                              'Load 3 overvoltage', 'N/A', \
'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A',\
                              'Load 4 overvoltage', 'N/A', \
'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A']
self.read_var(self.index_to_hex(ListVar.index('ps_SoftInterlocks')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHIB',reply_msg)
        print('Active soft interlocks:')
for i in range(len('{0:b}'.format(val[3]))):
if (val[3] & (op_bin << i)) == 2**i:
ActiveSoftInterlocks.append(SoftInterlocksList[i])
print(SoftInterlocksList[i])
print('---------------------------------------------------------------')
return val[3]
def Read_ps_HardInterlocks(self):
op_bin = 1
ActiveHardInterlocks = []
        HardInterlocksList = ['Load 1 overcurrent', 'N/A', \
                              'Module 1 DC-Link overvoltage', \
                              'Module 1 DC-Link undervoltage', \
                              'Module 1 DC-Link input relay fault', \
                              'Module 1 DC-Link input fuse fault', \
                              'Module 1 drivers fault', \
                              'Module 1 overtemperature', \
                              'Load 2 overcurrent', 'N/A', \
                              'Module 2 DC-Link overvoltage', \
                              'Module 2 DC-Link undervoltage', \
                              'Module 2 DC-Link input relay fault', \
                              'Module 2 DC-Link input fuse fault', \
                              'Module 2 drivers fault', \
                              'Module 2 overtemperature', \
                              'Load 3 overcurrent', 'N/A', \
                              'Module 3 DC-Link overvoltage', \
                              'Module 3 DC-Link undervoltage', \
                              'Module 3 DC-Link input relay fault', \
                              'Module 3 DC-Link input fuse fault', \
                              'Module 3 drivers fault', \
                              'Module 3 overtemperature', \
                              'Load 4 overcurrent', 'N/A', \
                              'Module 4 DC-Link overvoltage', \
                              'Module 4 DC-Link undervoltage', \
                              'Module 4 DC-Link input relay fault', \
                              'Module 4 DC-Link input fuse fault', \
                              'Module 4 drivers fault', \
                              'Module 4 overtemperature']
self.read_var(self.index_to_hex(ListVar.index('ps_HardInterlocks')))
reply_msg = self.ser.read(9)
print(reply_msg)
val = struct.unpack('BBHIB',reply_msg)
        print('Active hard interlocks:')
for i in range(len('{0:b}'.format(val[3]))):
if (val[3] & (op_bin << i)) == 2**i:
ActiveHardInterlocks.append(HardInterlocksList[i])
print(HardInterlocksList[i])
print('---------------------------------------------------------------')
return val[3]
def Read_iRef(self):
self.read_var(self.index_to_hex(ListVar.index('iRef')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_wfmRef_Gain(self):
self.read_var(self.index_to_hex(ListVar.index('wfmRef_Gain')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_wfmRef_Offset(self):
self.read_var(self.index_to_hex(ListVar.index('wfmRef_Offset')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_sigGen_Enable(self):
self.read_var(self.index_to_hex(ListVar.index('sigGen_Enable')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_sigGen_Type(self):
self.read_var(self.index_to_hex(ListVar.index('sigGen_Type')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_sigGen_Ncycles(self):
self.read_var(self.index_to_hex(ListVar.index('sigGen_Ncycles')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_sigGen_PhaseStart(self):
self.read_var(self.index_to_hex(ListVar.index('sigGen_PhaseStart')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_sigGen_PhaseEnd(self):
self.read_var(self.index_to_hex(ListVar.index('sigGen_PhaseEnd')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_sigGen_Freq(self):
self.read_var(self.index_to_hex(ListVar.index('sigGen_Freq')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_sigGen_Amplitude(self):
self.read_var(self.index_to_hex(ListVar.index('sigGen_Amplitude')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_sigGen_Offset(self):
self.read_var(self.index_to_hex(ListVar.index('sigGen_Offset')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_sigGen_Aux(self):
self.read_var(self.index_to_hex(ListVar.index('sigGen_Aux')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_dp_ID(self):
self.read_var(self.index_to_hex(ListVar.index('dp_ID')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_dp_Class(self):
self.read_var(self.index_to_hex(ListVar.index('dp_Class')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_dp_Coeffs(self):
self.read_var(self.index_to_hex(ListVar.index('dp_Coeffs')))
reply_msg = self.ser.read(69)
val = struct.unpack('BBHffffffffffffffffB',reply_msg)
return [val[3],val[4],val[5],val[6],val[7],val[8],val[9],val[10],val[11],val[12],val[13],val[14],val[15],val[16],val[17],val[18]]
def Read_ps_Model(self):
self.read_var(self.index_to_hex(ListVar.index('ps_Model')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val
def read_ps_model(self):
reply_msg = self.Read_ps_Model()
return ListPSModels[reply_msg[3]]
def Read_wfmRef_PtrBufferStart(self):
self.read_var(self.index_to_hex(ListVar.index('wfmRef_PtrBufferStart')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHIB',reply_msg)
return val[3]
def Read_wfmRef_PtrBufferEnd(self):
self.read_var(self.index_to_hex(ListVar.index('wfmRef_PtrBufferEnd')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHIB',reply_msg)
return val[3]
def Read_wfmRef_PtrBufferK(self):
self.read_var(self.index_to_hex(ListVar.index('wfmRef_PtrBufferK')))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHIB',reply_msg)
return val[3]
def Read_wfmRef_SyncMode(self):
self.read_var(self.index_to_hex(ListVar.index('wfmRef_SyncMode')))
reply_msg = self.ser.read(7)
val = struct.unpack('BBHHB',reply_msg)
return val[3]
def Read_iRef1(self):
self.read_var(self.index_to_hex(45))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_iRef2(self):
self.read_var(self.index_to_hex(46))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_iRef3(self):
self.read_var(self.index_to_hex(47))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_iRef4(self):
self.read_var(self.index_to_hex(48))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
def Read_counterSetISlowRefx4(self):
self.read_var(self.index_to_hex(49))
reply_msg = self.ser.read(9)
val = struct.unpack('BBHfB',reply_msg)
return val[3]
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
    Methods for writing BSMP variable values
    These methods return the raw bytes of the reply message
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
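    # The 5-byte read after each write below is assumed to be the acknowledge
    # frame: address (B) + command (B) + payload size (H) + checksum (B),
    # consistent with the framing used elsewhere in this module.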
def Write_sigGen_Freq(self,float_value):
hex_float = self.float_to_hex(float_value)
send_packet = self.ComWriteVar+self.WriteFloatSizePayload+self.index_to_hex(ListVar.index('sigGen_Freq'))+hex_float
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Write_sigGen_Amplitude(self,float_value):
hex_float = self.float_to_hex(float_value)
send_packet = self.ComWriteVar+self.WriteFloatSizePayload+self.index_to_hex(ListVar.index('sigGen_Amplitude'))+hex_float
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Write_sigGen_Offset(self,float_value):
hex_float = self.float_to_hex(float_value)
send_packet = self.ComWriteVar+self.WriteFloatSizePayload+self.index_to_hex(ListVar.index('sigGen_Offset'))+hex_float
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Write_sigGen_Aux(self,float_value):
hex_float = self.float_to_hex(float_value)
send_packet = self.ComWriteVar+self.WriteFloatSizePayload+self.index_to_hex(ListVar.index('sigGen_Aux'))+hex_float
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Write_dp_ID(self,double_value):
hex_double = self.double_to_hex(double_value)
send_packet = self.ComWriteVar+self.WriteDoubleSizePayload+self.index_to_hex(ListVar.index('dp_ID'))+hex_double
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Write_dp_Class(self,double_value):
hex_double = self.double_to_hex(double_value)
send_packet = self.ComWriteVar+self.WriteDoubleSizePayload+self.index_to_hex(ListVar.index('dp_Class'))+hex_double
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Write_dp_Coeffs(self,list_float):
hex_float_list = []
#list_full = list_float[:]
#while(len(list_full) < self.DP_MODULE_MAX_COEFF):
# list_full.append(0)
list_full = [0 for i in range(self.DP_MODULE_MAX_COEFF)]
list_full[:len(list_float)] = list_float[:]
for float_value in list_full:
hex_float = self.float_to_hex(float(float_value))
hex_float_list.append(hex_float)
str_float_list = ''.join(hex_float_list)
payload_size = self.size_to_hex(1+4*self.DP_MODULE_MAX_COEFF) #Payload: ID + 16floats
send_packet = self.ComWriteVar+payload_size+self.index_to_hex(ListVar.index('dp_Coeffs'))+str_float_list
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
    Methods for writing and reading BSMP curves
    These methods return the raw bytes of the reply message
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
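    # Curve transfers are block-oriented. As noted in the inline comments
    # below, frames follow: address + command + size + curve ID + block index
    # (big-endian uint16) + float data + checksum.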
def Send_wfmRef_Curve(self,block_idx,data):
block_hex = struct.pack('>H',block_idx).decode('ISO-8859-1')
val = []
for k in range(0,len(data)):
val.append(self.float_to_hex(float(data[k])))
payload_size = struct.pack('>H', (len(val)*4)+3).decode('ISO-8859-1')
curva_hex = ''.join(val)
send_packet = self.ComSendWfmRef+payload_size+self.index_to_hex(ListCurv.index('wfmRef_Curve'))+block_hex+curva_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Recv_wfmRef_Curve(self,block_idx):
block_hex = struct.pack('>H',block_idx).decode('ISO-8859-1')
payload_size = self.size_to_hex(1+2) #Payload: ID+Block_index
send_packet = self.ComRequestCurve+payload_size+self.index_to_hex(ListCurv.index('wfmRef_Curve'))+block_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
recv_msg = self.ser.read(1+1+2+1+2+8192+1) #Address+Command+Size+ID+Block_idx+data+checksum
val = []
for k in range(7,len(recv_msg)-1,4):
val.append(struct.unpack('f',recv_msg[k:k+4]))
return val
def Recv_samplesBuffer(self):
block_hex = struct.pack('>H',0).decode('ISO-8859-1')
payload_size = self.size_to_hex(1+2) #Payload: ID+Block_index
send_packet = self.ComRequestCurve+payload_size+self.index_to_hex(ListCurv.index('samplesBuffer'))+block_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
recv_msg = self.ser.read(1+1+2+1+2+16384+1) #Address+Command+Size+ID+Block_idx+data+checksum
val = []
try:
for k in range(7,len(recv_msg)-1,4):
val.extend(struct.unpack('f',recv_msg[k:k+4]))
except:
pass
return val
def Send_fullwfmRef_Curve(self,block_idx,data):
block_hex = struct.pack('>H',block_idx).decode('ISO-8859-1')
val = []
for k in range(0,len(data)):
val.append(self.float_to_hex(float(data[k])))
payload_size = struct.pack('>H', (len(val)*4)+3).decode('ISO-8859-1')
curva_hex = ''.join(val)
send_packet = self.ComSendWfmRef+payload_size+self.index_to_hex(ListCurv.index('fullwfmRef_Curve'))+block_hex+curva_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def Recv_fullwfmRef_Curve(self,block_idx):
block_hex = struct.pack('>H',block_idx).decode('ISO-8859-1')
payload_size = self.size_to_hex(1+2) #Payload: ID+Block_index
send_packet = self.ComRequestCurve+payload_size+self.index_to_hex(ListCurv.index('fullwfmRef_Curve'))+block_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
recv_msg = self.ser.read(1+1+2+1+2+16384+1) #Address+Command+Size+ID+Block_idx+data+checksum
val = []
for k in range(7,len(recv_msg)-1,4):
val.append(struct.unpack('f',recv_msg[k:k+4]))
return val
def Recv_samplesBuffer_blocks(self,block_idx):
block_hex = struct.pack('>H',block_idx).decode('ISO-8859-1')
payload_size = self.size_to_hex(1+2) #Payload: ID+Block_index
send_packet = self.ComRequestCurve+payload_size+self.index_to_hex(ListCurv.index('samplesBuffer_blocks'))+block_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
#t0 = time.time()
self.ser.write(send_msg.encode('ISO-8859-1'))
recv_msg = self.ser.read(1+1+2+1+2+1024+1) #Address+Command+Size+ID+Block_idx+data+checksum
#print(time.time()-t0)
#print(recv_msg)
val = []
for k in range(7,len(recv_msg)-1,4):
val.extend(struct.unpack('f',recv_msg[k:k+4]))
return val
def Recv_samplesBuffer_allblocks(self):
buff = []
#self.DisableSamplesBuffer()
for i in range(0,16):
#t0 = time.time()
buff.extend(self.Recv_samplesBuffer_blocks(i))
#print(time.time()-t0)
#self.EnableSamplesBuffer()
return buff
def read_curve_block(self,curve_id,block_id):
block_hex = struct.pack('>H',block_id).decode('ISO-8859-1')
payload_size = self.size_to_hex(1+2) #Payload: curve_id + block_id
send_packet = self.ComRequestCurve+payload_size+self.index_to_hex(curve_id)+block_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
#t0 = time.time()
self.ser.reset_input_buffer()
self.ser.write(send_msg.encode('ISO-8859-1'))
recv_msg = self.ser.read(1+1+2+1+2+size_curve_block[curve_id]+1) #Address+Command+Size+ID+Block_idx+data+checksum
#print(time.time()-t0)
#print(recv_msg)
val = []
for k in range(7,len(recv_msg)-1,4):
val.extend(struct.unpack('f',recv_msg[k:k+4]))
return val
def write_curve_block(self,curve_id,block_id,data):
block_hex = struct.pack('>H',block_id).decode('ISO-8859-1')
val = []
for k in range(0,len(data)):
val.append(self.float_to_hex(float(data[k])))
payload_size = struct.pack('>H', (len(val)*4)+3).decode('ISO-8859-1')
curva_hex = ''.join(val)
send_packet = self.ComSendWfmRef+payload_size+self.index_to_hex(curve_id)+block_hex+curva_hex
send_msg = self.checksum(self.SlaveAdd+send_packet)
self.ser.write(send_msg.encode('ISO-8859-1'))
return self.ser.read(5)
def write_wfmref(self,curve,data):
#curve = ListCurv_v2_1.index('wfmref')
block_size = int(size_curve_block[curve]/4)
print(block_size)
blocks = [data[x:x+block_size] for x in range(0, len(data), block_size)]
ps_status = self.read_ps_status()
wfmref_selected = self.read_bsmp_variable(14,'uint16_t')
if( (wfmref_selected == curve) and (ps_status['state'] == 'RmpWfm' or ps_status['state'] == 'MigWfm') ):
print("\n The specified curve ID is currently selected and PS is on " + ps_status['state'] + " state. Choose a different curve ID to proceed.\n")
else:
for block_id in range(len(blocks)):
self.write_curve_block(curve, block_id, blocks[block_id])
print(blocks[block_id])
def read_buf_samples_ctom(self):
buf = []
curve_id = ListCurv_v2_1.index('buf_samples_ctom')
ps_status = self.read_ps_status()
if ps_status['model'] == 'FBP':
for i in range(num_blocks_curves_fbp[curve_id]):
buf.extend(self.read_curve_block(curve_id,i))
else:
for i in range(num_blocks_curves_fax[curve_id]):
buf.extend(self.read_curve_block(curve_id,i))
return buf
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
                         Serial Functions
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def Connect(self,port='COM2',baud=6000000):
try:
SerialDRS.ser = serial.Serial(port,baud,timeout=1) #port format should be 'COM'+number
return True
except:
return False
def Disconnect(self):
if (self.ser.isOpen()):
try:
self.ser.close()
return True
except:
return False
def SetSlaveAdd(self,address):
self.SlaveAdd = struct.pack('B',address).decode('ISO-8859-1')
def GetSlaveAdd(self):
return struct.unpack('B',self.SlaveAdd.encode())[0]
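    # Typical session sketch (hedged example; the port name is an assumption):
    #   drs = SerialDRS()
    #   if drs.Connect('COM2'):       # or a device path such as '/dev/ttyUSB0'
    #       drs.SetSlaveAdd(1)
    #       drs.read_vars_common()
    #       drs.Disconnect()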
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
======================================================================
                         Auxiliary Functions
======================================================================
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def read_vars_common(self, print_all = False):
loop_state = ["Closed Loop","Open Loop"]
ps_status = self.read_ps_status()
if ps_status['open_loop'] == 0:
if (ps_status['model'] == 'FAC_ACDC') or (ps_status['model'] == 'FAC_2S_ACDC') or (ps_status['model'] == 'FAC_2P4S_ACDC'):
setpoint_unit = " V"
else:
setpoint_unit = " A"
else:
setpoint_unit = " %"
print("\nPS Model: " + ps_status['model'])
print("State: " + ps_status['state'])
print("Loop State: " + loop_state[ps_status['open_loop']])
print("\nSetpoint: " + str(round(self.read_bsmp_variable(1,'float'),3)) + setpoint_unit)
print("Reference: " + str(round(self.read_bsmp_variable(2,'float'),3)) + setpoint_unit)
if print_all:
print(self.read_ps_status())
print("\nCounter set_slowref: " + str(round(self.read_bsmp_variable(4,'uint32_t'),3)))
print("Counter sync pulse: " + str(round(self.read_bsmp_variable(5,'uint32_t'),3)))
self.get_siggen_vars()
self.get_wfmref_vars(0)
self.get_wfmref_vars(1)
self.get_scope_vars()
def decode_interlocks(self,reg_interlocks,list_interlocks):
active_interlocks = []
for i in range(32):
if(reg_interlocks & (1 << i)):
active_interlocks.append(list_interlocks[i])
print('\t' + list_interlocks[i])
return active_interlocks
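    # Worked example: reg_interlocks = 0b0101 has bits 0 and 2 set, so the
    # names at indices 0 and 2 of list_interlocks are printed and returned.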
def read_vars_fbp(self, n = 1, dt = 0.5):
try:
for i in range(n):
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fbp_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fbp_hard_interlocks)
print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
print("Load Voltage: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " V")
print("Load Resistance: " + str(abs(round(self.read_bsmp_variable(34,'float') / self.read_bsmp_variable(33,'float'),3))) + " Ohm")
print("Load Power: " + str(abs(round(self.read_bsmp_variable(34,'float') * self.read_bsmp_variable(33,'float'),3))) + " W")
print("DC-Link Voltage: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " V")
print("Heat-Sink Temp: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " °C")
print("Duty-Cycle: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " %")
time.sleep(dt)
except:
pass
def read_vars_fbp_dclink(self, n = 1, dt = 0.5):
try:
for i in range(n):
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("\nHard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fbp_dclink_hard_interlocks)
print("\nModules status: " + str(round(self.read_bsmp_variable(33,'uint32_t'),3)))
print("DC-Link Voltage: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " V")
print("PS1 Voltage: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " V")
print("PS2 Voltage: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
print("PS3 Voltage: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " V")
print("Dig Pot Tap: " + str(round(self.read_bsmp_variable(38,'uint8_t'),3)))
time.sleep(dt)
except:
pass
def read_vars_fac_acdc(self, n = 1, dt = 0.5, iib = 1):
#try:
for i in range(n):
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_acdc_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_acdc_hard_interlocks)
iib_is_itlks = self.read_bsmp_variable(45,'uint32_t')
print("\nIIB IS Interlocks: " + str(iib_is_itlks))
if(iib_is_itlks):
self.decode_interlocks(iib_is_itlks, list_fac_acdc_iib_is_interlocks)
iib_is_alarms = self.read_bsmp_variable(46,'uint32_t')
print("IIB IS Alarms: " + str(iib_is_alarms))
if(iib_is_alarms):
self.decode_interlocks(iib_is_alarms, list_fac_acdc_iib_is_alarms)
iib_cmd_itlks = self.read_bsmp_variable(57,'uint32_t')
print("\nIIB Cmd Interlocks: " + str(iib_cmd_itlks))
if(iib_cmd_itlks):
self.decode_interlocks(iib_cmd_itlks, list_fac_acdc_iib_cmd_interlocks)
iib_cmd_alarms = self.read_bsmp_variable(58,'uint32_t')
print("IIB Cmd Alarms: " + str(iib_cmd_alarms))
if(iib_cmd_alarms):
self.decode_interlocks(iib_cmd_alarms, list_fac_acdc_iib_cmd_alarms)
print("\nCapBank Voltage: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " V")
print("Rectifier Current: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Duty-Cycle: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " %")
if(iib):
print("\nIIB IS Input Current: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " A")
print("IIB IS Input Voltage: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " V")
print("IIB IS IGBT Temp: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " °C")
print("IIB IS Driver Voltage: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " V")
print("IIB IS Driver Current: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " A")
print("IIB IS Inductor Temp: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " °C")
print("IIB IS Heat-Sink Temp: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " °C")
print("IIB IS Board Temp: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " °C")
print("IIB IS Board RH: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " %")
print("IIB IS Interlocks: " + str(round(self.read_bsmp_variable(45,'uint32_t'),3)))
print("IIB IS Alarms: " + str(round(self.read_bsmp_variable(46,'uint32_t'),3)))
print("\nIIB Cmd Load Voltage: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " V")
print("IIB Cmd CapBank Voltage: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " V")
print("IIB Cmd Rectifier Inductor Temp: " + str(round(self.read_bsmp_variable(49,'float'),3)) + " °C")
print("IIB Cmd Rectifier Heat-Sink Temp: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " °C")
print("IIB Cmd External Boards Voltage: " + str(round(self.read_bsmp_variable(51,'float'),3)) + " V")
print("IIB Cmd Auxiliary Board Current: " + str(round(self.read_bsmp_variable(52,'float'),3)) + " A")
print("IIB Cmd IDB Board Current: " + str(round(self.read_bsmp_variable(53,'float'),3)) + " A")
print("IIB Cmd Ground Leakage Current: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " A")
print("IIB Cmd Board Temp: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " °C")
print("IIB Cmd Board RH: " + str(round(self.read_bsmp_variable(56,'float'),3)) + " %")
print("IIB Cmd Interlocks: " + str(round(self.read_bsmp_variable(57,'uint32_t'),3)))
print("IIB Cmd Alarms: " + str(round(self.read_bsmp_variable(58,'uint32_t'),3)))
time.sleep(dt)
#except:
# pass
def read_vars_fac_dcdc(self, n = 1, dt = 0.5, iib = 1):
try:
for i in range(n):
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
print("\nSync Pulse Counter: " + str(round(self.read_bsmp_variable(5,'uint32_t'),3)))
print("WfmRef Index: " + str( (round(self.read_bsmp_variable(20,'uint32_t'),3) - round(self.read_bsmp_variable(18,'uint32_t'),3))/2 + 1))
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_dcdc_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_dcdc_hard_interlocks)
iib_itlks = self.read_bsmp_variable(51,'uint32_t')
print("\nIIB Interlocks: " + str(iib_itlks))
if(iib_itlks):
self.decode_interlocks(iib_itlks, list_fac_dcdc_iib_interlocks)
iib_alarms = self.read_bsmp_variable(52,'uint32_t')
print("IIB Alarms: " + str(iib_alarms))
if(iib_alarms):
self.decode_interlocks(iib_alarms, list_fac_dcdc_iib_alarms)
print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
print("Load Current DCCT 1: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Load Current DCCT 2: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " A")
print("\nCapBank Voltage: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
print("\nDuty-Cycle: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " %")
if(iib):
print("\nIIB CapBank Voltage: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " V")
print("IIB Input Current: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " A")
print("IIB Output Current: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " A")
print("IIB IGBT Leg 1 Temp: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " °C")
print("IIB IGBT Leg 2 Temp: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " °C")
print("IIB Inductor Temp: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " °C")
print("IIB Heat-Sink Temp: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " °C")
print("IIB Driver Voltage: " + str(round(self.read_bsmp_variable(45,'float'),3)) + " V")
print("IIB Driver Current 1: " + str(round(self.read_bsmp_variable(46,'float'),3)) + " A")
print("IIB Driver Current 2: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " A")
print("IIB Ground Leakage Current: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " A")
print("IIB Board Temp: " + str(round(self.read_bsmp_variable(49,'float'),3)) + " °C")
print("IIB Board RH: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " %")
print("IIB Interlocks: " + str(round(self.read_bsmp_variable(51,'uint32_t'),3)))
print("IIB Alarms: " + str(round(self.read_bsmp_variable(52,'uint32_t'),3)))
time.sleep(dt)
except:
pass
def read_vars_fac_dcdc_ema(self, n = 1, dt = 0.5, iib = 0):
try:
for i in range(n):
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_dcdc_ema_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_dcdc_ema_hard_interlocks)
iib_itlks = self.read_bsmp_variable(49,'uint32_t')
print("IIB Interlocks: " + str(iib_itlks))
if(iib_itlks):
self.decode_interlocks(iib_itlks, list_fac_dcdc_ema_iib_interlocks)
iib_alarms = self.read_bsmp_variable(50,'uint32_t')
print("IIB Alarms: " + str(iib_alarms))
if(iib_alarms):
self.decode_interlocks(iib_alarms, list_fac_dcdc_ema_iib_alarms)
print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)))
print("DC-Link Voltage: " + str(round(self.read_bsmp_variable(34,'float'),3)))
print("\nDuty-Cycle: " + str(round(self.read_bsmp_variable(35,'float'),3)))
if(iib):
print("\nIIB Input Voltage: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
print("IIB Input Current: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " A")
print("IIB Output Current: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " A")
print("IIB IGBT 1 Temp: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " °C")
print("IIB IGBT 2 Temp: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " °C")
print("IIB Inductor Temp: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " °C")
print("IIB Heat-Sink Temp: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " °C")
print("IIB Driver Voltage: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " V")
print("IIB Driver Current 1: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " A")
print("IIB Driver Current 2: " + str(round(self.read_bsmp_variable(45,'float'),3)) + " A")
print("IIB Ground Leakage Current: " + str(round(self.read_bsmp_variable(46,'float'),3)) + " A")
print("IIB Board Temp: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " °C")
print("IIB Board RH: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " %")
print("IIB Interlocks: " + str(round(self.read_bsmp_variable(49,'uint32_t'),3)))
print("IIB Alarms: " + str(round(self.read_bsmp_variable(50,'uint32_t'),3)))
time.sleep(dt)
except:
pass
def read_vars_fac_2s_acdc(self, n = 1, add_mod_a = 2, dt = 0.5, iib = 0):
old_add = self.GetSlaveAdd()
try:
for i in range(n):
self.SetSlaveAdd(add_mod_a)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
print('\n *** MODULE A ***')
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_2s_acdc_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_2s_acdc_hard_interlocks)
iib_is_itlks = self.read_bsmp_variable(45,'uint32_t')
print("\nIIB IS Interlocks: " + str(iib_is_itlks))
if(iib_is_itlks):
self.decode_interlocks(iib_is_itlks, list_fac_2s_acdc_iib_is_interlocks)
iib_is_alarms = self.read_bsmp_variable(46,'uint32_t')
print("IIB IS Alarms: " + str(iib_is_alarms))
if(iib_is_alarms):
self.decode_interlocks(iib_is_alarms, list_fac_2s_acdc_iib_is_alarms)
iib_cmd_itlks = self.read_bsmp_variable(57,'uint32_t')
print("\nIIB Cmd Interlocks: " + str(iib_cmd_itlks))
if(iib_cmd_itlks):
self.decode_interlocks(iib_cmd_itlks, list_fac_2s_acdc_iib_cmd_interlocks)
iib_cmd_alarms = self.read_bsmp_variable(58,'uint32_t')
print("IIB Cmd Alarms: " + str(iib_cmd_alarms))
if(iib_cmd_alarms):
self.decode_interlocks(iib_cmd_alarms, list_fac_2s_acdc_iib_cmd_alarms)
print("\nCapBank Voltage: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " V")
print("Rectifier Current: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Duty-Cycle: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " %")
if(iib):
print("\nIIB IS Input Current: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " A")
print("IIB IS Input Voltage: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " V")
print("IIB IS IGBT Temp: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " °C")
print("IIB IS Driver Voltage: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " V")
print("IIB IS Driver Current: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " A")
print("IIB IS Inductor Temp: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " °C")
print("IIB IS Heat-Sink Temp: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " °C")
print("IIB IS Board Temp: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " °C")
print("IIB IS Board RH: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " %")
print("IIB IS Interlocks: " + str(round(self.read_bsmp_variable(45,'uint32_t'),3)))
print("IIB IS Alarms: " + str(round(self.read_bsmp_variable(46,'uint32_t'),3)))
print("\nIIB Cmd Load Voltage: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " V")
print("IIB Cmd CapBank Voltage: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " V")
print("IIB Cmd Rectifier Inductor Temp: " + str(round(self.read_bsmp_variable(49,'float'),3)) + " °C")
print("IIB Cmd Rectifier Heat-Sink Temp: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " °C")
print("IIB Cmd External Boards Voltage: " + str(round(self.read_bsmp_variable(51,'float'),3)) + " V")
print("IIB Cmd Auxiliary Board Current: " + str(round(self.read_bsmp_variable(52,'float'),3)) + " A")
print("IIB Cmd IDB Board Current: " + str(round(self.read_bsmp_variable(53,'float'),3)) + " A")
print("IIB Cmd Ground Leakage Current: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " A")
print("IIB Cmd Board Temp: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " °C")
print("IIB Cmd Board RH: " + str(round(self.read_bsmp_variable(56,'float'),3)) + " %")
print("IIB Cmd Interlocks: " + str(round(self.read_bsmp_variable(57,'uint32_t'),3)))
print("IIB Cmd Alarms: " + str(round(self.read_bsmp_variable(58,'uint32_t'),3)))
self.SetSlaveAdd(add_mod_a+1)
print('\n *** MODULE B ***')
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_2s_acdc_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_2s_acdc_hard_interlocks)
iib_is_itlks = self.read_bsmp_variable(45,'uint32_t')
print("\nIIB IS Interlocks: " + str(iib_is_itlks))
if(iib_is_itlks):
self.decode_interlocks(iib_is_itlks, list_fac_2s_acdc_iib_is_interlocks)
iib_is_alarms = self.read_bsmp_variable(46,'uint32_t')
print("IIB IS Alarms: " + str(iib_is_alarms))
if(iib_is_alarms):
self.decode_interlocks(iib_is_alarms, list_fac_2s_acdc_iib_is_alarms)
iib_cmd_itlks = self.read_bsmp_variable(57,'uint32_t')
print("\nIIB Cmd Interlocks: " + str(iib_cmd_itlks))
if(iib_cmd_itlks):
self.decode_interlocks(iib_cmd_itlks, list_fac_2s_acdc_iib_cmd_interlocks)
iib_cmd_alarms = self.read_bsmp_variable(58,'uint32_t')
print("IIB Cmd Alarms: " + str(iib_cmd_alarms))
if(iib_cmd_alarms):
self.decode_interlocks(iib_cmd_alarms, list_fac_2s_acdc_iib_cmd_alarms)
print("\nCapBank Voltage: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " V")
print("Rectifier Current: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Duty-Cycle: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " %")
if(iib):
print("\nIIB IS Input Current: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " A")
print("IIB IS Input Voltage: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " V")
print("IIB IS IGBT Temp: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " °C")
print("IIB IS Driver Voltage: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " V")
print("IIB IS Driver Current: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " A")
print("IIB IS Inductor Temp: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " °C")
print("IIB IS Heat-Sink Temp: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " °C")
print("IIB IS Board Temp: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " °C")
print("IIB IS Board RH: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " %")
print("IIB IS Interlocks: " + str(round(self.read_bsmp_variable(45,'uint32_t'),3)))
print("IIB IS Alarms: " + str(round(self.read_bsmp_variable(46,'uint32_t'),3)))
print("\nIIB Cmd Load Voltage: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " V")
print("IIB Cmd CapBank Voltage: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " V")
print("IIB Cmd Rectifier Inductor Temp: " + str(round(self.read_bsmp_variable(49,'float'),3)) + " °C")
print("IIB Cmd Rectifier Heat-Sink Temp: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " °C")
print("IIB Cmd External Boards Voltage: " + str(round(self.read_bsmp_variable(51,'float'),3)) + " V")
print("IIB Cmd Auxiliary Board Current: " + str(round(self.read_bsmp_variable(52,'float'),3)) + " A")
print("IIB Cmd IDB Board Current: " + str(round(self.read_bsmp_variable(53,'float'),3)) + " A")
print("IIB Cmd Ground Leakage Current: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " A")
print("IIB Cmd Board Temp: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " °C")
print("IIB Cmd Board RH: " + str(round(self.read_bsmp_variable(56,'float'),3)) + " %")
print("IIB Cmd Interlocks: " + str(round(self.read_bsmp_variable(57,'uint32_t'),3)))
print("IIB Cmd Alarms: " + str(round(self.read_bsmp_variable(58,'uint32_t'),3)))
time.sleep(dt)
self.SetSlaveAdd(old_add)
except:
self.SetSlaveAdd(old_add)
def read_vars_fac_2s_dcdc(self, n = 1, com_add = 1, dt = 0.5, iib = 0):
old_add = self.GetSlaveAdd()
iib_offset = 14*(iib-1)
try:
for i in range(n):
self.SetSlaveAdd(com_add)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
print("\nSync Pulse Counter: " + str(round(self.read_bsmp_variable(5,'uint32_t'),3)))
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_2s_dcdc_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_2s_dcdc_hard_interlocks)
print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
print("Load Current DCCT 1: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Load Current DCCT 2: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " A")
print("\nCapBank Voltage 1: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
print("CapBank Voltage 2: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " V")
print("\nDuty-Cycle 1: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " %")
print("Duty-Cycle 2: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " %")
if(iib):
print("\nIIB CapBank Voltage: " + str(round(self.read_bsmp_variable(40 + iib_offset,'float'),3)) + " V")
print("IIB Input Current: " + str(round(self.read_bsmp_variable(41 + iib_offset,'float'),3)) + " A")
print("IIB Output Current: " + str(round(self.read_bsmp_variable(42 + iib_offset,'float'),3)) + " A")
print("IIB IGBT Leg 1 Temp: " + str(round(self.read_bsmp_variable(43 + iib_offset,'float'),3)) + " °C")
print("IIB IGBT Leg 2 Temp: " + str(round(self.read_bsmp_variable(44 + iib_offset,'float'),3)) + " °C")
print("IIB Inductor Temp: " + str(round(self.read_bsmp_variable(45 + iib_offset,'float'),3)) + " °C")
print("IIB Heat-Sink Temp: " + str(round(self.read_bsmp_variable(46 + iib_offset,'float'),3)) + " °C")
print("IIB Driver Voltage: " + str(round(self.read_bsmp_variable(47 + iib_offset,'float'),3)) + " V")
print("IIB Driver Current 1: " + str(round(self.read_bsmp_variable(48 + iib_offset,'float'),3)) + " A")
print("IIB Driver Current 2: " + str(round(self.read_bsmp_variable(49 + iib_offset,'float'),3)) + " A")
print("IIB Board Temp: " + str(round(self.read_bsmp_variable(50 + iib_offset,'float'),3)) + " °C")
print("IIB Board RH: " + str(round(self.read_bsmp_variable(51 + iib_offset,'float'),3)) + " %")
iib_itlks = self.read_bsmp_variable(52 + iib_offset,'uint32_t')
print("\nIIB Interlocks: " + str(iib_itlks))
if(iib_itlks):
self.decode_interlocks(iib_itlks, list_fac_2s_dcdc_iib_interlocks)
iib_alarms = self.read_bsmp_variable(53 + iib_offset,'uint32_t')
print("IIB Alarms: " + str(iib_alarms))
if(iib_alarms):
self.decode_interlocks(iib_alarms, list_fac_2s_dcdc_iib_alarms)
time.sleep(dt)
self.SetSlaveAdd(old_add)
except:
self.SetSlaveAdd(old_add)
def read_vars_fac_2p4s_acdc(self, n = 1, add_mod_a = 1, dt = 0.5, iib = 0):
self.read_vars_fac_2s_acdc(n, add_mod_a, dt, iib)
def read_vars_fac_2p4s_dcdc(self, n = 1, com_add = 1, dt = 0.5, iib = 0):
old_add = self.GetSlaveAdd()
try:
for i in range(n):
self.SetSlaveAdd(com_add)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
print("\nSync Pulse Counter: " + str(round(self.read_bsmp_variable(5,'uint32_t'),3)))
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_2p4s_dcdc_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_2p4s_dcdc_hard_interlocks)
print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)))
print("Load Current DCCT 1: " + str(round(self.read_bsmp_variable(34,'float'),3)))
print("Load Current DCCT 2: " + str(round(self.read_bsmp_variable(35,'float'),3)))
print("\nArm Current 1: " + str(round(self.read_bsmp_variable(36,'float'),3)))
print("Arm Current 2: " + str(round(self.read_bsmp_variable(37,'float'),3)))
print("\nCapBank Voltage 1: " + str(round(self.read_bsmp_variable(38,'float'),3)))
print("CapBank Voltage 2: " + str(round(self.read_bsmp_variable(39,'float'),3)))
print("CapBank Voltage 3: " + str(round(self.read_bsmp_variable(40,'float'),3)))
print("CapBank Voltage 4: " + str(round(self.read_bsmp_variable(41,'float'),3)))
print("CapBank Voltage 5: " + str(round(self.read_bsmp_variable(42,'float'),3)))
print("CapBank Voltage 6: " + str(round(self.read_bsmp_variable(43,'float'),3)))
print("CapBank Voltage 7: " + str(round(self.read_bsmp_variable(44,'float'),3)))
print("CapBank Voltage 8: " + str(round(self.read_bsmp_variable(45,'float'),3)))
print("\nDuty-Cycle 1: " + str(round(self.read_bsmp_variable(46,'float'),3)))
print("Duty-Cycle 2: " + str(round(self.read_bsmp_variable(47,'float'),3)))
print("Duty-Cycle 3: " + str(round(self.read_bsmp_variable(48,'float'),3)))
print("Duty-Cycle 4: " + str(round(self.read_bsmp_variable(49,'float'),3)))
print("Duty-Cycle 5: " + str(round(self.read_bsmp_variable(50,'float'),3)))
print("Duty-Cycle 6: " + str(round(self.read_bsmp_variable(51,'float'),3)))
print("Duty-Cycle 7: " + str(round(self.read_bsmp_variable(52,'float'),3)))
print("Duty-Cycle 8: " + str(round(self.read_bsmp_variable(53,'float'),3)))
if(iib):
print("\nIIB CapBank Voltage: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " V")
print("IIB Input Current: " + str(round(self.read_bsmp_variable(55, 'float'),3)) + " A")
print("IIB Output Current: " + str(round(self.read_bsmp_variable(56,'float'),3)) + " A")
print("IIB IGBT Leg 1 Temp: " + str(round(self.read_bsmp_variable(57,'float'),3)) + " °C")
print("IIB IGBT Leg 2 Temp: " + str(round(self.read_bsmp_variable(58,'float'),3)) + " °C")
print("IIB Inductor Temp: " + str(round(self.read_bsmp_variable(59,'float'),3)) + " °C")
print("IIB Heat-Sink Temp: " + str(round(self.read_bsmp_variable(60,'float'),3)) + " °C")
print("IIB Driver Voltage: " + str(round(self.read_bsmp_variable(61,'float'),3)) + " V")
print("IIB Driver Current 1: " + str(round(self.read_bsmp_variable(62,'float'),3)) + " A")
print("IIB Driver Current 2: " + str(round(self.read_bsmp_variable(63,'float'),3)) + " A")
print("IIB Board Temp: " + str(round(self.read_bsmp_variable(64,'float'),3)) + " °C")
print("IIB Board RH: " + str(round(self.read_bsmp_variable(65,'float'),3)) + " %")
iib_itlks = self.read_bsmp_variable(66,'uint32_t')
print("\nIIB Interlocks: " + str(iib_itlks))
if(iib_itlks):
self.decode_interlocks(iib_itlks, list_fac_2p4s_dcdc_iib_interlocks)
iib_alarms = self.read_bsmp_variable(67,'uint32_t')
print("IIB Alarms: " + str(iib_alarms))
if(iib_alarms):
self.decode_interlocks(iib_alarms, list_fac_2p4s_dcdc_iib_alarms)
print("\nIIB CapBank Voltage: " + str(round(self.read_bsmp_variable(68,'float'),3)) + " V")
print("IIB Input Current: " + str(round(self.read_bsmp_variable(69,'float'),3)) + " A")
print("IIB Output Current: " + str(round(self.read_bsmp_variable(70,'float'),3)) + " A")
print("IIB IGBT Leg 1 Temp: " + str(round(self.read_bsmp_variable(71,'float'),3)) + " °C")
print("IIB IGBT Leg 2 Temp: " + str(round(self.read_bsmp_variable(72,'float'),3)) + " °C")
print("IIB Inductor Temp: " + str(round(self.read_bsmp_variable(73,'float'),3)) + " °C")
print("IIB Heat-Sink Temp: " + str(round(self.read_bsmp_variable(74,'float'),3)) + " °C")
print("IIB Driver Voltage: " + str(round(self.read_bsmp_variable(75,'float'),3)) + " V")
print("IIB Driver Current 1: " + str(round(self.read_bsmp_variable(76,'float'),3)) + " A")
print("IIB Driver Current 2: " + str(round(self.read_bsmp_variable(77,'float'),3)) + " A")
print("IIB Board Temp: " + str(round(self.read_bsmp_variable(78,'float'),3)) + " °C")
print("IIB Board RH: " + str(round(self.read_bsmp_variable(79,'float'),3)) + " %")
iib_itlks = self.read_bsmp_variable(80,'uint32_t')
print("\nIIB Interlocks: " + str(iib_itlks))
if(iib_itlks):
self.decode_interlocks(iib_itlks, list_fac_2p4s_dcdc_iib_interlocks)
iib_alarms = self.read_bsmp_variable(81,'uint32_t')
print("IIB Alarms: " + str(iib_alarms))
if(iib_alarms):
self.decode_interlocks(iib_alarms, list_fac_2p4s_dcdc_iib_alarms)
time.sleep(dt)
self.SetSlaveAdd(old_add)
except:
self.SetSlaveAdd(old_add)
def read_vars_fap(self, n = 1, com_add = 1, dt = 0.5, iib = 1):
old_add = self.GetSlaveAdd()
try:
for i in range(n):
self.SetSlaveAdd(com_add)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fap_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fap_hard_interlocks)
iib_itlks = self.read_bsmp_variable(56,'uint32_t')
print("\nIIB Interlocks: " + str(iib_itlks))
if(iib_itlks):
self.decode_interlocks(iib_itlks, list_fap_iib_interlocks)
iib_alarms = self.read_bsmp_variable(57,'uint32_t')
print("\nIIB Alarms: " + str(iib_alarms))
if(iib_alarms):
self.decode_interlocks(iib_alarms, list_fap_iib_alarms)
iload = self.read_bsmp_variable(33,'float')
print("\nLoad Current: " + str(round(iload,3)) + " A")
print("Load Current DCCT 1: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Load Current DCCT 2: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " A")
                if iload != 0:
print("\nLoad Resistance: " + str(abs(round(self.read_bsmp_variable(43,'float') / iload ,3))) + " Ohm")
else:
print("\nLoad Resistance: 0 Ohm")
print("Load Power: " + str(abs(round(self.read_bsmp_variable(43,'float') * self.read_bsmp_variable(33,'float'),3))) + " W")
print("\nDC-Link Voltage: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
print("\nIGBT 1 Current: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " A")
print("IGBT 2 Current: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " A")
print("\nIGBT 1 Duty-Cycle: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " %")
print("Differential Duty-Cycle: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " %")
if(iib):
print("\nIIB Input Voltage: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " V")
print("IIB Output Voltage: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " V")
print("IIB IGBT 1 Current: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " A")
print("IIB IGBT 2 Current: " + str(round(self.read_bsmp_variable(45,'float'),3)) + " A")
print("IIB IGBT 1 Temp: " + str(round(self.read_bsmp_variable(46,'float'),3)) + " °C")
print("IIB IGBT 2 Temp: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " °C")
print("IIB Driver Voltage: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " V")
print("IIB Driver Current 1: " + str(round(self.read_bsmp_variable(49,'float'),3)) + " A")
print("IIB Driver Current 2: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " A")
print("IIB Inductor Temp: " + str(round(self.read_bsmp_variable(51,'float'),3)) + " °C")
print("IIB Heat-Sink Temp: " + str(round(self.read_bsmp_variable(52,'float'),3)) + " °C")
print("IIB Ground Leakage Current: " + str(round(self.read_bsmp_variable(53,'float'),3)) + " A")
print("IIB Board Temp: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " °C")
print("IIB Board RH: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " %")
print("IIB Interlocks: " + str(round(self.read_bsmp_variable(56,'uint32_t'),3)))
print("IIB Alarms: " + str(round(self.read_bsmp_variable(57,'uint32_t'),3)))
time.sleep(dt)
self.SetSlaveAdd(old_add)
except:
self.SetSlaveAdd(old_add)
def read_vars_fap_4p(self, n = 1, com_add = 1, dt = 0.5, iib = 0):
old_add = self.GetSlaveAdd()
iib_offset = 16*(iib-1)
try:
for i in range(n):
self.SetSlaveAdd(com_add)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fap_4p_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fap_4p_hard_interlocks)
for j in range(4):
iib_itlks = self.read_bsmp_variable(72 + j*16,'uint32_t')
print("\nIIB " + str(j+1) + " Interlocks: " + str(iib_itlks))
if(iib_itlks):
self.decode_interlocks(iib_itlks, list_fap_4p_iib_interlocks)
iib_alarms = self.read_bsmp_variable(73 + j*16,'uint32_t')
print("IIB " + str(j+1) + " Alarms: " + str(iib_alarms))
if(iib_alarms):
self.decode_interlocks(iib_alarms, list_fap_4p_iib_alarms)
print("\n Mean Load Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
print("Load Current 1: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Load Current 2: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " A")
print("Load Voltage: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
print("\nIGBT 1 Current Mod 1: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " A")
print("IGBT 2 Current Mod 1: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " A")
print("IGBT 1 Current Mod 2: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " A")
print("IGBT 2 Current Mod 2: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " A")
print("IGBT 1 Current Mod 3: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " A")
print("IGBT 2 Current Mod 3: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " A")
print("IGBT 1 Current Mod 4: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " A")
print("IGBT 2 Current Mod 4: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " A")
print("\nDC-Link Voltage Mod 1: " + str(round(self.read_bsmp_variable(45,'float'),3)) + " V")
print("DC-Link Voltage Mod 2: " + str(round(self.read_bsmp_variable(46,'float'),3)) + " V")
print("DC-Link Voltage Mod 3: " + str(round(self.read_bsmp_variable(47,'float'),3)) + " V")
print("DC-Link Voltage Mod 4: " + str(round(self.read_bsmp_variable(48,'float'),3)) + " V")
print("\nMean Duty-Cycle: " + str(round(self.read_bsmp_variable(49,'float'),3)) + " %")
print("IGBT 1 Duty-Cycle Mod 1: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle Mod 1: " + str(round(self.read_bsmp_variable(51,'float'),3)) + " %")
print("IGBT 1 Duty-Cycle Mod 2: " + str(round(self.read_bsmp_variable(52,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle Mod 2: " + str(round(self.read_bsmp_variable(53,'float'),3)) + " %")
print("IGBT 1 Duty-Cycle Mod 3: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle Mod 3: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " %")
print("IGBT 1 Duty-Cycle Mod 4: " + str(round(self.read_bsmp_variable(56,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle Mod 4: " + str(round(self.read_bsmp_variable(57,'float'),3)) + " %")
                if iib != 0:
print("\nIIB " + str(iib) + " Input Voltage: " + str(round(self.read_bsmp_variable(58 + iib_offset,'float'),3)) + " V")
print("IIB " + str(iib) + " Output Voltage: " + str(round(self.read_bsmp_variable(59 + iib_offset,'float'),3)) + " V")
print("IIB " + str(iib) + " IGBT 1 Current: " + str(round(self.read_bsmp_variable(60 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " IGBT 2 Current: " + str(round(self.read_bsmp_variable(61 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " IGBT 1 Temp: " + str(round(self.read_bsmp_variable(62 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " IGBT 2 Temp: " + str(round(self.read_bsmp_variable(63 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " Driver Voltage: " + str(round(self.read_bsmp_variable(64 + iib_offset,'float'),3)) + " V")
print("IIB " + str(iib) + " Driver Current 1: " + str(round(self.read_bsmp_variable(65 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " Driver Current 2: " + str(round(self.read_bsmp_variable(66 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " Inductor Temp: " + str(round(self.read_bsmp_variable(67 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " Heat-Sink Temp: " + str(round(self.read_bsmp_variable(68 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " Ground Leakage Current: " + str(round(self.read_bsmp_variable(69 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " Board Temp: " + str(round(self.read_bsmp_variable(70 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " Board RH: " + str(round(self.read_bsmp_variable(71 + iib_offset,'float'),3)) + " %")
print("IIB " + str(iib) + " Interlocks: " + str(round(self.read_bsmp_variable(72 + iib_offset,'uint32_t'),3)))
print("IIB " + str(iib) + " Alarms: " + str(round(self.read_bsmp_variable(73 + iib_offset,'uint32_t'),3)))
time.sleep(dt)
self.SetSlaveAdd(old_add)
except Exception as e:
print(e)
self.SetSlaveAdd(old_add)
def read_vars_fap_2p2s(self, n = 1, com_add = 1, dt = 0.5, iib = 0):
old_add = self.GetSlaveAdd()
iib_offset = 16*(iib-1)
try:
for i in range(n):
self.SetSlaveAdd(com_add)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fap_2p2s_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fap_2p2s_hard_interlocks)
for j in range(4):
iib_itlks = self.read_bsmp_variable(78 + j*16,'uint32_t')
print("\nIIB " + str(j+1) + " Interlocks: " + str(iib_itlks))
if(iib_itlks):
self.decode_interlocks(iib_itlks, list_fap_4p_iib_interlocks)
iib_alarms = self.read_bsmp_variable(79 + j*16,'uint32_t')
print("IIB " + str(j+1) + " Alarms: " + str(iib_alarms))
if(iib_alarms):
self.decode_interlocks(iib_alarms, list_fap_4p_iib_alarms)
print("\nMean Load Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
print("Load Current 1: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Load Current 2: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " A")
print("\nArm Current 1: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " A")
print("Arm Current 2: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " A")
print("\nIGBT 1 Current Mod 1: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " A")
print("IGBT 2 Current Mod 1: " + str(round(self.read_bsmp_variable(39,'float'),3)) + " A")
print("IGBT 1 Current Mod 2: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " A")
print("IGBT 2 Current Mod 2: " + str(round(self.read_bsmp_variable(41,'float'),3)) + " A")
print("IGBT 1 Current Mod 3: " + str(round(self.read_bsmp_variable(42,'float'),3)) + " A")
print("IGBT 2 Current Mod 3: " + str(round(self.read_bsmp_variable(43,'float'),3)) + " A")
print("IGBT 1 Current Mod 4: " + str(round(self.read_bsmp_variable(44,'float'),3)) + " A")
print("IGBT 2 Current Mod 4: " + str(round(self.read_bsmp_variable(45,'float'),3)) + " A")
print("\nDC-Link Voltage Mod 1: " + str(round(self.read_bsmp_variable(50,'float'),3)) + " V")
print("DC-Link Voltage Mod 2: " + str(round(self.read_bsmp_variable(51,'float'),3)) + " V")
print("DC-Link Voltage Mod 3: " + str(round(self.read_bsmp_variable(52,'float'),3)) + " V")
print("DC-Link Voltage Mod 4: " + str(round(self.read_bsmp_variable(53,'float'),3)) + " V")
print("\nMean Duty-Cycle: " + str(round(self.read_bsmp_variable(54,'float'),3)) + " %")
print("Differential Duty-Cycle: " + str(round(self.read_bsmp_variable(55,'float'),3)) + " %")
print("\nIGBT 1 Duty-Cycle Mod 1: " + str(round(self.read_bsmp_variable(56,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle Mod 1: " + str(round(self.read_bsmp_variable(57,'float'),3)) + " %")
print("IGBT 1 Duty-Cycle Mod 2: " + str(round(self.read_bsmp_variable(58,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle Mod 2: " + str(round(self.read_bsmp_variable(59,'float'),3)) + " %")
print("IGBT 1 Duty-Cycle Mod 3: " + str(round(self.read_bsmp_variable(60,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle Mod 3: " + str(round(self.read_bsmp_variable(61,'float'),3)) + " %")
print("IGBT 1 Duty-Cycle Mod 4: " + str(round(self.read_bsmp_variable(62,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle Mod 4: " + str(round(self.read_bsmp_variable(63,'float'),3)) + " %")
                if iib != 0:
print("\nIIB " + str(iib) + " Input Voltage: " + str(round(self.read_bsmp_variable(64 + iib_offset,'float'),3)) + " V")
print("IIB " + str(iib) + " Output Voltage: " + str(round(self.read_bsmp_variable(65 + iib_offset,'float'),3)) + " V")
print("IIB " + str(iib) + " IGBT 1 Current: " + str(round(self.read_bsmp_variable(66 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " IGBT 2 Current: " + str(round(self.read_bsmp_variable(67 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " IGBT 1 Temp: " + str(round(self.read_bsmp_variable(68 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " IGBT 2 Temp: " + str(round(self.read_bsmp_variable(69 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " Driver Voltage: " + str(round(self.read_bsmp_variable(70 + iib_offset,'float'),3)) + " V")
print("IIB " + str(iib) + " Driver Current 1: " + str(round(self.read_bsmp_variable(71 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " Driver Current 2: " + str(round(self.read_bsmp_variable(72 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " Inductor Temp: " + str(round(self.read_bsmp_variable(73 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " Heat-Sink Temp: " + str(round(self.read_bsmp_variable(74 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " Ground Leakage Current: " + str(round(self.read_bsmp_variable(75 + iib_offset,'float'),3)) + " A")
print("IIB " + str(iib) + " Board Temp: " + str(round(self.read_bsmp_variable(76 + iib_offset,'float'),3)) + " °C")
print("IIB " + str(iib) + " Board RH: " + str(round(self.read_bsmp_variable(77 + iib_offset,'float'),3)) + " %")
print("IIB " + str(iib) + " Interlocks: " + str(round(self.read_bsmp_variable(78 + iib_offset,'uint32_t'),3)))
print("IIB " + str(iib) + " Alarms: " + str(round(self.read_bsmp_variable(79 + iib_offset,'uint32_t'),3)))
time.sleep(dt)
self.SetSlaveAdd(old_add)
except Exception as e:
print(e)
self.SetSlaveAdd(old_add)
def read_vars_fap_225A(self, n = 1, com_add = 1, dt = 0.5):
old_add = self.GetSlaveAdd()
try:
for i in range(n):
self.SetSlaveAdd(com_add)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fap_225A_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fap_225A_hard_interlocks)
print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
print("\nIGBT 1 Current: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("IGBT 2 Current: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " A")
print("\nIGBT 1 Duty-Cycle: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " %")
print("IGBT 2 Duty-Cycle: " + str(round(self.read_bsmp_variable(37,'float'),3)) + " %")
print("Differential Duty-Cycle: " + str(round(self.read_bsmp_variable(38,'float'),3)) + " %")
time.sleep(dt)
self.SetSlaveAdd(old_add)
except:
self.SetSlaveAdd(old_add)
def read_vars_fbp_2s_ufjf(self, n = 1, com_add = 1, dt = 0.5):
old_add = self.GetSlaveAdd()
try:
for i in range(n):
self.SetSlaveAdd(com_add)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fbp_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fbp_hard_interlocks)
print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " A")
print("Load Error: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("\nMod 1 Load Voltage: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " V")
print("Mod 3 Load Voltage: " + str(round(self.read_bsmp_variable(40,'float'),3)) + " V")
#print("\nMod 1 DC-Link Voltage: " + str(round(self.read_bsmp_variable(29,'float'),3)) + " V")
#print("Mod 1 Temperature: " + str(round(self.read_bsmp_variable(31,'float'),3)) + " °C")
#print("\nMod 3 DC-Link Voltage: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " V")
#print("Mod 3 Temperature: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " °C")
print("\nMod 1 Duty-Cycle: " + str(round(self.read_bsmp_variable(32,'float'),3)) + " %")
print("Mod 3 Duty-Cycle: " + str(round(self.read_bsmp_variable(36,'float'),3)) + " %")
time.sleep(dt)
self.SetSlaveAdd(old_add)
except:
self.SetSlaveAdd(old_add)
def read_vars_fac_2p_acdc_imas(self, n = 1, add_mod_a = 2, dt = 0.5, iib = 0):
old_add = self.GetSlaveAdd()
try:
for i in range(n):
self.SetSlaveAdd(add_mod_a)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
print('\n *** MODULE A ***')
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_2p_acdc_imas_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_2p_acdc_imas_hard_interlocks)
print("\nCapBank Voltage: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " V")
print("Rectifier Current: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Duty-Cycle: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " %")
self.SetSlaveAdd(add_mod_a+1)
print('\n *** MODULE B ***')
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_2p_acdc_imas_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_2p_acdc_imas_hard_interlocks)
print("\nCapBank Voltage: " + str(round(self.read_bsmp_variable(33,'float'),3)) + " V")
print("Rectifier Current: " + str(round(self.read_bsmp_variable(34,'float'),3)) + " A")
print("Duty-Cycle: " + str(round(self.read_bsmp_variable(35,'float'),3)) + " %")
time.sleep(dt)
self.SetSlaveAdd(old_add)
except:
self.SetSlaveAdd(old_add)
raise
def read_vars_fac_2p_dcdc_imas(self, n = 1, com_add = 1, dt = 0.5, iib = 0):
old_add = self.GetSlaveAdd()
try:
for i in range(n):
self.SetSlaveAdd(com_add)
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.read_vars_common()
print("\nSync Pulse Counter: " + str(round(self.read_bsmp_variable(5,'uint32_t'),3)))
soft_itlks = self.read_bsmp_variable(31,'uint32_t')
print("\nSoft Interlocks: " + str(soft_itlks))
if(soft_itlks):
self.decode_interlocks(soft_itlks, list_fac_2p_dcdc_imas_soft_interlocks)
print('')
hard_itlks = self.read_bsmp_variable(32,'uint32_t')
print("Hard Interlocks: " + str(hard_itlks))
if(hard_itlks):
self.decode_interlocks(hard_itlks, list_fac_2p_dcdc_imas_hard_interlocks)
print("\nLoad Current: " + str(round(self.read_bsmp_variable(33,'float'),3)) + ' A')
print("Load Current Error: " + str(round(self.read_bsmp_variable(34,'float'),3)) + ' A')
print("\nArm 1 Current: " + str(round(self.read_bsmp_variable(35,'float'),3)) + ' A')
print("Arm 2 Current: " + str(round(self.read_bsmp_variable(36,'float'),3)) + ' A')
print("Arms Current Diff: " + str(round(self.read_bsmp_variable(37,'float'),3)) + ' A')
print("\nCapBank Voltage 1: " + str(round(self.read_bsmp_variable(38,'float'),3)) + ' V')
print("CapBank Voltage 2: " + str(round(self.read_bsmp_variable(39,'float'),3)) + ' V')
print("\nDuty-Cycle 1: " + str(round(self.read_bsmp_variable(40,'float'),3)) + ' %')
print("Duty-Cycle 2: " + str(round(self.read_bsmp_variable(41,'float'),3)) + ' %')
print("Differential Duty-Cycle: " + str(round(self.read_bsmp_variable(42,'float'),3)) + ' %')
time.sleep(dt)
self.SetSlaveAdd(old_add)
except:
self.SetSlaveAdd(old_add)
raise
def check_param_bank(self, param_file):
fbp_param_list = []
max_sampling_freq = 600000
c28_sysclk = 150e6
with open(param_file,newline='') as f:
reader = csv.reader(f)
for row in reader:
fbp_param_list.append(row)
for param in fbp_param_list:
            # CSV fields are strings, so cast before comparing numerically
            if str(param[0]) == 'Num_PS_Modules' and float(param[1]) > 4:
                print("Invalid " + str(param[0]) + ": " + str(param[1]) + ". Maximum is 4")
            elif str(param[0]) == 'Freq_ISR_Controller' and float(param[1]) > max_sampling_freq:
                print("Invalid " + str(param[0]) + ": " + str(param[1]) + ". Maximum is " + str(max_sampling_freq))
else:
for n in range(64):
try:
print(str(param[0]) + "["+ str(n) + "]: " + str(param[n+1]))
print(self.set_param(str(param[0]),n,float(param[n+1])))
except:
break
def set_param_bank(self, param_file):
fbp_param_list = []
with open(param_file,newline='') as f:
reader = csv.reader(f)
for row in reader:
fbp_param_list.append(row)
for param in fbp_param_list:
if str(param[0]) == 'PS_Name':
print(str(param[0]) + "[0]: " + str(param[1]))
print(self.set_ps_name(str(param[1])))
else:
for n in range(64):
try:
print(str(param[0]) + "["+ str(n) + "]: " + str(param[n+1]))
print(self.set_param(str(param[0]),n,float(param[n+1])))
except:
break
#self.save_param_bank()
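    # Expected CSV layout (inferred from the parsing above): one parameter per
    # row, with column 0 holding the parameter name ('PS_Name' is handled as a
    # string) and columns 1..64 holding its values; the loop stops at the
    # first missing column.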
def get_default_ramp_waveform(self, interval=500, nrpts=4000, ti=None, fi=None, forms=None):
from siriuspy.magnet.util import get_default_ramp_waveform
return get_default_ramp_waveform(interval, nrpts, ti, fi, forms)
def save_ramp_waveform(self, ramp):
        filename = input('Enter the file name: ')
with open( filename + '.csv', 'w', newline='') as f:
writer = csv.writer(f, delimiter=';')
writer.writerow(ramp)
def save_ramp_waveform_col(self, ramp):
        filename = input('Enter the file name: ')
with open( filename + '.csv', 'w', newline='') as f:
writer = csv.writer(f)
for val in ramp:
writer.writerow([val])
def read_vars_fac_n(self, n = 1, dt = 0.5):
old_add = self.GetSlaveAdd()
try:
for i in range(n):
print('\n--- Measurement #' + str(i+1) + ' ------------------------------------------\n')
self.SetSlaveAdd(1)
self.read_vars_fac_dcdc()
print('\n-----------------------\n')
self.SetSlaveAdd(2)
self.read_vars_fac_acdc()
time.sleep(dt)
self.SetSlaveAdd(old_add)
except:
self.SetSlaveAdd(old_add)
def get_step_buffer_fbp_ufjf(self, net1, net2, i_0, i_f, dly):
self.set_param('Analog_Var_Max',4,net1)
self.set_param('Analog_Var_Max',5,net2)
self.set_slowref(i_0)
time.sleep(0.5)
self.enable_buf_samples()
time.sleep(dly)
self.set_slowref(i_f)
self.disable_buf_samples()
buf = self.read_buf_samples_ctom()
buf1 = buf[0:4096:2]
buf2 = buf[1:4096:2]
fig = plt.figure()
ax1 = fig.add_subplot(2,1,1)
ax2 = fig.add_subplot(2,1,2)
ax1.plot(buf1)
ax1.grid()
ax2.plot(buf2)
ax2.grid()
fig.show()
return [buf1,buf2]
def set_buf_samples_freq(self, fs):
self.set_param('Freq_TimeSlicer',1,fs)
self.save_param_eeprom('Freq_TimeSlicer',1)
self.reset_udc()
def calc_pi(self, r_load, l_load, f_bw, v_dclink, send_drs = 0, dsp_id = 0):
kp = 2*3.1415*f_bw*l_load/v_dclink
ki = kp*r_load/l_load
print('\n Kp = ' + str(kp))
print(' Ki = ' + str(ki) + '\n')
if send_drs:
self.set_dsp_coeffs(3,dsp_id,[kp,ki,0.95,-0.95])
return [kp,ki]
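    # A plausible reading of the gains above: for an R-L load driven with
    # DC-link voltage v_dclink, the plant is v_dclink/(L*s + R);
    # kp = 2*pi*f_bw*l_load/v_dclink places the crossover at f_bw, and
    # ki = kp*r_load/l_load makes the PI zero cancel the load pole.
    # (3.1415 is used as an approximation of pi.)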
def config_dsp_modules_drs_fap_tests(self):
kp_load = 0
ki_load = 20.95
kp_share = 0.000032117
ki_share = 0.0012
        self.set_dsp_coeffs(3,0,[kp_load,ki_load,0.6,0])
        self.set_dsp_coeffs(3,1,[kp_share,ki_share,0.0015,-0.0015])
        self.save_dsp_modules_eeprom()
def set_prbs_sampling_freq(self,freq, type_memory):
self.set_param('Freq_TimeSlicer',0,freq)
self.set_param('Freq_TimeSlicer',1,freq)
self.save_param_bank(type_memory)
def get_dsp_modules_bank(self, list_dsp_classes = [1,2,3,4,5,6], print_modules = 1):
dsp_modules_bank = []
for dsp_class in list_dsp_classes:
for dsp_id in range(num_dsp_modules[dsp_class]):
dsp_module = [dsp_classes_names[dsp_class], dsp_class, dsp_id]
for dsp_coeff in range(num_coeffs_dsp_modules[dsp_class]):
try:
coeff = self.get_dsp_coeff(dsp_class,dsp_id,dsp_coeff)
if dsp_class == 3 and dsp_coeff == 1:
coeff *= self.get_param('Freq_ISR_Controller',0)
dsp_module.append(coeff)
except:
dsp_module.append('nan')
dsp_modules_bank.append(dsp_module)
if(print_modules):
print(dsp_module)
return dsp_modules_bank
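    # Note: for class-3 (PI) modules the stored ki is multiplied by
    # Freq_ISR_Controller above, which suggests the firmware keeps a
    # per-sample integral gain and this converts it back to 1/s for display.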
def store_dsp_modules_bank_csv(self, bank):
        filename = input('Enter the file name: ')
with open( filename + '.csv', 'w', newline='') as f:
writer = csv.writer(f, delimiter=',')
for dsp_module in bank:
writer.writerow(dsp_module)
def set_dsp_modules_bank(self, dsp_modules_file, save_eeprom = 0):
dsp_modules_row = []
with open(dsp_modules_file,newline='') as f:
reader = csv.reader(f)
for dsp_module in reader:
                if dsp_module != []:
                    if dsp_module[0][0] != '#':
list_coeffs = []
for coeff in dsp_module[3:3+num_coeffs_dsp_modules[int(dsp_module[1])]]:
list_coeffs.append(float(coeff))
print(str(int(dsp_module[1])) + ' ' + str(int(dsp_module[2])) + ' ' + str(list_coeffs))
self.set_dsp_coeffs(int(dsp_module[1]),int(dsp_module[2]),list_coeffs)
if(save_eeprom):
self.save_dsp_modules_eeprom()
else:
            print('\n *** Warning: the configured coefficients were not saved to EEPROM memory. To save them, use the argument save_eeprom = 1')
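    # Expected CSV layout (inferred from the parsing above): rows whose first
    # cell starts with '#' are comments; otherwise column 1 is the DSP class,
    # column 2 the module id, and columns 3 onward hold
    # num_coeffs_dsp_modules[class] coefficients.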
def select_param_bank(self, cfg_dsp_modules = 0):
        add = int(input('\n Enter the current serial address of the controller to be configured: '))
oldadd = self.GetSlaveAdd()
self.SetSlaveAdd(add)
areas = ['IA','LA','PA']
ps_models = ['fbp','fbp_dclink','fap','fap_4p','fap_2p4s','fac','fac_2s']
ps_folders = ['fbp','fbp_dclink','fap','fap',]
la_fap = ['TB-Fam:PS-B','TS-01:PS-QF1A','TS-01:PS-QF1B','TS-02:PS-QD2',
'TS-02:PS-QF2','TS-03:PS-QF3','TS-04:PS-QD4A','TS-04:PS-QD4B',
'TS-04:PS-QF4']
        print('\n Select the area: \n')
        print(' 0: Rack room')
        print(' 1: Transport lines')
        print(' 2: Power supply room\n')
        area = int(input(' Enter the corresponding number: '))
        if area == 0:
            sector = input('\n Enter the rack room sector [1 to 20]: ')
if int(sector) < 10:
sector = '0' + sector
rack = input('\n Choose the rack where the power supply is located [1/2/3]: ')
if not ((rack == '1') or (rack == '2') or (sector == '09' and rack == '3')):
print(' \n *** RACK DOES NOT EXIST ***\n')
return
print('\n Choose the power supply type: \n')
print(' 0: FBP')
print(' 1: FBP-DCLink\n')
ps_model = int(input(' Enter the corresponding number: '))
if ps_model == 0:
crate = '_crate_' + input('\n Enter the crate position, from top to bottom. Take into account crates that have not been installed yet: ')
elif ps_model == 1:
crate = ''
else:
print(' \n *** POWER SUPPLY TYPE DOES NOT EXIST ***\n')
return
file_dir = '../ps_parameters/IA-' + sector + '/' + ps_models[ps_model] + '/'
file_name = 'parameters_' + ps_models[ps_model] + '_IA-' + sector + 'RaPS0' + rack + crate + '.csv'
file_path = file_dir + file_name
print('\n Parameter bank to be used: ' + file_path)
elif area == 1:
print('\n Choose the power supply type: \n')
print(' 0: FBP')
print(' 1: FBP-DCLink')
print(' 2: FAP\n')
ps_model = int(input(' Enter the corresponding number: '))
if ps_model == 0 or ps_model == 1:
crate = input('\n Enter the crate position, from top to bottom. Take into account crates that have not been installed yet: ')
ps_name = '_LA-RaPS06_crate_' + crate
file_dir = '../ps_parameters/LA/' + ps_models[ps_model] + '/'
file_name = 'parameters_' + ps_models[ps_model] + ps_name + '.csv'
file_path = file_dir + file_name
elif ps_model == 2:
ps_list = []
file_dir = '../ps_parameters/LA/fap/'
for entry in os.listdir(file_dir):
if os.path.isfile(os.path.join(file_dir, entry)):
ps_list.append(entry)
print('\n ### List of transport line FAP power supplies ### \n')
for idx, ps in enumerate(ps_list):
print(' ' + str(idx) + ': ' + ps)
ps_idx = int(input('\n Choose the index of the corresponding power supply: '))
file_path = file_dir + ps_list[ps_idx]
else:
print(' \n *** POWER SUPPLY TYPE DOES NOT EXIST ***\n')
return
print('\n Parameter bank to be used: ' + file_path)
elif area == 2:
print('\n Choose the power supply type: \n')
print(' 0: FAC')
print(' 1: FAP\n')
ps_model = int(input(' Enter the corresponding number: '))
if ps_model == 0:
ps_list = []
file_dir = '../ps_parameters/PA/fac/'
for entry in os.listdir(file_dir):
if os.path.isfile(os.path.join(file_dir, entry)):
ps_list.append(entry)
print('\n ### List of FAC control crates in the power supply room ### \n')
for idx, ps in enumerate(ps_list):
print(' ', idx, ': ', ps)
ps_idx = int(input('\n Choose the index of the corresponding power supply: '))
file_path = file_dir + ps_list[ps_idx]
elif ps_model == 1:
ps_list = []
file_dir = '../ps_parameters/PA/fap/'
for entry in os.listdir(file_dir):
if os.path.isfile(os.path.join(file_dir, entry)):
ps_list.append(entry)
print('\n ### List of FAP control crates in the power supply room ### \n')
for idx, ps in enumerate(ps_list):
print(' ', idx, ': ', ps)
ps_idx = int(input('\n Choose the index of the corresponding power supply: '))
file_path = file_dir + ps_list[ps_idx]
else:
print(' \n *** POWER SUPPLY TYPE DOES NOT EXIST ***\n')
return
print('\n Parameter bank to be used: ' + file_path)
else:
print(' \n *** ROOM DOES NOT EXIST ***\n')
return
r = input('\n Are you sure you want to proceed? [Y/N]: ')
if (r != 'Y') and (r != 'y'):
print(' \n *** OPERATION CANCELED ***\n')
return
self.SetSlaveAdd(add)
if ps_model == 0 and cfg_dsp_modules == 1:
print('\n Sending control parameters to controller ...')
dsp_file_dir = '../dsp_parameters/IA-' + sector + '/' + ps_models[ps_model] + '/'
dsp_file_name = 'dsp_parameters_' + ps_models[ps_model] + '_IA-' + sector + 'RaPS0' + rack + crate + '.csv'
dsp_file_path = dsp_file_dir + dsp_file_name
self.set_dsp_modules_bank(dsp_file_path)
print('\n Saving control parameters to memory ...')
time.sleep(1)
self.save_dsp_modules_eeprom()
print('\n Sending operation parameters to controller ...\n')
time.sleep(1)
self.set_param_bank(file_path)
print('\n Saving operation parameters to onboard EEPROM memory ...')
self.save_param_bank(2)
time.sleep(5)
print('\n Resetting UDC ...')
self.reset_udc()
time.sleep(2)
print('\n Done! Do not forget to use the new serial address to communicate with this power supply! :)\n')
self.SetSlaveAdd(oldadd)
def get_siggen_vars(self):
print('\n### SigGen Variables ###\n')
print('Enable: ' + str((round(self.read_bsmp_variable(6,'uint16_t'),3))))
print('Type: ' + ListSigGenTypes_v2_1[int(round(self.read_bsmp_variable(7,'uint16_t'),3))])
print('Num Cycles: ' + str(round(self.read_bsmp_variable(8,'uint16_t'),3)))
print('Index: ' + str(round(self.read_bsmp_variable(9,'float'),3)))
print('Frequency: ' + str(round(self.read_bsmp_variable(10,'float'),3)))
print('Amplitude: ' + str(round(self.read_bsmp_variable(11,'float'),3)))
print('Offset: ' + str(round(self.read_bsmp_variable(12,'float'),3)))
self.read_var(self.index_to_hex(13))
reply_msg = self.ser.read(21)
val = struct.unpack('BBHffffB',reply_msg)
print('Aux Param 0: ' + str(val[3]))
print('Aux Param 1: ' + str(val[4]))
print('Aux Param 2: ' + str(val[5]))
print('Aux Param 3: ' + str(val[6]))
def firmware_initialization(self):
print("\n ### Inicialização de firmware ### \n")
print("\n Lendo status...")
print(self.read_ps_status())
print("\n Lendo versão de firmware...")
self.read_udc_version()
print("\n Desbloqueando UDC...")
print(self.unlock_udc(0xFFFF))
print("\n Habilitando EEPROM onboard...")
self.enable_onboard_eeprom()
print("\n Alterando senha...")
print(self.set_param('Password',0,0xCAFE))
print(self.save_param_eeprom('Password',0,2))
print("\n Configurando banco de parâmetros...")
self.select_param_bank()
print("\n ### Fim da inicialização de firmware ### \n")
def cfg_hensys_ps_model(self):
list_files = ['fbp_dclink/parameters_fbp_dclink_hensys.csv',
'fac/parameters_fac_acdc_hensys.csv',
'fac/parameters_fac_dcdc_hensys.csv',
'fac/parameters_fac_2s_acdc_hensys.csv',
'fac/parameters_fac_2s_dcdc_hensys.csv',
'fac/parameters_fac_2p4s_acdc_hensys.csv',
'fac/parameters_fac_2p4s_dcdc_hensys.csv',
'fap/parameters_fap_hensys.csv',
'fap/parameters_fap_2p2s_hensys.csv',
'fap/parameters_fap_4p_hensys.csv']
print('\n Unlocking UDC ...')
print(self.unlock_udc(0xCAFE))
print('\n *** Choose the power supply model to be configured ***\n')
print(' 0: FBP-DClink')
print(' 1: FAC-ACDC')
print(' 2: FAC-DCDC')
print(' 3: FAC-2S-ACDC')
print(' 4: FAC-2S-DCDC')
print(' 5: FAC-2P4S-ACDC')
print(' 6: FAC-2P4S-DCDC')
print(' 7: FAP')
print(' 8: FAP-2P2S')
print(' 9: FAP-4P')
model_idx = int(input('\n Enter the corresponding index: '))
file_path = '../ps_parameters/development/' + list_files[model_idx]
print('\n Parameter bank to be used: ' + file_path)
r = input('\n Are you sure you want to proceed? [Y/N]: ')
if (r != 'Y') and (r != 'y'):
print(' \n *** OPERATION CANCELED ***\n')
return
print('\n Sending operation parameters to controller ...\n')
time.sleep(1)
self.set_param_bank(file_path)
print('\n Saving operation parameters to onboard EEPROM memory ...')
self.save_param_bank(2)
time.sleep(5)
print('\n Resetting UDC ...')
self.reset_udc()
time.sleep(2)
print('\n Done! Do not forget to use the new serial address to communicate with this power supply! :)\n')
def test_bid_board(self, password):
r = input("\n Antes de iniciar, certifique-se que o bastidor foi energizado sem a placa BID.\n Para prosseguir, conecte a placa BID a ser testada e pressione qualquer tecla... ")
print("\n Desbloqueando UDC ...")
print(self.unlock_udc(password))
print("\n Carregando banco de parametros da memoria onboard ...")
print(self.load_param_bank(type_memory = 2))
print("\n Banco de parametros da memoria onboard:\n")
max_param = ListParameters.index('Scope_Source')
param_bank_onboard = []
for param in ListParameters[0:max_param]:
val = self.get_param(param,0)
print(param + ':',val)
param_bank_onboard.append(val)
print("\n Salvando banco de parametros na memoria offboard ...")
print(self.save_param_bank(type_memory = 1))
time.sleep(5)
print("\n Resetando UDC ...")
self.reset_udc()
time.sleep(3)
self.read_ps_status()
print("\n Desbloqueando UDC ...")
print(self.unlock_udc(password))
print("\n Carregando banco de parametros da memoria offboard ...")
print(self.load_param_bank(type_memory = 1))
self.read_ps_status()
print("\n Verificando banco de parametros offboard apos reset ... \n")
try:
param_bank_offboard = []
for param in ListParameters[0:max_param]:
val = self.get_param(param,0)
print(param, val)
param_bank_offboard.append(val)
if param_bank_onboard == param_bank_offboard:
print("\n BID board approved!\n")
else:
print("\n BID board rejected!\n")
except Exception:
print(" BID board rejected!\n")
def upload_parameters_bid(self, password):
print("\n Desbloqueando UDC ...")
print(self.unlock_udc(password))
print("\n Carregando banco de parametros da memoria offboard ...")
print(self.load_param_bank(type_memory = 1))
time.sleep(1)
print("\n Salvando banco de parametros na memoria onboard ...")
print(self.save_param_bank(type_memory = 2))
time.sleep(5)
print("\n Carregando coeficientes de controle da memoria offboard ...")
print(self.load_dsp_modules_eeprom(type_memory = 1))
time.sleep(1)
print("\n Salvando coeficientes de controle na memoria onboard ...\n")
print(self.save_dsp_modules_eeprom(type_memory = 2))
def download_parameters_bid(self,password):
print("\n Desbloqueando UDC ...")
print(self.unlock_udc(password))
print("\n Carregando banco de parametros da memoria onboard ...")
print(self.load_param_bank(type_memory = 2))
time.sleep(1)
print("\n Salvando banco de parametros na memoria offboard ...")
print(self.save_param_bank(type_memory = 1))
time.sleep(5)
print("\n Carregando coeficientes de controle da memoria onboard ...")
print(self.load_dsp_modules_eeprom(type_memory = 2))
time.sleep(1)
print("\n Salvando coeficientes de controle na memoria offboard ...")
print(self.save_dsp_modules_eeprom(type_memory = 1))
|
python
|
DEBUG = False
BCRYPT_LEVEL = 12 # Configuration for the Flask-Bcrypt extension
from settings.local_settings import *
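# Anything defined in settings/local_settings.py overrides the defaults above.
# A minimal development override (illustrative values only) could be:
# DEBUG = True
# BCRYPT_LEVEL = 4 # cheaper hashing while developing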
|
python
|
import requests, json
def activities(activity): # Get Activity ID
switch = {
1: "755600276941176913",
2: "755827207812677713",
3: "773336526917861400",
4: "814288819477020702"
}
return switch.get(activity, "755600276941176913")
print("--------------------------------") # Heading
print("Fro's Activity Starter")
print("--------------------------------")
print("Please note that to start an activity, you need invite permissions. Your token and any sensitive details are not sent to a third party. You can check the script.")
print("--------------------------------")
channel_id = input("Enter the ID of the voice channel you would like to start the activity in: ") # Channel ID to generate the invite
token = input("Enter your Discord token (used to generate the invite): ") # Discord Token to generate the invite
print("1. Youtube Together") # Activities Heading
print("2. Poker Night")
print("3. Betrayal.io")
print("4. Fishington.io")
activity = int(input("Enter in the ID of the activity you would like to start: ")) # Get Activity
data = json.dumps({ # Data to Send
"max_age": 86400,
"max_uses": 0,
"target_application_id": activities(activity),
"target_type": 2,
"temporary": False,
"validate": None
})
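# target_type 2 marks the invite as targeting an embedded application (an
# activity), and max_age 86400 keeps it valid for 24 hours. The IDs returned
# by activities() are Discord's public application IDs for these activities.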
headers = { # Headers
"Authorization": token,
"Content-Type": "application/json"
}
response = requests.post("https://discord.com/api/v8/channels/" + channel_id + "/invites", data=data, headers=headers).json() # Send request to Discord servers
print("Generated Invite Link: https://discord.gg/" + response["code"]) # Print the invite link
print("Post this invite link in a text channel and click the link directly (regardless if it says Activity has Ended), it will launch the activity and make you join the voice channel.") # Explanation
|
python
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class GetRes(object):
def __init__(self, target_name):
self.target_name = target_name
def __repr__(self):
return 'GetRes(%r)' % self.target_name
class GetAtt(GetRes):
def __init__(self, target_name, attr):
super(GetAtt, self).__init__(target_name)
self.attr = attr
def __repr__(self):
return 'GetAtt(%r, %r)' % (self.target_name, self.attr)
class RsrcDef(object):
def __init__(self, properties, depends_on):
self.properties = properties
self.depends_on = depends_on
def __repr__(self):
return 'RsrcDef(%r, %r)' % (self.properties, self.depends_on)
class Template(object):
def __init__(self, resources=None, key=None):
self.key = key
self.resources = resources if resources is not None else {} # avoid a shared mutable default dict
def __repr__(self):
return 'Template(%r)' % self.resources
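# Illustrative use of these helpers (resource names and properties are
# hypothetical):
# tmpl = Template({
# 'A': RsrcDef({'size': 1}, []),
# 'B': RsrcDef({'link': GetAtt('A', 'id')}, ['A']),
# }, key='example')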
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 14 09:31:37 2020
@author: natnem
"""
import math
def countingSort(array, exp, base=10):
n = len(array)
out = [0]*n
count = [0]*base
for i in range(n):
index = (array[i]//exp) % base
count[index] += 1
for i in range(1,base):
count[i] += count[i-1]
for x in range(n-1,-1,-1):
index = (array[x]//exp) % base
out[count[index]-1] = array[x]
count[index] -= 1
return out
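# Note: countingSort is stable -- the final pass walks the input from right to
# left and places each element in the last free slot for its digit, so items
# with equal digits keep their relative order. RadixSort below depends on this
# stability when sorting digit by digit, least significant first.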
def RadixSort(A,base):
d = math.ceil(math.log(max(A),base))
for i in range(d):
A = countingSort(A,base**i,base)
return A
mylist = [329,
457,
657,
839,
436,
720,
355]
print(RadixSort(mylist,10))
|
python
|
from django.shortcuts import redirect
from rest_framework import permissions, viewsets
from rest_framework.decorators import api_view, permission_classes
from . import models
from . import serializers
from ..scraps.models import Scrap
class ScrapBookViewSet(viewsets.ModelViewSet):
queryset = models.ScrapBook.objects.all()
serializer_class = serializers.ScrapBookSerializer
class ScrapBookItemViewSet(viewsets.ModelViewSet):
queryset = models.ScrapBookItem.objects.all()
serializer_class = serializers.ScrapBookItemSerializer
def create(self, request, **kwargs):
# Add url kwargs to post data, which ModelViewSet uses to create ScrapBookItem
request.data.update(kwargs)
return super().create(request, **kwargs)
@api_view(['POST'])
@permission_classes((permissions.IsAuthenticated,))
def add_scrap_to_book(request, scrap_id=None, book_id=None):
scrap = Scrap.objects.get(pk=scrap_id)
book = models.ScrapBook.objects.get(pk=book_id)
models.ScrapBookItem.objects.get_or_create(scrap=scrap, book=book)
return redirect(book.get_absolute_url())
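# A matching URL pattern (hypothetical) would capture both primary keys, e.g.:
# path('books/<int:book_id>/scraps/<int:scrap_id>/add/', add_scrap_to_book)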
|
python
|
import numpy as np
import os.path
from keras.models import load_model, Model
from keras.callbacks import ModelCheckpoint, EarlyStopping, CSVLogger
from python_research.experiments.utils import (
TimeHistory
)
from python_research.experiments.multiple_feature_learning.builders.keras_builders import (
build_multiple_features_model,
build_settings_for_dataset
)
from python_research.experiments.utils import (
TrainTestIndices
)
from python_research.experiments.utils import Dataset
from python_research.experiments.utils import (
parse_multiple_features
)
from typing import List, NamedTuple
from python_research.validation import validate
class TrainingSet(NamedTuple):
x_train: list
x_test: list
x_val: list
y_train: list
y_test: list
y_val: list
model: Model
def build_training_set(
original_path: str,
gt_path: str,
area_path: str,
stddev_path: str,
diagonal_path: str,
moment_path: str,
nb_samples: int,
neighborhood: List[int]
) -> TrainingSet:
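# Each optional feature set (area, stddev, diagonal, moment) that is supplied
# becomes one more input branch: its band count is appended to bands_sets and
# its train/val/test splits are appended alongside the original data. Every
# branch reuses the same TrainTestIndices, so the splits stay aligned
# sample-for-sample across branches.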
settings = build_settings_for_dataset(neighborhood)
original_data = Dataset(
original_path,
gt_path,
nb_samples,
settings.input_neighborhood
)
train_test_indices = TrainTestIndices(
original_data.train_indices,
original_data.test_indices
)
bands_sets = [original_data.x.shape[-1]]
x_trains = [original_data.x_train]
x_vals = [original_data.x_val]
x_tests = [original_data.x_test]
if area_path is not None:
area_data = Dataset(
area_path,
gt_path,
nb_samples,
settings.input_neighborhood,
train_test_indices=train_test_indices
)
bands_sets.append(area_data.x.shape[-1])
x_trains.append(area_data.x_train)
x_vals.append(area_data.x_val)
x_tests.append(area_data.x_test)
if stddev_path is not None:
stddev_data = Dataset(
stddev_path,
gt_path,
nb_samples,
settings.input_neighborhood,
train_test_indices=train_test_indices
)
bands_sets.append(stddev_data.x.shape[-1])
x_trains.append(stddev_data.x_train)
x_vals.append(stddev_data.x_val)
x_tests.append(stddev_data.x_test)
if diagonal_path is not None:
diagonal_data = Dataset(
diagonal_path,
gt_path,
nb_samples,
settings.input_neighborhood,
train_test_indices=train_test_indices
)
bands_sets.append(diagonal_data.x.shape[-1])
x_trains.append(diagonal_data.x_train)
x_vals.append(diagonal_data.x_val)
x_tests.append(diagonal_data.x_test)
if moment_path is not None:
moment_data = Dataset(
moment_path,
gt_path,
nb_samples,
settings.input_neighborhood,
train_test_indices=train_test_indices
)
bands_sets.append(moment_data.x.shape[-1])
x_trains.append(moment_data.x_train)
x_vals.append(moment_data.x_val)
x_tests.append(moment_data.x_test)
model = build_multiple_features_model(
settings,
len(original_data.labels) - 1,
bands_sets
)
return TrainingSet(
x_train=x_trains,
x_test=x_tests,
x_val=x_vals,
y_train=original_data.y_train,
y_test=original_data.y_test,
y_val=original_data.y_val,
model=model
)
def main():
args = parse_multiple_features()
os.makedirs(args.output_dir, exist_ok=True)
output_path = os.path.join(args.output_dir, args.output_name)
training_set = build_training_set(
args.original_path,
args.gt_path,
args.area_path,
args.stddev_path,
args.diagonal_path,
args.moment_path,
args.nb_samples,
args.neighborhood
)
early = EarlyStopping(patience=args.patience)
logger = CSVLogger(output_path + ".csv")
checkpoint = ModelCheckpoint(
output_path + "_model",
save_best_only=True
)
timer = TimeHistory()
training_set.model.fit(
x=training_set.x_train,
y=training_set.y_train,
validation_data=(training_set.x_val, training_set.y_val),
epochs=200,
batch_size=args.batch_size,
callbacks=[
early,
logger,
checkpoint,
timer
],
verbose=args.verbosity
)
model = load_model(output_path + "_model")
print(validate(model, training_set))
times = timer.times
np.savetxt(output_path + "_times.csv", times, fmt="%1.4f")
if __name__ == "__main__":
main()
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from collections.abc import Iterable, Iterator # collections.abc since Python 3.3; the bare "collections" aliases were removed in 3.10
def g():
yield 1
yield 2
yield 3
print('Iterable? [1, 2, 3]:', isinstance([1, 2, 3], Iterable))
print('Iterable? \'abc\':', isinstance('abc', Iterable))
print('Iterable? 123:', isinstance(123, Iterable))
print('Iterable? g():', isinstance(g(), Iterable))
print('Iterator? [1, 2, 3]:', isinstance([1, 2, 3], Iterator))
print('Iterator? iter([1, 2, 3]):', isinstance(iter([1, 2, 3]), Iterator))
print('Iterator? \'abc\':', isinstance('abc', Iterator))
print('Iterator? 123:', isinstance(123, Iterator))
print('Iterator? g():', isinstance(g(), Iterator))
# iter list:
print('for x in [1, 2, 3, 4, 5]:')
for x in [1, 2, 3, 4, 5]:
print(x)
print('for x in iter([1, 2, 3, 4, 5]):')
for x in iter([1, 2, 3, 4, 5]):
print(x)
print('next():')
it = iter([1, 2, 3, 4, 5])
print(next(it))
print(next(it))
print(next(it))
print(next(it))
print(next(it))
d = {'a': 1, 'b': 2, 'c': 3}
# iter each key:
print('iter key:', d)
for k in d.keys():
print('key:', k)
# iter each value:
print('iter value:', d)
for v in d.values():
print('value:', v)
# iter both key and value:
print('iter item:', d)
for k, v in d.items():
print('item:', k, v)
# iter list with index:
print('iter enumerate([\'A\', \'B\', \'C\']')
for i, value in enumerate(['A', 'B', 'C']):
print(i, value)
# iter complex list:
print('iter [(1, 1), (2, 4), (3, 9)]:')
for x, y in [(1, 1), (2, 4), (3, 9)]:
print(x, y)
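# iter() also accepts a zero-argument callable plus a sentinel; the resulting
# iterator keeps calling the function until it returns the sentinel value:
import random
print('iter(callable, sentinel):')
for n in iter(lambda: random.randint(0, 5), 3): print(n)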
|
python
|
from tkinter import ttk, Frame, Label, Entry, Button, LEFT, font
from models.repositories.credentials_repository import CredentialsDatabase
from settings import CREDENTIALS_DB, SALT
import sys
sys.path.append("..")
from providers.password_encrypt_provider import PasswordEncryptProvider
class PasswordPage(Frame):
def __init__(self, master_password, username, master=None):
# Connects to the database
self.conn = CredentialsDatabase('SQLITE', dbname=CREDENTIALS_DB)
self.credentials_list = self.get_registered_credentials(username=username)
# Creates the hash provider instance
self.encrypt_provider = PasswordEncryptProvider(SALT, master_password)
Frame.__init__(self, master)
# Create the tabs for the login screen
self.tabControl = ttk.Notebook(master)
self.tab1 = ttk.Frame(self.tabControl)
self.tab2 = ttk.Frame(self.tabControl)
self.tabControl.add(self.tab1, text='Create credentials')
self.tabControl.add(self.tab2, text='Registered credentials')
self.tabControl.pack(expand=1, fill="both")
# CREATE CREDENTIALS SCREEN ----------------------------------------
# Header container
self.create_credential_header_container = Frame(self.tab1)
self.create_credential_header_container.pack()
self.create_credential_header_container['pady'] = 10
# Header Text
self.create_credential_header = Label(self.create_credential_header_container, text='Credential registration')
self.create_credential_header.pack()
# Credential name container
self.register_credential_name_container = Frame(self.tab1)
self.register_credential_name_container['pady'] = 10
self.register_credential_name_container['padx'] = 10
self.register_credential_name_container.pack()
# Credential name input and label
self.register_credential_name_label = Label(self.register_credential_name_container, text='Credential name',
width=20)
self.register_credential_name_input = Entry(self.register_credential_name_container)
self.register_credential_name_label.pack(side=LEFT)
self.register_credential_name_input.pack()
# Credential url container
self.register_credential_url_container = Frame(self.tab1)
self.register_credential_url_container['pady'] = 10
self.register_credential_url_container['padx'] = 10
self.register_credential_url_container.pack()
# Credential url input and label
self.register_credential_url_label = Label(self.register_credential_url_container, text='Credential URL',
width=20)
self.register_credential_url_input = Entry(self.register_credential_url_container)
self.register_credential_url_label.pack(side=LEFT)
self.register_credential_url_input.pack()
# Password container
self.register_credential_password_container = Frame(self.tab1)
self.register_credential_password_container['pady'] = 10
self.register_credential_password_container['padx'] = 10
self.register_credential_password_container.pack()
# Password input and label
self.register_credential_password_label = Label(self.register_credential_password_container, text='Password', width=20)
self.register_credential_password_input = Entry(self.register_credential_password_container, show="*")
self.register_credential_password_label.pack(side=LEFT)
self.register_credential_password_input.pack()
# Password container
self.register_confirm_credential_password_container = Frame(self.tab1)
self.register_confirm_credential_password_container['pady'] = 10
self.register_confirm_credential_password_container['padx'] = 10
self.register_confirm_credential_password_container.pack()
# Password input and label
self.register_confirm_credential_password_label = Label(self.register_confirm_credential_password_container,
text='Confirm password', width=20)
self.register_confirm_credential_password_input = Entry(self.register_confirm_credential_password_container,
show="*")
self.register_confirm_credential_password_label.pack(side=LEFT)
self.register_confirm_credential_password_input.pack()
# Submit button container
self.register_submit_container = Frame(self.tab1)
self.register_submit_container.pack()
self.register_submit_container['pady'] = 10
# Submit button
self.register_submit_button = Button(self.register_submit_container)
self.register_submit_button['text'] = 'Register credential'
self.register_submit_button['command'] = lambda: self.register_credential(username=username)
self.register_submit_button.pack()
# Register message container
self.register_message_container = Frame(self.tab1)
self.register_message_container['pady'] = 10
self.register_message_container['padx'] = 10
self.register_message_container.pack()
# Register message
self.register_message = Label(self.register_message_container, text='')
self.register_message.pack()
# LIST CREDENTIALS SCREEN ----------------------------------------
# Header container
self.list_credential_header_container = Frame(self.tab2)
self.list_credential_header_container.pack()
self.list_credential_header_container['pady'] = 10
# Header Text
self.list_credential_header = Label(self.list_credential_header_container, text='Registered credentials')
self.list_credential_header.pack()
# Credentials list container
self.credential_container = Frame(self.tab2)
self.credential_container['pady'] = 10
self.credential_container['padx'] = 5
self.credential_container.pack()
# Credentials list treeview
self.credentials_columns = ['Name', 'Url', 'Password']
self.credentials = ttk.Treeview(columns=self.credentials_columns, show='headings')
vsb = ttk.Scrollbar(orient='vertical', command=self.credentials.yview)
hsb = ttk.Scrollbar(orient='horizontal', command=self.credentials.xview)
self.credentials.configure(yscrollcommand=vsb.set, xscrollcommand=hsb.set)
self.credentials.grid(column=0, row=0, sticky='nsew', in_=self.credential_container)
vsb.grid(column=1, row=0, sticky='ns', in_=self.credential_container)
hsb.grid(column=0, row=1, sticky='ew', in_=self.credential_container)
self.credential_container.grid_columnconfigure(0, weight=1)
self.credential_container.grid_rowconfigure(0, weight=1)
# Builds the credential list
self.pass_encrypted = False
self._build_tree(show_pass=self.pass_encrypted)
# Show pass button container
self.show_pass_button_container = Frame(self.tab2)
self.show_pass_button_container.pack()
self.show_pass_button_container['pady'] = 10
# Show pass button
self.show_pass_button = Button(self.show_pass_button_container)
self.show_pass_button['text'] = 'Show passwords'
self.show_pass_button['command'] = self.show_password
self.show_pass_button.pack()
def _build_tree(self, show_pass=False):
for col in self.credentials_columns:
self.credentials.heading(col, text=col.title(), command=lambda c=col: self.sortby(self.credentials, c, 0))
# adjust the column's width to the header string
self.credentials.column(col, width=font.Font().measure(col.title()))
self.pass_encrypted = not show_pass
for credential in self.credentials_list:
if show_pass:
decrypted_pass = self.encrypt_provider.decrypt_password(credential.password)
credential_data = [credential.name, credential.url, decrypted_pass]
else:
credential_data = [credential.name, credential.url, credential.password.decode()]
self.credentials.insert('', 'end', values=credential_data)
for ix, val in enumerate(credential_data):
col_w = font.Font().measure(val)
if self.credentials.column(self.credentials_columns[ix], width=None) < col_w:
self.credentials.column(self.credentials_columns[ix], width=col_w)
def sortby(self, tree, col, descending):
"""sort tree contents when a column header is clicked on"""
# grab values to sort
data = [(tree.set(child, col), child) for child in tree.get_children('')]
# if the data to be sorted is numeric change to float
# data = change_numeric(data)
# now sort the data in place
data.sort(reverse=descending)
for ix, item in enumerate(data):
tree.move(item[1], '', ix)
# switch the heading so it will sort in the opposite direction
tree.heading(col, command=lambda col=col: self.sortby(tree, col, int(not descending)))
def register_credential(self, username):
credential_name = self.register_credential_name_input.get()
credential_url = self.register_credential_url_input.get()
credential_password = self.register_credential_password_input.get()
credential_confirm_password = self.register_confirm_credential_password_input.get()
if credential_password != credential_confirm_password:
self.register_message['text'] = 'Password and password confirmation don\'t match'
return None
# Encrypts password
encrypted_password = self.encrypt_provider.encrypt_password(credential_password)
# Stores encrypted password in database
new_credential = self.conn.create_credential(username=username,
credential_name=credential_name,
credential_password=encrypted_password,
credential_url=credential_url)
if not new_credential:
self.register_message['text'] = 'Credential already exists'
else:
self.register_message['text'] = 'Credential created successfully'
self.credentials_list.append(new_credential)
self._build_tree()
def get_registered_credentials(self, username):
credentials_list = self.conn.get_all_credentials(username=username)
return credentials_list
def show_password(self):
self.credentials.delete(*self.credentials.get_children())
self._build_tree(show_pass=self.pass_encrypted)
|
python
|
import pygame
from pygame.locals import *
import random
import sys
from settings import *
pygame.font.init()
pygame.init()
class Pad(pygame.sprite.Sprite):
def __init__(self):
super(Pad, self).__init__()
self.width, self.height = 95,95
self.image = pygame.Surface((self.width, self.height))
self.image.fill(black)
self.image.set_alpha(160)
self.rect = self.image.get_rect()
self.pos_x,self.pos_y = [-1,-1]
self.prev_pos = [-1,-1]
def set_pos(self):
global pad_pos
self.prev_pos = [self.pos_x,self.pos_y]
try:
pad_pos.remove([self.pos_x,self.pos_y])
except ValueError: # first call: this pad's position is not in pad_pos yet
pass
self.pos_x = random.randint(0,3)
self.pos_y = random.randint(0,3)
if [self.pos_x,self.pos_y] in pad_pos or self.prev_pos == [self.pos_x,self.pos_y]:
self.set_pos()
else:
pad_pos.append([self.pos_x,self.pos_y])
self.rect.x = (self.pos_x*(self.width+5)) + 25
self.rect.y = (self.pos_y*(self.height+5)) + 25
def update(self):
global ACTION_CLICK,ACTION_HOVER,score,GAME_STATE,CLICK,pad_pos
if mouse_collide(self,self.mouse):
self.image.set_alpha(210)
if CLICK:
score += 1
self.set_pos()
else:
self.image.set_alpha(160)
pygame.draw.rect(window,white,(self.rect.x-2,self.rect.y-2,self.width+4,self.height+4),2)
def text(text, font, size, color, x, y):
font_style = str(font)
font_size = size
text_font = pygame.font.SysFont(font_style, font_size)
message = text_font.render(text, True, color)
window.blit(message, (x, y))
def mouse_collide(obj,mouse):
if obj.rect.x + obj.width > mouse[0] > obj.rect.x \
and obj.rect.y + obj.height > mouse[1] > obj.rect.y:
return True
else:
return False
def button(x,y,w,h,a_hover,a_click):
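# Shared-state hit test: while the mouse is inside the (x, y, w, h) box,
# ACTION_HOVER is set to a_hover, and a click latches ACTION_CLICK to a_click.
# When the mouse leaves the box (or the button is released) the globals are
# cleared again, so each screen's loop can poll them to drive its buttons.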
global window,ACTION_HOVER,ACTION_CLICK,CLICK
mouse = pygame.mouse.get_pos() #Get Mouse Position
if x+w > mouse[0] and mouse[0] > x and y+h > mouse[1] and mouse[1] > y: #If mouse position is inside the box
#pygame.draw.rect( window, (255,255,255), (x,y,w,h) )
ACTION_HOVER = a_hover
if CLICK:
ACTION_CLICK = a_click #If clicked, set click action
elif ACTION_HOVER == a_hover:
ACTION_HOVER = None
if ACTION_CLICK == a_click and not CLICK:
ACTION_CLICK = None
def reset():
global time, score, starttime, CLICK, ACTION_CLICK, ACTION_HOVER
time = 0
score = 0
starttime = False
CLICK = False
ACTION_CLICK = None
ACTION_HOVER = None
pad1.set_pos()
pad2.set_pos()
pad3.set_pos()
def menu_anim():
for y in range(180,120,-5):
window.fill(black)
text("Quick Taps",'segoe ui',60,white,90,y)
clock.tick(60)
pygame.display.update()
for x in range(-200,188,40):
window.fill(black)
text("Quick Taps",'segoe ui',60,white,90,120)
text("Play",'segoe ui',40,white,x,220)
clock.tick(60)
pygame.display.update()
for x in range(-200,158,40):
window.fill(black)
text("Quick Taps",'segoe ui',60,white,90,120)
text("Play",'segoe ui',40,white,188,220)
text("Scenery",'segoe ui',40,white,x,270)
clock.tick(60)
pygame.display.update()
for x in range(-200,185,40):
window.fill(black)
text("Quick Taps",'segoe ui',60,white,90,120)
text("Play",'segoe ui',40,white,188,220)
text("Scenery",'segoe ui',40,white,158,270)
text("Help",'segoe ui', 40, white, x,320)
clock.tick(60)
pygame.display.update()
def endscreen_anim():
s = pygame.Surface((window_width,window_height))
for x in range(0,180,5):
window.blit(bg,(0,0))
s.set_alpha(x)
s.fill((180,0,0))
window.blit(s,(0,0))
clock.tick(60)
pygame.display.update()
for x in range(-200,178,40):
window.blit(bg,(0,0))
window.blit(s,(0,0))
text("Retry",'segoe ui',40,white,x,160)
clock.tick(60)
pygame.display.update()
for x in range(-200,175,40):
window.blit(bg,(0,0))
window.blit(s,(0,0))
text("Retry",'segoe ui',40,white,178,160)
text("Menu",'segoe ui',40,white,x,220)
clock.tick(60)
pygame.display.update()
def randomize_bg():
global random_bg, bg_used, bg_list, bg
if random_bg:
if len(bg_used) == len(bg_list)-1: # -1 because of centre piece
bg_used = []
while True:
bg = bg_list[random.randint(0,len(bg_list)-1)]
if bg != bg_list[4] and bg not in bg_used:
bg_used.append(bg)
break
if (__name__ == "__main__"):
window = pygame.display.set_mode((window_width, window_height))
pygame.display.set_caption("Quick Taps")
GAME_STATE = title
GAME_MODE = None
active_object_list = pygame.sprite.Group()
pad_pos = []
pad1 = Pad()
pad1.set_pos()
pad2 = Pad()
pad2.set_pos()
pad3 = Pad()
pad3.set_pos()
active_object_list.add(pad1,pad2,pad3)
ACTION_CLICK = None
ACTION_HOVER = None
CLICK = False
score = 0
winscore = 20
starttime = False
time = 0
random_bg = True
bg_used = []
winscreen_alpha = 255
while 1:
while (GAME_STATE == title):
for event in pygame.event.get():
if (event.type == pygame.QUIT):
pygame.quit()
sys.exit()
if (event.type == pygame.KEYDOWN) or (event.type == pygame.MOUSEBUTTONUP):
GAME_STATE = menu
menu_anim()
if GAME_STATE != title:
break
window.fill(black)
text("Quick Taps",'segoe ui',60,white,90,180)
clock.tick(60)
winscreen_alpha = 255
pygame.display.update()
while (GAME_STATE == menu):
for event in pygame.event.get():
if (event.type == pygame.QUIT):
pygame.quit()
sys.exit()
if (event.type == pygame.MOUSEBUTTONDOWN):
CLICK = True
if ACTION_CLICK == "1":
GAME_STATE = game
GAME_MODE = arcade
randomize_bg()
elif ACTION_CLICK == "2":
GAME_STATE = bg_select
elif ACTION_CLICK == "3":
GAME_STATE = help_screen
window.fill(black)
text("Quick Taps",'segoe ui',60,white,90,120)
if ACTION_HOVER == "h1":
text("Play",'segoe ui',40,white,190,220)
text("Play",'segoe ui',40,white,188,220)
if ACTION_HOVER == "h2":
text("Scenery",'segoe ui',40,white,160,270)
text("Scenery",'segoe ui',40,white,158,270)
if ACTION_HOVER == "h3":
text("Help",'segoe ui', 40, white, 187,320)
text("Help",'segoe ui', 40, white, 185,320)
button(178,230,90,40,"h1","1")
button(158,280,120,40,"h2","2")
button(185,330,70,40,"h3","3")
CLICK = False
clock.tick(60)
pygame.display.update()
while (GAME_STATE == help_screen):
for event in pygame.event.get():
if (event.type == pygame.QUIT):
pygame.quit()
sys.exit()
if (event.type == pygame.MOUSEBUTTONDOWN):
CLICK = True
if ACTION_CLICK == "1":
GAME_STATE = menu
window.fill(black)
window.blit(help_bg, (0,0))
mouse = pygame.mouse.get_pos()
if ACTION_HOVER == "h1":
text("Back",'segoe ui',40,white,190,360)
text("Back",'segoe ui',40,white,188,360)
button(178,360,90,50,"h1","1")
CLICK = False
clock.tick(60)
pygame.display.update()
while (GAME_STATE == bg_select):
for event in pygame.event.get():
if (event.type == pygame.QUIT):
pygame.quit()
sys.exit()
if (event.type == pygame.MOUSEBUTTONDOWN):
# only react when a tile is actually hovered; ACTION_HOVER may still hold
# a string or None left over from other screens. Tile 4 is the centre
# "random" piece.
if ACTION_HOVER == 4:
random_bg = True
GAME_STATE = menu
bg_used = []
elif isinstance(ACTION_HOVER, int):
bg = bg_list[ACTION_HOVER]
random_bg = False
GAME_STATE = menu
bg_used = []
window.fill(black)
mouse = pygame.mouse.get_pos()
for row in range(3):
for col in range(3):
window.blit(pygame.transform.scale(bg_list[row+col*3], (130,130)),(row*148+12,col*148+12))
if row*148+12 + 130 > mouse[0] and mouse[0] > row*148+12 \
and col*148+12 + 130 > mouse[1] and mouse[1] > col*148+12:
ACTION_HOVER = row+col*3
else:
cover = pygame.Surface((130,130));cover.set_alpha(100);cover.fill(black)
window.blit(cover, (row*148+12,col*148+12))
clock.tick(fps)
pygame.display.update()
while (GAME_STATE == game):
for event in pygame.event.get():
if (event.type == pygame.QUIT):
pygame.quit()
sys.exit()
if (event != None):
if (event.type == pygame.KEYDOWN):
if event.key == K_p:
GAME_STATE = paused
s = pygame.Surface((window_width,window_height))
s.set_alpha(200)
s.fill(black)
window.blit(s, (0,0))
text("PAUSED",'segoe ui',40,white,160,190)
if event.type == pygame.MOUSEBUTTONDOWN:
CLICK = True
starttime = True
if mouse_collide(pad1,pygame.mouse.get_pos()) == False \
and mouse_collide(pad2,pygame.mouse.get_pos()) == False \
and mouse_collide(pad3,pygame.mouse.get_pos()) == False:
GAME_STATE = endscreen
endscreen_anim()
reset()
if GAME_STATE != game:
break
pad1.mouse,pad2.mouse,pad3.mouse = pygame.mouse.get_pos(),pygame.mouse.get_pos(),pygame.mouse.get_pos()
active_object_list.update()
screen_fade = pygame.Surface((window_width,window_height));screen_fade.set_alpha(60);screen_fade.fill(white)
window.blit( bg, (0,0))
window.blit( screen_fade, (0,0) )
active_object_list.draw(window)
text(str(score),"segoe ui",50,black,395,10)
if GAME_MODE == arcade:
if score == winscore:
GAME_STATE = winscreen
if starttime:
time += 1
CLICK = False
clock.tick(fps)
pygame.display.update()
while (GAME_STATE == paused):
for event in pygame.event.get():
if (event.type == pygame.QUIT):
pygame.quit()
sys.exit()
if (event.type == pygame.KEYDOWN):
if event.key == K_p:
GAME_STATE = game
clock.tick(fps)
pygame.display.update()
while (GAME_STATE == endscreen):
for event in pygame.event.get():
if (event.type == pygame.QUIT):
pygame.quit()
sys.exit()
if (event.type == pygame.MOUSEBUTTONDOWN):
CLICK = True
if (event.type == pygame.KEYDOWN):
if GAME_MODE == arcade:
reset()
GAME_STATE = game
randomize_bg()
window.blit(bg,(0,0))
s = pygame.Surface((window_width,window_height))
s.set_alpha(180)
s.fill((180,0,0))
window.blit(s,(0,0))
if ACTION_HOVER == "h1":
text("Retry",'segoe ui',40,white,180,160)
text("Retry",'segoe ui',40,white,178,160)
if ACTION_HOVER == "h2":
text("Menu",'segoe ui',40,white,177,220)
text("Menu",'segoe ui',40,white,175,220)
button(178,170,90,40,"h1","a")
button(175,230,100,40,"h2","b")
if ACTION_CLICK == "a":
if GAME_MODE == arcade:
reset()
GAME_STATE = game
randomize_bg()
elif ACTION_CLICK == "b":
menu_anim()
GAME_STATE = menu
CLICK = False
clock.tick(fps)
pygame.display.update()
while (GAME_STATE == winscreen):
endtime = str(int(time/60))+":"+str(int((time%60)/6)) # seconds:tenths, since time counts frames at 60 fps
message = segoe72.render(endtime, True,black)
for event in pygame.event.get():
if (event.type == pygame.QUIT):
pygame.quit()
sys.exit()
if (event.type == pygame.KEYDOWN) or (event.type == pygame.MOUSEBUTTONDOWN):
GAME_STATE = title
reset()
window.blit(bg,(0,0))
s = pygame.Surface((window_width,window_height))
if winscreen_alpha > 0:
winscreen_alpha -= 5
s.set_alpha(winscreen_alpha)
s.fill((255,255,255))
window.blit(s,(0,0))
window.blit(banner,(0,0))
window.blit(message,(window_width/2 - segoe72.size(endtime)[0]/2,175) )
clock.tick(fps)
pygame.display.update()
|
python
|
"""
# Script: polyEvaluateTest.py
#
# Description:
# Unit test for the polyEvaluate command.
#
##############################################################################
"""
import maya.cmds as cmds
import unittest
class PolyEvaluateTest(unittest.TestCase):
def testPolyEvaluate(self):
pass
def testUVSetFlag(self):
pass
|
python
|
#----------------------------------------------------------------------
# LinMoTube
# by Jake Day
# v1.2
# Basic GUI for YouTube on Linux Mobile
#----------------------------------------------------------------------
import ctypes, os, requests, io, sys, subprocess, gi, json, threading
from urllib.parse import urlparse
from youtubesearchpython import *
from PIL import Image
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, Gdk, GdkPixbuf, Gio, GLib
gi.require_version('GL', '1.0')
from OpenGL import GL, GLX
from mpv import MPV, MpvRenderContext, OpenGlCbGetProcAddrFn
class LinMoTube(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self)
self.set_title("LinMoTube")
self.set_border_width(10)
self.set_default_size(300, 420)
#self.maximize()
def draw(self):
self.my_path = os.path.abspath(os.path.dirname(__file__))
self.cache_path = os.path.expanduser("~/.cache/linmotube/")
self.config_path = os.path.expanduser("~/.config/linmotube/")
self.library_file = os.path.expanduser("~/.config/linmotube/library.json")
if os.path.exists(self.cache_path) == False:
os.mkdir(self.cache_path)
if os.path.exists(self.config_path) == False:
os.mkdir(self.config_path)
if os.path.exists(self.library_file):
with open(self.library_file, "r") as jsonfile:
self.librarydata = json.load(jsonfile)
jsonfile.close()
else:
self.librarydata = []
provider = Gtk.CssProvider()
provider.load_from_file(Gio.File.new_for_path(os.path.join(self.my_path, 'assets/linmotube.css')))
Gtk.StyleContext.add_provider_for_screen(Gdk.Screen.get_default(), provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
self.get_style_context().add_class('app-theme')
self.mode = "V"
self.playing = False
self.seeking = False
self.duration = "00:00"
self.criteria = None
self.library = False
header = Gtk.HeaderBar(title="LinMoTube")
header.get_style_context().add_class('app-theme')
header.props.show_close_button = True
logopb = GdkPixbuf.Pixbuf.new_from_file_at_scale(
filename=os.path.join(self.my_path, 'assets/linmotube.png'),
width=30,
height=30,
preserve_aspect_ratio=True)
logoimg = Gtk.Image.new_from_pixbuf(logopb)
header.pack_start(logoimg)
self.set_titlebar(header)
container = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
self.add(container)
searchbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6)
container.add(searchbox)
librarypb = GdkPixbuf.Pixbuf.new_from_file_at_scale(
filename=os.path.join(self.my_path, 'assets/library.png'),
width=24,
height=24,
preserve_aspect_ratio=True)
libraryimg = Gtk.Image.new_from_pixbuf(librarypb)
librarybtn = Gtk.Button()
librarybtn.connect("clicked", self.OnLoadLibrary)
librarybtn.add(libraryimg)
librarybtn.get_style_context().add_class('app-theme')
searchbox.pack_start(librarybtn, False, False, 0)
self.searchentry = Gtk.SearchEntry()
self.searchentry.set_text("")
self.searchentry.connect("activate", self.OnVideoSearch)
self.searchentry.get_style_context().add_class('app-theme')
searchbox.pack_start(self.searchentry, True, True, 0)
searchbtn = Gtk.Button(label="Go")
searchbtn.connect("clicked", self.OnVideoSearch)
searchbtn.get_style_context().add_class('app-theme')
searchbox.pack_start(searchbtn, False, False, 0)
self.musicpb = GdkPixbuf.Pixbuf.new_from_file_at_scale(
filename=os.path.join(self.my_path, 'assets/music.png'),
width=24,
height=24,
preserve_aspect_ratio=True)
self.musicimg = Gtk.Image.new_from_pixbuf(self.musicpb)
self.videopb = GdkPixbuf.Pixbuf.new_from_file_at_scale(
filename=os.path.join(self.my_path, 'assets/video.png'),
width=24,
height=24,
preserve_aspect_ratio=True)
self.videoimg = Gtk.Image.new_from_pixbuf(self.videopb)
self.modebtn = Gtk.Button()
self.modebtn.connect("clicked", self.OnToggleMode)
self.modebtn.add(self.videoimg)
self.modebtn.get_style_context().add_class('app-theme')
searchbox.pack_start(self.modebtn, False, False, 0)
scrolled = Gtk.ScrolledWindow()
scrolled.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
scrolled.connect("edge-reached", self.DoSearchMore, 70)
container.pack_start(scrolled, True, True, 0)
self.videolist = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
scrolled.add(self.videolist)
self.controls = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
self.controls.get_style_context().add_class('border-top')
container.pack_end(self.controls, False, False, 0)
playback = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
self.controls.pack_start(playback, False, False, 0)
self.currentlabel = Gtk.Label(label="no media selected")
self.currentlabel.set_justify(Gtk.Justification.CENTER)
self.currentlabel.set_line_wrap(True)
self.currentlabel.set_max_width_chars(68)
self.currentlabel.get_style_context().add_class('bold')
playback.pack_start(self.currentlabel, True, True, 0)
self.positionlabel = Gtk.Label()
self.positionlabel.set_justify(Gtk.Justification.CENTER)
playback.pack_start(self.positionlabel, True, True, 0)
self.playscale = Gtk.Scale().new(Gtk.Orientation.HORIZONTAL)
self.playscale.set_draw_value(False)
self.playscale.connect("button-press-event", self.OnPlayPositionSeek)
self.playscale.connect("button-release-event", self.OnPlayPositionChange)
playback.pack_start(self.playscale, True, True, 0)
mediabtns = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6)
playback.pack_start(mediabtns, True, True, 0)
pausepb = GdkPixbuf.Pixbuf.new_from_file_at_scale(
filename=os.path.join(self.my_path, 'assets/pause.png'),
width=24,
height=24,
preserve_aspect_ratio=True)
pauseimg = Gtk.Image.new_from_pixbuf(pausepb)
pausebtn = Gtk.Button()
pausebtn.add(pauseimg)
pausebtn.connect("clicked", self.OnPauseVideo)
pausebtn.get_style_context().add_class('app-theme')
mediabtns.pack_start(pausebtn, True, True, 0)
stoppb = GdkPixbuf.Pixbuf.new_from_file_at_scale(
filename=os.path.join(self.my_path, 'assets/stop.png'),
width=24,
height=24,
preserve_aspect_ratio=True)
stopimg = Gtk.Image.new_from_pixbuf(stoppb)
stopbtn = Gtk.Button()
stopbtn.add(stopimg)
stopbtn.connect("clicked", self.OnStopVideo)
stopbtn.get_style_context().add_class('app-theme')
mediabtns.pack_start(stopbtn, True, True, 0)
self.loadinglabel = Gtk.Label()
self.loadinglabel.set_markup("<big><b>loading media...</b></big>");
self.loadinglabel.set_justify(Gtk.Justification.FILL)
self.loadinglabel.set_line_wrap(True)
self.loadinglabel.set_max_width_chars(68)
self.loadinglabel.get_style_context().add_class('app-theme')
container.pack_end(self.loadinglabel, False, False, 0)
self.downloadpb = GdkPixbuf.Pixbuf.new_from_file_at_scale(
filename=os.path.join(self.my_path, 'assets/download.png'),
width=24,
height=24,
preserve_aspect_ratio=True)
self.savedpb = GdkPixbuf.Pixbuf.new_from_file_at_scale(
filename=os.path.join(self.my_path, 'assets/saved.png'),
width=24,
height=24,
preserve_aspect_ratio=True)
self.removepb = GdkPixbuf.Pixbuf.new_from_file_at_scale(
filename=os.path.join(self.my_path, 'assets/remove.png'),
width=24,
height=24,
preserve_aspect_ratio=True)
self.show_all()
self.modebtn.grab_focus()
self.controls.hide()
self.GetOriginalIdleTime()
x = threading.Thread(target=self.DoSearch, args=(None, True))
x.start()
self.player = MediaPlayer()
def GetOriginalIdleTime(self):
sbprocess = subprocess.Popen(['gsettings', 'get', 'org.gnome.desktop.session', 'idle-delay'], stdout=subprocess.PIPE)
out, err = sbprocess.communicate()
self.idleTime = out.decode('UTF-8').replace("uint32", "").strip()
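# The current GNOME idle-delay is cached so playback can disable screen
# blanking (idle-delay 0) in DoPlayVideo and restore the saved value when
# OnStopVideo runs.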
def OnToggleMode(self, button):
self.library = False
if self.mode == "V":
self.mode = "M"
self.modebtn.get_child().set_from_pixbuf(self.musicpb)
else:
self.mode = "V"
self.modebtn.get_child().set_from_pixbuf(self.videopb)
x = threading.Thread(target=self.DoSearch, args=(self.criteria, True))
x.start()
def OnVideoSearch(self, button):
x = threading.Thread(target=self.DoSearch, args=(self.searchentry.get_text(), True))
x.start()
def DoSearchMore(self, swin, pos, dist):
if pos == Gtk.PositionType.BOTTOM:
if self.library == False:
x = threading.Thread(target=self.DoSearch, args=(self.criteria, False))
x.start()
def DoSearch(self, criteria, clear):
self.criteria = criteria
self.library = False
if self.criteria == None:
self.criteria = "linux mobile"
if clear:
GLib.idle_add(self.DoClearVideoList)
GLib.idle_add(self.DoShowLoading)
if clear:
self.videosSearch = VideosSearch(self.criteria, limit=10)
else:
self.videosSearch.next()
results = self.videosSearch.result()['result']
for vid in results:
thumbname = vid['id']
if self.mode == "V":
vidthumb = vid['thumbnails'][0]['url']
vidurl = urlparse(vidthumb)
if os.path.exists(os.path.join(self.cache_path, thumbname)) == False:
content = requests.get(vidthumb).content
file = open(os.path.join(self.cache_path, thumbname), "wb")
file.write(content)
file.close()
im = Image.open(os.path.join(self.cache_path, thumbname)).convert("RGB")
im.save(os.path.join(self.cache_path, thumbname), "jpeg")
if self.mode == "M":
channelthumb = vid['thumbnails'][0]['url']
channelurl = urlparse(channelthumb)
channelthumbname = vid['id']
else:
channelthumb = vid['channel']['thumbnails'][0]['url']
channelurl = urlparse(channelthumb)
channelthumbname = os.path.basename(channelurl.path)
if os.path.exists(os.path.join(self.cache_path, channelthumbname)) == False:
channelcontent = requests.get(channelthumb).content
file = open(os.path.join(self.cache_path, channelthumbname), "wb")
file.write(channelcontent)
file.close()
im = Image.open(os.path.join(self.cache_path, channelthumbname)).convert("RGB")
im.save(os.path.join(self.cache_path, channelthumbname), "jpeg")
GLib.idle_add(self.DoAddVideo, vid['id'], vid['title'], thumbname, channelthumbname, vid['channel']['name'], vid['viewCount']['short'])
GLib.idle_add(self.DoHideLoading)
def DoClearVideoList(self):
videos = self.videolist.get_children()
for video in videos:
if video is not None:
self.videolist.remove(video)
def DoShowLoading(self):
self.loadinglabel.show()
def DoHideLoading(self):
self.loadinglabel.hide()
def DoAddVideo(self, id, title, thumbname, channelthumbname, channelname, viewcount):
vidcard = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
self.videolist.add(vidcard)
if self.mode == "V":
thumbpb = GdkPixbuf.Pixbuf.new_from_file_at_scale(
filename=os.path.join(self.cache_path, thumbname),
width=300,
height=200,
preserve_aspect_ratio=True)
thumbimg = Gtk.Image.new_from_pixbuf(thumbpb)
vidbtn = Gtk.Button()
vidbtn.add(thumbimg)
vidbtn.connect("clicked", self.OnPlayVideo, None, id, title, self.mode)
vidbtn.get_style_context().add_class('app-theme')
vidbtn.get_style_context().add_class('no-border')
vidcard.pack_start(vidbtn, True, True, 0)
vidmeta = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6)
vidcard.pack_start(vidmeta, False, False, 0)
channelpb = GdkPixbuf.Pixbuf.new_from_file_at_scale(
filename=os.path.join(self.cache_path, channelthumbname),
width=68,
height=68,
preserve_aspect_ratio=False)
channelimg = Gtk.Image.new_from_pixbuf(channelpb)
if self.mode == "M":
vidbtn = Gtk.Button()
vidbtn.add(channelimg)
vidbtn.connect("clicked", self.OnPlayVideo, None, id, title, self.mode)
vidbtn.get_style_context().add_class('app-theme')
vidbtn.get_style_context().add_class('no-border')
vidmeta.pack_start(vidbtn, False, False, 0)
else:
vidmeta.pack_start(channelimg, False, False, 0)
vidinfo = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
vidmeta.pack_start(vidinfo, False, False, 0)
vidheader = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6)
vidinfo.pack_start(vidheader, False, False, 0)
titlelabel = Gtk.Label()
titlelabel.set_markup("<a href=''><big><b>" + title.replace("&", "&") + "</b></big></a>")
titlelabel.connect("activate-link", self.OnPlayVideo, id, title, self.mode)
titlelabel.set_justify(Gtk.Justification.FILL)
titlelabel.set_line_wrap(True)
titlelabel.set_max_width_chars(68)
titlelabel.get_style_context().add_class('app-theme')
vidheader.pack_start(titlelabel, True, True, 0)
downloadbtn = Gtk.Button()
if self.mode == "V":
if os.path.exists(os.path.join(self.cache_path, id + ".mp4")):
downloadimg = Gtk.Image.new_from_pixbuf(self.savedpb)
else:
downloadimg = Gtk.Image.new_from_pixbuf(self.downloadpb)
downloadbtn.connect("clicked", self.OnDownloadVideo, id, title, thumbname)
else:
if os.path.exists(os.path.join(self.cache_path, id + ".mp3")):
downloadimg = Gtk.Image.new_from_pixbuf(self.savedpb)
else:
downloadimg = Gtk.Image.new_from_pixbuf(self.downloadpb)
downloadbtn.connect("clicked", self.OnDownloadVideo, id, title, thumbname)
downloadbtn.add(downloadimg)
downloadbtn.get_style_context().add_class('app-theme')
downloadbtn.get_style_context().add_class('no-border')
vidheader.pack_end(downloadbtn, False, False, 0)
viddets = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6)
vidinfo.pack_start(viddets, False, False, 0)
if (channelname != None):
channellabel = Gtk.Label()
channellabel.set_markup("<small>" + channelname.replace("&", "&") + "</small>")
viddets.pack_start(channellabel, False, False, 0)
if (viewcount != None):
viewslabel = Gtk.Label()
viewslabel.set_markup("<small>" + viewcount + "</small>")
viddets.pack_end(viewslabel, False, False, 0)
self.show_all()
if self.playing:
self.controls.show()
else:
self.controls.hide()
self.currentlabel.set_text("no media selected")
def OnLoadLibrary(self, button):
self.DoClearVideoList()
self.library = True
for vid in self.librarydata:
vidcard = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
self.videolist.add(vidcard)
vidmeta = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6)
vidcard.pack_start(vidmeta, False, False, 0)
thumbpb = GdkPixbuf.Pixbuf.new_from_file_at_scale(
filename=os.path.join(self.cache_path, vid['thumb']),
width=68,
height=68,
preserve_aspect_ratio=False)
thumbimg = Gtk.Image.new_from_pixbuf(thumbpb)
vidbtn = Gtk.Button()
vidbtn.add(thumbimg)
vidbtn.connect("clicked", self.OnPlayVideo, None, vid['id'], vid['title'], vid['type'])
vidbtn.get_style_context().add_class('app-theme')
vidbtn.get_style_context().add_class('no-border')
vidmeta.pack_start(vidbtn, False, False, 0)
titlelabel = Gtk.Label()
titlelabel.set_markup("<a href=''><big><b>" + vid['title'].replace("&", "&") + "</b></big></a>")
titlelabel.connect("activate-link", self.OnPlayVideo, vid['id'], vid['title'], vid['type'])
titlelabel.set_justify(Gtk.Justification.FILL)
titlelabel.set_line_wrap(True)
titlelabel.set_max_width_chars(68)
titlelabel.get_style_context().add_class('app-theme')
vidmeta.pack_start(titlelabel, True, True, 0)
removeimg = Gtk.Image.new_from_pixbuf(self.removepb)
removebtn = Gtk.Button()
removebtn.add(removeimg)
removebtn.connect("clicked", self.OnRemoveVideo, vid['id'])
removebtn.get_style_context().add_class('app-theme')
removebtn.get_style_context().add_class('no-border')
vidmeta.pack_end(removebtn, False, False, 0)
self.show_all()
self.DoHideLoading()
if self.playing:
self.controls.show()
else:
self.controls.hide()
self.currentlabel.set_text("no media selected")
def OnPlayVideo(self, button, uri, id, title, type):
self.currentlabel.set_text(title)
self.positionlabel.set_text("loading...")
self.playscale.set_range(0, 0)
self.playscale.set_value(0)
self.currentposition = 0
self.controls.show()
x = threading.Thread(target=self.DoPlayVideo, args=(button, uri, id, type))
x.start()
def DoPlayVideo(self, button, uri, id, type):
vidurl = 'https://www.youtube.com/watch?v=' + id
if type == "V":
if os.path.exists(os.path.join(self.cache_path, id + ".mp4")):
self.player.mode(type, False)
self.player.play(os.path.join(self.cache_path, id + ".mp4"))
else:
self.player.mode(type, True)
self.player.play(vidurl)
else:
if os.path.exists(os.path.join(self.cache_path, id + ".mp3")):
self.player.mode(type, False)
self.player.play(os.path.join(self.cache_path, id + ".mp3"))
else:
self.player.mode(type, True)
self.player.play(vidurl)
self.playing = True
sbparams = ['gsettings', 'set', 'org.gnome.desktop.session', 'idle-delay', '0']
sbproc = subprocess.Popen(sbparams, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1)
return True
def OnStopVideo(self, evt):
self.player.stop()
self.playing = False
self.controls.hide()
self.currentlabel.set_text("no media selected")
self.positionlabel.set_text("")
self.playscale.set_range(0, 0)
self.playscale.set_value(0)
self.currentposition = 0
sbparams = ['gsettings', 'set', 'org.gnome.desktop.session', 'idle-delay', self.idleTime]
sbproc = subprocess.Popen(sbparams, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1)
def OnPauseVideo(self, evt):
if self.playing:
self.player.pause()
self.playing = False
else:
self.player.resume()
self.playing = True
def OnDownloadVideo(self, button, id, title, thumb):
button.get_child().set_from_pixbuf(self.savedpb)
x = threading.Thread(target=self.DoDownloadVideo, args=(id, title, thumb))
x.start()
def DoDownloadVideo(self, id, title, thumb):
vidurl = 'https://www.youtube.com/watch?v=' + id
if self.mode == "M":
downloadparams = [
'youtube-dl',
'--extract-audio',
'--audio-format', 'mp3',
'-o', os.path.join(self.cache_path, id + ".mp3"),
vidurl
]
else:
downloadparams = [
'youtube-dl',
'--recode-video', 'mp4',
'-o', os.path.join(self.cache_path, id + ".mp4"),
vidurl
]
download = subprocess.Popen(downloadparams, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1)
videodata = {
'id' : id,
'title' : title,
'type' : self.mode,
'thumb' : thumb
}
vids = []
for vid in self.librarydata:
vids.append(vid['id'])
if id not in vids:
self.librarydata.append(videodata)
with open(self.library_file, "w") as jsonfile:
json.dump(self.librarydata, jsonfile)
jsonfile.close()
def OnRemoveVideo(self, button, id):
newdata = []
for vid in self.librarydata:
if (vid['id'] != id):
newdata.append(vid)
self.librarydata = newdata
with open(self.library_file, "w") as jsonfile:
json.dump(self.librarydata, jsonfile)
jsonfile.close()
self.OnLoadLibrary(button)
def OnUpdateDuration(self, s):
value = "%02d:%02d" % divmod(s, 60)
self.duration = str(value)
self.playscale.set_range(0, s)
def DoUpdatePosition(self, s):
value = "%02d:%02d" % divmod(s, 60)
self.currentposition = s
if self.seeking == False:
self.positionlabel.set_text(str(value) + "/" + self.duration)
self.playscale.set_value(s)
def OnUpdatePosition(self, s):
GLib.idle_add(self.DoUpdatePosition, s)
def OnPlayPositionSeek(self, s, e):
self.seeking = True
def OnPlayPositionChange(self, s, e):
c = self.currentposition
n = s.get_value()
pos = n - c
self.player.seek(pos)
self.seeking = False
class MediaPlayer(Gtk.GLArea):
def __init__(self, **properties):
super().__init__(**properties)
self._proc_addr_wrapper = OpenGlCbGetProcAddrFn(get_process_address)
self.ctx = None
self.mode("V", True)
self.connect("realize", self.DoRealize)
self.connect("render", self.DoRender)
self.connect("unrealize", self.DoUnrealize)
def DoRealize(self, area):
self.make_current()
self.ctx = MpvRenderContext(self.mpv, 'opengl', opengl_init_params={'get_proc_address': self._proc_addr_wrapper})
self.ctx.update_cb = self.wrapped_c_render_func
def DoUnrealize(self, arg):
self.ctx.free()
self.mpv.terminate()
def wrapped_c_render_func(self):
GLib.idle_add(self.call_frame_ready, None, GLib.PRIORITY_HIGH)
def call_frame_ready(self, *args):
if self.ctx.update():
self.queue_render()
def DoRender(self, arg1, arg2):
if self.ctx:
factor = self.get_scale_factor()
rect = self.get_allocated_size()[0]
width = rect.width * factor
height = rect.height * factor
fbo = GL.glGetIntegerv(GL.GL_DRAW_FRAMEBUFFER_BINDING)
self.ctx.render(flip_y=True, opengl_fbo={'w': width, 'h': height, 'fbo': fbo})
return True
return False
def mode(self, mode, stream):
if mode == "V":
if stream == True:
self.mpv = MPV(
input_default_bindings=True,
input_vo_keyboard=True,
osc=True,
stream_buffer_size='5MiB',
demuxer_max_bytes='1024KiB',
ytdl=True,
ytdl_format='(bestvideo[height<=720]+bestaudio)'
)
else:
self.mpv = MPV(
input_default_bindings=True,
input_vo_keyboard=True,
osc=True
)
#self.mpv.fullscreen = True
else:
if stream == True:
self.mpv = MPV(video=False, stream_buffer_size='5MiB', demuxer_max_bytes='1024KiB', ytdl=True)
else:
self.mpv = MPV(video=False)
@self.mpv.property_observer('duration')
def duration_observer(_name, value):
if value != None:
app.OnUpdateDuration(value)
@self.mpv.property_observer('time-pos')
def time_observer(_name, value):
if value != None:
app.OnUpdatePosition(value)
def play(self, media):
self.mpv.play(media)
def stop(self):
self.mpv.stop()
def pause(self):
self.mpv._set_property('pause', True)
def resume(self):
self.mpv._set_property('pause', False)
def seek(self, pos):
self.mpv.seek(pos)
def get_process_address(_, name):
address = GLX.glXGetProcAddress(name.decode("utf-8"))
return ctypes.cast(address, ctypes.c_void_p).value
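# get_process_address is wrapped in OpenGlCbGetProcAddrFn (see MediaPlayer.__init__)
# and handed to MpvRenderContext so libmpv can resolve OpenGL entry points via GLX.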
app = LinMoTube()
app.connect("destroy", Gtk.main_quit)
app.draw()
Gtk.main()
|
python
|
"""
@author: maffettone
SVM models for classification and regression
"""
from sklearn.svm import SVC
from sklearn.svm import SVR
def gen_classifier(params):
clf = SVC(probability=False,
C=params['C'],
gamma=params['gamma']
)
return clf
def gen_regressor(params):
clf = SVR(C=params['C'],
gamma=params['gamma']
)
return clf
def gen_model(params):
if params['classification']:
return gen_classifier(params)
elif params['regression']:
return gen_regressor(params)
else:
        raise KeyError('Neither regression nor classification specified!')
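# A minimal usage sketch (the params dict below is hypothetical; 'classification',
# 'regression', 'C' and 'gamma' are the keys read by the functions above):
#
#   model = gen_model({'classification': True, 'regression': False,
#                      'C': 1.0, 'gamma': 0.1})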
|
python
|
from .data_archive import * # noqa
from .limit_monitor import * # noqa
from .openmct import * # noqa
|
python
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Enumerate the subdomains of a root asset
import sys
import requests
import threading
import time,json,re
from bs4 import BeautifulSoup
class subdomain(object):
def __init__(self,url):
self.url = url
self.set_dns = set()
self.set_ip138 = set()
self.set_crt = set()
self.set_baidu = set()
self.set_find = set()
    # Unified entry point that queries all interfaces
def get_subdomain(self):
        # run 5 threads, one per interface
threads = []
threads.append(threading.Thread(target=self.dns))
threads.append(threading.Thread(target=self.ip138))
threads.append(threading.Thread(target=self.crt))
threads.append(threading.Thread(target=self.baidu))
threads.append(threading.Thread(target=self.find))
for i in threads:
i.start()
for i in threads:
i.join()
subdomain_total = self.set_dns | self.set_ip138 | self.set_crt | self.set_baidu | self.set_find
return subdomain_total
def dns(self):
url = 'https://www.virustotal.com/vtapi/v2/domain/report?apikey=0ad3c01b1ff7952bc8cbb4370ef4a0c53201d2daffe113efb1d2fef484e16e58&domain=' + self.url
try:
r = requests.get(url)
time.sleep(10)
r_dict = json.loads(r.text)
for i in r_dict['subdomains']:
                self.set_dns.add(i)
            print '[!]subdomain:' + str(len(self.set_dns))
            return self.set_dns
except:
print '[-]subdomains:error'
return
#virustotal dns
def ip138(self):
url1 = 'http://site.ip138.com/%s/domain.htm'%self.url
try :
r = requests.get(url1)
b = BeautifulSoup(r.content,'lxml')
for i in b.find_all('a',href=re.compile('%s'%self.url),target='_blank',rel=''):
self.set_ip138.add(i.string)
            print '[!]ip138:' + str(len(self.set_ip138))
return self.set_ip138
except:
            print '[-]ip138 interface failed'
return
    #ip138 interface
def crt(self):
url1 = 'https://crt.sh/?q=%25.' + self.url
try:
r = requests.get(url1).content
b = BeautifulSoup(r,'lxml')
for i in b.find_all('td',class_='',style=''):
if '</a>' not in str(i) and '*.' not in str(i):
self.set_crt.add(i.string)
            print '[!]crt:' + str(len(self.set_crt))
return self.set_crt
except:
print '[-]crt interface failed'
return
def baidu(self):
url_r = 'http://ce.baidu.com/index/getRelatedSites?site_address=%s' % self.url
try:
r = requests.get(url_r).content
jr = json.loads(r)
urls = jr['data']
for url in urls:
url = url['domain']
self.set_baidu.add(url)
            print '[!]baidu:%s' % str(len(self.set_baidu))
return self.set_baidu
except:
            print '[-]baidu interface failed'
return
def find(self):
url = 'https://findsubdomains.com/subdomains-of/%s'%self.url
try:
r = requests.get(url).content
b = BeautifulSoup(r, 'lxml')
for c in b.find_all(attrs={'class': 'js-domain-name domains', 'class': 'domains js-domain-name'}):
self.set_find.add(c.string.strip())
            print '[!]find:' + str(len(self.set_find))
return self.set_find
except:
print '[-]find interface failed'
return
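# A minimal usage sketch (hypothetical domain, for illustration only):
#
#   s = subdomain('example.com')
#   for d in s.get_subdomain():
#       print d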
|
python
|
from pkwscraper.lib.controller import Controller
"""
This example shows the invalid votes percentage in communes (gminy).
It also shows how many of those votes were invalid because a voting
mark was put next to 2 or more candidates. This is considered the main
indicator of possible election falsification.
The ballots are counted by hand by voting commissions. Assuming that
most commission members are loyal and lawful (or at least one of them
is), the easiest known way of falsifying votes is to secretly put an
additional mark on ballots carrying a valid vote for an undesired
candidate while counting them. This makes that particular vote invalid,
with no way to later determine whether the ballot was falsified or the
voter cast an invalid vote. It can be prevented by putting pens away
while counting ballots.
In recent years it has been hard to find enough people for voting
commissions (more than 100 000 members are needed throughout the
country), so the salary was raised noticeably. Many commission members
are therefore ordinary people not involved in politics and can be
considered honest. Nevertheless, commissions are created by local
authorities, so the risk of untrustworthy commission members is higher
in rural areas where one party has a vast majority of support.
However, demographic structure and the turnout value can also cause
differences in the amount of invalid votes. It is also worth mentioning
that in some groups of voters casting an invalid vote is a form of
boycott or protest against the voting system. But ballots invalidated
as a protest most often do not contain any voting mark at all, which is
distinguished in the protocols.
To sum up - the probability of election falsification can to some
extent be detected by applying statistical analysis to voting results.
This example shows a relatively simple version of such an analysis.
Color code:
- red: MANY invalid votes, MANY of them due to multiple voting marks
- blue: MANY invalid votes, LITTLE of them due to multiple voting marks
- green: LITTLE invalid votes, LITTLE of them due to multiple voting marks
- yellow: LITTLE invalid votes, MANY of them due to multiple voting marks
Red color may indicate units with highest probability of using the
described falsification method. This should be further checked with the
total number of voters and results for individual committees.
"""
def function(db):
# read protocoles data from polling districts from DB
protocoles = db["protokoły"].find(
query={},
fields=["voters", "ballots_valid", "votes_invalid",
"invalid_2_candidates", "votes_valid"]
)
# initiate sums
voters = 0
ballots_valid = 0
votes_invalid = 0
invalid_2_candidates = 0
votes_valid = 0
# iterate over protocoles and sum votes
for protocole_record in protocoles:
voters += protocole_record[0]
ballots_valid += protocole_record[1]
votes_invalid += protocole_record[2]
invalid_2_candidates += protocole_record[3]
votes_valid += protocole_record[4]
# calculate measures
invalid_percent = votes_invalid / ballots_valid
too_many_candidates_percent = invalid_2_candidates / votes_invalid
too_many_absolute = invalid_2_candidates / ballots_valid
# return vector of values
return invalid_percent, too_many_candidates_percent
def colormap(values):
# unpack values
invalid_fraction, too_many_candidates_fraction = values
# determine color channels
red = too_many_candidates_fraction
green = 1 - invalid_fraction
blue = 1 - max(red, green)
alpha = 0.82
# compose color
return (red, green, blue, alpha)
def main():
# run
ctrl = Controller(
("Sejm", 2015), function, colormap, granularity="communes",
outlines_granularity="constituencies", normalization=True,
output_filename="głosy_nieważne.png"
)
ctrl.run()
# print measures extremes
min_invalid, min_multiple = ctrl.vis.mins
max_invalid, max_multiple = ctrl.vis.maxs
print(f"Invalid votes percentage ranges from"
f" {round(100*min_invalid, 1)} % to"
f" {round(100*max_invalid, 1)} %.")
print(f"Fraction of them, caused by marking more than"
f" 1 candidate, ranges from {round(100*min_multiple, 1)} %"
f" to {round(100*max_multiple, 1)} %.")
if __name__ == "__main__":
main()
|
python
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Prune T2TModels using some heuristic.
This supports a very common form of pruning known as magnitude-based pruning.
It ranks individual weights or units according to their magnitudes and zeros
out the smallest k% of weights, effectively removing them from the graph.
Example run:
- train a resnet on cifar10:
bin/t2t_trainer.py --problem=image_cifar10 --hparams_set=resnet_cifar_32 \
--model=resnet
- evaluate different pruning percentages using weight-level pruning:
bin/t2t_prune.py --pruning_params_set=resnet_weight --problem=image_cifar10\
--hparams_set=resnet_cifar_32 --model=resnet
"""
import os
from tensor2tensor.bin import t2t_trainer
from tensor2tensor.data_generators import problem as problem_lib # pylint: disable=unused-import
from tensor2tensor.utils import pruning_utils
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
from tensor2tensor.utils import trainer_lib
from tensor2tensor.utils import usr_dir
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
# See flags.py for additional command-line flags.
flags.DEFINE_string("pruning_params_set", None,
"Which pruning parameters to use.")
def create_pruning_params():
return registry.pruning_params(FLAGS.pruning_params_set)
def create_pruning_strategy(name):
return registry.pruning_strategies(name)
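# Illustrative sketch (not part of the T2T pipeline): magnitude-based weight
# pruning as described in the module docstring -- rank weights by magnitude and
# zero out the smallest k percent. Uses a plain numpy array for clarity.
def _magnitude_prune_sketch(weights, k):
  import numpy as np
  threshold = np.percentile(np.abs(weights), k)
  return np.where(np.abs(weights) < threshold, np.zeros_like(weights), weights)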
def main(argv):
tf.logging.set_verbosity(tf.logging.INFO)
trainer_lib.set_random_seed(FLAGS.random_seed)
usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
t2t_trainer.maybe_log_registry_and_exit()
if FLAGS.generate_data:
t2t_trainer.generate_data()
if argv:
t2t_trainer.set_hparams_from_args(argv[1:])
hparams = t2t_trainer.create_hparams()
trainer_lib.add_problem_hparams(hparams, FLAGS.problem)
pruning_params = create_pruning_params()
pruning_strategy = create_pruning_strategy(pruning_params.strategy)
config = t2t_trainer.create_run_config(hparams)
params = {"batch_size": hparams.batch_size}
# add "_rev" as a hack to avoid image standardization
problem = registry.problem(FLAGS.problem)
input_fn = problem.make_estimator_input_fn(tf.estimator.ModeKeys.EVAL,
hparams)
dataset = input_fn(params, config).repeat()
features, labels = dataset.make_one_shot_iterator().get_next()
sess = tf.Session()
model_fn = t2t_model.T2TModel.make_estimator_model_fn(
FLAGS.model, hparams)
spec = model_fn(
features,
labels,
tf.estimator.ModeKeys.EVAL,
params=hparams,
config=config)
# Restore weights
saver = tf.train.Saver()
checkpoint_path = os.path.expanduser(FLAGS.output_dir or
FLAGS.checkpoint_path)
saver.restore(sess, tf.train.latest_checkpoint(checkpoint_path))
def eval_model():
preds = spec.predictions["predictions"]
preds = tf.argmax(preds, -1, output_type=labels.dtype)
_, acc_update_op = tf.metrics.accuracy(labels=labels, predictions=preds)
sess.run(tf.initialize_local_variables())
for _ in range(FLAGS.eval_steps):
acc = sess.run(acc_update_op)
return acc
pruning_utils.sparsify(sess, eval_model, pruning_strategy, pruning_params)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
|
python
|
import os
class GeneralConfig:
# General
validation_split = 0.2
seed = 42
verbose = True
architecture_type = "PretrainedResNet50"
# Data image extension
image_extension = ".jpg"
pip_packages = [
"tensorflow==2.2",
"numpy",
"pandas",
"matplotlib",
"scikit-image",
"scikit-learn",
"opencv-python",
"tqdm",
]
# Enable hyperdrive experiments
hyperdrive = True
class ModelConfig:
# General model hyperparameters
batch_size = 64
epochs = 10
# Early stopping
patience = 5
# Normalization samples
normalization_samples = 1000
# Data augmentation
data_augmentation = True
rotation_range = 40
zoom_range = 0.2
width_shift_range = 0.2
height_shift_range = 0.2
horizontal_flip = True
vertical_flip = False
featurewise_center = True
featurewise_std_normalization = True
samplewise_center = False
samplewise_std_normalization = False
rescale = None
# TTA augmentation passes
tta_augmentation_passes = 10
# Pretrained ResNet50 hyperparameters
pretrained_resnet50_hyperparams = {
"input_dim": 224,
"learning_rate": 1e-4,
"hidden_dim_begin": 256,
"hidden_dim_min": 128,
"freezed_conv_layers": 15,
"activation": "elu",
"batch_normalization": True,
"dropout": True,
"dropout_begin": 0.2,
"dropout_max": 0.5,
"final_average_pooling": True,
"depth": 2,
}
class HyperdriveConfig:
# Pretrained ResNet50 hyperparameters
pretrained_resnet50_hyperparams_space = {
"--input-dim": [112, 224],
"--learning-rate": [1e-4],
"--hidden-dim-begin": [256],
"--hidden-dim-min": [128],
"--freezed-conv-layers": [5, 15, 30],
"--activation": ["elu"],
"--batch-normalization": [True],
"--dropout": [True],
"--dropout-begin": [0.2],
"--dropout-max": [0.5],
"--final-average-pooling": [False, True],
"--depth": [0, 1, 2],
}
evaluation_interval = 2
slack_factor = 0.1
max_total_runs = 15
max_concurrent_runs = 1
class PathsConfig:
# Data paths
entry_script = "classification/training.py"
data_train = "train"
data_test = "test"
classes = "classes.txt"
# Outputs paths
outputs_directory = "outputs"
generators_directory = os.path.join(outputs_directory, "generators")
image_generator_path = os.path.join(generators_directory, "image_generator.pkl")
augmented_image_generator_path = os.path.join(
generators_directory, "augmented_image_generator.pkl"
)
predictions_directory = os.path.join(outputs_directory, "predictions")
model_directory = os.path.join(outputs_directory, "model")
confusion_matrix_path = "confusion_matrix.jpg"
|
python
|
from .GeometricRestriction import GeometricRestriction
class SizingClusterAreas(GeometricRestriction):
"""The SizingClusterAreas object defines a sizing cluster areas geometric restriction.
The SizingClusterAreas object is derived from the GeometricRestriction object.
Notes
-----
This object can be accessed by:
.. code-block:: python
import optimization
mdb.models[name].optimizationTasks[name].geometricRestrictions[name]
"""
def __init__(self, name: str, regions: tuple):
"""This method creates a SizingClusterAreas object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].optimizationTasks[name].SizingClusterAreas
Parameters
----------
name
A String specifying the geometric restriction repository key.
regions
Tuple of Region objects specifying the regions to which the geometric restriction is
applied.
Returns
-------
A SizingClusterAreas object.
"""
super().__init__()
pass
def setValues(self):
"""This method modifies the SizingClusterAreas object.
"""
pass
|
python
|
import logging
import logging.handlers
from traceback import format_stack
from brotab.inout import in_temp_dir
def _init_logger(tag, filename: str):
FORMAT = '%(asctime)-15s %(process)-5d %(levelname)-8s %(filename)s:%(lineno)d:%(funcName)s %(message)s'
MAX_LOG_SIZE = 50 * 1024 * 1024
LOG_BACKUP_COUNT = 1
log = logging.getLogger('brotab')
log.setLevel(logging.DEBUG)
handler = logging.handlers.RotatingFileHandler(
filename=filename,
maxBytes=MAX_LOG_SIZE,
backupCount=LOG_BACKUP_COUNT,
)
handler.setFormatter(logging.Formatter(FORMAT))
log.addHandler(handler)
log.info('Logger has been created (%s)', tag)
return log
def init_brotab_logger(tag: str):
return _init_logger(tag, in_temp_dir('brotab.log'))
def init_mediator_logger(tag: str):
return _init_logger(tag, in_temp_dir('brotab_mediator.log'))
def disable_logging():
# disables flask request logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
log.disabled = True
# TODO: investigate this, maybe we can redirect werkzeug from stdout to a file
# log.handlers = []
# disables my own logging in log_and_suppress_exceptions
# app.logger.disabled = True
# from flask.logging import default_handler
# app.logger.removeHandler(default_handler)
def disable_click_echo():
"""Stupid flask started using click which unconditionally prints stupid
messages"""
def numb_echo(*args, **kwargs):
pass
import click
click.echo = numb_echo
click.secho = numb_echo
def stack():
return '\n'.join(format_stack())
mediator_logger = init_mediator_logger('mediator')
brotab_logger = init_brotab_logger('brotab')
|
python
|
import cv2
import numpy as np
from numpy.linalg import norm
import os
import json
SZ = 20
PROVINCE_START = 1000
MAX_WIDTH = 2000
def point_limit(point):
if point[0] < 0:
point[0] = 0
if point[1] < 0:
point[1] = 0
def deskew(img):
m = cv2.moments(img)
if abs(m['mu02']) < 1e-2:
return img.copy()
skew = m['mu11'] / m['mu02']
M = np.float32([[1, skew, -0.5 * SZ * skew], [0, 1, 0]])
img = cv2.warpAffine(img, M, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
return img
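# Note: deskew estimates the shear from the image moments (mu11 / mu02) and
# removes it with an affine warp so characters are upright before HOG + SVM.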
# From the OpenCV samples; used for SVM training
def preprocess_hog(digits):
samples = []
for img in digits:
gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
mag, ang = cv2.cartToPolar(gx, gy)
bin_n = 16
bin = np.int32(bin_n * ang / (2 * np.pi))
bin_cells = bin[:10, :10], bin[10:, :10], bin[:10, 10:], bin[10:, 10:]
mag_cells = mag[:10, :10], mag[10:, :10], mag[:10, 10:], mag[10:, 10:]
hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
hist = np.hstack(hists)
eps = 1e-7
hist /= hist.sum() + eps
hist = np.sqrt(hist)
hist /= norm(hist) + eps
samples.append(hist)
return np.float32(samples)
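# Note: preprocess_hog yields a 64-dimensional descriptor per 20x20 character
# (16 orientation bins over 4 quadrants) with Hellinger (sqrt + L2) normalization.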
provinces = [
"zh_cuan", "川",
"zh_e", "鄂",
"zh_gan", "赣",
"zh_gan1", "甘",
"zh_gui", "贵",
"zh_gui1", "桂",
"zh_hei", "黑",
"zh_hu", "沪",
"zh_ji", "冀",
"zh_jin", "津",
"zh_jing", "京",
"zh_jl", "吉",
"zh_liao", "辽",
"zh_lu", "鲁",
"zh_meng", "蒙",
"zh_min", "闽",
"zh_ning", "宁",
"zh_qing", "靑",
"zh_qiong", "琼",
"zh_shan", "陕",
"zh_su", "苏",
"zh_sx", "晋",
"zh_wan", "皖",
"zh_xiang", "湘",
"zh_xin", "新",
"zh_yu", "豫",
"zh_yu1", "渝",
"zh_yue", "粤",
"zh_yun", "云",
"zh_zang", "藏",
"zh_zhe", "浙"
]
# Find wave peaks in a histogram given a threshold; used to separate the characters
def find_waves(threshold, histogram):
    up_point = -1  # rising point
is_peak = False
if histogram[0] > threshold:
up_point = 0
is_peak = True
wave_peaks = []
for i, x in enumerate(histogram):
if is_peak and x < threshold:
if i - up_point > 2:
is_peak = False
wave_peaks.append((up_point, i))
elif not is_peak and x >= threshold:
is_peak = True
up_point = i
if is_peak and up_point != -1 and i - up_point > 4:
wave_peaks.append((up_point, i))
return wave_peaks
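# Example (illustrative): with threshold 2,
#   find_waves(2, [0, 5, 5, 5, 5, 0, 0, 5, 5, 5, 5, 5, 0]) -> [(1, 5), (7, 12)]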
# Split the image at the found wave peaks to obtain one image per character
def seperate_card(img, waves):
part_cards = []
for wave in waves:
part_cards.append(img[:, wave[0]:wave[1]])
return part_cards
class StatModel(object):
def load(self, fn):
self.model = self.model.load(fn)
def save(self, fn):
self.model.save(fn)
class SVM(StatModel):
def __init__(self, C=1, gamma=0.5):
self.model = cv2.ml.SVM_create()
self.model.setGamma(gamma)
self.model.setC(C)
self.model.setKernel(cv2.ml.SVM_RBF)
self.model.setType(cv2.ml.SVM_C_SVC)
    # character recognition
def predict(self, samples):
r = self.model.predict(samples)
return r[1].ravel()
class CardPredictor:
def __init__(self):
f = open('/home/python/Desktop/opencv_test/opencv_demo/opencv_test1/config.js')
j = json.load(f)
for c in j["config"]:
if c["open"]:
self.cfg = c.copy()
break
else:
            raise RuntimeError('No valid configuration parameters set')
def train_svm(self):
        # recognizes Latin letters and digits
self.model = SVM(C=1, gamma=0.5)
        # recognizes Chinese characters
self.modelchinese = SVM(C=1, gamma=0.5)
if os.path.exists("svm.dat"):
self.model.load("svm.dat")
if os.path.exists("svmchinese.dat"):
self.modelchinese.load("svmchinese.dat")
def accurate_place(self, card_img_hsv, limit1, limit2, color):
row_num, col_num = card_img_hsv.shape[:2]
xl = col_num
xr = 0
yh = 0
yl = row_num
# col_num_limit = self.cfg["col_num_limit"]
row_num_limit = self.cfg["row_num_limit"]
        col_num_limit = col_num * 0.8 if color != "green" else col_num * 0.5  # green plates have a color gradient
for i in range(row_num):
count = 0
for j in range(col_num):
H = card_img_hsv.item(i, j, 0)
S = card_img_hsv.item(i, j, 1)
V = card_img_hsv.item(i, j, 2)
if limit1 < H <= limit2 and 34 < S and 46 < V:
count += 1
if count > col_num_limit:
if yl > i:
yl = i
if yh < i:
yh = i
for j in range(col_num):
count = 0
for i in range(row_num):
H = card_img_hsv.item(i, j, 0)
S = card_img_hsv.item(i, j, 1)
V = card_img_hsv.item(i, j, 2)
if limit1 < H <= limit2 and 34 < S and 46 < V:
count += 1
if count > row_num - row_num_limit:
if xl > j:
xl = j
if xr < j:
xr = j
return xl, xr, yh, yl
def shibie(self, card_pic):
img = cv2.imread(card_pic, 1)
# color_img = img
pic_hight, pic_width = img.shape[0:2]
if pic_width > MAX_WIDTH:
resize_rate = MAX_WIDTH / pic_width
img = cv2.resize(img, (MAX_WIDTH, int(pic_hight * resize_rate)), interpolation=cv2.INTER_AREA)
# img = cv2.resize(img, (600, 450), interpolation=cv2.INTER_AREA)
kernel = np.ones((20, 20), np.uint8)
oldimg = img
        # Gaussian denoising
img = cv2.GaussianBlur(img, (3, 3), 0)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# equ = cv2.equalizeHist(img)
# img = np.hstack((img, equ))
        # Remove regions of the image that cannot be the plate
opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
img_opening = cv2.addWeighted(img, 1, opening, -1, 0)
        # Find the image edges
ret, img_thresh = cv2.threshold(img_opening, 0, 255, cv2.THRESH_OTSU)
img_edge = cv2.Canny(img_thresh, 100, 200)
        # Use opening and closing so the edges form a connected whole
# kernel = np.ones((self.cfg["morphologyr"], self.cfg["morphologyc"]), np.uint8)
kernel = np.ones((4, 22), np.uint8)
img_edge1 = cv2.morphologyEx(img_edge, cv2.MORPH_CLOSE, kernel)
img_edge2 = cv2.morphologyEx(img_edge1, cv2.MORPH_OPEN, kernel)
        # Find rectangular regions formed by the edges; there may be many, and the plate is inside one of them
image, contours, hierarchy = cv2.findContours(img_edge2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = [cnt for cnt in contours if cv2.contourArea(cnt) > 1900]
print('len(contours)', len(contours))
        # Rule out, one by one, the rectangles that are not the plate
car_contours = []
for cnt in contours:
            rect = cv2.minAreaRect(cnt)  # returns ((center x, center y), (width, height), rotation angle)
area_width, area_height = rect[1]
if area_width < area_height:
area_width, area_height = area_height, area_width
wh_ratio = area_width / area_height
# print(wh_ratio)
            # Require an aspect ratio between 2 and 6 (a typical plate aspect ratio); discard the rest
if wh_ratio > 2 and wh_ratio < 6:
car_contours.append(rect)
box = cv2.boxPoints(rect)
box = np.int0(box)
# oldimg = cv2.drawContours(oldimg, [box], 0, (0, 0, 255), 2)
# cv2.imshow("img_edge2", img_edge2)
# cv2.waitKey(0)
# print(rect)
print(len(car_contours))
card_imgs = []
position = [] # y_min, y_max, x_min, x_max
        # The rectangles may be tilted and need correcting so that color localization can be used
for i, rect in enumerate(car_contours):
if rect[2] > -1 and rect[2] < 1:
angle = 1
else:
angle = rect[2]
rect = (rect[0], (rect[1][0] + 5, rect[1][1] + 5), angle)
box = cv2.boxPoints(rect)
heigth_point = right_point = [0, 0]
left_point = low_point = [pic_width, pic_hight]
for point in box:
if left_point[0] > point[0]:
left_point = point
if low_point[1] > point[1]:
low_point = point
if heigth_point[1] < point[1]:
heigth_point = point
if right_point[0] < point[0]:
right_point = point
            if left_point[1] <= right_point[1]:  # positive angle
new_right_point = [right_point[0], heigth_point[1]]
pts2 = np.float32([left_point, heigth_point, new_right_point])
pts1 = np.float32([left_point, heigth_point, right_point])
M = cv2.getAffineTransform(pts1, pts2)
dst = cv2.warpAffine(oldimg, M, (pic_width, pic_hight))
point_limit(new_right_point)
point_limit(heigth_point)
point_limit(left_point)
card_img = dst[int(left_point[1]):int(heigth_point[1]), int(left_point[0]):int(new_right_point[0])]
position.append([int(left_point[1]), int(heigth_point[1]), int(left_point[0]), int(new_right_point[0])])
# print("-------------1")
# print(int(left_point[1]), int(heigth_point[1]), int(left_point[0]), int(new_right_point[0]))
card_imgs.append(card_img)
# cv2.imshow("card", card_img)
# cv2.waitKey(0)
            elif left_point[1] > right_point[1]:  # negative angle
new_left_point = [left_point[0], heigth_point[1]]
                pts2 = np.float32([new_left_point, heigth_point, right_point])  # only the character height needs to change
pts1 = np.float32([left_point, heigth_point, right_point])
M = cv2.getAffineTransform(pts1, pts2)
dst = cv2.warpAffine(oldimg, M, (pic_width, pic_hight))
point_limit(right_point)
point_limit(heigth_point)
point_limit(new_left_point)
card_img = dst[int(right_point[1]):int(heigth_point[1]), int(new_left_point[0]):int(right_point[0])]
position.append([int(right_point[1]), int(heigth_point[1]), int(new_left_point[0]), int(right_point[0])])
# print("-------------2")
# print(int(right_point[1]), int(heigth_point[1]), int(new_left_point[0]), int(right_point[0]))
card_imgs.append(card_img)
# cv2.imshow("card", card_img)
# cv2.waitKey(0)
        # Use color localization to rule out non-plate rectangles; only blue, green and yellow plates are recognized for now
colors = []
for card_index, card_img in enumerate(card_imgs):
green = yello = blue = black = white = 0
card_img_hsv = cv2.cvtColor(card_img, cv2.COLOR_BGR2HSV)
if card_img_hsv is None:
continue
row_num, col_num = card_img_hsv.shape[:2]
card_img_count = row_num * col_num
for i in range(row_num):
for j in range(col_num):
H = card_img_hsv.item(i, j, 0)
S = card_img_hsv.item(i, j, 1)
V = card_img_hsv.item(i, j, 2)
                    if 11 < H <= 34 and S > 34:  # thresholds tuned to the image resolution
yello += 1
                    elif 35 < H <= 99 and S > 34:  # thresholds tuned to the image resolution
green += 1
                    elif 99 < H <= 124 and S > 34:  # thresholds tuned to the image resolution
blue += 1
if 0 < H < 180 and 0 < S < 255 and 0 < V < 46:
black += 1
elif 0 < H < 180 and 0 < S < 43 and 221 < V < 225:
white += 1
color = "no"
limit1 = limit2 = 0
if yello * 2 >= card_img_count:
color = "yello"
limit1 = 11
limit2 = 34
elif green * 2 >= card_img_count:
color = "green"
limit1 = 35
limit2 = 99
elif blue * 3 >= card_img_count:
color = "blue"
limit1 = 100
limit2 = 124
elif black + white >= card_img_count * 0.7:
color = "bw"
print(color)
colors.append(color)
print(blue, green, yello, black, white, card_img_count)
# cv2.imshow("color", card_img)
# cv2.waitKey(0)
if limit1 == 0:
continue
            # The plate color has now been determined
            # Re-localize using the plate color to shrink non-plate borders
xl, xr, yh, yl = self.accurate_place(card_img_hsv, limit1, limit2, color)
if yl == yh and xl == xr:
continue
need_accurate = False
if yl >= yh:
yl = 0
yh = row_num
need_accurate = True
if xl >= xr:
xl = 0
xr = col_num
need_accurate = True
if (yh - yl) < row_num * 0.7:
yl = 0
yh = row_num
# card_img1 = color_img[position[card_index][0]+yl:position[card_index][1],
# position[card_index][2]+xl:position[card_index][3]]
# cv2.imshow("card_img1", card_img1)
# cv2.waitKey(0)
card_imgs[card_index] = card_img[yl:yh, xl:xr] if color != "green" or yl < (yh - yl) // 4 else card_img[
yl - (
yh - yl) // 4:yh,
xl:xr]
# print(xl, xr, yh, yl)
if need_accurate:
card_img = card_imgs[card_index]
card_img_hsv = cv2.cvtColor(card_img, cv2.COLOR_BGR2HSV)
xl, xr, yh, yl = self.accurate_place(card_img_hsv, limit1, limit2, color)
if yl == yh and xl == xr:
continue
if yl >= yh:
yl = 0
yh = row_num
if xl >= xr:
xl = 0
xr = col_num
# card_img1 = color_img[position[card_index][0] + yl:position[card_index][1],
# position[card_index][2] + xl:position[card_index][3]]
# cv2.imshow("card_img1", card_img1)
# cv2.waitKey(0)
card_imgs[card_index] = card_img[yl:yh, xl:xr] if color != "green" or yl < (yh - yl) // 4 else card_img[
yl - (
yh - yl) // 4:yh,
xl:xr]
# print(xl, xr, yh, yl)
        # Recognize the characters on the plate
predict_result = []
roi = None
card_color = None
for i, color in enumerate(colors):
if color in ("blue", "yello", "green"):
card_img = card_imgs[i]
card_img1 = cv2.resize(card_img, (720, 180))
# print(card_img)
# print("___________________________________________")
# cv2.imshow("card_img1", card_img1)
# cv2.waitKey(0)
gray_img = cv2.cvtColor(card_img1, cv2.COLOR_BGR2GRAY)
                # On yellow/green plates the characters are darker than the background (the opposite of blue plates), so they would need inverting
# if color == "green" or color == "yello":
# gray_img = cv2.bitwise_not(gray_img)
blur = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret3, gray_img = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# cv2.imshow("gray_img", gray_img)
# cv2.waitKey(0)
# print("gray_img{}".format(gray_img))
x_histogram = np.sum(gray_img, axis=1)
x_min = np.min(x_histogram)
# print("x_min{}".format(x_min))
x_average = np.sum(x_histogram) / x_histogram.shape[0]
x_threshold = (x_min + x_average) / 2
# print("x_threshold:{}".format(x_threshold))
wave_peaks = find_waves(x_threshold, x_histogram)
# print(wave_peaks)
print("wavex:{}".format(wave_peaks))
if len(wave_peaks) == 0:
print("peak less 0:")
continue
                # Treat the widest horizontal wave peak as the plate area
wave = max(wave_peaks, key=lambda x: x[1] - x[0])
# print("wave:{}".format(wave))
gray_img = gray_img[wave[0]:wave[1]]
row_num, col_num = gray_img.shape[:2]
gray_img = gray_img[1:row_num - 1]
y_histogram = np.sum(gray_img, axis=0)
y_min = np.min(y_histogram)
y_average = np.sum(y_histogram) / y_histogram.shape[0]
y_threshold = (y_min + y_average) / 5
# print("y_threshold:{}".format(y_threshold))
wave_peaks = find_waves(y_threshold, y_histogram)
print("wavey:{}".format(wave_peaks))
                # A plate should have more than 6 characters
if len(wave_peaks) <= 6:
print("peak less 1:", len(wave_peaks))
continue
wave = max(wave_peaks, key=lambda x: x[1] - x[0])
print("wave_max:{}".format(wave))
max_wave_dis = wave[1] - wave[0]
                if len(wave_peaks) >= 10:  # handles the Chinese character 川, which splits into three strokes
if abs(wave_peaks[2][1] - wave_peaks[0][0] - max_wave_dis) <= 5:
new_wave = (wave_peaks[0][0], wave_peaks[2][1])
wave_peaks = wave_peaks[3:]
wave_peaks.insert(0, new_wave)
                # Check whether the first peak is the plate's left border
if wave_peaks[0][1] - wave_peaks[0][0] < max_wave_dis / 3 and wave_peaks[1][1] - wave_peaks[0][
0] > max_wave_dis:
wave_peaks.pop(0)
                # Merge the separated strokes of the Chinese character
cur_dis = 0
for i, wave in enumerate(wave_peaks):
if wave[1] - wave[0] + cur_dis > max_wave_dis * 0.5:
break
else:
cur_dis += wave[1] - wave[0]
if i > 0:
wave = (wave_peaks[0][0], wave_peaks[i][1])
wave_peaks = wave_peaks[i + 1:]
wave_peaks.insert(0, wave)
                # Remove the separator dot on the plate
# print(wave_peaks)
point = wave_peaks[2]
if point[1] - point[0] < max_wave_dis / 3:
point_img = gray_img[:, point[0]:point[1]]
if np.mean(point_img) < 255 / 5:
wave_peaks.pop(2)
# cv2.imshow("card_img", card_img)
# cv2.waitKeyEx(0)
if len(wave_peaks) <= 6:
print("peak less 2:", len(wave_peaks))
continue
# print(wave_peaks)
# first_card = card_img[:, wave_peaks[0][0]:wave_peaks[0][1]]
# cv2.imwrite("chinese/test_card14.jpg", first_card)
# cv2.imshow("first_card", img)
# cv2.waitKey(0)
# print("--------------------------")
# print(gray_img)
part_cards = seperate_card(gray_img, wave_peaks)
# print(part_cards)
# print("part_cards:{}".format(len(part_cards)))
# print(part_cards[6].shape[0], part_cards[6].shape[0])
for i, part_card in enumerate(part_cards):
                    # probably a rivet fixing the plate
if np.mean(part_card) < 255 / 5:
print("a point")
continue
part_card_old = part_card
w = abs(part_card.shape[1] - SZ) // 2
# if i == 0:
# chi_path = "/home/python/Desktop/opencv_test/opencv_demo/opencv_test1/chinese_test/jpg/" + os.path.basename(
# card_pic)
# cv2.imwrite(chi_path, part_card)
# cv2.imshow("part_card", part_card)
# cv2.waitKey(0)
part_card = cv2.copyMakeBorder(part_card, 0, 0, w, w, cv2.BORDER_CONSTANT, value=[0, 0, 0])
part_card = cv2.resize(part_card, (SZ, SZ), interpolation=cv2.INTER_AREA)
part_card = deskew(part_card)
part_card = preprocess_hog([part_card])
# cv2.imshow("part_card1", part_card)
# cv2.waitKey(0)
if i == 0:
resp = self.modelchinese.predict(part_card)
# print("resp1:{}".format(resp))
# print(resp[0])
charactor = provinces[int(resp[0]) - PROVINCE_START]
# print(print("charactor1:{}".format(charactor)))
else:
resp = self.model.predict(part_card)
# print("resp2:{}".format(resp))
# print(resp[0])
charactor = chr(resp[0])
# print(charactor)
# cv2.imshow("part_card", part_card)
# cv2.waitKeyEx(0)
                    # Check whether the last character is really the plate border, assuming the border gets read as "1"
if charactor == "1" and i == len(part_cards) - 1:
# print(part_card_old.shape[0], part_card_old.shape[1])
# cv2.imshow("last_card", part_card_old)
# cv2.waitKey(0)
if i != 6:
                            if part_card_old.shape[0] / part_card_old.shape[1] >= 7:  # too thin for a "1"; treat it as the border
continue
predict_result.append(charactor)
roi = card_img
# cv2.imshow("card_image", card_img)
# cv2.waitKey(0)
card_color = color
break
return predict_result, roi, card_color
if __name__ == '__main__':
c = CardPredictor()
c.train_svm()
r, roi, color = c.shibie("/home/python/Desktop/opencv_test/opencv_demo/opencv_test1/test_pic/car35.jpg")
# if len(r) == 7:
# print(r)
# cv2.imshow("car_pic", roi)
# cv2.waitKey(0)
# else:
# print("无法识别")
print(r)
cv2.imshow("card_img", roi)
cv2.waitKey(0)
# c = CardPredictor()
# c.train_svm()
# for root, dirs, files in os.walk("/home/python/Desktop/opencv_test/opencv_demo/pic"):
# count = 0
# unshibie = []
# for i, file in enumerate(files):
# car_pic = os.path.join(root, file)
# try:
# r, roi, color = c.shibie(car_pic)
# except:
# unshibie.append(file)
# continue
# if len(r) == 7:
# count += 1
# print("成功识别{},结果为{}".format(file, r))
# else:
# unshibie.append(file)
# print("识别率为:{:.2f}%".format(count/(i+1)*100))
# print("没有识别的车牌有:{}".format(unshibie))
# break
|
python
|
"""
Test methods for the Cmpi opcode
"""
from easier68k.simulator.m68k import M68K
from easier68k.core.opcodes.cmpi import Cmpi
from easier68k.core.models.assembly_parameter import AssemblyParameter
from easier68k.core.enum.ea_mode import EAMode
from easier68k.core.enum.register import Register
from easier68k.core.enum.op_size import OpSize
from easier68k.core.models.memory_value import MemoryValue
from .test_opcode_helper import run_opcode_test
def test_cmpi():
"""
Test to see that it can compare a number to another number.
Example case used:
MOVE.W #123,D0
CMPI.W #52,D0
"""
sim = M68K()
sim.set_program_counter_value(0x1000)
stored_val = 123
sim.set_register(Register.D0, MemoryValue(OpSize.WORD, unsigned_int=stored_val))
params = [AssemblyParameter(EAMode.IMM, 52), AssemblyParameter(EAMode.DRD, 0)]
cmpi = Cmpi(params, OpSize.WORD) # CMPI.W #52,D0
run_opcode_test(sim, cmpi, Register.D0, stored_val, [False, False, False, False, False], 4)
def test_cmpi_negative():
"""
Test to see that cmpi can handle negative values.
Example case used:
MOVE.B #2,D2
CMPI.B #-1,D2
"""
sim = M68K()
sim.set_program_counter_value(0x1000)
stored_val = 2
sim.set_register(Register.D2, MemoryValue(OpSize.BYTE, unsigned_int=stored_val))
params = [AssemblyParameter(EAMode.IMM, -1), AssemblyParameter(EAMode.DRD, 2)]
cmpi = Cmpi(params, OpSize.BYTE) # CMPI.B #-1,D2
run_opcode_test(sim, cmpi, Register.D2, stored_val, [False, False, False, False, True], 4)
def test_cmpi_zero():
"""
Test to see that cmpi works with 0.
Example case used:
MOVE.L #0,D2
CMPI.B #0,D2
"""
sim = M68K()
sim.set_program_counter_value(0x1000)
params = [AssemblyParameter(EAMode.IMM, 0), AssemblyParameter(EAMode.DRD, 2)]
cmpi = Cmpi(params, OpSize.BYTE) # CMPI.B #0,D2
run_opcode_test(sim, cmpi, Register.D2, 0, [False, False, True, False, False], 4)
def test_cmpi_disassembles():
"""
Test to see that cmpi can be assembled from some input
Example case used:
MOVE.W #$FFFF,D1
CMPI.W #123, D1
"""
data = bytearray.fromhex('0C41007B') # CMPI.W #123, D1
result = Cmpi.disassemble_instruction(data)
assert result is not None
sim = M68K()
sim.set_program_counter_value(0x1000)
stored_value = 0xFFFF
sim.set_register(Register.D1, MemoryValue(OpSize.WORD, unsigned_int=stored_value))
run_opcode_test(sim, result, Register.D1, stored_value, [False, True, False, False, False], 4)
def test_ccr_carry():
"""
Tests to see that the carry bit is set correctly
Example case used:
MOVE.B #$FF,D0
CMPI.W #$100,D0
"""
sim = M68K()
sim.set_program_counter_value(0x1000)
stored_val = 0xFF
sim.set_register(Register.D0, MemoryValue(OpSize.BYTE, unsigned_int=stored_val))
params = [AssemblyParameter(EAMode.IMM, 256), AssemblyParameter(EAMode.DRD, 0)]
cmpi = Cmpi(params, OpSize.WORD) # CMPI.W #$100,D0
run_opcode_test(sim, cmpi, Register.D0, stored_val, [False, True, False, False, True], 4)
def test_ccr_overflow():
"""
Tests to see that the overflow bit is set correctly
Example case used:
MOVE.L #-4,D1
CMPI.B #125,D1
"""
sim = M68K()
sim.set_program_counter_value(0x1000)
stored_val = MemoryValue(OpSize.LONG, signed_int=-4)
sim.set_register(Register.D1, stored_val)
params = [AssemblyParameter(EAMode.IMM, 125), AssemblyParameter(EAMode.DRD, 1)]
cmpi = Cmpi(params, OpSize.BYTE) # CMPI.B #125,D1
run_opcode_test(sim, cmpi, Register.D1, stored_val.get_value_unsigned(), [False, False, False, True, False], 4)
def test_cmpi_assemble():
"""
Check that assembly is the same as the input
Example case used:
CMPI.W #$123,D3
"""
# CMPI.W #$123,D3
data = bytearray.fromhex('0C430123')
result = Cmpi.disassemble_instruction(data)
assm = result.assemble()
assert data == assm
|
python
|
from typing import Mapping
from collections import OrderedDict
import copy
from torch.utils.data import DataLoader
from catalyst.core.callback import Callback, CallbackOrder
from catalyst.core.runner import IRunner
class PeriodicLoaderCallback(Callback):
"""Callback for runing loaders with specified period.
To disable loader use ``0`` as period.
Example:
    >>> PeriodicLoaderCallback(
>>> train_additional=2,
>>> valid=3,
>>> valid_additional=5
>>> )
"""
def __init__(self, **kwargs):
"""
Args:
kwargs: loader names and their run periods.
"""
super().__init__(order=CallbackOrder.external)
self.valid_loader: str = None
self.valid_metrics: Mapping[str, float] = None
self.loaders: Mapping[str, DataLoader] = OrderedDict()
self.loader_periods = {}
for loader, period in kwargs.items():
if not isinstance(period, (int, float)):
raise TypeError(
"Expected loader period type is int/float "
f"but got {type(period)}"
)
self.loader_periods[loader] = int(period)
def on_stage_start(self, runner: IRunner) -> None:
"""Collect information about loaders.
Arguments:
runner (IRunner): current runner
Raises:
ValueError: if there are no loaders in epoch
"""
# store pointers to data loader objects
for name, loader in runner.loaders.items():
self.loaders[name] = loader
# stage validation loader
self.valid_loader = copy.copy(runner.valid_loader)
is_loaders_match = all(
loader in runner.loaders for loader in self.loader_periods.keys()
)
is_same_loaders_number = len(self.loader_periods) == len(
runner.loaders
)
if is_same_loaders_number and is_loaders_match:
# find potential epoch with zero loaders
zero_loaders_epochs = list(
filter(
lambda n: all(
(p == 0 or n % p != 0)
for p in self.loader_periods.values()
),
range(1, runner.num_epochs + 1),
)
)
if len(zero_loaders_epochs) > 0:
epoch_with_err = zero_loaders_epochs[0]
raise ValueError(
f"There will be no loaders in epoch {epoch_with_err}!"
)
def on_epoch_start(self, runner: IRunner) -> None:
"""
        Set loaders for the current epoch.
        If validation is not required then the first loader
        used in the current epoch will serve
        as the validation loader.
        Metrics from the latest epoch with the true
        validation loader are reused
        in epochs where that loader is missing.
Arguments:
runner (IRunner): current runner
Raises:
ValueError: if there are no loaders in epoch
"""
epoch_num = runner.epoch
# loaders to use in current epoch
epoch_loaders = OrderedDict()
for name, loader in self.loaders.items():
period = self.loader_periods.get(name, 1)
            # ignore loaders whose period == 0
if period > 0 and epoch_num % period == 0:
epoch_loaders[name] = loader
if len(epoch_loaders) == 0:
raise ValueError(f"There is no loaders in epoch {epoch_num}!")
first_loader = next(iter(epoch_loaders.keys()))
runner.valid_loader = (
self.valid_loader
if self.valid_loader in epoch_loaders
else first_loader
)
runner.loaders = epoch_loaders
def on_epoch_end(self, runner: IRunner) -> None:
"""Store validation metrics and use latest validation score
when validation loader is not required.
Arguments:
runner (IRunner): current runner
"""
if self.valid_loader in runner.loaders:
self.valid_metrics = {
runner.main_metric: runner.valid_metrics[runner.main_metric]
}
elif self.valid_metrics is not None:
# use previous score on validation
runner.valid_metrics = self.valid_metrics
__all__ = ["PeriodicLoaderCallback"]
|
python
|
from google.appengine.ext.webapp.mail_handlers import InboundMailHandler
from google.appengine.runtime import apiproxy_errors
from google.appengine.ext import ndb
from models.rawmail import RawMail
from models.post import Post
from models.settings import Settings
from models.userimage import UserImage
from models.slug import Slug
from models.userimage import UserImage
from models.postcounter import PostCounter
import re, logging, exceptions, traceback, webapp2, json, datetime, filestore
from errorhandling import log_error
from google.appengine.api import urlfetch
from StringIO import StringIO
class DropboxBackupHandler(webapp2.RequestHandler):
def get(self):
images_total = 0
images_backed_up = 0
try:
self.response.headers['Content-Type'] = 'text/plain'
settings = Settings.get()
if not settings.dropbox_access_token:
self.log('No access token available, no backup will be performed.')
return
posts = [p for p in Post.query().order(Post.date).fetch()]
self.log('Backing up %s posts to Dropbox' % len(posts))
post_text = StringIO()
for p in posts:
post_text.write(p.date.strftime('%Y-%m-%d'))
post_text.write('\r\n\r\n')
post_text.write(p.text.replace('\r\n', '\n').replace('\n', '\r\n').rstrip())
post_text.write('\r\n\r\n')
result = self.put_file(settings.dropbox_access_token, 'MyLife.txt', post_text.getvalue().encode('utf-8'))
post_text.close()
self.log('Backed up posts. Revision: %s' % result['rev'])
self.log('Fetching Dropbox file list')
files_in_dropbox = self.get_dropbox_filelist(settings.dropbox_access_token)
self.log('Got %s files from Dropbox' % len(files_in_dropbox))
self.log('Fetching images...')
images = [i for i in UserImage.query().order(UserImage.date).fetch()]
self.log('Total images in MyLife: %s' % len(images))
not_backed_up = [i for i in images if not i.backed_up_in_dropbox]
not_in_dropbox = [i for i in images if not i.filename in files_in_dropbox]
self.log('\nFiles not backed up: \n\n' + '\n'.join([i.filename for i in not_backed_up]))
self.log('\nFiles marked as backed up, but not in Dropbox: \n\n' + '\n'.join([i.filename for i in not_in_dropbox]))
images = not_backed_up + not_in_dropbox
images_total = len(images)
self.log('Found %s images that need to be backed up in Dropbox' % images_total)
for img in images:
self.log('Backing up %s' % img.filename)
bytes = filestore.read(img.original_size_key)
result = self.put_file(settings.dropbox_access_token, img.filename, bytes)
self.log('Backed up %s. Revision: %s' % (img.filename, result['rev']))
img.backed_up_in_dropbox = True
img.put()
images_backed_up += 1
settings.dropbox_last_backup = datetime.datetime.now()
settings.put()
self.log('Finished backup successfully')
except apiproxy_errors.OverQuotaError, ex:
self.log(ex)
log_error('Error backing up to Dropbox, quota exceeded', 'The backup operation did not complete because it ran out of quota. ' +
'The next time it runs it will continue backing up your posts and images.' +
                ' %s images out of %s were backed up before failing.' % (images_backed_up, images_total))
except Exception, ex:
self.log('Failed to backup posts and images to dropbox: %s' % traceback.format_exc(6))
logging.exception("message")
self.log('ERROR: %s' % ex)
log_error('Error backing up to Dropbox', 'Failed to backup posts and images to dropbox: %s' % traceback.format_exc(6))
def log(self, msg):
self.response.write(str(msg) + '\r\n')
logging.info(msg)
def get_file_info(self, access_token, name):
headers = {
'Content-Type' : 'application/json',
'Authorization' : 'Bearer ' + access_token
}
data = {
"path": "/" + name,
"include_media_info": False,
"include_deleted": False,
"include_has_explicit_shared_members": False
}
result = urlfetch.fetch(
payload=json.dumps(data),
method=urlfetch.POST,
url='https://api.dropboxapi.com/2/files/get_metadata',
headers=headers
)
if result.status_code != 200:
raise Exception("Failed to get file metadata from Dropbox. Status: %s, body: %s" % (result.status_code, result.content))
self.log(result.content)
return json.loads(result.content)
def put_file(self, access_token, name, bytes):
# info = self.get_file_info(access_token, name)
# self.log(info)
dropbox_args = {
"path": "/" + name,
"mode": { ".tag" : "overwrite"},
"autorename": True,
"mute": False
}
headers = {
'Content-Type' : 'application/octet-stream',
'Authorization' : 'Bearer ' + access_token,
'Dropbox-API-Arg' : json.dumps(dropbox_args)
}
result = urlfetch.fetch(
payload=bytes,
method=urlfetch.POST,
url='https://content.dropboxapi.com/2/files/upload',
headers=headers
)
if result.status_code != 200:
self.log(result.content)
raise Exception("Failed to send file to Dropbox. Status: %s, body: %s" % (result.status_code, result.content))
return json.loads(result.content)
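    # Note: Dropbox API v2 content uploads pass the JSON arguments in the
    # Dropbox-API-Arg header, while the request body carries the raw file bytes.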
def get_dropbox_filelist(self, access_token):
headers = {
'Content-Type' : 'application/json',
'Authorization' : 'Bearer ' + access_token
}
data = {
"path": "",
"recursive": True,
"include_media_info": False,
"include_deleted": False,
"include_has_explicit_shared_members": False,
"include_mounted_folders": False,
"limit" : 1000
}
result = urlfetch.fetch(
payload=json.dumps(data),
method=urlfetch.POST,
url='https://api.dropboxapi.com/2/files/list_folder',
headers=headers)
if result.status_code != 200:
raise Exception("Failed to get files from Dropbox. Status: %s, body: %s" % (result.status_code, result.content))
json_data = json.loads(result.content)
file_list = [o['name'] for o in json_data['entries']]
#Get everything
while json_data['has_more']:
self.log('Getting next batch...')
result = urlfetch.fetch(
payload=json.dumps({"cursor" : json_data['cursor']}),
method=urlfetch.POST,
url='https://api.dropboxapi.com/2/files/list_folder/continue',
headers=headers)
if result.status_code != 200:
raise Exception("Failed to get files from Dropbox. Status: %s, body: %s" % (result.status_code, result.content))
json_data = json.loads(result.content)
file_list.extend([o['name'] for o in json_data['entries']])
return file_list
|
python
|
"""Multiple Correspondence Analysis (MCA)"""
import numpy as np
import pandas as pd
from sklearn import utils
from . import ca
from . import one_hot
class MCA(ca.CA):
def fit(self, X, y=None):
utils.check_array(X, dtype=[str, np.number])
if not isinstance(X, pd.DataFrame):
X = pd.DataFrame(X)
n_initial_columns = X.shape[1]
# One-hot encode the data
self.one_hot_ = one_hot.OneHotEncoder().fit(X)
# Apply CA to the indicator matrix
super().fit(self.one_hot_.transform(X))
# Compute the total inertia
n_new_columns = len(self.one_hot_.column_names_)
self.total_inertia_ = (n_new_columns - n_initial_columns) / n_initial_columns
return self
def row_coordinates(self, X):
return super().row_coordinates(self.one_hot_.transform(X))
def column_coordinates(self, X):
return super().column_coordinates(self.one_hot_.transform(X))
def transform(self, X):
"""Computes the row principal coordinates of a dataset."""
utils.validation.check_is_fitted(self, 's_')
utils.check_array(X, dtype=[str, np.number])
return self.row_coordinates(X)
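# A minimal usage sketch (hypothetical DataFrame; the column names and the
# n_components argument, inherited from the CA base class, are assumptions):
#
#   df = pd.DataFrame({'color': ['red', 'blue', 'red'],
#                      'size': ['S', 'M', 'S']})
#   mca = MCA(n_components=2).fit(df)
#   coords = mca.transform(df)  # row principal coordinates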
|
python
|
listOfWords = ['wordOne', 'wordTwo', 'wordThree', 'wordFour', 'wordFive']
listOfInts = []
for i in listOfWords:
listOfInts.append(len(i))
print("List of words:" + str(listOfWords))
print("List of wordlength:" + str(listOfInts))
|
python
|
import asyncio
import logging
import os
import random
from aiohttp import WSServerHandshakeError, ClientConnectorError
from cryptology import ClientWriterStub, Keys, run_client, exceptions
from datetime import datetime
from decimal import Context, ROUND_DOWN, Decimal
from pathlib import Path
from typing import Optional
SERVER = os.getenv('SERVER', 'ws://127.0.0.1:8080')
NAME = Path(__file__).stem
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(NAME)
async def writer(ws: ClientWriterStub, sequence_id: int) -> None:
while True:
sequence_id += 1
buy = random.choice([True, False])
context = Context(prec=8, rounding=ROUND_DOWN)
amount = context.create_decimal_from_float(random.random() * 0.001 + 0.00000001)
amount = amount.quantize(Decimal(10) ** -8)
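        # Decimal(10) ** -8 == Decimal('1E-8'), so quantize rounds the amount
        # to 8 decimal places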
trade_pair = random.choice(
('BTC_USD', 'ETH_USD', 'BCH_USD', 'LTC_USD', 'BTC_EUR', 'ETH_EUR', 'BCH_EUR', 'LTC_EUR', 'ETH_BTC',)
)
if buy:
logger.info(f'buying {amount} of {trade_pair}')
else:
logger.info(f'selling {amount} of {trade_pair}')
msg = {
'@type': 'PlaceBuyFoKOrder' if buy else 'PlaceSellFoKOrder',
'trade_pair': trade_pair,
'amount': str(amount),
'price': '1000000000' if buy else '0.00000001',
}
await ws.send_signed_message(sequence_id=sequence_id, payload=msg)
await asyncio.sleep(0.19)
async def read_callback(ws: ClientWriterStub, order: int, ts: datetime, payload: dict) -> None:
logger.debug(f'received: {order}, {ts}, {payload}')
async def throttling(level: int, sequence_id: int, order_id: int) -> bool:
logger.warning(f'OMG!!! {level} queued messages. Slow down!')
return False
async def main(loop: Optional[asyncio.AbstractEventLoop] = None):
random.seed()
client_keys = Keys.load(NAME + '.pub', NAME + '.priv')
server_keys = Keys.load('cryptology.pub', None)
logger.info(f'connecting to {SERVER}')
while True:
try:
await run_client(
client_id=NAME,
client_keys=client_keys,
ws_addr=SERVER,
server_keys=server_keys,
writer=writer,
read_callback=read_callback,
throttling_callback=throttling,
last_seen_order=-1,
loop=loop
)
except exceptions.RateLimit:
logger.error('rate limit reached')
except exceptions.ServerRestart:
logger.warning('server restart')
await asyncio.sleep(80)
except exceptions.InvalidKey:
logger.critical('the public key does not match client name')
break
except (exceptions.CryptologyConnectionError, ClientConnectorError, WSServerHandshakeError) as ex:
logger.error(ex)
await asyncio.sleep(30)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop=loop))
|
python
|
#!/usr/bin/python
#coding:utf-8
from bs4 import BeautifulSoup
import requests
url='http://www.moko.cc/mtb.html'
r=requests.get(url, verify=False)
content=r.content
soup=BeautifulSoup(content,'lxml')
modlist=soup.find_all('div','sub_show')
link=[]
for i in modlist:
if i==modlist[-1] or i==modlist[0]:
continue
tmp=i.find_all('a')
for j in tmp:
if 'html' in j['href']:
link.append(j['href'])
print j['href']
print 'http://www.moko.cc'+link[0]
url2='http://www.moko.cc'+link[0]
r=requests.get(url2, verify=False)
content=r.content
soup=BeautifulSoup(content,'lxml')
alist=soup.find_all('div','thumbnail_box')
soup2=BeautifulSoup(str(alist),'lxml')
blist=soup2.find_all('dd')
for item in blist:
link=item.find('img')
try:
if '.png' in link['src']:
pass
else:
print link['src'].split('?')[0]
except:
continue
|
python
|
import pytest
# run tests only if snappy is available
snappy = pytest.importorskip("snappy")
def test_snappy():
import snappy
from snappy import (ProductIO, ProductUtils, ProgressMonitor, jpy)
|
python
|
from collections import Counter
import nltk
from nltk import *
import numpy as np
import xml.etree.ElementTree as ET
import tmx
trigram_measures = nltk.collocations.TrigramAssocMeasures()
bigram_measures = nltk.collocations.BigramAssocMeasures()
class CorpusUtil(object):
__slots__ = 'tokenFrequencies', 'nGramFrequencies', 'neighbors'
def __init__(self):
self.tokenFrequencies = Counter()
self.nGramFrequencies = Counter()
self.neighbors = {}
def countTokenFreqFromCorpus(self, path):
with open(path, 'r', encoding='latin1') as f:
lines = f.readlines()
for line in lines:
tokens = line.split()[1:]
for t in tokens:
t = t.lower()
self.tokenFrequencies[t] += 1
    # Once the object is created, the same n-gram size must be used for this instance,
    # since it is stored in the instance variable "nGramFrequencies"
def countNGramFrequenciesFromCorpus(self, path, n):
with open(path, 'r', encoding='latin1') as f:
lines = f.readlines()
for line in lines:
ngrams_ = ngrams(line.split()[1:], n)
for gram in ngrams_:
self.nGramFrequencies[gram] += 1
    # Returns all possible neighbors of a specific word in the corpus,
    # i.e. for position n it collects the tokens at positions n-1 and n+1
def findNeighbors(self, path, token):
token = token.lower()
with open(path, 'r', encoding='latin1') as f:
lines = f.readlines()
for line in lines:
tokens = line.split()[1:]
for idx, t in enumerate(tokens):
t = t.lower()
if t == token:
before = idx-1
after = idx+1
if before >= 0:
if token not in self.neighbors.keys():
self.neighbors[token] = set()
self.neighbors[token].add(tokens[before])#add the n-1 token
if after < len(tokens):
if token not in self.neighbors.keys():
self.neighbors[token] = set()
self.neighbors[token].add(tokens[after])#add the n+1 token
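# A minimal usage sketch for CorpusUtil (hypothetical corpus path; the expected
# file format is one sentence per line with a leading ID token, as parsed above):
#
#   util = CorpusUtil()
#   util.countTokenFreqFromCorpus('corpus.txt')
#   util.countNGramFrequenciesFromCorpus('corpus.txt', 3)
#   util.findNeighbors('corpus.txt', 'haus')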
class AlignedCorpusUtil(object):
__slots__ = 'alignedSentences', 'tokenFrequenciesSource', 'tokenFrequenciesTarget', 'bigramFrequenciesSource'
def __init__(self):
self.alignedSentences = {}
self.tokenFrequenciesSource = Counter()
self.tokenFrequenciesTarget = Counter()
self.bigramFrequenciesSource = Counter()
def readAligedCorpus(self, path):
tree = ET.parse(path)
root = tree.getroot()
body = root.find('body')
for tu in body.findall('tu'):
de = ''
en = ''
for tuv in tu.findall('tuv'):
atr = tuv.attrib
lang = atr.get('{http://www.w3.org/XML/1998/namespace}lang')
if lang == 'de':
for seg in tuv.findall('seg'):
de = seg.text
elif lang == 'en':
for seg in tuv.findall('seg'):
en = seg.text
self.alignedSentences[de] = en
def readTokenizedAlignedCorpora(self, path, lang):
if lang.lower().strip() == 'de':
treeDE = ET.parse(path)
rootDE = treeDE.getroot()
for w in rootDE.findall('*/*/*/w'):
word = w.text.lower().strip()
self.tokenFrequenciesSource[word] += 1
for w in rootDE.findall('*/*/*/*/w'):
word = w.text.lower().strip()
self.tokenFrequenciesSource[word] += 1
if lang.lower().strip() == 'en':
treeEN = ET.parse(path)
rootEN = treeEN.getroot()
for w in rootEN.findall('*/*/*/w'):
word = w.text.lower().strip()
self.tokenFrequenciesTarget[word] += 1
for w in rootEN.findall('*/*/*/*/w'):
word = w.text.lower().strip()
self.tokenFrequenciesTarget[word] += 1
for w in rootEN.findall('*/*/*/*/*/w'):
word = w.text.lower().strip()
self.tokenFrequenciesTarget[word] += 1
def sourceBigramsFromAlignedCorpus(self, pathDe):
treeDE = ET.parse(pathDe)
rootDE = treeDE.getroot()
words1 = []
for w in rootDE.findall('*/*/*/w'):
word = w.text.lower().strip()
words1.append(word)
#get bigrams
for idx,val in enumerate(words1):
if idx < len(words1)-1:
self.bigramFrequenciesSource[(val,words1[idx+1])] += 1
words2 = []
for w in rootDE.findall('*/*/*/*/w'):
word = w.text.lower().strip()
words2.append(word)
#get bigrams
for idx,val in enumerate(words2):
if idx < len(words2)-1:
self.bigramFrequenciesSource[(val,words2[idx+1])] += 1
print('start')
c = AlignedCorpusUtil()
path = 'resources/corpora/Europarl/de-en.tmx'
c.readAligedCorpus(path)
import glob, os
de_path = 'resources/corpora/Europarl/Europarl_de/xml/de/'
en_path = 'resources/corpora/Europarl/Europarl_en/xml/en/'
for file in os.listdir(de_path):
if file.endswith(".xml"):
c.readTokenizedAlignedCorpora(de_path+file, 'de')
for file in os.listdir(en_path):
if file.endswith(".xml"):
c.readTokenizedAlignedCorpora(en_path+file, 'en')
c.sourceBigramsFromAlignedCorpus('resources/corpora/Europarl/Europarl_de/xml/de/ep-00-01-17.xml')
bigrams = c.bigramFrequenciesSource
mostCommon = bigrams.most_common(100)
count = 0
sentences = c.alignedSentences
for sent in sentences:
if ' haus ' in sent:
if ' house ' in sentences[sent]:
count += 1
print('haus translated as house: ', count)
print('haus on its own: ', c.tokenFrequenciesSource['haus'])
print('house on its own: ', c.tokenFrequenciesTarget['house'])
for bi in mostCommon:
print(bi)
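# Illustrative usage sketch (added example): exercising the monolingual
# utility class defined at the top of this module. `CorpusUtil` is an assumed
# stand-in for its name, and the corpus path below is hypothetical; each
# corpus line is expected to start with an identifier token, which the
# counting methods skip via split()[1:].
def _demo_monolingual_corpus(path='resources/corpora/demo.txt'):
    util = CorpusUtil()  # assumption: the class defined above
    util.countTokenFreqFromCorpus(path)
    util.countNGramFrequenciesFromCorpus(path, 2)
    util.findNeighbors(path, 'haus')
    print(util.tokenFrequencies.most_common(10))
    print(util.nGramFrequencies.most_common(10))
    print(util.neighbors.get('haus', set()))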
|
python
|
from aiogram import types
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters import Text
from misc import bot, dp, admin_ids
from certificate import Certificate, get_all_certificates, set_all_certificates
import logging
from .menu import menu_keyboard
from .states import AdminState, admin_keyboard
@dp.message_handler(commands='cancel', state='*')
@dp.message_handler(Text(equals='Отмена', ignore_case=True), state='*')
async def cmd_cancel(message: types.Message, state: FSMContext):
"""
Обработчик кнопки Отмена.
Parameters
----------
message : types.Message
Текст сообщения (Отмена)
state : FSMContext
Сброс состояния пользователя.
"""
await state.finish()
    await message.answer('Выберите действие:',
                         reply_markup=menu_keyboard)
@dp.message_handler(commands=['start'], state='*')
async def cmd_start(message: types.Message):
"""
Обработчик кнопки старт. Определяет от кого пришло сообщение.
Parameters
----------
message : types.Message
Текст сообщения
"""
    if message.from_user.id in admin_ids:
await message.answer('Привет, что хочешь делать?',
reply_markup=admin_keyboard)
await AdminState.wait_admin_action.set()
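# Illustrative follow-up handler (an assumption, not part of the original
# file): reacts to the admin's choice while in the wait_admin_action state.
@dp.message_handler(state=AdminState.wait_admin_action)
async def admin_action_chosen(message: types.Message, state: FSMContext):
    logging.info('Admin %s chose action: %s', message.from_user.id, message.text)
    await message.answer('Selected action: {}'.format(message.text),
                         reply_markup=menu_keyboard)
    await state.finish()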
|
python
|
n1 = float(input("Enter a number: "))
print("The successor of {} is {} and its predecessor is {}".format(n1, n1 + 1, n1 - 1))
|
python
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
"""
[VRM DRIVER] VRM CLIENT.
"""
from cinder.openstack.common import log as logging
from cinder.openstack.common.gettextutils import _
from cinder.volume.drivers.huawei.vrm.base_proxy import BaseProxy
TASK_WAITING = 'waiting'
TASK_RUNNING = 'running'
TASK_SUCCESS = 'success'
TASK_FAILED = 'failed'
TASK_CANCELLING = 'cancelling'
TASK_UNKNOWN = 'unknown'
LOG = logging.getLogger(__name__)
class HostProxy(BaseProxy):
    '''Proxy for host queries against the VRM REST API.'''
def __init__(self):
super(HostProxy, self).__init__()
def list_host(self, **kwargs):
        '''List the hosts of the site, following the paged REST API.
        :param kwargs: optional query parameters (e.g. 'scope')
        :return: list of host dicts
        '''
LOG.info(_("[VRM-CINDER] start list_host()"))
uri = '/hosts'
method = 'GET'
path = self.site_uri + uri
offset = 0
hosts = []
while True:
parameters = {'limit': self.limit,
'offset': offset,
'scope': kwargs.get('scope')}
appendix = self._joined_params(parameters)
new_url = self._generate_url(path, appendix)
resp, body = self.vrmhttpclient.request(new_url, method)
total = int(body.get('total') or 0)
if total > 0:
res = body.get('hosts')
hosts += res
offset += len(res)
if offset >= total or len(hosts) >= total or len(res) < self.limit:
break
else:
break
return hosts
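# Standalone sketch of the offset/limit paging pattern used by list_host()
# above (illustrative only; `fetch_page` is a hypothetical callable standing
# in for the VRM HTTP request, expected to return a (page, total) tuple):
def paged_fetch(fetch_page, limit=100):
    items = []
    offset = 0
    while True:
        page, total = fetch_page(offset, limit)
        items += page
        offset += len(page)
        # Stop once everything is collected or the server returns a short page.
        if offset >= total or len(page) < limit:
            break
    return items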
|
python
|
from collections import namedtuple
from csv import QUOTE_ALL
from unittest import TestCase
import pytest
from bonobo import CsvReader, CsvWriter
from bonobo.constants import EMPTY
from bonobo.util.testing import (
BufferingNodeExecutionContext, ConfigurableNodeTest, FilesystemTester, ReaderTest, WriterTest
)
csv_tester = FilesystemTester("csv")
csv_tester.input_data = "a,b,c\na foo,b foo,c foo\na bar,b bar,c bar"
defaults = {"lineterminator": "\n"}
incontext = ConfigurableNodeTest.incontext
def test_read_csv_from_file_kwargs(tmpdir):
fs, filename, services = csv_tester.get_services_for_reader(tmpdir)
with BufferingNodeExecutionContext(CsvReader(filename, **defaults), services=services) as context:
context.write_sync(EMPTY)
assert context.get_buffer_args_as_dicts() == [
{"a": "a foo", "b": "b foo", "c": "c foo"},
{"a": "a bar", "b": "b bar", "c": "c bar"},
]
###
# CSV Readers / Writers
###
class Csv:
extension = "csv"
ReaderNodeType = CsvReader
WriterNodeType = CsvWriter
L1, L2, L3, L4 = ("a", "hey"), ("b", "bee"), ("c", "see"), ("d", "dee")
LL = ("i", "have", "more", "values")
class CsvReaderTest(Csv, ReaderTest, TestCase):
input_data = "\n".join(("id,name", "1,John Doe", "2,Jane Doe", ",DPR", "42,Elon Musk"))
def check_output(self, context, *, prepend=None):
out = context.get_buffer()
assert out == (prepend or list()) + [("1", "John Doe"), ("2", "Jane Doe"), ("", "DPR"), ("42", "Elon Musk")]
@incontext()
def test_nofields(self, context):
context.write_sync(EMPTY)
context.stop()
self.check_output(context)
assert context.get_output_fields() == ("id", "name")
@incontext(output_type=tuple)
def test_output_type(self, context):
context.write_sync(EMPTY)
context.stop()
self.check_output(context, prepend=[("id", "name")])
@incontext(output_fields=("x", "y"), skip=1)
def test_output_fields(self, context):
context.write_sync(EMPTY)
context.stop()
self.check_output(context)
assert context.get_output_fields() == ("x", "y")
@incontext(quoting=QUOTE_ALL)
def test_quoting(self, context):
context.write_sync(EMPTY)
context.stop()
self.check_output(context)
assert context.get_output_fields() == ("id", "name")
class CsvWriterTest(Csv, WriterTest, TestCase):
@incontext()
def test_fields(self, context):
context.set_input_fields(["foo", "bar"])
context.write_sync(("a", "b"), ("c", "d"))
context.stop()
assert self.readlines() == ("foo,bar", "a,b", "c,d")
@incontext(skip_header=False)
def test_fields_with_headers(self, context):
context.set_input_fields(["foo", "bar"])
context.write_sync(("a", "b"), ("c", "d"))
context.stop()
assert self.readlines() == ("foo,bar", "a,b", "c,d")
@incontext(skip_header=True)
def test_fields_without_headers(self, context):
context.set_input_fields(["foo", "bar"])
context.write_sync(("a", "b"), ("c", "d"))
context.stop()
assert self.readlines() == ("a,b", "c,d")
@incontext()
def test_fields_from_type(self, context):
context.set_input_type(namedtuple("Point", "x y"))
context.write_sync((1, 2), (3, 4))
context.stop()
assert self.readlines() == ("x,y", "1,2", "3,4")
@incontext()
def test_nofields_multiple_args(self, context):
# multiple args are iterated onto and flattened in output
context.write_sync(L1, L2, L3, L4)
context.stop()
assert self.readlines() == ("a,hey", "b,bee", "c,see", "d,dee")
@incontext()
def test_nofields_multiple_args_length_mismatch(self, context):
# if length of input vary, then we get a TypeError (unrecoverable)
with pytest.raises(TypeError):
context.write_sync((L1, L2), (L3,))
@incontext()
def test_nofields_empty_args(self, context):
# empty calls are ignored
context.write_sync(EMPTY, EMPTY, EMPTY)
context.stop()
assert self.readlines() == ("", "", "")
|
python
|
from textwrap import dedent
import re
from ansi2html import Ansi2HTMLConverter
import mistune
from jinja2 import Markup
import pygments
import pygments.lexers
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Generic, Text, Comment
import pygments.formatters.html
ansi_convertor = Ansi2HTMLConverter(inline=True)
pygments_formatter = pygments.formatters.html.HtmlFormatter(
cssclass='highlight'
)
_admonition_leading_pattern = re.compile(r'^ *> ?', flags=re.M)
class BlockGrammar(mistune.BlockGrammar):
admonition = re.compile(r'^> *\[(\S+)\]([^\n]*)\n((>[^\n]*[\n]{0,1})*)')
deflist = re.compile(r'^(([^\n: ][^\n]*\n)+)((:( {0,3})[^\n]*\n)( \5[^\n]*\n|\n)+)')
class BlockLexer(mistune.BlockLexer):
grammar_class = BlockGrammar
default_rules = [
'admonition',
'deflist',
] + mistune.BlockLexer.default_rules
def parse_admonition(self, m):
self.tokens.append({
'type': 'admonition_start',
'name': m.group(1),
'title': m.group(2).strip(),
})
text = _admonition_leading_pattern.sub('', m.group(3))
self.parse(dedent(text))
self.tokens.append({
'type': 'admonition_end',
})
def parse_deflist(self, m):
self.tokens.append({
'type': 'deflist_term_start',
})
self.parse(dedent(m.group(1)))
self.tokens.append({
'type': 'deflist_term_end',
})
self.tokens.append({
'type': 'deflist_def_start',
})
self.parse(dedent(' ' + m.group(3)[1:]))
self.tokens.append({
'type': 'deflist_def_end',
})
def ansi_convert(code):
replaced = code.replace('\u241b', '\x1b')
return ansi_convertor.convert(replaced, full=False)
def style_space_after_prompt(html):
return re.sub(r'<span class="gp">([^<]*[^<\s])</span>(\s)',
r'<span class="gp">\1\2</span>',
html)
def matrix_multiplication_operator(html):
return html.replace('<span class="err">@</span>',
'<span class="o">@</span>')
class MSDOSSessionVenvLexer(RegexLexer):
"""Lexer for simplistic MSDOS sessions with optional venvs.
Note that this doesn't use ``Name.Builtin`` (class="nb"), which naucse
styles the same as the rest of the command.
"""
name = 'MSDOS Venv Session'
aliases = ['dosvenv']
tokens = {
'root': [
(r'((?:\([_\w]+\))?\s?>\s?)([^#\n]*)(#.*)?',
bygroups(Generic.Prompt, Text, Comment)),
(r'(.+)', Generic.Output),
]
}
def get_lexer_by_name(lang):
"""
    Workaround for our own lexer. Normally, new lexers have to be added through
    entry points to be locatable by get_lexer_by_name().
"""
if lang == 'dosvenv':
return MSDOSSessionVenvLexer()
return pygments.lexers.get_lexer_by_name(lang)
class Renderer(mistune.Renderer):
code_tmpl = '<div class="highlight"><pre><code>{}</code></pre></div>'
def __init__(self, convert_url, *args, **kwargs):
self._convert_url = convert_url
super().__init__(*args, **kwargs)
def admonition(self, name, content):
return '<div class="admonition {}">{}</div>'.format(name, content)
def block_code(self, code, lang):
if lang is not None:
lang = lang.strip()
if not lang or lang == 'plain':
escaped = mistune.escape(code)
return self.code_tmpl.format(escaped)
if lang == 'ansi':
converted = ansi_convert(code)
return self.code_tmpl.format(converted)
lexer = get_lexer_by_name(lang)
html = pygments.highlight(code, lexer, pygments_formatter).strip()
html = style_space_after_prompt(html)
if lang in ('python', 'pycon'):
html = matrix_multiplication_operator(html)
return html
def deflist(self, items):
tags = {'term': 'dt', 'def': 'dd'}
return '<dl>\n{}</dl>'.format(''.join(
'<{tag}>{text}</{tag}>'.format(tag=tags[type], text=text)
for type, text in items
))
def link(self, link, title, text):
return super().link(self._convert_url(link), title, text)
def image(self, src, title, text):
return super().image(self._convert_url(src), title, text)
class Markdown(mistune.Markdown):
def output_admonition(self):
name = self.token['name']
body = self.renderer.placeholder()
if self.token['title']:
template = '<p class="admonition-title">{}</p>\n'
body += template.format(self.token['title'])
while self.pop()['type'] != 'admonition_end':
body += self.tok()
return self.renderer.admonition(name, body)
def output_deflist_term(self):
items = [['term', self.renderer.placeholder()]]
while True:
end_token = 'deflist_{}_end'.format(items[-1][0])
while self.pop()['type'] not in (end_token, 'paragraph'):
items[-1][1] += self.tok()
if self.token['type'] == 'paragraph':
if items[-1][0] == 'term':
items.append(['term', self.renderer.placeholder()])
items[-1][1] += self.token['text']
else:
items[-1][1] += self.output_paragraph()
elif self.peek()['type'] == 'deflist_term_start':
self.pop()
items.append(['term', self.renderer.placeholder()])
elif self.peek()['type'] == 'deflist_def_start':
self.pop()
items.append(['def', self.renderer.placeholder()])
else:
break
return self.renderer.deflist(items)
def convert_markdown(text, convert_url=None, *, inline=False):
convert_url = convert_url if convert_url else lambda x: x
text = dedent(text)
markdown = Markdown(
escape=False,
block=BlockLexer(),
renderer=Renderer(convert_url),
)
result = markdown(text).strip()
if inline and result.startswith('<p>') and result.endswith('</p>'):
result = result[len('<p>'):-len('</p>')]
return Markup(result)
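# Illustrative smoke test (added sketch; the sample inputs are made up):
if __name__ == '__main__':
    # An admonition block handled by the custom BlockLexer above.
    print(convert_markdown('> [note] Heads up\n> Admonition body text.\n'))
    # Inline mode strips the wrapping <p> tag.
    print(convert_markdown('*hello*', inline=True))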
|
python
|
class Cell:
pass
class Fish(Cell):
name = 'F'
@staticmethod
def update(neighbours):
cnt = neighbours.count(Fish.name)
if cnt == 2 or cnt == 3:
return Fish()
else:
return Void()
class Crayfish(Cell):
name = 'C'
@staticmethod
def update(neighbours):
cnt = neighbours.count(Crayfish.name)
if cnt == 2 or cnt == 3:
return Crayfish()
else:
return Void()
class Rock(Cell):
name = '#'
@staticmethod
def update(neighbours):
return Rock()
class Void(Cell):
name = '.'
@staticmethod
def update(neighbours):
        if neighbours.count(Fish.name) == 3:
            return Fish()
        elif neighbours.count(Crayfish.name) == 3:
            return Crayfish()
        else:
            return Void()
class Generation:
"""Describes a generation in particular moment."""
def __init__(self, h, w, is_thor):
"""Initializes a generation.
        h and w should be ints (height and width of the ocean); is_thor should be a bool (toroidal wrap-around).
"""
if type(w) is not int:
raise TypeError("type of w argument should be int")
if w <= 0:
raise RuntimeError('w should be positive')
if type(h) is not int:
raise TypeError("type of h argument should be int")
if h <= 0:
raise RuntimeError('h should be positive')
if type(is_thor) is not bool:
raise TypeError("type of num argument should be int")
self._width = w
self._height = h
self._is_thor = is_thor
self._ocean = [[None] * w for i in range(h)]
def set_ocean(self, ocean):
"""Sets ocean.
Ocean should be a two-dimensional array.
"""
if type(ocean) is not list:
raise TypeError('type of ocean should be list')
if len(ocean) != self._height:
raise RuntimeError('Incorrect h')
for i in range(self._height):
if len(ocean[i]) != self._width:
raise RuntimeError('Incorrect w')
self._ocean = ocean
def try_to_reach(self, xx, yy):
if self._is_thor:
return self._ocean[xx % self._height][yy % self._width]
else:
if xx < 0 or xx >= self._height:
return None
if yy < 0 or yy >= self._width:
return None
return self._ocean[xx][yy]
def get_neighbours(self, x, y):
ans = list()
for dx in [-1, 0, 1]:
for dy in [-1, 0, 1]:
if dx == dy == 0:
continue
ans.append(self.try_to_reach(x + dx, y + dy))
return ans
def update(self, x, y):
neighbours = [i.name for i in self.get_neighbours(x, y) if i is not None]
return self._ocean[x][y].update(neighbours)
def next(self):
"""Returns next generation after self."""
new_gen = Generation(self._height, self._width, self._is_thor)
for i in range(self._height):
for j in range(self._width):
new_gen._ocean[i][j] = self.update(i, j)
return new_gen
def __str__(self, *args, **kwargs):
"""Returns graphical view of generation in string format."""
s = ''
for line in self._ocean:
for i in line:
s += i.name
s += '\n'
return s
class Life:
"""Descibes the history of the game from the beginning."""
def __init__(self, gen):
"""Initialize self. gen should be an object of class Generation."""
if not isinstance(gen, Generation):
raise RuntimeError('gen should be an instance of Generation')
self.generations = [gen]
def next_generation(self):
"""Adds next generation to list of generations."""
self.generations.append(self.generations[-1].next())
def get_generation(self, n):
"""Calculates all generations up to the n (inclusevely) and returns the last.
N should be type of int.
"""
if type(n) is not int:
raise TypeError('type of n should be int')
if n < 0:
            raise RuntimeError('n should be non-negative')
if len(self.generations) > n:
return self.generations[n]
while len(self.generations) <= n:
self.next_generation()
return self.generations[-1]
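# Illustrative usage (added sketch): a 3x3 toroidal ocean with a column of
# fish; printing the first two generations shows the update rules in action.
if __name__ == '__main__':
    start = Generation(3, 3, True)
    start.set_ocean([
        [Void(), Fish(), Void()],
        [Void(), Fish(), Void()],
        [Void(), Fish(), Void()],
    ])
    life = Life(start)
    print(life.get_generation(0))
    print(life.get_generation(1))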
|
python
|
# flake8: noqa
from __future__ import unicode_literals
import sys
import django
from django.conf import settings
# removed get_queryset <> get_query_set see, #29
#from django.db.models import Manager
## Monkey patch:
#
#try:
# Manager.get_query_set = Manager.get_queryset
#except AttributeError:
# Manager.get_queryset = Manager.get_query_set
from django.core.exceptions import ImproperlyConfigured
if django.VERSION < (1, 8):
from django.template import add_to_builtins
elif django.VERSION < (1, 9):
from django.template.base import add_to_builtins
else:
pass # Removed in 1.9. Use template settings instead
try:
from importlib import import_module
except ImportError: # Fallback for Python 2.6 & Django < 1.7
from django.utils.importlib import import_module
try:
# django 1.4.2+ , https://docs.djangoproject.com/en/1.5/topics/python3/#philosophy
from django.utils import six
except ImportError:
import six
# get_ident
try:
from threading import get_ident
except ImportError:
from six.moves._thread import get_ident # noqa
# try:
# from django.urls import include, handler404, handler500
# except ImportError:
# from django.conf.urls.defaults import include, handler404, handler500 # pyflakes:ignore
# try:
# from django.urls import patterns
# except ImportError:
# try:
# from django.conf.urls.defaults import patterns # pyflakes:ignore
# except ImportError:
# pass
# Handle django.utils.encoding rename in 1.5 onwards.
# smart_unicode -> smart_text
# force_unicode -> force_text
try:
from django.utils.encoding import smart_text
except ImportError:
from django.utils.encoding import smart_unicode as smart_text
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
if django.VERSION >= (1, 6):
def clean_manytomany_helptext(text):
return text
else:
# Up to version 1.5 many to many fields automatically suffix
# the `help_text` attribute with hardcoded text.
def clean_manytomany_helptext(text):
if text.endswith(' Hold down "Control", or "Command" on a Mac, to select more than one.'):
text = text[:-69]
return text
# cStringIO only if it's available, otherwise StringIO
try:
    from cStringIO import StringIO
except ImportError:
StringIO = six.StringIO
BytesIO = six.BytesIO
try:
# Django 1.7 or over use the new application loading system
from django.apps import apps
get_model = apps.get_model
except ImportError:
from django.db.models.loading import get_model
def get_model_name(model_cls):
try:
return model_cls._meta.model_name
except AttributeError:
# < 1.6 used module_name instead of model_name
return model_cls._meta.module_name
# View._allowed_methods only present from 1.5 onwards
if django.VERSION >= (1, 5):
from django.views.generic import View
else:
from django.views.generic import View as DjangoView
class View(DjangoView):
def _allowed_methods(self):
return [m.upper() for m in self.http_method_names if hasattr(self, m)]
# URLValidator only accepts `message` in 1.6+
if django.VERSION >= (1, 6):
from django.core.validators import URLValidator
else:
from django.core.validators import URLValidator as DjangoURLValidator
class URLValidator(DjangoURLValidator):
def __init__(self, *args, **kwargs):
self.message = kwargs.pop('message', self.message)
super(URLValidator, self).__init__(*args, **kwargs)
# EmailValidator requires explicit regex prior to 1.6+
if django.VERSION >= (1, 6):
from django.core.validators import EmailValidator
else:
from django.core.validators import EmailValidator as DjangoEmailValidator
from django.core.validators import email_re
class EmailValidator(DjangoEmailValidator):
def __init__(self, *args, **kwargs):
super(EmailValidator, self).__init__(email_re, *args, **kwargs)
try:
from django.utils.encoding import python_2_unicode_compatible
except ImportError:
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
try:
import unittest2 as unittest
except ImportError:
import unittest # pyflakes:ignore
try:
    from unittest import mock  # since Python 3.3, mock is in the stdlib
except ImportError:
try:
import mock # pyflakes:ignore
except ImportError:
# mock is used for tests only however it is hard to check if user is
# running tests or production code so we fail silently here; mock is
# still required for tests at setup.py (See PR #193)
pass
# Django 1.5 compatibility utilities, providing support for custom User models.
# Since get_user_model() causes a circular import if called when app models are
# being loaded, the user_model_label should be used when possible, with calls
# to get_user_model deferred to execution time
user_model_label = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
# get_username_field
if django.VERSION >= (1, 5):
def get_username_field():
return get_user_model().USERNAME_FIELD
else:
def get_username_field():
return 'username'
try:
from django.contrib.auth import get_user_model
except ImportError:
from django.contrib.auth.models import User
get_user_model = lambda: User
def get_user_model_path():
"""
Returns 'app_label.ModelName' for User model. Basically if
``AUTH_USER_MODEL`` is set at settings it would be returned, otherwise
``auth.User`` is returned.
"""
return getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
def get_user_permission_full_codename(perm):
"""
Returns 'app_label.<perm>_<usermodulename>'. If standard ``auth.User`` is
used, for 'change' perm this would return ``auth.change_user`` and if
``myapp.CustomUser`` is used it would return ``myapp.change_customuser``.
"""
User = get_user_model()
    return '%s.%s_%s' % (User._meta.app_label, perm, get_model_name(User))
def get_user_permission_codename(perm):
"""
Returns '<perm>_<usermodulename>'. If standard ``auth.User`` is
used, for 'change' perm this would return ``change_user`` and if
``myapp.CustomUser`` is used it would return ``change_customuser``.
"""
return get_user_permission_full_codename(perm).split('.')[1]
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
Backported from Django 1.7
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
msg = "%s doesn't look like a module path" % dotted_path
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError:
msg = 'Module "%s" does not define a "%s" attribute/class' % (
dotted_path, class_name)
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
def commit(using=None):
"""
Possibility of calling transaction.commit() in new Django versions (in atomic block).
"""
try:
django.db.transaction.commit(using)
except django.db.transaction.TransactionManagementError:
pass
def rollback(using=None, sid=None):
"""
Possibility of calling transaction.rollback() in new Django versions (in atomic block).
Important: transaction savepoint (sid) is required for Django < 1.8
"""
if sid:
django.db.transaction.savepoint_rollback(sid)
else:
try:
django.db.transaction.rollback(using)
except django.db.transaction.TransactionManagementError:
django.db.transaction.set_rollback(True, using)
# HttpResponseBase only exists from 1.5 onwards
try:
from django.http.response import HttpResponseBase
except ImportError:
from django.http import HttpResponse as HttpResponseBase
# Python 3
try:
unicode = unicode # pyflakes:ignore
basestring = basestring # pyflakes:ignore
str = str # pyflakes:ignore
except NameError:
basestring = unicode = str = str
# urlparse in python3 has been renamed to urllib.parse
try:
from urlparse import urlparse, parse_qs, urlunparse
except ImportError:
from urllib.parse import urlparse, parse_qs, urlunparse
try:
from urllib import urlencode, unquote_plus
except ImportError:
from urllib.parse import urlencode, unquote_plus
def create_permissions(*args, **kwargs):
# create_permission API changed: skip the create_models (second
# positional argument) if we have django 1.7+ and 2+ positional
# arguments with the second one being a list/tuple
from django.contrib.auth.management import create_permissions as original_create_permissions
if django.VERSION < (1, 7) and len(args) > 1 and isinstance(args[1], (list, tuple)):
args = args[:1] + args[2:]
return original_create_permissions(*args, **kwargs)
# Requires django < 1.5 or python >= 2.6
if django.VERSION < (1, 5):
from django.utils import simplejson
else:
import json as simplejson
try:
from collections import OrderedDict as SortedDict
except ImportError:
from django.utils.datastructures import SortedDict
# Backporting from 1.8
if django.VERSION < (1, 8):
from compat.json_response import DjangoJSONEncoder
else:
from django.core.serializers.json import DjangoJSONEncoder
if django.VERSION < (1, 8):
from compat.json_response import JsonResponse
else:
from django.http import JsonResponse
# format_html (django 1.6)
try:
from django.utils.html import format_html, conditional_escape
except ImportError:
# support django < 1.5. Taken from django.utils.html
from django.utils import html
def format_html(format_string, *args, **kwargs):
"""
Similar to str.format, but passes all arguments through conditional_escape,
and calls 'mark_safe' on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(html.conditional_escape, args)
kwargs_safe = dict([(k, html.conditional_escape(v)) for (k, v) in
six.iteritems(kwargs)])
return html.mark_safe(format_string.format(*args_safe, **kwargs_safe))
try:
from django.db import close_old_connections as close_connection
except ImportError: # django < 1.8
from django.db import close_connection
def get_template_loaders():
"""
Compatibility method to fetch the template loaders.
Source: https://github.com/django-debug-toolbar/django-debug-toolbar/blob/ece1c2775af108a92a0ef59636266b49e286e916/debug_toolbar/compat.py
"""
try:
from django.template.engine import Engine
except ImportError: # Django < 1.8
Engine = None
if Engine:
try:
engine = Engine.get_default()
except ImproperlyConfigured:
loaders = []
else:
loaders = engine.template_loaders
else: # Django < 1.8
from django.template.loader import find_template_loader
loaders = [
find_template_loader(loader_name)
for loader_name in settings.TEMPLATE_LOADERS]
return loaders
if django.VERSION >= (2, 0):
from django.urls import (
clear_url_caches, get_script_prefix, get_urlconf,
is_valid_path, resolve, reverse, reverse_lazy, set_script_prefix,
set_urlconf, NoReverseMatch, URLPattern,
URLResolver, Resolver404, ResolverMatch, get_ns_resolver, get_resolver, get_callable, get_mod_func
)
RegexURLPattern = URLPattern
RegexURLResolver = URLResolver
elif django.VERSION >= (1, 10):
import django.urls as urlresolvers
from django.urls import (
clear_url_caches, get_script_prefix, get_urlconf,
is_valid_path, resolve, reverse, reverse_lazy, set_script_prefix,
set_urlconf, LocaleRegexProvider, LocaleRegexURLResolver, NoReverseMatch, RegexURLPattern,
RegexURLResolver, Resolver404, ResolverMatch, get_ns_resolver, get_resolver, get_callable, get_mod_func
)
URLPattern = RegexURLPattern
URLResolver = RegexURLResolver
else:
import django.core.urlresolvers as urlresolvers
from django.core.urlresolvers import (
clear_url_caches, get_script_prefix, get_urlconf,
is_valid_path, resolve, reverse, reverse_lazy, set_script_prefix,
set_urlconf, LocaleRegexProvider, LocaleRegexURLResolver, NoReverseMatch, RegexURLPattern,
RegexURLResolver, Resolver404, ResolverMatch, get_ns_resolver, get_resolver, get_callable, get_mod_func
)
URLPattern = RegexURLPattern
URLResolver = RegexURLResolver
try:
from django.shortcuts import resolve_url
except ImportError: # django < 1.5
from .shortcuts import resolve_url
from django.template.loader import render_to_string as render_to_string_django
_context_instance_undefined = object()
_dictionary_undefined = object()
_dirs_undefined = object()
def render_to_string(template_name, context=None,
context_instance=_context_instance_undefined,
dirs=_dirs_undefined,
dictionary=_dictionary_undefined,
request=None, using=None):
if (context_instance is _context_instance_undefined and dirs is _dirs_undefined and
dictionary is _dictionary_undefined):
if django.VERSION >= (1, 8):
# Call new render_to_string with new arguments
return render_to_string_django(template_name, context, request, using)
else:
# Call legacy render_to_string with new arguments
from django.template import RequestContext
context_instance = RequestContext(request) if request else None
return render_to_string_django(template_name, context, context_instance)
else:
if django.VERSION >= (1, 10):
# Call new render_to_string with legacy arguments
raise NotImplementedError('Django compat does not support calling post-1.8 render_to_string with pre-1.8 '
'keyword arguments')
else:
# Call legacy render_to_string with legacy arguments
if dictionary is _dictionary_undefined:
dictionary = {}
if context_instance is _context_instance_undefined:
context_instance = None
return render_to_string_django(template_name, dictionary, context_instance)
### Undocumented ###
try:
from django.template import VariableNode
except:
from django.template.base import VariableNode
# slugify template filter is available as a standard python function at django.utils.text since django 1.5
try:
from django.utils.text import slugify
except:
from django.template.defaultfilters import slugify
if django.VERSION < (1, 7):
from django.contrib.contenttypes.generic import GenericForeignKey
elif django.VERSION < (1, 9):
from django.contrib.contenttypes.fields import GenericForeignKey
else:
pass # Loading models from __init__ is deprecated from 1.9. Import from compat.models instead
# commit_on_success replaced by atomic in Django >=1.8
atomic = commit_on_success = getattr(django.db.transaction, 'atomic', None) or getattr(django.db.transaction, 'commit_on_success')
# Removed from django.contrib.sites.models in Django 1.9
try:
from django.contrib.sites.shortcuts import get_current_site
except ImportError:
from django.contrib.sites.models import get_current_site
# Renamed utils and removed in Django 1.9
try:
from django.contrib.admin import utils as admin_utils
except ImportError:
from django.contrib.admin import util as admin_utils
# the tests will try to import these
__all__ = [
'add_to_builtins',
'get_model',
'get_model_name',
'get_user_model',
'get_username_field',
'import_string',
'commit',
'rollback',
'user_model_label',
'url',
'patterns',
'include',
'handler404',
'handler500',
'get_ident',
# 'mock',
# 'unittest',
'urlparse',
'parse_qs',
'urlunparse',
'urlencode',
'unquote_plus',
'DjangoJSONEncoder',
'JsonResponse',
'HttpResponseBase',
'python_2_unicode_compatible',
'URLValidator',
'EmailValidator',
'View',
'StringIO',
'BytesIO',
'clean_manytomany_helptext',
'smart_text',
'force_text',
'simplejson',
'import_module',
'VariableNode',
'slugify',
'GenericForeignKey',
'SortedDict',
'atomic',
'commit_on_success', # alias
'format_html',
'resolve_url',
'close_connection',
'get_template_loaders',
'LocaleRegexProvider', 'LocaleRegexURLResolver', 'NoReverseMatch',
'RegexURLPattern', 'RegexURLResolver', # Old names before 2.0, alias after
'URLPattern', 'URLResolver', # New names in 2.0, alias before
'Resolver404', 'ResolverMatch', 'clear_url_caches', 'get_callable', 'get_mod_func', 'get_ns_resolver',
'get_resolver', 'get_script_prefix', 'get_urlconf', 'is_valid_path', 'resolve', 'reverse', 'reverse_lazy',
'set_script_prefix', 'set_urlconf',
'render_to_string',
'get_current_site',
'admin_utils'
]
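# Illustrative downstream usage (an assumption, not part of this module):
# consumers import version-agnostic names from compat instead of choosing a
# Django-version-specific import path themselves, e.g.
#
#   from compat import get_user_model, reverse, render_to_string
#   User = get_user_model()
#   index_url = reverse('admin:index')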
|
python
|
name = 'lib_csv'
title = 'lib_csv: functions to read and write csv files'
version = '0.1.0'
url = 'https://github.com/bitranox/lib_csv'
author = 'Robert Nowotny'
author_email = '[email protected]'
shell_command = 'lib_csv'
def print_version() -> None:
    print(f'version: {version}')
def print_info() -> None:
    print(f"""information for "{name}":
          {title}
          Version : {version}
          url : {url}
          author : {author}
          author_email : {author_email}""")
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-09-12 19:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('logger', '0040_auto_20170912_1504'),
]
operations = [
migrations.AlterField(
model_name='instance',
name='uuid',
field=models.CharField(db_index=True, default='', max_length=249),
),
]
|
python
|
from fabric.api import run, quiet
from braid import succeeds, cacheInEnvironment
@cacheInEnvironment
def distroName():
"""
Get the name of the distro.
"""
with quiet():
lsb = run('/usr/bin/lsb_release --id --short', warn_only=True)
if lsb.succeeded:
return lsb.lower()
distros = [
('centos', '/etc/centos-release'),
('fedora', '/etc/fedora-release'),
('rhel', '/etc/redhat-release'),
('debian', '/etc/debian_version'),
]
for distro, sentinel in distros:
if succeeds('/usr/bin/test -f {}'.format(sentinel)):
return distro
def distroFamily():
"""
Get the family of the distro.
@returns: C{'debian'} or C{'fedora'}
"""
families = {
'debian': ['debian', 'ubuntu'],
'fedora': ['fedora', 'centos', 'rhel'],
}
distro = distroName()
    for family, members in families.items():
if distro in members:
return family
return 'other'
@cacheInEnvironment
def arch():
"""
    Get the architecture of the machine.
"""
return run('/bin/uname --machine')
@cacheInEnvironment
def isRoot():
"""
Check if the current user is root.
"""
return run('id -u') == '0'
__all__ = ['distroName', 'distroFamily', 'arch']
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_acp-calendar
------------
Tests for `acp-calendar` models module.
"""
import datetime
import os
from unittest import mock
from django.test import TestCase
from django.test import override_settings
from acp_calendar.initial_data import get_holiday_type_list, get_holidays_list
from acp_calendar.models import HolidayType, ACPHoliday, FiscalYear, ACPCalendarException
from .utils import TestOutputMixin
real_datetime_class = datetime.datetime
def mock_datetime(target, datetime_module):
class DatetimeSubclassMeta(type):
@classmethod
def __instancecheck__(mcs, obj):
return isinstance(obj, real_datetime_class)
class BaseMockedDatetime(real_datetime_class):
@classmethod
def now(cls, tz=None):
return target.replace(tzinfo=tz)
@classmethod
def utcnow(cls):
return target
@classmethod
def today(cls):
return target
# Python2 & Python3-compatible metaclass
MockedDatetime = DatetimeSubclassMeta('datetime', (BaseMockedDatetime,), {})
return mock.patch.object(datetime_module, 'datetime', MockedDatetime)
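class TestMockDatetimeSketch(TestCase):
    """Illustrative usage of the mock_datetime helper above (an added sketch,
    not one of the original tests)."""
    def test_today_is_frozen(self):
        target = real_datetime_class(2016, 1, 1)
        with mock_datetime(target, datetime):
            self.assertEqual(target, datetime.datetime.today())
            # Instances of the patched class still satisfy isinstance checks
            # against the real datetime class, thanks to the metaclass.
            self.assertTrue(isinstance(datetime.datetime.now(), real_datetime_class))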
class TestFiscalYear(TestCase):
def test__create(self):
fy = FiscalYear(2016)
self.assertEqual('FY16', str(fy))
self.assertEqual(datetime.date(2015, 10, 1), fy.start_date)
self.assertEqual(datetime.date(2016, 9, 30), fy.end_date)
def test__str(self):
fy = FiscalYear(2014, display='AF%s')
self.assertEqual('AF14', str(fy))
def test_create_from_date(self):
cdate = datetime.date(2013, 10, 1)
fy = FiscalYear.create_from_date(cdate)
self.assertEqual('FY14', str(fy))
def test_create_from_date_2(self):
cdate = datetime.date(2014, 9, 1)
fy = FiscalYear.create_from_date(cdate)
self.assertEqual('FY14', str(fy))
def test_create_from_date_datetime(self):
cdate = datetime.datetime(2013, 10, 1, 0, 0, 0)
fy = FiscalYear.create_from_date(cdate)
self.assertEqual('FY14', str(fy))
@mock.patch('django.utils.timezone.now')
def test_current_fiscal_year(self, mock_now):
mock_now.return_value = datetime.datetime(2013, 10, 1, 0, 0, 0)
fy = FiscalYear.current_fiscal_year()
self.assertEqual('FY14', str(fy))
class TestHolidayType(TestCase):
def setUp(self):
pass
def test_create(self):
loaded_holiday_types = len(get_holiday_type_list())
data = {'name': 'My Holiday'}
HolidayType.objects.create(**data)
self.assertEqual(12, loaded_holiday_types)
self.assertEqual(1 + loaded_holiday_types, HolidayType.objects.count())
def test_str(self):
navidad = HolidayType.objects.get(short_name='navidad')
self.assertEqual('Navidad', str(navidad))
def tearDown(self):
pass
class TestACPHoliday(TestOutputMixin, TestCase):
def setUp(self):
pass
def test_str(self):
holiday = ACPHoliday.objects.first()
self.assertEqual('2006-01-01 Año Nuevo', str(holiday))
def test_load_initial(self):
loaded_holidays = len(get_holidays_list())
self.assertEqual(144, ACPHoliday.objects.count())
        self.assertEqual(datetime.date(2006, 1, 1), ACPHoliday.objects.first().date)
        self.assertEqual(datetime.date(2018, 12, 25), ACPHoliday.objects.last().date)
def test_days_in_range_generator(self):
        start_date = datetime.date(2016, 1, 1)
        end_date = datetime.date(2016, 1, 31)
jan_days = list(ACPHoliday.days_in_range_generator(start_date, end_date))
self.assertEqual(31, len(jan_days))
self.assertEqual(jan_days[0], start_date)
self.assertEqual(jan_days[30], end_date)
def test_get_working_days(self):
        start_date = datetime.date(2016, 1, 1)
        end_date = datetime.date(2016, 1, 31)
working_days = ACPHoliday.get_working_days(start_date, end_date)
self.assertEqual(19, working_days)
def test_get_working_days_no_work(self):
        start_date = datetime.date(2016, 1, 1)
        end_date = datetime.date(2016, 1, 2)
working_days = ACPHoliday.get_working_days(start_date, end_date)
self.assertEqual(0, working_days)
def test_get_working_days_wrong_dates(self):
start_date = datetime.date(2016, 1, 5)
end_date = datetime.date(2016, 1, 2)
        try:
            ACPHoliday.get_working_days(start_date, end_date)
            self.fail('Did not raise ACPCalendarException')
except ACPCalendarException as e:
self.assertEqual('Start date cannot occur after end date', str(e))
def test_validate_dates_last_holiday(self):
first_holiday = ACPHoliday.objects.all().first()
last_holiday = ACPHoliday.objects.all().last()
try:
ACPHoliday.validate_dates(first_holiday.date, last_holiday.date + datetime.timedelta(days=1))
            self.fail('ACPCalendarException should have been raised')
except ACPCalendarException as e:
self.assertEqual('End date exceed the last registered holiday', str(e))
def test_validate_dates_first_holiday(self):
first_holiday = ACPHoliday.objects.all().first()
last_holiday = ACPHoliday.objects.all().last()
try:
ACPHoliday.validate_dates(first_holiday.date - datetime.timedelta(days=1), last_holiday.date)
            self.fail('ACPCalendarException should have been raised')
except ACPCalendarException as e:
self.assertEqual('Start date precedes the first registered holiday', str(e))
def test_week_end_days(self):
start_date = datetime.date(2016, 1, 1)
end_date = datetime.date(2016, 1, 31)
week_end_days = ACPHoliday.week_end_days(start_date, end_date)
self.assertEqual(10, week_end_days)
def test_working_delta(self):
start_date = datetime.date(2016, 1, 1)
end_date = ACPHoliday.working_delta(start_date, 15)
self.assertEqual(datetime.date(2016, 1, 25), end_date)
end_date = ACPHoliday.working_delta(start_date, 5)
self.assertEqual(datetime.date(2016, 1, 11), end_date)
def test_get_working_days_for_month(self):
working_days = ACPHoliday.get_working_days_for_month(2016, 3)
self.assertEqual(22, working_days)
def test_get_working_days_for_month_illegal_month(self):
        try:
            ACPHoliday.get_working_days_for_month(2016, 13)
            self.fail('ACPCalendarException was not raised')
except ACPCalendarException as e:
self.assertEqual('bad month number 13; must be 1-12', str(e))
def test_convert_to_date(self):
study_date = ACPHoliday.convert_to_date('2016-07-08')
self.assertEqual(datetime.date(2016, 7, 8), study_date)
def test_convert_to_date_invalid(self):
        try:
            ACPHoliday.convert_to_date(5)
            self.fail('ACPCalendarException was not raised')
except ACPCalendarException as e:
self.assertEqual('Dates must be either string or date objects', str(e))
@override_settings(DEBUG=True)
def test_write_json(self):
dated_filename = self.get_dated_output_filename('test_write_json.json')
results = ACPHoliday.objects.all().write_json(dated_filename)
self.assertEqual(144, results.count())
self.assertTrue(os.path.exists(dated_filename))
holidays_in_json = get_holidays_list(dated_filename)
self.assertEqual('2006-01-01', holidays_in_json[0]['date'])
self.assertEqual('2018-12-25', holidays_in_json[-1]['date'])
self.assertEqual(144, len(holidays_in_json))
#self.clean_output = False
self.clean_output_folder(dated_filename)
@override_settings(DEBUG=True)
def test_write_json_filter(self):
dated_filename = self.get_dated_output_filename('test_write_json_filter.json')
ACPHoliday.objects.update_fiscal_years()
results = ACPHoliday.objects.filter(fiscal_year=2015).write_json(dated_filename)
self.assertEqual(11, results.count())
self.assertTrue(os.path.exists(dated_filename))
holidays_in_json = get_holidays_list(dated_filename)
self.assertEqual('2014-11-03', holidays_in_json[0]['date'])
self.assertEqual('2015-05-01', holidays_in_json[-1]['date'])
self.assertEqual(11, len(holidays_in_json))
self.clean_output_folder(dated_filename)
# def test_filter(self):
# results = ACPHoliday.objects.filter(fiscal_year=2015)
# self.assertEqual(5, results.count())
|
python
|
load("//:plugin.bzl", "ProtoPluginInfo")
ProtoCompileInfo = provider(fields = {
"label": "label object",
"plugins": "ProtoPluginInfo object",
"descriptor": "descriptor set file",
"outputs": "generated protoc outputs",
"files": "final generated files",
"protos": "generated protos (copies)",
"args": "proto arguments",
"tools": "proto tools",
"verbose": "verbose level",
})
rust_keywords = {
"as": True,
"break": True,
"const": True,
"continue": True,
"crate": True,
"else": True,
"enum": True,
"extern": True,
"false": True,
"fn": True,
"for": True,
"if": True,
"impl": True,
"let": True,
"loop": True,
"match": True,
"mod": True,
"move": True,
"mut": True,
"pub": True,
"ref": True,
"return": True,
"self": True,
"Self": True,
"static": True,
"struct": True,
"super": True,
"trait": True,
"true": True,
"type": True,
"unsafe": True,
"use": True,
"where": True,
"while": True,
}
objc_upper_segments = {
"url": "URL",
"http": "HTTP",
"https": "HTTPS",
}
# Hack - providers indexing is by int, but I have no idea how to get the actual
# provider object here.
ProtoInfoProvider = 0
def _capitalize(s):
"""Capitalize a string - only first letter
Args:
s (string): The input string to be capitalized.
Returns:
(string): The capitalized string.
"""
return s[0:1].upper() + s[1:]
def _pascal_objc(s):
"""Convert pascal_case -> PascalCase
Objective C uses pascal case, but there are e exceptions that it uppercases
the entire segment: url, http, and https.
https://github.com/protocolbuffers/protobuf/blob/54176b26a9be6c9903b375596b778f51f5947921/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc#L91
Args:
s (string): The input string to be capitalized.
Returns: (string): The capitalized string.
"""
segments = []
for segment in s.split("_"):
repl = objc_upper_segments.get(segment)
if repl:
segment = repl
else:
segment = _capitalize(segment)
segments.append(segment)
return "".join(segments)
def _pascal_case(s):
"""Convert pascal_case -> PascalCase
Args:
s (string): The input string to be capitalized.
Returns:
(string): The capitalized string.
"""
return "".join([_capitalize(part) for part in s.split("_")])
def _rust_keyword(s):
"""Check if arg is a rust keyword and append '_pb' if true.
Args:
s (string): The input string to be capitalized.
Returns:
(string): The appended string.
"""
return s + "_pb" if rust_keywords.get(s) else s
def _get_output_sibling_file(pattern, proto, descriptor):
"""Get the correct place to
The ctx.actions.declare_file has a 'sibling = <File>' feature that allows
one to declare files in the same directory as the sibling.
This function checks for the prefix special token '{package}' and, if true,
uses the descriptor as the sibling (which declares the output file will be
in the root of the generated tree).
Args:
pattern: the input filename pattern <string>
proto: the .proto <Generated File> (in the staging area)
descriptor: the descriptor <File> that marks the staging root.
Returns:
the <File> to be used as the correct sibling.
"""
if pattern.startswith("{package}/"):
return descriptor
return proto
def _get_plugin_out(ctx, plugin):
if not plugin.out:
return None
filename = plugin.out
filename = filename.replace("{name}", ctx.label.name)
return filename
def _get_output_filename(src, plugin, pattern):
"""Build the predicted filename for file generated by the given plugin.
A 'proto_plugin' rule allows one to define the predicted outputs. For
flexibility, we allow special tokens in the output filename that get
replaced here. The overall pattern is '{token}' mimicking the python
'format' feature.
Additionally, there are '|' characters like '{basename|pascal}' that can be
read as 'take the basename and pipe that through the pascal function'.
Args:
src: the .proto <File>
plugin: the <PluginInfo> object.
pattern: the input pattern string
Returns:
the replaced string
"""
# If output to srcjar, don't emit a per-proto output file.
if plugin.out:
return None
# Slice off this prefix if it exists, we don't use it here.
if pattern.startswith("{package}/"):
pattern = pattern[len("{package}/"):]
basename = src.basename
if basename.endswith(".proto"):
basename = basename[:-6]
elif basename.endswith(".protodevel"):
basename = basename[:-11]
filename = basename
if pattern.find("{basename}") != -1:
filename = pattern.replace("{basename}", basename)
elif pattern.find("{basename|pascal}") != -1:
filename = pattern.replace("{basename|pascal}", _pascal_case(basename))
elif pattern.find("{basename|pascal|objc}") != -1:
filename = pattern.replace("{basename|pascal|objc}", _pascal_objc(basename))
elif pattern.find("{basename|rust_keyword}") != -1:
filename = pattern.replace("{basename|rust_keyword}", _rust_keyword(basename))
else:
filename = basename + pattern
return filename
def _get_proto_filename(src):
"""Assemble the filename for a proto
Args:
src: the .proto <File>
Returns:
<string> of the filename.
"""
parts = src.short_path.split("/")
if len(parts) > 1 and parts[0] == "..":
return "/".join(parts[2:])
return src.short_path
def copy_proto(ctx, descriptor, src):
"""Copy a proto to the 'staging area'
Args:
ctx: the <ctx> object
descriptor: the descriptor <File> that marks the root of the 'staging area'.
src: the source .proto <File>
Returns:
<Generated File> for the copied .proto
"""
proto = ctx.actions.declare_file(_get_proto_filename(src), sibling = descriptor)
ctx.actions.run_shell(
mnemonic = "CopyProto",
inputs = [src],
outputs = [proto],
command = "cp %s %s" % (src.path, proto.path),
)
return proto
def _copy_jar_to_srcjar(ctx, jar):
"""Copy .jar to .srcjar
Args:
ctx: the <ctx> object
jar: the <Generated File> of a jar containing source files.
Returns:
<Generated File> for the renamed file
"""
srcjar = ctx.actions.declare_file("%s/%s.srcjar" % (ctx.label.name, ctx.label.name))
ctx.actions.run_shell(
mnemonic = "CopySrcjar",
inputs = [jar],
outputs = [srcjar],
command = "mv %s %s" % (jar.path, srcjar.path),
)
return srcjar
def _get_plugin_option(ctx, option):
"""Build a plugin option
Args:
ctx: the <ctx> object
option: string from the <PluginInfo>
Returns:
<string> for the --plugin_out= arg
"""
return option.replace("{name}", ctx.label.name)
def _get_plugin_options(ctx, options):
"""Build a plugin option list
Args:
ctx: the <ctx> object
options: list<string> options from the <PluginInfo>
Returns:
<string> for the --plugin_out= arg
"""
return [_get_plugin_option(ctx, option) for option in options]
def get_plugin_out_arg(ctx, outdir, plugin, plugin_outfiles):
"""Build the --java_out argument
Args:
ctx: the <ctx> object
        outdir: the package output directory <string>
plugin: the <PluginInfo> object.
plugin_outfiles: The <dict<string,<File>>. For example, {closure: "library.js"}
Returns
<string> for the protoc arg list.
"""
arg = outdir
if plugin.outdir:
arg = plugin.outdir.replace("{name}", outdir)
elif plugin.out:
outfile = plugin_outfiles[plugin.name]
#arg = "%s" % (outdir)
#arg = "%s/%s" % (outdir, outfile.short_path)
arg = outfile.path
# Collate a list of options from the plugin itself PLUS options from the
# global plugin_options list (if they exist)
options = getattr(plugin, "options", []) + ctx.attr.plugin_options
if options:
arg = "%s:%s" % (",".join(_get_plugin_options(ctx, options)), arg)
return "--%s_out=%s" % (plugin.name, arg)
def _apply_plugin_transitivity_rules(ctx, targets, plugin):
"""Process the proto target list according to plugin transitivity rules
Args:
ctx: the <ctx> object
targets: the dict<string,File> of .proto files that we intend to compile.
plugin: the <PluginInfo> object.
Returns:
<list<File>> the possibly filtered list of .proto <File>s
"""
# Iterate transitivity rules like '{ "google/protobuf": "exclude" }'. The
# only rule type implemented is "exclude", which checks if the pathname or
# dirname ends with the given pattern. If so, remove that item in the
# targets list.
#
# Why does this feature exist? Well, library rules like C# require all the
# proto files to be present during the compilation (collected via transitive
# sources). However, since the well-known types are already present in the
# library dependencies, we don't actually want to compile well-known types
# (but do want to compile everything else).
#
transitivity = {}
transitivity.update(plugin.transitivity)
transitivity.update(ctx.attr.transitivity)
for pattern, rule in transitivity.items():
if rule == "exclude":
for key, target in targets.items():
if ctx.attr.verbose > 2:
print("Checking '%s' endswith '%s'" % (target.short_path, pattern))
if target.dirname.endswith(pattern) or target.path.endswith(pattern):
targets.pop(key)
if ctx.attr.verbose > 2:
print("Removing '%s' from the list of files to compile as plugin '%s' excluded it" % (target.short_path, plugin.name))
elif ctx.attr.verbose > 2:
print("Keeping '%s' (not excluded)" % (target.short_path))
elif rule == "include":
for key, target in targets.items():
if target.dirname.endswith(pattern) or target.path.endswith(pattern):
if ctx.attr.verbose > 2:
print("Keeping '%s' (explicitly included)" % (target.short_path))
else:
targets.pop(key)
if ctx.attr.verbose > 2:
print("Removing '%s' from the list of files to compile as plugin '%s' did not include it" % (target.short_path, plugin.name))
else:
fail("Unknown transitivity rule '%s'" % rule)
return targets
def _get_plugin_outputs(ctx, descriptor, outputs, src, proto, plugin):
"""Get the predicted generated outputs for a given plugin
Args:
ctx: the <ctx> object
descriptor: the descriptor <Generated File>
outputs: the list of outputs.
        src: the original .proto source <Source File>.
proto: the copied .proto <Generated File> (the one in the package 'staging area')
plugin: the <PluginInfo> object.
Returns:
<list<Generated File>> the augmented list of files that will be generated
"""
for output in plugin.outputs:
filename = _get_output_filename(src, plugin, output)
if not filename:
continue
sibling = _get_output_sibling_file(output, proto, descriptor)
outputs.append(ctx.actions.declare_file(filename, sibling = sibling))
return outputs
def get_plugin_runfiles(tool):
"""Gather runfiles for a plugin.
"""
files = []
if not tool:
return files
info = tool[DefaultInfo]
if not info:
return files
if info.files:
files += info.files.to_list()
if info.default_runfiles:
runfiles = info.default_runfiles
if runfiles.files:
files += runfiles.files.to_list()
if info.data_runfiles:
runfiles = info.data_runfiles
if runfiles.files:
files += runfiles.files.to_list()
return files
def proto_compile_impl(ctx):
###
### Part 1: setup variables used in scope
###
# <int> verbose level
verbose = ctx.attr.verbose
# <File> the protoc tool
protoc = ctx.executable.protoc
# <bool> if this is a gRPC compilation
has_services = ctx.attr.has_services
# <File> for the output descriptor. Often used as the sibling in
# 'declare_file' actions.
descriptor = ctx.outputs.descriptor
# <string> The directory where that generated descriptor is.
outdir = descriptor.dirname
# <list<ProtoInfo>> A list of ProtoInfo
deps = [dep.proto for dep in ctx.attr.deps]
# <list<PluginInfo>> A list of PluginInfo
plugins = [plugin[ProtoPluginInfo] for plugin in ctx.attr.plugins]
# <list<File>> The list of .proto files that will exist in the 'staging
# area'. We copy them from their source location into place such that a
# single '-I.' at the package root will satisfy all import paths.
protos = []
# <dict<string,File>> The set of .proto files to compile, used as the final
# list of arguments to protoc. This is a subset of the 'protos' list that
# are directly specified in the proto_library deps, but excluding other
# transitive .protos. For example, even though we might transitively depend
# on 'google/protobuf/any.proto', we don't necessarily want to actually
# generate artifacts for it when compiling 'foo.proto'. Maintained as a dict
# for set semantics. The key is the value from File.path.
targets = {}
# <dict<string,File>> A mapping from plugin name to the plugin tool. Used to
# generate the --plugin=protoc-gen-KEY=VALUE args
plugin_tools = {}
# <dict<string,<File> A mapping from PluginInfo.name to File. In the case
# of plugins that specify a single output 'archive' (like java), we gather
# them in this dict. It is used to generate args like
# '--java_out=libjava.jar'.
plugin_outfiles = {}
# <list<File>> The list of srcjars that we're generating (like
# 'foo.srcjar').
srcjars = []
# <list<File>> The list of generated artifacts like 'foo_pb2.py' that we
# expect to be produced.
outputs = []
# Additional data files from plugin.data needed by plugin tools that are not
# single binaries.
data = []
###
### Part 2: gather plugin.out artifacts
###
# Some protoc plugins generate a set of output files (like python) while
# others generate a single 'archive' file that contains the individual
# outputs (like java). This first loop is for the latter type. In this
# scenario, the PluginInfo.out attribute will exist; the predicted file
# output location is relative to the package root, marked by the descriptor
# file. Jar outputs are gathered as a special case as we need to
# post-process them to have a 'srcjar' extension (java_library rules don't
# accept source jars with a 'jar' extension)
for plugin in plugins:
if plugin.executable:
plugin_tools[plugin.name] = plugin.executable
data += plugin.data + get_plugin_runfiles(plugin.tool)
filename = _get_plugin_out(ctx, plugin)
if not filename:
continue
out = ctx.actions.declare_file(filename, sibling = descriptor)
outputs.append(out)
plugin_outfiles[plugin.name] = out
if out.path.endswith(".jar"):
srcjar = _copy_jar_to_srcjar(ctx, out)
srcjars.append(srcjar)
###
### Part 3a: Gather generated artifacts for each dependency .proto source file.
###
for dep in deps:
# Iterate all the directly specified .proto files. If we have already
# processed this one, skip it to avoid declaring duplicate outputs.
# Create an action to copy the proto into our staging area. Consult the
# plugin to assemble the actual list of predicted generated artifacts
# and save these in the 'outputs' list.
for src in dep.direct_sources:
if targets.get(src.path):
continue
proto = copy_proto(ctx, descriptor, src)
targets[src] = proto
protos.append(proto)
# Iterate all transitive .proto files. If we already processed in the
# loop above, skip it. Otherwise add a copy action to get it into the
# 'staging area'
for src in dep.transitive_sources.to_list():
if targets.get(src):
continue
if verbose > 2:
print("transitive source: %r" % src)
proto = copy_proto(ctx, descriptor, src)
protos.append(proto)
if ctx.attr.transitive:
targets[src] = proto
###
    ### Part 3b: apply transitivity rules
###
# If the 'transitive = true' was enabled, we collected all the protos into
# the 'targets' list.
# At this point we want to post-process that list and remove any protos that
# might be incompatible with the plugin transitivity rules.
if ctx.attr.transitive:
for plugin in plugins:
targets = _apply_plugin_transitivity_rules(ctx, targets, plugin)
###
### Part 3c: collect generated artifacts for all in the target list of protos to compile
###
for src, proto in targets.items():
for plugin in plugins:
outputs = _get_plugin_outputs(ctx, descriptor, outputs, src, proto, plugin)
###
### Part 4: build list of arguments for protoc
###
args = ["--descriptor_set_out=%s" % descriptor.path]
# By default we have a single 'proto_path' argument at the 'staging area'
# root.
args += ["--proto_path=%s" % outdir]
if ctx.attr.include_imports:
args += ["--include_imports"]
if ctx.attr.include_source_info:
args += ["--include_source_info"]
for plugin in plugins:
args += [get_plugin_out_arg(ctx, outdir, plugin, plugin_outfiles)]
args += ["--plugin=protoc-gen-%s=%s" % (k, v.path) for k, v in plugin_tools.items()]
args += [proto.path for proto in targets.values()]
###
### Part 5: build the final protoc command and declare the action
###
mnemonic = "ProtoCompile"
command = " ".join([protoc.path] + args)
if verbose > 0:
print("%s: %s" % (mnemonic, command))
if verbose > 1:
command += " && echo '\n##### SANDBOX AFTER RUNNING PROTOC' && find ."
if verbose > 2:
command = "echo '\n##### SANDBOX BEFORE RUNNING PROTOC' && find . && " + command
if verbose > 3:
command = "env && " + command
for f in outputs:
print("expected output: %q", f.path)
ctx.actions.run_shell(
mnemonic = mnemonic,
command = command,
inputs = protos + data,
outputs = outputs + [descriptor] + ctx.outputs.outputs,
tools = [protoc] + plugin_tools.values()
)
###
### Part 6: assemble output providers
###
# The files for 'DefaultInfo' include any explicit outputs for the rule. If
# we are generating srcjars, use those as the final outputs rather than
# their '.jar' intermediates. Otherwise include all the file outputs.
# NOTE: this looks a little wonky here. It probably works in simple cases
    # where the list of plugins has length 1 OR all outputting to jars OR all
# not outputting to jars. Probably would break here if they were mixed.
files = [] + ctx.outputs.outputs
if len(srcjars) > 0:
files += srcjars
else:
files += outputs
if len(plugin_outfiles) > 0:
files += plugin_outfiles.values()
return [ProtoCompileInfo(
label = ctx.label,
plugins = plugins,
protos = protos,
outputs = outputs,
files = files,
tools = plugin_tools,
args = args,
descriptor = descriptor,
), DefaultInfo(files = depset(files))]
proto_compile = rule(
implementation = proto_compile_impl,
attrs = {
"deps": attr.label_list(
doc = "proto_library dependencies",
mandatory = True,
providers = ["proto"],
),
"plugins": attr.label_list(
doc = "List of protoc plugins to apply",
providers = [ProtoPluginInfo],
mandatory = True,
),
"plugin_options": attr.string_list(
doc = "List of additional 'global' options to add (applies to all plugins)",
),
"outputs": attr.output_list(
doc = "Escape mechanism to explicitly declare files that will be generated",
),
"has_services": attr.bool(
doc = "If the proto files(s) have a service rpc, generate grpc outputs",
),
"protoc": attr.label(
doc = "The protoc tool",
default = "@com_google_protobuf//:protoc",
cfg = "host",
executable = True,
),
"verbose": attr.int(
doc = "Increase verbose level for more debugging",
),
"include_imports": attr.bool(
doc = "Pass the --include_imports argument to the protoc_plugin",
default = True,
),
"include_source_info": attr.bool(
doc = "Pass the --include_source_info argument to the protoc_plugin",
default = True,
),
"transitive": attr.bool(
doc = "Emit transitive artifacts",
),
"transitivity": attr.string_dict(
doc = "Transitive rules. When the 'transitive' property is enabled, this string_dict can be used to exclude protos from the compilation list",
),
},
outputs = {
"descriptor": "%{name}/descriptor.source.bin",
},
output_to_genfiles = True,
)
def invoke_transitive(proto_compile_rule, name_suffix, kwargs):
"""Invoke a proto_compile rule using kwargs
Invoke is a convenience function for library rules that call proto_compile
rules. Rather than having to do the same boilerplate across many different
files, this function centralizes the logic of calling proto_compile rules
using kwargs.
Args:
proto_compile_rule: the rule function to invoke
name_suffix: a suffix for the kwargs.name to use for the rule
        kwargs: the **kwargs dict, passed directly (not deconstructed)
Returns:
The name of the invoked rule. This can be used in the srcs label of a library rule.
"""
deps = kwargs.get("deps")
has_services = kwargs.get("has_services")
include_imports = kwargs.get("include_imports")
include_source_info = kwargs.get("include_source_info")
name = kwargs.get("name")
outputs = kwargs.get("outputs")
plugin_options = kwargs.get("plugin_options")
plugins = kwargs.get("plugins")
protoc = kwargs.get("protoc")
transitive = kwargs.get("transitive", True)
verbose = kwargs.get("verbose")
visibility = kwargs.get("visibility")
rule_name = name + name_suffix
proto_compile_rule(
name = rule_name,
deps = deps,
has_services = has_services,
include_imports = include_imports,
include_source_info = include_source_info,
outputs = outputs,
plugin_options = plugin_options,
plugins = plugins,
protoc = protoc,
transitive = transitive,
verbose = verbose,
visibility = visibility,
)
return rule_name
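# Hypothetical usage sketch (the names 'py_proto_library' and 'py_deps' are
# illustrative assumptions, not part of this file): a language macro would
# typically call invoke_transitive to create the compile rule, then feed its
# generated sources into a library rule.
#
# def py_proto_library(**kwargs):
#     name_pb = invoke_transitive(proto_compile, "_pb", kwargs)
#     native.py_library(
#         name = kwargs.get("name"),
#         srcs = [name_pb],
#         deps = kwargs.get("py_deps", []),
#         visibility = kwargs.get("visibility"),
#     )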
|
python
|
import concurrent.futures
import urllib.request
import time
URLS = ['http://www.foxnews.com/',
'http://www.cnn.com/',
'http://europe.wsj.com/',
'http://www.bbc.co.uk/',
'http://some-made-up-domain.com/',
'http://www.nytimes.com',
'http://www.facebook.com',
'http://www.silversevensens.com',
'http://www.wakingthered.com',
'http://www.twitter.com',
'http://www.google.com',
'http://www.economist.com',
'http://www.cbc.ca',
'http://www.newyorker.com',
'http://www.nyc.gov']
# Retrieve a single page and report the url and contents
def load_url(url, timeout):
conn = urllib.request.urlopen(url, timeout=timeout)
return conn.read()
workers=5
t0 = time.time()
# We can use a with statement to ensure threads are cleaned up promptly
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
# Start the load operations and mark each future with its URL
future_to_url = {executor.submit(load_url, url, 60): url for url in URLS}
for future in concurrent.futures.as_completed(future_to_url):
url = future_to_url[future]
try:
data = future.result()
except Exception as exc:
print('%r generated an exception: %s' % (url, exc))
else:
print('%r page is %d bytes' % (url, len(data)))
t1 = time.time()
print("All URL ops took ", t1 - t0, " seconds with ", workers, " workers")
# Serial version
my_URLs = []
t2 = time.time()
for url in URLS:
    try:
        my_URLs.append(load_url(url, 60))
    except Exception:
        my_URLs.append(None)  # keep alignment with URLS for the zip below
for url, res in zip(URLS, my_URLs):
try:
print(url, " is ", len(res), " bytes ")
except Exception as exc:
print(url, " messed up ", exc)
t3 = time.time()
print("All URL ops took ", t3 - t2, " seconds serially")
|
python
|
#!/usr/local/uvcdat/bin/python
# Functions to convert between representations of energy flux and
# water flux (precipitation and evoporation) variables.
# TODO: perhaps move the physical constants used here to atmconst.py.
# These are:
# - latent heat of vaporization (water)
# - density (water)
from metrics.common.utilities import *
from unidata import udunits
import numpy
import logging
logger = logging.getLogger(__name__)
def reconcile_energyflux_precip(mv1, mv2, preferred_units=None):
# To compare LHFLX and QFLX, need to unify these to a common variables
# e.g. LHFLX (latent heat flux in W/m^2) vs. QFLX (evaporation in mm/day)
#
# If preferred_units is not provided, the units of mv2 will be
# assumed to be the preferred units.
#
# This function is used by the derived_variable definitions in
# amwg_plot_plan's standard_variables (within amwg.py).
#
# Author: S.M. Burrows, 9 Feb 2015.
# If preferred_units is not provided, assume units of mv2 assumed
# to be the preferred units.
if hasattr(mv1,'units') and hasattr(mv2,'units'):
# First, set preferred_units if needed
if preferred_units is None:
if ('_QFLX_' in mv2.id) or ('_QFLX' in mv1.id):
logger.info("Setting preferred_units='mm/day'")
preferred_units='mm/day'
if ('_LHFLX_' in mv2.id) or ('_LHFLX' in mv1.id):
logger.info("Setting preferred_units='W/m^2'")
preferred_units='W/m^2'
if preferred_units is None:
logger.info("Setting preferred_units to mv.units=%s", mv2.units)
preferred_units = mv2.units
# syntax correction (just in case)
if preferred_units=='W/m2':
preferred_units='W/m^2'
# Now do conversions to preferred_units (only if needed)
if mv1.units!=preferred_units:
mv1 = convert_energyflux_precip(mv1, preferred_units)
if mv2.units!=preferred_units:
mv2 = convert_energyflux_precip(mv2, preferred_units)
else:
logger.error("missing units in arguments to reconcile_energyflux_precip.")
return mv1,mv2
def convert_energyflux_precip(mv, preferred_units):
# The latent heat of vaporization for water is 2260 kJ/kg
lhvap = 2260. # 'kJ/kg'
secondsperday = 86400.
kJperday = 86.4 # 'kJ/day'
if hasattr(mv,'id'):
mvid = mv.id
# syntax correction (just in case)
mv.units = mv.units.replace(' m-2','/m^2')
mv.units = mv.units.replace(' s-1','/s')
if mv.units=='W/m2':
mv.units='W/m^2'
if mv.units=='mm/d':
mv.units = 'mm/day'
# LHFLX
if mv.units=="W/m~S~2~N":
logger.info("Arbitrarily decided that W/m~S~2~N is W/m^2 for %s", mv.id)
mv.units="W/m^2"
if mv.units=="W/m~S~2~N~":
logger.info("Arbitrarily decided that W/m~S~2~N~ is W/m^2 for %s", mv.id)
mv.units="W/m^2"
if mv.units==preferred_units:
return mv
# convert precip between kg/m2/s and mm/day
if ( mv.units=="kg/m2/s" or mv.units=="kg/m^2/s" or mv.units=="kg/s/m2" or\
mv.units=="kg/s/m^2") and preferred_units=="mm/day":
        mv = mv * secondsperday # convert kg/m2/s to kg/m2/day [= mm/day]
mv.units="mm/day" # [if 1 kg = 10^6 mm^3 as for water]
elif mv.units=='mm/day' and preferred_units=="kg/m2/s":
mv = mv / secondsperday # convert to mm/sec [= kg/m2/s]
mv.units="kg/m2/s" # [if 1 kg = 10^6 mm^3 as for water]
# convert between energy flux (W/m2) and water flux (mm/day)
elif mv.units=="kg/m2/s" and preferred_units=="W/m^2":
        mv = mv * secondsperday * lhvap / kJperday # kg/m2/s -> mm/day -> W/m^2 (1 W = 86.4 kJ/day)
mv.units = 'W/m^2'
elif mv.units=='mm/day' and preferred_units=='W/m^2':
# 1 W = 86.4 kJ / day
mv = mv * lhvap / kJperday
mv.units = 'W/m^2'
elif mv.units=='W/m^2' and preferred_units=='mm/day':
mv = mv * kJperday / lhvap
mv.units = 'mm/day'
else:
tmp = udunits(1.0,mv.units)
try:
s,i = tmp.how(preferred_units)
except Exception as e:
# conversion not possible.
logger.error("could not convert from %s to %s" ,mv.units, preferred_units)
raise e
if not ( numpy.allclose(s,1.0) and numpy.allclose(i,0.0) ):
mv = s*mv + i
mv.units = preferred_units
mv.id = mvid # reset variable id
return mv
"""
else:
print "ERROR: unknown / invalid units in arguments to reconcile_energyflux_precip."
print "mv.units = ", mv.units
print "preferred_units = ", preferred_units
raise DiagError("unknown / invalid units in arguments to reconcile_energyflux_precip.")
exit
"""
|
python
|
from random import choice
#==============================================================================
""" Split a String in Balanced Strings
Balanced strings are those that have an equal quantity of 'L' and 'R'
characters.
Given a balanced string s, split it in the maximum amount of balanced strings.
Return the maximum amount of split balanced strings.
Example 1:
Input: s = "RLRRLLRLRL"
Output: 4
Explanation: s can be split into "RL", "RRLL", "RL", "RL", each substring contains same number of 'L' and 'R'.
Example 2:
Input: s = "RLLLLRRRLR"
Output: 3
Explanation: s can be split into "RL", "LLLRRR", "LR", each substring contains same number of 'L' and 'R'.
Example 3:
Input: s = "LLLLRRRR"
Output: 1
Explanation: s can be split into "LLLLRRRR".
Example 4:
Input: s = "RLRRRLLRLL"
Output: 2
Explanation: s can be split into "RL", "RRRLLRLL", since each substring contains an equal number of 'L' and 'R'
Constraints:
1) 1 <= s.length <= 1000
2) s[i] is either 'L' or 'R'.
3) s is a balanced string.
"""
def balanced_str_split(s: str) -> int:
num_balanced_str = 0
count = 0
for c in s:
if c == 'L': count += 1
if c == 'R': count -= 1
if count == 0: num_balanced_str += 1
return num_balanced_str
def main():
s = ''.join([choice(('L', 'R')) for i in range(10)])
returned = balanced_str_split(s)
print(s, returned, sep='\t')
if __name__ == '__main__':
main()
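# Worked trace (illustration): for s = "RLRRLLRLRL" the running counter is
# -1, 0, -1, -2, -1, 0, -1, 0, -1, 0; it returns to zero four times, so:
#
# assert balanced_str_split("RLRRLLRLRL") == 4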
|
python
|
# -*- coding:utf-8 -*-
# author: [email protected]
# software: PyCharm
import openpyxl
from style.default import ExcelFontStyle
from core.base import TableCommon
if __name__ == '__main__':
"""
    Example: worksheet with a two-level (secondary) header
"""
wb = openpyxl.Workbook()
ws = wb.active
wb = TableCommon.excel_write_common(wb, ws, sheet_name='Secondary_Header',
target_name='pexcel_secondary_header.xlsx',
subject_list=['prd', 'onl', 'prd', 'onl', 'prd', 'onl', 'prd', 'onl'],
subject_list_row=2, subject_list_cloumn=2,
info_list=[
['2019.3', 22, 90, 22, 90, 22, 90, 22, 90],
['2019.4', 22, 90, 22, 90, 22, 90, 22, 90],
['2019.5', 22, 90, 22, 90, 22, 90, 22, 90],
['2019.6', 22, 90, 22, 90, 22, 90, 22, 90],
],
merge_map={'A1': 'A2', 'B1': 'C1', 'D1': 'E1', 'F1': 'G1', 'H1': 'I1'},
merge_cell_value_map={'A1': 'Month', 'B1': 'AA',
'D1': 'BB', 'F1': 'CC', 'H1': 'DD'},
info_list_row=3,
info_list_cloumn=1,
save=True,
style=ExcelFontStyle.get_default_style(),
header_style=ExcelFontStyle.get_header_style(),
max_row=2,
max_col=9,
col_wide_map={1: 13})
|
python
|
#!/usr/bin/env python
# coding=utf8
'''
Created on Jul 7, 2012
@author: gf
'''
import os
import Util
import ethtool
def GetInfoString():
active_interfaces = ethtool.get_active_devices()
all_interfaces = GetInterfaceList()
    for i in list(active_interfaces):  # iterate a copy; remove() below mutates the list
if ethtool.get_flags('%s' % i) & ethtool.IFF_POINTOPOINT:
active_interfaces.remove(i)
ret = ''
for inter in active_interfaces:
if inter in all_interfaces:
all_interfaces.remove(inter)
else:
continue
t = 'Static'
if IsInterfaceDHCP(inter):
t = 'DHCP'
ret = ret + ' %s - Active, %s\n' % (inter, t)
ret = ret + ' IP: %s\n' % ethtool.get_ipaddr(inter)
ret = ret + ' Netmask: %s\n' % ethtool.get_netmask(inter)
ret = ret + ' HWAddr: %s\n' % ethtool.get_hwaddr(inter)
for inter in all_interfaces:
t = 'Static'
if IsInterfaceDHCP(inter):
t = 'DHCP'
ret = ret + ' %s - Inactive, %s\n' % (inter, t)
if t == 'Static':
ip, mask, gw, dns = GetInterfaceConf(inter)
ret = ret + ' IP: %s\n' % ip
ret = ret + ' Netmask: %s\n' % mask
ret = ret + ' HWAddr: %s\n' % ethtool.get_hwaddr(inter)
return ret
def GetInterfaceList():
all_interfaces = ethtool.get_devices()
if 'lo' in all_interfaces:
all_interfaces.remove('lo')
    for i in list(all_interfaces):  # iterate a copy; remove() below mutates the list
filename = GetInterfaceConfigFileName(i)
if not os.access(filename, os.R_OK):
all_interfaces.remove(i)
return all_interfaces
def GetInterfaceConfigFileName(interface):
return '/etc/sysconfig/network-scripts/ifcfg-' + interface
def IsInterfaceDHCP(interface):
filename = GetInterfaceConfigFileName(interface)
if not os.access(filename, os.R_OK):
return False
f = open(filename)
bufs = f.readlines()
f.close()
for line in bufs:
line = line.lower().strip()
if line.startswith('bootproto'):
if line.find('dhcp') > 0 :
return True
else:
return False
return False
def GetInterfaceConf(interface):
ip = ""
mask = ""
gw = ""
dns = ""
filename = GetInterfaceConfigFileName(interface)
if not os.access(filename, os.R_OK):
return ip, mask, gw, dns
f = open(filename)
bufs = f.readlines()
f.close()
for line in bufs:
line = line.lower().strip()
if line.startswith('ipaddr'):
splits = line.split('=')
if len(splits) >= 2:
ip = splits[1]
if line.startswith('netmask'):
splits = line.split('=')
if len(splits) >= 2:
mask = splits[1]
if line.startswith('gateway'):
splits = line.split('=')
if len(splits) >= 2:
gw = splits[1]
if line.startswith('dns1'):
splits = line.split('=')
if len(splits) >= 2:
dns = splits[1]
return ip, mask, gw, dns
KEYWORD_LIST = ["bootproto", "ipaddr", "netmask", "gateway", "dns1", "dns2"]
DNS_KEYLIST = ["nameserver"]
def SetDNS(dns):
filename = "/etc/resolv.conf"
if not os.access(filename, os.R_OK):
return
f = open(filename)
bufs = f.readlines()
f.close()
Util.RunShellWithLog("mv -f %s %s.bak" % (filename, filename))
f = open(filename, "w")
comstr = 'nameserver %s' % dns
    for line in bufs:
        low = line.lower().strip()
        # skip an existing entry identical to the one appended below
        if low == comstr:
            continue
        f.write(line)
f.write('\n')
f.write(comstr)
f.flush()
f.close()
def SetDHCP(interface):
filename = GetInterfaceConfigFileName(interface)
if not os.access(filename, os.R_OK):
return
f = open(filename)
bufs = f.readlines()
f.close()
Util.RunShellWithLog("mv -f %s ~/%s.bak" % (filename, filename))
f = open(filename, "w")
for line in bufs:
skip = False
low = line.lower().strip()
for key in KEYWORD_LIST:
if low.startswith(key):
skip = True
break
if not skip:
f.write(line)
f.write('BOOTPROTO=dhcp\n')
f.flush()
f.close()
Util.RunShellWithLog('systemctl restart NetworkManager.service')
def SetStatic(interface, ip, mask, gw, dns):
filename = GetInterfaceConfigFileName(interface)
if not os.access(filename, os.R_OK):
return
f = open(filename)
bufs = f.readlines()
f.close()
Util.RunShellWithLog("mv -f %s ~/%s.bak" % (filename, filename))
f = open(filename, "w")
for line in bufs:
skip = False
low = line.lower().strip()
for key in KEYWORD_LIST:
if low.startswith(key):
skip = True
break
if not skip:
f.write(line)
f.write('BOOTPROTO=static\n')
f.write('IPADDR=%s\n' % ip)
f.write('NETMASK=%s\n' % mask)
f.write('GATEWAY=%s\n' % gw)
f.write('DNS1=%s\n' % dns)
f.flush()
f.close()
Util.RunShellWithLog('systemctl restart NetworkManager.service')
def GetHostList():
'''
Return Hosts List
{'IP' : 'Host Name'}
'''
list = {}
try:
f = open('/etc/hosts')
bufs = f.readlines()
f.close()
for line in bufs:
if line.startswith('127.0.0.1') or \
line.startswith('::1'):
continue
splits = line.split()
if len(splits) >= 2:
list[splits[0]] = ' '.join(splits[1:])
    except Exception:
pass
return list
def SaveHostList(list):
try:
f = open('/etc/hosts')
bufs = f.readlines()
f.close()
f = open("/tmp/hosts", "w")
for line in bufs:
line = line.strip()
if line == '':
continue
if line.startswith('127.0.0.1') or \
line.startswith('::1') or \
line.startswith('#'):
f.write(line + '\n')
for i in list.keys():
f.write('%s %s\n' % (i, list[i]))
f.close()
Util.RunShellWithLog("cp -f /etc/hosts ~/hosts.bak")
Util.RunShellWithLog("cp -f /tmp/hosts /etc/hosts")
    except Exception:
pass
if __name__ == '__main__':
list = {}
SaveHostList(list)
|
python
|
'''
Created on Jan 24, 2013
@author: mdickson
'''
import inworldz.maestro.uuid as genuuid
import inworldz.util.user as user
import inworldz.util.estate as estate
import inworldz.util.properties as DefaultProperties
import inworldz.maestro.MaestroStore as store
from inworldz.maestro.ServiceBase import ServiceBase
from inworldz.maestro.MaestroError import ServiceError
class Estate(ServiceBase):
"""An Estate Instance"""
def getClass(self):
return "Estate"
def getMethods(self):
methods = [
"AssignRegion",
]
return ServiceBase.getMethods() + methods
def getAttrRO(self):
attrs = [
"estate_id",
"estate_name",
"estate_owner",
"parent_estate_id"
]
return ServiceBase.getAttrRO() + attrs
def getFuncs(self):
funcs = [
"create",
"FindByEstateID",
"FindBySimUUID"
]
return ServiceBase.getFuncs() + funcs
getClass = classmethod(getClass)
getAttrRO = classmethod(getAttrRO)
getMethods = classmethod(getMethods)
getFuncs = classmethod(getFuncs)
    def __init__(self, uuid, record=None):
        record = record or {}  # avoid sharing a mutable default argument
        ServiceBase.__init__(self, uuid, record)
self.props = DefaultProperties.instance()
self.estate_owner = record.get('estate_owner')
self.estate_name = record.get('estate_name')
self.estate_id = record.get('estate_id')
self.parent_estate_id = record.get('parent_estate_id')
@classmethod
def initialize(cls):
estate_ids = estate.LookupEstateIds()
for estate_id in estate_ids:
cls.recreate({ 'estate_id' : estate_id }, genuuid.createString())
@classmethod
def create(cls, record):
if ((not 'estate_name' in record) or
(not 'estate_owner' in record)):
raise ServiceError("Create: Invalid Estate Information provided")
# make sure the estate owner is valid
if (user.LookupUserNameById(record['estate_owner']) == None):
raise ServiceError("Create failed. Invalid estate owner specified")
record['estate_id'] = estate.CreateNewEstate(record['estate_name'], record['estate_owner'])
if (record['estate_id'] == None):
raise ServiceError("Create: Cannot create new Estate")
record['parent_estate_id'] = record['estate_id']
uuid = genuuid.createString()
new_estate = Estate(uuid, record)
return new_estate.get_uuid()
@classmethod
def recreate(cls, record, uuid):
if (not 'estate_id' in record):
raise ServiceError("Recreate: Invalid Estate Information provided")
# make sure the estate owner is valid
estate_entry = estate.LookupEstateById(record['estate_id'])
if (estate_entry == None):
raise ServiceError("Recreate: Invalid Estate Information provided")
record['estate_name'] = estate_entry[1]
record['estate_owner'] = estate_entry[2]
record['parent_estate_id'] = estate_entry[3]
        new_estate = Estate(uuid, record)  # use the uuid supplied by the caller
return new_estate.get_uuid()
@classmethod
def FindByEstateID(cls, estate_id):
# First Look in active Estates
for instance in store.get_all("Estate"):
if (instance.get_estate_id() == estate_id):
return (instance.get_uuid())
return (cls.recreate({'estate_id' : estate_id}, genuuid.createString()))
@classmethod
def FindBySimUUID(cls, sim_uuid):
estate_id = estate.FindEstateIDForRegion(sim_uuid)
return cls.FindByEstateID(estate_id)
def get_estate_id(self):
return self.estate_id
def get_estate_name(self):
return self.estate_name
def get_estate_owner(self):
return self.estate_owner
def get_parent_estate_id(self):
return self.parent_estate_id
def AssignRegion(self, region):
region_id = region.get_sim_uuid()
estate.LinkRegionToExistingEstate(region_id, self.estate_id)
|
python
|
import math
import textClasses
def title_word_feature(title, processed_text):
""" List of values from 0 to 1 rating the number title words that appear in the sentence"""
title_word_feature_values = []
# Calculate the number of common words with the title that the sentence has
word_intersection = [set(filter(lambda title_word: title_word \
in title.bag_of_words, sublist))
for sublist in [sentence.bag_of_words
for sentence in processed_text]]
for word_list in word_intersection:
title_word_feature_values.append(len(word_list)
/ len(title.bag_of_words))
return title_word_feature_values
def sentence_length_feature(sentences):
""" List of values from 0 to 1 rating the length of the sentence in comparation with the longest one """
sentence_length_feature_values = []
max_length_sentence = len(sentences[0].original.split(' '))
# Find the longest sentence
for sentence in sentences[1:]:
if len(sentence.original.split(' ')) > max_length_sentence:
max_length_sentence = len(sentence.original.split(' '))
    # Normalize the length of every sentence
for sentence in sentences:
sentence_length_feature_values.append(len(sentence.original.split(' '
)) / max_length_sentence)
return sentence_length_feature_values
def sentence_location_feature(sentences):
""" List of values from 0 to 1 rating the position of the sentence"""
sentence_location_feature_values = []
for sentence in sentences:
sentence_location_feature_values.append(1 / sentence.position)
return sentence_location_feature_values
def keyword_feature(sentences, words):
""" List of values from 0 to 1 rating the term frequency normalized by the invert frequency of the sentences """
keyword_feature_values = []
total_number_of_sentences = len(sentences)
# Calculate number of sentence where every word is
for word in words:
number_of_sentences = 0
for sentence in sentences:
if word in sentence.bag_of_words:
number_of_sentences += 1
number_of_sentences = (1 if number_of_sentences
== 0 else number_of_sentences)
            # assign term weight based on tf/isf
words[word].term_weight = words[word].abs_frequency \
* math.log10(total_number_of_sentences
/ number_of_sentences)
# Calculate the total term weight for every sentence
for sentence in sentences:
sum_of_term_weights = 0
for word in sentence.bag_of_words:
sum_of_term_weights += words[word].term_weight
keyword_feature_values.append(sum_of_term_weights)
return [x / max(keyword_feature_values) for x in
keyword_feature_values]
def pos_tag_feature(sentences, words, pos_tag):
""" List of values from 0 to 1 rating the number of words with a certain part of speech tag that appear in the sentence"""
pos_tag_words_count_list = []
    # For each sentence, count the words whose part-of-speech tag matches pos_tag
for sentence in sentences:
pos_tag_words_count_list.append(len([word for word in
sentence.bag_of_words if words[word].part_of_speech[1]
== pos_tag]))
    # Return the values normalized by the sentence with the maximum number of pos_tag words
return ([pos_tag_words_sentence / max(pos_tag_words_count_list)
for pos_tag_words_sentence in
pos_tag_words_count_list] if max(pos_tag_words_count_list)
!= 0 else [0] * len(pos_tag_words_count_list))
def phrase_feature(sentences, phrase_list):
""" List of values from 0 to 1 rating the number of phrases that appear in the sentence from a list """
    phrase_frequency = []
    # Count, for each sentence, how many of the listed phrases appear in it
for sentence in sentences:
count_phrase_per_sentence = 0
for phrase in phrase_list:
if phrase in sentence.original:
count_phrase_per_sentence += 1
phrase_frequency.append(count_phrase_per_sentence/len(sentence.bag_of_words))
return phrase_frequency
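# Toy illustration of the tf/isf weight used in keyword_feature (numbers are
# made up for the example): a word with absolute frequency 3 appearing in 2
# of 4 sentences gets weight 3 * log10(4 / 2) ~= 0.903.
#
# assert abs(3 * math.log10(4 / 2) - 0.9031) < 1e-3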
|
python
|
"""
"""
import os
import numpy as np
import pandas as pd
import xarray as xr
from osgeo import gdal
from src.utils.constants import (
REGIONS,
LANDCOVER_MAP,
LANDCOVER_PERIODS,
LANDCOVER_PADDING
)
if __name__ == "__main__":
# Project's root
os.chdir("../..")
for region in REGIONS:
region_name = region.get("name")
burn_fn = f"data/nc/MODIS/MCD64A1/{region_name}/MCD64A1_500m.nc"
burn_da = xr.open_dataset(burn_fn, mask_and_scale=False)["Burn_Date"]
landcover_folder = f"data/tif/landcover/{region_name}"
df = pd.DataFrame(columns=["year", "landcover", "interval"])
for year in np.unique(LANDCOVER_PERIODS):
landcover_fn = os.path.join(landcover_folder, f"landcover_{year}.tif")
landcover_ds = gdal.Open(landcover_fn)
landcover_arr = landcover_ds.ReadAsArray()
period = (
str(int(year) - LANDCOVER_PADDING),
str(int(year) + LANDCOVER_PADDING)
)
da = burn_da.sel(time=slice(*period))
burn_mask = (da > 0).any(axis=0)
burn_mean = (da > 0).resample(time="Y").sum().mean(axis=0).values
for value, name in LANDCOVER_MAP.items():
landcover_mask = (landcover_arr == value)
mask = (landcover_mask & burn_mask)
burn_rate = burn_mean[mask].sum() / landcover_mask.sum()
fri = 1 / burn_rate
df.loc[len(df)] = [year, name, fri]
output_folder = f"results/csv/{region_name}"
save_to = os.path.join(output_folder, "return_intervals_by_landcover.csv")
df.to_csv(save_to, index=False)
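        # Worked illustration of the return-interval formula above: if on
        # average 10% of a class's pixels burn per year (burn_rate = 0.1),
        # then fri = 1 / 0.1 = 10 years for that class and period.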
|
python
|
import fnmatch
from functools import wraps
def asgi_cors_decorator(
allow_all=False, hosts=None, host_wildcards=None, callback=None
):
hosts = hosts or []
host_wildcards = host_wildcards or []
    # We need hosts and host_wildcards entries to be bytes (b"...")
hosts = set(h.encode("utf8") if isinstance(h, str) else h for h in hosts)
host_wildcards = [
h.encode("utf8") if isinstance(h, str) else h for h in host_wildcards
]
if any(h.endswith(b"/") for h in (hosts or [])) or any(
h.endswith(b"/") for h in (host_wildcards or [])
):
assert False, "Error: CORS origin rules should never end in a /"
def _asgi_cors_decorator(app):
@wraps(app)
        async def app_wrapped_with_cors(scope, receive, send):
async def wrapped_send(event):
if event["type"] == "http.response.start":
original_headers = event.get("headers") or []
access_control_allow_origin = None
if allow_all:
access_control_allow_origin = b"*"
elif hosts or host_wildcards or callback:
incoming_origin = dict(scope.get("headers") or []).get(
b"origin"
)
if incoming_origin:
matches_hosts = incoming_origin in hosts
matches_wildcards = any(
fnmatch.fnmatch(incoming_origin, host_wildcard)
for host_wildcard in host_wildcards
)
matches_callback = False
if callback is not None:
matches_callback = callback(incoming_origin)
if matches_hosts or matches_wildcards or matches_callback:
access_control_allow_origin = incoming_origin
if access_control_allow_origin is not None:
# Construct a new event with new headers
event = {
"type": "http.response.start",
"status": event["status"],
"headers": [
p
for p in original_headers
if p[0] != b"access-control-allow-origin"
]
+ [
[
b"access-control-allow-origin",
access_control_allow_origin,
]
],
}
await send(event)
            await app(scope, receive, wrapped_send)
return app_wrapped_with_cors
return _asgi_cors_decorator
def asgi_cors(app, allow_all=False, hosts=None, host_wildcards=None, callback=None):
return asgi_cors_decorator(allow_all, hosts, host_wildcards, callback)(app)
|
python
|
from __future__ import division, print_function
import numpy as np
from scipy.constants import pi
from numpy.fft import fftshift
from scipy.fftpack import fft, ifft
try:
import accelerate
jit = accelerate.numba.jit
autojit = accelerate.numba.autojit
complex128 = accelerate.numba.complex128
float64 = accelerate.numba.float64
vectorize = accelerate.numba.vectorize
import mkl
max_threads = mkl.get_max_threads()
# mkl.set_num_threads(1)
except ImportError:
import numba
vectorize = numba.vectorize
autojit, jit = numba.autojit, numba.jit
cfunc = numba.cfunc
generated_jit = numba.generated_jit
pass
#@profile
def RK45CK(dAdzmm, u1, dz, M, n2, lamda, tsh, dt, hf, w_tiled):
"""
    Propagates the nonlinear operator for 1 step using a 5th order
    Runge-Kutta (Cash-Karp) method
    use: [A delta] = RK45CK(u1, dz)
where u1 is the initial time vector
hf is the Fourier transform of the Raman nonlinear response time
dz is the step over which to propagate
in output: A is new time vector
delta is the norm of the maximum estimated error between a 5th
order and a 4th order integration
"""
A1 = dz*dAdzmm(u1, M, n2, lamda, tsh, dt, hf, w_tiled)
u2 = A2_temp(u1, A1)
A2 = dz*dAdzmm(u2, M, n2, lamda, tsh, dt, hf, w_tiled)
u3 = A3_temp(u1, A1,A2)
A3 = dz*dAdzmm(u3, M, n2, lamda, tsh, dt, hf, w_tiled)
u4 = A4_temp(u1, A1, A2, A3)
A4 = dz*dAdzmm(u4,M, n2, lamda, tsh, dt, hf, w_tiled)
u5 = A5_temp(u1, A1, A2, A3, A4)
A5 = dz*dAdzmm(u5, M, n2, lamda, tsh, dt, hf, w_tiled)
u6 = A6_temp(u1, A1, A2, A3, A4, A5)
A6 = dz*dAdzmm(u6, M, n2, lamda, tsh, dt, hf, w_tiled)
A = A_temp(u1, A1, A3, A4, A6) # Fifth order accuracy
Afourth = Afourth_temp(u1, A1, A3, A4,A5, A6) # Fourth order accuracy
delta = np.linalg.norm(A - Afourth, 2)
return A, delta
trgt = 'cpu'
#trgt = 'parallel'
#trgt = 'cuda'
#@vectorize(['complex128(complex128,complex128,complex128,complex128,complex128,complex128)'], target=trgt)
@jit
def Afourth_temp(u1, A1, A3, A4,A5, A6):
return u1 + (2825./27648)*A1 + (18575./48384)*A3 + (13525./55296) * \
A4 + (277./14336)*A5 + (1./4)*A6
#@vectorize(['complex128(complex128,complex128,complex128,complex128,complex128)'], target=trgt)
@jit
def A_temp(u1, A1, A3, A4, A6):
return u1 + (37./378)*A1 + (250./621)*A3 + (125./594) * \
A4 + (512./1771)*A6
#@vectorize(['complex128(complex128,complex128)'], target=trgt)
@jit
def A2_temp(u1, A1):
return u1 + (1./5)*A1
#@vectorize(['complex128(complex128,complex128,complex128)'], target=trgt)
@jit
def A3_temp(u1, A1, A2):
return u1 + (3./40)*A1 + (9./40)*A2
#@vectorize(['complex128(complex128,complex128,complex128,complex128)'], target=trgt)
@jit
def A4_temp(u1, A1, A2, A3):
return u1 + (3./10)*A1 - (9./10)*A2 + (6./5)*A3
#@vectorize(['complex128(complex128,complex128,complex128,complex128,complex128)'], target=trgt)
@jit
def A5_temp(u1, A1, A2, A3, A4):
return u1 - (11./54)*A1 + (5./2)*A2 - (70./27)*A3 + (35./27)*A4
#@vectorize(['complex128(complex128,complex128,complex128,complex128,complex128,complex128)'], target=trgt)
@jit
def A6_temp(u1, A1, A2, A3, A4, A5):
return u1 + (1631./55296)*A1 + (175./512)*A2 + (575./13824)*A3 +\
(44275./110592)*A4 + (253./4096)*A5
def RK34(dAdzmm, u1, dz, M, n2, lamda, tsh, dt, hf, w_tiled):
"""
    Propagates the nonlinear operator for 1 step using an embedded
    3rd/4th order Runge-Kutta method
    use: [A delta] = RK34(u1, dz)
    where u1 is the initial time vector
    hf is the Fourier transform of the Raman nonlinear response time
    dz is the step over which to propagate
    in output: A is new time vector
    delta is the norm of the maximum estimated error between the 4th
    order and the 3rd order integration
"""
#third order:
A1 = dz*dAdzmm(u1,
M, n2, lamda, tsh, dt, hf, w_tiled)
A2 = dz*dAdzmm(u1 + 0.5*A1,
M, n2, lamda, tsh, dt, hf, w_tiled)
A3 = dz*dAdzmm(u1 - A1 + 2*A2,
M, n2, lamda, tsh, dt, hf, w_tiled)
Athird = u1 + 1/6 * (A1 + 4 * A2 + A3)
A3 = dz*dAdzmm(u1 + 0.5*A2,
M, n2, lamda, tsh, dt, hf, w_tiled)
A4 = dz*dAdzmm(u1 + A3,
M, n2, lamda, tsh, dt, hf, w_tiled)
A = u1 + 1/6 * (A1 + 2 * A2 + 2* A3 + A4)
delta = np.linalg.norm(A - Athird, 2)
return A, delta
def dAdzmm_roff_s0(u0, M, n2, lamda, tsh, dt, hf, w_tiled):
"""
calculates the nonlinear operator for a given field u0
use: dA = dAdzmm(u0)
"""
#print(M,lamda)
M3 = uabs(np.ascontiguousarray(u0.real), np.ascontiguousarray(u0.imag))
N = nonlin_ker(M, u0, M3)
N *= -1j*n2*2*pi/lamda
return N
#@profile
def dAdzmm_roff_s1(u0, M, n2, lamda, tsh, dt, hf, w_tiled):
"""
calculates the nonlinear operator for a given field u0
use: dA = dAdzmm(u0)
"""
#print('no')
M3 = uabs(np.ascontiguousarray(u0.real), np.ascontiguousarray(u0.imag))
N = nonlin_ker(M, u0, M3)
N = -1j*n2*2*pi/lamda*(N + tsh*ifft((w_tiled)*fft(N)))
return N
def dAdzmm_ron_s0(u0, M, n2, lamda, tsh, dt, hf, w_tiled):
"""
calculates the nonlinear operator for a given field u0
use: dA = dAdzmm(u0)
"""
print(u0.real.flags)
print(u0.imag.flags)
M3 = uabs(np.ascontiguousarray(u0.real), np.ascontiguousarray(u0.imag))
temp = fftshift(ifft(fft(M3)*hf))
# for i in (M, u0,M3, dt, temp):
# print(i.dtype)
N = nonlin_ram(M, u0, M3, dt, temp)
N *= -1j*n2*2*pi/lamda
return N
"""
def dAdzmm_ron_s1(u0,M,n2,lamda,tsh,dt,hf, w_tiled):
M3 = np.abs(u0)**2
N = (0.82*M3 + 0.18*dt*fftshift(ifft(fft(M3)*hf)))*M *u0
N = -1j*n2*2*pi/lamda*(N + tsh*ifft((w_tiled)*fft(N)))
return N
"""
#from time import time
#import sys
#@vectorize('complex128(complex128,float64,float64,float64,float64,float64,complex128,float64)')
#@profile
def dAdzmm_ron_s1(u0, M, n2, lamda, tsh, dt, hf, w_tiled):
# calculates the nonlinear operator for a given field u0
# use: dA = dAdzmm(u0)
#t1 = time()
#M3 = np.abs(u0)**2
#print(u0.real.flags)
#print(u0.imag.flags)
M3 = uabs(u0.real, u0.imag)
# print(np.isfortran(u0))
# print(np.isfortran(M3))
# print(np.isfortran(fft(M3)*hf))
temp = fftshift(ifft(fft(M3)*hf))
# for i in (M, u0,M3, dt, temp):
# print(i.dtype)
N = nonlin_ram(M, u0, M3, dt, temp)
# print(np.isfortran(N))
#print(np.isfortran(w_tiled * fft(N)))
# sys.exit()
#N = M*u0*(0.82*M3 + 0.18*dt*temp)
#temp = multi(w_tiled,fft(N))
N = -1j*n2*2*pi/lamda * (N + tsh*ifft(w_tiled * fft(N)))
#temp = ifft(w_tiled*fft(N))
#N = self_step(n2, lamda,N, tsh, temp,np.pi )
#t2 = time() - t1
# print(t2)
# sys.exit()
return N
trgt = 'cpu'
#trgt = 'parallel'
#trgt = 'cuda'
@vectorize(['complex128(complex128,complex128)'], target=trgt)
def multi(x, y):
return x*y
@vectorize(['complex128(complex128,complex128)'], target=trgt)
def add(x, y):
return x + y
@vectorize(['float64(float64,float64)'], target=trgt)
def uabs(u0r, u0i):
return u0r*u0r + u0i*u0i
@vectorize(['complex128(float64,complex128,\
float64)'], target=trgt)
def nonlin_ker(M, u0, M3):
return 0.82*M*u0*M3
@vectorize(['complex128(float64,complex128,\
float64,float64,complex128)'], target=trgt)
def nonlin_ram(M, u0, M3, dt, temp):
return M*u0*(0.82*M3 + 0.18*dt*temp)
@vectorize(['complex128(float64,float64,complex128,\
float64,complex128,float64)'], target=trgt)
def self_step(n2, lamda, N, tsh, temp, rp):
return -1j*n2*2*rp/lamda*(N + tsh*temp)
#Dormand-Prince - not found to be faster than Cash-Karp
#@autojit
#
def RK45DP(dAdzmm, u1, dz, M, n2, lamda, tsh, dt, hf, w_tiled):
A1 = dz*dAdzmm(u1,
M, n2, lamda, tsh, dt, hf, w_tiled)
A2 = dz*dAdzmm(u1 + (1./5)*A1,
M,n2,lamda,tsh,dt,hf, w_tiled)
A3 = dz*dAdzmm(u1 + (3./40)*A1 + (9./40)*A2,
M,n2,lamda,tsh,dt,hf, w_tiled)
A4 = dz*dAdzmm(u1 + (44./45)*A1 - (56./15)*A2 + (32./9)*A3,
M,n2,lamda,tsh,dt,hf, w_tiled)
A5 = dz*dAdzmm(u1 + (19372./6561)*A1 - (25360./2187)*A2 + (64448./6561)*A3 - (212./729)*A4,
M,n2,lamda,tsh,dt,hf, w_tiled)
A6 = dz*dAdzmm(u1 + (9017./3168)*A1 - (355./33)*A2 + (46732./5247)*A3 + (49./176)*A4 - (5103./18656)*A5,
M,n2,lamda,tsh,dt,hf, w_tiled)
A = u1+ (35./384)*A1 + (500./1113)*A3 + (125./192)*A4 - (2187./6784)*A5 + (11./84)*A6
A7 = dz*dAdzmm(A,
M,n2,lamda,tsh,dt,hf, w_tiled)
Afourth = u1 + (5179/57600)*A1 + (7571/16695)*A3 + (393/640)*A4 - (92097/339200)*A5 + (187/2100)*A6+ (1/40)*A7#Fourth order accuracy
delta = np.linalg.norm(A - Afourth,2)
return A, delta
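# Hedged sketch of how the (A, delta) pair returned by the steppers above is
# typically used for adaptive step-size control (the driver loop is not part
# of this file; 'tol' and the 0.2 exponent follow the standard embedded
# Runge-Kutta heuristic):
#
# def adaptive_step(dAdzmm, u, dz, tol, *args):
#     while True:
#         A, delta = RK45CK(dAdzmm, u, dz, *args)
#         if delta <= tol:
#             return A, dz * min(2.0, 0.9 * (tol / max(delta, 1e-300)) ** 0.2)
#         dz *= max(0.1, 0.9 * (tol / delta) ** 0.2)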
|
python
|
class Solution:
def countConsistentStrings(self, allowed: str, words: List[str]) -> int:
c = 0
allowed = set(allowed)
for word in words:
# word = set(word)
for letter in word:
if letter not in allowed:
break
else:
c+=1
return c
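# Equivalent set-based sketch (an illustrative alternative, not the reference
# solution): a word is consistent iff its character set is a subset of
# 'allowed'.
#
# def count_consistent(allowed: str, words) -> int:
#     allowed = set(allowed)
#     return sum(set(word) <= allowed for word in words)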
|
python
|
#!/usr/bin/python
import sys, ctypes
"""
rdi contains argc
rsi contains argv (in reverse) - Note rdi decrements
"""
def echo_args(): # See function 'echo_args' in echo.asm
global rdi, rsi, rdx, stack
stack.append(rdi) # "push"
stack.append(rsi) # "push"
## Stack alignment?? rsp = index or something?
rdi = rsi[rdi-1] # Take next arg.. they keep getting popped (below)
puts()
## add rsp, 8 --- Restore stack. Need to implement stack class or something
## to really illustrate it.
rsi = stack.pop()
rdi = stack.pop()
rsi.pop()
rdi -= 1
if rdi != 0:
echo_args()
return rax # Exit code 0?
def puts():
    print(rdi)
def main(): # See function 'main' in maxofthree.c
global rdi, rsi, rdx
rsi = list(reversed(sys.argv))
rdi = len(sys.argv)
echo_args()
exit(0)
if __name__ == "__main__":
""" Initialize registers (just because)
"""
rax = ctypes.c_int(64)
rdi = None
rsi = None
stack = []
main()
|
python
|
from abc import ABC, abstractmethod
from zpy.api.reponse import Builder
from zpy.api.stages import Decrypt
from zpy.logger import g_log
from zpy.api.errors import ErrorBuilder
from flask import Flask
from typing import Any
from flask.wrappers import Request, Response
from zpy.utils.Encryptor import AESEncryptor
import json
__author__ = "Noé Cruz | [email protected]"
__copyright__ = "Copyright 2021, Small APi Project"
__credits__ = ["Noé Cruz", "Zurck'z"]
__license__ = "upax"
__version__ = "0.0.1"
__maintainer__ = "Noé Cruz"
__email__ = "[email protected]"
__status__ = "Dev"
# Middlewares | Zurck'Z Middlware
# Base middleware for flask
class ZMiddleware(ABC):
def __init__(self, app: Flask, **kwargs) -> None:
super().__init__()
self.app = app
self.kwargs = kwargs
@abstractmethod
def __call__(self, environ: Any, start_response: Any) -> Any:
return self.app(environ, start_response)
# Custom Middlewares
# Encrypt body of responses with AES algorithm
class EncryptMiddleWare(ZMiddleware):
def __init__(self, app: Flask, **kwargs) -> None:
super().__init__(app, **kwargs)
self.app = app
    def __call__(self, environ: Any, start_response: Any) -> Any:
        # Response-body encryption is not implemented yet; pass the request
        # through unchanged.
        return super().__call__(environ, start_response)
# Custom Middlewares
# Encrypt body of responses with AES algorithm
class DecryptMiddleWare(ZMiddleware):
def __init__(self, app: Flask, **kwargs):
super().__init__(app, **kwargs)
self.app = app
def __call__(self, environ, start_response):
try:
if environ["request"]:
aes: str = None
                if (
                    self.kwargs is not None and "aes_sk" in self.kwargs
                ):  # ! WARNING: hard-coded key used to extract the AES secret key
aes = self.kwargs["aes_sk"]
encrypt_data = environ["request"]
decrypt_data = AESEncryptor.decrypt_ws_response(
encrypt_data, secret_key=aes
)
environ["request"] = decrypt_data
return self.app(environ, start_response)
except Exception as e:
stage = Decrypt()
g_log(e, stage)
res = Response(
json.dumps(
Builder.error(
errors=[
ErrorBuilder().common(
"Threw exception on decrypt process",
"Request supplied not have a valid format",
stage,
)
]
)
),
mimetype="text/json",
status=500,
)
return res(environ, start_response)
class ParserMiddleWare(ZMiddleware):
"""
    Default middleware: parses the JSON request body into environ["request"]
"""
def __init__(self, app: Flask, **kwargs):
super().__init__(app, **kwargs)
self.app = app
def __call__(self, environ, start_response):
request = Request(environ)
try:
if request.data:
environ["request"] = json.loads(request.data)
else:
environ["request"] = None
return self.app(environ, start_response)
except Exception as e:
stage = Decrypt()
g_log(e, stage)
res = Response(
json.dumps(
Builder.error(
errors=[
ErrorBuilder().common(
"Threw exception on decrypt process",
"Request supplied not have a valid format",
stage,
)
]
)
),
mimetype="text/json",
status=500,
)
return res(environ, start_response)
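# Hypothetical wiring sketch (illustrative; the aes_sk value is a
# placeholder): WSGI middlewares wrap the Flask app's wsgi_app callable, and
# the outermost wrapper runs first, so the parser must wrap the decryptor.
#
# app = Flask(__name__)
# app.wsgi_app = ParserMiddleWare(DecryptMiddleWare(app.wsgi_app, aes_sk="..."))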
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms
from maskrcnn_benchmark.structures.boxlist_ops import remove_small_boxes
from ..utils import cat
from .utils import permute_and_flatten
class RPNPostProcessor(torch.nn.Module):
"""
Performs post-processing on the outputs of the RPN boxes, before feeding the
proposals to the heads
"""
def __init__(
self,
pre_nms_top_n,
post_nms_top_n,
nms_thresh,
min_size,
box_coder=None,
fpn_post_nms_top_n=None,
fpn_post_nms_per_batch=True,
):
"""
Arguments:
pre_nms_top_n (int)
post_nms_top_n (int)
nms_thresh (float)
min_size (int)
box_coder (BoxCoder)
fpn_post_nms_top_n (int)
"""
super(RPNPostProcessor, self).__init__()
self.pre_nms_top_n = pre_nms_top_n
self.post_nms_top_n = post_nms_top_n
self.nms_thresh = nms_thresh
self.min_size = min_size
if box_coder is None:
box_coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
self.box_coder = box_coder
if fpn_post_nms_top_n is None:
fpn_post_nms_top_n = post_nms_top_n
self.fpn_post_nms_top_n = fpn_post_nms_top_n
self.fpn_post_nms_per_batch = fpn_post_nms_per_batch
def add_gt_proposals(self, proposals, targets):
"""
Arguments:
proposals: list[BoxList]
targets: list[BoxList]
"""
# Get the device we're operating on
device = proposals[0].bbox.device
gt_boxes = [target.copy_with_fields([]) for target in targets]
# later cat of bbox requires all fields to be present for all bbox
# so we need to add a dummy for objectness that's missing
for gt_box in gt_boxes:
gt_box.add_field("objectness", torch.ones(len(gt_box), device=device))
proposals = [
cat_boxlist((proposal, gt_box))
for proposal, gt_box in zip(proposals, gt_boxes)
]
return proposals
def forward_for_single_feature_map(self, anchors_left, anchors_right, objectness_left, objectness_right,\
box_regression_left, box_regression_right):
"""
Arguments:
anchors: list[BoxList]
objectness: tensor of size N, A, H, W
box_regression: tensor of size N, A * 4, H, W
"""
device = objectness_left.device
N, A, H, W = objectness_left.shape
# put in the same format as anchors
objectness_left = permute_and_flatten(objectness_left, N, A, 1, H, W).view(N, -1)
objectness_right = permute_and_flatten(objectness_right, N, A, 1, H, W).view(N, -1)
objectness_left = objectness_left.sigmoid()
objectness_right = objectness_right.sigmoid()
box_regression_left = permute_and_flatten(box_regression_left, N, A, 4, H, W)
box_regression_right = permute_and_flatten(box_regression_right, N, A, 4, H, W)
num_anchors = A * H * W
pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
objectness_left, topk_idx_left = objectness_left.topk(pre_nms_top_n, dim=1, sorted=True)
objectness_right, topk_idx_right = objectness_right.topk(pre_nms_top_n, dim=1, sorted=True)
batch_idx = torch.arange(N, device=device)[:, None]
box_regression_left = box_regression_left[batch_idx, topk_idx_left]
box_regression_right = box_regression_right[batch_idx, topk_idx_right]
image_shapes = [box.size for box in anchors_left]
concat_anchors_left = torch.cat([a.bbox for a in anchors_left], dim=0)
concat_anchors_left = concat_anchors_left.reshape(N, -1, 4)[batch_idx, topk_idx_left]
concat_anchors_right = torch.cat([a.bbox for a in anchors_right], dim=0)
concat_anchors_right = concat_anchors_right.reshape(N, -1, 4)[batch_idx, topk_idx_right]
proposals_left = self.box_coder.decode(
box_regression_left.view(-1, 4), concat_anchors_left.view(-1, 4)
)
proposals_right = self.box_coder.decode(
box_regression_right.view(-1, 4), concat_anchors_right.view(-1, 4)
)
proposals_left = proposals_left.view(N, -1, 4)
proposals_right = proposals_right.view(N, -1, 4)
result_left = []; result_right = []
for proposal_left, score_left, proposal_right, score_right, im_shape in zip(proposals_left, objectness_left,\
proposals_right, objectness_right, image_shapes):
boxlist_left = BoxList(proposal_left, im_shape, mode="xyxy")
boxlist_right = BoxList(proposal_right, im_shape, mode="xyxy")
boxlist_left.add_field("objectness", score_left)
boxlist_right.add_field("objectness", score_right)
boxlist_left = boxlist_left.clip_to_image(remove_empty=False)
boxlist_right = boxlist_right.clip_to_image(remove_empty=False)
boxlist_left = remove_small_boxes(boxlist_left, self.min_size)
boxlist_right = remove_small_boxes(boxlist_right, self.min_size)
boxlist_left, boxlist_right = boxlist_nms(
boxlist_left, boxlist_right,
self.nms_thresh,
max_proposals=self.post_nms_top_n,
score_field="objectness",
)
result_left.append(boxlist_left)
result_right.append(boxlist_right)
return result_left, result_right
def forward(self, anchors_left, anchors_right, objectness_left, objectness_right, box_regression_left,\
box_regression_right, targets_left=None, targets_right=None):
"""
Arguments:
anchors: list[list[BoxList]]
objectness: list[tensor]
box_regression: list[tensor]
Returns:
boxlists (list[BoxList]): the post-processed anchors, after
applying box decoding and NMS
"""
sampled_boxes_left = []; sampled_boxes_right = []
        num_levels = len(objectness_left)  # number of FPN levels
anchors_left = list(zip(*anchors_left))
anchors_right = list(zip(*anchors_right))
for aleft, aright, oleft, oright, bleft, bright in zip(anchors_left, anchors_right, objectness_left, \
objectness_right, box_regression_left, box_regression_right):
sample_left, sample_right = self.forward_for_single_feature_map(aleft, aright, oleft, oright, bleft, bright)
sampled_boxes_left.append(sample_left)
sampled_boxes_right.append(sample_right)
boxlists_left = list(zip(*sampled_boxes_left))
boxlists_right = list(zip(*sampled_boxes_right))
boxlists_left = [cat_boxlist(boxlist_left) for boxlist_left in boxlists_left]
boxlists_right = [cat_boxlist(boxlist_right) for boxlist_right in boxlists_right]
if num_levels > 1:
boxlists_left = self.select_over_all_levels(boxlists_left)
boxlists_right = self.select_over_all_levels(boxlists_right)
# append ground-truth bboxes to proposals
if self.training and targets_left is not None and targets_right is not None:
boxlists_left = self.add_gt_proposals(boxlists_left, targets_left)
boxlists_right = self.add_gt_proposals(boxlists_right, targets_right)
return boxlists_left, boxlists_right
def select_over_all_levels(self, boxlists):
num_images = len(boxlists)
# different behavior during training and during testing:
# during training, post_nms_top_n is over *all* the proposals combined, while
# during testing, it is over the proposals for each image
# NOTE: it should be per image, and not per batch. However, to be consistent
# with Detectron, the default is per batch (see Issue #672)
if self.training and self.fpn_post_nms_per_batch:
objectness = torch.cat(
[boxlist.get_field("objectness") for boxlist in boxlists], dim=0
)
box_sizes = [len(boxlist) for boxlist in boxlists]
post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness))
_, inds_sorted = torch.topk(objectness, post_nms_top_n, dim=0, sorted=True)
inds_mask = torch.zeros_like(objectness, dtype=torch.uint8) #torch.bool
inds_mask[inds_sorted] = 1
inds_mask = inds_mask.split(box_sizes)
for i in range(num_images):
boxlists[i] = boxlists[i][inds_mask[i]]
else:
for i in range(num_images):
objectness = boxlists[i].get_field("objectness")
post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness))
_, inds_sorted = torch.topk(
objectness, post_nms_top_n, dim=0, sorted=True
)
boxlists[i] = boxlists[i][inds_sorted]
return boxlists
def make_rpn_postprocessor(config, rpn_box_coder, is_train):
fpn_post_nms_top_n = config.MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN
if not is_train:
fpn_post_nms_top_n = config.MODEL.RPN.FPN_POST_NMS_TOP_N_TEST
pre_nms_top_n = config.MODEL.RPN.PRE_NMS_TOP_N_TRAIN
post_nms_top_n = config.MODEL.RPN.POST_NMS_TOP_N_TRAIN
if not is_train:
pre_nms_top_n = config.MODEL.RPN.PRE_NMS_TOP_N_TEST
post_nms_top_n = config.MODEL.RPN.POST_NMS_TOP_N_TEST
fpn_post_nms_per_batch = config.MODEL.RPN.FPN_POST_NMS_PER_BATCH
nms_thresh = config.MODEL.RPN.NMS_THRESH
min_size = config.MODEL.RPN.MIN_SIZE
box_selector = RPNPostProcessor(
pre_nms_top_n=pre_nms_top_n,
post_nms_top_n=post_nms_top_n,
nms_thresh=nms_thresh,
min_size=min_size,
box_coder=rpn_box_coder,
fpn_post_nms_top_n=fpn_post_nms_top_n,
fpn_post_nms_per_batch=fpn_post_nms_per_batch,
)
return box_selector
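# Small illustration of the per-batch selection in select_over_all_levels
# (standalone toy, not part of the original file): one topk over the
# concatenated objectness, then a boolean mask split back per image.
#
# import torch
# obj = torch.tensor([0.9, 0.1, 0.8, 0.3])   # two images, two boxes each
# _, idx = torch.topk(obj, 2)                # keeps 0.9 and 0.8
# mask = torch.zeros_like(obj, dtype=torch.bool)
# mask[idx] = True
# per_image = mask.split([2, 2])             # (T, F) and (T, F)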
|
python
|
from __future__ import print_function
import argparse
import shutil
import torch
import torchvision
import random
import os
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import matplotlib.pyplot as plt
writer = SummaryWriter()
from resnet import ResNet_small
from torchsummary import summary
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
def train(model, dataloader, optimizer, scheduler, loss_fn, epoch):
# Set the model into train mode
model.train()
train_loss = 0
correct = 0
total = 0
datacount = len(dataloader)
for batch_idx, (train_batch, labels_batch) in enumerate(dataloader):
# move the data onto the device
train_batch, labels_batch = train_batch.to(device), labels_batch.to(device)
optimizer.zero_grad()
# compute model outputs and loss
outputs = model(train_batch)
loss = loss_fn(outputs, labels_batch.squeeze())
loss.backward()
# after computing gradients based on current batch loss,
# apply them to parameters
optimizer.step()
scheduler.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += labels_batch.size(0)
correct += predicted.eq(labels_batch.squeeze()).sum().item()
# write to tensorboard
writer.add_scalar(
"train/loss",
train_loss / (batch_idx + 1),
(datacount * (epoch + 1)) + (batch_idx + 1),
)
writer.add_scalar(
"train/accuracy",
100.0 * correct / total,
(datacount * (epoch + 1)) + (batch_idx + 1),
)
writer.add_scalar(
"train/lr",
scheduler._last_lr[0],
(datacount * (epoch + 1)) + (batch_idx + 1),
)
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(train_batch),
len(dataloader.dataset),
100.0 * batch_idx / len(dataloader),
(train_loss / (batch_idx + 1)),
# loss,
),
end="\r",
flush=True,
)
print()
return train_loss / datacount, 100.0 * correct / total
def test(model, dataloader, loss_fn, epoch):
model.eval()
test_loss = 0
correct = 0
total = 0
datacount = len(dataloader)
with torch.no_grad():
for batch_idx, (test_batch, labels_batch) in enumerate(dataloader):
# move the data onto device
test_batch, labels_batch = test_batch.to(device), labels_batch.to(device)
# compute the model output
outputs = model(test_batch)
loss = loss_fn(outputs, labels_batch.squeeze())
test_loss += loss.item()
_, predicted = outputs.max(1)
total += labels_batch.size(0)
correct += predicted.eq(labels_batch.squeeze()).sum().item()
# log the test_loss
writer.add_scalar(
"test/loss",
test_loss / (batch_idx + 1),
(datacount * (epoch + 1)) + (batch_idx + 1),
)
writer.add_scalar(
"test/accuracy",
100.0 * correct / total,
(datacount * (epoch + 1)) + (batch_idx + 1),
)
test_loss = test_loss / datacount
acc = 100 * correct / total
print("Test accuracy:", acc)
return test_loss, acc
def save_ckp(state, checkpoint_dir):
f_path = "gender-best-checkpoint.pt"
torch.save(state, f_path)
def main():
# Training settings
parser = argparse.ArgumentParser(description="PyTorch GENDER CV LAB")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 128)",
)
parser.add_argument(
"--epochs",
type=int,
default=200,
metavar="N",
help="number of epochs to train (default: 200)",
)
parser.add_argument(
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
parser.add_argument(
"--save_model",
action="store_true",
default=False,
help="For Saving the current Model",
)
parser.add_argument(
"--load_checkpoint",
type=str,
default=False,
help="Path of checkpoint to restore, if none will start training from 0",
)
args = parser.parse_args()
random.seed(args.seed)
os.environ["PYTHONHASHSEED"] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
train_kwargs = {"batch_size": args.batch_size}
test_kwargs = {"batch_size": args.batch_size}
if use_cuda:
cuda_kwargs = {"num_workers": 8, "pin_memory": True, "shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
# Load
x_train = np.load("data/x_train.npy")
x_test = np.load("data/x_test.npy")
x_train = x_train / 255
x_test = x_test / 255
x_train = torch.from_numpy(x_train).squeeze().permute(0, 3, 1, 2).float()
x_test = torch.from_numpy(x_test).squeeze().permute(0, 3, 1, 2).float()
y_train = np.load("data/y_train.npy")
y_test = np.load("data/y_test.npy")
y_train = torch.from_numpy(y_train).squeeze().long()
y_test = torch.from_numpy(y_test).squeeze().long()
dataset1 = torch.utils.data.TensorDataset(x_train, y_train.unsqueeze(1))
dataset2 = torch.utils.data.TensorDataset(x_test, y_test.unsqueeze(1))
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = ResNet_small().to(device)
print(summary(model, (3, 100, 100)))
print(
"Trainable parameters",
sum(p.numel() for p in model.parameters() if p.requires_grad),
)
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer, max_lr=0.1, steps_per_epoch=len(train_loader), epochs=200
) # epoch 187
epoch = 1
loss = nn.CrossEntropyLoss()
if args.load_checkpoint:
print("Loading checkpoint args.load_checkpoint")
checkpoint = torch.load(args.load_checkpoint)
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
scheduler.load_state_dict(checkpoint["scheduler"])
epoch = checkpoint["epoch"]
best_acc = 0
l_train_loss = []
l_test_loss = []
l_train_acc = []
l_test_acc = []
l_lr = []
for epoch in range(epoch, args.epochs + 1):
train_loss, train_acc = train(
model, train_loader, optimizer, scheduler, loss, epoch
)
test_loss, test_acc = test(model, test_loader, loss, epoch)
if test_acc > best_acc:
best_acc = test_acc
if test_acc > 97.0:
print("Error < 3.0 achieved, stopped training")
break
if args.save_model and test_acc >= best_acc:
checkpoint = {
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
}
print("Saving checkpoint as best model to gender-best-checkpoint.pt")
save_ckp(checkpoint, "")
l_train_loss.append(train_loss)
l_test_loss.append(test_loss)
l_train_acc.append(train_acc)
l_test_acc.append(test_acc)
l_lr.append(scheduler._last_lr[0])
# PLOTS
fig = plt.figure()
plt.plot(l_train_loss, color="red", label="Train")
plt.plot(l_test_loss, color="blue", label="Test")
plt.xlabel("Epochs", fontsize=10)
plt.ylabel("Loss", fontsize=8)
plt.legend()
plt.grid()
fig.savefig("figures/gender_loss.png")
plt.close()
fig = plt.figure()
plt.plot(l_train_acc, color="red", label="Train")
plt.plot(l_test_acc, color="blue", label="Test")
plt.xlabel("Epochs", fontsize=10)
plt.ylabel("Accuracy", fontsize=8)
plt.legend()
plt.grid()
fig.savefig("figures/gender_acc.png")
plt.close()
fig = plt.figure()
plt.plot(l_lr, color="orange", label="Learning rate")
plt.xlabel("Epochs", fontsize=10)
plt.ylabel("Learning rate", fontsize=8)
plt.legend()
plt.grid()
fig.savefig("figures/gender_lr.png")
plt.close()
if __name__ == "__main__":
main()
|
python
|
"""
These test cover Google searches
"""
import pytest
from pages.result import GoogleResultPage
from pages.search import GoogleSearchPage
@pytest.mark.parametrize('phrase', ['nintendo', 'xbox', 'steam'])
def test_basic_google_search(browser, phrase):
search_page = GoogleSearchPage(browser)
result_page = GoogleResultPage(browser)
search_page.load()
search_page.search(phrase)
assert phrase == result_page.search_input_value()
# for title in result_page.result_link_titles():
# assert phrase.lower() in title.lower()
titles = result_page.result_link_titles()
matches = [t for t in titles if phrase.lower() in t.lower()]
assert len(matches) > 0
assert phrase in result_page.title()
|
python
|
from modern_greek_accentuation.accentuation import is_accented, where_is_accent, put_accent, count_syllables,\
put_accent_on_the_antepenultimate, put_accent_on_the_penultimate, remove_all_diacritics, put_accent_on_the_ultimate
from modern_greek_accentuation.syllabify import modern_greek_syllabify
from modern_greek_accentuation.resources import vowels
from ..resources import greek_corpus, irregular_comparatives, irregular_comparative_adverbs
from modern_greek_inflexion.exceptions import NotLegalAdjectiveException
def create_all_basic_adj_forms(adj, inflection=None):
"""
:param inflection: if relevant, add 'aklito' flag if it is certain does not have any declination (like μωβ)
:param adj: masc nom sg form (`ωραίος`)
:return: dictionary with keys:
'adj': masc, fem, neut forms as a string divided with / ('ωραίος/ωραία/ωραίο') if alternatives, they are added and
separated with a coma
'comparative': if exists in form parathetiko + ',' + alt_parathetiko + '/' + uperthetiko + ',' + alt_uperthetiko with
form only in masc sing nom
'adverb': adverb form, if alternatives, then separated with coma
'adverb_comparative': if exists, adverb_parathetiko + ',' + alt_adverb_parathetiko + '/' + adverb_uperthetiko + ',' + alt_adverb_uperthetiko
"""
    # correct possible errors in the word list
if adj[-2:] == 'ον' and adj + 'τα' in greek_corpus:
adj = adj[:-2] + 'ων'
elif adj[-2:] == 'ές' and adj[:-2] + 'ής' in greek_corpus:
# ['εκκρεμές', 'λυκαυγές', 'αλκαλοειδές']:
adj = adj[:-2] + 'ής'
elif adj[-2:] == 'έν' and adj[:-2] + 'είς' in greek_corpus:
# ['ανακοινωθέν']:
adj = adj[:-2] + 'είς'
elif adj[-2:] == 'ού':
if adj[:-2] + 'άς' in greek_corpus:
adj = adj[:-2] + 'άς'
elif put_accent_on_the_penultimate(adj[:-2] + 'ης') in greek_corpus:
adj = put_accent_on_the_penultimate(adj[:-2] + 'ης')
elif adj[-1] == 'ί' and adj[:-1] + 'ής' in greek_corpus:
adj = adj[:-1] + 'ής'
accent = where_is_accent(adj, true_syllabification=False)
adj_temp = {'adj': 'masc,fem,neuter', 'comparative': '', 'adverb': '', 'adverb_comparative': ''}
adj_forms = []
# most basic case -os
if adj[-2:] in ['ός', 'ος']:
masc = adj
adj_forms.append(masc)
if accent == 'ultimate':
fem = adj[:-2] + 'ή'
else:
fem = adj[:-2] + 'η'
fem_alt = None
if adj[-3] in vowels and count_syllables(adj) <= 2:
if accent == 'ultimate':
fem = adj[:-2] + 'ά'
else:
fem = adj[:-2] + 'α'
elif adj[-3] in vowels and count_syllables(adj) > 2 and not is_accented(modern_greek_syllabify(adj)[-3]):
if accent == 'ultimate':
fem = adj[:-2] + 'ά'
else:
fem = adj[:-2] + 'α'
if adj[-3] in ['κ', 'θ', 'χ']:
if accent == 'ultimate':
fem_alt = adj[:-2] + 'ιά'
else:
fem_alt = adj[:-2] + 'ια'
if fem in greek_corpus and fem_alt in greek_corpus:
fem = fem + ',' + fem_alt
elif fem not in greek_corpus and fem_alt in greek_corpus:
fem = fem_alt
        elif fem in greek_corpus and fem_alt not in greek_corpus:
            pass  # the regular form is attested, keep it
        else:
            # for the most part forms in -η should be correct, but the adjective may be
            # too rare to appear in the db; check for -α by looking for a genitive in -ας in the db
if accent == 'ultimate':
gen = adj[:-2] + 'άς'
beta_fem = adj[:-2] + 'ά'
else:
gen = adj[:-2] + 'ας'
beta_fem = adj[:-2] + 'α'
if gen in greek_corpus:
fem = beta_fem
            # if it is missing from the db, the best guess is still to keep the form in -η
adj_forms.append(fem)
neuter = adj[:-1]
adj_forms.append(neuter)
elif adj[-2:] in ['ής', 'ης']:
# first check which type
stem = adj[:-2]
if stem + 'ικο' in greek_corpus:
# type hs, a, iko, here accent is always on the last syllable of the stem
masc = adj
fem = stem + 'α'
if stem + 'ισσα' in greek_corpus:
fem = stem + 'ισσα'
neuter = stem + 'ικο'
        elif where_is_accent(adj) == 'ultimate' and (stem + 'ί' in greek_corpus or stem + 'ιά' in greek_corpus):
# type, hs, ia, i, mostly colors
masc = adj
fem = put_accent(stem + 'ια', accent)
neuter = put_accent(stem + 'ι', accent)
elif put_accent(stem + 'ους', accent, true_syllabification=False) in greek_corpus:
# type hs, hs, es
masc, fem = adj, adj
neuter = put_accent(stem + 'ες', accent, true_syllabification=False)
if accent != 'ultimate' and neuter not in greek_corpus:
neuter = put_accent(stem + 'ες', 'antepenultimate', true_syllabification=False)
        elif stem + 'ού' in greek_corpus:
            # type kafetzhs, kafetzou, but is it an adjective?
masc = adj
fem = adj[:-2] + 'ού'
neuter = adj[:-1] + 'δικο'
else:
"""
In cases where my corpus cannot help me, I will surmise that it's hs, a (or issa), iko
"""
if accent == 'penultimate':
if adj.endswith('ώδης'):
masc, fem = adj, adj
neuter = stem + 'ες'
else:
masc = adj
fem = stem + 'α'
if stem + 'ισσα' in greek_corpus:
fem = stem + 'ισσα'
neuter = stem + 'ικο'
elif accent == 'ultimate':
masc, fem = adj, adj
neuter = stem + 'ές'
elif adj[-3:] == 'ους':
masc, fem = adj, adj
neuter = adj[:-1] + 'ν'
elif adj[-2:] in ['υς', 'ύς'] or adj in ['γλυκύ']:
# my database is unfortunately not that great...
stem = adj[:-2]
masc = adj
neuter = adj[:-1]
        if adj in ['γλυκύ']:
            # unfortunately there are some mistakes in my word list where inflected forms
            # are given as the lemma, so they have to be corrected in this way
stem = adj[:-1]
masc = adj + 'ς'
neuter = adj
fem = stem + 'ιά'
if fem + 'ς' not in greek_corpus:
# look for gen because nom fem can be mistaken for acc pl
fem_eia = stem + 'εία'
if fem_eia in greek_corpus:
fem = fem_eia
if adj[-5:] == 'πολύς':
fem = adj[:-5] + 'πολλή'
elif adj[-2:] in ['ων', 'ών']:
stem = adj[:-2]
masc = adj
fem = None
neuter = None
if accent == 'penultimate' or not accent:
fem = stem + 'ουσα'
neuter = stem + 'ον'
if accent == 'ultimate' or not accent:
fem = stem + 'ούσα'
neuter = stem + 'ούν'
neuter_alt_1 = stem + 'ών'
neuter_alt_2 = stem + 'ούν'
if neuter + 'τα' in greek_corpus or neuter + 'τες' in greek_corpus:
fem = stem + 'ούσα'
elif neuter_alt_1 + 'τα' in greek_corpus or neuter_alt_1 + 'τες' in greek_corpus or adj in ['ζων', 'κυβερνών', 'επιζών']:
fem = stem + 'ώσα'
neuter = neuter_alt_1
elif neuter_alt_2 + 'τα' in greek_corpus or neuter_alt_2 + 'τες' in greek_corpus or neuter_alt_2 + 'των' in greek_corpus:
fem = stem + 'ούσα'
neuter = neuter_alt_2
if not accent:
neuter = remove_all_diacritics(neuter)
        # it is also possible that the adjective follows the -ων, -ονος pattern
if adj[:-2] + 'ονος' in greek_corpus:
masc, fem = adj, adj
neuter = adj[:-2] + 'ον'
elif adj[-3:] == 'είς':
# passive aorist participles
        if adj[:-3] + 'έντα' not in greek_corpus:
            raise NotLegalAdjectiveException
masc = adj
fem = adj[:-1] + 'σα'
neuter = adj[:-3] + 'έν'
elif adj[-2:] in ['ας', 'άς']:
# pas, pasa pan and active aorist participles
# pas pasa pan
pl_nta = adj[:-1] + 'ντα'
        fem_sa = adj[:-1] + 'σα'
if count_syllables(adj) == 1:
pl_nta = put_accent(pl_nta, 'penultimate')
fem_sa = put_accent(fem_sa, 'penultimate')
if pl_nta in greek_corpus:
masc = adj
fem = fem_sa
neuter = adj[:-1] + 'ν'
elif adj in ['μέλας']:
masc = adj
fem = adj[:-2] + 'αινα'
neuter = adj[:-1] + 'ν'
elif adj == 'μέγας':
masc = adj
            fem = 'μεγάλη'
neuter = 'μέγα'
elif adj[-4:] == 'ονας':
masc = adj
fem = adj[:-4] + 'ων'
neuter = adj[:-2]
elif where_is_accent(adj) == 'ultimate':
masc = adj
fem = adj[:-2] + 'ού'
neuter = adj[:-1] + 'δικο'
else:
raise NotLegalAdjectiveException
elif adj in ['προβεβηκώς', 'κεχηνώς', 'τεθνεώς', 'αφεστώς', 'ἐνεστώς']:
masc = adj
fem = adj[:-1] + 'σα'
neuter = adj
# rare but sometimes ancient perf participle
elif adj in ['άρρην']:
# so rare that it can be solved like that
masc = adj
fem = adj
neuter = masc[:-2] + 'εν'
elif adj in ['περίφροντις', 'φέρελπις', 'άφροντις', 'φιλόπατρις', 'μόνορχις', 'παλίμπαις', 'πολύφροντις',
'αρνησίπατρις', 'άπολις', 'άπατρις', 'αφιλόπατρις', 'ενήλιξ', 'πυρρόθριξ', 'δασύθριξ', 'ουλόθριξ',
'κεντρόφυξ', 'πυρρόθριξ', 'υπερήλιξ', 'βλαξ', 'ομήλιξ', 'υπερμέτρωψ', 'κεντρόφυξ', 'μεσήλιξ']:
masc, fem = adj, adj
neuter = '-'
elif adj in ['εύχαρις', 'επίχαρις', 'άχαρις']:
masc, fem = adj, adj
neuter = adj[:-1]
elif adj in ['ίλεως']:
masc, fem = adj, adj
neuter = adj[:-1] + 'ν'
else:
masc, fem, neuter = adj, adj, adj
if inflection == 'aklito':
masc, fem, neuter = adj, adj, adj
adj_forms = [masc, fem, neuter]
adj_temp['adj'] = '/'.join(adj_forms)
    # παραθετικά (comparative and superlative forms)
stem = neuter
if stem[-1] == 'ς':
stem = stem[:-1] + 'σ'
parathetika = None
alt_parathetiko = None
uperthetiko = '-'
alt_uperthetiko = None
parathetiko = put_accent_on_the_antepenultimate(stem + 'τερος')
if parathetiko not in greek_corpus:
parathetiko = None
else:
uperthetiko = put_accent_on_the_antepenultimate(parathetiko[:-5] + 'τατος')
if uperthetiko not in greek_corpus:
uperthetiko = '-'
if neuter[-1] in ['ο', 'ό']:
alt_parathetiko = remove_all_diacritics(neuter[:-1]) + 'ύτερος'
if alt_parathetiko not in greek_corpus:
alt_parathetiko = None
else:
            alt_uperthetiko = put_accent_on_the_antepenultimate(alt_parathetiko[:-5] + 'τατος')
if alt_uperthetiko not in greek_corpus:
alt_uperthetiko = '-'
if parathetiko and alt_parathetiko:
parathetika = parathetiko + ',' + alt_parathetiko + '/' + uperthetiko + ',' + alt_uperthetiko
elif parathetiko:
parathetika = parathetiko + '/' + uperthetiko
elif alt_parathetiko and alt_uperthetiko:
parathetika = alt_parathetiko + '/' + alt_uperthetiko
if neuter in irregular_comparatives.keys():
parathetiko = irregular_comparatives[neuter].split('/')[0]
uperthetiko = irregular_comparatives[neuter].split('/')[1]
alt_parathetiko, alt_uperthetiko = None, None
parathetika = irregular_comparatives[neuter]
if parathetika:
adj_temp['comparative'] = parathetika
    # επιρρήματα (adverbs)
alt_adv = None
if neuter[-1] in ['ο', 'ό']:
accent = where_is_accent(neuter)
if accent != 'ultimate':
adverb = neuter[:-1] + 'α'
alt_adv = put_accent_on_the_penultimate(neuter[:-1] + 'ως', true_syllabification=False)
else:
adverb = neuter[:-1] + 'ά'
alt_adv = neuter[:-1] + 'ώς'
elif masc[-2:] in ['ής', 'ης'] and neuter[-2:] in ['ές', 'ες']:
adverb = remove_all_diacritics(neuter[:-2]) + 'ώς'
if adverb not in greek_corpus and neuter[:-2] + 'ως' in greek_corpus:
adverb = neuter[:-2] + 'ως'
alt_adv = neuter[:-2] + 'ά'
elif neuter[-1] in ['υ', 'ύ'] and masc[-1] == 'ς':
        # it should have the ancient adverb form in -εως
adverb = put_accent_on_the_penultimate(neuter[:-1] + 'εως')
if adverb not in greek_corpus:
adverb = adj_forms[1]
elif neuter[-1] == 'ί':
# colors
adverb = put_accent_on_the_ultimate(adj_forms[2] + 'α')
    elif (masc[-2:] in ['ας', 'άς', 'ων', 'ών'] or masc[-3:] in ['εις', 'είς']) and fem[-2:] == 'σα' and neuter[-1] == 'ν':
# ancient adverbs
adverb = put_accent_on_the_penultimate(neuter + 'τως')
else:
        # for indeclinable (aklita) adjectives
adverb = neuter
if neuter in ['λίγο', 'πολύ', 'ήσσον', 'κάλλιον']:
adverb = neuter
# special cases
if neuter in ['μέγα', 'μεγάλο']:
# special case
adverb = 'μέγα'
alt_adv = 'μεγάλως'
elif (masc[-4:] == 'ονας' or masc[-2:] == 'ων') and fem[-2:] == 'ων':
adverb = None
elif masc in ['άρρην', 'μέλας']:
adverb = None
epirrimata = [e for e in [adverb, alt_adv] if e and e in greek_corpus]
epirrimata = ','.join(epirrimata)
if epirrimata:
adj_temp['adverb'] = epirrimata
    # comparative adverbs
adv_parathetika = None
    adverb_parathetiko = alt_adverb_parathetiko = adverb_uperthetiko = alt_adverb_uperthetiko = ''
if parathetiko:
adverb_parathetiko = parathetiko[:-2] + 'α'
if uperthetiko != '-':
adverb_uperthetiko = ','.join([yp[:-2] + 'α' for yp in uperthetiko.split(',')])
else:
adverb_uperthetiko = '-'
if alt_parathetiko:
alt_adverb_parathetiko = alt_parathetiko[:-2] + 'α'
if alt_uperthetiko:
alt_adverb_uperthetiko = alt_uperthetiko[:-2] + 'α'
else:
alt_adverb_uperthetiko = '-'
if parathetiko and alt_parathetiko:
adv_parathetika = adverb_parathetiko + ',' + alt_adverb_parathetiko + '/' + adverb_uperthetiko + ',' + alt_adverb_uperthetiko
elif parathetiko:
adv_parathetika = adverb_parathetiko + '/' + adverb_uperthetiko
elif alt_parathetiko:
adv_parathetika = alt_adverb_parathetiko + '/' + alt_adverb_uperthetiko
if neuter in irregular_comparative_adverbs.keys():
adv_parathetika = irregular_comparative_adverbs[neuter]
if adv_parathetika:
adj_temp['adverb_comparative'] = adv_parathetika
return adj_temp
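# A minimal usage sketch (illustrative only: the exact forms returned depend on
# which entries are attested in greek_corpus):
#
#   forms = create_all_basic_adj_forms('ωραίος')
#   forms['adj']                  # 'ωραίος/ωραία/ωραίο'
#   forms['comparative']          # e.g. 'ωραιότερος/ωραιότατος'
#   forms['adverb']               # e.g. 'ωραία,ωραίως'
#   forms['adverb_comparative']   # e.g. 'ωραιότερα/ωραιότατα'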
|
python
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import warnings
from builtins import str
from typing import Any, Dict, Optional, Text
from rasa.nlu.extractors.entity_synonyms import EntitySynonymMapper
# Message is referenced in the type comments below; this import path assumes
# the Rasa 1.x package layout.
from rasa.nlu.training_data import Message
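# EntitySynonymBegin / EntitySynonymEnd wrap EntitySynonymMapper so that synonym
# substitution can be applied at the start of the pipeline and undone at the end:
# Begin replaces each entity's surface text in message.text with its synonym
# value (stashing the original under entity["literal"]) and shifts the offsets
# of all later entities accordingly; End restores the stashed literals and
# shifts the offsets back.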
class EntitySynonymBegin(EntitySynonymMapper):
name = "EntitySynonymBegin"
def process(self, message, **kwargs):
# type: (Message, **Any) -> None
updated_entities = message.get("entities", [])[:]
updated_entities.sort(key=lambda x: x["start"])
self.replace_synonyms(updated_entities)
def shift_entities(entities, shift):
for e in entities:
e["start"] += shift
e["end"] += shift
if len(updated_entities):
for i, entity in enumerate(updated_entities):
literal = message.text[entity["start"] : entity["end"]]
value = entity["value"]
if value != literal and isinstance(value, str):
entity["literal"] = literal
message.text = (
message.text[0 : entity["start"]]
+ value
+ message.text[entity["end"] :]
)
shift = len(value) - (entity["end"] - entity["start"])
entity["end"] = entity["start"] + len(value)
                    if len(updated_entities) > i + 1:  # more entities follow
shift_entities(updated_entities[i + 1 :], shift)
message.set("entities", updated_entities, add_to_output=True)
class EntitySynonymEnd(EntitySynonymMapper):
name = "EntitySynonymEnd"
def process(self, message, **kwargs):
# type: (Message, **Any) -> None
updated_entities = message.get("entities", [])[:]
updated_entities.sort(key=lambda x: x["start"])
def shift_entities(entities, shift):
for e in entities:
e["start"] += shift
e["end"] += shift
for i, entity in enumerate(updated_entities):
if "literal" in entity:
message.text = (
message.text[0 : entity["start"]]
+ entity["literal"]
+ message.text[entity["end"] :]
)
shift = len(entity["literal"]) - (entity["end"] - entity["start"])
entity["end"] = entity["start"] + len(entity["literal"])
del entity["literal"]
                if len(updated_entities) > i + 1:  # more entities follow
shift_entities(updated_entities[i + 1 :], shift)
message.set("entities", updated_entities, add_to_output=True)
|
python
|
import random

import torch
import torch.nn.functional as F
from torchvision import transforms
torch.manual_seed(17)
random.seed(42)
class Resize(object):
"""Rescale the image in a sample to a given size.
Args:
output_size (tuple or int): Desired output size. If tuple, output is
matched to output_size. If int, smaller of image edges is matched
to output_size keeping aspect ratio the same.
"""
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def _resize(self, image):
h, w = image.size()[1:3]
if isinstance(self.output_size, int):
if h > w:
new_h, new_w = self.output_size * h / w, self.output_size
else:
new_h, new_w = self.output_size, self.output_size * w / h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
img = F.interpolate(image.unsqueeze(0), (new_h, new_w))
return img.squeeze(0)
def __call__(self, sample):
raw_image, ref_image = sample['raw_image'], sample['ref_image']
new_raw_image = self._resize(raw_image)
new_ref_image = self._resize(ref_image)
return {'raw_image': new_raw_image, 'ref_image': new_ref_image}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def _transpose(self, image, channels=(2, 0, 1)):
return image.transpose(channels)
def __call__(self, sample):
raw_image, ref_image = sample['raw_image'], sample['ref_image']
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
new_raw_image = self._transpose(raw_image)
new_ref_image = self._transpose(ref_image)
return {'raw_image': torch.from_numpy(new_raw_image).float(),
'ref_image': torch.from_numpy(new_ref_image).float()}
class Normalize(object):
"""Normalize a tensor image with mean and standard deviation."""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def _normalize(self, image):
return transforms.Normalize(self.mean, self.std)(image)
def __call__(self, sample):
raw_image, ref_image = sample['raw_image'], sample['ref_image']
norm_raw_image = self._normalize(raw_image)
norm_ref_image = self._normalize(ref_image)
return {'raw_image': norm_raw_image,
'ref_image': norm_ref_image}
class RandomRotation(object):
"""Rotate the image by angle."""
def _rotate(self, image, angle):
return transforms.functional.rotate(image, angle)
def __call__(self, sample):
raw_image, ref_image = sample['raw_image'], sample['ref_image']
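        # draw a single angle and apply it to both images so the pair stays aligned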
angle = random.randint(0, 360)
rotate_raw_image = self._rotate(raw_image, angle)
rotate_ref_image = self._rotate(ref_image, angle)
return {'raw_image': rotate_raw_image,
'ref_image': rotate_ref_image}
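# A minimal composition sketch (assumed usage, not part of the module itself):
# requires a torchvision version whose transforms.functional.rotate accepts
# tensors, and a dataset yielding {'raw_image': ..., 'ref_image': ...} samples
# of H x W x C ndarrays. The mean/std values are the usual ImageNet statistics,
# shown only as an example.
#
#   paired_transform = transforms.Compose([
#       ToTensor(),        # H x W x C ndarray -> C x H x W float tensor
#       Resize(256),       # shorter edge -> 256, aspect ratio kept
#       RandomRotation(),  # same random angle for both images of the pair
#       Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
#   ])
#   sample = paired_transform({'raw_image': raw_np, 'ref_image': ref_np})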
|
python
|
import argparse
# import paraview modules.
from paraview.web import pv_wslink
from paraview.web import protocols as pv_protocols
from paraview import simple
from wslink import server
from enlil import EnlilDataset
# =============================================================================
# Create custom PVServerProtocol class to handle clients requests
# =============================================================================
class _DemoServer(pv_wslink.PVServerProtocol):
authKey = "wslink-secret"
    data_dir = "/data"
viewportScale = 1.0
viewportMaxWidth = 2560
viewportMaxHeight = 1440
settingsLODThreshold = 102400
@staticmethod
def add_arguments(parser):
parser.add_argument("--dir", default="/data",
help=("Path to the NetCDF file to load"),
dest="data_dir")
parser.add_argument("--viewport-scale", default=1.0, type=float,
help="Viewport scaling factor",
dest="viewportScale")
parser.add_argument("--viewport-max-width", default=2560, type=int,
help="Viewport maximum size in width",
dest="viewportMaxWidth")
parser.add_argument("--viewport-max-height", default=1440, type=int,
help="Viewport maximum size in height",
dest="viewportMaxHeight")
parser.add_argument("--settings-lod-threshold", default=102400,
type=int,
help="LOD Threshold in Megabytes",
dest="settingsLODThreshold")
@staticmethod
def configure(args):
# Update this server based on the passed in arguments
_DemoServer.authKey = args.authKey
_DemoServer.data_dir = args.data_dir
_DemoServer.viewportScale = args.viewportScale
_DemoServer.viewportMaxWidth = args.viewportMaxWidth
_DemoServer.viewportMaxHeight = args.viewportMaxHeight
_DemoServer.settingsLODThreshold = args.settingsLODThreshold
def initialize(self):
# Bring used components
self.registerVtkWebProtocol(pv_protocols.ParaViewWebMouseHandler())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPort())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebTimeHandler())
self.registerVtkWebProtocol(
pv_protocols.ParaViewWebPublishImageDelivery(decode=False))
self.updateSecret(_DemoServer.authKey)
# tell the C++ web app to use no encoding.
# ParaViewWebPublishImageDelivery must be set to decode=False to match.
self.getApplication().SetImageEncoding(0)
# Disable interactor-based render calls
simple.GetRenderView().EnableRenderOnInteraction = 0
# The directory containing the NetCDF file with the data
self.enlil = EnlilDataset(self.data_dir)
# Register the Paraview protocols for dispatching methods
self.registerVtkWebProtocol(self.enlil)
if __name__ == "__main__":
# Create argument parser
parser = argparse.ArgumentParser(description="ParaViewWeb Demo")
# Add default arguments
server.add_arguments(parser)
_DemoServer.add_arguments(parser)
# Extract arguments
args = parser.parse_args()
_DemoServer.configure(args)
# Start server
server.start_webserver(options=args, protocol=_DemoServer)
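# Example launch (the script name is assumed; --port comes from the generic
# wslink server arguments added above):
#   python pv_server.py --port 8080 --dir /data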
|
python
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('all/', views.BlogList.as_view(), name='blog-list'),
    path('bloggers/', views.BlogAuthorList.as_view(), name='blogauthor-list'),
    path('bloggers/<int:pk>', views.BlogAuthorDetail.as_view(), name='blogauthor-detail'),
    path('<int:pk>/', views.BlogDetail.as_view(), name='blog-detail'),
    path('<int:pk>/create', views.newComment, name='comment_create'),
]
|
python
|
def main():
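    # Rewrite every image path in an existing list file so that it points at a
    # local Windows root, writing the result to a parallel *_local.txt file.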
antImages = 4000
first = 4
second = 4
    newFileName = '%s_%s_bench_door_%s' % (first, second, antImages)
    with open(newFileName + '_local.txt', 'w') as new_file:
        with open('4_4_bench_door_4000_final.txt') as old_file:
            for line in old_file:
                parts = line.split('/')
                # replace the first two path components with the local root:
                # a/b/rest -> E:/kristina/rest
                del parts[0]
                parts[0] = 'E:/kristina'
                new_file.write('/'.join(parts))
if __name__ == '__main__':
main()
|
python
|
# Generated from Java9.g4 by ANTLR 4.8
from antlr4 import *
if __name__ is not None and "." in __name__:
from .Java9Parser import Java9Parser
else:
from Java9Parser import Java9Parser
# This class defines a complete listener for a parse tree produced by Java9Parser.
class Java9Listener(ParseTreeListener):
# Enter a parse tree produced by Java9Parser#literal.
def enterLiteral(self, ctx:Java9Parser.LiteralContext):
pass
# Exit a parse tree produced by Java9Parser#literal.
def exitLiteral(self, ctx:Java9Parser.LiteralContext):
pass
# Enter a parse tree produced by Java9Parser#primitiveType.
def enterPrimitiveType(self, ctx:Java9Parser.PrimitiveTypeContext):
pass
# Exit a parse tree produced by Java9Parser#primitiveType.
def exitPrimitiveType(self, ctx:Java9Parser.PrimitiveTypeContext):
pass
# Enter a parse tree produced by Java9Parser#numericType.
def enterNumericType(self, ctx:Java9Parser.NumericTypeContext):
pass
# Exit a parse tree produced by Java9Parser#numericType.
def exitNumericType(self, ctx:Java9Parser.NumericTypeContext):
pass
# Enter a parse tree produced by Java9Parser#integralType.
def enterIntegralType(self, ctx:Java9Parser.IntegralTypeContext):
pass
# Exit a parse tree produced by Java9Parser#integralType.
def exitIntegralType(self, ctx:Java9Parser.IntegralTypeContext):
pass
# Enter a parse tree produced by Java9Parser#floatingPointType.
def enterFloatingPointType(self, ctx:Java9Parser.FloatingPointTypeContext):
pass
# Exit a parse tree produced by Java9Parser#floatingPointType.
def exitFloatingPointType(self, ctx:Java9Parser.FloatingPointTypeContext):
pass
# Enter a parse tree produced by Java9Parser#referenceType.
def enterReferenceType(self, ctx:Java9Parser.ReferenceTypeContext):
pass
# Exit a parse tree produced by Java9Parser#referenceType.
def exitReferenceType(self, ctx:Java9Parser.ReferenceTypeContext):
pass
# Enter a parse tree produced by Java9Parser#classOrInterfaceType.
def enterClassOrInterfaceType(self, ctx:Java9Parser.ClassOrInterfaceTypeContext):
pass
# Exit a parse tree produced by Java9Parser#classOrInterfaceType.
def exitClassOrInterfaceType(self, ctx:Java9Parser.ClassOrInterfaceTypeContext):
pass
# Enter a parse tree produced by Java9Parser#classType.
def enterClassType(self, ctx:Java9Parser.ClassTypeContext):
pass
# Exit a parse tree produced by Java9Parser#classType.
def exitClassType(self, ctx:Java9Parser.ClassTypeContext):
pass
# Enter a parse tree produced by Java9Parser#classType_lf_classOrInterfaceType.
def enterClassType_lf_classOrInterfaceType(self, ctx:Java9Parser.ClassType_lf_classOrInterfaceTypeContext):
pass
# Exit a parse tree produced by Java9Parser#classType_lf_classOrInterfaceType.
def exitClassType_lf_classOrInterfaceType(self, ctx:Java9Parser.ClassType_lf_classOrInterfaceTypeContext):
pass
# Enter a parse tree produced by Java9Parser#classType_lfno_classOrInterfaceType.
def enterClassType_lfno_classOrInterfaceType(self, ctx:Java9Parser.ClassType_lfno_classOrInterfaceTypeContext):
pass
# Exit a parse tree produced by Java9Parser#classType_lfno_classOrInterfaceType.
def exitClassType_lfno_classOrInterfaceType(self, ctx:Java9Parser.ClassType_lfno_classOrInterfaceTypeContext):
pass
# Enter a parse tree produced by Java9Parser#interfaceType.
def enterInterfaceType(self, ctx:Java9Parser.InterfaceTypeContext):
pass
# Exit a parse tree produced by Java9Parser#interfaceType.
def exitInterfaceType(self, ctx:Java9Parser.InterfaceTypeContext):
pass
# Enter a parse tree produced by Java9Parser#interfaceType_lf_classOrInterfaceType.
def enterInterfaceType_lf_classOrInterfaceType(self, ctx:Java9Parser.InterfaceType_lf_classOrInterfaceTypeContext):
pass
# Exit a parse tree produced by Java9Parser#interfaceType_lf_classOrInterfaceType.
def exitInterfaceType_lf_classOrInterfaceType(self, ctx:Java9Parser.InterfaceType_lf_classOrInterfaceTypeContext):
pass
# Enter a parse tree produced by Java9Parser#interfaceType_lfno_classOrInterfaceType.
def enterInterfaceType_lfno_classOrInterfaceType(self, ctx:Java9Parser.InterfaceType_lfno_classOrInterfaceTypeContext):
pass
# Exit a parse tree produced by Java9Parser#interfaceType_lfno_classOrInterfaceType.
def exitInterfaceType_lfno_classOrInterfaceType(self, ctx:Java9Parser.InterfaceType_lfno_classOrInterfaceTypeContext):
pass
# Enter a parse tree produced by Java9Parser#typeVariable.
def enterTypeVariable(self, ctx:Java9Parser.TypeVariableContext):
pass
# Exit a parse tree produced by Java9Parser#typeVariable.
def exitTypeVariable(self, ctx:Java9Parser.TypeVariableContext):
pass
# Enter a parse tree produced by Java9Parser#arrayType.
def enterArrayType(self, ctx:Java9Parser.ArrayTypeContext):
pass
# Exit a parse tree produced by Java9Parser#arrayType.
def exitArrayType(self, ctx:Java9Parser.ArrayTypeContext):
pass
# Enter a parse tree produced by Java9Parser#dims.
def enterDims(self, ctx:Java9Parser.DimsContext):
pass
# Exit a parse tree produced by Java9Parser#dims.
def exitDims(self, ctx:Java9Parser.DimsContext):
pass
# Enter a parse tree produced by Java9Parser#typeParameter.
def enterTypeParameter(self, ctx:Java9Parser.TypeParameterContext):
pass
# Exit a parse tree produced by Java9Parser#typeParameter.
def exitTypeParameter(self, ctx:Java9Parser.TypeParameterContext):
pass
# Enter a parse tree produced by Java9Parser#typeParameterModifier.
def enterTypeParameterModifier(self, ctx:Java9Parser.TypeParameterModifierContext):
pass
# Exit a parse tree produced by Java9Parser#typeParameterModifier.
def exitTypeParameterModifier(self, ctx:Java9Parser.TypeParameterModifierContext):
pass
# Enter a parse tree produced by Java9Parser#typeBound.
def enterTypeBound(self, ctx:Java9Parser.TypeBoundContext):
pass
# Exit a parse tree produced by Java9Parser#typeBound.
def exitTypeBound(self, ctx:Java9Parser.TypeBoundContext):
pass
# Enter a parse tree produced by Java9Parser#additionalBound.
def enterAdditionalBound(self, ctx:Java9Parser.AdditionalBoundContext):
pass
# Exit a parse tree produced by Java9Parser#additionalBound.
def exitAdditionalBound(self, ctx:Java9Parser.AdditionalBoundContext):
pass
# Enter a parse tree produced by Java9Parser#typeArguments.
def enterTypeArguments(self, ctx:Java9Parser.TypeArgumentsContext):
pass
# Exit a parse tree produced by Java9Parser#typeArguments.
def exitTypeArguments(self, ctx:Java9Parser.TypeArgumentsContext):
pass
# Enter a parse tree produced by Java9Parser#typeArgumentList.
def enterTypeArgumentList(self, ctx:Java9Parser.TypeArgumentListContext):
pass
# Exit a parse tree produced by Java9Parser#typeArgumentList.
def exitTypeArgumentList(self, ctx:Java9Parser.TypeArgumentListContext):
pass
# Enter a parse tree produced by Java9Parser#typeArgument.
def enterTypeArgument(self, ctx:Java9Parser.TypeArgumentContext):
pass
# Exit a parse tree produced by Java9Parser#typeArgument.
def exitTypeArgument(self, ctx:Java9Parser.TypeArgumentContext):
pass
# Enter a parse tree produced by Java9Parser#wildcard.
def enterWildcard(self, ctx:Java9Parser.WildcardContext):
pass
# Exit a parse tree produced by Java9Parser#wildcard.
def exitWildcard(self, ctx:Java9Parser.WildcardContext):
pass
# Enter a parse tree produced by Java9Parser#wildcardBounds.
def enterWildcardBounds(self, ctx:Java9Parser.WildcardBoundsContext):
pass
# Exit a parse tree produced by Java9Parser#wildcardBounds.
def exitWildcardBounds(self, ctx:Java9Parser.WildcardBoundsContext):
pass
# Enter a parse tree produced by Java9Parser#moduleName.
def enterModuleName(self, ctx:Java9Parser.ModuleNameContext):
pass
# Exit a parse tree produced by Java9Parser#moduleName.
def exitModuleName(self, ctx:Java9Parser.ModuleNameContext):
pass
# Enter a parse tree produced by Java9Parser#packageName.
def enterPackageName(self, ctx:Java9Parser.PackageNameContext):
pass
# Exit a parse tree produced by Java9Parser#packageName.
def exitPackageName(self, ctx:Java9Parser.PackageNameContext):
pass
# Enter a parse tree produced by Java9Parser#typeName.
def enterTypeName(self, ctx:Java9Parser.TypeNameContext):
pass
# Exit a parse tree produced by Java9Parser#typeName.
def exitTypeName(self, ctx:Java9Parser.TypeNameContext):
pass
# Enter a parse tree produced by Java9Parser#packageOrTypeName.
def enterPackageOrTypeName(self, ctx:Java9Parser.PackageOrTypeNameContext):
pass
# Exit a parse tree produced by Java9Parser#packageOrTypeName.
def exitPackageOrTypeName(self, ctx:Java9Parser.PackageOrTypeNameContext):
pass
# Enter a parse tree produced by Java9Parser#expressionName.
def enterExpressionName(self, ctx:Java9Parser.ExpressionNameContext):
pass
# Exit a parse tree produced by Java9Parser#expressionName.
def exitExpressionName(self, ctx:Java9Parser.ExpressionNameContext):
pass
# Enter a parse tree produced by Java9Parser#methodName.
def enterMethodName(self, ctx:Java9Parser.MethodNameContext):
pass
# Exit a parse tree produced by Java9Parser#methodName.
def exitMethodName(self, ctx:Java9Parser.MethodNameContext):
pass
# Enter a parse tree produced by Java9Parser#ambiguousName.
def enterAmbiguousName(self, ctx:Java9Parser.AmbiguousNameContext):
pass
# Exit a parse tree produced by Java9Parser#ambiguousName.
def exitAmbiguousName(self, ctx:Java9Parser.AmbiguousNameContext):
pass
# Enter a parse tree produced by Java9Parser#compilationUnit.
def enterCompilationUnit(self, ctx:Java9Parser.CompilationUnitContext):
pass
# Exit a parse tree produced by Java9Parser#compilationUnit.
def exitCompilationUnit(self, ctx:Java9Parser.CompilationUnitContext):
pass
# Enter a parse tree produced by Java9Parser#ordinaryCompilation.
def enterOrdinaryCompilation(self, ctx:Java9Parser.OrdinaryCompilationContext):
pass
# Exit a parse tree produced by Java9Parser#ordinaryCompilation.
def exitOrdinaryCompilation(self, ctx:Java9Parser.OrdinaryCompilationContext):
pass
# Enter a parse tree produced by Java9Parser#modularCompilation.
def enterModularCompilation(self, ctx:Java9Parser.ModularCompilationContext):
pass
# Exit a parse tree produced by Java9Parser#modularCompilation.
def exitModularCompilation(self, ctx:Java9Parser.ModularCompilationContext):
pass
# Enter a parse tree produced by Java9Parser#packageDeclaration.
def enterPackageDeclaration(self, ctx:Java9Parser.PackageDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#packageDeclaration.
def exitPackageDeclaration(self, ctx:Java9Parser.PackageDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#packageModifier.
def enterPackageModifier(self, ctx:Java9Parser.PackageModifierContext):
pass
# Exit a parse tree produced by Java9Parser#packageModifier.
def exitPackageModifier(self, ctx:Java9Parser.PackageModifierContext):
pass
# Enter a parse tree produced by Java9Parser#importDeclaration.
def enterImportDeclaration(self, ctx:Java9Parser.ImportDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#importDeclaration.
def exitImportDeclaration(self, ctx:Java9Parser.ImportDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#singleTypeImportDeclaration.
def enterSingleTypeImportDeclaration(self, ctx:Java9Parser.SingleTypeImportDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#singleTypeImportDeclaration.
def exitSingleTypeImportDeclaration(self, ctx:Java9Parser.SingleTypeImportDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#typeImportOnDemandDeclaration.
def enterTypeImportOnDemandDeclaration(self, ctx:Java9Parser.TypeImportOnDemandDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#typeImportOnDemandDeclaration.
def exitTypeImportOnDemandDeclaration(self, ctx:Java9Parser.TypeImportOnDemandDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#singleStaticImportDeclaration.
def enterSingleStaticImportDeclaration(self, ctx:Java9Parser.SingleStaticImportDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#singleStaticImportDeclaration.
def exitSingleStaticImportDeclaration(self, ctx:Java9Parser.SingleStaticImportDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#staticImportOnDemandDeclaration.
def enterStaticImportOnDemandDeclaration(self, ctx:Java9Parser.StaticImportOnDemandDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#staticImportOnDemandDeclaration.
def exitStaticImportOnDemandDeclaration(self, ctx:Java9Parser.StaticImportOnDemandDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#typeDeclaration.
def enterTypeDeclaration(self, ctx:Java9Parser.TypeDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#typeDeclaration.
def exitTypeDeclaration(self, ctx:Java9Parser.TypeDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#moduleDeclaration.
def enterModuleDeclaration(self, ctx:Java9Parser.ModuleDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#moduleDeclaration.
def exitModuleDeclaration(self, ctx:Java9Parser.ModuleDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#moduleDirective.
def enterModuleDirective(self, ctx:Java9Parser.ModuleDirectiveContext):
pass
# Exit a parse tree produced by Java9Parser#moduleDirective.
def exitModuleDirective(self, ctx:Java9Parser.ModuleDirectiveContext):
pass
# Enter a parse tree produced by Java9Parser#requiresModifier.
def enterRequiresModifier(self, ctx:Java9Parser.RequiresModifierContext):
pass
# Exit a parse tree produced by Java9Parser#requiresModifier.
def exitRequiresModifier(self, ctx:Java9Parser.RequiresModifierContext):
pass
# Enter a parse tree produced by Java9Parser#classDeclaration.
def enterClassDeclaration(self, ctx:Java9Parser.ClassDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#classDeclaration.
def exitClassDeclaration(self, ctx:Java9Parser.ClassDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#normalClassDeclaration.
def enterNormalClassDeclaration(self, ctx:Java9Parser.NormalClassDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#normalClassDeclaration.
def exitNormalClassDeclaration(self, ctx:Java9Parser.NormalClassDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#classModifier.
def enterClassModifier(self, ctx:Java9Parser.ClassModifierContext):
pass
# Exit a parse tree produced by Java9Parser#classModifier.
def exitClassModifier(self, ctx:Java9Parser.ClassModifierContext):
pass
# Enter a parse tree produced by Java9Parser#typeParameters.
def enterTypeParameters(self, ctx:Java9Parser.TypeParametersContext):
pass
# Exit a parse tree produced by Java9Parser#typeParameters.
def exitTypeParameters(self, ctx:Java9Parser.TypeParametersContext):
pass
# Enter a parse tree produced by Java9Parser#typeParameterList.
def enterTypeParameterList(self, ctx:Java9Parser.TypeParameterListContext):
pass
# Exit a parse tree produced by Java9Parser#typeParameterList.
def exitTypeParameterList(self, ctx:Java9Parser.TypeParameterListContext):
pass
# Enter a parse tree produced by Java9Parser#superclass.
def enterSuperclass(self, ctx:Java9Parser.SuperclassContext):
pass
# Exit a parse tree produced by Java9Parser#superclass.
def exitSuperclass(self, ctx:Java9Parser.SuperclassContext):
pass
# Enter a parse tree produced by Java9Parser#superinterfaces.
def enterSuperinterfaces(self, ctx:Java9Parser.SuperinterfacesContext):
pass
# Exit a parse tree produced by Java9Parser#superinterfaces.
def exitSuperinterfaces(self, ctx:Java9Parser.SuperinterfacesContext):
pass
# Enter a parse tree produced by Java9Parser#interfaceTypeList.
def enterInterfaceTypeList(self, ctx:Java9Parser.InterfaceTypeListContext):
pass
# Exit a parse tree produced by Java9Parser#interfaceTypeList.
def exitInterfaceTypeList(self, ctx:Java9Parser.InterfaceTypeListContext):
pass
# Enter a parse tree produced by Java9Parser#classBody.
def enterClassBody(self, ctx:Java9Parser.ClassBodyContext):
pass
# Exit a parse tree produced by Java9Parser#classBody.
def exitClassBody(self, ctx:Java9Parser.ClassBodyContext):
pass
# Enter a parse tree produced by Java9Parser#classBodyDeclaration.
def enterClassBodyDeclaration(self, ctx:Java9Parser.ClassBodyDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#classBodyDeclaration.
def exitClassBodyDeclaration(self, ctx:Java9Parser.ClassBodyDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#classMemberDeclaration.
def enterClassMemberDeclaration(self, ctx:Java9Parser.ClassMemberDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#classMemberDeclaration.
def exitClassMemberDeclaration(self, ctx:Java9Parser.ClassMemberDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#fieldDeclaration.
def enterFieldDeclaration(self, ctx:Java9Parser.FieldDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#fieldDeclaration.
def exitFieldDeclaration(self, ctx:Java9Parser.FieldDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#fieldModifier.
def enterFieldModifier(self, ctx:Java9Parser.FieldModifierContext):
pass
# Exit a parse tree produced by Java9Parser#fieldModifier.
def exitFieldModifier(self, ctx:Java9Parser.FieldModifierContext):
pass
# Enter a parse tree produced by Java9Parser#variableDeclaratorList.
def enterVariableDeclaratorList(self, ctx:Java9Parser.VariableDeclaratorListContext):
pass
# Exit a parse tree produced by Java9Parser#variableDeclaratorList.
def exitVariableDeclaratorList(self, ctx:Java9Parser.VariableDeclaratorListContext):
pass
# Enter a parse tree produced by Java9Parser#variableDeclarator.
def enterVariableDeclarator(self, ctx:Java9Parser.VariableDeclaratorContext):
pass
# Exit a parse tree produced by Java9Parser#variableDeclarator.
def exitVariableDeclarator(self, ctx:Java9Parser.VariableDeclaratorContext):
pass
# Enter a parse tree produced by Java9Parser#variableDeclaratorId.
def enterVariableDeclaratorId(self, ctx:Java9Parser.VariableDeclaratorIdContext):
pass
# Exit a parse tree produced by Java9Parser#variableDeclaratorId.
def exitVariableDeclaratorId(self, ctx:Java9Parser.VariableDeclaratorIdContext):
pass
# Enter a parse tree produced by Java9Parser#variableInitializer.
def enterVariableInitializer(self, ctx:Java9Parser.VariableInitializerContext):
pass
# Exit a parse tree produced by Java9Parser#variableInitializer.
def exitVariableInitializer(self, ctx:Java9Parser.VariableInitializerContext):
pass
# Enter a parse tree produced by Java9Parser#unannType.
def enterUnannType(self, ctx:Java9Parser.UnannTypeContext):
pass
# Exit a parse tree produced by Java9Parser#unannType.
def exitUnannType(self, ctx:Java9Parser.UnannTypeContext):
pass
# Enter a parse tree produced by Java9Parser#unannPrimitiveType.
def enterUnannPrimitiveType(self, ctx:Java9Parser.UnannPrimitiveTypeContext):
pass
# Exit a parse tree produced by Java9Parser#unannPrimitiveType.
def exitUnannPrimitiveType(self, ctx:Java9Parser.UnannPrimitiveTypeContext):
pass
# Enter a parse tree produced by Java9Parser#unannReferenceType.
def enterUnannReferenceType(self, ctx:Java9Parser.UnannReferenceTypeContext):
pass
# Exit a parse tree produced by Java9Parser#unannReferenceType.
def exitUnannReferenceType(self, ctx:Java9Parser.UnannReferenceTypeContext):
pass
# Enter a parse tree produced by Java9Parser#unannClassOrInterfaceType.
def enterUnannClassOrInterfaceType(self, ctx:Java9Parser.UnannClassOrInterfaceTypeContext):
pass
# Exit a parse tree produced by Java9Parser#unannClassOrInterfaceType.
def exitUnannClassOrInterfaceType(self, ctx:Java9Parser.UnannClassOrInterfaceTypeContext):
pass
# Enter a parse tree produced by Java9Parser#unannClassType.
def enterUnannClassType(self, ctx:Java9Parser.UnannClassTypeContext):
pass
# Exit a parse tree produced by Java9Parser#unannClassType.
def exitUnannClassType(self, ctx:Java9Parser.UnannClassTypeContext):
pass
# Enter a parse tree produced by Java9Parser#unannClassType_lf_unannClassOrInterfaceType.
def enterUnannClassType_lf_unannClassOrInterfaceType(self, ctx:Java9Parser.UnannClassType_lf_unannClassOrInterfaceTypeContext):
pass
# Exit a parse tree produced by Java9Parser#unannClassType_lf_unannClassOrInterfaceType.
def exitUnannClassType_lf_unannClassOrInterfaceType(self, ctx:Java9Parser.UnannClassType_lf_unannClassOrInterfaceTypeContext):
pass
# Enter a parse tree produced by Java9Parser#unannClassType_lfno_unannClassOrInterfaceType.
def enterUnannClassType_lfno_unannClassOrInterfaceType(self, ctx:Java9Parser.UnannClassType_lfno_unannClassOrInterfaceTypeContext):
pass
# Exit a parse tree produced by Java9Parser#unannClassType_lfno_unannClassOrInterfaceType.
def exitUnannClassType_lfno_unannClassOrInterfaceType(self, ctx:Java9Parser.UnannClassType_lfno_unannClassOrInterfaceTypeContext):
pass
# Enter a parse tree produced by Java9Parser#unannInterfaceType.
def enterUnannInterfaceType(self, ctx:Java9Parser.UnannInterfaceTypeContext):
pass
# Exit a parse tree produced by Java9Parser#unannInterfaceType.
def exitUnannInterfaceType(self, ctx:Java9Parser.UnannInterfaceTypeContext):
pass
# Enter a parse tree produced by Java9Parser#unannInterfaceType_lf_unannClassOrInterfaceType.
def enterUnannInterfaceType_lf_unannClassOrInterfaceType(self, ctx:Java9Parser.UnannInterfaceType_lf_unannClassOrInterfaceTypeContext):
pass
# Exit a parse tree produced by Java9Parser#unannInterfaceType_lf_unannClassOrInterfaceType.
def exitUnannInterfaceType_lf_unannClassOrInterfaceType(self, ctx:Java9Parser.UnannInterfaceType_lf_unannClassOrInterfaceTypeContext):
pass
# Enter a parse tree produced by Java9Parser#unannInterfaceType_lfno_unannClassOrInterfaceType.
def enterUnannInterfaceType_lfno_unannClassOrInterfaceType(self, ctx:Java9Parser.UnannInterfaceType_lfno_unannClassOrInterfaceTypeContext):
pass
# Exit a parse tree produced by Java9Parser#unannInterfaceType_lfno_unannClassOrInterfaceType.
def exitUnannInterfaceType_lfno_unannClassOrInterfaceType(self, ctx:Java9Parser.UnannInterfaceType_lfno_unannClassOrInterfaceTypeContext):
pass
# Enter a parse tree produced by Java9Parser#unannTypeVariable.
def enterUnannTypeVariable(self, ctx:Java9Parser.UnannTypeVariableContext):
pass
# Exit a parse tree produced by Java9Parser#unannTypeVariable.
def exitUnannTypeVariable(self, ctx:Java9Parser.UnannTypeVariableContext):
pass
# Enter a parse tree produced by Java9Parser#unannArrayType.
def enterUnannArrayType(self, ctx:Java9Parser.UnannArrayTypeContext):
pass
# Exit a parse tree produced by Java9Parser#unannArrayType.
def exitUnannArrayType(self, ctx:Java9Parser.UnannArrayTypeContext):
pass
# Enter a parse tree produced by Java9Parser#methodDeclaration.
def enterMethodDeclaration(self, ctx:Java9Parser.MethodDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#methodDeclaration.
def exitMethodDeclaration(self, ctx:Java9Parser.MethodDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#methodModifier.
def enterMethodModifier(self, ctx:Java9Parser.MethodModifierContext):
pass
# Exit a parse tree produced by Java9Parser#methodModifier.
def exitMethodModifier(self, ctx:Java9Parser.MethodModifierContext):
pass
# Enter a parse tree produced by Java9Parser#methodHeader.
def enterMethodHeader(self, ctx:Java9Parser.MethodHeaderContext):
pass
# Exit a parse tree produced by Java9Parser#methodHeader.
def exitMethodHeader(self, ctx:Java9Parser.MethodHeaderContext):
pass
# Enter a parse tree produced by Java9Parser#result.
def enterResult(self, ctx:Java9Parser.ResultContext):
pass
# Exit a parse tree produced by Java9Parser#result.
def exitResult(self, ctx:Java9Parser.ResultContext):
pass
# Enter a parse tree produced by Java9Parser#methodDeclarator.
def enterMethodDeclarator(self, ctx:Java9Parser.MethodDeclaratorContext):
pass
# Exit a parse tree produced by Java9Parser#methodDeclarator.
def exitMethodDeclarator(self, ctx:Java9Parser.MethodDeclaratorContext):
pass
# Enter a parse tree produced by Java9Parser#formalParameterList.
def enterFormalParameterList(self, ctx:Java9Parser.FormalParameterListContext):
pass
# Exit a parse tree produced by Java9Parser#formalParameterList.
def exitFormalParameterList(self, ctx:Java9Parser.FormalParameterListContext):
pass
# Enter a parse tree produced by Java9Parser#formalParameters.
def enterFormalParameters(self, ctx:Java9Parser.FormalParametersContext):
pass
# Exit a parse tree produced by Java9Parser#formalParameters.
def exitFormalParameters(self, ctx:Java9Parser.FormalParametersContext):
pass
# Enter a parse tree produced by Java9Parser#formalParameter.
def enterFormalParameter(self, ctx:Java9Parser.FormalParameterContext):
pass
# Exit a parse tree produced by Java9Parser#formalParameter.
def exitFormalParameter(self, ctx:Java9Parser.FormalParameterContext):
pass
# Enter a parse tree produced by Java9Parser#variableModifier.
def enterVariableModifier(self, ctx:Java9Parser.VariableModifierContext):
pass
# Exit a parse tree produced by Java9Parser#variableModifier.
def exitVariableModifier(self, ctx:Java9Parser.VariableModifierContext):
pass
# Enter a parse tree produced by Java9Parser#lastFormalParameter.
def enterLastFormalParameter(self, ctx:Java9Parser.LastFormalParameterContext):
pass
# Exit a parse tree produced by Java9Parser#lastFormalParameter.
def exitLastFormalParameter(self, ctx:Java9Parser.LastFormalParameterContext):
pass
# Enter a parse tree produced by Java9Parser#receiverParameter.
def enterReceiverParameter(self, ctx:Java9Parser.ReceiverParameterContext):
pass
# Exit a parse tree produced by Java9Parser#receiverParameter.
def exitReceiverParameter(self, ctx:Java9Parser.ReceiverParameterContext):
pass
# Enter a parse tree produced by Java9Parser#throws_.
def enterThrows_(self, ctx:Java9Parser.Throws_Context):
pass
# Exit a parse tree produced by Java9Parser#throws_.
def exitThrows_(self, ctx:Java9Parser.Throws_Context):
pass
# Enter a parse tree produced by Java9Parser#exceptionTypeList.
def enterExceptionTypeList(self, ctx:Java9Parser.ExceptionTypeListContext):
pass
# Exit a parse tree produced by Java9Parser#exceptionTypeList.
def exitExceptionTypeList(self, ctx:Java9Parser.ExceptionTypeListContext):
pass
# Enter a parse tree produced by Java9Parser#exceptionType.
def enterExceptionType(self, ctx:Java9Parser.ExceptionTypeContext):
pass
# Exit a parse tree produced by Java9Parser#exceptionType.
def exitExceptionType(self, ctx:Java9Parser.ExceptionTypeContext):
pass
# Enter a parse tree produced by Java9Parser#methodBody.
def enterMethodBody(self, ctx:Java9Parser.MethodBodyContext):
pass
# Exit a parse tree produced by Java9Parser#methodBody.
def exitMethodBody(self, ctx:Java9Parser.MethodBodyContext):
pass
# Enter a parse tree produced by Java9Parser#instanceInitializer.
def enterInstanceInitializer(self, ctx:Java9Parser.InstanceInitializerContext):
pass
# Exit a parse tree produced by Java9Parser#instanceInitializer.
def exitInstanceInitializer(self, ctx:Java9Parser.InstanceInitializerContext):
pass
# Enter a parse tree produced by Java9Parser#staticInitializer.
def enterStaticInitializer(self, ctx:Java9Parser.StaticInitializerContext):
pass
# Exit a parse tree produced by Java9Parser#staticInitializer.
def exitStaticInitializer(self, ctx:Java9Parser.StaticInitializerContext):
pass
# Enter a parse tree produced by Java9Parser#constructorDeclaration.
def enterConstructorDeclaration(self, ctx:Java9Parser.ConstructorDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#constructorDeclaration.
def exitConstructorDeclaration(self, ctx:Java9Parser.ConstructorDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#constructorModifier.
def enterConstructorModifier(self, ctx:Java9Parser.ConstructorModifierContext):
pass
# Exit a parse tree produced by Java9Parser#constructorModifier.
def exitConstructorModifier(self, ctx:Java9Parser.ConstructorModifierContext):
pass
# Enter a parse tree produced by Java9Parser#constructorDeclarator.
def enterConstructorDeclarator(self, ctx:Java9Parser.ConstructorDeclaratorContext):
pass
# Exit a parse tree produced by Java9Parser#constructorDeclarator.
def exitConstructorDeclarator(self, ctx:Java9Parser.ConstructorDeclaratorContext):
pass
# Enter a parse tree produced by Java9Parser#simpleTypeName.
def enterSimpleTypeName(self, ctx:Java9Parser.SimpleTypeNameContext):
pass
# Exit a parse tree produced by Java9Parser#simpleTypeName.
def exitSimpleTypeName(self, ctx:Java9Parser.SimpleTypeNameContext):
pass
# Enter a parse tree produced by Java9Parser#constructorBody.
def enterConstructorBody(self, ctx:Java9Parser.ConstructorBodyContext):
pass
# Exit a parse tree produced by Java9Parser#constructorBody.
def exitConstructorBody(self, ctx:Java9Parser.ConstructorBodyContext):
pass
# Enter a parse tree produced by Java9Parser#explicitConstructorInvocation.
def enterExplicitConstructorInvocation(self, ctx:Java9Parser.ExplicitConstructorInvocationContext):
pass
# Exit a parse tree produced by Java9Parser#explicitConstructorInvocation.
def exitExplicitConstructorInvocation(self, ctx:Java9Parser.ExplicitConstructorInvocationContext):
pass
# Enter a parse tree produced by Java9Parser#enumDeclaration.
def enterEnumDeclaration(self, ctx:Java9Parser.EnumDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#enumDeclaration.
def exitEnumDeclaration(self, ctx:Java9Parser.EnumDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#enumBody.
def enterEnumBody(self, ctx:Java9Parser.EnumBodyContext):
pass
# Exit a parse tree produced by Java9Parser#enumBody.
def exitEnumBody(self, ctx:Java9Parser.EnumBodyContext):
pass
# Enter a parse tree produced by Java9Parser#enumConstantList.
def enterEnumConstantList(self, ctx:Java9Parser.EnumConstantListContext):
pass
# Exit a parse tree produced by Java9Parser#enumConstantList.
def exitEnumConstantList(self, ctx:Java9Parser.EnumConstantListContext):
pass
# Enter a parse tree produced by Java9Parser#enumConstant.
def enterEnumConstant(self, ctx:Java9Parser.EnumConstantContext):
pass
# Exit a parse tree produced by Java9Parser#enumConstant.
def exitEnumConstant(self, ctx:Java9Parser.EnumConstantContext):
pass
# Enter a parse tree produced by Java9Parser#enumConstantModifier.
def enterEnumConstantModifier(self, ctx:Java9Parser.EnumConstantModifierContext):
pass
# Exit a parse tree produced by Java9Parser#enumConstantModifier.
def exitEnumConstantModifier(self, ctx:Java9Parser.EnumConstantModifierContext):
pass
# Enter a parse tree produced by Java9Parser#enumBodyDeclarations.
def enterEnumBodyDeclarations(self, ctx:Java9Parser.EnumBodyDeclarationsContext):
pass
# Exit a parse tree produced by Java9Parser#enumBodyDeclarations.
def exitEnumBodyDeclarations(self, ctx:Java9Parser.EnumBodyDeclarationsContext):
pass
# Enter a parse tree produced by Java9Parser#interfaceDeclaration.
def enterInterfaceDeclaration(self, ctx:Java9Parser.InterfaceDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#interfaceDeclaration.
def exitInterfaceDeclaration(self, ctx:Java9Parser.InterfaceDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#normalInterfaceDeclaration.
def enterNormalInterfaceDeclaration(self, ctx:Java9Parser.NormalInterfaceDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#normalInterfaceDeclaration.
def exitNormalInterfaceDeclaration(self, ctx:Java9Parser.NormalInterfaceDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#interfaceModifier.
def enterInterfaceModifier(self, ctx:Java9Parser.InterfaceModifierContext):
pass
# Exit a parse tree produced by Java9Parser#interfaceModifier.
def exitInterfaceModifier(self, ctx:Java9Parser.InterfaceModifierContext):
pass
# Enter a parse tree produced by Java9Parser#extendsInterfaces.
def enterExtendsInterfaces(self, ctx:Java9Parser.ExtendsInterfacesContext):
pass
# Exit a parse tree produced by Java9Parser#extendsInterfaces.
def exitExtendsInterfaces(self, ctx:Java9Parser.ExtendsInterfacesContext):
pass
# Enter a parse tree produced by Java9Parser#interfaceBody.
def enterInterfaceBody(self, ctx:Java9Parser.InterfaceBodyContext):
pass
# Exit a parse tree produced by Java9Parser#interfaceBody.
def exitInterfaceBody(self, ctx:Java9Parser.InterfaceBodyContext):
pass
# Enter a parse tree produced by Java9Parser#interfaceMemberDeclaration.
def enterInterfaceMemberDeclaration(self, ctx:Java9Parser.InterfaceMemberDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#interfaceMemberDeclaration.
def exitInterfaceMemberDeclaration(self, ctx:Java9Parser.InterfaceMemberDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#constantDeclaration.
def enterConstantDeclaration(self, ctx:Java9Parser.ConstantDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#constantDeclaration.
def exitConstantDeclaration(self, ctx:Java9Parser.ConstantDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#constantModifier.
def enterConstantModifier(self, ctx:Java9Parser.ConstantModifierContext):
pass
# Exit a parse tree produced by Java9Parser#constantModifier.
def exitConstantModifier(self, ctx:Java9Parser.ConstantModifierContext):
pass
# Enter a parse tree produced by Java9Parser#interfaceMethodDeclaration.
def enterInterfaceMethodDeclaration(self, ctx:Java9Parser.InterfaceMethodDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#interfaceMethodDeclaration.
def exitInterfaceMethodDeclaration(self, ctx:Java9Parser.InterfaceMethodDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#interfaceMethodModifier.
def enterInterfaceMethodModifier(self, ctx:Java9Parser.InterfaceMethodModifierContext):
pass
# Exit a parse tree produced by Java9Parser#interfaceMethodModifier.
def exitInterfaceMethodModifier(self, ctx:Java9Parser.InterfaceMethodModifierContext):
pass
# Enter a parse tree produced by Java9Parser#annotationTypeDeclaration.
def enterAnnotationTypeDeclaration(self, ctx:Java9Parser.AnnotationTypeDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#annotationTypeDeclaration.
def exitAnnotationTypeDeclaration(self, ctx:Java9Parser.AnnotationTypeDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#annotationTypeBody.
def enterAnnotationTypeBody(self, ctx:Java9Parser.AnnotationTypeBodyContext):
pass
# Exit a parse tree produced by Java9Parser#annotationTypeBody.
def exitAnnotationTypeBody(self, ctx:Java9Parser.AnnotationTypeBodyContext):
pass
# Enter a parse tree produced by Java9Parser#annotationTypeMemberDeclaration.
def enterAnnotationTypeMemberDeclaration(self, ctx:Java9Parser.AnnotationTypeMemberDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#annotationTypeMemberDeclaration.
def exitAnnotationTypeMemberDeclaration(self, ctx:Java9Parser.AnnotationTypeMemberDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#annotationTypeElementDeclaration.
def enterAnnotationTypeElementDeclaration(self, ctx:Java9Parser.AnnotationTypeElementDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#annotationTypeElementDeclaration.
def exitAnnotationTypeElementDeclaration(self, ctx:Java9Parser.AnnotationTypeElementDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#annotationTypeElementModifier.
def enterAnnotationTypeElementModifier(self, ctx:Java9Parser.AnnotationTypeElementModifierContext):
pass
# Exit a parse tree produced by Java9Parser#annotationTypeElementModifier.
def exitAnnotationTypeElementModifier(self, ctx:Java9Parser.AnnotationTypeElementModifierContext):
pass
# Enter a parse tree produced by Java9Parser#defaultValue.
def enterDefaultValue(self, ctx:Java9Parser.DefaultValueContext):
pass
# Exit a parse tree produced by Java9Parser#defaultValue.
def exitDefaultValue(self, ctx:Java9Parser.DefaultValueContext):
pass
# Enter a parse tree produced by Java9Parser#annotation.
def enterAnnotation(self, ctx:Java9Parser.AnnotationContext):
pass
# Exit a parse tree produced by Java9Parser#annotation.
def exitAnnotation(self, ctx:Java9Parser.AnnotationContext):
pass
# Enter a parse tree produced by Java9Parser#normalAnnotation.
def enterNormalAnnotation(self, ctx:Java9Parser.NormalAnnotationContext):
pass
# Exit a parse tree produced by Java9Parser#normalAnnotation.
def exitNormalAnnotation(self, ctx:Java9Parser.NormalAnnotationContext):
pass
# Enter a parse tree produced by Java9Parser#elementValuePairList.
def enterElementValuePairList(self, ctx:Java9Parser.ElementValuePairListContext):
pass
# Exit a parse tree produced by Java9Parser#elementValuePairList.
def exitElementValuePairList(self, ctx:Java9Parser.ElementValuePairListContext):
pass
# Enter a parse tree produced by Java9Parser#elementValuePair.
def enterElementValuePair(self, ctx:Java9Parser.ElementValuePairContext):
pass
# Exit a parse tree produced by Java9Parser#elementValuePair.
def exitElementValuePair(self, ctx:Java9Parser.ElementValuePairContext):
pass
# Enter a parse tree produced by Java9Parser#elementValue.
def enterElementValue(self, ctx:Java9Parser.ElementValueContext):
pass
# Exit a parse tree produced by Java9Parser#elementValue.
def exitElementValue(self, ctx:Java9Parser.ElementValueContext):
pass
# Enter a parse tree produced by Java9Parser#elementValueArrayInitializer.
def enterElementValueArrayInitializer(self, ctx:Java9Parser.ElementValueArrayInitializerContext):
pass
# Exit a parse tree produced by Java9Parser#elementValueArrayInitializer.
def exitElementValueArrayInitializer(self, ctx:Java9Parser.ElementValueArrayInitializerContext):
pass
# Enter a parse tree produced by Java9Parser#elementValueList.
def enterElementValueList(self, ctx:Java9Parser.ElementValueListContext):
pass
# Exit a parse tree produced by Java9Parser#elementValueList.
def exitElementValueList(self, ctx:Java9Parser.ElementValueListContext):
pass
# Enter a parse tree produced by Java9Parser#markerAnnotation.
def enterMarkerAnnotation(self, ctx:Java9Parser.MarkerAnnotationContext):
pass
# Exit a parse tree produced by Java9Parser#markerAnnotation.
def exitMarkerAnnotation(self, ctx:Java9Parser.MarkerAnnotationContext):
pass
# Enter a parse tree produced by Java9Parser#singleElementAnnotation.
def enterSingleElementAnnotation(self, ctx:Java9Parser.SingleElementAnnotationContext):
pass
# Exit a parse tree produced by Java9Parser#singleElementAnnotation.
def exitSingleElementAnnotation(self, ctx:Java9Parser.SingleElementAnnotationContext):
pass
# Enter a parse tree produced by Java9Parser#arrayInitializer.
def enterArrayInitializer(self, ctx:Java9Parser.ArrayInitializerContext):
pass
# Exit a parse tree produced by Java9Parser#arrayInitializer.
def exitArrayInitializer(self, ctx:Java9Parser.ArrayInitializerContext):
pass
# Enter a parse tree produced by Java9Parser#variableInitializerList.
def enterVariableInitializerList(self, ctx:Java9Parser.VariableInitializerListContext):
pass
# Exit a parse tree produced by Java9Parser#variableInitializerList.
def exitVariableInitializerList(self, ctx:Java9Parser.VariableInitializerListContext):
pass
# Enter a parse tree produced by Java9Parser#block.
def enterBlock(self, ctx:Java9Parser.BlockContext):
pass
# Exit a parse tree produced by Java9Parser#block.
def exitBlock(self, ctx:Java9Parser.BlockContext):
pass
# Enter a parse tree produced by Java9Parser#blockStatements.
def enterBlockStatements(self, ctx:Java9Parser.BlockStatementsContext):
pass
# Exit a parse tree produced by Java9Parser#blockStatements.
def exitBlockStatements(self, ctx:Java9Parser.BlockStatementsContext):
pass
# Enter a parse tree produced by Java9Parser#blockStatement.
def enterBlockStatement(self, ctx:Java9Parser.BlockStatementContext):
pass
# Exit a parse tree produced by Java9Parser#blockStatement.
def exitBlockStatement(self, ctx:Java9Parser.BlockStatementContext):
pass
# Enter a parse tree produced by Java9Parser#localVariableDeclarationStatement.
def enterLocalVariableDeclarationStatement(self, ctx:Java9Parser.LocalVariableDeclarationStatementContext):
pass
# Exit a parse tree produced by Java9Parser#localVariableDeclarationStatement.
def exitLocalVariableDeclarationStatement(self, ctx:Java9Parser.LocalVariableDeclarationStatementContext):
pass
# Enter a parse tree produced by Java9Parser#localVariableDeclaration.
def enterLocalVariableDeclaration(self, ctx:Java9Parser.LocalVariableDeclarationContext):
pass
# Exit a parse tree produced by Java9Parser#localVariableDeclaration.
def exitLocalVariableDeclaration(self, ctx:Java9Parser.LocalVariableDeclarationContext):
pass
# Enter a parse tree produced by Java9Parser#statement.
def enterStatement(self, ctx:Java9Parser.StatementContext):
pass
# Exit a parse tree produced by Java9Parser#statement.
def exitStatement(self, ctx:Java9Parser.StatementContext):
pass
# Enter a parse tree produced by Java9Parser#statementNoShortIf.
def enterStatementNoShortIf(self, ctx:Java9Parser.StatementNoShortIfContext):
pass
# Exit a parse tree produced by Java9Parser#statementNoShortIf.
def exitStatementNoShortIf(self, ctx:Java9Parser.StatementNoShortIfContext):
pass
# Enter a parse tree produced by Java9Parser#statementWithoutTrailingSubstatement.
def enterStatementWithoutTrailingSubstatement(self, ctx:Java9Parser.StatementWithoutTrailingSubstatementContext):
pass
# Exit a parse tree produced by Java9Parser#statementWithoutTrailingSubstatement.
def exitStatementWithoutTrailingSubstatement(self, ctx:Java9Parser.StatementWithoutTrailingSubstatementContext):
pass
# Enter a parse tree produced by Java9Parser#emptyStatement.
def enterEmptyStatement(self, ctx:Java9Parser.EmptyStatementContext):
pass
# Exit a parse tree produced by Java9Parser#emptyStatement.
def exitEmptyStatement(self, ctx:Java9Parser.EmptyStatementContext):
pass
# Enter a parse tree produced by Java9Parser#labeledStatement.
def enterLabeledStatement(self, ctx:Java9Parser.LabeledStatementContext):
pass
# Exit a parse tree produced by Java9Parser#labeledStatement.
def exitLabeledStatement(self, ctx:Java9Parser.LabeledStatementContext):
pass
# Enter a parse tree produced by Java9Parser#labeledStatementNoShortIf.
def enterLabeledStatementNoShortIf(self, ctx:Java9Parser.LabeledStatementNoShortIfContext):
pass
# Exit a parse tree produced by Java9Parser#labeledStatementNoShortIf.
def exitLabeledStatementNoShortIf(self, ctx:Java9Parser.LabeledStatementNoShortIfContext):
pass
# Enter a parse tree produced by Java9Parser#expressionStatement.
def enterExpressionStatement(self, ctx:Java9Parser.ExpressionStatementContext):
pass
# Exit a parse tree produced by Java9Parser#expressionStatement.
def exitExpressionStatement(self, ctx:Java9Parser.ExpressionStatementContext):
pass
# Enter a parse tree produced by Java9Parser#statementExpression.
def enterStatementExpression(self, ctx:Java9Parser.StatementExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#statementExpression.
def exitStatementExpression(self, ctx:Java9Parser.StatementExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#ifThenStatement.
def enterIfThenStatement(self, ctx:Java9Parser.IfThenStatementContext):
pass
# Exit a parse tree produced by Java9Parser#ifThenStatement.
def exitIfThenStatement(self, ctx:Java9Parser.IfThenStatementContext):
pass
# Enter a parse tree produced by Java9Parser#ifThenElseStatement.
def enterIfThenElseStatement(self, ctx:Java9Parser.IfThenElseStatementContext):
pass
# Exit a parse tree produced by Java9Parser#ifThenElseStatement.
def exitIfThenElseStatement(self, ctx:Java9Parser.IfThenElseStatementContext):
pass
# Enter a parse tree produced by Java9Parser#ifThenElseStatementNoShortIf.
def enterIfThenElseStatementNoShortIf(self, ctx:Java9Parser.IfThenElseStatementNoShortIfContext):
pass
# Exit a parse tree produced by Java9Parser#ifThenElseStatementNoShortIf.
def exitIfThenElseStatementNoShortIf(self, ctx:Java9Parser.IfThenElseStatementNoShortIfContext):
pass
# Enter a parse tree produced by Java9Parser#assertStatement.
def enterAssertStatement(self, ctx:Java9Parser.AssertStatementContext):
pass
# Exit a parse tree produced by Java9Parser#assertStatement.
def exitAssertStatement(self, ctx:Java9Parser.AssertStatementContext):
pass
# Enter a parse tree produced by Java9Parser#switchStatement.
def enterSwitchStatement(self, ctx:Java9Parser.SwitchStatementContext):
pass
# Exit a parse tree produced by Java9Parser#switchStatement.
def exitSwitchStatement(self, ctx:Java9Parser.SwitchStatementContext):
pass
# Enter a parse tree produced by Java9Parser#switchBlock.
def enterSwitchBlock(self, ctx:Java9Parser.SwitchBlockContext):
pass
# Exit a parse tree produced by Java9Parser#switchBlock.
def exitSwitchBlock(self, ctx:Java9Parser.SwitchBlockContext):
pass
# Enter a parse tree produced by Java9Parser#switchBlockStatementGroup.
def enterSwitchBlockStatementGroup(self, ctx:Java9Parser.SwitchBlockStatementGroupContext):
pass
# Exit a parse tree produced by Java9Parser#switchBlockStatementGroup.
def exitSwitchBlockStatementGroup(self, ctx:Java9Parser.SwitchBlockStatementGroupContext):
pass
# Enter a parse tree produced by Java9Parser#switchLabels.
def enterSwitchLabels(self, ctx:Java9Parser.SwitchLabelsContext):
pass
# Exit a parse tree produced by Java9Parser#switchLabels.
def exitSwitchLabels(self, ctx:Java9Parser.SwitchLabelsContext):
pass
# Enter a parse tree produced by Java9Parser#switchLabel.
def enterSwitchLabel(self, ctx:Java9Parser.SwitchLabelContext):
pass
# Exit a parse tree produced by Java9Parser#switchLabel.
def exitSwitchLabel(self, ctx:Java9Parser.SwitchLabelContext):
pass
# Enter a parse tree produced by Java9Parser#enumConstantName.
def enterEnumConstantName(self, ctx:Java9Parser.EnumConstantNameContext):
pass
# Exit a parse tree produced by Java9Parser#enumConstantName.
def exitEnumConstantName(self, ctx:Java9Parser.EnumConstantNameContext):
pass
# Enter a parse tree produced by Java9Parser#whileStatement.
def enterWhileStatement(self, ctx:Java9Parser.WhileStatementContext):
pass
# Exit a parse tree produced by Java9Parser#whileStatement.
def exitWhileStatement(self, ctx:Java9Parser.WhileStatementContext):
pass
# Enter a parse tree produced by Java9Parser#whileStatementNoShortIf.
def enterWhileStatementNoShortIf(self, ctx:Java9Parser.WhileStatementNoShortIfContext):
pass
# Exit a parse tree produced by Java9Parser#whileStatementNoShortIf.
def exitWhileStatementNoShortIf(self, ctx:Java9Parser.WhileStatementNoShortIfContext):
pass
# Enter a parse tree produced by Java9Parser#doStatement.
def enterDoStatement(self, ctx:Java9Parser.DoStatementContext):
pass
# Exit a parse tree produced by Java9Parser#doStatement.
def exitDoStatement(self, ctx:Java9Parser.DoStatementContext):
pass
# Enter a parse tree produced by Java9Parser#forStatement.
def enterForStatement(self, ctx:Java9Parser.ForStatementContext):
pass
# Exit a parse tree produced by Java9Parser#forStatement.
def exitForStatement(self, ctx:Java9Parser.ForStatementContext):
pass
# Enter a parse tree produced by Java9Parser#forStatementNoShortIf.
def enterForStatementNoShortIf(self, ctx:Java9Parser.ForStatementNoShortIfContext):
pass
# Exit a parse tree produced by Java9Parser#forStatementNoShortIf.
def exitForStatementNoShortIf(self, ctx:Java9Parser.ForStatementNoShortIfContext):
pass
# Enter a parse tree produced by Java9Parser#basicForStatement.
def enterBasicForStatement(self, ctx:Java9Parser.BasicForStatementContext):
pass
# Exit a parse tree produced by Java9Parser#basicForStatement.
def exitBasicForStatement(self, ctx:Java9Parser.BasicForStatementContext):
pass
# Enter a parse tree produced by Java9Parser#basicForStatementNoShortIf.
def enterBasicForStatementNoShortIf(self, ctx:Java9Parser.BasicForStatementNoShortIfContext):
pass
# Exit a parse tree produced by Java9Parser#basicForStatementNoShortIf.
def exitBasicForStatementNoShortIf(self, ctx:Java9Parser.BasicForStatementNoShortIfContext):
pass
# Enter a parse tree produced by Java9Parser#forInit.
def enterForInit(self, ctx:Java9Parser.ForInitContext):
pass
# Exit a parse tree produced by Java9Parser#forInit.
def exitForInit(self, ctx:Java9Parser.ForInitContext):
pass
# Enter a parse tree produced by Java9Parser#forUpdate.
def enterForUpdate(self, ctx:Java9Parser.ForUpdateContext):
pass
# Exit a parse tree produced by Java9Parser#forUpdate.
def exitForUpdate(self, ctx:Java9Parser.ForUpdateContext):
pass
# Enter a parse tree produced by Java9Parser#statementExpressionList.
def enterStatementExpressionList(self, ctx:Java9Parser.StatementExpressionListContext):
pass
# Exit a parse tree produced by Java9Parser#statementExpressionList.
def exitStatementExpressionList(self, ctx:Java9Parser.StatementExpressionListContext):
pass
# Enter a parse tree produced by Java9Parser#enhancedForStatement.
def enterEnhancedForStatement(self, ctx:Java9Parser.EnhancedForStatementContext):
pass
# Exit a parse tree produced by Java9Parser#enhancedForStatement.
def exitEnhancedForStatement(self, ctx:Java9Parser.EnhancedForStatementContext):
pass
# Enter a parse tree produced by Java9Parser#enhancedForStatementNoShortIf.
def enterEnhancedForStatementNoShortIf(self, ctx:Java9Parser.EnhancedForStatementNoShortIfContext):
pass
# Exit a parse tree produced by Java9Parser#enhancedForStatementNoShortIf.
def exitEnhancedForStatementNoShortIf(self, ctx:Java9Parser.EnhancedForStatementNoShortIfContext):
pass
# Enter a parse tree produced by Java9Parser#breakStatement.
def enterBreakStatement(self, ctx:Java9Parser.BreakStatementContext):
pass
# Exit a parse tree produced by Java9Parser#breakStatement.
def exitBreakStatement(self, ctx:Java9Parser.BreakStatementContext):
pass
# Enter a parse tree produced by Java9Parser#continueStatement.
def enterContinueStatement(self, ctx:Java9Parser.ContinueStatementContext):
pass
# Exit a parse tree produced by Java9Parser#continueStatement.
def exitContinueStatement(self, ctx:Java9Parser.ContinueStatementContext):
pass
# Enter a parse tree produced by Java9Parser#returnStatement.
def enterReturnStatement(self, ctx:Java9Parser.ReturnStatementContext):
pass
# Exit a parse tree produced by Java9Parser#returnStatement.
def exitReturnStatement(self, ctx:Java9Parser.ReturnStatementContext):
pass
# Enter a parse tree produced by Java9Parser#throwStatement.
def enterThrowStatement(self, ctx:Java9Parser.ThrowStatementContext):
pass
# Exit a parse tree produced by Java9Parser#throwStatement.
def exitThrowStatement(self, ctx:Java9Parser.ThrowStatementContext):
pass
# Enter a parse tree produced by Java9Parser#synchronizedStatement.
def enterSynchronizedStatement(self, ctx:Java9Parser.SynchronizedStatementContext):
pass
# Exit a parse tree produced by Java9Parser#synchronizedStatement.
def exitSynchronizedStatement(self, ctx:Java9Parser.SynchronizedStatementContext):
pass
# Enter a parse tree produced by Java9Parser#tryStatement.
def enterTryStatement(self, ctx:Java9Parser.TryStatementContext):
pass
# Exit a parse tree produced by Java9Parser#tryStatement.
def exitTryStatement(self, ctx:Java9Parser.TryStatementContext):
pass
# Enter a parse tree produced by Java9Parser#catches.
def enterCatches(self, ctx:Java9Parser.CatchesContext):
pass
# Exit a parse tree produced by Java9Parser#catches.
def exitCatches(self, ctx:Java9Parser.CatchesContext):
pass
# Enter a parse tree produced by Java9Parser#catchClause.
def enterCatchClause(self, ctx:Java9Parser.CatchClauseContext):
pass
# Exit a parse tree produced by Java9Parser#catchClause.
def exitCatchClause(self, ctx:Java9Parser.CatchClauseContext):
pass
# Enter a parse tree produced by Java9Parser#catchFormalParameter.
def enterCatchFormalParameter(self, ctx:Java9Parser.CatchFormalParameterContext):
pass
# Exit a parse tree produced by Java9Parser#catchFormalParameter.
def exitCatchFormalParameter(self, ctx:Java9Parser.CatchFormalParameterContext):
pass
# Enter a parse tree produced by Java9Parser#catchType.
def enterCatchType(self, ctx:Java9Parser.CatchTypeContext):
pass
# Exit a parse tree produced by Java9Parser#catchType.
def exitCatchType(self, ctx:Java9Parser.CatchTypeContext):
pass
# Enter a parse tree produced by Java9Parser#finally_.
def enterFinally_(self, ctx:Java9Parser.Finally_Context):
pass
# Exit a parse tree produced by Java9Parser#finally_.
def exitFinally_(self, ctx:Java9Parser.Finally_Context):
pass
# Enter a parse tree produced by Java9Parser#tryWithResourcesStatement.
def enterTryWithResourcesStatement(self, ctx:Java9Parser.TryWithResourcesStatementContext):
pass
# Exit a parse tree produced by Java9Parser#tryWithResourcesStatement.
def exitTryWithResourcesStatement(self, ctx:Java9Parser.TryWithResourcesStatementContext):
pass
# Enter a parse tree produced by Java9Parser#resourceSpecification.
def enterResourceSpecification(self, ctx:Java9Parser.ResourceSpecificationContext):
pass
# Exit a parse tree produced by Java9Parser#resourceSpecification.
def exitResourceSpecification(self, ctx:Java9Parser.ResourceSpecificationContext):
pass
# Enter a parse tree produced by Java9Parser#resourceList.
def enterResourceList(self, ctx:Java9Parser.ResourceListContext):
pass
# Exit a parse tree produced by Java9Parser#resourceList.
def exitResourceList(self, ctx:Java9Parser.ResourceListContext):
pass
# Enter a parse tree produced by Java9Parser#resource.
def enterResource(self, ctx:Java9Parser.ResourceContext):
pass
# Exit a parse tree produced by Java9Parser#resource.
def exitResource(self, ctx:Java9Parser.ResourceContext):
pass
# Enter a parse tree produced by Java9Parser#variableAccess.
def enterVariableAccess(self, ctx:Java9Parser.VariableAccessContext):
pass
# Exit a parse tree produced by Java9Parser#variableAccess.
def exitVariableAccess(self, ctx:Java9Parser.VariableAccessContext):
pass
# Enter a parse tree produced by Java9Parser#primary.
def enterPrimary(self, ctx:Java9Parser.PrimaryContext):
pass
# Exit a parse tree produced by Java9Parser#primary.
def exitPrimary(self, ctx:Java9Parser.PrimaryContext):
pass
# Enter a parse tree produced by Java9Parser#primaryNoNewArray.
def enterPrimaryNoNewArray(self, ctx:Java9Parser.PrimaryNoNewArrayContext):
pass
# Exit a parse tree produced by Java9Parser#primaryNoNewArray.
def exitPrimaryNoNewArray(self, ctx:Java9Parser.PrimaryNoNewArrayContext):
pass
# Enter a parse tree produced by Java9Parser#primaryNoNewArray_lf_arrayAccess.
def enterPrimaryNoNewArray_lf_arrayAccess(self, ctx:Java9Parser.PrimaryNoNewArray_lf_arrayAccessContext):
pass
# Exit a parse tree produced by Java9Parser#primaryNoNewArray_lf_arrayAccess.
def exitPrimaryNoNewArray_lf_arrayAccess(self, ctx:Java9Parser.PrimaryNoNewArray_lf_arrayAccessContext):
pass
# Enter a parse tree produced by Java9Parser#primaryNoNewArray_lfno_arrayAccess.
def enterPrimaryNoNewArray_lfno_arrayAccess(self, ctx:Java9Parser.PrimaryNoNewArray_lfno_arrayAccessContext):
pass
# Exit a parse tree produced by Java9Parser#primaryNoNewArray_lfno_arrayAccess.
def exitPrimaryNoNewArray_lfno_arrayAccess(self, ctx:Java9Parser.PrimaryNoNewArray_lfno_arrayAccessContext):
pass
# Enter a parse tree produced by Java9Parser#primaryNoNewArray_lf_primary.
def enterPrimaryNoNewArray_lf_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lf_primaryContext):
pass
# Exit a parse tree produced by Java9Parser#primaryNoNewArray_lf_primary.
def exitPrimaryNoNewArray_lf_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lf_primaryContext):
pass
# Enter a parse tree produced by Java9Parser#primaryNoNewArray_lf_primary_lf_arrayAccess_lf_primary.
def enterPrimaryNoNewArray_lf_primary_lf_arrayAccess_lf_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lf_primary_lf_arrayAccess_lf_primaryContext):
pass
# Exit a parse tree produced by Java9Parser#primaryNoNewArray_lf_primary_lf_arrayAccess_lf_primary.
def exitPrimaryNoNewArray_lf_primary_lf_arrayAccess_lf_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lf_primary_lf_arrayAccess_lf_primaryContext):
pass
# Enter a parse tree produced by Java9Parser#primaryNoNewArray_lf_primary_lfno_arrayAccess_lf_primary.
def enterPrimaryNoNewArray_lf_primary_lfno_arrayAccess_lf_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lf_primary_lfno_arrayAccess_lf_primaryContext):
pass
# Exit a parse tree produced by Java9Parser#primaryNoNewArray_lf_primary_lfno_arrayAccess_lf_primary.
def exitPrimaryNoNewArray_lf_primary_lfno_arrayAccess_lf_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lf_primary_lfno_arrayAccess_lf_primaryContext):
pass
# Enter a parse tree produced by Java9Parser#primaryNoNewArray_lfno_primary.
def enterPrimaryNoNewArray_lfno_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lfno_primaryContext):
pass
# Exit a parse tree produced by Java9Parser#primaryNoNewArray_lfno_primary.
def exitPrimaryNoNewArray_lfno_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lfno_primaryContext):
pass
# Enter a parse tree produced by Java9Parser#primaryNoNewArray_lfno_primary_lf_arrayAccess_lfno_primary.
def enterPrimaryNoNewArray_lfno_primary_lf_arrayAccess_lfno_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lfno_primary_lf_arrayAccess_lfno_primaryContext):
pass
# Exit a parse tree produced by Java9Parser#primaryNoNewArray_lfno_primary_lf_arrayAccess_lfno_primary.
def exitPrimaryNoNewArray_lfno_primary_lf_arrayAccess_lfno_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lfno_primary_lf_arrayAccess_lfno_primaryContext):
pass
# Enter a parse tree produced by Java9Parser#primaryNoNewArray_lfno_primary_lfno_arrayAccess_lfno_primary.
def enterPrimaryNoNewArray_lfno_primary_lfno_arrayAccess_lfno_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lfno_primary_lfno_arrayAccess_lfno_primaryContext):
pass
# Exit a parse tree produced by Java9Parser#primaryNoNewArray_lfno_primary_lfno_arrayAccess_lfno_primary.
def exitPrimaryNoNewArray_lfno_primary_lfno_arrayAccess_lfno_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lfno_primary_lfno_arrayAccess_lfno_primaryContext):
pass
# Enter a parse tree produced by Java9Parser#classLiteral.
def enterClassLiteral(self, ctx:Java9Parser.ClassLiteralContext):
pass
# Exit a parse tree produced by Java9Parser#classLiteral.
def exitClassLiteral(self, ctx:Java9Parser.ClassLiteralContext):
pass
# Enter a parse tree produced by Java9Parser#classInstanceCreationExpression.
def enterClassInstanceCreationExpression(self, ctx:Java9Parser.ClassInstanceCreationExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#classInstanceCreationExpression.
def exitClassInstanceCreationExpression(self, ctx:Java9Parser.ClassInstanceCreationExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#classInstanceCreationExpression_lf_primary.
def enterClassInstanceCreationExpression_lf_primary(self, ctx:Java9Parser.ClassInstanceCreationExpression_lf_primaryContext):
pass
# Exit a parse tree produced by Java9Parser#classInstanceCreationExpression_lf_primary.
def exitClassInstanceCreationExpression_lf_primary(self, ctx:Java9Parser.ClassInstanceCreationExpression_lf_primaryContext):
pass
# Enter a parse tree produced by Java9Parser#classInstanceCreationExpression_lfno_primary.
def enterClassInstanceCreationExpression_lfno_primary(self, ctx:Java9Parser.ClassInstanceCreationExpression_lfno_primaryContext):
pass
# Exit a parse tree produced by Java9Parser#classInstanceCreationExpression_lfno_primary.
def exitClassInstanceCreationExpression_lfno_primary(self, ctx:Java9Parser.ClassInstanceCreationExpression_lfno_primaryContext):
pass
# Enter a parse tree produced by Java9Parser#typeArgumentsOrDiamond.
def enterTypeArgumentsOrDiamond(self, ctx:Java9Parser.TypeArgumentsOrDiamondContext):
pass
# Exit a parse tree produced by Java9Parser#typeArgumentsOrDiamond.
def exitTypeArgumentsOrDiamond(self, ctx:Java9Parser.TypeArgumentsOrDiamondContext):
pass
# Enter a parse tree produced by Java9Parser#fieldAccess.
def enterFieldAccess(self, ctx:Java9Parser.FieldAccessContext):
pass
# Exit a parse tree produced by Java9Parser#fieldAccess.
def exitFieldAccess(self, ctx:Java9Parser.FieldAccessContext):
pass
# Enter a parse tree produced by Java9Parser#fieldAccess_lf_primary.
def enterFieldAccess_lf_primary(self, ctx:Java9Parser.FieldAccess_lf_primaryContext):
pass
# Exit a parse tree produced by Java9Parser#fieldAccess_lf_primary.
def exitFieldAccess_lf_primary(self, ctx:Java9Parser.FieldAccess_lf_primaryContext):
pass
# Enter a parse tree produced by Java9Parser#fieldAccess_lfno_primary.
def enterFieldAccess_lfno_primary(self, ctx:Java9Parser.FieldAccess_lfno_primaryContext):
pass
# Exit a parse tree produced by Java9Parser#fieldAccess_lfno_primary.
def exitFieldAccess_lfno_primary(self, ctx:Java9Parser.FieldAccess_lfno_primaryContext):
pass
# Enter a parse tree produced by Java9Parser#arrayAccess.
def enterArrayAccess(self, ctx:Java9Parser.ArrayAccessContext):
pass
# Exit a parse tree produced by Java9Parser#arrayAccess.
def exitArrayAccess(self, ctx:Java9Parser.ArrayAccessContext):
pass
# Enter a parse tree produced by Java9Parser#arrayAccess_lf_primary.
def enterArrayAccess_lf_primary(self, ctx:Java9Parser.ArrayAccess_lf_primaryContext):
pass
# Exit a parse tree produced by Java9Parser#arrayAccess_lf_primary.
def exitArrayAccess_lf_primary(self, ctx:Java9Parser.ArrayAccess_lf_primaryContext):
pass
# Enter a parse tree produced by Java9Parser#arrayAccess_lfno_primary.
def enterArrayAccess_lfno_primary(self, ctx:Java9Parser.ArrayAccess_lfno_primaryContext):
pass
# Exit a parse tree produced by Java9Parser#arrayAccess_lfno_primary.
def exitArrayAccess_lfno_primary(self, ctx:Java9Parser.ArrayAccess_lfno_primaryContext):
pass
# Enter a parse tree produced by Java9Parser#methodInvocation.
def enterMethodInvocation(self, ctx:Java9Parser.MethodInvocationContext):
pass
# Exit a parse tree produced by Java9Parser#methodInvocation.
def exitMethodInvocation(self, ctx:Java9Parser.MethodInvocationContext):
pass
# Enter a parse tree produced by Java9Parser#methodInvocation_lf_primary.
def enterMethodInvocation_lf_primary(self, ctx:Java9Parser.MethodInvocation_lf_primaryContext):
pass
# Exit a parse tree produced by Java9Parser#methodInvocation_lf_primary.
def exitMethodInvocation_lf_primary(self, ctx:Java9Parser.MethodInvocation_lf_primaryContext):
pass
# Enter a parse tree produced by Java9Parser#methodInvocation_lfno_primary.
def enterMethodInvocation_lfno_primary(self, ctx:Java9Parser.MethodInvocation_lfno_primaryContext):
pass
# Exit a parse tree produced by Java9Parser#methodInvocation_lfno_primary.
def exitMethodInvocation_lfno_primary(self, ctx:Java9Parser.MethodInvocation_lfno_primaryContext):
pass
# Enter a parse tree produced by Java9Parser#argumentList.
def enterArgumentList(self, ctx:Java9Parser.ArgumentListContext):
pass
# Exit a parse tree produced by Java9Parser#argumentList.
def exitArgumentList(self, ctx:Java9Parser.ArgumentListContext):
pass
# Enter a parse tree produced by Java9Parser#methodReference.
def enterMethodReference(self, ctx:Java9Parser.MethodReferenceContext):
pass
# Exit a parse tree produced by Java9Parser#methodReference.
def exitMethodReference(self, ctx:Java9Parser.MethodReferenceContext):
pass
# Enter a parse tree produced by Java9Parser#methodReference_lf_primary.
def enterMethodReference_lf_primary(self, ctx:Java9Parser.MethodReference_lf_primaryContext):
pass
# Exit a parse tree produced by Java9Parser#methodReference_lf_primary.
def exitMethodReference_lf_primary(self, ctx:Java9Parser.MethodReference_lf_primaryContext):
pass
# Enter a parse tree produced by Java9Parser#methodReference_lfno_primary.
def enterMethodReference_lfno_primary(self, ctx:Java9Parser.MethodReference_lfno_primaryContext):
pass
# Exit a parse tree produced by Java9Parser#methodReference_lfno_primary.
def exitMethodReference_lfno_primary(self, ctx:Java9Parser.MethodReference_lfno_primaryContext):
pass
# Enter a parse tree produced by Java9Parser#arrayCreationExpression.
def enterArrayCreationExpression(self, ctx:Java9Parser.ArrayCreationExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#arrayCreationExpression.
def exitArrayCreationExpression(self, ctx:Java9Parser.ArrayCreationExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#dimExprs.
def enterDimExprs(self, ctx:Java9Parser.DimExprsContext):
pass
# Exit a parse tree produced by Java9Parser#dimExprs.
def exitDimExprs(self, ctx:Java9Parser.DimExprsContext):
pass
# Enter a parse tree produced by Java9Parser#dimExpr.
def enterDimExpr(self, ctx:Java9Parser.DimExprContext):
pass
# Exit a parse tree produced by Java9Parser#dimExpr.
def exitDimExpr(self, ctx:Java9Parser.DimExprContext):
pass
# Enter a parse tree produced by Java9Parser#constantExpression.
def enterConstantExpression(self, ctx:Java9Parser.ConstantExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#constantExpression.
def exitConstantExpression(self, ctx:Java9Parser.ConstantExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#expression.
def enterExpression(self, ctx:Java9Parser.ExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#expression.
def exitExpression(self, ctx:Java9Parser.ExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#lambdaExpression.
def enterLambdaExpression(self, ctx:Java9Parser.LambdaExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#lambdaExpression.
def exitLambdaExpression(self, ctx:Java9Parser.LambdaExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#lambdaParameters.
def enterLambdaParameters(self, ctx:Java9Parser.LambdaParametersContext):
pass
# Exit a parse tree produced by Java9Parser#lambdaParameters.
def exitLambdaParameters(self, ctx:Java9Parser.LambdaParametersContext):
pass
# Enter a parse tree produced by Java9Parser#inferredFormalParameterList.
def enterInferredFormalParameterList(self, ctx:Java9Parser.InferredFormalParameterListContext):
pass
# Exit a parse tree produced by Java9Parser#inferredFormalParameterList.
def exitInferredFormalParameterList(self, ctx:Java9Parser.InferredFormalParameterListContext):
pass
# Enter a parse tree produced by Java9Parser#lambdaBody.
def enterLambdaBody(self, ctx:Java9Parser.LambdaBodyContext):
pass
# Exit a parse tree produced by Java9Parser#lambdaBody.
def exitLambdaBody(self, ctx:Java9Parser.LambdaBodyContext):
pass
# Enter a parse tree produced by Java9Parser#assignmentExpression.
def enterAssignmentExpression(self, ctx:Java9Parser.AssignmentExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#assignmentExpression.
def exitAssignmentExpression(self, ctx:Java9Parser.AssignmentExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#assignment.
def enterAssignment(self, ctx:Java9Parser.AssignmentContext):
pass
# Exit a parse tree produced by Java9Parser#assignment.
def exitAssignment(self, ctx:Java9Parser.AssignmentContext):
pass
# Enter a parse tree produced by Java9Parser#leftHandSide.
def enterLeftHandSide(self, ctx:Java9Parser.LeftHandSideContext):
pass
# Exit a parse tree produced by Java9Parser#leftHandSide.
def exitLeftHandSide(self, ctx:Java9Parser.LeftHandSideContext):
pass
# Enter a parse tree produced by Java9Parser#assignmentOperator.
def enterAssignmentOperator(self, ctx:Java9Parser.AssignmentOperatorContext):
pass
# Exit a parse tree produced by Java9Parser#assignmentOperator.
def exitAssignmentOperator(self, ctx:Java9Parser.AssignmentOperatorContext):
pass
# Enter a parse tree produced by Java9Parser#conditionalExpression.
def enterConditionalExpression(self, ctx:Java9Parser.ConditionalExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#conditionalExpression.
def exitConditionalExpression(self, ctx:Java9Parser.ConditionalExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#conditionalOrExpression.
def enterConditionalOrExpression(self, ctx:Java9Parser.ConditionalOrExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#conditionalOrExpression.
def exitConditionalOrExpression(self, ctx:Java9Parser.ConditionalOrExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#conditionalAndExpression.
def enterConditionalAndExpression(self, ctx:Java9Parser.ConditionalAndExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#conditionalAndExpression.
def exitConditionalAndExpression(self, ctx:Java9Parser.ConditionalAndExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#inclusiveOrExpression.
def enterInclusiveOrExpression(self, ctx:Java9Parser.InclusiveOrExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#inclusiveOrExpression.
def exitInclusiveOrExpression(self, ctx:Java9Parser.InclusiveOrExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#exclusiveOrExpression.
def enterExclusiveOrExpression(self, ctx:Java9Parser.ExclusiveOrExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#exclusiveOrExpression.
def exitExclusiveOrExpression(self, ctx:Java9Parser.ExclusiveOrExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#andExpression.
def enterAndExpression(self, ctx:Java9Parser.AndExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#andExpression.
def exitAndExpression(self, ctx:Java9Parser.AndExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#equalityExpression.
def enterEqualityExpression(self, ctx:Java9Parser.EqualityExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#equalityExpression.
def exitEqualityExpression(self, ctx:Java9Parser.EqualityExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#relationalExpression.
def enterRelationalExpression(self, ctx:Java9Parser.RelationalExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#relationalExpression.
def exitRelationalExpression(self, ctx:Java9Parser.RelationalExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#shiftExpression.
def enterShiftExpression(self, ctx:Java9Parser.ShiftExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#shiftExpression.
def exitShiftExpression(self, ctx:Java9Parser.ShiftExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#additiveExpression.
def enterAdditiveExpression(self, ctx:Java9Parser.AdditiveExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#additiveExpression.
def exitAdditiveExpression(self, ctx:Java9Parser.AdditiveExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#multiplicativeExpression.
def enterMultiplicativeExpression(self, ctx:Java9Parser.MultiplicativeExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#multiplicativeExpression.
def exitMultiplicativeExpression(self, ctx:Java9Parser.MultiplicativeExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#unaryExpression.
def enterUnaryExpression(self, ctx:Java9Parser.UnaryExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#unaryExpression.
def exitUnaryExpression(self, ctx:Java9Parser.UnaryExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#preIncrementExpression.
def enterPreIncrementExpression(self, ctx:Java9Parser.PreIncrementExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#preIncrementExpression.
def exitPreIncrementExpression(self, ctx:Java9Parser.PreIncrementExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#preDecrementExpression.
def enterPreDecrementExpression(self, ctx:Java9Parser.PreDecrementExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#preDecrementExpression.
def exitPreDecrementExpression(self, ctx:Java9Parser.PreDecrementExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#unaryExpressionNotPlusMinus.
def enterUnaryExpressionNotPlusMinus(self, ctx:Java9Parser.UnaryExpressionNotPlusMinusContext):
pass
# Exit a parse tree produced by Java9Parser#unaryExpressionNotPlusMinus.
def exitUnaryExpressionNotPlusMinus(self, ctx:Java9Parser.UnaryExpressionNotPlusMinusContext):
pass
# Enter a parse tree produced by Java9Parser#postfixExpression.
def enterPostfixExpression(self, ctx:Java9Parser.PostfixExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#postfixExpression.
def exitPostfixExpression(self, ctx:Java9Parser.PostfixExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#postIncrementExpression.
def enterPostIncrementExpression(self, ctx:Java9Parser.PostIncrementExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#postIncrementExpression.
def exitPostIncrementExpression(self, ctx:Java9Parser.PostIncrementExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#postIncrementExpression_lf_postfixExpression.
def enterPostIncrementExpression_lf_postfixExpression(self, ctx:Java9Parser.PostIncrementExpression_lf_postfixExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#postIncrementExpression_lf_postfixExpression.
def exitPostIncrementExpression_lf_postfixExpression(self, ctx:Java9Parser.PostIncrementExpression_lf_postfixExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#postDecrementExpression.
def enterPostDecrementExpression(self, ctx:Java9Parser.PostDecrementExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#postDecrementExpression.
def exitPostDecrementExpression(self, ctx:Java9Parser.PostDecrementExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#postDecrementExpression_lf_postfixExpression.
def enterPostDecrementExpression_lf_postfixExpression(self, ctx:Java9Parser.PostDecrementExpression_lf_postfixExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#postDecrementExpression_lf_postfixExpression.
def exitPostDecrementExpression_lf_postfixExpression(self, ctx:Java9Parser.PostDecrementExpression_lf_postfixExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#castExpression.
def enterCastExpression(self, ctx:Java9Parser.CastExpressionContext):
pass
# Exit a parse tree produced by Java9Parser#castExpression.
def exitCastExpression(self, ctx:Java9Parser.CastExpressionContext):
pass
# Enter a parse tree produced by Java9Parser#identifier.
def enterIdentifier(self, ctx:Java9Parser.IdentifierContext):
pass
# Exit a parse tree produced by Java9Parser#identifier.
def exitIdentifier(self, ctx:Java9Parser.IdentifierContext):
pass
del Java9Parser
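# A minimal usage sketch, kept in comments so the generated module stays
# import-only (Java9Lexer/Java9Parser are assumed to be the matching
# ANTLR-generated modules; Example.java is a hypothetical input file):
#
#   from antlr4 import FileStream, CommonTokenStream, ParseTreeWalker
#   from Java9Lexer import Java9Lexer
#   from Java9Parser import Java9Parser
#   from Java9Listener import Java9Listener
#
#   lexer = Java9Lexer(FileStream("Example.java"))
#   parser = Java9Parser(CommonTokenStream(lexer))
#   tree = parser.compilationUnit()
#   ParseTreeWalker.DEFAULT.walk(Java9Listener(), tree)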
|
python
|
import argparse
import pysam
from ragtag_utilities.utilities import reverse_complement
"""
Like bedtools getfasta, but use the gff ID attribute as the FASTA header and
always force strandedness.
"""
def main():
parser = argparse.ArgumentParser(description="Get fasta sequences from a GFF file")
parser.add_argument("fasta", metavar="<sequences.fasta>", type=str, help="AGP v2.1 file")
parser.add_argument("gff", metavar="<genes.gff>", type=str, help="FASTA file with component sequences to be scaffolded. must not be gzipped")
args = parser.parse_args()
fasta_file = args.fasta
gff_file = args.gff
x = pysam.FastaFile(fasta_file)
# Read the gff file
with open(gff_file, "r") as f:
for line in f:
if not line.startswith("#"):
                seqname, source, feature, start, end, score, strand, frame, attributes = line.rstrip().split("\t")
start, end = int(start)-1, int(end)
# Get the ID attribute
gff_id = None
tags = attributes.split(";")
for j in tags:
if j.startswith("ID="):
gff_id = j[3:]
if gff_id is None:
raise ValueError("Need an ID attribute for each gff line.")
print(">" + gff_id)
if strand == "+":
print(x.fetch(seqname, start, end))
elif strand == "-":
print(reverse_complement(x.fetch(seqname, start, end)))
else:
raise ValueError("Incorrect strand value")
x.close()
if __name__ == "__main__":
main()
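# Example invocation (file names are illustrative; pysam builds a .fai index
# for the FASTA on first open if one is missing):
#
#   python gff_to_fasta.py genome.fasta genes.gff > genes.fasta
#
# One FASTA record is written to stdout per GFF line, named by the line's ID
# attribute and reverse-complemented for '-' strand features.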
|
python
|
"""Runs inference on clips much longer than 1s, by running a sliding window and aggregating predictions."""
from argparse import ArgumentParser
from config_parser import get_config
import torch
import numpy as np
import librosa
from utils.misc import get_model
from tqdm import tqdm
import os
import glob
import json
def process_window(x, sr, audio_settings):
    x = librosa.util.fix_length(x, size=sr)
x = librosa.feature.melspectrogram(y=x, **audio_settings)
x = librosa.feature.mfcc(S=librosa.power_to_db(x), n_mfcc=audio_settings["n_mels"])
return x
@torch.no_grad()
def get_clip_pred(net, audio_path, win_len, stride, thresh, config, batch_size, device, mode, label_map) -> list:
"""Performs clip-level inference."""
net.eval()
preds_list = []
audio_settings = config["hparams"]["audio"]
sr = audio_settings["sr"]
win_len, stride = int(win_len * sr), int(stride * sr)
    x = librosa.load(audio_path, sr=sr)[0]
windows, result = [], []
slice_positions = np.arange(0, len(x) - win_len + 1, stride)
for b, i in enumerate(slice_positions):
windows.append(
process_window(x[i: i + win_len], sr, audio_settings)
)
if (not (b + 1) % batch_size) or (b + 1) == len(slice_positions):
windows = torch.from_numpy(np.stack(windows)).float().unsqueeze(1)
windows = windows.to(device)
out = net(windows)
conf, preds = out.softmax(1).max(1)
conf, preds = conf.cpu().numpy().reshape(-1, 1), preds.cpu().numpy().reshape(-1, 1)
starts = slice_positions[b - preds.shape[0] + 1: b + 1, None]
ends = starts + win_len
res = np.hstack([preds, conf, starts, ends])
res = res[res[:, 1] > thresh].tolist()
if len(res):
result.extend(res)
windows = []
#######################
# pred aggregation
#######################
pred = []
if len(result):
result = np.array(result)
if mode == "max":
pred = result[result[:, 1].argmax()][0]
if label_map is not None:
pred = label_map[str(int(pred))]
elif mode == "n_voting":
pred = np.bincount(result[:, 0].astype(int)).argmax()
if label_map is not None:
pred = label_map[str(int(pred))]
elif mode == "multi":
if label_map is not None:
pred = list(map(lambda a: [label_map[str(int(a[0]))], a[1], a[2], a[3]], result))
else:
pred = result.tolist()
return pred
def main(args):
######################
# create model
######################
config = get_config(args.conf)
model = get_model(config["hparams"]["model"])
######################
# load weights
######################
ckpt = torch.load(args.ckpt, map_location="cpu")
model.load_state_dict(ckpt["model_state_dict"])
######################
# setup data
######################
if os.path.isdir(args.inp):
data_list = glob.glob(os.path.join(args.inp, "*.wav"))
elif os.path.isfile(args.inp):
data_list = [args.inp]
######################
# run inference
######################
if args.device == "auto":
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
device = torch.device(args.device)
model = model.to(device)
label_map = None
if args.lmap:
with open(args.lmap, "r") as f:
label_map = json.load(f)
pred_dict = dict()
for file_path in data_list:
preds = get_clip_pred(model, file_path, args.wlen, args.stride, args.thresh, config, args.batch_size, device, args.mode, label_map)
pred_dict[file_path] = preds
os.makedirs(args.out, exist_ok=True)
out_path = os.path.join(args.out, "preds_clip.json")
with open(out_path, "w+") as f:
json.dump(pred_dict, f)
print(f"Saved preds to {out_path}")
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--conf", type=str, required=True, help="Path to config file. Will be used only to construct model and process audio.")
parser.add_argument("--ckpt", type=str, required=True, help="Path to checkpoint file.")
parser.add_argument("--inp", type=str, required=True, help="Path to input. Can be a path to a .wav file, or a path to a folder containing .wav files.")
parser.add_argument("--out", type=str, default="./", help="Path to output folder. Predictions will be stored in {out}/preds.json.")
parser.add_argument("--lmap", type=str, default=None, help="Path to label_map.json. If not provided, will save predictions as class indices instead of class names.")
parser.add_argument("--device", type=str, default="auto", help="One of auto, cpu, or cuda.")
parser.add_argument("--batch_size", type=int, default=1, help="Batch size for batch inference.")
parser.add_argument("--wlen", type=float, default=1.0, help="Window length. E.g. for wlen = 1, will make inference on 1s windows from the clip.")
parser.add_argument("--stride", type=float, default=0.2, help="By how much the sliding window will be shifted.")
parser.add_argument("--thresh", type=float, default=0.85, help="Confidence threshold above which preds will be counted.")
parser.add_argument("--mode", type=str, default="multi", help="""Prediction logic. One of: max, n_voting, multi.
-'max' simply checks the confidences of every predicted window in a clip and returns the most confident prediction as the output.
-'n_voting' returns the most frequent predicted class above the threshold.
-'multi' expects that there are multiple different keyword classes in the audio. For each audio, the output is a list of lists,
each sub-list being of the form [class, confidence, start, end].""")
args = parser.parse_args()
assert os.path.exists(args.inp), f"Could not find input {args.inp}"
main(args)
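# Example invocation (paths are illustrative):
#
#   python clip_inference.py --conf configs/kws.yaml --ckpt ckpt/best.pth \
#       --inp samples/ --out preds/ --lmap label_map.json --mode multi
#
# With --mode multi, preds/preds_clip.json maps each .wav path to a list of
# [class, confidence, start_sample, end_sample] detections above --thresh.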
|
python
|
import flask
import json
import inspect
from .handle import Resource
from .route import RouteFactory
from .utils import Logger
from .error import RequestError
from .db import TinyBDDatabase
class App(object):
def __init__(self, base_route, db=None):
self.base_route = base_route
self.debug = True
self.app = flask.Flask(__name__)
self.route_factory = RouteFactory(base_route)
self.routes = {}
        # fall back to the default TinyDB-backed store when no db is supplied
        self.db = db if db is not None else TinyBDDatabase('db.json')
self._create_index()
@self.app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers',
'Content-Type,Authorization')
response.headers.add('Access-Control-Allow-Methods',
'GET,PUT,POST,DELETE')
return response
def add_resource(self, resource):
if inspect.isclass(resource) and issubclass(resource, Resource):
instance = resource(self)
instance.on_create()
elif isinstance(resource, str):
instance = Resource(self, name=resource)
else:
raise ValueError(
'Resource should be a subclass of Resource or a string')
self.routes[instance.get_name()] = self._inspect_list(instance)[1]
self._inspect_post(instance)
self._inspect_get(instance)
self._inspect_delete(instance)
self._inspect_put(instance)
return instance
def _add_route(self, route, endpoint, as_name, method):
if inspect.ismethod(endpoint):
            # wrap the bound method in a plain function so its __name__ can be
            # overridden below; see the comments there for explanation.
cb = endpoint
def endpoint(*args, **kwargs):
return cb(*args, **kwargs)
def wrapper(*args, **kwargs):
try:
return endpoint(*args, **kwargs)
except RequestError as e:
return self._response_error(e.status, e.message)
if as_name is not None:
# this is done to avoid flask's "View function mapping is
# overwriting an existing endpoint function: endpoint" error.
            # but it requires that the function is not an instance method.
wrapper.__name__ = as_name
self.app.route(route, methods=[method])(wrapper)
Logger.debug("Create route for %s: %s", wrapper.__name__, route)
return wrapper.__name__, route, method
def _inspect_list(self, instance):
route = self.route_factory.create_list(instance)
name = instance.get_name()
def endpoint():
return self._response_middleware(instance.list())
return self._add_route(route, endpoint, '%s_list' % name, 'GET')
def _inspect_post(self, instance):
route = self.route_factory.create_post(instance)
name = instance.get_name()
def endpoint():
body = flask.request.get_json()
return self._response_middleware(instance.post(body))
return self._add_route(route, endpoint, '%s_post' % name, 'POST')
def _inspect_get(self, instance):
route = self.route_factory.create_get(instance)
name = instance.get_name()
def endpoint(pk):
return self._response_middleware(instance.get(int(pk)))
return self._add_route(route, endpoint, '%s_get' % name, 'GET')
def _inspect_delete(self, instance):
route = self.route_factory.create_delete(instance)
name = instance.get_name()
def endpoint(pk):
return self._response_middleware(instance.delete(int(pk)))
return self._add_route(route, endpoint, '%s_delete' % name, 'DELETE')
def _inspect_put(self, instance):
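        # NOTE: the delete route builder is reused here, presumably because
        # PUT and DELETE map to the same '/<resource>/<pk>' route shape.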
route = self.route_factory.create_delete(instance)
name = instance.get_name()
def endpoint(pk):
body = flask.request.get_json()
return self._response_middleware(instance.put(int(pk), body))
return self._add_route(route, endpoint, '%s_put' % name, 'PUT')
@staticmethod
def _response_middleware(response):
return flask.Response(json.dumps(response),
mimetype='application/json')
@staticmethod
def _response_error(status, message):
return flask.Response(json.dumps({
'message': message
}), mimetype='application/json', status=status)
def _create_index(self):
base_route = '/%s/' % self.route_factory.base_route
self._add_route(base_route, self._index, 'index', 'GET')
def _index(self):
return self._response_middleware(self.routes)
def run(self, host='127.0.0.1', port=5000):
self.app.run(host=host, port=port, threaded=True)
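# A minimal usage sketch (the resource name is illustrative):
#
#   app = App('api')              # serves an index of routes at /api/
#   app.add_resource('books')     # plain CRUD resource built from a string
#   app.run()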
|
python
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for clients that communicate with apps over a JSON RPC interface.
The JSON protocol expected by this module is:
.. code-block:: json
Request:
{
"id": <monotonically increasing integer containing the ID of
this request>
"method": <string containing the name of the method to execute>
"params": <JSON array containing the arguments to the method>
}
Response:
{
"id": <int id of request that this response maps to>,
"result": <Arbitrary JSON object containing the result of
executing the method. If the method could not be
executed or returned void, contains 'null'.>,
"error": <String containing the error thrown by executing the
method. If no error occurred, contains 'null'.>
"callback": <String that represents a callback ID used to
identify events associated with a particular
CallbackHandler object.>
}
"""
from builtins import str
import json
import socket
import threading
from mobly.controllers.android_device_lib import callback_handler
from mobly.controllers.android_device_lib import errors
# UID of the 'unknown' jsonrpc session. Will cause creation of a new session.
UNKNOWN_UID = -1
# Maximum time to wait for the socket to open on the device.
_SOCKET_CONNECTION_TIMEOUT = 60
# Maximum time to wait for a response message on the socket.
_SOCKET_READ_TIMEOUT = callback_handler.MAX_TIMEOUT
class Error(errors.DeviceError):
pass
class AppStartError(Error):
"""Raised when the app is not able to be started."""
class AppRestoreConnectionError(Error):
"""Raised when failed to restore app from disconnection."""
class ApiError(Error):
"""Raised when remote API reports an error."""
class ProtocolError(Error):
"""Raised when there is some error in exchanging data with server."""
NO_RESPONSE_FROM_HANDSHAKE = 'No response from handshake.'
NO_RESPONSE_FROM_SERVER = 'No response from server.'
MISMATCHED_API_ID = 'Mismatched API id.'
class JsonRpcCommand(object):
"""Commands that can be invoked on all jsonrpc clients.
INIT: Initializes a new session.
CONTINUE: Creates a connection.
"""
INIT = 'initiate'
CONTINUE = 'continue'
class JsonRpcClientBase(object):
"""Base class for jsonrpc clients that connect to remote servers.
    Connects to a remote device running a jsonrpc-compatible app. Before opening
    a connection, a port forward must be set up to go over USB. This can be done
    using adb.forward([local, remote]). Once the port has been forwarded, it can
    be used in this object as the port of communication.
Attributes:
host_port: (int) The host port of this RPC client.
device_port: (int) The device port of this RPC client.
app_name: (str) The user-visible name of the app being communicated
with.
uid: (int) The uid of this session.
"""
def __init__(self, app_name, ad):
"""
Args:
app_name: (str) The user-visible name of the app being communicated
with.
ad: (AndroidDevice) The device object associated with a client.
"""
self.host_port = None
self.device_port = None
self.app_name = app_name
        self._ad = ad
        self._adb = ad.adb  # used by clear_host_port for adb port forwarding
        self.log = self._ad.log
self.uid = None
self._client = None # prevent close errors on connect failure
self._conn = None
self._counter = None
self._lock = threading.Lock()
self._event_client = None
def __del__(self):
self.disconnect()
# Methods to be implemented by subclasses.
def start_app_and_connect(self):
"""Starts the server app on the android device and connects to it.
After this, the self.host_port and self.device_port attributes must be
set.
Must be implemented by subclasses.
Raises:
AppStartError: When the app was not able to be started.
"""
raise NotImplementedError()
def stop_app(self):
"""Kills any running instance of the app.
Must be implemented by subclasses.
"""
raise NotImplementedError()
def restore_app_connection(self, port=None):
"""Reconnects to the app after device USB was disconnected.
Instead of creating new instance of the client:
- Uses the given port (or finds a new available host_port if none is
given).
- Tries to connect to remote server with selected port.
Must be implemented by subclasses.
Args:
port: If given, this is the host port from which to connect to remote
device port. If not provided, find a new available port as host
port.
Raises:
AppRestoreConnectionError: When the app was not able to be
reconnected.
"""
raise NotImplementedError()
def _start_event_client(self):
"""Starts a separate JsonRpc client to the same session for propagating
events.
        This is an optional function that should only be implemented if the
        client utilizes the snippet event mechanism.
Returns:
A JsonRpc Client object that connects to the same session as the
one on which this function is called.
"""
raise NotImplementedError()
# Rest of the client methods.
def connect(self, uid=UNKNOWN_UID, cmd=JsonRpcCommand.INIT):
"""Opens a connection to a JSON RPC server.
Opens a connection to a remote client. The connection attempt will time
out if it takes longer than _SOCKET_CONNECTION_TIMEOUT seconds. Each
subsequent operation over this socket will time out after
_SOCKET_READ_TIMEOUT seconds as well.
Args:
uid: int, The uid of the session to join, or UNKNOWN_UID to start a
new session.
cmd: JsonRpcCommand, The command to use for creating the connection.
Raises:
IOError: Raised when the socket times out from io error
            socket.timeout: Raised when the socket waits too long for connection.
ProtocolError: Raised when there is an error in the protocol.
"""
self._counter = self._id_counter()
self._conn = socket.create_connection(('localhost', self.host_port),
_SOCKET_CONNECTION_TIMEOUT)
self._conn.settimeout(_SOCKET_READ_TIMEOUT)
self._client = self._conn.makefile(mode='brw')
resp = self._cmd(cmd, uid)
if not resp:
raise ProtocolError(self._ad,
ProtocolError.NO_RESPONSE_FROM_HANDSHAKE)
result = json.loads(str(resp, encoding='utf8'))
if result['status']:
self.uid = result['uid']
else:
self.uid = UNKNOWN_UID
def disconnect(self):
"""Close the connection to the remote client."""
if self._conn:
self._conn.close()
self._conn = None
def clear_host_port(self):
"""Stops the adb port forwarding of the host port used by this client.
"""
if self.host_port:
self._adb.forward(['--remove', 'tcp:%d' % self.host_port])
self.host_port = None
def _client_send(self, msg):
"""Sends an Rpc message through the connection.
Args:
msg: string, the message to send.
Raises:
Error: a socket error occurred during the send.
"""
try:
self._client.write(msg.encode("utf8") + b'\n')
self._client.flush()
self.log.debug('Snippet sent %s.', msg)
except socket.error as e:
raise Error(
self._ad,
'Encountered socket error "%s" sending RPC message "%s"' %
(e, msg))
def _client_receive(self):
"""Receives the server's response of an Rpc message.
Returns:
Raw byte string of the response.
Raises:
Error: a socket error occurred during the read.
"""
try:
response = self._client.readline()
self.log.debug('Snippet received: %s', response)
return response
except socket.error as e:
raise Error(
self._ad,
'Encountered socket error reading RPC response "%s"' % e)
def _cmd(self, command, uid=None):
"""Send a command to the server.
Args:
command: str, The name of the command to execute.
uid: int, the uid of the session to send the command to.
Returns:
The line that was written back.
"""
if not uid:
uid = self.uid
self._client_send(json.dumps({'cmd': command, 'uid': uid}))
return self._client_receive()
def _rpc(self, method, *args):
"""Sends an rpc to the app.
Args:
method: str, The name of the method to execute.
args: any, The args of the method.
Returns:
The result of the rpc.
Raises:
ProtocolError: Something went wrong with the protocol.
ApiError: The rpc went through, however executed with errors.
"""
with self._lock:
apiid = next(self._counter)
data = {'id': apiid, 'method': method, 'params': args}
request = json.dumps(data)
self._client_send(request)
response = self._client_receive()
if not response:
raise ProtocolError(self._ad,
ProtocolError.NO_RESPONSE_FROM_SERVER)
result = json.loads(str(response, encoding='utf8'))
if result['error']:
raise ApiError(self._ad, result['error'])
if result['id'] != apiid:
raise ProtocolError(self._ad, ProtocolError.MISMATCHED_API_ID)
if result.get('callback') is not None:
if self._event_client is None:
self._event_client = self._start_event_client()
return callback_handler.CallbackHandler(
callback_id=result['callback'],
event_client=self._event_client,
ret_value=result['result'],
method_name=method,
ad=self._ad)
return result['result']
def disable_hidden_api_blacklist(self):
"""If necessary and possible, disables hidden api blacklist."""
version_codename = self._ad.adb.getprop('ro.build.version.codename')
sdk_version = int(self._ad.adb.getprop('ro.build.version.sdk'))
# we check version_codename in addition to sdk_version because P builds
# in development report sdk_version 27, but still enforce the blacklist.
if self._ad.is_rootable and (sdk_version >= 28
or version_codename == 'P'):
self._ad.adb.shell(
'settings put global hidden_api_blacklist_exemptions "*"')
def __getattr__(self, name):
"""Wrapper for python magic to turn method calls into RPC calls."""
def rpc_call(*args):
return self._rpc(name, *args)
return rpc_call
def _id_counter(self):
i = 0
while True:
yield i
i += 1
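# A minimal usage sketch (the subclass name is illustrative; a concrete
# subclass must implement start_app_and_connect and friends):
#
#   client = MySnippetClient(app_name='snippet', ad=android_device)
#   client.start_app_and_connect()
#   info = client.getDeviceInfo()  # attribute access becomes an RPC via __getattr__
#   client.disconnect()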
|
python
|
from django.test import TestCase
from base.models import Comic, Character, Creator
from rest_framework.test import APIClient
from rest_framework import status
class ComicTestCase(TestCase):
"""Test suite for the api views."""
    def setUp(self):
        """Define the test client and other test variables."""
        self.client = APIClient()  # DRF test client, so format='json' below is honored
def test_create_comic(self):
"""
Ensure we can create a new comic object.
"""
data = {'title': 'Deadpool', 'issue': '14'}
response = self.client.post('/api/comic/', data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Comic.objects.count(), 1)
self.assertEqual(Comic.objects.get().title, 'Deadpool')
def test_get_comic_list(self):
Comic.objects.create(title="Test Comic", issue=10)
response = self.client.get('/api/comic/', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
class CharacterTestCase(TestCase):
"""Test suite for the api views."""
    def setUp(self):
        """Define the test client and other test variables."""
        self.client = APIClient()  # DRF test client, so format='json' below is honored
def test_create_character(self):
"""
Ensure we can create a new character object.
"""
data = {'name': 'Deadpool', 'description': 'hello'}
response = self.client.post('/api/character/', data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Character.objects.count(), 1)
self.assertEqual(Character.objects.get().name, 'Deadpool')
def test_get_character_list(self):
Character.objects.create(name="Test Character", description="test")
response = self.client.get('/api/character/', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
class CreatorTestCase(TestCase):
"""Test suite for the api views."""
    def setUp(self):
        """Define the test client and other test variables."""
        self.client = APIClient()  # DRF test client, so format='json' below is honored
def test_create_creator(self):
"""
Ensure we can create a new creator object.
"""
data = {'name': 'chucknorris', 'description': 'this is my test'}
response = self.client.post('/api/creator/', data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Creator.objects.count(), 1)
self.assertEqual(Creator.objects.get().name, 'chucknorris')
def test_get_creator_list(self):
Creator.objects.create(name="Test Creator", description="test test")
response = self.client.get('/api/creator/', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
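# A sketch of a detail-view check following the same pattern, assuming the
# default DRF router exposes /api/creator/<pk>/ (hypothetical URL):
#
#     def test_get_creator_detail(self):
#         creator = Creator.objects.create(name="Test Creator", description="d")
#         response = self.client.get('/api/creator/%d/' % creator.pk, format='json')
#         self.assertEqual(response.status_code, status.HTTP_200_OK)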
|
python
|
def contains_duplicate_extra_space(nums):
    # O(n) time / O(n) space: remember every value already seen
    seen = set()
    for val in nums:
        if val in seen:
            return True
        seen.add(val)
    return False
def contains_duplicate_2(nums, k):
    # Sliding window over the last k indices: keep each value's most recent
    # index and evict the entry that falls out of the window.
    seen = dict()
    for i, val in enumerate(nums):
        if val in seen and abs(seen[val] - i) <= k:
            return True
        seen[val] = i
        if len(seen) > k:
            del seen[nums[i - k]]
    return False
def contains_duplicate_3_brute_force(nums, k, t):
for i, val in enumerate(nums):
for j in range(i+1, min(len(nums), i+k+1)):
if abs(val-nums[j])<=t:
return True
return False
# Pending: an efficient O(n) version; see the bucket-based sketch below.
def contains_duplicate_3_optimized(nums, k, t):
    return False
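# A sketch of the standard bucket approach for the pending function above
# (LeetCode 220): values within t of one another fall into the same or an
# adjacent bucket of width t + 1, and a sliding window keeps only the
# buckets of the last k elements.
def contains_duplicate_3_buckets(nums, k, t):
    if t < 0 or k < 1:
        return False
    width = t + 1
    buckets = {}
    for i, val in enumerate(nums):
        b = val // width  # floor division also buckets negative values correctly
        if b in buckets:
            return True
        if b - 1 in buckets and abs(val - buckets[b - 1]) <= t:
            return True
        if b + 1 in buckets and abs(val - buckets[b + 1]) <= t:
            return True
        buckets[b] = val
        if i >= k:
            del buckets[nums[i - k] // width]  # evict the value leaving the window
    return False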
def contains_duplicate(nums):
if len(nums)<2:
return False
nums = sorted(nums)
i, j = 0,1
while j<len(nums):
if nums[i] == nums[j]:
return True
i += 1
j += 1
return False
if __name__ == '__main__':
nums = [1,5,9,1,5,9]
k = 2
t = 3
print(contains_duplicate_3_brute_force(nums, k, t))
#print(contains_duplicate_extra_space(nums))
#print(contains_duplicate(nums))
#print(contains_duplicate_2(nums, k))
|
python
|
import math
from kivy.properties import ObjectProperty, NumericProperty
from kivy.clock import Clock
from cobiv.libs.magnet import Magnet
from cobiv.modules.views.browser.eolitem import EOLItem
class DraggableItem(Magnet):
thumb = ObjectProperty(None, allownone=True)
container = ObjectProperty(None)
cell_size = NumericProperty(None)
file_id = NumericProperty(None)
position = NumericProperty(None)
temp_idx = None
def __init__(self, **kwargs):
super(DraggableItem, self).__init__(**kwargs)
self.add_widget(self.thumb)
self.size_hint = (None, None)
self.bind(cell_size=self.on_cell_size)
def on_cell_size(self, instance, value):
self.size = (value, value)
    def on_thumb(self, *args):
        # Kivy auto-binds on_<property name> handlers; the property observed
        # here is 'thumb', so rebuild the widget tree when it changes
        self.clear_widgets()
        if self.thumb:
            Clock.schedule_once(lambda *x: self.add_widget(self.thumb), 0)
def on_touch_down(self, touch, *args):
if self.collide_point(*touch.pos):
touch.grab(self)
self.remove_widget(self.thumb)
self.container.add_widget(self.thumb)
abs_pos = self.container.ids.scroll_view.to_parent(touch.pos[0], touch.pos[1])
self.thumb.center = abs_pos
return True
return super(DraggableItem, self).on_touch_down(touch, *args)
def on_touch_move(self, touch, *args):
grid_layout = self.container.ids.grid_layout
if touch.grab_current == self:
abs_pos = self.container.ids.scroll_view.to_parent(touch.pos[0], touch.pos[1])
self.thumb.center = abs_pos
grid_layout.remove_widget(self)
cnt_max = len(grid_layout.children)
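            # map the touch point to a grid cell: x is the column, y the row
            # counted from the top of the layout, and the resulting linear
            # index is clamped so the item can be dropped past the last child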
x = math.floor(touch.pos[0] / self.cell_size)
y = math.floor((grid_layout.height + self.cell_size - touch.pos[1]) / self.cell_size)
idx = min(grid_layout.cols * max(0, y - 1) + x, cnt_max)
i = int(cnt_max - idx)
self.temp_idx = idx
if i > 0:
grid_layout.add_widget(self, i)
else:
last_child = grid_layout.children[0]
if isinstance(last_child, EOLItem):
grid_layout.add_widget(self, 1)
self.temp_idx = cnt_max - 1
else:
grid_layout.add_widget(self)
return super(DraggableItem, self).on_touch_move(touch, *args)
def on_touch_up(self, touch, *args):
if touch.grab_current == self:
self.thumb.center = touch.pos
self.container.remove_widget(self.thumb)
self.add_widget(self.thumb)
touch.ungrab(self)
self.container.on_image_touch_up(self, self.temp_idx)
self.temp_idx = None
return True
return super(DraggableItem, self).on_touch_up(touch, *args)
def set_selected(self, value):
self.thumb.selected = value
def is_selected(self):
return self.thumb.selected if self.thumb is not None else False
def set_marked(self, value):
if value is None:
self.thumb.marked = not self.thumb.marked
else:
self.thumb.marked = value
def is_marked(self):
return self.thumb.marked
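# Note: Magnet (from the Kivy garden) animates changes to its child's
# position, so when the grid layout re-adds this item after a drag the
# thumbnail appears to glide into its new slot.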
|
python
|
"""Submodule that handles the generation of periodic table information."""
from __future__ import annotations
import typing
from .atomic_masses import ATOMIC_MASSES
def atomic_symbols_to_mass(atoms: typing.Sequence[str]) -> list[float]:
"""Converts atomic symbols to their atomic masses in amu.
Parameters
----------
atoms
List of atomic symbols
Returns
-------
List of atomic masses"""
masses = [ATOMIC_MASSES[atom] for atom in atoms]
return masses
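# Example usage; the exact values come from the ATOMIC_MASSES table:
#   >>> atomic_symbols_to_mass(["H", "H", "O"])
#   [1.008, 1.008, 15.999]  # approximate masses in amu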
|
python
|
#
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
{
'includes': [
# FIXME: Sense whether upstream or downstream build, and
# include the right features.gypi
'../../WebKit/chromium/features.gypi',
'../WebCore.gypi',
],
# Location of the chromium src directory.
'conditions': [
['inside_chromium_build==0', {
# Webkit is being built outside of the full chromium project.
'variables': {'chromium_src_dir': '../../WebKit/chromium'},
},{
# WebKit is checked out in src/chromium/third_party/WebKit
'variables': {'chromium_src_dir': '../../../..'},
}],
['OS == "mac"', {
'targets': [
{
# On the Mac, libWebKitSystemInterface*.a is used to help WebCore
# interface with the system. This library is supplied as a static
# library in binary format. At present, it contains many global
# symbols not marked private_extern. It should be considered an
# implementation detail of WebCore, and does not need these symbols
# to be exposed so widely.
#
# This target contains an action that cracks open the existing
# static library and rebuilds it with these global symbols
# transformed to private_extern.
'target_name': 'webkit_system_interface',
'type': 'static_library',
'variables': {
'adjusted_library_path':
'<(PRODUCT_DIR)/libWebKitSystemInterfaceLeopardPrivateExtern.a',
},
'sources': [
# An empty source file is needed to convince Xcode to produce
# output for this target. The resulting library won't actually
# contain anything. The library at adjusted_library_path will,
# and that library is pushed to dependents of this target below.
'mac/Empty.cpp',
],
'actions': [
{
'action_name': 'Adjust Visibility',
'inputs': [
'mac/adjust_visibility.sh',
'../../WebKitLibraries/libWebKitSystemInterfaceLeopard.a',
],
'outputs': [
'<(adjusted_library_path)',
],
'action': [
'<@(_inputs)',
'<@(_outputs)',
'<(INTERMEDIATE_DIR)/adjust_visibility', # work directory
],
},
], # actions
'link_settings': {
'libraries': [
'<(adjusted_library_path)',
],
}, # link_settings
}, # target webkit_system_interface
], # targets
}], # condition OS == "mac"
], # conditions
'variables': {
# If set to 1, doesn't compile debug symbols into webcore reducing the
# size of the binary and increasing the speed of gdb. gcc only.
'remove_webcore_debug_symbols%': 0,
'webcore_include_dirs': [
'../',
'../accessibility',
'../accessibility/chromium',
'../bindings',
'../bindings/v8',
'../bindings/v8/custom',
'../bridge',
'../css',
'../dom',
'../dom/default',
'../editing',
'../history',
'../html',
'../html/canvas',
'../inspector',
'../loader',
'../loader/appcache',
'../loader/archive',
'../loader/icon',
'../notifications',
'../page',
'../page/animation',
'../page/chromium',
'../platform',
'../platform/animation',
'../platform/chromium',
'../platform/graphics',
'../platform/graphics/chromium',
'../platform/graphics/filters',
'../platform/graphics/opentype',
'../platform/graphics/skia',
'../platform/graphics/transforms',
'../platform/image-decoders',
'../platform/image-decoders/bmp',
'../platform/image-decoders/gif',
'../platform/image-decoders/ico',
'../platform/image-decoders/jpeg',
'../platform/image-decoders/png',
'../platform/image-decoders/skia',
'../platform/image-decoders/xbm',
'../platform/image-encoders/skia',
'../platform/mock',
'../platform/network',
'../platform/network/chromium',
'../platform/sql',
'../platform/text',
'../plugins',
'../plugins/chromium',
'../rendering',
'../rendering/style',
'../storage',
'../storage/chromium',
'../svg',
'../svg/animation',
'../svg/graphics',
'../svg/graphics/filters',
'../websockets',
'../workers',
'../xml',
],
'conditions': [
['OS=="mac"', {
'webcore_include_dirs+': [
# platform/graphics/cg and mac needs to come before
# platform/graphics/chromium so that the Mac build picks up the
# version of ImageBufferData.h in the cg directory and
# FontPlatformData.h in the mac directory. The + prepends this
# directory to the list.
# FIXME: This shouldn't need to be prepended.
# FIXME: Eliminate dependency on platform/graphics/mac and
# related directories.
# platform/graphics/cg may need to stick around, though.
'../platform/graphics/cg',
'../platform/graphics/mac',
],
'webcore_include_dirs': [
# FIXME: Eliminate dependency on platform/mac and related
# directories.
'../loader/archive/cf',
'../platform/mac',
'../platform/text/mac',
],
# enable -Wall and -Werror, just in Mac build for now
# FIXME: Also enable this for Linux/Windows after verifying no warnings
'chromium_code': 1,
}],
['OS=="win"', {
'webcore_include_dirs': [
'../page/win',
'../platform/graphics/win',
'../platform/text/win',
'../platform/win',
],
}],
],
},
'targets': [
{
'target_name': 'webcore_bindings_sources',
'type': 'none',
'hard_dependency': 1,
'sources': [
# bison rule
'../css/CSSGrammar.y',
'../xml/XPathGrammar.y',
# gperf rule
'../html/DocTypeStrings.gperf',
'../html/HTMLEntityNames.gperf',
'../platform/ColorData.gperf',
# idl rule
'<@(webcore_bindings_idl_files)',
],
'sources!': [
# Custom bindings in bindings/v8/custom exist for these.
'../dom/EventListener.idl',
'../dom/EventTarget.idl',
'../html/VoidCallback.idl',
# JSC-only.
'../inspector/JavaScriptCallFrame.idl',
# Bindings with custom Objective-C implementations.
'../page/AbstractView.idl',
# FIXME: I don't know why all of these are excluded.
# Extra SVG bindings to exclude.
'../svg/ElementTimeControl.idl',
'../svg/SVGAnimatedPathData.idl',
'../svg/SVGExternalResourcesRequired.idl',
'../svg/SVGFitToViewBox.idl',
'../svg/SVGHKernElement.idl',
'../svg/SVGLangSpace.idl',
'../svg/SVGLocatable.idl',
'../svg/SVGStylable.idl',
'../svg/SVGTests.idl',
'../svg/SVGTransformable.idl',
'../svg/SVGViewSpec.idl',
'../svg/SVGZoomAndPan.idl',
# FIXME: I don't know why these are excluded, either.
# Someone (me?) should figure it out and add appropriate comments.
'../css/CSSUnknownRule.idl',
],
'actions': [
# Actions to build derived sources.
{
'action_name': 'CSSPropertyNames',
'inputs': [
'../css/makeprop.pl',
'../css/CSSPropertyNames.in',
'../css/SVGCSSPropertyNames.in',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/webkit/CSSPropertyNames.cpp',
'<(SHARED_INTERMEDIATE_DIR)/webkit/CSSPropertyNames.h',
],
'action': [
'python',
'scripts/action_csspropertynames.py',
'<@(_outputs)',
'--',
'<@(_inputs)'
],
},
{
'action_name': 'CSSValueKeywords',
'inputs': [
'../css/makevalues.pl',
'../css/CSSValueKeywords.in',
'../css/SVGCSSValueKeywords.in',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/webkit/CSSValueKeywords.c',
'<(SHARED_INTERMEDIATE_DIR)/webkit/CSSValueKeywords.h',
],
'action': [
'python',
'scripts/action_cssvaluekeywords.py',
'<@(_outputs)',
'--',
'<@(_inputs)'
],
},
{
'action_name': 'HTMLNames',
'inputs': [
'../dom/make_names.pl',
'../html/HTMLTagNames.in',
'../html/HTMLAttributeNames.in',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/webkit/HTMLNames.cpp',
'<(SHARED_INTERMEDIATE_DIR)/webkit/HTMLNames.h',
'<(SHARED_INTERMEDIATE_DIR)/webkit/HTMLElementFactory.cpp',
# Pass --wrapperFactory to make_names to get these (JSC build?)
#'<(SHARED_INTERMEDIATE_DIR)/webkit/JSHTMLElementWrapperFactory.cpp',
#'<(SHARED_INTERMEDIATE_DIR)/webkit/JSHTMLElementWrapperFactory.h',
],
'action': [
'python',
'scripts/action_makenames.py',
'<@(_outputs)',
'--',
'<@(_inputs)',
'--',
'--factory',
'--extraDefines', '<(feature_defines)'
],
},
{
'action_name': 'SVGNames',
'inputs': [
'../dom/make_names.pl',
'../svg/svgtags.in',
'../svg/svgattrs.in',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/webkit/SVGNames.cpp',
'<(SHARED_INTERMEDIATE_DIR)/webkit/SVGNames.h',
'<(SHARED_INTERMEDIATE_DIR)/webkit/SVGElementFactory.cpp',
'<(SHARED_INTERMEDIATE_DIR)/webkit/SVGElementFactory.h',
# Pass --wrapperFactory to make_names to get these (JSC build?)
#'<(SHARED_INTERMEDIATE_DIR)/webkit/JSSVGElementWrapperFactory.cpp',
#'<(SHARED_INTERMEDIATE_DIR)/webkit/JSSVGElementWrapperFactory.h',
],
'action': [
'python',
'scripts/action_makenames.py',
'<@(_outputs)',
'--',
'<@(_inputs)',
'--',
'--factory',
'--extraDefines', '<(feature_defines)'
],
},
{
'action_name': 'UserAgentStyleSheets',
'inputs': [
'../css/make-css-file-arrays.pl',
'../css/html.css',
'../css/quirks.css',
'../css/view-source.css',
'../css/themeChromiumLinux.css',
'../css/themeWin.css',
'../css/themeWinQuirks.css',
'../css/svg.css',
'../css/mediaControls.css',
'../css/mediaControlsChromium.css',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/webkit/UserAgentStyleSheets.h',
'<(SHARED_INTERMEDIATE_DIR)/webkit/UserAgentStyleSheetsData.cpp',
],
'action': [
'python',
'scripts/action_useragentstylesheets.py',
'<@(_outputs)',
'--',
'<@(_inputs)'
],
},
{
'action_name': 'XLinkNames',
'inputs': [
'../dom/make_names.pl',
'../svg/xlinkattrs.in',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/webkit/XLinkNames.cpp',
'<(SHARED_INTERMEDIATE_DIR)/webkit/XLinkNames.h',
],
'action': [
'python',
'scripts/action_makenames.py',
'<@(_outputs)',
'--',
'<@(_inputs)',
'--',
'--extraDefines', '<(feature_defines)'
],
},
{
'action_name': 'XMLNames',
'inputs': [
'../dom/make_names.pl',
'../xml/xmlattrs.in',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/webkit/XMLNames.cpp',
'<(SHARED_INTERMEDIATE_DIR)/webkit/XMLNames.h',
],
'action': [
'python',
'scripts/action_makenames.py',
'<@(_outputs)',
'--',
'<@(_inputs)',
'--',
'--extraDefines', '<(feature_defines)'
],
},
{
'action_name': 'tokenizer',
'inputs': [
'../css/maketokenizer',
'../css/tokenizer.flex',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/webkit/tokenizer.cpp',
],
'action': [
'python',
'scripts/action_maketokenizer.py',
'<@(_outputs)',
'--',
'<@(_inputs)'
],
},
],
'rules': [
# Rules to build derived sources.
{
'rule_name': 'bison',
'extension': 'y',
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/webkit/<(RULE_INPUT_ROOT).cpp',
'<(SHARED_INTERMEDIATE_DIR)/webkit/<(RULE_INPUT_ROOT).h'
],
'action': [
'python',
'scripts/rule_bison.py',
'<(RULE_INPUT_PATH)',
'<(SHARED_INTERMEDIATE_DIR)/webkit'
],
},
{
'rule_name': 'gperf',
'extension': 'gperf',
# gperf output is only ever #included by other source files. As
# such, process_outputs_as_sources is off. Some gperf output is
# #included as *.c and some as *.cpp. Since there's no way to tell
# which one will be needed in a rule definition, declare both as
# outputs. The harness script will generate one file and copy it to
# the other.
#
# This rule places outputs in SHARED_INTERMEDIATE_DIR because glue
# needs access to HTMLEntityNames.c.
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/webkit/<(RULE_INPUT_ROOT).c',
'<(SHARED_INTERMEDIATE_DIR)/webkit/<(RULE_INPUT_ROOT).cpp',
],
'action': [
'python',
'scripts/rule_gperf.py',
'<(RULE_INPUT_PATH)',
'<(SHARED_INTERMEDIATE_DIR)/webkit'
],
'process_outputs_as_sources': 0,
},
# Rule to build generated JavaScript (V8) bindings from .idl source.
{
'rule_name': 'binding',
'extension': 'idl',
'msvs_external_rule': 1,
'inputs': [
'../bindings/scripts/generate-bindings.pl',
'../bindings/scripts/CodeGenerator.pm',
'../bindings/scripts/CodeGeneratorV8.pm',
'../bindings/scripts/IDLParser.pm',
'../bindings/scripts/IDLStructure.pm',
],
'outputs': [
# FIXME: The .cpp file should be in webkit/bindings once
# we coax GYP into supporting it (see 'action' below).
'<(SHARED_INTERMEDIATE_DIR)/webcore/bindings/V8<(RULE_INPUT_ROOT).cpp',
'<(SHARED_INTERMEDIATE_DIR)/webkit/bindings/V8<(RULE_INPUT_ROOT).h',
],
'variables': {
'generator_include_dirs': [
'--include', '../css',
'--include', '../dom',
'--include', '../html',
'--include', '../notifications',
'--include', '../page',
'--include', '../plugins',
'--include', '../svg',
'--include', '../websockets',
'--include', '../workers',
'--include', '../xml',
],
},
# FIXME: Note that we put the .cpp files in webcore/bindings
# but the .h files in webkit/bindings. This is to work around
# the unfortunate fact that GYP strips duplicate arguments
# from lists. When we have a better GYP way to suppress that
# behavior, change the output location.
'action': [
'python',
'scripts/rule_binding.py',
'<(RULE_INPUT_PATH)',
'<(SHARED_INTERMEDIATE_DIR)/webcore/bindings',
'<(SHARED_INTERMEDIATE_DIR)/webkit/bindings',
'--',
'<@(_inputs)',
'--',
'--defines', '<(feature_defines) LANGUAGE_JAVASCRIPT V8_BINDING',
'--generator', 'V8',
'<@(generator_include_dirs)'
],
'message': 'Generating binding from <(RULE_INPUT_PATH)',
},
],
},
{
'target_name': 'webcore_bindings',
'type': '<(library)',
'hard_dependency': 1,
'dependencies': [
'webcore_bindings_sources',
'../../JavaScriptCore/JavaScriptCore.gyp/JavaScriptCore.gyp:pcre',
'../../JavaScriptCore/JavaScriptCore.gyp/JavaScriptCore.gyp:wtf',
'<(chromium_src_dir)/build/temp_gyp/googleurl.gyp:googleurl',
'<(chromium_src_dir)/skia/skia.gyp:skia',
'<(chromium_src_dir)/third_party/libjpeg/libjpeg.gyp:libjpeg',
'<(chromium_src_dir)/third_party/libpng/libpng.gyp:libpng',
'<(chromium_src_dir)/third_party/libxml/libxml.gyp:libxml',
'<(chromium_src_dir)/third_party/libxslt/libxslt.gyp:libxslt',
'<(chromium_src_dir)/third_party/npapi/npapi.gyp:npapi',
'<(chromium_src_dir)/third_party/sqlite/sqlite.gyp:sqlite',
],
'include_dirs': [
'<(INTERMEDIATE_DIR)',
# FIXME: Remove <(SHARED_INTERMEDIATE_DIR)/webcore when we
# can entice gyp into letting us put both the .cpp and .h
# files in the same output directory.
'<(SHARED_INTERMEDIATE_DIR)/webcore',
'<(SHARED_INTERMEDIATE_DIR)/webkit',
'<(SHARED_INTERMEDIATE_DIR)/webkit/bindings',
'<@(webcore_include_dirs)',
],
'direct_dependent_settings': {
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)/webkit',
'<(SHARED_INTERMEDIATE_DIR)/webkit/bindings',
],
},
'sources': [
# This file includes all the .cpp files generated from the .idl files
# in webcore_files.
'../bindings/v8/DerivedSourcesAllInOne.cpp',
# Additional .cpp files from webcore_bindings_sources actions.
'<(SHARED_INTERMEDIATE_DIR)/webkit/HTMLElementFactory.cpp',
'<(SHARED_INTERMEDIATE_DIR)/webkit/HTMLNames.cpp',
'<(SHARED_INTERMEDIATE_DIR)/webkit/SVGElementFactory.cpp',
'<(SHARED_INTERMEDIATE_DIR)/webkit/SVGNames.cpp',
'<(SHARED_INTERMEDIATE_DIR)/webkit/UserAgentStyleSheetsData.cpp',
'<(SHARED_INTERMEDIATE_DIR)/webkit/XLinkNames.cpp',
'<(SHARED_INTERMEDIATE_DIR)/webkit/XMLNames.cpp',
# Additional .cpp files from the webcore_bindings_sources rules.
'<(SHARED_INTERMEDIATE_DIR)/webkit/CSSGrammar.cpp',
'<(SHARED_INTERMEDIATE_DIR)/webkit/XPathGrammar.cpp',
],
'conditions': [
['javascript_engine=="v8"', {
'dependencies': [
'<(chromium_src_dir)/v8/tools/gyp/v8.gyp:v8',
],
}],
['OS=="mac"', {
'include_dirs': [
'../../WebKitLibraries',
],
}],
['OS=="win"', {
'dependencies': [
'<(chromium_src_dir)/build/win/system.gyp:cygwin'
],
'defines': [
'WEBCORE_NAVIGATOR_PLATFORM="Win32"',
'__PRETTY_FUNCTION__=__FUNCTION__',
],
# This is needed because Event.h in this directory is blocked
# by a system header on windows.
'include_dirs++': ['../dom'],
'direct_dependent_settings': {
'include_dirs+++': ['../dom'],
},
}],
['OS!="win" and remove_webcore_debug_symbols==1', {
'configurations': {
'Debug': {
'cflags!': ['-g'],
}
},
}],
],
},
{
'target_name': 'webcore',
'type': '<(library)',
'msvs_guid': '1C16337B-ACF3-4D03-AA90-851C5B5EADA6',
'dependencies': [
'webcore_bindings',
'../../JavaScriptCore/JavaScriptCore.gyp/JavaScriptCore.gyp:pcre',
'../../JavaScriptCore/JavaScriptCore.gyp/JavaScriptCore.gyp:wtf',
'<(chromium_src_dir)/build/temp_gyp/googleurl.gyp:googleurl',
'<(chromium_src_dir)/skia/skia.gyp:skia',
'<(chromium_src_dir)/third_party/libjpeg/libjpeg.gyp:libjpeg',
'<(chromium_src_dir)/third_party/libpng/libpng.gyp:libpng',
'<(chromium_src_dir)/third_party/libxml/libxml.gyp:libxml',
'<(chromium_src_dir)/third_party/libxslt/libxslt.gyp:libxslt',
'<(chromium_src_dir)/third_party/npapi/npapi.gyp:npapi',
'<(chromium_src_dir)/third_party/ots/ots.gyp:ots',
'<(chromium_src_dir)/third_party/sqlite/sqlite.gyp:sqlite',
],
'defines': [
'WEBCORE_NAVIGATOR_VENDOR="Google Inc."',
],
'include_dirs': [
'<(INTERMEDIATE_DIR)',
'<@(webcore_include_dirs)',
],
'sources': [
'<@(webcore_files)',
# For WebCoreSystemInterface, Mac-only.
'../../WebKit/mac/WebCoreSupport/WebSystemInterface.m',
],
'sources/': [
# Exclude JSC custom bindings.
['exclude', 'bindings/js'],
# Fortunately, many things can be excluded by using broad patterns.
# Exclude things that don't apply to the Chromium platform on the basis
# of their enclosing directories and tags at the ends of their
# filenames.
['exclude', '(android|cairo|cf|cg|curl|gtk|haiku|linux|mac|opentype|posix|qt|soup|symbian|win|wx)/'],
['exclude', '(?<!Chromium)(Android|Cairo|CF|CG|Curl|Gtk|Linux|Mac|OpenType|POSIX|Posix|Qt|Safari|Soup|Symbian|Win|Wx)\\.(cpp|mm?)$'],
['include', 'platform/graphics/opentype/OpenTypeSanitizer\\.cpp$'],
# Exclude everything in svg/ directly but not in subdirectories.
# Everything in svg/*.cpp is included in svg/SVGAllInOne.cpp.
['exclude', 'svg/[^/]+\\.cpp$'],
['include', 'svg/SVGAllInOne\\.cpp$'],
# JSC-only.
['exclude', 'inspector/JavaScript[^/]*\\.cpp$'],
# ENABLE_OFFLINE_WEB_APPLICATIONS, exclude most of webcore's impl
['exclude', 'loader/appcache/'],
        ['include', 'loader/appcache/ApplicationCacheHost\\.h$'],
        ['include', 'loader/appcache/DOMApplicationCache\\.(h|cpp)$'],
# Exclude some DB-related files.
['exclude', 'platform/sql/SQLiteFileSystem.cpp'],
['exclude', 'storage/DatabaseTracker.cpp'],
['exclude', 'storage/DatabaseTrackerClient.h'],
['exclude', 'storage/OriginQuotaManager.cpp'],
['exclude', 'storage/OriginQuotaManager.h'],
['exclude', 'storage/OriginUsageRecord.cpp'],
['exclude', 'storage/OriginUsageRecord.h'],
['exclude', 'storage/SQLTransactionClient.cpp'],
],
'sources!': [
# A few things can't be excluded by patterns. List them individually.
# Don't build StorageNamespace. We have our own implementation.
'../storage/StorageNamespace.cpp',
# Don't build StorageEventDispatcher. We have our own implementation.
'../storage/StorageEventDispatcher.cpp',
# Use history/BackForwardListChromium.cpp instead.
'../history/BackForwardList.cpp',
# Use loader/icon/IconDatabaseNone.cpp instead.
'../loader/icon/IconDatabase.cpp',
# Use platform/KURLGoogle.cpp instead.
'../platform/KURL.cpp',
# Use platform/MIMETypeRegistryChromium.cpp instead.
'../platform/MIMETypeRegistry.cpp',
# Theme.cpp is used only if we're using USE_NEW_THEME. We are not for
# Windows and Linux. We manually include Theme.cpp for the Mac below.
'../platform/Theme.cpp',
# Exclude some, but not all, of plugins.
'../plugins/PluginDatabase.cpp',
'../plugins/PluginInfoStore.cpp',
'../plugins/PluginMainThreadScheduler.cpp',
'../plugins/PluginPackage.cpp',
'../plugins/PluginStream.cpp',
'../plugins/PluginView.cpp',
'../plugins/npapi.cpp',
# Use LinkHashChromium.cpp instead
'../platform/LinkHash.cpp',
# Don't build these.
# FIXME: I don't know exactly why these are excluded. It would
# be nice to provide more explicit comments. Some of these do actually
# compile.
'../dom/StaticStringList.cpp',
'../loader/icon/IconFetcher.cpp',
'../loader/UserStyleSheetLoader.cpp',
'../platform/graphics/GraphicsLayer.cpp',
'../platform/graphics/RenderLayerBacking.cpp',
'../platform/graphics/RenderLayerCompositor.cpp',
# We use a multi-process version from the WebKit API.
'../dom/default/PlatformMessagePortChannel.cpp',
'../dom/default/PlatformMessagePortChannel.h',
],
'direct_dependent_settings': {
'include_dirs': [
'<@(webcore_include_dirs)',
],
'mac_framework_dirs': [
'$(SDKROOT)/System/Library/Frameworks/ApplicationServices.framework/Frameworks',
],
},
'export_dependent_settings': [
'webcore_bindings',
'../../JavaScriptCore/JavaScriptCore.gyp/JavaScriptCore.gyp:wtf',
'<(chromium_src_dir)/build/temp_gyp/googleurl.gyp:googleurl',
'<(chromium_src_dir)/skia/skia.gyp:skia',
'<(chromium_src_dir)/third_party/npapi/npapi.gyp:npapi',
],
'link_settings': {
'mac_bundle_resources': [
'../Resources/aliasCursor.png',
'../Resources/cellCursor.png',
'../Resources/contextMenuCursor.png',
'../Resources/copyCursor.png',
'../Resources/crossHairCursor.png',
'../Resources/eastResizeCursor.png',
'../Resources/eastWestResizeCursor.png',
'../Resources/helpCursor.png',
'../Resources/linkCursor.png',
'../Resources/missingImage.png',
'../Resources/moveCursor.png',
'../Resources/noDropCursor.png',
'../Resources/noneCursor.png',
'../Resources/northEastResizeCursor.png',
'../Resources/northEastSouthWestResizeCursor.png',
'../Resources/northResizeCursor.png',
'../Resources/northSouthResizeCursor.png',
'../Resources/northWestResizeCursor.png',
'../Resources/northWestSouthEastResizeCursor.png',
'../Resources/notAllowedCursor.png',
'../Resources/progressCursor.png',
'../Resources/southEastResizeCursor.png',
'../Resources/southResizeCursor.png',
'../Resources/southWestResizeCursor.png',
'../Resources/verticalTextCursor.png',
'../Resources/waitCursor.png',
'../Resources/westResizeCursor.png',
'../Resources/zoomInCursor.png',
'../Resources/zoomOutCursor.png',
],
},
'hard_dependency': 1,
'mac_framework_dirs': [
'$(SDKROOT)/System/Library/Frameworks/ApplicationServices.framework/Frameworks',
],
'msvs_disabled_warnings': [
4138, 4244, 4291, 4305, 4344, 4355, 4521, 4099,
],
'scons_line_length' : 1,
'xcode_settings': {
# Some Mac-specific parts of WebKit won't compile without having this
# prefix header injected.
# FIXME: make this a first-class setting.
'GCC_PREFIX_HEADER': '../WebCorePrefix.h',
},
'conditions': [
['javascript_engine=="v8"', {
'dependencies': [
'<(chromium_src_dir)/v8/tools/gyp/v8.gyp:v8',
],
'export_dependent_settings': [
'<(chromium_src_dir)/v8/tools/gyp/v8.gyp:v8',
],
}],
['OS=="linux" or OS=="freebsd"', {
'dependencies': [
'<(chromium_src_dir)/build/linux/system.gyp:fontconfig',
'<(chromium_src_dir)/build/linux/system.gyp:gtk',
],
'sources': [
'../platform/graphics/chromium/VDMXParser.cpp',
'../platform/graphics/chromium/HarfbuzzSkia.cpp',
],
'sources/': [
# Cherry-pick files excluded by the broader regular expressions above.
['include', 'platform/chromium/KeyCodeConversionGtk\\.cpp$'],
['include', 'platform/graphics/chromium/FontCacheLinux\\.cpp$'],
['include', 'platform/graphics/chromium/FontLinux\\.cpp$'],
['include', 'platform/graphics/chromium/FontPlatformDataLinux\\.cpp$'],
['include', 'platform/graphics/chromium/GlyphPageTreeNodeLinux\\.cpp$'],
['include', 'platform/graphics/chromium/SimpleFontDataLinux\\.cpp$'],
],
'cflags': [
# WebCore does not work with strict aliasing enabled.
# https://bugs.webkit.org/show_bug.cgi?id=25864
'-fno-strict-aliasing',
],
}],
['OS=="linux"', {
'defines': [
# Mozilla on Linux effectively uses uname -sm, but when running
# 32-bit x86 code on an x86_64 processor, it uses
# "Linux i686 (x86_64)". Matching that would require making a
# run-time determination.
'WEBCORE_NAVIGATOR_PLATFORM="Linux i686"',
],
}],
['OS=="mac"', {
'dependencies': [
'webkit_system_interface',
],
'defines': [
# Match Safari and Mozilla on Mac x86.
'WEBCORE_NAVIGATOR_PLATFORM="MacIntel"',
# Chromium's version of WebCore includes the following Objective-C
# classes. The system-provided WebCore framework may also provide
# these classes. Because of the nature of Objective-C binding
# (dynamically at runtime), it's possible for the Chromium-provided
# versions to interfere with the system-provided versions. This may
# happen when a system framework attempts to use WebCore.framework,
# such as when converting an HTML-flavored string to an
# NSAttributedString. The solution is to force Objective-C class
# names that would conflict to use alternate names.
# FIXME: This list will hopefully shrink but may also grow.
# Periodically run:
# nm libwebcore.a | grep -E '[atsATS] ([+-]\[|\.objc_class_name)'
# and make sure that everything listed there has the alternate
# ChromiumWebCoreObjC name, and that nothing extraneous is listed
# here. If all Objective-C can be eliminated from Chromium's WebCore
# library, these defines should be removed entirely.
'ScrollbarPrefsObserver=ChromiumWebCoreObjCScrollbarPrefsObserver',
'WebCoreRenderThemeNotificationObserver=ChromiumWebCoreObjCWebCoreRenderThemeNotificationObserver',
'WebFontCache=ChromiumWebCoreObjCWebFontCache',
],
'actions': [
{
# Allow framework-style #include of
# <WebCore/WebCoreSystemInterface.h>.
'action_name': 'WebCoreSystemInterface.h',
'inputs': [
'../platform/mac/WebCoreSystemInterface.h',
],
'outputs': [
'<(INTERMEDIATE_DIR)/WebCore/WebCoreSystemInterface.h',
],
'action': ['cp', '<@(_inputs)', '<@(_outputs)'],
},
],
'include_dirs': [
'../../WebKitLibraries',
],
'sources/': [
# Additional files from the WebCore Mac build that are presently
# used in the WebCore Chromium Mac build too.
# The Mac build is PLATFORM_CF but does not use CFNetwork.
['include', 'CF\\.cpp$'],
['exclude', 'network/cf/'],
# The Mac build is PLATFORM_CG too. platform/graphics/cg is the
# only place that CG files we want to build are located, and not
# all of them even have a CG suffix, so just add them by a
# regexp matching their directory.
          ['include', 'platform/graphics/cg/[^/]*(?<!Win)\\.(cpp|mm?)$'],
# Use native Mac font code from WebCore.
['include', 'platform/(graphics/)?mac/[^/]*Font[^/]*\\.(cpp|mm?)$'],
['include', 'platform/graphics/mac/ComplexText[^/]*\\.(cpp|h)$'],
# Cherry-pick some files that can't be included by broader regexps.
# Some of these are used instead of Chromium platform files, see
# the specific exclusions in the "sources!" list below.
['include', 'loader/archive/cf/LegacyWebArchive\\.cpp$'],
['include', 'platform/graphics/mac/ColorMac\\.mm$'],
['include', 'platform/graphics/mac/FloatPointMac\\.mm$'],
['include', 'platform/graphics/mac/FloatRectMac\\.mm$'],
['include', 'platform/graphics/mac/FloatSizeMac\\.mm$'],
['include', 'platform/graphics/mac/GlyphPageTreeNodeMac\\.cpp$'],
['include', 'platform/graphics/mac/GraphicsContextMac\\.mm$'],
['include', 'platform/graphics/mac/IntRectMac\\.mm$'],
['include', 'platform/mac/BlockExceptions\\.mm$'],
['include', 'platform/mac/LocalCurrentGraphicsContext\\.mm$'],
['include', 'platform/mac/PurgeableBufferMac\\.cpp$'],
['include', 'platform/mac/WebCoreSystemInterface\\.mm$'],
['include', 'platform/mac/WebCoreTextRenderer\\.mm$'],
['include', 'platform/text/mac/ShapeArabic\\.c$'],
['include', 'platform/text/mac/String(Impl)?Mac\\.mm$'],
# Use USE_NEW_THEME on Mac.
['include', 'platform/Theme\\.cpp$'],
['include', 'WebKit/mac/WebCoreSupport/WebSystemInterface\\.m$'],
],
'sources!': [
# The Mac currently uses FontCustomPlatformData.cpp from
# platform/graphics/mac, included by regex above, instead.
'../platform/graphics/chromium/FontCustomPlatformData.cpp',
# The Mac currently uses ScrollbarThemeChromiumMac.mm, which is not
# related to ScrollbarThemeChromium.cpp.
'../platform/chromium/ScrollbarThemeChromium.cpp',
# The Mac uses ImageSourceCG.cpp from platform/graphics/cg, included
# by regex above, instead.
'../platform/graphics/ImageSource.cpp',
# These Skia files aren't currently built on the Mac, which uses
# CoreGraphics directly for this portion of graphics handling.
'../platform/graphics/skia/FloatPointSkia.cpp',
'../platform/graphics/skia/FloatRectSkia.cpp',
'../platform/graphics/skia/GradientSkia.cpp',
'../platform/graphics/skia/GraphicsContextSkia.cpp',
'../platform/graphics/skia/ImageBufferSkia.cpp',
'../platform/graphics/skia/ImageSkia.cpp',
'../platform/graphics/skia/ImageSourceSkia.cpp',
'../platform/graphics/skia/IntPointSkia.cpp',
'../platform/graphics/skia/IntRectSkia.cpp',
'../platform/graphics/skia/PathSkia.cpp',
'../platform/graphics/skia/PatternSkia.cpp',
'../platform/graphics/skia/TransformationMatrixSkia.cpp',
# RenderThemeChromiumSkia is not used on mac since RenderThemeChromiumMac
# does not reference the Skia code that is used by Windows and Linux.
'../rendering/RenderThemeChromiumSkia.cpp',
# Skia image-decoders are also not used on mac. CoreGraphics
# is used directly instead.
'../platform/image-decoders/ImageDecoder.h',
'../platform/image-decoders/bmp/BMPImageDecoder.cpp',
'../platform/image-decoders/bmp/BMPImageDecoder.h',
'../platform/image-decoders/bmp/BMPImageReader.cpp',
'../platform/image-decoders/bmp/BMPImageReader.h',
'../platform/image-decoders/gif/GIFImageDecoder.cpp',
'../platform/image-decoders/gif/GIFImageDecoder.h',
'../platform/image-decoders/gif/GIFImageReader.cpp',
'../platform/image-decoders/gif/GIFImageReader.h',
'../platform/image-decoders/ico/ICOImageDecoder.cpp',
'../platform/image-decoders/ico/ICOImageDecoder.h',
'../platform/image-decoders/jpeg/JPEGImageDecoder.cpp',
'../platform/image-decoders/jpeg/JPEGImageDecoder.h',
'../platform/image-decoders/png/PNGImageDecoder.cpp',
'../platform/image-decoders/png/PNGImageDecoder.h',
'../platform/image-decoders/skia/ImageDecoderSkia.cpp',
'../platform/image-decoders/xbm/XBMImageDecoder.cpp',
'../platform/image-decoders/xbm/XBMImageDecoder.h',
],
'direct_dependent_settings': {
'include_dirs': [
'../../WebKitLibraries',
'../../WebKit/mac/WebCoreSupport',
],
},
}],
['OS=="win"', {
'dependencies': [
'<(chromium_src_dir)/build/win/system.gyp:cygwin'
],
'sources/': [
['exclude', 'Posix\\.cpp$'],
['include', '/opentype/'],
['include', '/TransparencyWin\\.cpp$'],
['include', '/SkiaFontWin\\.cpp$'],
],
'defines': [
# Match Safari and Mozilla on Windows.
'WEBCORE_NAVIGATOR_PLATFORM="Win32"',
'__PRETTY_FUNCTION__=__FUNCTION__',
],
# This is needed because Event.h in this directory is blocked
# by a system header on windows.
'include_dirs++': ['../dom'],
'direct_dependent_settings': {
'include_dirs+++': ['../dom'],
},
}],
['OS!="linux" and OS!="freebsd"', {
'sources/': [['exclude', '(Gtk|Linux)\\.cpp$']]
}],
['OS!="mac"', {
'sources/': [['exclude', 'Mac\\.(cpp|mm?)$']]
}],
['OS!="win"', {
'sources/': [
['exclude', 'Win\\.cpp$'],
['exclude', '/(Windows|Uniscribe)[^/]*\\.cpp$']
],
}],
['OS!="win" and remove_webcore_debug_symbols==1', {
'configurations': {
'Debug': {
'cflags!': ['-g'],
}
},
}],
],
},
], # targets
}
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 27 10:55:34 2016
The plain vanila implementation of Recurrent Neural Network
@author: yaric
"""
import time
import datetime
from random import uniform
import numpy as np
import scipy.io as sio
class RNN(object):
def __init__(self, n_features, n_outputs, n_neurons=100, param_update_scheme='Adagrad',
learning_rate=1e-1, activation_rule='Tanh',
use_batch_step=False, batch_step_size=25, relu_neg_slope=0.01,
use_dropout_regularization=True, dropout_threshold=0.8,
reg_strenght=0.5, use_regularization=True,
sgd_shuffle=True):
"""
Initializes RNN
n_features the number of features per data sample
n_outputs the number of output values to find
n_neurons the number of neurons in hidden layer (Default: 100)
param_update_scheme the algorithm used to update parameters after gradients update (Default: 'Adagrad')
learning_rate - the start learning rate (Default: 1e-1)
activation_rule - the single neuron non-linearity activation rule (Default: 'Tanh')
use_batch_step the flag to indicate whether to use batch training (True), default - False
batch_step_size the number of samples per batch (Default: 25)
relu_neg_slope the ReLU negative slope (Default: 0.01)
use_dropout_regularization whether to use dropout regularization threshold (Default: True)
dropout_threshold the dropout threshold (Default: 0.8)
        reg_strenght the L2 regularization strength for training parameters (Default: 0.5)
use_regularization the flag to turn on/off regularization (Default: True)
sgd_shuffle whether to shuffle data samples randomly after each epoch (Default: True)
"""
self.hidden_size = n_neurons
self.n_features = n_features
self.n_outputs = n_outputs
self.use_batch_step = use_batch_step
self.batch_step_size = batch_step_size
self.param_update_scheme = param_update_scheme
self.learning_rate = learning_rate
self.activation_rule = activation_rule
self.relu_neg_slope = relu_neg_slope
self.use_dropout_regularization = use_dropout_regularization
self.dropout_threshold = dropout_threshold
self.reg_strenght = reg_strenght
self.use_regularization = use_regularization
self.sgd_shuffle = sgd_shuffle
def train(self, Xtr, ytr, ytr_missing, n_epochs, Xvl=None, yvl=None, yvl_missing=None, check_gradient=False):
"""
Trains neural network over specified epochs with optional validation if validation data provided
        Xtr - the train features tensor with shape (num_samples, num_features)
        ytr - the train ground truth tensor with shape (num_samples, num_outputs)
        ytr_missing - the boolean flags denoting missing train outputs with shape (num_samples, num_outputs)
        n_epochs - the number of epochs to use for training
        Xvl - the validation features tensor with shape (num_samples, num_features) (Default: None)
        yvl - the validation ground truth tensor with shape (num_samples, num_outputs) (Default: None)
        yvl_missing - the boolean flags denoting missing validation outputs with shape (num_samples, num_outputs) (Default: None)
check_gradient - the boolean to indicate if gradient check should be done (Default: False)
return trained model parameters as well as train/validation errors and scores per epoch
"""
# parameters check
assert len(Xtr[0]) == self.n_features
assert len(ytr[0]) == self.n_outputs
assert len(ytr_missing[0]) == self.n_outputs
do_validation = (Xvl is not None)
if do_validation and (yvl is None or yvl_missing is None):
            raise ValueError('Validation outputs or missing flags not specified when validation was requested')
elif do_validation:
# check that validation parameters of correct size
assert len(Xtr[0]) == len(Xvl[0])
assert len(ytr[0]) == len(yvl[0])
assert len(yvl[0]) == len(yvl_missing[0])
# model parameters
self.__initNNParameters()
start_time = datetime.datetime.fromtimestamp(time.time())
# do train
mWxh, mWhh, mWhy = np.zeros_like(self.Wxh), np.zeros_like(self.Whh), np.zeros_like(self.Why)
mbh, mby = np.zeros_like(self.bh), np.zeros_like(self.by) # memory variables for Adagrad, RMSProp
vWxh, vWhh, vWhy = np.zeros_like(self.Wxh), np.zeros_like(self.Whh), np.zeros_like(self.Why)
vbh, vby = np.zeros_like(self.bh), np.zeros_like(self.by) # memory variables for Adam
train_errors = np.zeros((n_epochs, 1))
train_scores = np.zeros_like(train_errors)
if do_validation:
validation_errors = np.zeros_like(train_errors)
validation_scores = np.zeros_like(train_errors)
n = 0
step_f = self.__activationFunction()
for epoch in range(n_epochs):
# prepare for new epoch
if self.use_batch_step:
                steps = len(Xtr) // self.batch_step_size  # integer number of mini-batches per epoch
else:
steps = len(Xtr)
epoch_error = np.zeros((steps, 1))
epoch_score = np.zeros((steps, 1))
self.hprev = np.zeros((self.hidden_size, 1)) # reset RNN memory at start of new epoch
            # shuffle data for stochastic gradient descent before a new epoch starts
if self.use_batch_step and self.sgd_shuffle:
                perm = np.arange(Xtr.shape[0])
                np.random.shuffle(perm)
                Xtr = Xtr[perm]
                ytr = ytr[perm]
                ytr_missing = ytr_missing[perm]  # keep the missing-value flags aligned with the shuffled samples
# proceed with mini-batches
for j in range(steps):
if self.use_batch_step:
index = j * self.batch_step_size
inputs = Xtr[index : index + self.batch_step_size, :] # the slice of rows with batch_size length
targets = ytr[index : index + self.batch_step_size, :]
y_missing = ytr_missing[index : index + self.batch_step_size, :]
loss, score, dWxh, dWhh, dWhy, dbh, dby, self.hprev = step_f(inputs, targets, y_missing)
else:
inputs = Xtr[j : j + 1, :] # just one row
targets = ytr[j : j + 1, :]
loss, score, dWxh, dWhh, dWhy, dbh, dby, self.hprev = step_f(inputs, targets, ytr_missing[j])
epoch_error[j] = loss
epoch_score[j] = score
                if j % 100 == 0: print('---iter %d, epoch: %d, step: %d from: %d, loss: %.5f' % (n, epoch, j, steps, loss))  # print progress
n += 1 # total iteration counter
if check_gradient:
self.__gradCheck(inputs, targets, ytr_missing[j])
# perform parameter update
if self.param_update_scheme == 'Adagrad':
# with Adagrad
                    eps = 1e-8  # 1e-4 also tried
for param, dparam, mem in zip([self.Wxh, self.Whh, self.Why, self.bh, self.by], [dWxh, dWhh, dWhy, dbh, dby], [mWxh, mWhh, mWhy, mbh, mby]):
mem += dparam * dparam
param += -self.learning_rate * dparam / (np.sqrt(mem) + eps) # adagrad update
elif self.param_update_scheme == 'RMSProp':
# with RMSProp
                    eps = 1e-8  # alternatives tried: {1e-4, 1e-5, 1e-6}
                    decay_rate = 0.95  # alternatives tried: {0.9, 0.95}
                    for param, dparam, mem in zip([self.Wxh, self.Whh, self.Why, self.bh, self.by], [dWxh, dWhh, dWhy, dbh, dby], [mWxh, mWhh, mWhy, mbh, mby]):
                        mem *= decay_rate
                        mem += (1 - decay_rate) * dparam * dparam  # cache = decay_rate * cache + (1 - decay_rate) * dx**2, in place so it persists across steps
                        param += -self.learning_rate * dparam / (np.sqrt(mem) + eps)  # RMSProp update
elif self.param_update_scheme == 'Adam':
# with Adam
                    eps = 1e-8
                    beta1 = 0.9
                    beta2 = 0.999  # 0.99 also tried
                    for param, dparam, m, v in zip([self.Wxh, self.Whh, self.Why, self.bh, self.by], [dWxh, dWhh, dWhy, dbh, dby], [mWxh, mWhh, mWhy, mbh, mby], [vWxh, vWhh, vWhy, vbh, vby]):
                        m *= beta1
                        m += (1 - beta1) * dparam  # first moment estimate, updated in place so it persists across steps
                        v *= beta2
                        v += (1 - beta2) * (dparam * dparam)  # second moment estimate, updated in place so it persists across steps
                        # bias-corrected estimates
                        mt = m / (1 - pow(beta1, j + 1))  # N.B. j starts from 0
                        vt = v / (1 - pow(beta2, j + 1))
                        param += -self.learning_rate * mt / (np.sqrt(vt) + eps)  # Adam update
elif self.param_update_scheme == 'AdaMax':
# with AdaMax - a variant of Adam based on the infinity norm.
                    eps = 1e-8
                    beta1 = 0.9
                    beta2 = 0.99  # alternatives tried: 0.999, 0.95
                    step_size = self.learning_rate / (1 - pow(beta1, j + 1))  # bias correction
                    for param, dparam, m, v in zip([self.Wxh, self.Whh, self.Why, self.bh, self.by], [dWxh, dWhh, dWhy, dbh, dby], [mWxh, mWhh, mWhy, mbh, mby], [vWxh, vWhh, vWhy, vbh, vby]):
                        m *= beta1
                        m += (1 - beta1) * dparam  # update biased first moment estimate in place
                        np.maximum(beta2 * v, np.abs(dparam) + eps, out=v)  # exponentially weighted infinity norm, updated in place
                        param += -step_size * m / v
else:
raise "Uknown parameters update scheme: {}".format(self.param_update_scheme)
# Annealing the learning rate but avoid dropping it too low
if self.learning_rate > 1e-6 and epoch != 0 and epoch % 20 == 0: self.learning_rate *= 0.1
train_scores[epoch] = self.__make_score(epoch_score) # the score per epoch
train_errors[epoch] = np.average(epoch_error, axis=0) # the mean train error per epoch
# calculate validation if appropriate
if do_validation:
y_predicted = self.__predict(Xvl, np.zeros_like(self.hprev))
validation_errors[epoch], validation_scores[epoch] = self.__validate(y_predicted, yvl, yvl_missing)
                print('epoch: %d, learning rate: %s, train loss: %s, score: %s\nvalidation loss: %s, score: %s' % (epoch, self.learning_rate, train_errors[epoch], train_scores[epoch], validation_errors[epoch], validation_scores[epoch]))  # print progress
            else:
                print('epoch: %d, learning rate: %s, train loss: %s, score: %s' % (epoch, self.learning_rate, train_errors[epoch], train_scores[epoch]))  # print progress
# The time spent
finish_date = datetime.datetime.fromtimestamp(time.time())
delta = finish_date - start_time
        print('\n------------------------\nTrain time: \n%s\nTrain error: \n%s\nscores:\n%s\n' % (delta, train_errors, train_scores))
        if do_validation:
            print('\n------------------------\nValidation error: \n%s\nscores:\n%s\n' % (validation_errors, validation_scores))
return train_errors, train_scores, validation_errors, validation_scores
else:
return train_errors, train_scores
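    # Typical usage (hypothetical shapes and epoch count):
    #   rnn = RNN(n_features=10, n_outputs=3)
    #   errors, scores = rnn.train(Xtr, ytr, ytr_missing, n_epochs=50)
    #   y_pred = rnn.predict(Xts)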
def predict(self, Xvl, use_prev_state = False):
"""
The method to predict outputs based on provided data samples
Xvl the data samples with shape (num_samples, n_features)
use_prev_state whether to use saved previous state of RNN or just reset its memory
        return predictions per data sample with shape (num_samples, n_outputs)
"""
hprev = self.hprev if use_prev_state else np.zeros_like(self.hprev)
return self.__predict(Xvl, hprev)
def saveModel(self, name):
"""
Saves trained model using provided file name
"""
vault = {'Wxh' : self.Wxh,
'Whh' : self.Whh,
'Why': self.Why,
'bh' : self.bh,
'by' : self.by,
'hprev' : self.hprev,
'hidden_size' : self.hidden_size,
'n_features' : self.n_features,
'n_outputs' : self.n_outputs,
'use_batch_step' : self.use_batch_step,
'batch_step_size' : self.batch_step_size,
'param_update_scheme' : self.param_update_scheme,
'learning_rate' : self.learning_rate,
'activation_rule' : self.activation_rule,
'relu_neg_slope' : self.relu_neg_slope,
'use_dropout_regularization' : self.use_dropout_regularization,
'dropout_threshold' : self.dropout_threshold,
'reg_strenght' : self.reg_strenght,
'use_regularization' : self.use_regularization }
sio.savemat(name, vault)
def loadModel(self, name):
"""
        Loads model from the specified file
name the path to the model file
"""
mat_contents = sio.loadmat(name)
self.Wxh = mat_contents['Wxh']
self.Whh = mat_contents['Whh']
self.Why = mat_contents['Why']
self.bh = mat_contents['bh']
self.by = mat_contents['by']
self.hprev = mat_contents['hprev']
self.hidden_size = mat_contents['hidden_size']
self.n_features = mat_contents['n_features']
self.n_outputs = mat_contents['n_outputs']
self.use_batch_step = mat_contents['use_batch_step']
self.batch_step_size = mat_contents['batch_step_size']
self.param_update_scheme = mat_contents['param_update_scheme']
self.learning_rate = mat_contents['learning_rate']
self.activation_rule = mat_contents['activation_rule']
self.relu_neg_slope = mat_contents['relu_neg_slope']
self.use_dropout_regularization = mat_contents['use_dropout_regularization']
self.dropout_threshold = mat_contents['dropout_threshold']
self.reg_strenght = mat_contents['reg_strenght']
self.use_regularization = mat_contents['use_regularization']
def __step_tanh(self, inputs, targets, ytr_missing):
"""
        The one step in RNN computations using the hyperbolic tangent (tanh) as the non-linear activation function
inputs, targets are both arrays of real numbers with shapes (input_size, 1) and (target_size, 1) respectively.
hprev is array of initial hidden state with shape (hidden_size, 1)
Wxh, Whh, Why - the neurons input/output weights
bh, by - the hidden/output layer bias
returns the loss, score_mean, gradients on model parameters, and last hidden state
"""
#
# forward pass
#
xs = inputs.T
hs = np.tanh(np.dot(self.Wxh, xs) + np.dot(self.Whh, self.hprev) + self.bh) # hidden state
if self.use_regularization and self.use_dropout_regularization:
U1 = (np.random.rand(*hs.shape) < self.dropout_threshold) / self.dropout_threshold # dropout mask
hs *= U1 # drop!
ys = np.dot(self.Why, hs) + self.by # unnormalized next outputs
ps = ys - targets.T
loss = np.sum(np.abs(ps)) # L1 norm
#
# backward pass: compute gradients going backwards
#
dy = np.sign(ps) # the gradient for y only inherits the sign of the difference for L1 norm (http://cs231n.github.io/neural-networks-2/#reg)
dWhy = np.dot(dy, hs.T)
dby = dy
dh = np.dot(self.Why.T, dy) # backprop into h
dhraw = (1 - hs * hs) * dh # backprop through tanh nonlinearity
dbh = dhraw
dWxh = np.dot(dhraw, inputs)
dWhh = np.dot(dhraw, self.hprev.T)
# add L2 regularization gradient contribution if not dropout
if self.use_regularization and not self.use_dropout_regularization:
dWhy += self.reg_strenght * self.Why
dWhh += self.reg_strenght * self.Whh
dWxh += self.reg_strenght * self.Wxh
for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
score = self.__score_mean(np.abs(ps), ytr_missing) # IMPORTANT: use COVAR_y_MISSING flags for mean calculation without missed Y
return loss, score, dWxh, dWhh, dWhy, dbh, dby, hs
def __batch_step_tanh(self, inputs, targets, ytr_missing):
"""
        The one step in RNN computations over a mini-batch of input features using tanh as the non-linear activation function
inputs,targets are both list of real numbers.
hprev is Hx1 array of initial hidden state
returns the loss, gradients on model parameters, and last hidden state
"""
input_size = len(inputs[0])
target_size = len(targets[0])
xs, hs, ys, ps = {}, {}, {}, {}
hs[-1] = np.copy(self.hprev)
loss = np.zeros((len(inputs), 1))
score = np.zeros((len(inputs), 1))
# forward pass
for t in range(len(inputs)):
xs[t] = np.reshape(inputs[t], (input_size, 1))
hs[t] = np.tanh(np.dot(self.Wxh, xs[t]) + np.dot(self.Whh, hs[t-1]) + self.bh) # hidden state
if self.use_regularization and self.use_dropout_regularization:
U1 = (np.random.rand(*hs[t].shape) < self.dropout_threshold) / self.dropout_threshold # dropout mask
hs[t] *= U1 # drop!
ys[t] = np.dot(self.Why, hs[t]) + self.by
ps[t] = ys[t] - np.reshape(targets[t], (target_size, 1))
loss[t] = np.sum(np.abs(ps[t])) # L1 norm
score[t] = self.__score_mean(np.abs(ps[t]), ytr_missing[t])
# backward pass: compute gradients going backwards
dWxh, dWhh, dWhy = np.zeros_like(self.Wxh), np.zeros_like(self.Whh), np.zeros_like(self.Why)
dbh, dby = np.zeros_like(self.bh), np.zeros_like(self.by)
dhnext = np.zeros_like(hs[0])
for t in reversed(range(len(inputs))):
dy = np.sign(ps[t]) # the gradient for y only inherits the sign of the difference for L1 norm (http://cs231n.github.io/neural-networks-2/#losses)
dWhy += np.dot(dy, hs[t].T)
dby += dy
dh = np.dot(self.Why.T, dy) + dhnext # backprop into h
dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity
dbh += dhraw
dWxh += np.dot(dhraw, xs[t].T)
dWhh += np.dot(dhraw, hs[t-1].T)
dhnext = np.dot(self.Whh.T, dhraw)
# add L2 regularization gradient contribution if not dropout
if self.use_regularization and not self.use_dropout_regularization:
dWhy += self.reg_strenght * self.Why
dWhh += self.reg_strenght * self.Whh
dWxh += self.reg_strenght * self.Wxh
for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
return np.average(loss), np.average(score), dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]
def __step_relu(self, inputs, targets, ytr_missing):
"""
        The one step in RNN computations using the (leaky) ReLU function as the non-linear activation function
inputs, targets are both arrays of real numbers with shapes (input_size, 1) and (target_size, 1) respectively.
hprev is array of initial hidden state with shape (hidden_size, 1)
Wxh, Whh, Why - the neurons input/output weights
bh, by - the hidden/output layer bias
returns the loss, score_mean, gradients on model parameters, and last hidden state
"""
#
# forward pass
#
xs = inputs.T
#hs = np.maximum(0, np.dot(self.Wxh, xs) + np.dot(self.Whh, self.hprev) + self.bh) # hidden state, ReLU activation
hs = np.dot(self.Wxh, xs) + np.dot(self.Whh, self.hprev) + self.bh
hs[hs<0] *= self.relu_neg_slope
if self.use_regularization and self.use_dropout_regularization:
            U1 = (np.random.rand(*hs.shape) < self.dropout_threshold) / self.dropout_threshold  # dropout mask (use the dropout threshold, as in the tanh path)
hs *= U1 # drop!
ys = np.dot(self.Why, hs) + self.by # unnormalized next outputs
ps = ys - targets.T
loss = np.sum(np.abs(ps)) # L1 norm
#
# backward pass: compute gradients going backwards
#
dy = np.sign(ps) # the gradient for y only inherits the sign of the difference for L1 norm (http://cs231n.github.io/neural-networks-2/#reg)
dWhy = np.dot(dy, hs.T)
dby = dy
dh = np.dot(self.Why.T, dy) # backprop into h
        dh[hs < 0] *= self.relu_neg_slope  # backprop through the leaky ReLU non-linearity
dbh = dh
dWxh = np.dot(dh, inputs)
dWhh = np.dot(dh, self.hprev.T)
# add L2 regularization gradient contribution if not dropout
if self.use_regularization and not self.use_dropout_regularization:
dWhy += self.reg_strenght * self.Why
dWhh += self.reg_strenght * self.Whh
dWxh += self.reg_strenght * self.Wxh
#for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
# np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
score = self.__score_mean(np.abs(ps), ytr_missing) # IMPORTANT: use COVAR_y_MISSING flags for mean calculation without missed Y
return loss, score, dWxh, dWhh, dWhy, dbh, dby, hs
def __batch_step_relu(self, inputs, targets, ytr_missing):
"""
        The one step in RNN computations over a mini-batch of input features using the (leaky) ReLU function as the non-linear activation function
inputs,targets are both list of real numbers.
hprev is Hx1 array of initial hidden state
returns the loss, gradients on model parameters, and last hidden state
"""
input_size = len(inputs[0])
target_size = len(targets[0])
xs, hs, ys, ps = {}, {}, {}, {}
hs[-1] = np.copy(self.hprev)
loss = np.zeros((len(inputs), 1))
score = np.zeros((len(inputs), 1))
# forward pass
for t in range(len(inputs)):
xs[t] = np.reshape(inputs[t], (input_size, 1))
#hs[t] = np.maximum(0, np.dot(self.Wxh, xs[t]) + np.dot(self.Whh, hs[t-1]) + self.bh) # hidden state, ReLU Activation
hs[t] = np.dot(self.Wxh, xs[t]) + np.dot(self.Whh, hs[t-1]) + self.bh
            hs[t][hs[t] < 0] *= self.relu_neg_slope  # leaky ReLU on the current hidden state
if self.use_regularization and self.use_dropout_regularization:
                U1 = (np.random.rand(*hs[t].shape) < self.dropout_threshold) / self.dropout_threshold  # dropout mask (use the dropout threshold, as in the tanh path)
hs[t] *= U1 # drop!
ys[t] = np.dot(self.Why, hs[t]) + self.by
ps[t] = ys[t] - np.reshape(targets[t], (target_size, 1))
loss[t] = np.sum(np.abs(ps[t])) # L1 norm
score[t] = self.__score_mean(np.abs(ps[t]), ytr_missing[t])
# backward pass: compute gradients going backwards
dWxh, dWhh, dWhy = np.zeros_like(self.Wxh), np.zeros_like(self.Whh), np.zeros_like(self.Why)
dbh, dby = np.zeros_like(self.bh), np.zeros_like(self.by)
dhnext = np.zeros_like(hs[0])
for t in reversed(range(len(inputs))):
dy = np.sign(ps[t]) # the gradient for y only inherits the sign of the difference for L1 norm (http://cs231n.github.io/neural-networks-2/#losses)
dWhy += np.dot(dy, hs[t].T)
dby += dy
dh = np.dot(self.Why.T, dy) + dhnext # backprop into h
            dh[hs[t] < 0] *= self.relu_neg_slope  # backprop through the leaky ReLU: scale the gradient by the negative slope
dbh += dh
dWxh += np.dot(dh, xs[t].T)
dWhh += np.dot(dh, hs[t-1].T)
dhnext = np.dot(self.Whh.T, dh)
# add L2 regularization gradient contribution if not dropout
if self.use_regularization and not self.use_dropout_regularization:
dWhy += self.reg_strenght * self.Why
dWhh += self.reg_strenght * self.Whh
dWxh += self.reg_strenght * self.Wxh
#for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
# np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
return np.average(loss), np.average(score), dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]
def __score_mean(self, abs_diff, y_missing):
"""
Calculates score mean on based absolute differences between Y predicted and target
abs_diff = |Ypred - Yeval|
y_missing the array with COVAR_y_MISSING flags with shape (target_size, 1)
"""
scores = abs_diff.flat[~y_missing]
return np.mean(scores)
def __make_score(self, mean_scores):
"""
Calculates final score from provided array of mean scores
mean_scores the array of mean scores
return score value
"""
n = len(mean_scores)
sum_r = np.sum(mean_scores)
score = 10 * (1 - sum_r/n)
return score
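    # Worked example (illustrative): for mean scores [0.2, 0.4, 0.6] the sum is
    # 1.2 over n = 3 samples, so score = 10 * (1 - 1.2/3) = 6.0; a perfect
    # predictor (all mean scores equal to 0) reaches the maximum score of 10.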
def __validate(self, y, y_target, y_missing):
"""
The method to validate calculated validation outputs against ground truth
y the calculated predictions with shape (num_samples, output_size)
y_target the ground trouth with shape (num_samples, output_size)
y_missing the array of flags denoting missed ground trouth value for predicition with shape (num_samples, output_size)
return calculated score and error values over provided data set
"""
num_samples = len(y)
scores = np.zeros((num_samples, 1))
errors = np.zeros_like(scores)
for t in range(num_samples):
# find error per sample
ps = y[t] - y_target[t]
errors[t] = np.sum(np.abs(ps)) # L1 norm
# find score per sample
scores[t] = self.__score_mean(np.abs(ps), y_missing[t])
# find total score and error
score = self.__make_score(scores)
error = np.average(errors, axis=0)
return error, score
def __predict(self, Xvl, hprev):
"""
The RNN predict method
Xvl - the test data features
"""
n = len(Xvl)
input_size = len(Xvl[0])
y_est = np.zeros((n, self.n_outputs))
for t in range(n):
x = np.reshape(Xvl[t], (input_size, 1))
            hprev = np.tanh(np.dot(self.Wxh, x) + np.dot(self.Whh, hprev) + self.bh)  # note: inference always applies tanh, regardless of the configured activation rule
y = np.dot(self.Why, hprev) + self.by
y_est[t] = y.T
return y_est
def __initNNParameters(self):
"""
Do NN parameters initialization according to provided data samples
input_size the input layer size
output_size the output layer size
"""
self.Wxh = np.random.randn(self.hidden_size, self.n_features) * 0.01 # input to hidden
self.Whh = np.random.randn(self.hidden_size, self.hidden_size) * 0.01 # hidden to hidden
self.Why = np.random.randn(self.n_outputs, self.hidden_size) * 0.01 # hidden to output
self.bh = np.zeros((self.hidden_size, 1)) # hidden bias
self.by = np.zeros((self.n_outputs, 1)) # output bias
self.hprev = np.zeros((self.hidden_size,1))
def __activationFunction(self):
"""
Finds appropriate activation function depending on configuration
"""
step_f = None
if self.use_batch_step:
if self.activation_rule == 'Tanh':
step_f = self.__batch_step_tanh
elif self.activation_rule == 'ReLU':
step_f = self.__batch_step_relu
else:
if self.activation_rule == 'Tanh':
step_f = self.__step_tanh
elif self.activation_rule == 'ReLU':
step_f = self.__step_relu
        if step_f is None:
            raise ValueError('Unsupported activation function specified: {}'.format(self.activation_rule))
return step_f
# gradient checking
def __gradCheck(self, inputs, targets, ytr_missing):
"""
The gradient check to test if analytic and numerical gradients converge
returns found gradient errors per paarameter as map
"""
num_checks, delta = 10, 1e-5
step_f = self.__activationFunction()
        _, _, dWxh, dWhh, dWhy, dbh, dby, _ = step_f(inputs, targets, ytr_missing)
gradient_rel_errors = {}
for param,dparam,name in zip([self.Wxh, self.Whh, self.Why, self.bh, self.by], [dWxh, dWhh, dWhy, dbh, dby], ['Wxh', 'Whh', 'Why', 'bh', 'by']):
s0 = dparam.shape
s1 = param.shape
            assert s0 == s1, 'Error: dims do not match: %s and %s.' % (s0, s1)
            print(name)
            errors = np.zeros((num_checks, 1))
            for i in range(num_checks):
                ri = int(np.random.uniform(0, param.size))
# evaluate cost at [x + delta] and [x - delta]
old_val = param.flat[ri]
param.flat[ri] = old_val + delta
                cg0, _, _, _, _, _, _, _ = step_f(inputs, targets, ytr_missing)
param.flat[ri] = old_val - delta
                cg1, _, _, _, _, _, _, _ = step_f(inputs, targets, ytr_missing)
param.flat[ri] = old_val # reset old value for this parameter
# fetch both numerical and analytic gradient
grad_analytic = dparam.flat[ri]
grad_numerical = (cg0 - cg1) / ( 2 * delta )
if grad_numerical + grad_analytic != 0:
rel_error = abs(grad_analytic - grad_numerical) / abs(grad_numerical + grad_analytic)
                    print('%f, %f => %e ' % (grad_numerical, grad_analytic, rel_error))
# rel_error should be on order of 1e-7 or less
errors[i] = rel_error
else:
errors[i] = 0
# store relative gradient error average per parameter
gradient_rel_errors[name] = np.average(errors)
return gradient_rel_errors
|
python
|
from baiji.serialization import json
from baiji.serialization.json import JSONDecoder
class BlmathJSONDecoder(JSONDecoder):
def __init__(self):
super(BlmathJSONDecoder, self).__init__()
self.register(self.decode_value)
def decode_value(self, dct):
from blmath.value import Value
if "__value__" in dct.keys():
return Value.from_json(dct)
def dump(obj, f, *args, **kwargs):
return json.dump(obj, f, *args, **kwargs)
def load(f, *args, **kwargs):
kwargs.update(decoder=BlmathJSONDecoder())
return json.load(f, *args, **kwargs)
def dumps(*args, **kwargs):
return json.dumps(*args, **kwargs)
def loads(*args, **kwargs):
kwargs.update(decoder=BlmathJSONDecoder())
return json.loads(*args, **kwargs)
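# Usage sketch (illustrative): JSON text produced by dumps() that contains a
# "__value__" key is decoded back into a blmath.value.Value instance by loads(),
# via the Value.from_json hook registered on BlmathJSONDecoder.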
|
python
|
class Solution:
def nthUglyNumber(self, n):
"""
:type n: int
:rtype: int
"""
if n < 1:
return 0
ugly = [0]*n
ugly[0] = 1
i2 = i3 = i5 = 0
nm_2, nm_3, nm_5 = 2, 3, 5
for i in range(1, n):
ugly[i] = min(nm_2, nm_3, nm_5)
if ugly[i] == nm_2:
i2 += 1
nm_2 = ugly[i2]*2
if ugly[i] == nm_3:
i3 += 1
nm_3 = ugly[i3]*3
if ugly[i] == nm_5:
i5 += 1
nm_5 = ugly[i5]*5
return ugly[-1]
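# Example (illustrative): the ugly-number sequence begins 1, 2, 3, 4, 5, 6, 8, 9,
# 10, 12, so Solution().nthUglyNumber(10) returns 12.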
|
python
|
def slices(series, length):
    if length < 1 or length > len(series):
        raise ValueError("slice length must be between 1 and the length of the series")
i = 0
result = []
while i+length <= len(series):
result.append(series[i:i+length])
i+=1
return result
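# Example (illustrative): slices("49142", 3) returns ["491", "914", "142"].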
|
python
|
name = input("What is your name?\n")
print ("Hello", name)
|
python
|
from keras.datasets import mnist
import os
from pandas import DataFrame
from PIL import Image
from autokeras.utils import ensure_dir
ensure_dir('mnist/train')
ensure_dir('mnist/test')
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# x_train = x_train.reshape(x_train.shape + (1,))
# x_test = x_test.reshape(x_test.shape + (1,))
# file_names = []
# for i in range(len(x_train)):
# file_name = ("%05d" % (i, )) + '.jpg'
# Image.fromarray(x_train[i]).save(os.path.join('mnist', 'train', file_name))
# file_names.append(file_name)
#
# csv_data = {'File Name': file_names, 'Label': y_train}
# DataFrame(csv_data).to_csv('mnist/train/label.csv', index=False)
file_names = []
for i in range(len(x_test)):
file_name = ("%05d" % (i, )) + '.jpg'
Image.fromarray(x_test[i]).save(os.path.join('mnist', 'test', file_name))
file_names.append(file_name)
csv_data = {'File Name': file_names, 'Label': y_test}
DataFrame(csv_data).to_csv('mnist/test/label.csv', index=False)
|
python
|
from django.http import HttpResponse
# from django.shortcuts import render
# Create your views here.
def home(request):
    return HttpResponse('Hello Django')
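# A minimal sketch of wiring this view into a URLconf (hypothetical urls.py for
# this app):
# from django.urls import path
# from .views import home
# urlpatterns = [path('', home)]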
|
python
|
import os
import subprocess
if os.environ.get("HEXA_FEATURE_FLAG_S3FS", "false") == "true":
for bucket_name in os.environ.get("AWS_S3_BUCKET_NAMES", "").split(","):
        path_to_umount = f"/home/jovyan/s3-{bucket_name}"
subprocess.run(
[
"umount",
path_to_umount,
]
)
subprocess.run(["rmdir", path_to_umount])
|
python
|
"""Returns compiled regex from regular expression."""
import re
import pytest
from mklists.returns import compile_regex
# pylint: disable=anomalous-backslash-in-string
# These are just tests...
def test_compile_regex():
"""Returns compiled regex from simple string."""
regex = "NOW"
assert isinstance(compile_regex(regex), re.Pattern)
def test_compile_regex_unescaped_parenthesis():
"""Raises exception when trying to compile regex with unescaped parenthesis."""
regex = "N(OW"
with pytest.raises(SystemExit):
compile_regex(regex)
def test_compile_regex_with_escaped_parenthesis():
"""Returns compiled regex with escaped parenthesis."""
regex = "N\(OW"
regex_compiled = compile_regex(regex)
assert re.search(regex_compiled, "N(OW")
def test_compile_regex_with_unescaped_backslash():
"""Raises exception when trying to compile regex with unescaped backslash."""
regex = "N\OW"
with pytest.raises(SystemExit):
compile_regex(regex)
def test_compile_regex_with_escaped_backslash():
"""Raises exception when trying to compile regex with escaped backslash."""
regex = "N\\OW"
with pytest.raises(SystemExit):
compile_regex(regex)
def test_compile_regex_with_double_escaped_backslash():
"""Compiles regex with double-escaped backslash."""
regex = "N\\\\OW"
regex_compiled = compile_regex(regex)
assert re.search(regex_compiled, "N\OW")
def test_compile_regex_uses_backslash_chain():
"""Returns compiled regex from string with backslash chain."""
regex = "\d\d\d"
regex_compiled = compile_regex(regex)
    assert isinstance(regex_compiled, re.Pattern)
assert re.search(regex_compiled, "123")
def test_compile_regex_with_phone_number_regex():
"""Returns compiled regex from regex for a US telephone number."""
regex = "^(\d{3})-(\d{3})-(\d{4})$"
regex_compiled = compile_regex(regex)
assert re.search(regex_compiled, "216-321-1234")
def test_compile_regex_with_blanks():
"""Returns compiled regex from regex with blank spaces."""
regex = "^(\d{3}) (\d{3}) (\d{4})$"
regex_compiled = compile_regex(regex)
assert re.search(regex_compiled, "216 321 1234")
def test_compile_regex_with_uppercase_letters_only():
"""Returns compiled regex from regex with uppercase characters."""
regex = "^[A-Z]*$"
regex_compiled = compile_regex(regex)
assert re.search(regex_compiled, "ASDF")
def test_compile_regex_with_wildcards_and_one_space():
"""Returns compiled regex from regex with uppercase characters."""
regex = "^=* "
regex_compiled = compile_regex(regex)
assert re.search(regex_compiled, "= ")
assert re.search(regex_compiled, "== ")
assert re.search(regex_compiled, "====== ")
|
python
|
from __future__ import print_function, division
# Import required libraries
import os
import sys
import datetime
from glob import glob

import cv2
import pandas
import scipy
import scipy.misc
import numpy as np
import matplotlib.pyplot as plt

import keras
from keras import backend as K  # needed by the perceptual loss (K.mean, K.square)
from keras.applications.vgg16 import VGG16
from keras.models import Model, Sequential, model_from_json
from keras.layers import *
from keras.layers import Input, Dense, Dropout, LeakyReLU, Reshape
from keras.layers.core import Activation, Flatten
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import UpSampling2D, Conv2D, MaxPooling2D, Conv2DTranspose
from keras.optimizers import RMSprop, Adam, SGD

from data_loader import DataLoader
# Create a de-raining class
class IDGAN():
def __init__(self):
self.img_rows = 256 #No of rows in image after resize
self.img_cols = 256 #No of columns in image after resize
self.channels = 3 #No of image channels
self.img_shape = (self.img_rows, self.img_cols, self.channels) #Image Shape
self.dataset_name = 'rain' # Name of the Dataset
self.data_loader = DataLoader(dataset_name=self.dataset_name,
img_res=(self.img_rows, self.img_cols)) #Loading the data from the data_loader.py
self.disc_out = (14, 14, 72) #Output of the Multi Scale Discriminator to incorporate Global context of Image
        self.discriminator = self.build_discriminator() # Build the Discriminator
self.generator = self.build_generator() # Build the Generator
self.CGAN_model = self.build_CGAN() # Build the combined GAN Network
self.optimizer_cgan = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08) #Using Adam optimizer for Generator with Learning rate acc to Paper
self.optimizer_discriminator = SGD(lr=1E-3, momentum=0.9, decay=1e-6, nesterov=False) #Using SGD for discriminator
def build_CGAN(self):
self.discriminator.trainable = False # During training of Generator stop Discriminator
img_B = Input(shape=self.img_shape)
fake_A = self.generator(img_B) # Fake Image generated from generator
discriminator_output = self.discriminator([fake_A, img_B])
CGAN_model = Model(inputs = [img_B],
outputs = [fake_A, fake_A, discriminator_output],
name = 'CGAN') # 3 Outputs for 3 losses
return CGAN_model
def build_discriminator(self):
def d_layer(layer_input, filters, f_size=4, bn=True): #Discriminator Layer
x = Conv2D(filters, kernel_size=f_size, strides=1)(layer_input)
x = PReLU()(x)
if bn:
x = BatchNormalization(momentum=0.8)(x)
x = MaxPooling2D()(x)
return x
def Deconv2d(layer_input, filters, kernel=4, dropout_rate=0): # Deconvolution Layer
x = UpSampling2D(size=2)(layer_input)
x = Conv2D(filters, kernel_size=kernel, strides=1, padding='same', activation='relu')(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
x = BatchNormalization(momentum=0.8)(x)
return x
def Pyramid_Pool(layer_input): # Spatial Pyramid Pooling
x_list = [layer_input]
def Pool(size):
x = MaxPooling2D(pool_size=(size*2,size*2))(layer_input)
for i in range(size):
x = Deconv2d(x,2)
return x
x_list.append(Pool(1)) # First level of Pyramid
x2 = MaxPooling2D(pool_size=(4,4))(layer_input) # Second level of Pyramid
x2 = Deconv2d(x2,2)
x2 = Deconv2d(x2,2)
x2 = ZeroPadding2D(padding=(1,1))(x2)
x_list.append(x2)
x3 = MaxPooling2D(pool_size=(8,8))(layer_input) # Last level of Pyramid
x3 = Deconv2d(x3,4)
x3 = Deconv2d(x3,4)
x3 = Deconv2d(x3,4)
x3 = ZeroPadding2D(padding=(3,3))(x3)
x_list.append(x3)
x = Concatenate(axis=-1)(x_list)
return x
img_A = Input(shape=self.img_shape)
img_B = Input(shape=self.img_shape)
combined_imgs = Concatenate(axis=-1)([img_A, img_B])
x0 = d_layer(combined_imgs,64,3)
x1 = d_layer(x0,256,3)
x2 = d_layer(x1,512,3)
x3 = d_layer(x2,64,3)
x4 = Pyramid_Pool(x3)
out = Activation('sigmoid')(x4) # Output is 72 channel for multi scale discriminator
return Model([img_A,img_B],out)
def build_generator(self):
def Conv2d(layer_input,no_filters,kernel,stride,bn=False,padding='valid'): # Generator Convolution Layer
x = Conv2D(filters=no_filters,kernel_size=kernel,strides=stride,padding=padding)(layer_input)
x = BatchNormalization(momentum=0.8)(x)
x = Activation('relu')(x)
return x
def dense_block(layer_input,num_layers): # Dense Block from Dense net Model using Skip Connections
x_list = [layer_input]
for i in range(num_layers):
x = Conv2D(filters=32,kernel_size=(3,3),padding='same')(layer_input)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x_list.append(x)
x = Concatenate(axis=-1)(x_list) #Concatenating all skip connections
return x
def Deconv2d(layer_input, filters, kernel=4, dropout_rate=0): # UpSampling block
x = UpSampling2D(size=2)(layer_input)
x = Conv2D(filters, kernel_size=kernel, strides=1, padding='same', activation='relu')(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
x = BatchNormalization(momentum=0.8)(x)
return x
inp=Input(shape=self.img_shape)
#DownSampling
x0 = Conv2d(inp,64,(3,3),(1,1),bn=True)
x0 = MaxPooling2D()(x0)
x1 = dense_block(x0,4)
x1 = Conv2d(x1,128,(3,3),(2,2),bn=True)
x2 = dense_block(x1,6)
x2 = Conv2d(x2,256,(3,3),(2,2),bn=True)
x3 = dense_block(x2,8)
x3 = Conv2d(x3,512,(3,3),(1,1),bn=True,padding='same')
x4 = dense_block(x3,8)
x4 = Conv2d(x4,128,(3,3),(1,1),bn=True,padding='same')
#UpSampling
x5 = dense_block(x4,6)
x5 = Deconv2d(x5,120)
x6 = dense_block(x5,4)
x6 = Deconv2d(x6,64)
x7 = dense_block(x6,4)
x7 = Deconv2d(x7,64)
x8 = dense_block(x7,4)
x8 = Conv2d(x8,16,(3,3),(1,1),bn=True,padding='same')
x9 = ZeroPadding2D(padding=(5,5))(x8)
x10 = Conv2D(filters=3,kernel_size=(3,3))(x9)
out = Activation('tanh')(x10)
return Model(inp,out)
def train(self, epochs, batch_size=5, sample_interval=28):
def perceptual_loss(img_true, img_generated): # Perceptual Loss as mentioned in paper using pretrained VGG16
image_shape = self.img_shape
vgg = VGG16(include_top=False, weights='imagenet', input_shape=image_shape)
loss_block3 = Model(inputs=vgg.input, outputs=vgg.get_layer('block3_conv3').output)
loss_block3.trainable = False
loss_block2 = Model(inputs=vgg.input, outputs=vgg.get_layer('block2_conv2').output)
loss_block2.trainable = False
            loss_block1 = Model(inputs=vgg.input, outputs=vgg.get_layer('block1_conv2').output)
loss_block1.trainable = False
return K.mean(K.square(loss_block1(img_true) - loss_block1(img_generated))) + 2*K.mean(K.square(loss_block2(img_true) - loss_block2(img_generated))) + 5*K.mean(K.square(loss_block3(img_true) - loss_block3(img_generated)))
self.discriminator.trainable = False # Set the Discriminator to false for training Generator
self.generator.compile(loss=perceptual_loss , optimizer= self.optimizer_cgan) #Compile the Generator
        CGAN_loss = ['mae', perceptual_loss, 'mse'] # All three losses
CGAN_loss_weights = [6.6e-3, 1 , 6.6e-3]
self.CGAN_model.compile(loss = CGAN_loss, loss_weights = CGAN_loss_weights,
optimizer = self.optimizer_cgan)
#To train the Discriminator set trainable to true
self.discriminator.trainable = True
self.discriminator.compile(loss="mse",
optimizer = self.optimizer_discriminator)
start_time = datetime.datetime.now()
valid = np.ones((batch_size,) + self.disc_out) # For Real world Images
fake = np.zeros((batch_size,) + self.disc_out) # For Generated Images
for epoch in range(epochs):
for batch_i, (imgs_A, imgs_B) in enumerate(self.data_loader.load_batch(batch_size)):
fake_A = self.generator.predict(imgs_B) # Generated Image
d_loss_real = self.discriminator.train_on_batch([imgs_A, imgs_B], valid)
d_loss_fake = self.discriminator.train_on_batch([fake_A, imgs_B], fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake) #Discriminator Loss
self.CGAN_model.trainable = True # Train the Combined model
self.discriminator.trainable = False
g_loss = self.CGAN_model.train_on_batch(imgs_B, [imgs_A,imgs_A,valid])
elapsed_time = datetime.datetime.now() - start_time
print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] time: %s" % (epoch, epochs,
batch_i, self.data_loader.n_batches,
d_loss,
g_loss[0],
elapsed_time))
if batch_i % sample_interval == 0:
self.sample_images(epoch, batch_i)
# Save all models after sample_interval here 25
com_model_json = self.CGAN_model.to_json()
gen_model_json = self.generator.to_json()
dis_model_json = self.discriminator.to_json()
with open("./saved_models/com_model.json", "w") as json_file:
json_file.write(com_model_json)
with open("./saved_models/gen_model.json", "w") as json_file:
json_file.write(gen_model_json)
with open("./saved_models/dis_model.json", "w") as json_file:
json_file.write(dis_model_json)
                    self.CGAN_model.save_weights("./saved_models/com_model.h5")
self.generator.save_weights("./saved_models/gen_model.h5")
self.discriminator.save_weights("./saved_models/dis_model.h5")
print("Model saved")
def sample_images(self, epoch, batch_i):
#Sample Images saved after sample interval epochs
os.makedirs('images/%s' % self.dataset_name, exist_ok=True)
r, c = 3, 3
imgs_A, imgs_B = self.data_loader.load_data(batch_size=3, is_testing=True)
fake_A = self.generator.predict(imgs_B)
gen_imgs = np.concatenate([imgs_B, fake_A, imgs_A])
# Rescale images 0 - 1
gen_imgs = 0.5 * gen_imgs + 0.5
titles = ['WithRain', 'Generated', 'Original']
fig, axs = plt.subplots(r, c)
cnt = 0
for i in range(r):
for j in range(c):
axs[i,j].imshow(gen_imgs[cnt])
axs[i, j].set_title(titles[i])
axs[i,j].axis('off')
cnt += 1
fig.savefig("images/%s/%d_%d.png" % (self.dataset_name, epoch, batch_i))
plt.close()
#Training
gan=IDGAN()
## Train the model
gan.train(epochs=150, batch_size=1, sample_interval=25)
#Testing
## use the trained model to generate data
test_model = gan.build_generator()
test_model.load_weights("./saved_models/gen_model.h5")
path = glob("./dataset/rain/test_nature/*")
num = 1
for img in path:
img_B = scipy.misc.imread(img, mode='RGB').astype(np.float)
m,n,d = img_B.shape
    img_show = np.zeros((m, 2*n, d))
img_b = np.array([img_B])/127.5 - 1
fake_A = 0.5* (test_model.predict(img_b))[0]+0.5
img_show[:,:n,:] = img_B/255
img_show[:,n:2*n,:] = fake_A
scipy.misc.imsave("./images/rain/test_nature/%d.jpg" % num,img_show)
num = num + 1
|
python
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for building a LaserTagger TF model."""
from __future__ import absolute_import, division, print_function
from bert import modeling, optimization
import numpy as np
from official_transformer import model_params
import tensorflow as tf
import transformer_decoder
class LaserTaggerConfig(modeling.BertConfig):
"""Model configuration for LaserTagger."""
def __init__(self,
use_t2t_decoder=True,
decoder_num_hidden_layers=1,
decoder_hidden_size=768,
decoder_num_attention_heads=4,
decoder_filter_size=3072,
use_full_attention=False,
**kwargs):
"""Initializes an instance of LaserTagger configuration.
This initializer expects both the BERT specific arguments and the
Transformer decoder arguments listed below.
Args:
use_t2t_decoder: Whether to use the Transformer decoder (i.e.
LaserTagger_AR). If False, the remaining args do not affect anything and
can be set to default values.
decoder_num_hidden_layers: Number of hidden decoder layers.
decoder_hidden_size: Decoder hidden size.
decoder_num_attention_heads: Number of decoder attention heads.
decoder_filter_size: Decoder filter size.
use_full_attention: Whether to use full encoder-decoder attention.
**kwargs: The arguments that the modeling.BertConfig initializer expects.
"""
super(LaserTaggerConfig, self).__init__(**kwargs)
self.use_t2t_decoder = use_t2t_decoder
self.decoder_num_hidden_layers = decoder_num_hidden_layers
self.decoder_hidden_size = decoder_hidden_size
self.decoder_num_attention_heads = decoder_num_attention_heads
self.decoder_filter_size = decoder_filter_size
self.use_full_attention = use_full_attention
class ModelFnBuilder(object):
"""Class for building `model_fn` closure for TPUEstimator."""
def __init__(self, config, num_tags, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings, max_seq_length,
verb_deletion_loss_weight, verb_tags, delete_tags,
relative_loss_weight, smallest_add_tag, delete_tags_ids,
keep_tags_ids):
"""Initializes an instance of a LaserTagger model.
Args:
config: LaserTagger model configuration.
num_tags: Number of different tags to be predicted.
init_checkpoint: Path to a pretrained BERT checkpoint (optional).
learning_rate: Learning rate.
num_train_steps: Number of training steps.
num_warmup_steps: Number of warmup steps.
use_tpu: Whether to use TPU.
use_one_hot_embeddings: Whether to use one-hot embeddings for word
embeddings.
max_seq_length: Maximum sequence length.
      verb_deletion_loss_weight: the weight of the loss for deleting verbs.
      verb_tags: a list of POS tags (integers) corresponding to verbs.
      delete_tags: a list of integers with the length of the vocab number. The
        integer is 0 if the corresponding vocab is not "DELETE" and is 1 if
        the corresponding vocab is "DELETE".
      relative_loss_weight: relative loss weights of the add, keep and delete
        edits, in that order.
      smallest_add_tag: the smallest tag id corresponding to an add edit.
      delete_tags_ids: ids of the tags corresponding to delete edits.
      keep_tags_ids: ids of the tags corresponding to keep edits.
    """
self._config = config
self._num_tags = num_tags
self._init_checkpoint = init_checkpoint
self._learning_rate = learning_rate
self._num_train_steps = num_train_steps
self._num_warmup_steps = num_warmup_steps
self._use_tpu = use_tpu
self._use_one_hot_embeddings = use_one_hot_embeddings
self._max_seq_length = max_seq_length
self._verb_deletion_loss_weight = verb_deletion_loss_weight
self._verb_tags = verb_tags
if self._config.use_t2t_decoder:
self._delete_tags = np.insert(delete_tags, 0, [0, 0], axis=0)
self._smallest_add_tags_ids = smallest_add_tag + 2
self._delete_tags_ids = np.unique(np.array(delete_tags_ids) + 2)
self._keep_tags_ids = np.unique(np.array(keep_tags_ids) + 2)
else:
self._delete_tags = delete_tags
self._smallest_add_tags_ids = smallest_add_tag
self._delete_tags_ids = delete_tags_ids
self._keep_tags_ids = keep_tags_ids
self._add_weight, self._keep_weight, self._delete_weight = relative_loss_weight
def _create_model(self, mode, input_ids, input_mask, segment_ids, labels,
labels_mask):
"""Creates a LaserTagger model."""
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
model = modeling.BertModel(
config=self._config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=self._use_one_hot_embeddings)
final_hidden = model.get_sequence_output()
if self._config.use_t2t_decoder:
# Size of the output vocabulary which contains the tags + begin and end
# tokens used by the Transformer decoder.
output_vocab_size = self._num_tags + 2
params = _get_decoder_params(self._config, self._use_tpu,
self._max_seq_length, output_vocab_size)
decoder = transformer_decoder.TransformerDecoder(params, is_training)
logits = decoder(input_mask, final_hidden, labels)
else:
if is_training:
# I.e., 0.1 dropout
final_hidden = tf.nn.dropout(final_hidden, keep_prob=0.9)
logits = tf.layers.dense(
final_hidden,
self._num_tags,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name="output_projection")
with tf.variable_scope("loss"):
loss = None
per_example_loss = None
if mode != tf.estimator.ModeKeys.PREDICT:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
logits=logits)
if self._verb_tags is not None and self._verb_deletion_loss_weight != 0:
logits_tensor_shape_as_list = logits.get_shape().as_list()
batch_size, token_length, number_of_tags = logits_tensor_shape_as_list[
0:3]
verb_mask = tf.constant(0.0,
dtype="float32",
shape=segment_ids.get_shape())
for verb_tag in self._verb_tags:
verb_mask = tf.math.add(
tf.cast(
tf.math.equal(tf.constant(verb_tag, dtype="int32"),
segment_ids), tf.float32), verb_mask)
delete_tags = self._delete_tags
delete_tags = np.repeat(delete_tags[np.newaxis, :],
token_length,
axis=0)
delete_tags = np.repeat(delete_tags[np.newaxis, :, :],
batch_size,
axis=0)
delete_tags_tensor = tf.constant(delete_tags, dtype="float32")
delete_probability = tf.math.divide(
tf.reduce_sum(tf.math.multiply(delete_tags_tensor, logits), 2),
tf.reduce_sum(logits, 2))
delete_loss = tf.math.scalar_mul(
tf.constant(self._verb_deletion_loss_weight, dtype="float32"),
tf.math.multiply(delete_probability, verb_mask))
# new loss = loss * (1 + delete_loss)
loss = tf.math.multiply(
loss,
tf.math.add(
tf.constant(1.0,
dtype="float32",
shape=delete_loss.get_shape()), delete_loss))
# Adjust loss using weights of different edits (add, delete, keep)
if self._add_weight != 1:
add_label_mask = tf.cast(
tf.math.greater_equal(
tf.constant(self._smallest_add_tags_ids, dtype="int32"),
labels), tf.float32)
add_loss_weight = tf.math.scalar_mul(
tf.constant(self._add_weight - 1, dtype="float32"),
add_label_mask)
loss = tf.math.multiply(
loss,
tf.math.add(
tf.constant(1.0,
dtype="float32",
shape=add_loss_weight.get_shape()),
add_loss_weight))
loss = _update_loss_with_weight(loss, self._keep_weight,
self._keep_tags_ids, labels)
loss = _update_loss_with_weight(loss, self._delete_weight,
self._delete_tags_ids, labels)
per_example_loss = tf.truediv(
tf.reduce_sum(loss, axis=1),
tf.dtypes.cast(tf.reduce_sum(labels_mask, axis=1), tf.float32))
loss = tf.reduce_mean(per_example_loss)
pred = tf.cast(tf.argmax(logits, axis=-1), tf.int32)
else:
if self._config.use_t2t_decoder:
pred = logits["outputs"]
# Transformer decoder reserves the first two IDs to the begin and the
# end token so we shift the IDs back.
pred -= 2
else:
pred = tf.cast(tf.argmax(logits, axis=-1), tf.int32)
return (loss, per_example_loss, pred)
def build(self):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s", name, features[name].shape)
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
labels = None
labels_mask = None
if mode != tf.estimator.ModeKeys.PREDICT:
if self._config.use_t2t_decoder:
# Account for the begin and end tokens used by Transformer.
labels = features["labels"] + 2
else:
labels = features["labels"]
labels_mask = tf.cast(features["labels_mask"], tf.float32)
(total_loss, per_example_loss,
predictions) = self._create_model(mode, input_ids, input_mask,
segment_ids, labels, labels_mask)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if self._init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(
tvars, self._init_checkpoint)
if self._use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(self._init_checkpoint,
assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(self._init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
tf.logging.info("Initializing the model from: %s",
self._init_checkpoint)
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(total_loss,
self._learning_rate,
self._num_train_steps,
self._num_warmup_steps,
self._use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, labels, labels_mask, predictions):
"""Compute eval metrics."""
accuracy = tf.cast(
tf.reduce_all(tf.logical_or(tf.equal(labels, predictions),
~tf.cast(labels_mask, tf.bool)),
axis=1), tf.float32)
return {
# This is equal to the Exact score if the final realization step
# doesn't introduce errors.
"sentence_level_acc": tf.metrics.mean(accuracy),
"eval_loss": tf.metrics.mean(per_example_loss),
}
eval_metrics = (metric_fn,
[per_example_loss, labels, labels_mask, predictions])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={"pred": predictions},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
def _get_decoder_params(config, use_tpu, max_seq_length, output_vocab_size):
"""Returns hyperparameters for TransformerDecoder.
Args:
config: LaserTagger model configuration.
use_tpu: Whether to train on TPUs.
max_seq_length: Maximum sequence length.
output_vocab_size: Size of the output vocabulary.
Returns:
Hyperparameter dictionary.
"""
params = model_params.BASE_PARAMS
params.update(
num_hidden_layers=config.decoder_num_hidden_layers,
hidden_size=config.decoder_hidden_size,
num_heads=config.decoder_num_attention_heads,
filter_size=config.decoder_filter_size,
vocab_size=output_vocab_size,
use_tpu=use_tpu,
max_length=max_seq_length,
# This parameter should not be changed since we want the number of decoded
# tags to equal the number of source tokens.
extra_decode_length=0)
return params
def _update_loss_with_weight(loss, weight, filter_labels, labels):
""" Returns loss adjusted with weights.
Args:
loss: original loss before weighting
weight: weight for this edit
filter_labels: the id number of the vocab corresponding to the edit
labels: predicted labels
Returns:
Updated loss
"""
if weight == 1:
return loss
else:
filter_label_mask = tf.constant(0.0,
dtype="float32",
shape=labels.get_shape())
for filter_label in filter_labels:
filter_label_mask = tf.math.add(
tf.cast(
tf.math.equal(tf.constant(filter_label, dtype="int32"), labels),
tf.float32), filter_label_mask)
loss_weight = tf.math.scalar_mul(tf.constant(weight - 1, dtype="float32"),
filter_label_mask)
new_loss = tf.math.multiply(
loss,
tf.math.add(
tf.constant(1.0, dtype="float32", shape=loss_weight.get_shape()),
loss_weight))
return new_loss
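# Worked example (illustrative): with weight = 2.0, tokens whose label appears in
# filter_labels receive a mask of 1, so their loss is scaled by a factor of
# 1 + (2.0 - 1) * 1 = 2.0, while all other tokens keep a factor of 1.0.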
|
python
|
# Copyright (c) 2019, Digi International, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import xbee
print(" +-------------------------------------------+")
print(" | XBee MicroPython Network Discovery Sample |")
print(" +-------------------------------------------+\n")
print("Discovering network devices...\n")
# Discover the network devices and print their basic information.
for device in xbee.discover():
addr64 = device['sender_eui64']
node_id = device['node_id']
rssi = device['rssi']
print("New discovered device:\n"
" - 64-bit address: %s\n"
" - Node identifier: %s\n"
" - RSSI: %d\n"
% (''.join('{:02x}'.format(x).upper() for x in addr64), node_id, rssi))
print("Network discovery finished")
|
python
|
# Write a program to convert a decimal number to its binary, octal and hexadecimal equivalents
n = int(input("Enter the decimal number: "))
print("BINARY EQUIVALENT", bin(n))
print("OCTAL EQUIVALENT", oct(n))
print("HEXADECIMAL EQUIVALENT", hex(n))
|
python
|
from microbit import *
import time
while True:
    temp_c = pin1.read_analog() / 10.23 - 20  # map the 0-1023 ADC reading onto roughly -20..80 degrees Celsius
temp_f = int(temp_c * 9 / 5 + 32)
display.scroll(temp_f)
time.sleep(0.5)
|
python
|