Dataset columns:
  max_stars_repo_path   string  (length 4 to 245)
  max_stars_repo_name   string  (length 7 to 115)
  max_stars_count       int64   (101 to 368k)
  id                    string  (length 2 to 8)
  content               string  (length 6 to 1.03M)

max_stars_repo_path: qualcoder/GUI/ui_dialog_view_image.py
max_stars_repo_name: ericbrasiln/QualCoder
max_stars_count: 150
id: 131100
content:
<reponame>ericbrasiln/QualCoder<gh_stars>100-1000 # -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'ui_dialog_view_image.ui' # # Created by: PyQt5 UI code generator 5.14.1 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_Dialog_view_image(object): def setupUi(self, Dialog_view_image): Dialog_view_image.setObjectName("Dialog_view_image") Dialog_view_image.resize(1021, 715) self.gridLayout = QtWidgets.QGridLayout(Dialog_view_image) self.gridLayout.setObjectName("gridLayout") self.horizontalSlider = QtWidgets.QSlider(Dialog_view_image) self.horizontalSlider.setMinimum(9) self.horizontalSlider.setSingleStep(3) self.horizontalSlider.setProperty("value", 99) self.horizontalSlider.setOrientation(QtCore.Qt.Horizontal) self.horizontalSlider.setTickPosition(QtWidgets.QSlider.TicksBelow) self.horizontalSlider.setTickInterval(10) self.horizontalSlider.setObjectName("horizontalSlider") self.gridLayout.addWidget(self.horizontalSlider, 2, 0, 1, 1) self.textEdit = QtWidgets.QTextEdit(Dialog_view_image) self.textEdit.setMaximumSize(QtCore.QSize(16777215, 80)) self.textEdit.setObjectName("textEdit") self.gridLayout.addWidget(self.textEdit, 3, 0, 1, 1) self.scrollArea = QtWidgets.QScrollArea(Dialog_view_image) self.scrollArea.setWidgetResizable(True) self.scrollArea.setObjectName("scrollArea") self.scrollAreaWidgetContents = QtWidgets.QWidget() self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 1001, 583)) self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents") self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents) self.gridLayout_2.setObjectName("gridLayout_2") self.graphicsView = QtWidgets.QGraphicsView(self.scrollAreaWidgetContents) self.graphicsView.setObjectName("graphicsView") self.gridLayout_2.addWidget(self.graphicsView, 0, 0, 1, 1) self.scrollArea.setWidget(self.scrollAreaWidgetContents) self.gridLayout.addWidget(self.scrollArea, 0, 0, 1, 1) self.retranslateUi(Dialog_view_image) QtCore.QMetaObject.connectSlotsByName(Dialog_view_image) def retranslateUi(self, Dialog_view_image): _translate = QtCore.QCoreApplication.translate Dialog_view_image.setWindowTitle(_translate("Dialog_view_image", "View Image")) self.textEdit.setToolTip(_translate("Dialog_view_image", "<html><head/><body><p>Memo</p></body></html>")) if __name__ == "__main__": import sys app = QtWidgets.QApplication(sys.argv) Dialog_view_image = QtWidgets.QDialog() ui = Ui_Dialog_view_image() ui.setupUi(Dialog_view_image) Dialog_view_image.show() sys.exit(app.exec_())

max_stars_repo_path: lib/transmissionrpc/error.py
max_stars_repo_name: 0x20Man/Watcher3
max_stars_count: 320
id: 131121
content:

<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Copyright (c) 2008-2013 <NAME> <<EMAIL>>
# Licensed under the MIT license.

from six import string_types, integer_types


class TransmissionError(Exception):
    """
    This exception is raised when there has occurred an error related to
    communication with Transmission. It is a subclass of Exception.
    """
    def __init__(self, message='', original=None):
        Exception.__init__(self)
        self.message = message
        self.original = original

    def __str__(self):
        if self.original:
            original_name = type(self.original).__name__
            return '%s Original exception: %s, "%s"' % (self.message, original_name, str(self.original))
        else:
            return self.message


class HTTPHandlerError(Exception):
    """
    This exception is raised when there has occurred an error related to
    the HTTP handler. It is a subclass of Exception.
    """
    def __init__(self, httpurl=None, httpcode=None, httpmsg=None, httpheaders=None, httpdata=None):
        Exception.__init__(self)
        self.url = ''
        self.code = 600
        self.message = ''
        self.headers = {}
        self.data = ''
        if isinstance(httpurl, string_types):
            self.url = httpurl
        if isinstance(httpcode, integer_types):
            self.code = httpcode
        if isinstance(httpmsg, string_types):
            self.message = httpmsg
        if isinstance(httpheaders, dict):
            self.headers = httpheaders
        if isinstance(httpdata, string_types):
            self.data = httpdata

    def __repr__(self):
        return '<HTTPHandlerError %d, %s>' % (self.code, self.message)

    def __str__(self):
        return 'HTTPHandlerError %d: %s' % (self.code, self.message)

    def __unicode__(self):
        return 'HTTPHandlerError %d: %s' % (self.code, self.message)
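A minimal usage sketch for the error classes above, assuming the module is importable as transmissionrpc.error; the failing do_http_request helper is hypothetical and stands in for the real HTTP layer:

from transmissionrpc.error import TransmissionError  # import path assumed from the repo layout


def do_http_request():
    # Hypothetical stand-in for the real HTTP layer; it simply fails.
    raise ConnectionError("connection refused")


try:
    do_http_request()
except Exception as error:
    wrapped = TransmissionError('Failed to connect to Transmission.', original=error)
    print(str(wrapped))
    # Failed to connect to Transmission. Original exception: ConnectionError, "connection refused"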

max_stars_repo_path: tests/test_blobxfer.py
max_stars_repo_name: amishra-dev/blobxfer
max_stars_count: 147
id: 131130
content:

# coding=utf-8
"""Tests for miscellaneous"""

# stdlib imports
# non-stdlib imports
import azure.storage.common
# module under test
import blobxfer.version


def test_user_agent_monkey_patch():
    verstr = 'blobxfer/{}'.format(blobxfer.version.__version__)
    assert azure.storage.common._constants.USER_AGENT_STRING_PREFIX.startswith(
        verstr)

max_stars_repo_path: train.py
max_stars_repo_name: asafnadler/DeepDGA
max_stars_count: 240
id: 131145
content:
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import time import numpy as np import tensorflow as tf import model from data_reader import load_data, DataReader flags = tf.flags # data flags.DEFINE_string('data_dir', 'data', 'data directory. Should contain train.txt/valid.txt/test.txt with input data') flags.DEFINE_string('train_dir', 'cv', 'training directory (models and summaries are saved there periodically)') flags.DEFINE_string('load_model', None, '(optional) filename of the model to load. Useful for re-starting training from a checkpoint') # model params flags.DEFINE_integer('rnn_size', 650, 'size of LSTM internal state') flags.DEFINE_integer('highway_layers', 2, 'number of highway layers') flags.DEFINE_integer('char_embed_size', 15, 'dimensionality of character embeddings') flags.DEFINE_string ('kernels', '[1,2,3,4,5,6,7]', 'CNN kernel widths') flags.DEFINE_string ('kernel_features', '[50,100,150,200,200,200,200]', 'number of features in the CNN kernel') flags.DEFINE_integer('rnn_layers', 2, 'number of layers in the LSTM') flags.DEFINE_float ('dropout', 0.5, 'dropout. 0 = no dropout') # optimization flags.DEFINE_float ('learning_rate_decay', 0.5, 'learning rate decay') flags.DEFINE_float ('learning_rate', 1.0, 'starting learning rate') flags.DEFINE_float ('decay_when', 1.0, 'decay if validation perplexity does not improve by more than this much') flags.DEFINE_float ('param_init', 0.05, 'initialize parameters at') flags.DEFINE_integer('num_unroll_steps', 35, 'number of timesteps to unroll for') flags.DEFINE_integer('batch_size', 20, 'number of sequences to train on in parallel') flags.DEFINE_integer('max_epochs', 25, 'number of full passes through the training data') flags.DEFINE_float ('max_grad_norm', 5.0, 'normalize gradients at') flags.DEFINE_integer('max_word_length', 65, 'maximum word length') # bookkeeping flags.DEFINE_integer('seed', 3435, 'random number generator seed') flags.DEFINE_integer('print_every', 5, 'how often to print current loss') flags.DEFINE_string ('EOS', '+', '<EOS> symbol. 
should be a single unused character (like +) for PTB and blank for others') FLAGS = flags.FLAGS def run_test(session, m, data, batch_size, num_steps): """Runs the model on the given data.""" costs = 0.0 iters = 0 state = session.run(m.initial_state) for step, (x, y) in enumerate(reader.dataset_iterator(data, batch_size, num_steps)): cost, state = session.run([m.cost, m.final_state], { m.input_data: x, m.targets: y, m.initial_state: state }) costs += cost iters += 1 return costs / iters def main(_): ''' Trains model from data ''' if not os.path.exists(FLAGS.train_dir): os.mkdir(FLAGS.train_dir) print('Created training directory', FLAGS.train_dir) word_vocab, char_vocab, word_tensors, char_tensors, max_word_length = \ load_data(FLAGS.data_dir, FLAGS.max_word_length, eos=FLAGS.EOS) train_reader = DataReader(word_tensors['train'], char_tensors['train'], FLAGS.batch_size, FLAGS.num_unroll_steps) valid_reader = DataReader(word_tensors['valid'], char_tensors['valid'], FLAGS.batch_size, FLAGS.num_unroll_steps) test_reader = DataReader(word_tensors['test'], char_tensors['test'], FLAGS.batch_size, FLAGS.num_unroll_steps) print('initialized all dataset readers') with tf.Graph().as_default(), tf.Session() as session: # tensorflow seed must be inside graph tf.set_random_seed(FLAGS.seed) np.random.seed(seed=FLAGS.seed) ''' build training graph ''' initializer = tf.random_uniform_initializer(-FLAGS.param_init, FLAGS.param_init) with tf.variable_scope("Model", initializer=initializer): train_model = model.inference_graph( char_vocab_size=char_vocab.size, word_vocab_size=word_vocab.size, char_embed_size=FLAGS.char_embed_size, batch_size=FLAGS.batch_size, num_highway_layers=FLAGS.highway_layers, num_rnn_layers=FLAGS.rnn_layers, rnn_size=FLAGS.rnn_size, max_word_length=max_word_length, kernels=eval(FLAGS.kernels), kernel_features=eval(FLAGS.kernel_features), num_unroll_steps=FLAGS.num_unroll_steps, dropout=FLAGS.dropout) train_model.update(model.loss_graph(train_model.logits, FLAGS.batch_size, FLAGS.num_unroll_steps)) # scaling loss by FLAGS.num_unroll_steps effectively scales gradients by the same factor. # we need it to reproduce how the original Torch code optimizes. Without this, our gradients will be # much smaller (i.e. 35 times smaller) and to get system to learn we'd have to scale learning rate and max_grad_norm appropriately. # Thus, scaling gradients so that this trainer is exactly compatible with the original train_model.update(model.training_graph(train_model.loss * FLAGS.num_unroll_steps, FLAGS.learning_rate, FLAGS.max_grad_norm)) # create saver before creating more graph nodes, so that we do not save any vars defined below saver = tf.train.Saver(max_to_keep=50) ''' build graph for validation and testing (shares parameters with the training graph!) 
''' with tf.variable_scope("Model", reuse=True): valid_model = model.inference_graph( char_vocab_size=char_vocab.size, word_vocab_size=word_vocab.size, char_embed_size=FLAGS.char_embed_size, batch_size=FLAGS.batch_size, num_highway_layers=FLAGS.highway_layers, num_rnn_layers=FLAGS.rnn_layers, rnn_size=FLAGS.rnn_size, max_word_length=max_word_length, kernels=eval(FLAGS.kernels), kernel_features=eval(FLAGS.kernel_features), num_unroll_steps=FLAGS.num_unroll_steps, dropout=0.0) valid_model.update(model.loss_graph(valid_model.logits, FLAGS.batch_size, FLAGS.num_unroll_steps)) if FLAGS.load_model: saver.restore(session, FLAGS.load_model) print('Loaded model from', FLAGS.load_model, 'saved at global step', train_model.global_step.eval()) else: tf.global_variables_initializer().run() session.run(train_model.clear_char_embedding_padding) print('Created and initialized fresh model. Size:', model.model_size()) summary_writer = tf.summary.FileWriter(FLAGS.train_dir, graph=session.graph) ''' take learning rate from CLI, not from saved graph ''' session.run( tf.assign(train_model.learning_rate, FLAGS.learning_rate), ) ''' training starts here ''' best_valid_loss = None rnn_state = session.run(train_model.initial_rnn_state) for epoch in range(FLAGS.max_epochs): epoch_start_time = time.time() avg_train_loss = 0.0 count = 0 for x, y in train_reader.iter(): count += 1 start_time = time.time() loss, _, rnn_state, gradient_norm, step, _ = session.run([ train_model.loss, train_model.train_op, train_model.final_rnn_state, train_model.global_norm, train_model.global_step, train_model.clear_char_embedding_padding ], { train_model.input : x, train_model.targets: y, train_model.initial_rnn_state: rnn_state }) avg_train_loss += 0.05 * (loss - avg_train_loss) time_elapsed = time.time() - start_time if count % FLAGS.print_every == 0: print('%6d: %d [%5d/%5d], train_loss/perplexity = %6.8f/%6.7f secs/batch = %.4fs, grad.norm=%6.8f' % (step, epoch, count, train_reader.length, loss, np.exp(loss), time_elapsed, gradient_norm)) print('Epoch training time:', time.time()-epoch_start_time) # epoch done: time to evaluate avg_valid_loss = 0.0 count = 0 rnn_state = session.run(valid_model.initial_rnn_state) for x, y in valid_reader.iter(): count += 1 start_time = time.time() loss, rnn_state = session.run([ valid_model.loss, valid_model.final_rnn_state ], { valid_model.input : x, valid_model.targets: y, valid_model.initial_rnn_state: rnn_state, }) if count % FLAGS.print_every == 0: print("\t> validation loss = %6.8f, perplexity = %6.8f" % (loss, np.exp(loss))) avg_valid_loss += loss / valid_reader.length print("at the end of epoch:", epoch) print("train loss = %6.8f, perplexity = %6.8f" % (avg_train_loss, np.exp(avg_train_loss))) print("validation loss = %6.8f, perplexity = %6.8f" % (avg_valid_loss, np.exp(avg_valid_loss))) save_as = '%s/epoch%03d_%.4f.model' % (FLAGS.train_dir, epoch, avg_valid_loss) saver.save(session, save_as) print('Saved model', save_as) ''' write out summary events ''' summary = tf.Summary(value=[ tf.Summary.Value(tag="train_loss", simple_value=avg_train_loss), tf.Summary.Value(tag="valid_loss", simple_value=avg_valid_loss) ]) summary_writer.add_summary(summary, step) ''' decide if need to decay learning rate ''' if best_valid_loss is not None and np.exp(avg_valid_loss) > np.exp(best_valid_loss) - FLAGS.decay_when: print('validation perplexity did not improve enough, decay learning rate') current_learning_rate = session.run(train_model.learning_rate) print('learning rate was:', current_learning_rate) 
current_learning_rate *= FLAGS.learning_rate_decay if current_learning_rate < 1.e-5: print('learning rate too small - stopping now') break session.run(train_model.learning_rate.assign(current_learning_rate)) print('new learning rate is:', current_learning_rate) else: best_valid_loss = avg_valid_loss if __name__ == "__main__": tf.app.run()
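The comment in the training script above argues that scaling the loss by num_unroll_steps scales every gradient by the same factor. A small self-contained check of that claim, using plain NumPy finite differences rather than TensorFlow (illustrative only, not part of the sample):

import numpy as np


def loss(theta):
    # Toy scalar loss standing in for the LSTM loss.
    return (theta - 3.0) ** 2


def num_grad(f, theta, eps=1e-6):
    # Central finite-difference gradient.
    return (f(theta + eps) - f(theta - eps)) / (2 * eps)


k = 35  # plays the role of FLAGS.num_unroll_steps
theta = 1.0
g = num_grad(loss, theta)                          # d/dtheta loss
g_scaled = num_grad(lambda t: k * loss(t), theta)  # d/dtheta (k * loss)
assert np.isclose(g_scaled, k * g)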

max_stars_repo_path: tests/unit/viz/test_map.py
max_stars_repo_name: CartoDB/cartoframes
max_stars_count: 236
id: 131147
content:
<reponame>CartoDB/cartoframes from cartoframes.auth import Credentials from cartoframes.viz import Map, Layer, popup_element, constants from cartoframes.viz.source import Source from cartoframes.io.managers.context_manager import ContextManager from .utils import build_geodataframe from ..mocks.kuviz_mock import KuvizPublisherMock def setup_mocks(mocker): mocker.patch('cartoframes.viz.map._get_publisher', return_value=KuvizPublisherMock()) mocker.patch.object(ContextManager, 'compute_query', return_value='select * from fake_table') mocker.patch.object(ContextManager, 'get_geom_type', return_value='point') mocker.patch.object(ContextManager, 'get_bounds', return_value=None) class TestMap(object): def test_is_defined(self): """Map""" assert Map is not None class TestMapInitialization(object): def test_size(self): """Map should set the size by default""" map = Map() assert map.size is None def test__init(self): """Map should return a valid template""" map = Map() map._repr_html_() assert map.bounds is not None assert map._html_map is not None def test_bounds(self): """Map should set the bounds""" map = Map(bounds={ 'west': -10, 'east': 10, 'north': -10, 'south': 10 }) assert map.bounds == [[-10, 10], [10, -10]] def test_bounds_clamp(self): """Map should set the bounds clamped""" map = Map(bounds={ 'west': -1000, 'east': 1000, 'north': -1000, 'south': 1000 }) assert map.bounds == [[-180, 90], [180, -90]] class TestMapLayer(object): def test_one_layer(self): """Map layer should be able to initialize one layer""" source = Source(build_geodataframe([-10, 0], [-10, 0])) layer = Layer(source) map = Map(layer) assert map.layers == [layer] layer_def = map.layers[0].get_layer_def() assert layer_def.get('interactivity') == [] assert layer_def.get('credentials') is None assert layer_def.get('legends') is not None assert layer_def.get('widgets') is not None assert layer_def.get('data') is not None assert layer_def.get('type') == 'GeoJSON' assert layer_def.get('viz') is not None def test_two_layers(self): """Map layer should be able to initialize two layers in the correct order""" source_1 = Source(build_geodataframe([-10, 0], [-10, 0])) source_2 = Source(build_geodataframe([0, 10], [10, 0])) layer_1 = Layer(source_1) layer_2 = Layer(source_2) map = Map([layer_1, layer_2]) assert map.layers == [layer_1, layer_2] def test_interactive_layer(self): """Map layer should indicate if the layer has interactivity configured""" source_1 = Source(build_geodataframe([-10, 0], [-10, 0], ['pop', 'name'])) layer = Layer( source_1, popup_click=[ popup_element('pop'), popup_element('name') ], popup_hover=[ popup_element('pop', 'Pop') ] ) map = Map(layer) layer_def = map.layers[0].get_layer_def() assert layer_def.get('interactivity') == [ { 'event': 'click', 'attrs': { 'name': 'v6ae999', 'title': 'name', 'format': None } }, { 'event': 'click', 'attrs': { 'name': 'v4f197c', 'title': 'pop', 'format': None } }, { 'event': 'hover', 'attrs': { 'name': 'v4f197c', 'title': 'Pop', 'format': None } } ] def test_default_interactive_layer(self): """Map layer should get the default event if the interactivity is set to []""" source_1 = Source(build_geodataframe([-10, 0], [-10, 0])) layer = Layer( source_1 ) map = Map(layer) layer_def = map.layers[0].get_layer_def() assert layer_def.get('interactivity') == [] class TestMapDevelopmentPath(object): def test_default_carto_vl_path(self): """Map dev path should use default paths if none are given""" map = Map() map._repr_html_() template = map._html_map.html assert constants.CARTO_VL_URL in 
template def test_custom_carto_vl_path(self): """Map dev path should use custom paths""" _carto_vl_path = 'custom_carto_vl_path' map = Map(_carto_vl_path=_carto_vl_path) map._repr_html_() template = map._html_map.html assert _carto_vl_path + constants.CARTO_VL_DEV in template def test_default_airship_path(self): """Map dev path should use default paths if none are given""" map = Map() map._repr_html_() template = map._html_map.html assert constants.AIRSHIP_COMPONENTS_URL in template assert constants.AIRSHIP_BRIDGE_URL in template assert constants.AIRSHIP_STYLES_URL in template assert constants.AIRSHIP_MODULE_URL in template assert constants.AIRSHIP_ICONS_URL in template def test_custom_airship_path(self): """Map dev path should use custom paths""" _airship_path = 'custom_airship_path' map = Map(_airship_path=_airship_path) map._repr_html_() template = map._html_map.html assert _airship_path + constants.AIRSHIP_COMPONENTS_DEV in template assert _airship_path + constants.AIRSHIP_BRIDGE_DEV in template assert _airship_path + constants.AIRSHIP_STYLES_DEV in template assert _airship_path + constants.AIRSHIP_MODULE_DEV in template assert _airship_path + constants.AIRSHIP_ICONS_DEV in template class TestMapPublication(object): def setup_method(self): self.username = 'fake_username' self.api_key = 'fake_api_key' self.credentials = Credentials(username=self.username, api_key=self.api_key) self.test_geojson = { "type": "FeatureCollection", "features": [ { "type": "Feature", "properties": {}, "geometry": { "type": "Point", "coordinates": [ -3.1640625, 42.032974332441405 ] } } ] } def assert_kuviz_dict(self, kuviz_dict, name, privacy): assert kuviz_dict['id'] is not None assert kuviz_dict['url'] is not None assert kuviz_dict['name'] == name assert kuviz_dict['privacy'] == privacy def test_map_publish_remote_default(self, mocker): setup_mocks(mocker) mock_set_content = mocker.patch('cartoframes.viz.html.html_map.HTMLMap.set_content') vmap = Map(Layer('fake_table', credentials=self.credentials)) name = 'cf_publish' kuviz_dict = vmap.publish(name, None, self.credentials) self.assert_kuviz_dict(kuviz_dict, name, 'public') mock_set_content.assert_called_once_with( _airship_path=None, _carto_vl_path=None, basemap='Positron', bounds=[[-180, -90], [180, 90]], camera=None, description=None, is_embed=True, is_static=None, layer_selector=False, layers=[{ 'credentials': { 'username': 'fake_username', 'api_key': 'fake_api_key', 'base_url': 'https://fake_username.carto.com' }, 'interactivity': [], 'legends': [], 'has_legend_list': True, 'encode_data': True, 'widgets': [], 'data': 'select * from fake_table', 'type': 'Query', 'title': None, 'options': {}, 'map_index': 0, 'source': 'select * from fake_table', 'viz': '''color: hex("#EE4D5A") strokeColor: opacity(#222,ramp(linear(zoom(),0,18),[0,0.6])) strokeWidth: ramp(linear(zoom(),0,18),[0,1]) width: ramp(linear(zoom(),0,18),[2,10]) '''}], show_info=False, size=None, theme=None, title='cf_publish' ) def test_map_publish_remote_params(self, mocker): setup_mocks(mocker) mock_set_content = mocker.patch('cartoframes.viz.html.html_map.HTMLMap.set_content') vmap = Map( Layer('fake_table', credentials=self.credentials), basemap='yellow', bounds={'west': 1, 'east': 2, 'north': 3, 'south': 4}, viewport={'zoom': 5, 'lat': 50, 'lng': -10}, is_static=True, layer_selector=False, theme='dark', title='title', description='description' ) name = 'cf_publish' kuviz_dict = vmap.publish(name, None, self.credentials, maps_api_key='1234567890') self.assert_kuviz_dict(kuviz_dict, name, 
'public') mock_set_content.assert_called_once_with( _airship_path=None, _carto_vl_path=None, basemap='yellow', bounds=[[1, 2], [4, 3]], camera={'bearing': None, 'center': [-10, 50], 'pitch': None, 'zoom': 5}, description='description', is_embed=True, is_static=True, layer_selector=False, layers=[{ 'credentials': { 'username': 'fake_username', 'api_key': '1234567890', 'base_url': 'https://fake_username.carto.com' }, 'interactivity': [], 'legends': [], 'has_legend_list': True, 'encode_data': True, 'widgets': [], 'data': 'select * from fake_table', 'type': 'Query', 'title': None, 'options': {}, 'map_index': 0, 'source': 'select * from fake_table', 'viz': '''color: hex("#EE4D5A") strokeColor: opacity(#222,ramp(linear(zoom(),0,18),[0,0.6])) strokeWidth: ramp(linear(zoom(),0,18),[0,1]) width: ramp(linear(zoom(),0,18),[2,10]) '''}], show_info=False, size=None, theme='dark', title='cf_publish' ) def test_map_publish_with_password(self, mocker): setup_mocks(mocker) map = Map(Layer(Source('fake_table', credentials=self.credentials))) name = 'cf_publish' kuviz_dict = map.publish(name, '1234', credentials=self.credentials) self.assert_kuviz_dict(kuviz_dict, name, 'password') def test_map_publish_update_name(self, mocker): setup_mocks(mocker) map = Map(Layer(Source('fake_table', credentials=self.credentials))) name = 'cf_publish' map.publish(name, None, credentials=self.credentials) new_name = 'cf_update' kuviz_dict = map.update_publication(new_name, password=None) self.assert_kuviz_dict(kuviz_dict, new_name, 'public') def test_map_publish_update_password(self, mocker): setup_mocks(mocker) map = Map(Layer(Source('fake_table', credentials=self.credentials))) name = 'cf_publish' map.publish(name, None, credentials=self.credentials) kuviz_dict = map.update_publication(name, '1234"') self.assert_kuviz_dict(kuviz_dict, name, 'password')

max_stars_repo_path: webapp/graphite/functions/views.py
max_stars_repo_name: parrotpock/graphite-web
max_stars_count: 4,281
id: 131151
content:

import json

from graphite.util import jsonResponse, HttpResponse, HttpError
from graphite.functions import SeriesFunctions, SeriesFunction, PieFunctions, PieFunction, functionInfo


class jsonInfinityEncoder(json.JSONEncoder):
    def encode(self, o):
        return super(jsonInfinityEncoder, self).encode(o).replace('Infinity,', '1e9999,')

    def default(self, o):
        if hasattr(o, 'toJSON'):
            return o.toJSON()
        return o.__dict__


@jsonResponse(encoder=jsonInfinityEncoder)
def functionList(request, queryParams):
    if request.method != 'GET':
        return HttpResponse(status=405)

    if queryParams.get('type') == 'pie':
        funcs = PieFunctions()
    else:
        funcs = SeriesFunctions()

    grouped = queryParams.get('grouped', '').lower() in ['1', 'true']
    group = queryParams.get('group')

    result = {}
    for (name, func) in funcs.items():
        info = functionInfo(name, func)
        if group is not None and group != info['group']:
            continue

        if grouped:
            if info['group'] not in result:
                result[info['group']] = {}
            result[info['group']][name] = info
        else:
            result[name] = info

    return result


@jsonResponse(encoder=jsonInfinityEncoder)
def functionDetails(request, queryParams, name):
    if request.method != 'GET':
        return HttpResponse(status=405)

    try:
        if queryParams.get('type') == 'pie':
            func = PieFunction(name)
        else:
            func = SeriesFunction(name)
    except KeyError:
        raise HttpError('Function not found: %s' % name, status=404)

    return functionInfo(name, func)
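Assuming these views are routed at /functions and /functions/<name> (the URLconf is not part of this sample, so the endpoints and host below are assumptions), a client call might look like:

import requests  # third-party HTTP client, used here only for illustration

base = "http://graphite.example.com"  # hypothetical host

# Grouped listing: {group: {function_name: info, ...}, ...}, per the view logic above.
grouped = requests.get(base + "/functions", params={"grouped": "1"}).json()

# Details for a single series function.
detail = requests.get(base + "/functions/sumSeries").json()
print(sorted(detail.keys()))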

max_stars_repo_path: books/PRML/PRML-master-Python/test/nn/nonlinear/tanh.py
max_stars_repo_name: iamfaith/DeepLearning
max_stars_count: 7,581
id: 131160
content:

<filename>books/PRML/PRML-master-Python/test/nn/nonlinear/tanh.py
import unittest

from prml import nn


class TestTanh(unittest.TestCase):

    def test_tanh(self):
        self.assertEqual(nn.tanh(0).value, 0)


if __name__ == '__main__':
    unittest.main()
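The single assertion above leans on the identity tanh(0) = 0. The same check against the standard library, independent of the prml package:

import math

# tanh(x) = (e**x - e**(-x)) / (e**x + e**(-x)), so tanh(0) = (1 - 1) / (1 + 1) = 0.
assert math.tanh(0) == 0.0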

max_stars_repo_path: solidity/python/EmulationExpTestCross.py
max_stars_repo_name: surzm/contracts-solidity
max_stars_count: 200
id: 131173
content:

import Web3Wrapper
import InputGenerator
import FormulaSolidityPort


MINIMUM_VALUE_BALANCE = 100
MAXIMUM_VALUE_BALANCE = 10 ** 34
GROWTH_FACTOR_BALANCE = 2.5

MINIMUM_VALUE_WEIGHT = 100000
MAXIMUM_VALUE_WEIGHT = 900000
GROWTH_FACTOR_WEIGHT = 1.5

MINIMUM_VALUE_AMOUNT = 1
MAXIMUM_VALUE_AMOUNT = 10 ** 34
GROWTH_FACTOR_AMOUNT = 2.5


def Main():
    rangeBalance1 = InputGenerator.ExponentialDistribution(MINIMUM_VALUE_BALANCE, MAXIMUM_VALUE_BALANCE, GROWTH_FACTOR_BALANCE)
    rangeWeight1 = InputGenerator.ExponentialDistribution(MINIMUM_VALUE_WEIGHT, MAXIMUM_VALUE_WEIGHT, GROWTH_FACTOR_WEIGHT)
    rangeBalance2 = InputGenerator.ExponentialDistribution(MINIMUM_VALUE_BALANCE, MAXIMUM_VALUE_BALANCE, GROWTH_FACTOR_BALANCE)
    rangeWeight2 = InputGenerator.ExponentialDistribution(MINIMUM_VALUE_WEIGHT, MAXIMUM_VALUE_WEIGHT, GROWTH_FACTOR_WEIGHT)
    rangeAmount = InputGenerator.ExponentialDistribution(MINIMUM_VALUE_AMOUNT, MAXIMUM_VALUE_AMOUNT, GROWTH_FACTOR_AMOUNT)

    testNum = 0
    numOfTests = len(rangeBalance1) * len(rangeWeight1) * len(rangeBalance2) * len(rangeWeight2) * len(rangeAmount)

    FormulaContract = Web3Wrapper.Contract('BancorFormula')
    FormulaContract.setter().init()
    FormulaContractAddr = FormulaContract.getter()

    for balance1 in rangeBalance1:
        for weight1 in rangeWeight1:
            for balance2 in rangeBalance2:
                for weight2 in rangeWeight2:
                    for amount in rangeAmount:
                        testNum += 1
                        if True:
                            resultSolidityPort = Run(FormulaSolidityPort, balance1, weight1, balance2, weight2, amount)
                            resultContractAddr = Run(FormulaContractAddr, balance1, weight1, balance2, weight2, amount)
                            print('Test {} out of {}: resultSolidityPort = {}, resultContractAddr = {}'.format(testNum, numOfTests, resultSolidityPort, resultContractAddr))
                            if resultSolidityPort != resultContractAddr:
                                print('Emulation Error:')
                                print('balance1 = {}'.format(balance1))
                                print('weight1 = {}'.format(weight1))
                                print('balance2 = {}'.format(balance2))
                                print('weight2 = {}'.format(weight2))
                                print('amount = {}'.format(amount))
                                return


def Run(module, balance1, weight1, balance2, weight2, amount):
    try:
        return module.crossReserveTargetAmount(balance1, weight1, balance2, weight2, amount)
    except:
        return -1


Main()
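InputGenerator.ExponentialDistribution is not included in this sample. A plausible stand-in, assuming it returns a geometric progression from the minimum up to and including the maximum (the behaviour is inferred from the call sites above, not taken from the repository):

def exponential_distribution(minimum, maximum, growth_factor):
    # Hypothetical sketch of InputGenerator.ExponentialDistribution: values grow
    # geometrically by growth_factor, starting at minimum and capped at maximum.
    values = []
    value = minimum
    while value < maximum:
        values.append(int(value))
        value *= growth_factor
    values.append(maximum)
    return values


print(exponential_distribution(100000, 900000, 1.5))
# [100000, 150000, 225000, 337500, 506250, 759375, 900000]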

max_stars_repo_path: homeassistant/components/template/trigger_entity.py
max_stars_repo_name: andersop91/core
max_stars_count: 22,481
id: 131175
content:
<filename>homeassistant/components/template/trigger_entity.py """Trigger entity.""" from __future__ import annotations import logging from typing import Any from homeassistant.const import ( CONF_DEVICE_CLASS, CONF_ICON, CONF_NAME, CONF_UNIQUE_ID, CONF_UNIT_OF_MEASUREMENT, ) from homeassistant.core import HomeAssistant, callback from homeassistant.helpers import template, update_coordinator from . import TriggerUpdateCoordinator from .const import CONF_ATTRIBUTES, CONF_AVAILABILITY, CONF_PICTURE class TriggerEntity(update_coordinator.CoordinatorEntity): """Template entity based on trigger data.""" domain: str extra_template_keys: tuple | None = None extra_template_keys_complex: tuple | None = None def __init__( self, hass: HomeAssistant, coordinator: TriggerUpdateCoordinator, config: dict, ) -> None: """Initialize the entity.""" super().__init__(coordinator) entity_unique_id = config.get(CONF_UNIQUE_ID) if entity_unique_id and coordinator.unique_id: self._unique_id = f"{coordinator.unique_id}-{entity_unique_id}" else: self._unique_id = entity_unique_id self._config = config self._static_rendered = {} self._to_render_simple = [] self._to_render_complex = [] for itm in ( CONF_NAME, CONF_ICON, CONF_PICTURE, CONF_AVAILABILITY, ): if itm not in config: continue if config[itm].is_static: self._static_rendered[itm] = config[itm].template else: self._to_render_simple.append(itm) if self.extra_template_keys is not None: self._to_render_simple.extend(self.extra_template_keys) if self.extra_template_keys_complex is not None: self._to_render_complex.extend(self.extra_template_keys_complex) # We make a copy so our initial render is 'unknown' and not 'unavailable' self._rendered = dict(self._static_rendered) self._parse_result = {CONF_AVAILABILITY} @property def name(self): """Name of the entity.""" return self._rendered.get(CONF_NAME) @property def unique_id(self): """Return unique ID of the entity.""" return self._unique_id @property def device_class(self): """Return device class of the entity.""" return self._config.get(CONF_DEVICE_CLASS) @property def unit_of_measurement(self) -> str | None: """Return unit of measurement.""" return self._config.get(CONF_UNIT_OF_MEASUREMENT) @property def icon(self) -> str | None: """Return icon.""" return self._rendered.get(CONF_ICON) @property def entity_picture(self) -> str | None: """Return entity picture.""" return self._rendered.get(CONF_PICTURE) @property def available(self): """Return availability of the entity.""" return ( self._rendered is not self._static_rendered and # Check against False so `None` is ok self._rendered.get(CONF_AVAILABILITY) is not False ) @property def extra_state_attributes(self) -> dict[str, Any] | None: """Return extra attributes.""" return self._rendered.get(CONF_ATTRIBUTES) async def async_added_to_hass(self) -> None: """Handle being added to Home Assistant.""" template.attach(self.hass, self._config) await super().async_added_to_hass() if self.coordinator.data is not None: self._process_data() @callback def _process_data(self) -> None: """Process new data.""" try: rendered = dict(self._static_rendered) for key in self._to_render_simple: rendered[key] = self._config[key].async_render( self.coordinator.data["run_variables"], parse_result=key in self._parse_result, ) for key in self._to_render_complex: rendered[key] = template.render_complex( self._config[key], self.coordinator.data["run_variables"], ) if CONF_ATTRIBUTES in self._config: rendered[CONF_ATTRIBUTES] = template.render_complex( self._config[CONF_ATTRIBUTES], 
self.coordinator.data["run_variables"], ) self._rendered = rendered except template.TemplateError as err: logging.getLogger(f"{__package__}.{self.entity_id.split('.')[0]}").error( "Error rendering %s template for %s: %s", key, self.entity_id, err ) self._rendered = self._static_rendered self.async_set_context(self.coordinator.data["context"]) @callback def _handle_coordinator_update(self) -> None: """Handle updated data from the coordinator.""" self._process_data() self.async_write_ha_state()

max_stars_repo_path: tests/hikari/test_embeds.py
max_stars_repo_name: sabidib/hikari
max_stars_count: 520
id: 131212
content:
<filename>tests/hikari/test_embeds.py # -*- coding: utf-8 -*- # Copyright (c) 2020 Nekokatt # Copyright (c) 2021 davfsa # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import mock import pytest from hikari import embeds class TestEmbedResource: @pytest.fixture() def resource(self): return embeds.EmbedResource(resource=mock.Mock()) def test_url(self, resource): assert resource.url is resource.resource.url def test_filename(self, resource): assert resource.filename is resource.resource.filename def test_stream(self, resource): mock_executor = object() assert resource.stream(executor=mock_executor, head_only=True) is resource.resource.stream.return_value resource.resource.stream.assert_called_once_with(executor=mock_executor, head_only=True) class TestEmbedResourceWithProxy: @pytest.fixture() def resource_with_proxy(self): return embeds.EmbedResourceWithProxy(resource=mock.Mock(), proxy_resource=mock.Mock()) def test_proxy_url(self, resource_with_proxy): assert resource_with_proxy.proxy_url is resource_with_proxy.proxy_resource.url def test_proxy_url_when_resource_is_none(self, resource_with_proxy): resource_with_proxy.proxy_resource = None assert resource_with_proxy.proxy_url is None def test_proxy_filename(self, resource_with_proxy): assert resource_with_proxy.proxy_filename is resource_with_proxy.proxy_resource.filename def test_proxy_filename_when_resource_is_none(self, resource_with_proxy): resource_with_proxy.proxy_resource = None assert resource_with_proxy.proxy_filename is None

max_stars_repo_path: www/src/Lib/test/test_abc.py
max_stars_repo_name: stefanhoelzl/brython
max_stars_count: 652
id: 131218
content:
<filename>www/src/Lib/test/test_abc.py # Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Unit tests for abc.py.""" import unittest from test import support import abc from inspect import isabstract class TestLegacyAPI(unittest.TestCase): def test_abstractproperty_basics(self): @abc.abstractproperty def foo(self): pass self.assertTrue(foo.__isabstractmethod__) def bar(self): pass self.assertFalse(hasattr(bar, "__isabstractmethod__")) class C(metaclass=abc.ABCMeta): @abc.abstractproperty def foo(self): return 3 self.assertRaises(TypeError, C) class D(C): @property def foo(self): return super().foo self.assertEqual(D().foo, 3) self.assertFalse(getattr(D.foo, "__isabstractmethod__", False)) def test_abstractclassmethod_basics(self): @abc.abstractclassmethod def foo(cls): pass self.assertTrue(foo.__isabstractmethod__) @classmethod def bar(cls): pass self.assertFalse(getattr(bar, "__isabstractmethod__", False)) class C(metaclass=abc.ABCMeta): @abc.abstractclassmethod def foo(cls): return cls.__name__ self.assertRaises(TypeError, C) class D(C): @classmethod def foo(cls): return super().foo() self.assertEqual(D.foo(), 'D') self.assertEqual(D().foo(), 'D') def test_abstractstaticmethod_basics(self): @abc.abstractstaticmethod def foo(): pass self.assertTrue(foo.__isabstractmethod__) @staticmethod def bar(): pass self.assertFalse(getattr(bar, "__isabstractmethod__", False)) class C(metaclass=abc.ABCMeta): @abc.abstractstaticmethod def foo(): return 3 self.assertRaises(TypeError, C) class D(C): @staticmethod def foo(): return 4 self.assertEqual(D.foo(), 4) self.assertEqual(D().foo(), 4) class TestABC(unittest.TestCase): def test_abstractmethod_basics(self): @abc.abstractmethod def foo(self): pass self.assertTrue(foo.__isabstractmethod__) def bar(self): pass self.assertFalse(hasattr(bar, "__isabstractmethod__")) def test_abstractproperty_basics(self): @property @abc.abstractmethod def foo(self): pass self.assertTrue(foo.__isabstractmethod__) def bar(self): pass self.assertFalse(getattr(bar, "__isabstractmethod__", False)) class C(metaclass=abc.ABCMeta): @property @abc.abstractmethod def foo(self): return 3 self.assertRaises(TypeError, C) class D(C): @C.foo.getter def foo(self): return super().foo self.assertEqual(D().foo, 3) def test_abstractclassmethod_basics(self): @classmethod @abc.abstractmethod def foo(cls): pass self.assertTrue(foo.__isabstractmethod__) @classmethod def bar(cls): pass self.assertFalse(getattr(bar, "__isabstractmethod__", False)) class C(metaclass=abc.ABCMeta): @classmethod @abc.abstractmethod def foo(cls): return cls.__name__ self.assertRaises(TypeError, C) class D(C): @classmethod def foo(cls): return super().foo() self.assertEqual(D.foo(), 'D') self.assertEqual(D().foo(), 'D') def test_abstractstaticmethod_basics(self): @staticmethod @abc.abstractmethod def foo(): pass self.assertTrue(foo.__isabstractmethod__) @staticmethod def bar(): pass self.assertFalse(getattr(bar, "__isabstractmethod__", False)) class C(metaclass=abc.ABCMeta): @staticmethod @abc.abstractmethod def foo(): return 3 self.assertRaises(TypeError, C) class D(C): @staticmethod def foo(): return 4 self.assertEqual(D.foo(), 4) self.assertEqual(D().foo(), 4) def test_abstractmethod_integration(self): for abstractthing in [abc.abstractmethod, abc.abstractproperty, abc.abstractclassmethod, abc.abstractstaticmethod]: class C(metaclass=abc.ABCMeta): @abstractthing def foo(self): pass # abstract def bar(self): pass # concrete self.assertEqual(C.__abstractmethods__, 
{"foo"}) self.assertRaises(TypeError, C) # because foo is abstract self.assertTrue(isabstract(C)) class D(C): def bar(self): pass # concrete override of concrete self.assertEqual(D.__abstractmethods__, {"foo"}) self.assertRaises(TypeError, D) # because foo is still abstract self.assertTrue(isabstract(D)) class E(D): def foo(self): pass self.assertEqual(E.__abstractmethods__, set()) E() # now foo is concrete, too self.assertFalse(isabstract(E)) class F(E): @abstractthing def bar(self): pass # abstract override of concrete self.assertEqual(F.__abstractmethods__, {"bar"}) self.assertRaises(TypeError, F) # because bar is abstract now self.assertTrue(isabstract(F)) def test_descriptors_with_abstractmethod(self): class C(metaclass=abc.ABCMeta): @property @abc.abstractmethod def foo(self): return 3 @foo.setter @abc.abstractmethod def foo(self, val): pass self.assertRaises(TypeError, C) class D(C): @C.foo.getter def foo(self): return super().foo self.assertRaises(TypeError, D) class E(D): @D.foo.setter def foo(self, val): pass self.assertEqual(E().foo, 3) # check that the property's __isabstractmethod__ descriptor does the # right thing when presented with a value that fails truth testing: class NotBool(object): def __nonzero__(self): raise ValueError() __len__ = __nonzero__ with self.assertRaises(ValueError): class F(C): def bar(self): pass bar.__isabstractmethod__ = NotBool() foo = property(bar) def test_customdescriptors_with_abstractmethod(self): class Descriptor: def __init__(self, fget, fset=None): self._fget = fget self._fset = fset def getter(self, callable): return Descriptor(callable, self._fget) def setter(self, callable): return Descriptor(self._fget, callable) @property def __isabstractmethod__(self): return (getattr(self._fget, '__isabstractmethod__', False) or getattr(self._fset, '__isabstractmethod__', False)) class C(metaclass=abc.ABCMeta): @Descriptor @abc.abstractmethod def foo(self): return 3 @foo.setter @abc.abstractmethod def foo(self, val): pass self.assertRaises(TypeError, C) class D(C): @C.foo.getter def foo(self): return super().foo self.assertRaises(TypeError, D) class E(D): @D.foo.setter def foo(self, val): pass self.assertFalse(E.foo.__isabstractmethod__) def test_metaclass_abc(self): # Metaclasses can be ABCs, too. 
class A(metaclass=abc.ABCMeta): @abc.abstractmethod def x(self): pass self.assertEqual(A.__abstractmethods__, {"x"}) class meta(type, A): def x(self): return 1 class C(metaclass=meta): pass def test_registration_basics(self): class A(metaclass=abc.ABCMeta): pass class B(object): pass b = B() self.assertFalse(issubclass(B, A)) self.assertFalse(issubclass(B, (A,))) self.assertNotIsInstance(b, A) self.assertNotIsInstance(b, (A,)) B1 = A.register(B) self.assertTrue(issubclass(B, A)) self.assertTrue(issubclass(B, (A,))) self.assertIsInstance(b, A) self.assertIsInstance(b, (A,)) self.assertIs(B1, B) class C(B): pass c = C() self.assertTrue(issubclass(C, A)) self.assertTrue(issubclass(C, (A,))) self.assertIsInstance(c, A) self.assertIsInstance(c, (A,)) def test_register_as_class_deco(self): class A(metaclass=abc.ABCMeta): pass @A.register class B(object): pass b = B() self.assertTrue(issubclass(B, A)) self.assertTrue(issubclass(B, (A,))) self.assertIsInstance(b, A) self.assertIsInstance(b, (A,)) @A.register class C(B): pass c = C() self.assertTrue(issubclass(C, A)) self.assertTrue(issubclass(C, (A,))) self.assertIsInstance(c, A) self.assertIsInstance(c, (A,)) self.assertIs(C, A.register(C)) def test_isinstance_invalidation(self): class A(metaclass=abc.ABCMeta): pass class B: pass b = B() self.assertFalse(isinstance(b, A)) self.assertFalse(isinstance(b, (A,))) A.register(B) self.assertTrue(isinstance(b, A)) self.assertTrue(isinstance(b, (A,))) def test_registration_builtins(self): class A(metaclass=abc.ABCMeta): pass A.register(int) self.assertIsInstance(42, A) self.assertIsInstance(42, (A,)) self.assertTrue(issubclass(int, A)) self.assertTrue(issubclass(int, (A,))) class B(A): pass B.register(str) class C(str): pass self.assertIsInstance("", A) self.assertIsInstance("", (A,)) self.assertTrue(issubclass(str, A)) self.assertTrue(issubclass(str, (A,))) self.assertTrue(issubclass(C, A)) self.assertTrue(issubclass(C, (A,))) def test_registration_edge_cases(self): class A(metaclass=abc.ABCMeta): pass A.register(A) # should pass silently class A1(A): pass self.assertRaises(RuntimeError, A1.register, A) # cycles not allowed class B(object): pass A1.register(B) # ok A1.register(B) # should pass silently class C(A): pass A.register(C) # should pass silently self.assertRaises(RuntimeError, C.register, A) # cycles not allowed C.register(B) # ok def test_register_non_class(self): class A(metaclass=abc.ABCMeta): pass self.assertRaisesRegex(TypeError, "Can only register classes", A.register, 4) def test_registration_transitiveness(self): class A(metaclass=abc.ABCMeta): pass self.assertTrue(issubclass(A, A)) self.assertTrue(issubclass(A, (A,))) class B(metaclass=abc.ABCMeta): pass self.assertFalse(issubclass(A, B)) self.assertFalse(issubclass(A, (B,))) self.assertFalse(issubclass(B, A)) self.assertFalse(issubclass(B, (A,))) class C(metaclass=abc.ABCMeta): pass A.register(B) class B1(B): pass self.assertTrue(issubclass(B1, A)) self.assertTrue(issubclass(B1, (A,))) class C1(C): pass B1.register(C1) self.assertFalse(issubclass(C, B)) self.assertFalse(issubclass(C, (B,))) self.assertFalse(issubclass(C, B1)) self.assertFalse(issubclass(C, (B1,))) self.assertTrue(issubclass(C1, A)) self.assertTrue(issubclass(C1, (A,))) self.assertTrue(issubclass(C1, B)) self.assertTrue(issubclass(C1, (B,))) self.assertTrue(issubclass(C1, B1)) self.assertTrue(issubclass(C1, (B1,))) C1.register(int) class MyInt(int): pass self.assertTrue(issubclass(MyInt, A)) self.assertTrue(issubclass(MyInt, (A,))) self.assertIsInstance(42, A) 
self.assertIsInstance(42, (A,)) def test_all_new_methods_are_called(self): class A(metaclass=abc.ABCMeta): pass class B(object): counter = 0 def __new__(cls): B.counter += 1 return super().__new__(cls) class C(A, B): pass self.assertEqual(B.counter, 0) C() self.assertEqual(B.counter, 1) if __name__ == "__main__": unittest.main()

max_stars_repo_path: algorithms/backtrack/factor_combinations.py
max_stars_repo_name: zhengli0817/algorithms
max_stars_count: 128
id: 131230
content:

<gh_stars>100-1000
"""
Numbers can be regarded as product of its factors. For example,

8 = 2 x 2 x 2;
  = 2 x 4.

Write a function that takes an integer n
and return all possible combinations of its factors.

Note:
You may assume that n is always positive.
Factors should be greater than 1 and less than n.

Examples:
input: 1
output: []

input: 37
output: []

input: 12
output:
[
  [2, 6],
  [2, 2, 3],
  [3, 4]
]

input: 32
output:
[
  [2, 16],
  [2, 2, 8],
  [2, 2, 2, 4],
  [2, 2, 2, 2, 2],
  [2, 4, 4],
  [4, 8]
]
"""


# Iterative:
def get_factors(n):
    todo, combis = [(n, 2, [])], []
    while todo:
        n, i, combi = todo.pop()
        while i * i <= n:
            if n % i == 0:
                combis.append(combi + [i, n//i])
                todo.append((n//i, i, combi+[i]))
            i += 1
    return combis


# Recursive:
def get_factors_recur(n):
    def factor(n, i, combi, combis):
        while i * i <= n:
            if n % i == 0:
                combis.append(combi + [i, n//i]),
                factor(n//i, i, combi+[i], combis)
            i += 1
        return combis
    return factor(n, 2, [], [])
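A short usage check for the two functions above; the ordering of combinations depends on traversal order, so the helper below (illustrative, not part of the original module) compares them order-insensitively:

def as_canonical(combos):
    # Sort factors inside each combination, then sort the combinations themselves.
    return sorted(sorted(c) for c in combos)


assert as_canonical(get_factors(12)) == as_canonical([[2, 6], [2, 2, 3], [3, 4]])
assert as_canonical(get_factors_recur(12)) == as_canonical(get_factors(12))
assert get_factors(37) == []  # primes have no factorizations with factors in (1, n)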

max_stars_repo_path: scripts/topModuleGen/src/TopModuleGen.py
max_stars_repo_name: d-m-bailey/openlane-openroad
max_stars_count: 354
id: 131231
content:
# Copyright 2020 Efabless Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import argparse import copy import re import os import pyverilog.utils from pyverilog.vparser.parser import parse from pyverilog.ast_code_generator.codegen import ASTCodeGenerator import contextlib #import StringIO try: from StringIO import StringIO except ImportError: from io import StringIO parser = argparse.ArgumentParser( description="top module generation for a given (core design, pads library) pair") parser.add_argument('--design', '-d', action='store', required=True, help="The json description of the design") parser.add_argument('--padsLibs', '-p', action='store', required=True, help="The pad libraries json description") parser.add_argument('--verilog', '-v', action='store', required=True, help="The input verilog file containing the core module header definition") parser.add_argument('--output', '-o', action='store', required=True, help="The verilog file to output to") args = parser.parse_args() design = args.design padsLibs = args.padsLibs output = args.output verilog_file = args.verilog #description of the design parsed into a dict if not os.path.exists(design): raise IOError("file not found: " + design) designJSONOpener = open(design, 'r') design_json_complete = json.load(designJSONOpener) designJSONOpener.close() #description of the libraries parsed into a dict if not os.path.exists(padsLibs): raise IOError("file not found: " + padsLibs) padsLibsJSONOpener = open(padsLibs, 'r') padsLibs_json = json.load(padsLibsJSONOpener) padsLibsJSONOpener.close() #Finding the used pads library padsLib_json = dict() for padsLib in padsLibs_json: if padsLib["library_name"] == design_json_complete["pads_library"]: padsLib_json = padsLib break if len(padsLib_json) == 0: raise Exception("Used Pad Lib is not found in the given Pad Libraries JSON") #extracting the core module needed pads design_json = design_json_complete["module"] #Necessary intermediate containers written_macros = list() wiresToDeclare = dict() headerSignalsToDeclare = dict() #Segments of the final written verilog code topModuleHeader="module "+design_json_complete["design_name"]+"(\n" topModuleDeclarations = "" topModuleDefines = "`timescale 1 ns / 1 ps\n\n`define USE_PG_PIN\n`define functional\n\n" topModuleIncludes = "" topModuleBody = "" topModuleMacros = "" designDeclaration = "" topModuleExtra = "" padFrameModule="" padFrameHeader="chip_io padframe(\n" padFrameHeaderDefinition="module chip_io(\n" padFrameWires= "" #parsePads is responsible for parsing the pads except for power/corner pads def parsePads(): global topModuleHeader for user_pad in design_json["pads"]: #find pad direction padType = user_pad["type"] padUsed = dict() #extract pad for library_pad in padsLib["pads"]: if library_pad["type"] == padType: padUsed = library_pad break if len(padUsed) ==0: raise Exception("Used Pad is not of a defined type") writePad(padUsed, user_pad) topModuleHeader= topModuleHeader[:-2]+");\n" #writePad is responsible for adding a non-power/non-corner pad 
to the output verilog def writePad(padUsed, user_info): global topModuleHeader global padFrameModule global padFrameHeaderDefinition global padFrameHeader v = user_info["size"] > 1 macro_name = "" if v: macro_name+=padUsed["type"]+"_V" else: macro_name+=padUsed["type"] writePadMacro(padUsed, v, macro_name) instType = resolveInterfaceType(user_info["type"]) instSize = resolveSize(user_info["size"]) topModuleHeader+= instType +" " topModuleHeader+= instSize +" " topModuleHeader+= user_info["name"]+",\n" if instType == "input": instType = "inout" padFrameHeaderDefinition+= instType +" " padFrameHeaderDefinition+= instSize +" " padFrameHeaderDefinition+= user_info["name"]+",\n" padFrameHeader+="."+user_info["name"]+"("+user_info["name"]+"),\n" inst_body="`"+macro_name+" (" if v: inst_body+=str(user_info["size"])+"," for key in padUsed["mapping"].keys(): res = None if key == "name": res = [user_info] elif key in user_info.keys(): res = user_info[key] else: for port in padUsed["ports"]: if "condition" in port.keys(): continue if port["name"] == padUsed["mapping"][key]: res = port["connection"] break if res is not None: if type(res) == type(dict()) and "name" not in res.keys(): wire_name=padUsed["type"]+"_"+user_info["name"]+"_"+key createDictWireForPads(wire_name, res,user_info["size"]) inst_body+=wire_name+"," elif len(res) > 1 and key != "name": wire_name=padUsed["type"]+"_"+user_info["name"]+"_"+key createWireForPads(wire_name, res) inst_body+=wire_name+"," else: if type(res[0]) == type(dict()): inst_body+=res[0]["name"]+" " if "size" in res[0].keys() and key != "name": inst_body+=resolveSize(res[0]["size"]) else: inst_body+=res[0]+" " inst_body+="," else: inst_body+= " , " inst_body=inst_body[:-1]+");\n" padFrameModule+=inst_body #createDictWireForPads creates the wire for a pad macro parameter if it has a complex object description. The description is parsed into an assign to that wire. i.e. "mode":{"bit_0":{}, "bit_1":{}, "bit_2":{}} def createDictWireForPads(wire_name,wire_info,size): global padFrameWires wire_body ="wire ["+str(size * len(wire_info.keys()) -1)+":0] "+wire_name+" = {\n\t\t" for i in reversed(range(size)): for key in wire_info.keys(): wire_body+=wire_info[key]+"["+str(i)+"]," wire_body+="\n\t\t" wire_body=wire_body[:-4]+"};\n" padFrameWires+=wire_body #createWireForPads creates the wire for a pad macro parameter if it has a list description. 
def createWireForPads(wire_name,wire_info): global padFrameWires wire_body ="wire ["+str(getConcatSize(wire_info)-1)+":0] "+wire_name+" = {" for con in wire_info: if type(con) == type(dict()): wire_body+=con["name"]+" " if "size" in con.keys(): wire_body+=resolveSize(con["size"]) else: wire_body+=con+" " wire_body+=", " wire_body= wire_body[:-2]+"};\n" padFrameWires+=wire_body #getConcatSize gets the total size of a concatenation def getConcatSize(concatentation): tot_size = 0 for signal in concatentation: if type(signal) == type(dict()): if "size" in signal.keys(): if type(signal["size"]) == type(1): tot_size+=signal["size"] else: tot_size+=signal["size"]["offset"] else: tot_size+=1 else: tot_size+=1 return tot_size #writePadMacro is responsible for writing the macro definition of the pads except for power/corner pads def writePadMacro(pad_macro, v,macro_name): if macro_name in written_macros: return pad_macro_copy = copy.deepcopy(pad_macro) written_macros.append(macro_name) global topModuleMacros macro_body = "" if v: macro_body+="`define "+macro_name+"(V," else: macro_body+="`define "+macro_name+"(" for key in pad_macro_copy["mapping"].keys(): macro_body+=key.upper() +"," pad_macro_copy["ports"] = resolvePortMapping(pad_macro_copy["mapping"],pad_macro_copy["ports"]) macro_body = macro_body[:-1]+") \\\n" for wire in pad_macro_copy["wire_declaration_info"]: macro_body+="wire " if v: macro_body +="[V-1:0] " macro_body += resolveConnectionName(wire["name"])+"; \\\n" macro_body+=pad_macro_copy["pad_name"]+ " NAME``_pad " if v: macro_body+= "[V-1:0] ( \\\n" else: macro_body+="( \\\n" if "defines" in pad_macro_copy.keys(): macro_body+= addPadDefines(pad_macro_copy["defines"],True) for port in pad_macro_copy["ports"]: if "condition" in port.keys(): hasElse= False if "def" in port["condition"].keys(): portd = port["condition"]["def"] macro_body+="`ifdef "+ port["condition"]["name"]+" \\\n" macro_body+="\t."+portd["name"]+"( " if portd["connection"] is not None: if len(portd["connection"]) > 1: macro_body+="{" for connection in portd["connection"]: macro_body+= resolveConnectionName(connection)+"," macro_body= macro_body[:-1]+"}," else: macro_body+=resolveConnectionName(portd["connection"][0])+"," macro_body=macro_body[:-1]+"), \\\n" hasElse = True if "ndef" in port["condition"].keys(): portd = port["condition"]["ndef"] if hasElse: macro_body += "`else \\\n" else: macro_body+="`ifndef "+ port["condition"]["name"]+" \\\n" macro_body+="\t."+portd["name"]+"( " if portd["connection"] is not None: if len(portd["connection"]) > 1: macro_body+="{" for connection in portd["connection"]: macro_body+= resolveConnectionName(connection)+"," macro_body= macro_body[:-1]+"}," else: macro_body+=resolveConnectionName(portd["connection"][0])+"," macro_body=macro_body[:-1]+"), \\\n" macro_body+="`endif \\\n" else: macro_body+="."+port["name"]+"( " if port["connection"] is not None: if len(port["connection"]) > 1: macro_body+="{" for connection in port["connection"]: macro_body+= resolveConnectionName(connection)+"," macro_body= macro_body[:-1]+"}," else: macro_body+=resolveConnectionName(port["connection"][0])+"," macro_body=macro_body[:-1]+"), \\\n" macro_body=macro_body[:-4]+")\n\n" topModuleMacros+=macro_body #resolveConnectionName to replace any references to $name with the proper ``NAME or NAME`` def resolveConnectionName(connection): if connection.find("$name") == 0: return connection.replace("$name", "NAME``") else: return connection.replace("$name", "``NAME") #resolvePortMapping to resolve the value of 
defined port with the user defined value or retrieve it from the the default values defined in the PADs library def resolvePortMapping(mapping, ports): for key in mapping.keys(): for portIdx in range(len(ports)): port = ports[portIdx] if "condition" in port.keys(): if "def" in port["condition"].keys(): if port["condition"]["def"]["name"] == mapping[key]: port["condition"]["def"]["connection"] = [key.upper()] if "ndef" in port["condition"].keys(): if port["condition"]["ndef"]["name"] == mapping[key]: port["condition"]["ndef"]["connection"] = [key.upper()] elif port["name"] == mapping[key]: port["connection"] = [key.upper()] ports[portIdx] = port return ports #resolveSize to resolve the size whether its defined as 1, [size-1:0], or [offset-start+1:start] def resolveSize(size): if type(size) == type(1): if int(size) > 1: return "["+str(size -1)+":0] " else: return "" else: return "["+str(size["offset"]+size["start"] -1)+":"+str(size["start"])+"] " #resolveInterfaceType to resolve type of interface to input, output, or inout def resolveInterfaceType(padType): if padType in ["DIGITAL_INPUT", "ANALOG_INPUT", "DIGITAL_INPUT_TV2", "XRES"]: return "input" if padType in ["DIGITAL_OUTPUT", "ANALOG_OUTPUT","DIGITAL_OUTPUT_TV2"]: return "output" if padType in ["DIGITAL_INOUT", "ANALOG_INOUT"]: return "inout" raise Exception("Used Pad is not of a defined type") #parsePowerCornerPads is responsible for parsing the power/corner pads def parsePowerCornerPads(): #Handle Case of user give power/corner pads for pad in padsLib["pads"]: if "count" in pad.keys(): global padFrameModule if "powerCornerPads" in design_json_complete.keys(): for new_pad in design_json_complete["powerCornerPads"]: if pad["type"] == new_pad["type"]: if "count" in new_pad.keys(): pad["count"] = new_pad["count"] if "ports" in new_pad.keys(): for p_new in new_pad["ports"]: for p_default_idx in range(len(pad["ports"])): if p_new["name"] ==pad["ports"][p_default_idx]["name"]: pad["ports"][p_default_idx] = p_new break break if "condition" in pad.keys(): if pad["condition"]["def"]: padFrameModule+= "`ifdef "+pad["condition"]["name"]+"\n" else: padFrameModule+= "`ifndef "+pad["condition"]["name"]+"\n" padFrameModule+= writePowerCornerPad(pad) padFrameModule+= "`endif\n" else: padFrameModule+= writePowerCornerPad(pad)+"\n" #writePowerCornerPad is responsible for adding a power/corner pad to the output verilog def writePowerCornerPad(padUsed): padBody = str(padUsed["pad_name"]) + " " + str(padUsed["type"]) if int(padUsed["count"]) > 1: padBody+= " ["+str(int(padUsed["count"])-1)+":0] (\n" else: padBody+=" (\n" if "defines" in padUsed.keys(): padBody+=addPadDefines(padUsed["defines"],False) ports = padUsed["ports"] #add wires to the declaration plan if "wire_declaration_info" in padUsed.keys(): if padUsed["wire_declaration_info"] is not None: for wire in padUsed["wire_declaration_info"]: wiresToDeclare[wire["name"]] = wire #add header signals to the declaration plan if "interface_declaration_info" in padUsed.keys(): if padUsed["interface_declaration_info"] is not None: for signal in padUsed["interface_declaration_info"]: headerSignalsToDeclare[signal["name"]] = signal for port in ports: if "condition" in port.keys(): hasElse= False if "def" in port["condition"].keys(): portd = port["condition"]["def"] padBody+="`ifdef "+ port["condition"]["name"]+" \n" padBody+="\t."+portd["name"]+"( " if portd["connection"] is not None: if len(portd["connection"]) > 1: padBody+="{" for con in portd["connection"]: padBody+=con+", " padBody= padBody[:-2]+"}," 
else: padBody+=portd["connection"][0]+"," padBody=padBody[:-1]+"),\n" hasElse = True if "ndef" in port["condition"].keys(): portd = port["condition"]["ndef"] if hasElse: padBody += "`else \n" else: padBody+="`ifndef "+ port["condition"]["name"]+" \n" padBody+="\t."+portd["name"]+"( " if portd["connection"] is not None: padBody+="{" if len(portd["connection"]) > 1: for con in portd["connection"]: padBody+=con+", " padBody= padBody[:-2]+"}," else: padBody+=portd["connection"][0]+"," padBody=padBody[:-1]+"),\n" padBody+="`endif \n" else: padBody+="."+port["name"]+"( " if port["connection"] is not None: if len(port["connection"]) > 1: padBody+="{" for con in port["connection"]: padBody+=con+", " padBody= padBody[:-2]+"}," else: padBody+=port["connection"][0]+"," padBody=padBody[:-1]+"),\n" padBody=padBody[:-2]+");\n" return padBody #addPadDefines adds the defines inside the pads i.e. `ABUTMENT_PINS def addPadDefines(defines, isMacro): ret ="" for define in defines: if "condition" in define.keys(): hasElse=False if "def" in define["condition"]: hasElse = True ret += "`ifdef "+define["condition"]["name"] if isMacro: ret+=" \\\n" else: ret+=" \n" for single_define in define["condition"]["def"]: ret+="`"+single_define["name"] if isMacro: ret+=" \\\n" else: ret+=" \n" if "ndef" in define["condition"]: if hasElse: ret += "`else" if isMacro: ret+=" \\\n" else: ret+=" \n" else: ret += "`ifndef "+define["condition"]["name"] if isMacro: ret+=" \\\n" else: ret+=" \n" for single_define in define["condition"]["ndef"]: ret+="`"+single_define["name"] if isMacro: ret+=" \\\n" else: ret+=" \n" ret+="`endif" if isMacro: ret+=" \\\n" else: ret+=" \n" else: ret+="`"+define["name"] if isMacro: ret+=" \\\n" else: ret+=" \n" return ret #parseMacros parses user defines or pad library defines def parseMacros(defines): global topModuleDefines for define in defines: #add wires to the declaration plan if "wire_declaration_info" in define.keys(): if define["wire_declaration_info"] is not None: for wire in define["wire_declaration_info"]: wiresToDeclare[wire["name"]] = wire #add header signals to the declaration plan if "interface_declaration_info" in define.keys(): if define["interface_declaration_info"] is not None: for signal in define["interface_declaration_info"]: headerSignalsToDeclare[signal["name"]] = signal topModuleDefines+=writeMacro(define)[:-2]+"\n\n" #writeMacro writes the macro to the final verilog output def writeMacro(define): macro_body="" if "condition" in define.keys(): hasElse=False if "def" in define["condition"]: hasElse = True macro_body += "`ifdef "+define["condition"]["name"]+" \n" macro_body+=writeMacro(define["condition"]["def"]) if "ndef" in define["condition"]: if hasElse: macro_body = macro_body[:-2]+"\n`else \n" else: macro_body += "`ifndef "+define["condition"]["name"]+" \n" macro_body+=writeMacro(define["condition"]["ndef"]) macro_body=macro_body[:-2]+"\n`endif \n" else: macro_body+="`define "+define["name"]+" \\\n" if "ports" in define.keys(): for port in define["ports"]: if "condition" in port.keys(): hasElse=False if "def" in port["condition"]: hasElse = True portd = port["condition"]["def"] macro_body+="`ifdef "+ port["condition"]["name"]+" \\\n" macro_body+="\t."+portd["name"]+"( " if portd["connection"] is not None: if len(portd["connection"]) > 1: macro_body+="{" for con in portd["connection"]: macro_body+=con+", " macro_body= macro_body[:-2]+"}," else: macro_body+=portd["connection"][0]+"," macro_body=macro_body[:-1]+"), \\\n" if "ndef" in define["condition"]: portd = 
port["condition"]["ndef"] if hasElse: macro_body += "`else \\\n" else: macro_body += "`ifndef "+define["condition"]["name"]+" \\\n" macro_body+="\t."+portd["name"]+"( " if portd["connection"] is not None: if len(portd["connection"]) > 1: macro_body+="{" for con in portd["connection"]: macro_body+=con+", " macro_body= macro_body[:-2]+"}," else: macro_body+=portd["connection"][0]+"," macro_body=macro_body[:-1]+"), \\\n" macro_body+="`endif \\\n" else: macro_body+="\t."+port["name"]+"( " if port["connection"] is not None: if len(port["connection"]) > 1: macro_body+="{" for con in port["connection"]: macro_body+=con+", " macro_body= macro_body[:-2]+"}," else: macro_body+=port["connection"][0]+"," macro_body=macro_body[:-1]+"), \\\n" return macro_body #addTopModuleWires writes the wires required by the pads/user in the top module def addPadFrameWires(): global padFrameWires for wire in wiresToDeclare: if wire in headerSignalsToDeclare: continue else: decl = "\nwire " if "size" in wiresToDeclare[wire]: decl+= resolveSize(wiresToDeclare[wire]["size"]) decl+= wire+";" padFrameWires+=decl #Add the module interface required by the pads: i.e. vss, vdd1v8, vdd def addTopModulePadsInterface(): global topModuleHeader global padFrameHeaderDefinition global padFrameHeader if len(headerSignalsToDeclare): topModuleHeader=topModuleHeader[:-3]+",\n" for signal in headerSignalsToDeclare: decl = resolveInterfaceType(headerSignalsToDeclare[signal]["type"])+" " if "size" in headerSignalsToDeclare[signal]: decl+= resolveSize(headerSignalsToDeclare[signal]["size"]) +" " decl+= signal+",\n" topModuleHeader+=decl padFrameHeaderDefinition+=decl padFrameHeader+="."+signal+"("+signal+"),\n" topModuleHeader=topModuleHeader[:-2]+");\n" padFrameHeaderDefinition=padFrameHeaderDefinition[:-2]+");\n" padFrameHeader=padFrameHeader[:-2]+");\n" #parseDesignHeader parses the verilog module header of the core module and creates its wires and connections def parseDesignHeader(verilog_file): global padFrameHeaderDefinition global padFrameHeader global topModuleDeclarations global topModuleBody module_header = design_json["name"]+" core_inst(\n" wiresDeclarations = "" ast, x = parse([verilog_file]) #output = StringIO() out = StringIO() ast.show(buf=out) rlst=out.getvalue() #print(rlst) startString = "ModuleDef: " +design_json["name"] startPoint = rlst.find(startString) startPoint = rlst.find("Portlist:",startPoint) declIdx = rlst.find("Decl:",startPoint) instanceListIdx=rlst.find("InstanceList:",startPoint) moduleDefIdx=rlst.find("ModuleDef:",startPoint) if declIdx == -1: declIdx = 0x0fffffff if instanceListIdx == -1: instanceListIdx = 0x0fffffff if moduleDefIdx == -1: moduleDefIdx = 0x0fffffff endIdx = min(declIdx,instanceListIdx,moduleDefIdx) if endIdx != 0x0fffffff: rlst = rlst[:endIdx] portList =rlst[startPoint:].split("Ioport:") #print(portList) #print(portList) for port in portList[1:]: port_split = port.split("\n") for idx in range(len(port_split)): line = port_split[idx] if line.find("Input") != -1: signalName= getSignalName(line) signalSize = extractSizeFromVerilog(port_split,idx) module_header+= "."+signalName+"("+signalName+"),\n" wiresDeclarations+= "wire "+ signalSize+" "+signalName+";\n" padFrameHeader+= "."+signalName+"("+signalName+"),\n" padFrameHeaderDefinition+="output "+ signalSize+" "+signalName+",\n" break elif line.find("Output") != -1: signalName= getSignalName(line) signalSize = extractSizeFromVerilog(port_split,idx) module_header+= "."+signalName+"("+signalName+"),\n" wiresDeclarations+= "wire "+ 
signalSize+" "+signalName+";\n" padFrameHeader+= "."+signalName+"("+signalName+"),\n" padFrameHeaderDefinition+="input "+ signalSize+" "+signalName+",\n" break elif line.find("Inout") != -1: signalName= getSignalName(line).strip() signalSize = extractSizeFromVerilog(port_split,idx) module_header+= "."+signalName+"("+signalName+"),\n" wiresDeclarations+= "wire "+ signalSize+" "+signalName+";\n" padFrameHeader+= "."+signalName+"("+signalName+"),\n" padFrameHeaderDefinition+="inout "+ signalSize+" "+signalName+",\n" break module_header=module_header[:-2]+");\n" topModuleDeclarations+=wiresDeclarations topModuleBody+=module_header def getSignalName(line): pattern = re.compile(r'\s*?[\S+]+\,') for signal in re.findall(pattern, line): return signal[1:-1] def extractSizeFromVerilog(port_split, start_idx): size ="" end_idx = min(start_idx+3, len(port_split)) for idx in range(start_idx,end_idx): line = port_split[idx] if line.find("Width") != -1: pattern = re.compile(r'\s*?\d+\s') size+="[" for s in re.findall(pattern, port_split[idx+1]): size+= s+":" break for s in re.findall(pattern, port_split[idx+2]): size+= s+"]" break return size #parseIncludes parses the includes of a given json def parseIncludes(includes): global topModuleIncludes for include in includes: topModuleIncludes+=writeInclude(include)+"\n\n" #writeInclude writes the includes into the final verilog output def writeInclude(include): include_body="" if type(include) == type(dict()) and "condition" in include.keys(): hasElse=False if "def" in include["condition"]: hasElse = True include_body += "`ifdef "+include["condition"]["name"]+" \n" if type(include["condition"]["def"]) == type(list()): for i in include["condition"]["def"]: include_body+=writeInclude(i) else: include_body+=writeInclude(include["condition"]["def"]) if "ndef" in include["condition"]: if hasElse: include_body = include_body+"`else \n" else: include_body += "`ifndef " + include["condition"]["name"]+" \n" include_body+=writeInclude(include["condition"]["ndef"]) if type(include["condition"]["ndef"]) == type(list()): for i in include["condition"]["ndef"]: include_body+=writeInclude(i) else: include_body+=writeInclude(include["condition"]["ndef"]) include_body=include_body+"`endif \n" else: include_body+= "\t`include \""+include["name"]+"\"\n" return include_body #If defines/macros section exists in the descriptions, write those defines if "defines" in padsLib.keys() and len(padsLib["defines"]): topModuleDefines+="\n//PADs Library defines/macros\n" parseMacros(padsLib["defines"]) if "defines" in design_json_complete.keys() and len(design_json_complete["defines"]): topModuleDefines+="\n//User defines/macros\n" parseMacros(design_json_complete["defines"]) #If the includes section exists in the description, write those includes if "includes" in padsLib_json.keys() and len(padsLib_json["includes"]): topModuleIncludes+="\n//PADs Library includes\n" parseIncludes(padsLib_json["includes"]) if "includes" in design_json_complete.keys() and len(design_json_complete["includes"]): topModuleIncludes+="\n//User includes\n" parseIncludes(design_json_complete["includes"]) #Parse the pads padFrameModule+="\n//Input/Output PADs\n" parsePads() padFrameModule+="\n//Power/Corner PADs\n" parsePowerCornerPads() #if section extra_verilog exists, write append the extra_verilog to the module if "extra_verilog" in design_json_complete.keys(): if type(design_json_complete["extra_verilog"]) == type(list()): for ex in design_json_complete["extra_verilog"]: topModuleExtra+=ex+"\n" else: 
topModuleExtra=design_json_complete["extra_verilog"] #Read the user verilog file and parse the top module header if not os.path.exists(verilog_file): raise IOError("file not found: " + verilog_file) """verilogFileOpener = open(verilog_file, 'r') verilogFileData = verilogFileOpener.read() verilogFileOpener.close()""" topModuleBody+= "\n\n//Core Module Instantiation\n" parseDesignHeader(verilog_file) #add the module interface addTopModulePadsInterface() #add the used wires addPadFrameWires() #write endmodule topModuleBody+="\n\n//PadFrame Instantiation\n"+padFrameHeader topModuleBody+= "\n\nendmodule" #join padframe module sections padFrameModule=padFrameHeaderDefinition +"\n\n"+ padFrameWires+"\n\n" +padFrameModule+"\n\nendmodule" #join the code segments topModule = topModuleDefines+"\n\n\n"+topModuleIncludes+"\n\n\n"+topModuleMacros+"\n\n\n"+topModuleHeader+"\n\n\n"+topModuleDeclarations+"\n\n\n"+topModuleExtra+topModuleBody+"\n\n"+padFrameModule+"\n\n" #write the code outputFileOpener = open(output, 'w+') outputFileOpener.write(topModule) outputFileOpener.close()
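# --- Illustrative sketch (not from the original generator; the helper name is hypothetical) ---
# The size-resolution rule used above maps an integer size to "[size-1:0] "
# (or to nothing when the size is 1) and a {"offset": ..., "start": ...} dict
# to "[offset+start-1:start] ". A minimal standalone rendition of that rule:
def _sketch_resolve_size(size):
    """Hypothetical helper mirroring resolveSize(); assumes the same inputs."""
    if isinstance(size, int):
        return "[%d:0] " % (size - 1) if size > 1 else ""
    return "[%d:%d] " % (size["offset"] + size["start"] - 1, size["start"])

assert _sketch_resolve_size(1) == ""
assert _sketch_resolve_size(8) == "[7:0] "
assert _sketch_resolve_size({"offset": 4, "start": 2}) == "[5:2] "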
MACServerDiscover.py
Lillecarl/WinboxPoC
495
131237
<filename>MACServerDiscover.py import socket, binascii, threading, time # MAC server discovery by BigNerd95 search = True devices = [] def discovery(sock): global search while search: sock.sendto(b"\x00\x00\x00\x00", ("255.255.255.255", 5678)) time.sleep(1) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) sock.bind(("0.0.0.0", 5678)) threading.Thread(target=discovery, args=(sock,)).start() print("Looking for Mikrotik devices (MAC servers)\n") while search: try: data, addr = sock.recvfrom(1024) if b"\x00\x01\x00\x06" in data: start = data.index(b"\x00\x01\x00\x06") + 4 mac = data[start:start+6] if mac not in devices: devices.append(mac) #print(addr[0]) print('\t' + ':'.join('%02x' % b for b in mac)) print() except KeyboardInterrupt: search = False break
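# --- Illustrative sketch (not part of the original script; the helper name is hypothetical) ---
# The receive loop above looks for the header b"\x00\x01\x00\x06" and reads the
# six bytes that follow it as the device MAC. The same parsing on a canned reply:
def _sketch_extract_mac(data):
    """Mirror of the parsing done in the receive loop, for reference only."""
    marker = b"\x00\x01\x00\x06"
    if marker not in data:
        return None
    start = data.index(marker) + len(marker)
    return ':'.join('%02x' % b for b in data[start:start + 6])

# _sketch_extract_mac(b"\x00\x01\x00\x06\xaa\xbb\xcc\xdd\xee\xff") -> 'aa:bb:cc:dd:ee:ff'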
homeassistant/components/airnow/config_flow.py
MrDelik/core
30,023
131240
"""Config flow for AirNow integration.""" import logging from pyairnow import WebServiceAPI from pyairnow.errors import AirNowError, InvalidKeyError import voluptuous as vol from homeassistant import config_entries, core, exceptions from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from .const import DOMAIN _LOGGER = logging.getLogger(__name__) async def validate_input(hass: core.HomeAssistant, data): """ Validate the user input allows us to connect. Data has the keys from DATA_SCHEMA with values provided by the user. """ session = async_get_clientsession(hass) client = WebServiceAPI(data[CONF_API_KEY], session=session) lat = data[CONF_LATITUDE] lng = data[CONF_LONGITUDE] distance = data[CONF_RADIUS] # Check that the provided latitude/longitude provide a response try: test_data = await client.observations.latLong(lat, lng, distance=distance) except InvalidKeyError as exc: raise InvalidAuth from exc except AirNowError as exc: raise CannotConnect from exc if not test_data: raise InvalidLocation # Validation Succeeded return True class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Handle a config flow for AirNow.""" VERSION = 1 async def async_step_user(self, user_input=None): """Handle the initial step.""" errors = {} if user_input is not None: # Set a unique id based on latitude/longitude await self.async_set_unique_id( f"{user_input[CONF_LATITUDE]}-{user_input[CONF_LONGITUDE]}" ) self._abort_if_unique_id_configured() try: # Validate inputs await validate_input(self.hass, user_input) except CannotConnect: errors["base"] = "cannot_connect" except InvalidAuth: errors["base"] = "invalid_auth" except InvalidLocation: errors["base"] = "invalid_location" except Exception: # pylint: disable=broad-except _LOGGER.exception("Unexpected exception") errors["base"] = "unknown" else: # Create Entry return self.async_create_entry( title=f"AirNow Sensor at {user_input[CONF_LATITUDE]}, {user_input[CONF_LONGITUDE]}", data=user_input, ) return self.async_show_form( step_id="user", data_schema=vol.Schema( { vol.Required(CONF_API_KEY): str, vol.Optional( CONF_LATITUDE, default=self.hass.config.latitude ): cv.latitude, vol.Optional( CONF_LONGITUDE, default=self.hass.config.longitude ): cv.longitude, vol.Optional(CONF_RADIUS, default=150): int, } ), errors=errors, ) class CannotConnect(exceptions.HomeAssistantError): """Error to indicate we cannot connect.""" class InvalidAuth(exceptions.HomeAssistantError): """Error to indicate there is invalid auth.""" class InvalidLocation(exceptions.HomeAssistantError): """Error to indicate the location is invalid."""
utility/sprocket_utility.py
m95music/yukarin
139
131256
import yaml
from scipy.signal import firwin, lfilter


class SpeakerYML(object):
    def __init__(self, ymlf):
        # open yml file
        with open(ymlf) as yf:
            conf = yaml.safe_load(yf)

        # read parameter from yml file
        self.wav_fs = conf['wav']['fs']
        self.wav_bit = conf['wav']['bit']
        self.wav_fftl = conf['wav']['fftl']
        self.wav_shiftms = conf['wav']['shiftms']

        self.f0_minf0 = conf['f0']['minf0']
        self.f0_maxf0 = conf['f0']['maxf0']
        assert self.f0_minf0 < self.f0_maxf0, "should be minf0 < maxf0 in yml file"

        self.mcep_dim = conf['mcep']['dim']
        self.mcep_alpha = conf['mcep']['alpha']
        self.power_threshold = conf['power']['threshold']

        self.analyzer = conf['analyzer']

    def print_params(self):
        pass


class PairYML(object):
    def __init__(self, ymlf):
        # open yml file
        with open(ymlf) as yf:
            conf = yaml.safe_load(yf)

        self.jnt_n_iter = conf['jnt']['n_iter']

        self.GMM_mcep_n_mix = conf['GMM']['mcep']['n_mix']
        self.GMM_mcep_n_iter = conf['GMM']['mcep']['n_iter']
        self.GMM_mcep_covtype = conf['GMM']['mcep']['covtype']
        self.GMM_mcep_cvtype = conf['GMM']['mcep']['cvtype']

        self.GMM_codeap_n_mix = conf['GMM']['codeap']['n_mix']
        self.GMM_codeap_n_iter = conf['GMM']['codeap']['n_iter']
        self.GMM_codeap_covtype = conf['GMM']['codeap']['covtype']
        self.GMM_codeap_cvtype = conf['GMM']['codeap']['cvtype']

        self.GV_morph_coeff = conf['GV']['morph_coeff']

    def print_params(self):
        pass


def low_cut_filter(x, fs, cutoff=70):
    nyquist = fs // 2
    norm_cutoff = cutoff / nyquist

    # low cut filter
    fil = firwin(255, norm_cutoff, pass_zero=False)
    lcf_x = lfilter(fil, 1, x)

    return lcf_x
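# --- Illustrative usage sketch (not part of the original module; names are hypothetical) ---
# low_cut_filter() builds a 255-tap FIR high-pass (pass_zero=False) at the given
# cutoff and filters the waveform with it. Example on a synthetic signal:
import numpy as np

def _sketch_low_cut_demo():
    fs = 16000
    t = np.arange(fs) / fs
    # 50 Hz rumble plus a 440 Hz tone; the 70 Hz low-cut attenuates the rumble
    # and keeps the tone.
    x = np.sin(2 * np.pi * 50 * t) + 0.5 * np.sin(2 * np.pi * 440 * t)
    return low_cut_filter(x, fs, cutoff=70)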
python/graphscope/analytical/app/pagerank_nx.py
LI-Mingyu/GraphScope-MY
1,521
131264
<filename>python/graphscope/analytical/app/pagerank_nx.py<gh_stars>1000+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from graphscope.framework.app import AppAssets
from graphscope.framework.app import not_compatible_for
from graphscope.framework.app import project_to_simple

__all__ = ["pagerank_nx"]


@project_to_simple
@not_compatible_for("arrow_property", "dynamic_property")
def pagerank_nx(graph, alpha=0.85, max_iter=100, tol=1e-06):
    """Evaluate PageRank on a graph in NetworkX version.

    Args:
        graph (Graph): A projected simple graph.
        alpha (float, optional): Damping factor. Defaults to 0.85.
        max_iter (int, optional): Maximum number of iterations. Defaults to 100.
        tol (float, optional): Error tolerance used to check convergence in power method solver.

    Returns:
        :class:`graphscope.framework.context.VertexDataContextDAGNode`:
            A context with each vertex assigned with the pagerank value, evaluated in eager mode.

    Examples:

    .. code:: python

        import graphscope as gs
        sess = gs.session()
        g = sess.g()
        pg = g.project(vertices={"vlabel": []}, edges={"elabel": []})
        r = gs.pagerank(pg, alpha=0.85, max_iter=10)
        sess.close()
    """
    alpha = float(alpha)
    max_iter = int(max_iter)
    return AppAssets(algo="pagerank_nx", context="vertex_data")(
        graph, alpha, max_iter, tol
    )
tensorflow/python/estimator/warm_starting_util_test.py
shengfuintel/tensorflow
522
131266
<filename>tensorflow/python/estimator/warm_starting_util_test.py # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for warm_starting_util.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np import six from tensorflow.python.estimator import warm_starting_util as ws_util from tensorflow.python.feature_column import feature_column as fc from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import saver as saver_lib ones = init_ops.ones_initializer norms = init_ops.truncated_normal_initializer rand = init_ops.random_uniform_initializer class WarmStartingUtilTest(test.TestCase): def _write_vocab(self, string_values, file_name): vocab_file = os.path.join(self.get_temp_dir(), file_name) with open(vocab_file, "w") as f: f.write("\n".join(string_values)) return vocab_file def _write_checkpoint(self, sess): sess.run(variables.global_variables_initializer()) saver = saver_lib.Saver() ckpt_prefix = os.path.join(self.get_temp_dir(), "model") ckpt_state_name = "checkpoint" saver.save( sess, ckpt_prefix, global_step=0, latest_filename=ckpt_state_name) def _create_prev_run_var(self, var_name, shape=None, initializer=None, partitioner=None): with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: var = variable_scope.get_variable( var_name, shape=shape, initializer=initializer, partitioner=partitioner) self._write_checkpoint(sess) if partitioner: self.assertTrue(isinstance(var, variables.PartitionedVariable)) var = var._get_variable_list() return var, sess.run(var) def _create_dummy_inputs(self): return { "sc_int": array_ops.sparse_placeholder(dtypes.int32), "sc_hash": array_ops.sparse_placeholder(dtypes.string), "sc_keys": array_ops.sparse_placeholder(dtypes.string), "sc_vocab": array_ops.sparse_placeholder(dtypes.string), "real": array_ops.placeholder(dtypes.float32) } def _create_linear_model(self, feature_cols, partitioner): cols_to_vars = {} with variable_scope.variable_scope("", partitioner=partitioner): # Create the variables. fc.linear_model( features=self._create_dummy_inputs(), feature_columns=feature_cols, units=1, cols_to_vars=cols_to_vars) # Return a dictionary mapping each column to its variable, dropping the # 'bias' key that's also filled. 
cols_to_vars.pop("bias") return cols_to_vars def _assert_cols_to_vars(self, cols_to_vars, cols_to_expected_values, sess): for col, expected_values in six.iteritems(cols_to_expected_values): for i, var in enumerate(cols_to_vars[col]): self.assertAllEqual(expected_values[i], var.eval(sess)) def testWarmStartVar(self): _, prev_val = self._create_prev_run_var( "fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]]) with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: fruit_weights = variable_scope.get_variable( "fruit_weights", initializer=[[0.], [0.], [0.], [0.]]) ws_util._warmstart_var(fruit_weights, self.get_temp_dir()) sess.run(variables.global_variables_initializer()) self.assertAllEqual(prev_val, fruit_weights.eval(sess)) def testWarmStartVarPrevVarPartitioned(self): _, weights = self._create_prev_run_var( "fruit_weights", shape=[4, 1], initializer=[[0.5], [1.], [1.5], [2.]], partitioner=lambda shape, dtype: [2, 1]) prev_val = np.concatenate([weights[0], weights[1]], axis=0) with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: fruit_weights = variable_scope.get_variable( "fruit_weights", initializer=[[0.], [0.], [0.], [0.]]) ws_util._warmstart_var(fruit_weights, self.get_temp_dir()) sess.run(variables.global_variables_initializer()) self.assertAllEqual(prev_val, fruit_weights.eval(sess)) def testWarmStartVarCurrentVarPartitioned(self): _, prev_val = self._create_prev_run_var( "fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]]) with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: fruit_weights = variable_scope.get_variable( "fruit_weights", shape=[4, 1], initializer=[[0.], [0.], [0.], [0.]], partitioner=lambda shape, dtype: [2, 1]) self.assertTrue( isinstance(fruit_weights, variables.PartitionedVariable)) ws_util._warmstart_var(fruit_weights, self.get_temp_dir()) sess.run(variables.global_variables_initializer()) fruit_weights = fruit_weights._get_variable_list() new_val = np.concatenate( [fruit_weights[0].eval(sess), fruit_weights[1].eval(sess)], axis=0) self.assertAllEqual(prev_val, new_val) def testWarmStartVarBothVarsPartitioned(self): _, weights = self._create_prev_run_var( "old_scope/fruit_weights", shape=[4, 1], initializer=[[0.5], [1.], [1.5], [2.]], partitioner=lambda shape, dtype: [2, 1]) prev_val = np.concatenate([weights[0], weights[1]], axis=0) # New session and new graph. with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: fruit_weights = variable_scope.get_variable( "new_scope/fruit_weights", shape=[4, 1], initializer=[[0.], [0.], [0.], [0.]], partitioner=lambda shape, dtype: [2, 1]) self.assertTrue( isinstance(fruit_weights, variables.PartitionedVariable)) ws_util._warmstart_var( fruit_weights, self.get_temp_dir(), prev_tensor_name="old_scope/fruit_weights") sess.run(variables.global_variables_initializer()) fruit_weights = fruit_weights._get_variable_list() new_val = np.concatenate( [fruit_weights[0].eval(sess), fruit_weights[1].eval(sess)], axis=0) self.assertAllEqual(prev_val, new_val) def testWarmStartVarWithVocab(self): prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"], "old_vocab") _, _ = self._create_prev_run_var( "fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]]) # New vocab with elements in reverse order and one new element. new_vocab_path = self._write_vocab( ["orange", "guava", "banana", "apple", "raspberry"], "new_vocab") # New session and new graph. 
with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: fruit_weights = variable_scope.get_variable( "fruit_weights", initializer=[[0.], [0.], [0.], [0.], [0.]]) ws_util._warmstart_var_with_vocab(fruit_weights, new_vocab_path, 5, self.get_temp_dir(), prev_vocab_path) sess.run(variables.global_variables_initializer()) self.assertAllEqual([[2.], [1.5], [1.], [0.5], [0.]], fruit_weights.eval(sess)) def testWarmStartVarWithVocabPrevVarPartitioned(self): prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"], "old_vocab") _, _ = self._create_prev_run_var( "fruit_weights", shape=[4, 1], initializer=[[0.5], [1.], [1.5], [2.]], partitioner=lambda shape, dtype: [2, 1]) # New vocab with elements in reverse order and one new element. new_vocab_path = self._write_vocab( ["orange", "guava", "banana", "apple", "raspberry"], "new_vocab") # New session and new graph. with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: fruit_weights = variable_scope.get_variable( "fruit_weights", initializer=[[0.], [0.], [0.], [0.], [0.]]) ws_util._warmstart_var_with_vocab(fruit_weights, new_vocab_path, 5, self.get_temp_dir(), prev_vocab_path) sess.run(variables.global_variables_initializer()) self.assertAllEqual([[2.], [1.5], [1.], [0.5], [0.]], fruit_weights.eval(sess)) def testWarmStartVarWithVocabCurrentVarPartitioned(self): prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"], "old_vocab") _, _ = self._create_prev_run_var( "fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]]) # New vocab with elements in reverse order and one new element. new_vocab_path = self._write_vocab( ["orange", "guava", "banana", "apple", "raspberry"], "new_vocab") # New session and new graph. with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: fruit_weights = variable_scope.get_variable( "fruit_weights", shape=[6, 1], initializer=[[0.], [0.], [0.], [0.], [0.], [0.]], partitioner=lambda shape, dtype: [2, 1]) ws_util._warmstart_var_with_vocab( fruit_weights, new_vocab_path, 5, self.get_temp_dir(), prev_vocab_path, current_oov_buckets=1) sess.run(variables.global_variables_initializer()) self.assertTrue( isinstance(fruit_weights, variables.PartitionedVariable)) fruit_weights_vars = fruit_weights._get_variable_list() self.assertAllEqual([[2.], [1.5], [1.]], fruit_weights_vars[0].eval(sess)) self.assertAllEqual([[0.5], [0.], [0.]], fruit_weights_vars[1].eval(sess)) def testWarmStartVarWithVocabBothVarsPartitioned(self): prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"], "old_vocab") _, _ = self._create_prev_run_var( "fruit_weights", shape=[4, 1], initializer=[[0.5], [1.], [1.5], [2.]], partitioner=lambda shape, dtype: [2, 1]) # New vocab with elements in reverse order and two new elements. new_vocab_path = self._write_vocab( ["orange", "guava", "banana", "apple", "raspberry", "blueberry"], "new_vocab") # New session and new graph. 
with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: fruit_weights = variable_scope.get_variable( "fruit_weights", shape=[6, 1], initializer=[[0.], [0.], [0.], [0.], [0.], [0.]], partitioner=lambda shape, dtype: [2, 1]) ws_util._warmstart_var_with_vocab(fruit_weights, new_vocab_path, 6, self.get_temp_dir(), prev_vocab_path) sess.run(variables.global_variables_initializer()) self.assertTrue( isinstance(fruit_weights, variables.PartitionedVariable)) fruit_weights_vars = fruit_weights._get_variable_list() self.assertAllEqual([[2.], [1.5], [1.]], fruit_weights_vars[0].eval(sess)) self.assertAllEqual([[0.5], [0.], [0.]], fruit_weights_vars[1].eval(sess)) def testWarmStartInputLayer_SparseColumnIntegerized(self): # Create feature column. sc_int = fc.categorical_column_with_identity("sc_int", num_buckets=10) # Save checkpoint from which to warm-start. _, prev_int_val = self._create_prev_run_var( "linear_model/sc_int/weights", shape=[10, 1], initializer=ones()) # Verify we initialized the values correctly. self.assertAllEqual(np.ones([10, 1]), prev_int_val) partitioner = lambda shape, dtype: [1] * len(shape) # New graph, new session WITHOUT warmstarting. with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: cols_to_vars = self._create_linear_model([sc_int], partitioner) sess.run(variables.global_variables_initializer()) # Without warmstarting, the weights should be initialized using default # initializer (which is init_ops.zeros_initializer). self._assert_cols_to_vars(cols_to_vars, {sc_int: [np.zeros([10, 1])]}, sess) # New graph, new session with warmstarting. with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: cols_to_vars = self._create_linear_model([sc_int], partitioner) ws_util._warmstart_input_layer(cols_to_vars, ws_util._WarmStartSettings( self.get_temp_dir())) sess.run(variables.global_variables_initializer()) # Verify weights were correctly warmstarted. self._assert_cols_to_vars(cols_to_vars, {sc_int: [prev_int_val]}, sess) def testWarmStartInputLayer_SparseColumnHashed(self): # Create feature column. sc_hash = fc.categorical_column_with_hash_bucket( "sc_hash", hash_bucket_size=15) # Save checkpoint from which to warm-start. _, prev_hash_val = self._create_prev_run_var( "linear_model/sc_hash/weights", shape=[15, 1], initializer=norms()) partitioner = lambda shape, dtype: [1] * len(shape) # New graph, new session WITHOUT warmstarting. with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: cols_to_vars = self._create_linear_model([sc_hash], partitioner) sess.run(variables.global_variables_initializer()) # Without warmstarting, the weights should be initialized using default # initializer (which is init_ops.zeros_initializer). self._assert_cols_to_vars(cols_to_vars, {sc_hash: [np.zeros([15, 1])]}, sess) # New graph, new session with warmstarting. with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: cols_to_vars = self._create_linear_model([sc_hash], partitioner) ws_util._warmstart_input_layer(cols_to_vars, ws_util._WarmStartSettings( self.get_temp_dir())) sess.run(variables.global_variables_initializer()) # Verify weights were correctly warmstarted. self._assert_cols_to_vars(cols_to_vars, {sc_hash: [prev_hash_val]}, sess) def testWarmStartInputLayer_SparseColumnVocabulary(self): # Create vocab for sparse column "sc_vocab". vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"], "vocab") # Create feature column. 
sc_vocab = fc.categorical_column_with_vocabulary_file( "sc_vocab", vocabulary_file=vocab_path, vocabulary_size=4) # Save checkpoint from which to warm-start. _, prev_vocab_val = self._create_prev_run_var( "linear_model/sc_vocab/weights", shape=[4, 1], initializer=ones()) partitioner = lambda shape, dtype: [1] * len(shape) # New graph, new session WITHOUT warmstarting. with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: cols_to_vars = self._create_linear_model([sc_vocab], partitioner) sess.run(variables.global_variables_initializer()) # Without warmstarting, the weights should be initialized using default # initializer (which is init_ops.zeros_initializer). self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [np.zeros([4, 1])]}, sess) # New graph, new session with warmstarting. with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: cols_to_vars = self._create_linear_model([sc_vocab], partitioner) # Since old vocab is not explicitly set in WarmStartSettings, the old # vocab is assumed to be same as new vocab. ws_util._warmstart_input_layer(cols_to_vars, ws_util._WarmStartSettings( self.get_temp_dir())) sess.run(variables.global_variables_initializer()) # Verify weights were correctly warmstarted. self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [prev_vocab_val]}, sess) def testWarmStartInputLayer_BucketizedColumn(self): # Create feature column. real = fc.numeric_column("real") real_bucket = fc.bucketized_column(real, boundaries=[0., 1., 2., 3.]) # Save checkpoint from which to warm-start. _, prev_bucket_val = self._create_prev_run_var( "linear_model/real_bucketized/weights", shape=[5, 1], initializer=norms()) partitioner = lambda shape, dtype: [1] * len(shape) # New graph, new session WITHOUT warmstarting. with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: cols_to_vars = self._create_linear_model([real_bucket], partitioner) sess.run(variables.global_variables_initializer()) # Without warmstarting, the weights should be initialized using default # initializer (which is init_ops.zeros_initializer). self._assert_cols_to_vars(cols_to_vars, {real_bucket: [np.zeros([5, 1])]}, sess) # New graph, new session with warmstarting. with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: cols_to_vars = self._create_linear_model([real_bucket], partitioner) ws_util._warmstart_input_layer(cols_to_vars, ws_util._WarmStartSettings( self.get_temp_dir())) sess.run(variables.global_variables_initializer()) # Verify weights were correctly warmstarted. self._assert_cols_to_vars(cols_to_vars, {real_bucket: [prev_bucket_val]}, sess) def testWarmStartInputLayer_MultipleCols(self): # Create vocab for sparse column "sc_vocab". vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"], "vocab") # Create feature columns. sc_int = fc.categorical_column_with_identity("sc_int", num_buckets=10) sc_hash = fc.categorical_column_with_hash_bucket( "sc_hash", hash_bucket_size=15) sc_keys = fc.categorical_column_with_vocabulary_list( "sc_keys", vocabulary_list=["a", "b", "c", "e"]) sc_vocab = fc.categorical_column_with_vocabulary_file( "sc_vocab", vocabulary_file=vocab_path, vocabulary_size=4) real = fc.numeric_column("real") real_bucket = fc.bucketized_column(real, boundaries=[0., 1., 2., 3.]) cross = fc.crossed_column([sc_keys, sc_vocab], hash_bucket_size=20) all_linear_cols = [sc_int, sc_hash, sc_keys, sc_vocab, real_bucket, cross] # Save checkpoint from which to warm-start. 
with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: sc_int_weights = variable_scope.get_variable( "linear_model/sc_int/weights", shape=[10, 1], initializer=ones()) sc_hash_weights = variable_scope.get_variable( "linear_model/sc_hash/weights", shape=[15, 1], initializer=norms()) sc_keys_weights = variable_scope.get_variable( "linear_model/sc_keys/weights", shape=[4, 1], initializer=rand()) sc_vocab_weights = variable_scope.get_variable( "linear_model/sc_vocab/weights", shape=[4, 1], initializer=ones()) real_bucket_weights = variable_scope.get_variable( "linear_model/real_bucketized/weights", shape=[5, 1], initializer=norms()) cross_weights = variable_scope.get_variable( "linear_model/sc_keys_X_sc_vocab/weights", shape=[20, 1], initializer=rand()) self._write_checkpoint(sess) (prev_int_val, prev_hash_val, prev_keys_val, prev_vocab_val, prev_bucket_val, prev_cross_val) = sess.run([ sc_int_weights, sc_hash_weights, sc_keys_weights, sc_vocab_weights, real_bucket_weights, cross_weights ]) # Verify we initialized the values correctly. self.assertAllEqual(np.ones([10, 1]), prev_int_val) partitioner = lambda shape, dtype: [1] * len(shape) # New graph, new session WITHOUT warmstarting. with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: cols_to_vars = self._create_linear_model(all_linear_cols, partitioner) sess.run(variables.global_variables_initializer()) # Without warmstarting, all weights should be initialized using default # initializer (which is init_ops.zeros_initializer). self._assert_cols_to_vars(cols_to_vars, { sc_int: [np.zeros([10, 1])], sc_hash: [np.zeros([15, 1])], sc_keys: [np.zeros([4, 1])], sc_vocab: [np.zeros([4, 1])], real_bucket: [np.zeros([5, 1])], cross: [np.zeros([20, 1])], }, sess) # New graph, new session with warmstarting. with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: cols_to_vars = self._create_linear_model(all_linear_cols, partitioner) ws_util._warmstart_input_layer(cols_to_vars, ws_util._WarmStartSettings( self.get_temp_dir())) sess.run(variables.global_variables_initializer()) # Verify weights were correctly warmstarted. self._assert_cols_to_vars(cols_to_vars, { sc_int: [prev_int_val], sc_hash: [prev_hash_val], sc_keys: [prev_keys_val], sc_vocab: [prev_vocab_val], real_bucket: [prev_bucket_val], cross: [prev_cross_val], }, sess) def testWarmStartInputLayerMoreSettings(self): # Create old and new vocabs for sparse column "sc_vocab". prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"], "old_vocab") new_vocab_path = self._write_vocab( ["orange", "guava", "banana", "apple", "raspberry", "blueberry"], "new_vocab") # Create feature columns. sc_hash = fc.categorical_column_with_hash_bucket( "sc_hash", hash_bucket_size=15) sc_keys = fc.categorical_column_with_vocabulary_list( "sc_keys", vocabulary_list=["a", "b", "c", "e"]) sc_vocab = fc.categorical_column_with_vocabulary_file( "sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6) all_linear_cols = [sc_hash, sc_keys, sc_vocab] # Save checkpoint from which to warm-start. 
with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: _ = variable_scope.get_variable( "linear_model/sc_hash/weights", shape=[15, 1], initializer=norms()) sc_keys_weights = variable_scope.get_variable( "some_other_name", shape=[4, 1], initializer=rand()) _ = variable_scope.get_variable( "linear_model/sc_vocab/weights", initializer=[[0.5], [1.], [2.], [3.]]) self._write_checkpoint(sess) prev_keys_val = sess.run(sc_keys_weights) def _partitioner(shape, dtype): # pylint:disable=unused-argument # Partition each var into 2 equal slices. partitions = [1] * len(shape) partitions[0] = min(2, shape[0].value) return partitions # New graph, new session with warmstarting. with ops.Graph().as_default() as g: with self.test_session(graph=g) as sess: cols_to_vars = self._create_linear_model(all_linear_cols, _partitioner) ws_settings = ws_util._WarmStartSettings( self.get_temp_dir(), col_to_prev_vocab={sc_vocab: prev_vocab_path}, col_to_prev_tensor={sc_keys: "some_other_name"}, exclude_columns=[sc_hash]) ws_util._warmstart_input_layer(cols_to_vars, ws_settings) sess.run(variables.global_variables_initializer()) # Verify weights were correctly warmstarted. Var corresponding to # sc_hash should not be warm-started. Var corresponding to sc_vocab # should be correctly warmstarted after vocab remapping. self._assert_cols_to_vars(cols_to_vars, { sc_keys: np.split(prev_keys_val, 2), sc_hash: [np.zeros([8, 1]), np.zeros([7, 1])], sc_vocab: [ np.array([[3.], [2.], [1.]]), np.array([[0.5], [0.], [0.]]) ] }, sess) def testErrorConditions(self): self.assertRaises(ValueError, ws_util._WarmStartSettings, None) x = variable_scope.get_variable( "x", shape=[4, 1], initializer=ones(), partitioner=lambda shape, dtype: [2, 1]) # List of PartitionedVariable is invalid type. self.assertRaises(TypeError, ws_util._warmstart_var, [x], prev_ckpt="/tmp") self.assertRaises(TypeError, ws_util._warmstart_var_with_vocab, [x], "/tmp", 5, "/tmp", "/tmp") # Keys of type other than FeatureColumn. self.assertRaises(TypeError, ws_util._warmstart_input_layer, {"StringType": x}, ws_util._WarmStartSettings("/tmp")) if __name__ == "__main__": test.main()
scripts/end_date_volumes_invariant_13a.py
simpsonw/atmosphere
197
131268
#!/usr/bin/env python import argparse import django django.setup() from core.models import Volume from django.db import connection from django.db.models.functions import Now def main(): ''' This script will end date volumes that come up for Invariant #13a on https://tasmo.atmo.cloud and will be run via cron every ___________. ''' # Dry run option parser = argparse.ArgumentParser() parser.add_argument( "--dry-run", action="store_true", help="Do not actually end-date any volumes" ) args = parser.parse_args() if args.dry_run: print 'DRY RUN -- No Volumes will be end-dated' volumes_from_invariant_13a = [] # This query comes from here: https://tasmo.atmo.cloud/queries/64/source#87 query = '''WITH volumes_users_allocations AS ( SELECT volume.id AS volume_id, volume.name AS volume_name, volume.description AS volume_description, proj.name AS atmo_project_name, proj.description AS atmo_project_description, au.id AS user_id, au.username, au.is_staff, au.is_superuser, CASE WHEN ins_src.provider_id = 4 THEN 'IU' WHEN ins_src.provider_id = 5 THEN 'TACC' ELSE 'UNKNOWN' END AS src_provider, ins_src.identifier AS openstack_identifier, ins_src.start_date, ins_src.end_date, string_agg(current_als.name, ',') AS current_allocation_sources FROM volume LEFT OUTER JOIN instance_source ins_src ON volume.instance_source_id = ins_src.id LEFT OUTER JOIN project proj ON volume.project_id = proj.id LEFT OUTER JOIN atmosphere_user au ON ins_src.created_by_id = au.id LEFT OUTER JOIN user_allocation_source current_uals ON au.id = current_uals.user_id LEFT OUTER JOIN allocation_source current_als ON current_uals.allocation_source_id = current_als.id GROUP BY volume.id, proj.id, au.id, ins_src.id), user_allocation_source_deleted_events AS ( SELECT DISTINCT event_table.name AS event_name, event_table.entity_id AS username, event_table.payload :: json ->> 'allocation_source_name' AS allocation_source_name, max(TIMESTAMP) AS last_event, min(TIMESTAMP) AS first_event FROM event_table WHERE event_table.name = 'user_allocation_source_deleted' GROUP BY event_table.name, event_table.entity_id, event_table.payload :: json ->> 'allocation_source_name' ), user_allocation_source_deleted_events_grouped AS ( SELECT DISTINCT event_name, username, string_agg(DISTINCT allocation_source_name, ',') AS historic_allocation_sources, max(last_event) AS last_event, min(first_event) AS first_event FROM user_allocation_source_deleted_events GROUP BY event_name, username ), users_with_no_allocation_sources AS ( SELECT au.id AS user_id, au.username, au.is_staff, au.is_superuser FROM atmosphere_user au LEFT OUTER JOIN user_allocation_source uas ON au.id = uas.user_id WHERE uas.id IS NULL ), users_with_no_allocation_source_over_six_months AS ( SELECT uwnas.user_id, uwnas.username, uwnas.is_staff, uwnas.is_superuser, uasdeg.last_event, uasdeg.historic_allocation_sources FROM users_with_no_allocation_sources uwnas LEFT OUTER JOIN user_allocation_source_deleted_events_grouped uasdeg ON uasdeg.username = uwnas.username WHERE uasdeg.last_event IS NULL OR uasdeg.last_event < NOW() - INTERVAL '6 months' ), active_volumes_for_users_with_no_allocation_source_over_six_months AS ( SELECT * FROM volumes_users_allocations vua LEFT JOIN users_with_no_allocation_source_over_six_months uwnasosm ON vua.user_id = uwnasosm.user_id WHERE uwnasosm.user_id IS NOT NULL AND vua.end_date IS NULL AND vua.username <> 'atmoadmin' ), instancesources_appversions_apps AS ( SELECT DISTINCT isrc.identifier AS openstack_image_identifier, isrc.start_date AS isrc_start_date, 
isrc.end_date AS isrc_end_date, CASE WHEN isrc.provider_id = 4 THEN 'IU' WHEN isrc.provider_id = 5 THEN 'TACC' ELSE 'UNKNOWN' END AS isrc_provider, appv.created_by_id AS appv_created_by_id, appv.start_date AS appv_start_date, appv.end_date AS appv_end_date, appv.name AS appv_name, app.created_by_id AS app_created_by_id, app.name AS app_name, app.description AS app_description, app.start_date AS app_start_date, app.end_date AS app_end_date FROM application_version appv LEFT OUTER JOIN provider_machine pm ON appv.id = pm.application_version_id LEFT OUTER JOIN application app ON app.id = appv.application_id LEFT OUTER JOIN instance_source isrc ON pm.instance_source_id = isrc.id ), instancesources_appversions_apps_instances AS ( SELECT DISTINCT isrc.identifier AS openstack_image_identifier, isrc.start_date AS isrc_start_date, isrc.end_date AS isrc_end_date, appv.created_by_id AS appv_created_by_id, appv.start_date AS appv_start_date, appv.end_date AS appv_end_date, app.created_by_id AS app_created_by_id, app.start_date AS app_start_date, app.end_date AS app_end_date, ins.id AS instance_id, ins.created_by_id AS instance_created_by_id, ins.start_date AS instance_start_date, ins.end_date AS instance_end_date FROM application_version appv LEFT OUTER JOIN provider_machine pm ON appv.id = pm.application_version_id LEFT OUTER JOIN application app ON app.id = appv.application_id LEFT OUTER JOIN instance_source isrc ON pm.instance_source_id = isrc.id LEFT OUTER JOIN instance ins ON isrc.id = ins.source_id ), images_users_allocations_agg AS ( SELECT DISTINCT isrc.identifier AS openstack_identifier, jsonb_agg(DISTINCT isrc.*) AS instance_sources, jsonb_agg(DISTINCT pm.*) AS provider_machine, jsonb_agg(DISTINCT app.*) AS applications, jsonb_agg(DISTINCT appv.*) AS application_versions, jsonb_agg(DISTINCT ins.*) AS instances FROM application_version appv LEFT OUTER JOIN provider_machine pm ON appv.id = pm.application_version_id LEFT OUTER JOIN application app ON app.id = appv.application_id LEFT OUTER JOIN instance_source isrc ON pm.instance_source_id = isrc.id LEFT OUTER JOIN instance ins ON isrc.id = ins.source_id GROUP BY isrc.identifier ), active_instancesources_and_appversions_for_users_with_no_allocation_source_over_six_months AS ( SELECT iaa.*, uwnasosm.username AS created_by_user_username, uwnasosm.is_staff AS created_by_user_is_staff, uwnasosm.is_superuser AS created_by_user_is_superuser, uwnasosm.last_event AS created_by_user_last_allocation_end_date, uwnasosm.historic_allocation_sources AS created_by_user_historic_allocation_sources FROM instancesources_appversions_apps iaa LEFT JOIN users_with_no_allocation_source_over_six_months uwnasosm ON iaa.appv_created_by_id = uwnasosm.user_id WHERE uwnasosm.user_id IS NOT NULL AND (isrc_end_date IS NULL OR appv_end_date IS NULL OR app_end_date IS NULL) AND uwnasosm.username NOT IN ('admin', 'atmoadmin')), aiaafuwnasosm_with_current_allocation_sources AS ( SELECT aiaafuwnasosm.openstack_image_identifier, aiaafuwnasosm.isrc_provider, aiaafuwnasosm.isrc_end_date, aiaafuwnasosm.isrc_start_date, aiaafuwnasosm.appv_name, aiaafuwnasosm.appv_start_date, aiaafuwnasosm.appv_end_date, aiaafuwnasosm.appv_created_by_id, aiaafuwnasosm.app_end_date, aiaafuwnasosm.app_start_date, aiaafuwnasosm.app_description, aiaafuwnasosm.app_name, aiaafuwnasosm.app_created_by_id, aiaafuwnasosm.created_by_user_username, aiaafuwnasosm.created_by_user_is_staff, aiaafuwnasosm.created_by_user_is_superuser, aiaafuwnasosm.created_by_user_last_allocation_end_date, 
aiaafuwnasosm.created_by_user_historic_allocation_sources, string_agg(DISTINCT current_als.name, ',') AS current_allocation_sources FROM active_instancesources_and_appversions_for_users_with_no_allocation_source_over_six_months aiaafuwnasosm LEFT OUTER JOIN user_allocation_source current_uals ON aiaafuwnasosm.appv_created_by_id = current_uals.user_id LEFT OUTER JOIN allocation_source current_als ON current_uals.allocation_source_id = current_als.id GROUP BY aiaafuwnasosm.openstack_image_identifier, aiaafuwnasosm.isrc_provider, aiaafuwnasosm.isrc_end_date, aiaafuwnasosm.isrc_start_date, aiaafuwnasosm.appv_name, aiaafuwnasosm.appv_start_date, aiaafuwnasosm.appv_end_date, aiaafuwnasosm.appv_created_by_id, aiaafuwnasosm.app_end_date, aiaafuwnasosm.app_start_date, aiaafuwnasosm.app_description, aiaafuwnasosm.app_name, aiaafuwnasosm.app_created_by_id, aiaafuwnasosm.created_by_user_username, aiaafuwnasosm.created_by_user_is_staff, aiaafuwnasosm.created_by_user_is_superuser, aiaafuwnasosm.created_by_user_last_allocation_end_date, aiaafuwnasosm.created_by_user_historic_allocation_sources ORDER BY aiaafuwnasosm.created_by_user_last_allocation_end_date ASC ) SELECT * FROM active_volumes_for_users_with_no_allocation_source_over_six_months avfuwnasosm ORDER BY last_event ASC;''' # Use the query above to get volumes listed for Invariant #13a with connection.cursor() as cursor: cursor.execute(query) # Get the results as a dictionary rows = dictfetchall(cursor) # If there are any results from the query if rows: volumes = Volume.objects.all() # Get the Volume object and put it into our list for row in rows: volume = volumes.get(pk=row['volume_id']) volumes_from_invariant_13a.append(volume) print 'Here are volumes from invariant 13a:' ctr = 1 for vol in volumes_from_invariant_13a: print ctr ctr = ctr + 1 print vol.name.encode('utf-8') print vol if not args.dry_run: vol.end_date = Now() vol.save() print 'End-dated %s' % vol print '----' # Helper function to get query results as a dictionary def dictfetchall(cursor): columns = [col[0] for col in cursor.description] return [dict(zip(columns, row)) for row in cursor.fetchall()] if __name__ == "__main__": main()
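# --- Illustrative sketch (not part of the original script; the helper name is hypothetical) ---
# dictfetchall() zips the column names from cursor.description with each fetched
# row. The same idea on canned data, without a database cursor:
def _sketch_dictfetchall(description, rows):
    columns = [col[0] for col in description]
    return [dict(zip(columns, row)) for row in rows]

# _sketch_dictfetchall([("volume_id",), ("username",)], [(42, "alice")])
# -> [{'volume_id': 42, 'username': 'alice'}]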
library/source1/vtx/v7/structs/model.py
anderlli0053/SourceIO
199
131273
<filename>library/source1/vtx/v7/structs/model.py from typing import List from . import Base from . import ByteIO from .lod import ModelLod class Model(Base): def __init__(self): self.model_lods = [] # type: List[ModelLod] def read(self, reader: ByteIO): entry = reader.tell() lod_count, lod_offset = reader.read_fmt('ii') with reader.save_current_pos(): if lod_count > 0 and lod_offset != 0: reader.seek(entry + lod_offset) for lod_id in range(lod_count): model_lod = ModelLod(lod_id) model_lod.read(reader) self.model_lods.append(model_lod)
syte/views/instagram.py
doowb/syte
426
131296
<reponame>doowb/syte # -*- coding: utf-8 -*- import json import requests from django.shortcuts import redirect, render from django.conf import settings from django.http import HttpResponse def instagram_auth(request): context = dict() code = request.GET.get('code', None) error = request.GET.get('error_description', None) if not code and not error: return redirect('{0}?client_id={1}&redirect_uri={2}instagram/auth/&response_type=code'.format( settings.INSTAGRAM_OAUTH_AUTHORIZE_URL, settings.INSTAGRAM_CLIENT_ID, settings.SITE_ROOT_URI)) if code: r = requests.post(settings.INSTAGRAM_OAUTH_ACCESS_TOKEN_URL, data={ 'client_id': settings.INSTAGRAM_CLIENT_ID, 'client_secret': settings.INSTAGRAM_CLIENT_SECRET, 'grant_type': 'authorization_code', 'redirect_uri': '{0}instagram/auth/'.format(settings.SITE_ROOT_URI), 'code': code, }) data = json.loads(r.text) error = data.get('error_message', None) if not error: context['token'] = data['access_token'] context['user_id'] = data['user'].get('id', None) context['user_name'] = data['user'].get('full_name', None) if error: context['error'] = error return render(request, 'instagram_auth.html', context) def instagram(request): user_r = requests.get('{0}users/{1}/?access_token={2}'.format( settings.INSTAGRAM_API_URL, settings.INSTAGRAM_USER_ID, settings.INSTAGRAM_ACCESS_TOKEN)) user_data = json.loads(user_r.text) media_r = requests.get('{0}users/{1}/media/recent/?access_token={2}'.format( settings.INSTAGRAM_API_URL, settings.INSTAGRAM_USER_ID, settings.INSTAGRAM_ACCESS_TOKEN)) media_data = json.loads(media_r.text) context = { 'user': user_data.get('data', None), 'media': media_data.get('data', None), 'pagination': media_data.get('pagination', None), } return HttpResponse(content=json.dumps(context), status=media_r.status_code, content_type=media_r.headers['content-type']) def instagram_next(request, max_id): media_r = requests.get('{0}users/{1}/media/recent/?access_token={2}&max_id={3}'.format( settings.INSTAGRAM_API_URL, settings.INSTAGRAM_USER_ID, settings.INSTAGRAM_ACCESS_TOKEN, max_id)) media_data = json.loads(media_r.text) context = { 'media': media_data.get('data', None), 'pagination': media_data.get('pagination', None), } return HttpResponse(content=json.dumps(context), status=media_r.status_code, content_type=media_r.headers['content-type'])
minihack/agent/common/models/embed.py
samvelyan/minihack-1
217
131322
<reponame>samvelyan/minihack-1<gh_stars>100-1000
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from torch import nn
import torch
from nle import nethack as nh
from typing import NamedTuple, Union
from collections import namedtuple
from minihack.agent.common.util.id_pairs import id_pairs_table
import logging

Ratio = Union[int, bool]


class Targets(NamedTuple):
    """Class for configuring which ids you want to embed into the single
    GlyphEmbedding, and in what ratios. The ratio is only relevant if
    do_linear_layer is false, and the embedding is pure concatenation.
    """

    glyphs: Ratio = 0
    groups: Ratio = 0
    subgroup_ids: Ratio = 0
    colors: Ratio = 0
    chars: Ratio = 0
    specials: Ratio = 0
    do_linear_layer: bool = True

    def count_matrices(self):
        """Count of matrices required"""
        return sum(self) - int(self.do_linear_layer)


GLYPH_TYPE_STRATEGIES = {
    "full": Targets(glyphs=True),
    "group_id": Targets(groups=True, subgroup_ids=True),
    "color_char": Targets(colors=True, chars=True, specials=True),
    "all": Targets(
        groups=True, subgroup_ids=True, colors=True, chars=True, specials=True
    ),
    "all_cat": Targets(
        groups=1,
        subgroup_ids=3,
        colors=1,
        chars=2,
        specials=1,
        do_linear_layer=False,
    ),
}


class GlyphEmbedding(nn.Module):
    """Take the glyph information and return an embedding vector."""

    def __init__(
        self, glyph_type, dimension, device=None, use_index_select=None
    ):
        super(GlyphEmbedding, self).__init__()
        logging.debug("Embedding on device: %s ", device)
        self.glyph_type = glyph_type
        self.use_index_select = use_index_select
        self.device = device
        self.dim = dimension
        if glyph_type not in GLYPH_TYPE_STRATEGIES:
            raise RuntimeError("unexpected glyph_type=%s" % self.glyph_type)
        strategy = GLYPH_TYPE_STRATEGIES[glyph_type]
        self.strategy = strategy
        self._unit_dim = dimension // strategy.count_matrices()
        self._remainder_dim = (
            self.dim - self._unit_dim * strategy.count_matrices()
        )

        if self.requires_id_pairs_table:
            self.register_buffer(
                "_id_pairs_table", torch.from_numpy(id_pairs_table())
            )
        else:
            self._id_pairs_table = None

        # Build our custom embedding matrices
        embed = {}
        if strategy.glyphs:
            embed["glyphs"] = nn.Embedding(
                nh.MAX_GLYPH, self._dim(strategy.glyphs)
            )
        if strategy.colors:
            embed["colors"] = nn.Embedding(16, self._dim(strategy.colors))
        if strategy.chars:
            embed["chars"] = nn.Embedding(256, self._dim(strategy.chars))
        if strategy.specials:
            embed["specials"] = nn.Embedding(256, self._dim(strategy.specials))
        if strategy.groups:
            num_groups = self.id_pairs_table.select(1, 1).max().item() + 1
            embed["groups"] = nn.Embedding(
                num_groups, self._dim(strategy.groups)
            )
        if strategy.subgroup_ids:
            num_subgroup_ids = (
                self.id_pairs_table.select(1, 0).max().item() + 1
            )
            embed["subgroup_ids"] = nn.Embedding(
                num_subgroup_ids, self._dim(strategy.subgroup_ids)
            )
        self.embeddings = nn.ModuleDict(embed)
        self.targets = list(embed.keys())
        self.GlyphTuple = namedtuple("GlyphTuple", self.targets)

        if strategy.do_linear_layer and strategy.count_matrices() > 1:
            self.linear = nn.Linear(
                strategy.count_matrices() * self.dim, self.dim
            )

        if device is not None:
            self.to(device)

    def _dim(self, units):
        """Decide the embedding size for a single matrix. If using a linear
        layer at the end this is always the embedding dimension, otherwise
        it is a fraction of the embedding dim"""
        if self.strategy.do_linear_layer:
            return self.dim
        else:
            dim = units * self._unit_dim + self._remainder_dim
            self._remainder_dim = 0
            return dim

    @property
    def requires_id_pairs_table(self):
        return self.strategy.groups or self.strategy.subgroup_ids

    @property
    def id_pairs_table(self):
        return self._id_pairs_table

    def prepare_input(self, inputs):
        """Take the inputs to the network as a dictionary and return a
        namedtuple of the input/index tensors to be embedded (GlyphTuple)"""
        embeddable_data = {}
        # Only flatten the data we want
        for key, value in inputs.items():
            if key in self.embeddings:
                # -- [ T x B x ...] -> [ B' x ... ]
                embeddable_data[key] = torch.flatten(value, 0, 1).long()

        # add our group id and subgroup id if we want them
        if self.requires_id_pairs_table:
            ids, groups = self.glyphs_to_idgroup(inputs["glyphs"])
            embeddable_data["groups"] = groups
            embeddable_data["subgroup_ids"] = ids

        # convert embeddable_data to a named tuple
        return self.GlyphTuple(**embeddable_data)

    def forward(self, data_tuple):
        """Embed the GlyphTuple prepared in prepare_input and return the
        combined embedding."""
        embs = []
        for field, data in zip(self.targets, data_tuple):
            embs.append(self._select(self.embeddings[field], data))

        if len(embs) == 1:
            return embs[0]

        embedded = torch.cat(embs, dim=-1)
        if self.strategy.do_linear_layer:
            embedded = self.linear(embedded)
        return embedded

    def _select(self, embedding_layer, x):
        if self.use_index_select:
            out = embedding_layer.weight.index_select(0, x.view(-1))
            # handle reshaping x to 1-d and output back to N-d
            return out.view(x.shape + (-1,))
        else:
            return embedding_layer(x)

    def glyphs_to_idgroup(self, glyphs):
        T, B, H, W = glyphs.shape
        ids_groups = self.id_pairs_table.index_select(
            0, glyphs.view(-1).long()
        )
        ids = ids_groups.select(1, 0).view(T * B, H, W).long()
        groups = ids_groups.select(1, 1).view(T * B, H, W).long()
        return (ids, groups)
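

# NOTE: illustrative usage sketch added for clarity; it is not part of the
# original module. The shapes and hyperparameters below are assumptions
# chosen only for demonstration. The "color_char" strategy is used because
# it does not need the NetHack id_pairs_table, only color/char/special
# index tensors of shape [T, B, H, W].
if __name__ == "__main__":
    T, B, H, W = 2, 4, 21, 79
    emb = GlyphEmbedding("color_char", dimension=32, use_index_select=False)
    obs = {
        "colors": torch.randint(0, 16, (T, B, H, W)),
        "chars": torch.randint(0, 256, (T, B, H, W)),
        "specials": torch.randint(0, 256, (T, B, H, W)),
    }
    glyph_tuple = emb.prepare_input(obs)  # namedtuple of flattened index tensors
    out = emb(glyph_tuple)  # three embeddings concatenated, then projected
    print(out.shape)  # -> torch.Size([T * B, H, W, 32])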
Lib/test/test_types.py
Leonardofreua/RustPython
11058
131338
# Python test set -- part 6, built-in types from test.support import run_with_locale import collections.abc import inspect import pickle import locale import sys import types import unittest.mock import weakref class TypesTests(unittest.TestCase): def test_truth_values(self): if None: self.fail('None is true instead of false') if 0: self.fail('0 is true instead of false') if 0.0: self.fail('0.0 is true instead of false') if '': self.fail('\'\' is true instead of false') if not 1: self.fail('1 is false instead of true') if not 1.0: self.fail('1.0 is false instead of true') if not 'x': self.fail('\'x\' is false instead of true') if not {'x': 1}: self.fail('{\'x\': 1} is false instead of true') def f(): pass class C: pass x = C() if not f: self.fail('f is false instead of true') if not C: self.fail('C is false instead of true') if not sys: self.fail('sys is false instead of true') if not x: self.fail('x is false instead of true') def test_boolean_ops(self): if 0 or 0: self.fail('0 or 0 is true instead of false') if 1 and 1: pass else: self.fail('1 and 1 is false instead of true') if not 1: self.fail('not 1 is true instead of false') def test_comparisons(self): if 0 < 1 <= 1 == 1 >= 1 > 0 != 1: pass else: self.fail('int comparisons failed') if 0.0 < 1.0 <= 1.0 == 1.0 >= 1.0 > 0.0 != 1.0: pass else: self.fail('float comparisons failed') if '' < 'a' <= 'a' == 'a' < 'abc' < 'abd' < 'b': pass else: self.fail('string comparisons failed') if None is None: pass else: self.fail('identity test failed') def test_float_constructor(self): self.assertRaises(ValueError, float, '') self.assertRaises(ValueError, float, '5\0') self.assertRaises(ValueError, float, '5_5\0') def test_zero_division(self): try: 5.0 / 0.0 except ZeroDivisionError: pass else: self.fail("5.0 / 0.0 didn't raise ZeroDivisionError") try: 5.0 // 0.0 except ZeroDivisionError: pass else: self.fail("5.0 // 0.0 didn't raise ZeroDivisionError") try: 5.0 % 0.0 except ZeroDivisionError: pass else: self.fail("5.0 % 0.0 didn't raise ZeroDivisionError") try: 5 / 0 except ZeroDivisionError: pass else: self.fail("5 / 0 didn't raise ZeroDivisionError") try: 5 // 0 except ZeroDivisionError: pass else: self.fail("5 // 0 didn't raise ZeroDivisionError") try: 5 % 0 except ZeroDivisionError: pass else: self.fail("5 % 0 didn't raise ZeroDivisionError") def test_numeric_types(self): if 0 != 0.0 or 1 != 1.0 or -1 != -1.0: self.fail('int/float value not equal') # calling built-in types without argument must return 0 if int() != 0: self.fail('int() does not return 0') if float() != 0.0: self.fail('float() does not return 0.0') if int(1.9) == 1 == int(1.1) and int(-1.1) == -1 == int(-1.9): pass else: self.fail('int() does not round properly') if float(1) == 1.0 and float(-1) == -1.0 and float(0) == 0.0: pass else: self.fail('float() does not work properly') # TODO: RUSTPYTHON @unittest.expectedFailure def test_float_to_string(self): def test(f, result): self.assertEqual(f.__format__('e'), result) self.assertEqual('%e' % f, result) # test all 2 digit exponents, both with __format__ and with # '%' formatting for i in range(-99, 100): test(float('1.5e'+str(i)), '1.500000e{0:+03d}'.format(i)) # test some 3 digit exponents self.assertEqual(1.5e100.__format__('e'), '1.500000e+100') self.assertEqual('%e' % 1.5e100, '1.500000e+100') self.assertEqual(1.5e101.__format__('e'), '1.500000e+101') self.assertEqual('%e' % 1.5e101, '1.500000e+101') self.assertEqual(1.5e-100.__format__('e'), '1.500000e-100') self.assertEqual('%e' % 1.5e-100, '1.500000e-100') 
self.assertEqual(1.5e-101.__format__('e'), '1.500000e-101') self.assertEqual('%e' % 1.5e-101, '1.500000e-101') self.assertEqual('%g' % 1.0, '1') self.assertEqual('%#g' % 1.0, '1.00000') def test_normal_integers(self): # Ensure the first 256 integers are shared a = 256 b = 128*2 if a is not b: self.fail('256 is not shared') if 12 + 24 != 36: self.fail('int op') if 12 + (-24) != -12: self.fail('int op') if (-12) + 24 != 12: self.fail('int op') if (-12) + (-24) != -36: self.fail('int op') if not 12 < 24: self.fail('int op') if not -24 < -12: self.fail('int op') # Test for a particular bug in integer multiply xsize, ysize, zsize = 238, 356, 4 if not (xsize*ysize*zsize == zsize*xsize*ysize == 338912): self.fail('int mul commutativity') # And another. m = -sys.maxsize - 1 for divisor in 1, 2, 4, 8, 16, 32: j = m // divisor prod = divisor * j if prod != m: self.fail("%r * %r == %r != %r" % (divisor, j, prod, m)) if type(prod) is not int: self.fail("expected type(prod) to be int, not %r" % type(prod)) # Check for unified integral type for divisor in 1, 2, 4, 8, 16, 32: j = m // divisor - 1 prod = divisor * j if type(prod) is not int: self.fail("expected type(%r) to be int, not %r" % (prod, type(prod))) # Check for unified integral type m = sys.maxsize for divisor in 1, 2, 4, 8, 16, 32: j = m // divisor + 1 prod = divisor * j if type(prod) is not int: self.fail("expected type(%r) to be int, not %r" % (prod, type(prod))) x = sys.maxsize self.assertIsInstance(x + 1, int, "(sys.maxsize + 1) should have returned int") self.assertIsInstance(-x - 1, int, "(-sys.maxsize - 1) should have returned int") self.assertIsInstance(-x - 2, int, "(-sys.maxsize - 2) should have returned int") try: 5 << -5 except ValueError: pass else: self.fail('int negative shift <<') try: 5 >> -5 except ValueError: pass else: self.fail('int negative shift >>') def test_floats(self): if 12.0 + 24.0 != 36.0: self.fail('float op') if 12.0 + (-24.0) != -12.0: self.fail('float op') if (-12.0) + 24.0 != 12.0: self.fail('float op') if (-12.0) + (-24.0) != -36.0: self.fail('float op') if not 12.0 < 24.0: self.fail('float op') if not -24.0 < -12.0: self.fail('float op') def test_strings(self): if len('') != 0: self.fail('len(\'\')') if len('a') != 1: self.fail('len(\'a\')') if len('abcdef') != 6: self.fail('len(\'abcdef\')') if 'xyz' + 'abcde' != 'xyzabcde': self.fail('string concatenation') if 'xyz'*3 != 'xyzxyzxyz': self.fail('string repetition *3') if 0*'abcde' != '': self.fail('string repetition 0*') if min('abc') != 'a' or max('abc') != 'c': self.fail('min/max string') if 'a' in 'abc' and 'b' in 'abc' and 'c' in 'abc' and 'd' not in 'abc': pass else: self.fail('in/not in string') x = 'x'*103 if '%s!'%x != x+'!': self.fail('nasty string formatting bug') #extended slices for strings a = '0123456789' self.assertEqual(a[::], a) self.assertEqual(a[::2], '02468') self.assertEqual(a[1::2], '13579') self.assertEqual(a[::-1],'9876543210') self.assertEqual(a[::-2], '97531') self.assertEqual(a[3::-2], '31') self.assertEqual(a[-100:100:], a) self.assertEqual(a[100:-100:-1], a[::-1]) self.assertEqual(a[-100:100:2], '02468') def test_type_function(self): self.assertRaises(TypeError, type, 1, 2) self.assertRaises(TypeError, type, 1, 2, 3, 4) # TODO: RUSTPYTHON @unittest.expectedFailure def test_int__format__(self): def test(i, format_spec, result): # just make sure we have the unified type for integers assert type(i) == int assert type(format_spec) == str self.assertEqual(i.__format__(format_spec), result) test(123456789, 'd', '123456789') 
test(123456789, 'd', '123456789') test(1, 'c', '\01') # sign and aligning are interdependent test(1, "-", '1') test(-1, "-", '-1') test(1, "-3", ' 1') test(-1, "-3", ' -1') test(1, "+3", ' +1') test(-1, "+3", ' -1') test(1, " 3", ' 1') test(-1, " 3", ' -1') test(1, " ", ' 1') test(-1, " ", '-1') # hex test(3, "x", "3") test(3, "X", "3") test(1234, "x", "4d2") test(-1234, "x", "-4d2") test(1234, "8x", " 4d2") test(-1234, "8x", " -4d2") test(1234, "x", "4d2") test(-1234, "x", "-4d2") test(-3, "x", "-3") test(-3, "X", "-3") test(int('be', 16), "x", "be") test(int('be', 16), "X", "BE") test(-int('be', 16), "x", "-be") test(-int('be', 16), "X", "-BE") # octal test(3, "o", "3") test(-3, "o", "-3") test(65, "o", "101") test(-65, "o", "-101") test(1234, "o", "2322") test(-1234, "o", "-2322") test(1234, "-o", "2322") test(-1234, "-o", "-2322") test(1234, " o", " 2322") test(-1234, " o", "-2322") test(1234, "+o", "+2322") test(-1234, "+o", "-2322") # binary test(3, "b", "11") test(-3, "b", "-11") test(1234, "b", "10011010010") test(-1234, "b", "-10011010010") test(1234, "-b", "10011010010") test(-1234, "-b", "-10011010010") test(1234, " b", " 10011010010") test(-1234, " b", "-10011010010") test(1234, "+b", "+10011010010") test(-1234, "+b", "-10011010010") # alternate (#) formatting test(0, "#b", '0b0') test(0, "-#b", '0b0') test(1, "-#b", '0b1') test(-1, "-#b", '-0b1') test(-1, "-#5b", ' -0b1') test(1, "+#5b", ' +0b1') test(100, "+#b", '+0b1100100') test(100, "#012b", '0b0001100100') test(-100, "#012b", '-0b001100100') test(0, "#o", '0o0') test(0, "-#o", '0o0') test(1, "-#o", '0o1') test(-1, "-#o", '-0o1') test(-1, "-#5o", ' -0o1') test(1, "+#5o", ' +0o1') test(100, "+#o", '+0o144') test(100, "#012o", '0o0000000144') test(-100, "#012o", '-0o000000144') test(0, "#x", '0x0') test(0, "-#x", '0x0') test(1, "-#x", '0x1') test(-1, "-#x", '-0x1') test(-1, "-#5x", ' -0x1') test(1, "+#5x", ' +0x1') test(100, "+#x", '+0x64') test(100, "#012x", '0x0000000064') test(-100, "#012x", '-0x000000064') test(123456, "#012x", '0x000001e240') test(-123456, "#012x", '-0x00001e240') test(0, "#X", '0X0') test(0, "-#X", '0X0') test(1, "-#X", '0X1') test(-1, "-#X", '-0X1') test(-1, "-#5X", ' -0X1') test(1, "+#5X", ' +0X1') test(100, "+#X", '+0X64') test(100, "#012X", '0X0000000064') test(-100, "#012X", '-0X000000064') test(123456, "#012X", '0X000001E240') test(-123456, "#012X", '-0X00001E240') test(123, ',', '123') test(-123, ',', '-123') test(1234, ',', '1,234') test(-1234, ',', '-1,234') test(123456, ',', '123,456') test(-123456, ',', '-123,456') test(1234567, ',', '1,234,567') test(-1234567, ',', '-1,234,567') # issue 5782, commas with no specifier type test(1234, '010,', '00,001,234') # Unified type for integers test(10**100, 'd', '1' + '0' * 100) test(10**100+100, 'd', '1' + '0' * 97 + '100') # make sure these are errors # precision disallowed self.assertRaises(ValueError, 3 .__format__, "1.3") # sign not allowed with 'c' self.assertRaises(ValueError, 3 .__format__, "+c") # format spec must be string self.assertRaises(TypeError, 3 .__format__, None) self.assertRaises(TypeError, 3 .__format__, 0) # can't have ',' with 'n' self.assertRaises(ValueError, 3 .__format__, ",n") # can't have ',' with 'c' self.assertRaises(ValueError, 3 .__format__, ",c") # can't have '#' with 'c' self.assertRaises(ValueError, 3 .__format__, "#c") # ensure that only int and float type specifiers work for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] + [chr(x) for x in range(ord('A'), ord('Z')+1)]): if not format_spec in 
'bcdoxXeEfFgGn%': self.assertRaises(ValueError, 0 .__format__, format_spec) self.assertRaises(ValueError, 1 .__format__, format_spec) self.assertRaises(ValueError, (-1) .__format__, format_spec) # ensure that float type specifiers work; format converts # the int to a float for format_spec in 'eEfFgG%': for value in [0, 1, -1, 100, -100, 1234567890, -1234567890]: self.assertEqual(value.__format__(format_spec), float(value).__format__(format_spec)) # Issue 6902 test(123456, "0<20", '12345600000000000000') test(123456, "1<20", '12345611111111111111') test(123456, "*<20", '123456**************') test(123456, "0>20", '00000000000000123456') test(123456, "1>20", '11111111111111123456') test(123456, "*>20", '**************123456') test(123456, "0=20", '00000000000000123456') test(123456, "1=20", '11111111111111123456') test(123456, "*=20", '**************123456') # TODO: RUSTPYTHON @unittest.expectedFailure @run_with_locale('LC_NUMERIC', 'en_US.UTF8') def test_float__format__locale(self): # test locale support for __format__ code 'n' for i in range(-10, 10): x = 1234567890.0 * (10.0 ** i) self.assertEqual(locale.format_string('%g', x, grouping=True), format(x, 'n')) self.assertEqual(locale.format_string('%.10g', x, grouping=True), format(x, '.10n')) @run_with_locale('LC_NUMERIC', 'en_US.UTF8') def test_int__format__locale(self): # test locale support for __format__ code 'n' for integers x = 123456789012345678901234567890 for i in range(0, 30): self.assertEqual(locale.format_string('%d', x, grouping=True), format(x, 'n')) # move to the next integer to test x = x // 10 rfmt = ">20n" lfmt = "<20n" cfmt = "^20n" for x in (1234, 12345, 123456, 1234567, 12345678, 123456789, 1234567890, 12345678900): self.assertEqual(len(format(0, rfmt)), len(format(x, rfmt))) self.assertEqual(len(format(0, lfmt)), len(format(x, lfmt))) self.assertEqual(len(format(0, cfmt)), len(format(x, cfmt))) # TODO: RUSTPYTHON @unittest.expectedFailure def test_float__format__(self): def test(f, format_spec, result): self.assertEqual(f.__format__(format_spec), result) self.assertEqual(format(f, format_spec), result) test(0.0, 'f', '0.000000') # the default is 'g', except for empty format spec test(0.0, '', '0.0') test(0.01, '', '0.01') test(0.01, 'g', '0.01') # test for issue 3411 test(1.23, '1', '1.23') test(-1.23, '1', '-1.23') test(1.23, '1g', '1.23') test(-1.23, '1g', '-1.23') test( 1.0, ' g', ' 1') test(-1.0, ' g', '-1') test( 1.0, '+g', '+1') test(-1.0, '+g', '-1') test(1.1234e200, 'g', '1.1234e+200') test(1.1234e200, 'G', '1.1234E+200') test(1.0, 'f', '1.000000') test(-1.0, 'f', '-1.000000') test( 1.0, ' f', ' 1.000000') test(-1.0, ' f', '-1.000000') test( 1.0, '+f', '+1.000000') test(-1.0, '+f', '-1.000000') # Python versions <= 3.0 switched from 'f' to 'g' formatting for # values larger than 1e50. No longer. 
f = 1.1234e90 for fmt in 'f', 'F': # don't do a direct equality check, since on some # platforms only the first few digits of dtoa # will be reliable result = f.__format__(fmt) self.assertEqual(len(result), 98) self.assertEqual(result[-7], '.') self.assertIn(result[:12], ('112340000000', '112339999999')) f = 1.1234e200 for fmt in 'f', 'F': result = f.__format__(fmt) self.assertEqual(len(result), 208) self.assertEqual(result[-7], '.') self.assertIn(result[:12], ('112340000000', '112339999999')) test( 1.0, 'e', '1.000000e+00') test(-1.0, 'e', '-1.000000e+00') test( 1.0, 'E', '1.000000E+00') test(-1.0, 'E', '-1.000000E+00') test(1.1234e20, 'e', '1.123400e+20') test(1.1234e20, 'E', '1.123400E+20') # No format code means use g, but must have a decimal # and a number after the decimal. This is tricky, because # a totally empty format specifier means something else. # So, just use a sign flag test(1e200, '+g', '+1e+200') test(1e200, '+', '+1e+200') test(1.1e200, '+g', '+1.1e+200') test(1.1e200, '+', '+1.1e+200') # 0 padding test(1234., '010f', '1234.000000') test(1234., '011f', '1234.000000') test(1234., '012f', '01234.000000') test(-1234., '011f', '-1234.000000') test(-1234., '012f', '-1234.000000') test(-1234., '013f', '-01234.000000') test(-1234.12341234, '013f', '-01234.123412') test(-123456.12341234, '011.2f', '-0123456.12') # issue 5782, commas with no specifier type test(1.2, '010,.2', '0,000,001.2') # 0 padding with commas test(1234., '011,f', '1,234.000000') test(1234., '012,f', '1,234.000000') test(1234., '013,f', '01,234.000000') test(-1234., '012,f', '-1,234.000000') test(-1234., '013,f', '-1,234.000000') test(-1234., '014,f', '-01,234.000000') test(-12345., '015,f', '-012,345.000000') test(-123456., '016,f', '-0,123,456.000000') test(-123456., '017,f', '-0,123,456.000000') test(-123456.12341234, '017,f', '-0,123,456.123412') test(-123456.12341234, '013,.2f', '-0,123,456.12') # % formatting test(-1.0, '%', '-100.000000%') # format spec must be string self.assertRaises(TypeError, 3.0.__format__, None) self.assertRaises(TypeError, 3.0.__format__, 0) # other format specifiers shouldn't work on floats, # in particular int specifiers for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] + [chr(x) for x in range(ord('A'), ord('Z')+1)]): if not format_spec in 'eEfFgGn%': self.assertRaises(ValueError, format, 0.0, format_spec) self.assertRaises(ValueError, format, 1.0, format_spec) self.assertRaises(ValueError, format, -1.0, format_spec) self.assertRaises(ValueError, format, 1e100, format_spec) self.assertRaises(ValueError, format, -1e100, format_spec) self.assertRaises(ValueError, format, 1e-100, format_spec) self.assertRaises(ValueError, format, -1e-100, format_spec) # Alternate float formatting test(1.0, '.0e', '1e+00') test(1.0, '#.0e', '1.e+00') test(1.0, '.0f', '1') test(1.0, '#.0f', '1.') test(1.1, 'g', '1.1') test(1.1, '#g', '1.10000') test(1.0, '.0%', '100%') test(1.0, '#.0%', '100.%') # Issue 7094: Alternate formatting (specified by #) test(1.0, '0e', '1.000000e+00') test(1.0, '#0e', '1.000000e+00') test(1.0, '0f', '1.000000' ) test(1.0, '#0f', '1.000000') test(1.0, '.1e', '1.0e+00') test(1.0, '#.1e', '1.0e+00') test(1.0, '.1f', '1.0') test(1.0, '#.1f', '1.0') test(1.0, '.1%', '100.0%') test(1.0, '#.1%', '100.0%') # Issue 6902 test(12345.6, "0<20", '12345.60000000000000') test(12345.6, "1<20", '12345.61111111111111') test(12345.6, "*<20", '12345.6*************') test(12345.6, "0>20", '000000000000012345.6') test(12345.6, "1>20", '111111111111112345.6') test(12345.6, 
"*>20", '*************12345.6') test(12345.6, "0=20", '000000000000012345.6') test(12345.6, "1=20", '111111111111112345.6') test(12345.6, "*=20", '*************12345.6') # TODO: RUSTPYTHON @unittest.expectedFailure def test_format_spec_errors(self): # int, float, and string all share the same format spec # mini-language parser. # Check that we can't ask for too many digits. This is # probably a CPython specific test. It tries to put the width # into a C long. self.assertRaises(ValueError, format, 0, '1'*10000 + 'd') # Similar with the precision. self.assertRaises(ValueError, format, 0, '.' + '1'*10000 + 'd') # And may as well test both. self.assertRaises(ValueError, format, 0, '1'*1000 + '.' + '1'*10000 + 'd') # Make sure commas aren't allowed with various type codes for code in 'xXobns': self.assertRaises(ValueError, format, 0, ',' + code) # TODO: RUSTPYTHON @unittest.expectedFailure def test_internal_sizes(self): self.assertGreater(object.__basicsize__, 0) self.assertGreater(tuple.__itemsize__, 0) def test_slot_wrapper_types(self): self.assertIsInstance(object.__init__, types.WrapperDescriptorType) self.assertIsInstance(object.__str__, types.WrapperDescriptorType) self.assertIsInstance(object.__lt__, types.WrapperDescriptorType) self.assertIsInstance(int.__lt__, types.WrapperDescriptorType) def test_method_wrapper_types(self): self.assertIsInstance(object().__init__, types.MethodWrapperType) self.assertIsInstance(object().__str__, types.MethodWrapperType) self.assertIsInstance(object().__lt__, types.MethodWrapperType) self.assertIsInstance((42).__lt__, types.MethodWrapperType) def test_method_descriptor_types(self): self.assertIsInstance(str.join, types.MethodDescriptorType) self.assertIsInstance(list.append, types.MethodDescriptorType) self.assertIsInstance(''.join, types.BuiltinMethodType) self.assertIsInstance([].append, types.BuiltinMethodType) self.assertIsInstance(int.__dict__['from_bytes'], types.ClassMethodDescriptorType) self.assertIsInstance(int.from_bytes, types.BuiltinMethodType) self.assertIsInstance(int.__new__, types.BuiltinMethodType) class MappingProxyTests(unittest.TestCase): mappingproxy = types.MappingProxyType # TODO: RUSTPYTHON @unittest.expectedFailure def test_constructor(self): class userdict(dict): pass mapping = {'x': 1, 'y': 2} self.assertEqual(self.mappingproxy(mapping), mapping) mapping = userdict(x=1, y=2) self.assertEqual(self.mappingproxy(mapping), mapping) mapping = collections.ChainMap({'x': 1}, {'y': 2}) self.assertEqual(self.mappingproxy(mapping), mapping) self.assertRaises(TypeError, self.mappingproxy, 10) self.assertRaises(TypeError, self.mappingproxy, ("a", "tuple")) self.assertRaises(TypeError, self.mappingproxy, ["a", "list"]) # TODO: RUSTPYTHON @unittest.expectedFailure def test_methods(self): attrs = set(dir(self.mappingproxy({}))) - set(dir(object())) self.assertEqual(attrs, { '__contains__', '__getitem__', '__class_getitem__', '__ior__', '__iter__', '__len__', '__or__', '__reversed__', '__ror__', 'copy', 'get', 'items', 'keys', 'values', }) def test_get(self): view = self.mappingproxy({'a': 'A', 'b': 'B'}) self.assertEqual(view['a'], 'A') self.assertEqual(view['b'], 'B') self.assertRaises(KeyError, view.__getitem__, 'xxx') self.assertEqual(view.get('a'), 'A') self.assertIsNone(view.get('xxx')) self.assertEqual(view.get('xxx', 42), 42) # TODO: RUSTPYTHON @unittest.expectedFailure def test_missing(self): class dictmissing(dict): def __missing__(self, key): return "missing=%s" % key view = self.mappingproxy(dictmissing(x=1)) 
self.assertEqual(view['x'], 1) self.assertEqual(view['y'], 'missing=y') self.assertEqual(view.get('x'), 1) self.assertEqual(view.get('y'), None) self.assertEqual(view.get('y', 42), 42) self.assertTrue('x' in view) self.assertFalse('y' in view) # TODO: RUSTPYTHON @unittest.expectedFailure def test_customdict(self): class customdict(dict): def __contains__(self, key): if key == 'magic': return True else: return dict.__contains__(self, key) def __iter__(self): return iter(('iter',)) def __len__(self): return 500 def copy(self): return 'copy' def keys(self): return 'keys' def items(self): return 'items' def values(self): return 'values' def __getitem__(self, key): return "getitem=%s" % dict.__getitem__(self, key) def get(self, key, default=None): return "get=%s" % dict.get(self, key, 'default=%r' % default) custom = customdict({'key': 'value'}) view = self.mappingproxy(custom) self.assertTrue('key' in view) self.assertTrue('magic' in view) self.assertFalse('xxx' in view) self.assertEqual(view['key'], 'getitem=value') self.assertRaises(KeyError, view.__getitem__, 'xxx') self.assertEqual(tuple(view), ('iter',)) self.assertEqual(len(view), 500) self.assertEqual(view.copy(), 'copy') self.assertEqual(view.get('key'), 'get=value') self.assertEqual(view.get('xxx'), 'get=default=None') self.assertEqual(view.items(), 'items') self.assertEqual(view.keys(), 'keys') self.assertEqual(view.values(), 'values') # TODO: RUSTPYTHON @unittest.expectedFailure def test_chainmap(self): d1 = {'x': 1} d2 = {'y': 2} mapping = collections.ChainMap(d1, d2) view = self.mappingproxy(mapping) self.assertTrue('x' in view) self.assertTrue('y' in view) self.assertFalse('z' in view) self.assertEqual(view['x'], 1) self.assertEqual(view['y'], 2) self.assertRaises(KeyError, view.__getitem__, 'z') self.assertEqual(tuple(sorted(view)), ('x', 'y')) self.assertEqual(len(view), 2) copy = view.copy() self.assertIsNot(copy, mapping) self.assertIsInstance(copy, collections.ChainMap) self.assertEqual(copy, mapping) self.assertEqual(view.get('x'), 1) self.assertEqual(view.get('y'), 2) self.assertIsNone(view.get('z')) self.assertEqual(tuple(sorted(view.items())), (('x', 1), ('y', 2))) self.assertEqual(tuple(sorted(view.keys())), ('x', 'y')) self.assertEqual(tuple(sorted(view.values())), (1, 2)) def test_contains(self): view = self.mappingproxy(dict.fromkeys('abc')) self.assertTrue('a' in view) self.assertTrue('b' in view) self.assertTrue('c' in view) self.assertFalse('xxx' in view) def test_views(self): mapping = {} view = self.mappingproxy(mapping) keys = view.keys() values = view.values() items = view.items() self.assertEqual(list(keys), []) self.assertEqual(list(values), []) self.assertEqual(list(items), []) mapping['key'] = 'value' self.assertEqual(list(keys), ['key']) self.assertEqual(list(values), ['value']) self.assertEqual(list(items), [('key', 'value')]) # TODO: RUSTPYTHON @unittest.expectedFailure def test_len(self): for expected in range(6): data = dict.fromkeys('abcde'[:expected]) self.assertEqual(len(data), expected) view = self.mappingproxy(data) self.assertEqual(len(view), expected) def test_iterators(self): keys = ('x', 'y') values = (1, 2) items = tuple(zip(keys, values)) view = self.mappingproxy(dict(items)) self.assertEqual(set(view), set(keys)) self.assertEqual(set(view.keys()), set(keys)) self.assertEqual(set(view.values()), set(values)) self.assertEqual(set(view.items()), set(items)) # TODO: RUSTPYTHON @unittest.expectedFailure def test_reversed(self): d = {'a': 1, 'b': 2, 'foo': 0, 'c': 3, 'd': 4} mp = 
self.mappingproxy(d) del d['foo'] r = reversed(mp) self.assertEqual(list(r), list('dcba')) self.assertRaises(StopIteration, next, r) def test_copy(self): original = {'key1': 27, 'key2': 51, 'key3': 93} view = self.mappingproxy(original) copy = view.copy() self.assertEqual(type(copy), dict) self.assertEqual(copy, original) original['key1'] = 70 self.assertEqual(view['key1'], 70) self.assertEqual(copy['key1'], 27) # TODO: RUSTPYTHON @unittest.expectedFailure def test_union(self): mapping = {'a': 0, 'b': 1, 'c': 2} view = self.mappingproxy(mapping) with self.assertRaises(TypeError): view | [('r', 2), ('d', 2)] with self.assertRaises(TypeError): [('r', 2), ('d', 2)] | view with self.assertRaises(TypeError): view |= [('r', 2), ('d', 2)] other = {'c': 3, 'p': 0} self.assertDictEqual(view | other, {'a': 0, 'b': 1, 'c': 3, 'p': 0}) self.assertDictEqual(other | view, {'c': 2, 'p': 0, 'a': 0, 'b': 1}) self.assertEqual(view, {'a': 0, 'b': 1, 'c': 2}) self.assertDictEqual(mapping, {'a': 0, 'b': 1, 'c': 2}) self.assertDictEqual(other, {'c': 3, 'p': 0}) class ClassCreationTests(unittest.TestCase): class Meta(type): def __init__(cls, name, bases, ns, **kw): super().__init__(name, bases, ns) @staticmethod def __new__(mcls, name, bases, ns, **kw): return super().__new__(mcls, name, bases, ns) @classmethod def __prepare__(mcls, name, bases, **kw): ns = super().__prepare__(name, bases) ns["y"] = 1 ns.update(kw) return ns def test_new_class_basics(self): C = types.new_class("C") self.assertEqual(C.__name__, "C") self.assertEqual(C.__bases__, (object,)) def test_new_class_subclass(self): C = types.new_class("C", (int,)) self.assertTrue(issubclass(C, int)) def test_new_class_meta(self): Meta = self.Meta settings = {"metaclass": Meta, "z": 2} # We do this twice to make sure the passed in dict isn't mutated for i in range(2): C = types.new_class("C" + str(i), (), settings) self.assertIsInstance(C, Meta) self.assertEqual(C.y, 1) self.assertEqual(C.z, 2) def test_new_class_exec_body(self): Meta = self.Meta def func(ns): ns["x"] = 0 C = types.new_class("C", (), {"metaclass": Meta, "z": 2}, func) self.assertIsInstance(C, Meta) self.assertEqual(C.x, 0) self.assertEqual(C.y, 1) self.assertEqual(C.z, 2) def test_new_class_metaclass_keywords(self): #Test that keywords are passed to the metaclass: def meta_func(name, bases, ns, **kw): return name, bases, ns, kw res = types.new_class("X", (int, object), dict(metaclass=meta_func, x=0)) self.assertEqual(res, ("X", (int, object), {}, {"x": 0})) def test_new_class_defaults(self): # Test defaults/keywords: C = types.new_class("C", (), {}, None) self.assertEqual(C.__name__, "C") self.assertEqual(C.__bases__, (object,)) def test_new_class_meta_with_base(self): Meta = self.Meta def func(ns): ns["x"] = 0 C = types.new_class(name="C", bases=(int,), kwds=dict(metaclass=Meta, z=2), exec_body=func) self.assertTrue(issubclass(C, int)) self.assertIsInstance(C, Meta) self.assertEqual(C.x, 0) self.assertEqual(C.y, 1) self.assertEqual(C.z, 2) def test_new_class_with_mro_entry(self): class A: pass class C: def __mro_entries__(self, bases): return (A,) c = C() D = types.new_class('D', (c,), {}) self.assertEqual(D.__bases__, (A,)) self.assertEqual(D.__orig_bases__, (c,)) self.assertEqual(D.__mro__, (D, A, object)) def test_new_class_with_mro_entry_none(self): class A: pass class B: pass class C: def __mro_entries__(self, bases): return () c = C() D = types.new_class('D', (A, c, B), {}) self.assertEqual(D.__bases__, (A, B)) self.assertEqual(D.__orig_bases__, (A, c, B)) 
self.assertEqual(D.__mro__, (D, A, B, object)) def test_new_class_with_mro_entry_error(self): class A: pass class C: def __mro_entries__(self, bases): return A c = C() with self.assertRaises(TypeError): types.new_class('D', (c,), {}) def test_new_class_with_mro_entry_multiple(self): class A1: pass class A2: pass class B1: pass class B2: pass class A: def __mro_entries__(self, bases): return (A1, A2) class B: def __mro_entries__(self, bases): return (B1, B2) D = types.new_class('D', (A(), B()), {}) self.assertEqual(D.__bases__, (A1, A2, B1, B2)) def test_new_class_with_mro_entry_multiple_2(self): class A1: pass class A2: pass class A3: pass class B1: pass class B2: pass class A: def __mro_entries__(self, bases): return (A1, A2, A3) class B: def __mro_entries__(self, bases): return (B1, B2) class C: pass D = types.new_class('D', (A(), C, B()), {}) self.assertEqual(D.__bases__, (A1, A2, A3, C, B1, B2)) # Many of the following tests are derived from test_descr.py def test_prepare_class(self): # Basic test of metaclass derivation expected_ns = {} class A(type): def __new__(*args, **kwargs): return type.__new__(*args, **kwargs) def __prepare__(*args): return expected_ns B = types.new_class("B", (object,)) C = types.new_class("C", (object,), {"metaclass": A}) # The most derived metaclass of D is A rather than type. meta, ns, kwds = types.prepare_class("D", (B, C), {"metaclass": type}) self.assertIs(meta, A) self.assertIs(ns, expected_ns) self.assertEqual(len(kwds), 0) def test_bad___prepare__(self): # __prepare__() must return a mapping. class BadMeta(type): @classmethod def __prepare__(*args): return None with self.assertRaisesRegex(TypeError, r'^BadMeta\.__prepare__\(\) must ' r'return a mapping, not NoneType$'): class Foo(metaclass=BadMeta): pass # Also test the case in which the metaclass is not a type. 
class BadMeta: @classmethod def __prepare__(*args): return None with self.assertRaisesRegex(TypeError, r'^<metaclass>\.__prepare__\(\) must ' r'return a mapping, not NoneType$'): class Bar(metaclass=BadMeta()): pass def test_resolve_bases(self): class A: pass class B: pass class C: def __mro_entries__(self, bases): if A in bases: return () return (A,) c = C() self.assertEqual(types.resolve_bases(()), ()) self.assertEqual(types.resolve_bases((c,)), (A,)) self.assertEqual(types.resolve_bases((C,)), (C,)) self.assertEqual(types.resolve_bases((A, C)), (A, C)) self.assertEqual(types.resolve_bases((c, A)), (A,)) self.assertEqual(types.resolve_bases((A, c)), (A,)) x = (A,) y = (C,) z = (A, C) t = (A, C, B) for bases in [x, y, z, t]: self.assertIs(types.resolve_bases(bases), bases) def test_metaclass_derivation(self): # issue1294232: correct metaclass calculation new_calls = [] # to check the order of __new__ calls class AMeta(type): def __new__(mcls, name, bases, ns): new_calls.append('AMeta') return super().__new__(mcls, name, bases, ns) @classmethod def __prepare__(mcls, name, bases): return {} class BMeta(AMeta): def __new__(mcls, name, bases, ns): new_calls.append('BMeta') return super().__new__(mcls, name, bases, ns) @classmethod def __prepare__(mcls, name, bases): ns = super().__prepare__(name, bases) ns['BMeta_was_here'] = True return ns A = types.new_class("A", (), {"metaclass": AMeta}) self.assertEqual(new_calls, ['AMeta']) new_calls.clear() B = types.new_class("B", (), {"metaclass": BMeta}) # BMeta.__new__ calls AMeta.__new__ with super: self.assertEqual(new_calls, ['BMeta', 'AMeta']) new_calls.clear() C = types.new_class("C", (A, B)) # The most derived metaclass is BMeta: self.assertEqual(new_calls, ['BMeta', 'AMeta']) new_calls.clear() # BMeta.__prepare__ should've been called: self.assertIn('BMeta_was_here', C.__dict__) # The order of the bases shouldn't matter: C2 = types.new_class("C2", (B, A)) self.assertEqual(new_calls, ['BMeta', 'AMeta']) new_calls.clear() self.assertIn('BMeta_was_here', C2.__dict__) # Check correct metaclass calculation when a metaclass is declared: D = types.new_class("D", (C,), {"metaclass": type}) self.assertEqual(new_calls, ['BMeta', 'AMeta']) new_calls.clear() self.assertIn('BMeta_was_here', D.__dict__) E = types.new_class("E", (C,), {"metaclass": AMeta}) self.assertEqual(new_calls, ['BMeta', 'AMeta']) new_calls.clear() self.assertIn('BMeta_was_here', E.__dict__) def test_metaclass_override_function(self): # Special case: the given metaclass isn't a class, # so there is no metaclass calculation. class A(metaclass=self.Meta): pass marker = object() def func(*args, **kwargs): return marker X = types.new_class("X", (), {"metaclass": func}) Y = types.new_class("Y", (object,), {"metaclass": func}) Z = types.new_class("Z", (A,), {"metaclass": func}) self.assertIs(marker, X) self.assertIs(marker, Y) self.assertIs(marker, Z) def test_metaclass_override_callable(self): # The given metaclass is a class, # but not a descendant of type. 
new_calls = [] # to check the order of __new__ calls prepare_calls = [] # to track __prepare__ calls class ANotMeta: def __new__(mcls, *args, **kwargs): new_calls.append('ANotMeta') return super().__new__(mcls) @classmethod def __prepare__(mcls, name, bases): prepare_calls.append('ANotMeta') return {} class BNotMeta(ANotMeta): def __new__(mcls, *args, **kwargs): new_calls.append('BNotMeta') return super().__new__(mcls) @classmethod def __prepare__(mcls, name, bases): prepare_calls.append('BNotMeta') return super().__prepare__(name, bases) A = types.new_class("A", (), {"metaclass": ANotMeta}) self.assertIs(ANotMeta, type(A)) self.assertEqual(prepare_calls, ['ANotMeta']) prepare_calls.clear() self.assertEqual(new_calls, ['ANotMeta']) new_calls.clear() B = types.new_class("B", (), {"metaclass": BNotMeta}) self.assertIs(BNotMeta, type(B)) self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta']) prepare_calls.clear() self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta']) new_calls.clear() C = types.new_class("C", (A, B)) self.assertIs(BNotMeta, type(C)) self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta']) prepare_calls.clear() self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta']) new_calls.clear() C2 = types.new_class("C2", (B, A)) self.assertIs(BNotMeta, type(C2)) self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta']) prepare_calls.clear() self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta']) new_calls.clear() # This is a TypeError, because of a metaclass conflict: # BNotMeta is neither a subclass, nor a superclass of type with self.assertRaises(TypeError): D = types.new_class("D", (C,), {"metaclass": type}) E = types.new_class("E", (C,), {"metaclass": ANotMeta}) self.assertIs(BNotMeta, type(E)) self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta']) prepare_calls.clear() self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta']) new_calls.clear() F = types.new_class("F", (object(), C)) self.assertIs(BNotMeta, type(F)) self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta']) prepare_calls.clear() self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta']) new_calls.clear() F2 = types.new_class("F2", (C, object())) self.assertIs(BNotMeta, type(F2)) self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta']) prepare_calls.clear() self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta']) new_calls.clear() # TypeError: BNotMeta is neither a # subclass, nor a superclass of int with self.assertRaises(TypeError): X = types.new_class("X", (C, int())) with self.assertRaises(TypeError): X = types.new_class("X", (int(), C)) def test_one_argument_type(self): expected_message = 'type.__new__() takes exactly 3 arguments (1 given)' # Only type itself can use the one-argument form (#27157) self.assertIs(type(5), int) class M(type): pass with self.assertRaises(TypeError) as cm: M(5) self.assertEqual(str(cm.exception), expected_message) class N(type, metaclass=M): pass with self.assertRaises(TypeError) as cm: N(5) self.assertEqual(str(cm.exception), expected_message) class SimpleNamespaceTests(unittest.TestCase): def test_constructor(self): ns1 = types.SimpleNamespace() ns2 = types.SimpleNamespace(x=1, y=2) ns3 = types.SimpleNamespace(**dict(x=1, y=2)) with self.assertRaises(TypeError): types.SimpleNamespace(1, 2, 3) with self.assertRaises(TypeError): types.SimpleNamespace(**{1: 2}) self.assertEqual(len(ns1.__dict__), 0) self.assertEqual(vars(ns1), {}) self.assertEqual(len(ns2.__dict__), 2) self.assertEqual(vars(ns2), {'y': 2, 'x': 1}) self.assertEqual(len(ns3.__dict__), 2) self.assertEqual(vars(ns3), {'y': 2, 'x': 
1}) def test_unbound(self): ns1 = vars(types.SimpleNamespace()) ns2 = vars(types.SimpleNamespace(x=1, y=2)) self.assertEqual(ns1, {}) self.assertEqual(ns2, {'y': 2, 'x': 1}) def test_underlying_dict(self): ns1 = types.SimpleNamespace() ns2 = types.SimpleNamespace(x=1, y=2) ns3 = types.SimpleNamespace(a=True, b=False) mapping = ns3.__dict__ del ns3 self.assertEqual(ns1.__dict__, {}) self.assertEqual(ns2.__dict__, {'y': 2, 'x': 1}) self.assertEqual(mapping, dict(a=True, b=False)) def test_attrget(self): ns = types.SimpleNamespace(x=1, y=2, w=3) self.assertEqual(ns.x, 1) self.assertEqual(ns.y, 2) self.assertEqual(ns.w, 3) with self.assertRaises(AttributeError): ns.z def test_attrset(self): ns1 = types.SimpleNamespace() ns2 = types.SimpleNamespace(x=1, y=2, w=3) ns1.a = 'spam' ns1.b = 'ham' ns2.z = 4 ns2.theta = None self.assertEqual(ns1.__dict__, dict(a='spam', b='ham')) self.assertEqual(ns2.__dict__, dict(x=1, y=2, w=3, z=4, theta=None)) def test_attrdel(self): ns1 = types.SimpleNamespace() ns2 = types.SimpleNamespace(x=1, y=2, w=3) with self.assertRaises(AttributeError): del ns1.spam with self.assertRaises(AttributeError): del ns2.spam del ns2.y self.assertEqual(vars(ns2), dict(w=3, x=1)) ns2.y = 'spam' self.assertEqual(vars(ns2), dict(w=3, x=1, y='spam')) del ns2.y self.assertEqual(vars(ns2), dict(w=3, x=1)) ns1.spam = 5 self.assertEqual(vars(ns1), dict(spam=5)) del ns1.spam self.assertEqual(vars(ns1), {}) # TODO: RUSTPYTHON @unittest.expectedFailure def test_repr(self): ns1 = types.SimpleNamespace(x=1, y=2, w=3) ns2 = types.SimpleNamespace() ns2.x = "spam" ns2._y = 5 name = "namespace" self.assertEqual(repr(ns1), "{name}(x=1, y=2, w=3)".format(name=name)) self.assertEqual(repr(ns2), "{name}(x='spam', _y=5)".format(name=name)) # TODO: RUSTPYTHON @unittest.expectedFailure def test_equal(self): ns1 = types.SimpleNamespace(x=1) ns2 = types.SimpleNamespace() ns2.x = 1 self.assertEqual(types.SimpleNamespace(), types.SimpleNamespace()) self.assertEqual(ns1, ns2) self.assertNotEqual(ns2, types.SimpleNamespace()) def test_nested(self): ns1 = types.SimpleNamespace(a=1, b=2) ns2 = types.SimpleNamespace() ns3 = types.SimpleNamespace(x=ns1) ns2.spam = ns1 ns2.ham = '?' 
ns2.spam = ns3 self.assertEqual(vars(ns1), dict(a=1, b=2)) self.assertEqual(vars(ns2), dict(spam=ns3, ham='?')) self.assertEqual(ns2.spam, ns3) self.assertEqual(vars(ns3), dict(x=ns1)) self.assertEqual(ns3.x.a, 1) def test_recursive(self): ns1 = types.SimpleNamespace(c='cookie') ns2 = types.SimpleNamespace() ns3 = types.SimpleNamespace(x=1) ns1.spam = ns1 ns2.spam = ns3 ns3.spam = ns2 self.assertEqual(ns1.spam, ns1) self.assertEqual(ns1.spam.spam, ns1) self.assertEqual(ns1.spam.spam, ns1.spam) self.assertEqual(ns2.spam, ns3) self.assertEqual(ns3.spam, ns2) self.assertEqual(ns2.spam.spam, ns2) # TODO: RUSTPYTHON @unittest.expectedFailure def test_recursive_repr(self): ns1 = types.SimpleNamespace(c='cookie') ns2 = types.SimpleNamespace() ns3 = types.SimpleNamespace(x=1) ns1.spam = ns1 ns2.spam = ns3 ns3.spam = ns2 name = "namespace" repr1 = "{name}(c='cookie', spam={name}(...))".format(name=name) repr2 = "{name}(spam={name}(x=1, spam={name}(...)))".format(name=name) self.assertEqual(repr(ns1), repr1) self.assertEqual(repr(ns2), repr2) def test_as_dict(self): ns = types.SimpleNamespace(spam='spamspamspam') with self.assertRaises(TypeError): len(ns) with self.assertRaises(TypeError): iter(ns) with self.assertRaises(TypeError): 'spam' in ns with self.assertRaises(TypeError): ns['spam'] def test_subclass(self): class Spam(types.SimpleNamespace): pass spam = Spam(ham=8, eggs=9) self.assertIs(type(spam), Spam) self.assertEqual(vars(spam), {'ham': 8, 'eggs': 9}) # TODO: RUSTPYTHON @unittest.expectedFailure def test_pickle(self): ns = types.SimpleNamespace(breakfast="spam", lunch="spam") for protocol in range(pickle.HIGHEST_PROTOCOL + 1): pname = "protocol {}".format(protocol) try: ns_pickled = pickle.dumps(ns, protocol) except TypeError as e: raise TypeError(pname) from e ns_roundtrip = pickle.loads(ns_pickled) self.assertEqual(ns, ns_roundtrip, pname) def test_fake_namespace_compare(self): # Issue #24257: Incorrect use of PyObject_IsInstance() caused # SystemError. 
class FakeSimpleNamespace(str): __class__ = types.SimpleNamespace self.assertFalse(types.SimpleNamespace() == FakeSimpleNamespace()) self.assertTrue(types.SimpleNamespace() != FakeSimpleNamespace()) with self.assertRaises(TypeError): types.SimpleNamespace() < FakeSimpleNamespace() with self.assertRaises(TypeError): types.SimpleNamespace() <= FakeSimpleNamespace() with self.assertRaises(TypeError): types.SimpleNamespace() > FakeSimpleNamespace() with self.assertRaises(TypeError): types.SimpleNamespace() >= FakeSimpleNamespace() class CoroutineTests(unittest.TestCase): def test_wrong_args(self): samples = [None, 1, object()] for sample in samples: with self.assertRaisesRegex(TypeError, 'types.coroutine.*expects a callable'): types.coroutine(sample) def test_non_gen_values(self): @types.coroutine def foo(): return 'spam' self.assertEqual(foo(), 'spam') class Awaitable: def __await__(self): return () aw = Awaitable() @types.coroutine def foo(): return aw self.assertIs(aw, foo()) # decorate foo second time foo = types.coroutine(foo) self.assertIs(aw, foo()) # TODO: RUSTPYTHON @unittest.expectedFailure def test_async_def(self): # Test that types.coroutine passes 'async def' coroutines # without modification async def foo(): pass foo_code = foo.__code__ foo_flags = foo.__code__.co_flags decorated_foo = types.coroutine(foo) self.assertIs(foo, decorated_foo) self.assertEqual(foo.__code__.co_flags, foo_flags) self.assertIs(decorated_foo.__code__, foo_code) foo_coro = foo() def bar(): return foo_coro for _ in range(2): bar = types.coroutine(bar) coro = bar() self.assertIs(foo_coro, coro) self.assertEqual(coro.cr_code.co_flags, foo_flags) coro.close() def test_duck_coro(self): class CoroLike: def send(self): pass def throw(self): pass def close(self): pass def __await__(self): return self coro = CoroLike() @types.coroutine def foo(): return coro self.assertIs(foo(), coro) self.assertIs(foo().__await__(), coro) def test_duck_corogen(self): class CoroGenLike: def send(self): pass def throw(self): pass def close(self): pass def __await__(self): return self def __iter__(self): return self def __next__(self): pass coro = CoroGenLike() @types.coroutine def foo(): return coro self.assertIs(foo(), coro) self.assertIs(foo().__await__(), coro) @unittest.skip("TODO: RUSTPYTHON, unittest.mock") def test_duck_gen(self): class GenLike: def send(self): pass def throw(self): pass def close(self): pass def __iter__(self): pass def __next__(self): pass # Setup generator mock object gen = unittest.mock.MagicMock(GenLike) gen.__iter__ = lambda gen: gen gen.__name__ = 'gen' gen.__qualname__ = 'test.gen' self.assertIsInstance(gen, collections.abc.Generator) self.assertIs(gen, iter(gen)) @types.coroutine def foo(): return gen wrapper = foo() self.assertIsInstance(wrapper, types._GeneratorWrapper) self.assertIs(wrapper.__await__(), wrapper) # Wrapper proxies duck generators completely: self.assertIs(iter(wrapper), wrapper) self.assertIsInstance(wrapper, collections.abc.Coroutine) self.assertIsInstance(wrapper, collections.abc.Awaitable) self.assertIs(wrapper.__qualname__, gen.__qualname__) self.assertIs(wrapper.__name__, gen.__name__) # Test AttributeErrors for name in {'gi_running', 'gi_frame', 'gi_code', 'gi_yieldfrom', 'cr_running', 'cr_frame', 'cr_code', 'cr_await'}: with self.assertRaises(AttributeError): getattr(wrapper, name) # Test attributes pass-through gen.gi_running = object() gen.gi_frame = object() gen.gi_code = object() gen.gi_yieldfrom = object() self.assertIs(wrapper.gi_running, gen.gi_running) 
self.assertIs(wrapper.gi_frame, gen.gi_frame) self.assertIs(wrapper.gi_code, gen.gi_code) self.assertIs(wrapper.gi_yieldfrom, gen.gi_yieldfrom) self.assertIs(wrapper.cr_running, gen.gi_running) self.assertIs(wrapper.cr_frame, gen.gi_frame) self.assertIs(wrapper.cr_code, gen.gi_code) self.assertIs(wrapper.cr_await, gen.gi_yieldfrom) wrapper.close() gen.close.assert_called_once_with() wrapper.send(1) gen.send.assert_called_once_with(1) gen.reset_mock() next(wrapper) gen.__next__.assert_called_once_with() gen.reset_mock() wrapper.throw(1, 2, 3) gen.throw.assert_called_once_with(1, 2, 3) gen.reset_mock() wrapper.throw(1, 2) gen.throw.assert_called_once_with(1, 2) gen.reset_mock() wrapper.throw(1) gen.throw.assert_called_once_with(1) gen.reset_mock() # Test exceptions propagation error = Exception() gen.throw.side_effect = error try: wrapper.throw(1) except Exception as ex: self.assertIs(ex, error) else: self.fail('wrapper did not propagate an exception') # Test invalid args gen.reset_mock() with self.assertRaises(TypeError): wrapper.throw() self.assertFalse(gen.throw.called) with self.assertRaises(TypeError): wrapper.close(1) self.assertFalse(gen.close.called) with self.assertRaises(TypeError): wrapper.send() self.assertFalse(gen.send.called) # Test that we do not double wrap @types.coroutine def bar(): return wrapper self.assertIs(wrapper, bar()) # Test weakrefs support ref = weakref.ref(wrapper) self.assertIs(ref(), wrapper) def test_duck_functional_gen(self): class Generator: """Emulates the following generator (very clumsy): def gen(fut): result = yield fut return result * 2 """ def __init__(self, fut): self._i = 0 self._fut = fut def __iter__(self): return self def __next__(self): return self.send(None) def send(self, v): try: if self._i == 0: assert v is None return self._fut if self._i == 1: raise StopIteration(v * 2) if self._i > 1: raise StopIteration finally: self._i += 1 def throw(self, tp, *exc): self._i = 100 if tp is not GeneratorExit: raise tp def close(self): self.throw(GeneratorExit) @types.coroutine def foo(): return Generator('spam') wrapper = foo() self.assertIsInstance(wrapper, types._GeneratorWrapper) async def corofunc(): return await foo() + 100 coro = corofunc() self.assertEqual(coro.send(None), 'spam') try: coro.send(20) except StopIteration as ex: self.assertEqual(ex.args[0], 140) else: self.fail('StopIteration was expected') # TODO: RUSTPYTHON @unittest.expectedFailure def test_gen(self): def gen_func(): yield 1 return (yield 2) gen = gen_func() @types.coroutine def foo(): return gen wrapper = foo() self.assertIsInstance(wrapper, types._GeneratorWrapper) self.assertIs(wrapper.__await__(), gen) for name in ('__name__', '__qualname__', 'gi_code', 'gi_running', 'gi_frame'): self.assertIs(getattr(foo(), name), getattr(gen, name)) self.assertIs(foo().cr_code, gen.gi_code) self.assertEqual(next(wrapper), 1) self.assertEqual(wrapper.send(None), 2) with self.assertRaisesRegex(StopIteration, 'spam'): wrapper.send('spam') gen = gen_func() wrapper = foo() wrapper.send(None) with self.assertRaisesRegex(Exception, 'ham'): wrapper.throw(Exception, Exception('ham')) # decorate foo second time foo = types.coroutine(foo) self.assertIs(foo().__await__(), gen) def test_returning_itercoro(self): @types.coroutine def gen(): yield gencoro = gen() @types.coroutine def foo(): return gencoro self.assertIs(foo(), gencoro) # decorate foo second time foo = types.coroutine(foo) self.assertIs(foo(), gencoro) # TODO: RUSTPYTHON @unittest.expectedFailure def test_genfunc(self): def gen(): yield 
self.assertIs(types.coroutine(gen), gen) self.assertIs(types.coroutine(types.coroutine(gen)), gen) self.assertTrue(gen.__code__.co_flags & inspect.CO_ITERABLE_COROUTINE) self.assertFalse(gen.__code__.co_flags & inspect.CO_COROUTINE) g = gen() self.assertTrue(g.gi_code.co_flags & inspect.CO_ITERABLE_COROUTINE) self.assertFalse(g.gi_code.co_flags & inspect.CO_COROUTINE) self.assertIs(types.coroutine(gen), gen) def test_wrapper_object(self): def gen(): yield @types.coroutine def coro(): return gen() wrapper = coro() self.assertIn('GeneratorWrapper', repr(wrapper)) self.assertEqual(repr(wrapper), str(wrapper)) self.assertTrue(set(dir(wrapper)).issuperset({ '__await__', '__iter__', '__next__', 'cr_code', 'cr_running', 'cr_frame', 'gi_code', 'gi_frame', 'gi_running', 'send', 'close', 'throw'})) if __name__ == '__main__': unittest.main()
tools/mo/unit_tests/mo/front/AttributedRollToRoll_test.py
ryanloney/openvino-1
1127
131364
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import unittest

from openvino.tools.mo.front.AttributedRollToRoll import AttributedRollToRoll
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.graph.graph import Node
from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs
from unit_tests.utils.graph import build_graph, const, result, regular_op

nodes_attributes = {
    **regular_op('placeholder', {'type': 'Parameter'}),
    **regular_op('attr_roll', {'type': 'AttributedRoll', 'op': 'AttributedRoll',
                               'axes': int64_array([-1, 2, 3]),
                               'shift': int64_array([5, -2, 3])}),
    **result('result'),

    # new Roll node and inputs
    **regular_op('roll', {'type': 'Roll'}),
    **const('roll_axes', int64_array([-1, 2, 3])),
    **const('roll_shift', int64_array([5, -2, 3]))
}


class AttributedRollToRollTest(unittest.TestCase):
    def test_axes_shift(self):
        graph = build_graph(nodes_attributes,
                            [('placeholder', 'attr_roll', {'in': 0, 'out': 0}),
                             ('attr_roll', 'result', {'in': 0, 'out': 0})],
                            {}, nodes_with_edges_only=True)

        graph_ref = build_graph(nodes_attributes,
                                [('placeholder', 'roll', {'in': 0, 'out': 0}),
                                 ('roll_shift', 'roll', {'in': 1, 'out': 0}),
                                 ('roll_axes', 'roll', {'in': 2, 'out': 0}),
                                 ('roll', 'result')],
                                {}, nodes_with_edges_only=True)
        graph.stage = 'front'

        AttributedRollToRoll().find_and_replace_pattern(graph)

        (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True)
        self.assertTrue(flag, resp)
        self.assertTrue(graph.node[graph.get_nodes_with_attributes(op='Roll')[0]]['name'] == 'attr_roll')

    def test_axes(self):
        graph = build_graph(nodes_attributes,
                            [('placeholder', 'attr_roll', {'in': 0, 'out': 0}),
                             ('attr_roll', 'result', {'in': 0, 'out': 0})],
                            {}, nodes_with_edges_only=True)
        Node(graph, 'attr_roll')['axes'] = None

        graph_ref = build_graph(nodes_attributes,
                                [('placeholder', 'roll', {'in': 0, 'out': 0}),
                                 ('roll_shift', 'roll', {'in': 1, 'out': 0}),
                                 ('roll', 'result')],
                                {}, nodes_with_edges_only=True)
        graph.stage = 'front'

        AttributedRollToRoll().find_and_replace_pattern(graph)

        (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True)
        self.assertTrue(flag, resp)
        self.assertTrue(graph.node[graph.get_nodes_with_attributes(op='Roll')[0]]['name'] == 'attr_roll')
tests/packagedcode/test_bower.py
Siddhant-K-code/scancode-toolkit
1511
131380
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#

import os.path

from packagedcode import bower
from packages_test_utils import PackageTester


class TestBower(PackageTester):
    test_data_dir = os.path.join(os.path.dirname(__file__), 'data')

    def test_is_manifest_bower_json(self):
        test_file = self.get_test_loc('bower/basic/bower.json')
        assert bower.BowerJson.is_manifest(test_file)

    def test_parse_bower_json_basic(self):
        test_file = self.get_test_loc('bower/basic/bower.json')
        package = bower.BowerJson.recognize(test_file)
        expected_loc = self.get_test_loc('bower/basic/expected.json')
        self.check_packages(package, expected_loc, regen=False)

    def test_parse_bower_json_list_of_licenses(self):
        test_file = self.get_test_loc('bower/list-of-licenses/bower.json')
        package = bower.BowerJson.recognize(test_file)
        expected_loc = self.get_test_loc('bower/list-of-licenses/expected.json')
        self.check_packages(package, expected_loc, regen=False)

    def test_parse_bower_json_author_objects(self):
        test_file = self.get_test_loc('bower/author-objects/bower.json')
        package = bower.BowerJson.recognize(test_file)
        expected_loc = self.get_test_loc('bower/author-objects/expected.json')
        self.check_packages(package, expected_loc, regen=False)
testfixtures/tests/test_diff.py
abcdenis/testfixtures
184
131390
<reponame>abcdenis/testfixtures
from unittest import TestCase

from testfixtures import diff


class TestDiff(TestCase):

    def test_example(self):
        actual = diff('''
        line1
        line2
        line3
        ''',
                      '''
        line1
        line changed
        line3
        ''')
        expected = '''\
--- first
+++ second
@@ -1,5 +1,5 @@
 
         line1
-        line2
+        line changed
         line3
         '''
        self.assertEqual(
            [line.strip() for line in expected.split("\n")],
            [line.strip() for line in actual.split("\n")],
            '\n%r\n!=\n%r' % (expected, actual)
        )

    def test_no_newlines(self):
        actual = diff('x', 'y')
        # no rhyme or reason as to which of these comes back :-/
        try:
            expected = '@@ -1 +1 @@\n-x\n+y'
            self.assertEqual(
                expected,
                actual,
                '\n%r\n!=\n%r' % (expected, actual)
            )
        except AssertionError:  # pragma: no cover
            expected = '--- first\n+++ second\n@@ -1 +1 @@\n-x\n+y'
            self.assertEqual(
                expected,
                actual,
                '\n%r\n!=\n%r' % (expected, actual)
            )
configs/_base_/models/groupfree3d.py
Guangyun-Xu/mmdetection3d
2216
131391
<gh_stars>1000+
model = dict(
    type='GroupFree3DNet',
    backbone=dict(
        type='PointNet2SASSG',
        in_channels=3,
        num_points=(2048, 1024, 512, 256),
        radius=(0.2, 0.4, 0.8, 1.2),
        num_samples=(64, 32, 16, 16),
        sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256),
                     (128, 128, 256)),
        fp_channels=((256, 256), (256, 288)),
        norm_cfg=dict(type='BN2d'),
        sa_cfg=dict(
            type='PointSAModule',
            pool_mod='max',
            use_xyz=True,
            normalize_xyz=True)),
    bbox_head=dict(
        type='GroupFree3DHead',
        in_channels=288,
        num_decoder_layers=6,
        num_proposal=256,
        transformerlayers=dict(
            type='BaseTransformerLayer',
            attn_cfgs=dict(
                type='GroupFree3DMHA',
                embed_dims=288,
                num_heads=8,
                attn_drop=0.1,
                dropout_layer=dict(type='Dropout', drop_prob=0.1)),
            ffn_cfgs=dict(
                embed_dims=288,
                feedforward_channels=2048,
                ffn_drop=0.1,
                act_cfg=dict(type='ReLU', inplace=True)),
            operation_order=('self_attn', 'norm', 'cross_attn', 'norm', 'ffn',
                             'norm')),
        pred_layer_cfg=dict(
            in_channels=288, shared_conv_channels=(288, 288), bias=True),
        sampling_objectness_loss=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=8.0),
        objectness_loss=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        center_loss=dict(
            type='SmoothL1Loss', reduction='sum', loss_weight=10.0),
        dir_class_loss=dict(
            type='CrossEntropyLoss', reduction='sum', loss_weight=1.0),
        dir_res_loss=dict(
            type='SmoothL1Loss', reduction='sum', loss_weight=10.0),
        size_class_loss=dict(
            type='CrossEntropyLoss', reduction='sum', loss_weight=1.0),
        size_res_loss=dict(
            type='SmoothL1Loss', beta=1.0, reduction='sum', loss_weight=10.0),
        semantic_loss=dict(
            type='CrossEntropyLoss', reduction='sum', loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(sample_mod='kps'),
    test_cfg=dict(
        sample_mod='kps',
        nms_thr=0.25,
        score_thr=0.0,
        per_class_proposal=True,
        prediction_stages='last'))
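
# NOTE: illustrative usage note added for clarity; it is not part of the
# original config. A _base_ model file like this is normally merged into a
# full config and instantiated roughly as follows. The import paths follow
# mmdetection3d conventions but are assumptions here, so treat this as a
# sketch rather than the project's documented entry point:
#
#     from mmcv import Config
#     from mmdet3d.models import build_model
#
#     cfg = Config.fromfile('configs/_base_/models/groupfree3d.py')
#     detector = build_model(cfg.model)  # train_cfg/test_cfg are embedded above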
arviz/tests/base_tests/test_rcparams.py
sudojarvis/arviz
1,159
131393
<filename>arviz/tests/base_tests/test_rcparams.py # pylint: disable=redefined-outer-name import os import numpy as np import pytest from xarray.core.indexing import MemoryCachedArray from ...data import datasets, load_arviz_data from ...rcparams import ( _make_validate_choice, _make_validate_choice_regex, _validate_float_or_none, _validate_positive_int_or_none, _validate_probability, make_iterable_validator, rc_context, rc_params, rcParams, read_rcfile, ) from ...stats import compare from ..helpers import models # pylint: disable=unused-import ### Test rcparams classes ### def test_rc_context_dict(): rcParams["data.load"] = "lazy" with rc_context(rc={"data.load": "eager"}): assert rcParams["data.load"] == "eager" assert rcParams["data.load"] == "lazy" def test_rc_context_file(): path = os.path.dirname(os.path.abspath(__file__)) rcParams["data.load"] = "lazy" with rc_context(fname=os.path.join(path, "../test.rcparams")): assert rcParams["data.load"] == "eager" assert rcParams["data.load"] == "lazy" def test_bad_rc_file(): """Test bad value raises error.""" path = os.path.dirname(os.path.abspath(__file__)) with pytest.raises(ValueError, match="Bad val "): read_rcfile(os.path.join(path, "../bad.rcparams")) def test_warning_rc_file(caplog): """Test invalid lines and duplicated keys log warnings and bad value raises error.""" path = os.path.dirname(os.path.abspath(__file__)) read_rcfile(os.path.join(path, "../test.rcparams")) records = caplog.records assert len(records) == 1 assert records[0].levelname == "WARNING" assert "Duplicate key" in caplog.text def test_bad_key(): """Test the error when using unexistent keys in rcParams is correct.""" with pytest.raises(KeyError, match="bad_key is not a valid rc"): rcParams["bad_key"] = "nothing" def test_del_key_error(): """Check that rcParams keys cannot be deleted.""" with pytest.raises(TypeError, match="keys cannot be deleted"): del rcParams["data.load"] def test_clear_error(): """Check that rcParams cannot be cleared.""" with pytest.raises(TypeError, match="keys cannot be deleted"): rcParams.clear() def test_pop_error(): """Check rcParams pop error.""" with pytest.raises(TypeError, match=r"keys cannot be deleted.*get\(key\)"): rcParams.pop("data.load") def test_popitem_error(): """Check rcParams popitem error.""" with pytest.raises(TypeError, match=r"keys cannot be deleted.*get\(key\)"): rcParams.popitem() def test_setdefaults_error(): """Check rcParams popitem error.""" with pytest.raises(TypeError, match="Use arvizrc"): rcParams.setdefault("data.load", "eager") def test_rcparams_find_all(): data_rcparams = rcParams.find_all("data") assert len(data_rcparams) def test_rcparams_repr_str(): """Check both repr and str print all keys.""" repr_str = rcParams.__repr__() str_str = rcParams.__str__() assert repr_str.startswith("RcParams") for string in (repr_str, str_str): assert all((key in string for key in rcParams.keys())) ### Test arvizrc.template file is up to date ### def test_rctemplate_updated(): fname = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../arvizrc.template") rc_pars_template = read_rcfile(fname) rc_defaults = rc_params(ignore_files=True) assert all((key in rc_pars_template.keys() for key in rc_defaults.keys())), [ key for key in rc_defaults.keys() if key not in rc_pars_template ] assert all((value == rc_pars_template[key] for key, value in rc_defaults.items())), [ key for key, value in rc_defaults.items() if value != rc_pars_template[key] ] ### Test validation functions ### @pytest.mark.parametrize("param", 
["data.load", "stats.information_criterion"]) def test_choice_bad_values(param): """Test error messages are correct for rcParams validated with _make_validate_choice.""" msg = "{}: bad_value is not one of".format(param.replace(".", r"\.")) with pytest.raises(ValueError, match=msg): rcParams[param] = "bad_value" @pytest.mark.parametrize("allow_none", (True, False)) @pytest.mark.parametrize("typeof", (str, int)) @pytest.mark.parametrize("args", [("not one", 10), (False, None), (False, 4)]) def test_make_validate_choice(args, allow_none, typeof): accepted_values = set(typeof(value) for value in (0, 1, 4, 6)) validate_choice = _make_validate_choice(accepted_values, allow_none=allow_none, typeof=typeof) raise_error, value = args if value is None and not allow_none: raise_error = "not one of" if typeof == str else "Could not convert" if raise_error: with pytest.raises(ValueError, match=raise_error): validate_choice(value) else: value = validate_choice(value) assert value in accepted_values or value is None @pytest.mark.parametrize("allow_none", (True, False)) @pytest.mark.parametrize( "args", [ (False, None), (False, "row"), (False, "54row"), (False, "4column"), ("or in regex", "square"), ], ) def test_make_validate_choice_regex(args, allow_none): accepted_values = {"row", "column"} accepted_values_regex = {r"\d*row", r"\d*column"} validate_choice = _make_validate_choice_regex( accepted_values, accepted_values_regex, allow_none=allow_none ) raise_error, value = args if value is None and not allow_none: raise_error = "or in regex" if raise_error: with pytest.raises(ValueError, match=raise_error): validate_choice(value) else: value_result = validate_choice(value) assert value == value_result @pytest.mark.parametrize("allow_none", (True, False)) @pytest.mark.parametrize("allow_auto", (True, False)) @pytest.mark.parametrize("value", [(1, 2), "auto", None, "(1, 4)"]) def test_make_iterable_validator_none_auto(value, allow_auto, allow_none): scalar_validator = _validate_float_or_none validate_iterable = make_iterable_validator( scalar_validator, allow_auto=allow_auto, allow_none=allow_none ) raise_error = False if value is None and not allow_none: raise_error = "Only ordered iterable" if value == "auto" and not allow_auto: raise_error = "Could not convert" if raise_error: with pytest.raises(ValueError, match=raise_error): validate_iterable(value) else: value = validate_iterable(value) assert np.iterable(value) or value is None or value == "auto" @pytest.mark.parametrize("length", (2, None)) @pytest.mark.parametrize("value", [(1, 5), (1, 3, 5), "(3, 4, 5)"]) def test_make_iterable_validator_length(value, length): scalar_validator = _validate_float_or_none validate_iterable = make_iterable_validator(scalar_validator, length=length) raise_error = False if length is not None and len(value) != length: raise_error = "Iterable must be of length" if raise_error: with pytest.raises(ValueError, match=raise_error): validate_iterable(value) else: value = validate_iterable(value) assert np.iterable(value) @pytest.mark.parametrize( "args", [ ("Only ordered iterable", set(["a", "b", "c"])), ("Could not convert", "johndoe"), ("Only ordered iterable", 15), ], ) def test_make_iterable_validator_illegal(args): scalar_validator = _validate_float_or_none validate_iterable = make_iterable_validator(scalar_validator) raise_error, value = args with pytest.raises(ValueError, match=raise_error): validate_iterable(value) @pytest.mark.parametrize( "args", [("Only positive", -1), ("Could not convert", "1.3"), (False, "2"), 
(False, None), (False, 1)], ) def test_validate_positive_int_or_none(args): raise_error, value = args if raise_error: with pytest.raises(ValueError, match=raise_error): _validate_positive_int_or_none(value) else: value = _validate_positive_int_or_none(value) assert isinstance(value, int) or value is None @pytest.mark.parametrize( "args", [ ("Only.+between 0 and 1", -1), ("Only.+between 0 and 1", "1.3"), ("not convert to float", "word"), (False, "0.6"), (False, 0), (False, 1), ], ) def test_validate_probability(args): raise_error, value = args if raise_error: with pytest.raises(ValueError, match=raise_error): _validate_probability(value) else: value = _validate_probability(value) assert isinstance(value, float) ### Test integration of rcParams in ArviZ ### def test_data_load(): rcParams["data.load"] = "lazy" idata_lazy = load_arviz_data("centered_eight") assert isinstance( idata_lazy.posterior.mu.variable._data, # pylint: disable=protected-access MemoryCachedArray, ) assert rcParams["data.load"] == "lazy" rcParams["data.load"] = "eager" idata_eager = load_arviz_data("centered_eight") assert isinstance( idata_eager.posterior.mu.variable._data, np.ndarray # pylint: disable=protected-access ) assert rcParams["data.load"] == "eager" def test_stats_information_criterion(models): rcParams["stats.information_criterion"] = "waic" df_comp = compare({"model1": models.model_1, "model2": models.model_2}) assert "waic" in df_comp.columns rcParams["stats.information_criterion"] = "loo" df_comp = compare({"model1": models.model_1, "model2": models.model_2}) assert "loo" in df_comp.columns def test_http_type_request(monkeypatch): def _urlretrive(url, _): raise Exception(f"URL Retrieved: {url}") # Hijack url retrieve to inspect url passed monkeypatch.setattr(datasets, "urlretrieve", _urlretrive) # Test HTTPS default with pytest.raises(Exception) as error: datasets.load_arviz_data("radon") assert "https://" in str(error) # Test HTTP setting with pytest.raises(Exception) as error: rcParams["data.http_protocol"] = "http" datasets.load_arviz_data("radon") assert "http://" in str(error)
bin/api_connector_splunk/jsonschema/compat.py
CyberGRX/api-connector-splunk
652
131409
<reponame>CyberGRX/api-connector-splunk
import operator
import sys

try:
    from collections import MutableMapping, Sequence  # noqa
except ImportError:
    from collections.abc import MutableMapping, Sequence  # noqa

PY3 = sys.version_info[0] >= 3
PY26 = sys.version_info[:2] == (2, 6)

if PY3:
    zip = zip
    from functools import lru_cache
    from io import StringIO
    from urllib.parse import (
        unquote, urljoin, urlunsplit, SplitResult, urlsplit as _urlsplit
    )
    from urllib.request import urlopen
    str_types = str,
    int_types = int,
    iteritems = operator.methodcaller("items")
else:
    from itertools import izip as zip  # noqa
    from StringIO import StringIO
    from urlparse import (
        urljoin, urlunsplit, SplitResult, urlsplit as _urlsplit  # noqa
    )
    from urllib import unquote  # noqa
    from urllib2 import urlopen  # noqa
    str_types = basestring
    int_types = int, long
    iteritems = operator.methodcaller("iteritems")

    if PY26:
        from repoze.lru import lru_cache
    else:
        from functools32 import lru_cache


# On python < 3.3 fragments are not handled properly with unknown schemes
def urlsplit(url):
    scheme, netloc, path, query, fragment = _urlsplit(url)
    if "#" in path:
        path, fragment = path.split("#", 1)
    return SplitResult(scheme, netloc, path, query, fragment)


def urldefrag(url):
    if "#" in url:
        s, n, p, q, frag = urlsplit(url)
        defrag = urlunsplit((s, n, p, q, ''))
    else:
        defrag = url
        frag = ''
    return defrag, frag


# flake8: noqa
Src/StdLib/Lib/site-packages/win32comext/axscript/client/debug.py
cwensley/ironpython2
1,078
131472
import traceback, sys, string import win32com.server.util from win32com.util import IIDToInterfaceName from win32com.client.util import Enumerator from win32com.server.exception import COMException import pythoncom from framework import trace from win32com.axdebug import axdebug, gateways, contexts, stackframe, documents, adb from win32com.axdebug.codecontainer import SourceCodeContainer from win32com.axdebug.util import _wrap, _wrap_remove import win32com.client.connect import win32api, winerror import os try: os.environ["DEBUG_AXDEBUG"] debuggingTrace = 1 # Should we print "trace" output? except KeyError: debuggingTrace = 0 def trace(*args): """A function used instead of "print" for debugging output. """ if not debuggingTrace: return print win32api.GetCurrentThreadId(), for arg in args: print arg, print # Note that the DebugManager is not a COM gateway class for the # debugger - but it does create and manage them. class DebugManager: _debugger_interfaces_ = [axdebug.IID_IActiveScriptDebug] def __init__(self, scriptEngine): self.scriptEngine = scriptEngine self.adb = adb.Debugger() self.rootNode = None self.debugApplication = None self.ccProvider = documents.CodeContainerProvider() try: self.scriptSiteDebug = scriptEngine.GetScriptSite(axdebug.IID_IActiveScriptSiteDebug) except pythoncom.com_error: # No debugger interface (ie, dumb host). Do the extra work. trace("Scripting site has no debugger interface") self.scriptSiteDebug = None # Get the debug application object. self.debugApplication = None if self.scriptSiteDebug is not None: # Spec says that we should test for this, and if it fails revert to # PDM application. try: self.debugApplication = self.scriptSiteDebug.GetApplication() self.rootNode = self.scriptSiteDebug.GetRootApplicationNode() except pythoncom.com_error: self.debugApplication = None if self.debugApplication is None: # Try to get/create the default one # NOTE - Dont catch exceptions here - let the parent do it, # so it knows debug support is available. pdm=pythoncom.CoCreateInstance(axdebug.CLSID_ProcessDebugManager,None,pythoncom.CLSCTX_ALL, axdebug.IID_IProcessDebugManager) self.debugApplication = pdm.GetDefaultApplication() self.rootNode = self.debugApplication.GetRootNode() assert self.debugApplication is not None, "Need to have a DebugApplication object by now!" self.activeScriptDebug = None if self.debugApplication is not None: self.adb.AttachApp(self.debugApplication, self.ccProvider) self.codeContainers = {} self.activeScriptDebug = _wrap(ActiveScriptDebug(self, self.codeContainers), axdebug.IID_IActiveScriptDebug) def Close(self): # Called by the language engine when it receives a close request if self.activeScriptDebug is not None: _wrap_remove(self.activeScriptDebug) self.activeScriptDebug = None self.scriptEngine = None self.rootNode = None self.debugApplication = None self.scriptSiteDebug = None if self.ccProvider is not None: self.ccProvider.Close() self.ccProvider = None self.codeContainers = {} if self.adb: self.adb.CloseApp() self.adb = None # print "Close complete" def IsAnyHost(self): "Do we have _any_ debugging interfaces installed?" return self.debugApplication is not None def IsSimpleHost(self): return self.scriptSiteDebug is None def HandleRuntimeError( self ): """Called by the engine when a runtime error occurs. If we have a debugger, we let it know. 
The result is a boolean which indicates if the error handler should call IActiveScriptSite::OnScriptError() """ # if self.IsAnyHost: # site = _wrap(self, axdebug.IID_IActiveScriptSite) # breakResume, errorResume, fCallOnError = self.debugApplication(activeScriptErrorDebug, site) # Do something with these! # else: trace("HandleRuntimeError") fCallOnError = 1 return fCallOnError def _query_interface_for_debugger_(self, iid): if iid in self._debugger_interfaces_: return self.activeScriptDebug trace("DebugManager QI - unknown IID", iid) return 0 def OnEnterScript(self): trace("OnEnterScript") try: 1/0 except: # Bit of a hack - reach into engine. baseFrame = sys.exc_info()[2].tb_frame.f_back self.adb.SetupAXDebugging(baseFrame) def OnLeaveScript(self): trace("OnLeaveScript") self.adb.ResetAXDebugging() def AddScriptBlock(self, codeBlock): # If we dont have debugging support, dont bother. cc = DebugCodeBlockContainer(codeBlock, self.scriptSiteDebug) if self.IsSimpleHost(): document = documents.DebugDocumentText(cc) document = _wrap(document, axdebug.IID_IDebugDocument) provider = documents.DebugDocumentProvider(document) provider = _wrap(provider, axdebug.IID_IDebugDocumentProvider) cc.debugDocument = document newNode = self.debugApplication.CreateApplicationNode() newNode.SetDocumentProvider(provider) newNode.Attach(self.rootNode) else: newNode = None # Managed by smart host. self.codeContainers[cc.sourceContext] = cc self.ccProvider.AddCodeContainer(cc, newNode) class DebugCodeBlockContainer(SourceCodeContainer): def __init__(self, codeBlock, site): self.codeBlock = codeBlock SourceCodeContainer.__init__(self, codeBlock.codeText, codeBlock.GetFileName(), codeBlock.sourceContextCookie, codeBlock.startLineNumber, site) def GetName(self, dnt): if dnt==axdebug.DOCUMENTNAMETYPE_APPNODE: return self.codeBlock.GetDisplayName() elif dnt==axdebug.DOCUMENTNAMETYPE_TITLE: return self.codeBlock.GetDisplayName() # elif dnt==axdebug.DOCUMENTNAMETYPE_FILE_TAIL: # elif dnt==axdebug.DOCUMENTNAMETYPE_URL: else: raise COMException(scode=winerror.S_FALSE) class EnumDebugCodeContexts(gateways.EnumDebugCodeContexts): def _wrap(self, ob): return ob class ActiveScriptDebug: """The class which implements the IActiveScriptDebug interface for the Active Script engine. Only ever used by smart hosts. 
""" _public_methods_ = ["GetScriptTextAttributes", "GetScriptletTextAttributes", "EnumCodeContextsOfPosition"] _com_interfaces_ = [axdebug.IID_IActiveScriptDebug] def __init__(self, debugMgr, codeContainers): self.debugMgr = debugMgr self.scriptSiteDebug = debugMgr.scriptSiteDebug self.codeContainers = codeContainers def _Close(self): self.debugMgr = None self.scriptSiteDebug = None self.codeContainers = {} def _query_interface_(self, iid): trace("DebuggerQI with", iid) return _wrap(self.debugMgr.scriptEngine, iid) def GetScriptTextAttributes(self, code, delim, flags): container = SourceCodeContainer(code, "<Temp Code Block>") return container.GetSyntaxColorAttributes() def GetScriptletTextAttributes(self, code, delim, flags): trace ("GetScriptletTextAttributes", code, delim, flags) container = SourceCodeContainer(code, "<Temp Code Block>") return container.GetSyntaxColorAttributes() def EnumCodeContextsOfPosition(self, context, charOffset, numChars): trace("EnumCodeContextsOfPosition", context, charOffset, numChars) try: context = self.codeContainers[context].GetCodeContextAtPosition(charOffset) except KeyError: raise COMException(scode=winerror.E_UNEXPECTED) enum = EnumDebugCodeContexts([context]) return _wrap(enum, axdebug.IID_IEnumDebugCodeContexts)
tools/generate_text_generation.py
ankane/informers
340
131483
from pathlib import Path
import tempfile

from transformers.convert_graph_to_onnx import convert, quantize

# requires:
# transformers==4.0.0
# torch==1.7.1

dest = Path(tempfile.mkdtemp(), "text-generation.onnx")

convert(
    pipeline_name="text-generation",
    model="gpt2",
    output=dest,
    framework="pt",
    opset=11
)

print(dest)
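The script above imports `quantize` but never calls it. A minimal, hypothetical follow-up (not part of the original file) that quantizes the exported graph with the same transformers 4.0.0 helper could look like this:

# Hypothetical usage sketch: quantize the ONNX graph produced above.
# transformers.convert_graph_to_onnx.quantize returns the path of the new,
# quantized model file.
quantized_dest = quantize(dest)
print(quantized_dest)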
lit_nlp/components/pdp_test.py
eichinflo/lit
2,854
131500
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Tests for lit_nlp.components.pdp."""

from typing import List

from absl.testing import absltest
from lit_nlp.api import dataset as lit_dataset
from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types
from lit_nlp.components import pdp
from lit_nlp.lib import caching
from lit_nlp.lib import testing_utils

JsonDict = lit_types.JsonDict


class TestRegressionPdp(lit_model.Model):

  def input_spec(self):
    return {'num': lit_types.Scalar(),
            'cats': lit_types.CategoryLabel(vocab=['One', 'None'])}

  def output_spec(self):
    return {'score': lit_types.RegressionScore()}

  def predict_minibatch(self, inputs: List[JsonDict], **kw):
    return [{'score': i['num'] + (1 if i['cats'] == 'One' else 0)}
            for i in inputs]


class TestClassificationPdp(lit_model.Model):

  def input_spec(self):
    return {'num': lit_types.Scalar(),
            'cats': lit_types.CategoryLabel(vocab=['One', 'None'])}

  def output_spec(self):
    return {'probas': lit_types.MulticlassPreds(vocab=['0', '1'])}

  def predict_minibatch(self, inputs: List[JsonDict], **kw):
    def pred(i):
      val = (i['num'] / 100) + (.5 if i['cats'] == 'One' else 0)
      return {'probas': [1 - val, val]}
    return [pred(i) for i in inputs]


class PdpTest(absltest.TestCase):

  def setUp(self):
    super(PdpTest, self).setUp()
    self.pdp = pdp.PdpInterpreter()
    self.reg_model = TestRegressionPdp()
    self.class_model = TestClassificationPdp()
    examples = [
        {
            'num': 1,
            'cats': 'One',
        },
        {
            'num': 10,
            'cats': 'None',
        },
        {
            'num': 5,
            'cats': 'One',
        },
    ]
    indexed_inputs = [{'id': caching.input_hash(ex), 'data': ex}
                      for ex in examples]
    self.dataset = lit_dataset.IndexedDataset(
        spec=self.reg_model.input_spec(),
        id_fn=caching.input_hash,
        indexed_examples=indexed_inputs)

  def test_regression_num(self):
    config = {
        'feature': 'num',
    }
    result = self.pdp.run_with_metadata([self.dataset.indexed_examples[0]],
                                        self.reg_model,
                                        self.dataset,
                                        config=config)
    expected = {1.0: 2.0, 2.0: 3.0, 3.0: 4.0, 4.0: 5.0, 5.0: 6.0, 6.0: 7.0,
                7.0: 8.0, 8.0: 9.0, 9.0: 10.0, 10.0: 11.0}
    testing_utils.assert_deep_almost_equal(self, result['score'], expected)

  def test_provided_range(self):
    config = {
        'feature': 'num',
        'range': [0, 9]
    }
    result = self.pdp.run_with_metadata([self.dataset.indexed_examples[0]],
                                        self.reg_model,
                                        self.dataset,
                                        config=config)
    expected = {0.0: 1.0, 1.0: 2.0, 2.0: 3.0, 3.0: 4.0, 4.0: 5.0, 5.0: 6.0,
                6.0: 7.0, 7.0: 8.0, 8.0: 9.0, 9.0: 10.0}
    testing_utils.assert_deep_almost_equal(self, result['score'], expected)

  def test_regression_cat(self):
    config = {
        'feature': 'cats',
    }
    result = self.pdp.run_with_metadata([self.dataset.indexed_examples[0]],
                                        self.reg_model,
                                        self.dataset,
                                        config=config)
    expected = {'One': 2.0, 'None': 1.0}
    testing_utils.assert_deep_almost_equal(self, result['score'], expected)

  def test_class_num(self):
    config = {
        'feature': 'num',
    }
    result = self.pdp.run_with_metadata([self.dataset.indexed_examples[0]],
                                        self.class_model,
                                        self.dataset,
                                        config=config)
    expected = {1.0: [0.49, 0.51], 2.0: [0.48, 0.52], 3.0: [0.47, 0.53],
                4.0: [0.46, 0.54], 5.0: [0.45, 0.55], 6.0: [0.44, 0.56],
                7.0: [0.43, 0.57], 8.0: [0.42, 0.58], 9.0: [0.41, 0.59],
                10.0: [0.4, 0.6]}
    testing_utils.assert_deep_almost_equal(self, result['probas'], expected)

  def test_classification_cat(self):
    config = {
        'feature': 'cats',
    }
    result = self.pdp.run_with_metadata([self.dataset.indexed_examples[0]],
                                        self.class_model,
                                        self.dataset,
                                        config=config)
    expected = {'One': [0.49, 0.51], 'None': [0.99, 0.01]}
    testing_utils.assert_deep_almost_equal(self, result['probas'], expected)

  def test_multiple_inputs(self):
    config = {
        'feature': 'num',
    }
    result = self.pdp.run_with_metadata(self.dataset.indexed_examples[0:2],
                                        self.reg_model,
                                        self.dataset,
                                        config=config)
    expected = {1.0: 1.5, 2.0: 2.5, 3.0: 3.5, 4.0: 4.5, 5.0: 5.5, 6.0: 6.5,
                7.0: 7.5, 8.0: 8.5, 9.0: 9.5, 10.0: 10.5}
    testing_utils.assert_deep_almost_equal(self, result['score'], expected)


if __name__ == '__main__':
  absltest.main()
sdk/translation/azure-ai-translation-document/azure/ai/translation/document/_helpers.py
rsdoherty/azure-sdk-for-python
2,728
131502
<filename>sdk/translation/azure-ai-translation-document/azure/ai/translation/document/_helpers.py # coding=utf-8 # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ import datetime from typing import Union, Optional, List import six from azure.core.credentials import AzureKeyCredential from azure.core.pipeline.policies import AzureKeyCredentialPolicy from azure.core.pipeline.policies import HttpLoggingPolicy from ._generated.models import ( BatchRequest as _BatchRequest, SourceInput as _SourceInput, TargetInput as _TargetInput, DocumentFilter as _DocumentFilter, ) from ._models import DocumentTranslationInput COGNITIVE_KEY_HEADER = "Ocp-Apim-Subscription-Key" POLLING_INTERVAL = 1 def get_translation_input(args, kwargs, continuation_token): try: inputs = kwargs.pop("inputs", None) if not inputs: inputs = args[0] request = ( DocumentTranslationInput._to_generated_list( # pylint: disable=protected-access inputs ) if not continuation_token else None ) except (AttributeError, TypeError, IndexError): try: source_url = kwargs.pop("source_url", None) if not source_url: source_url = args[0] target_url = kwargs.pop("target_url", None) if not target_url: target_url = args[1] target_language_code = kwargs.pop("target_language_code", None) if not target_language_code: target_language_code = args[2] # Additional kwargs source_language_code = kwargs.pop("source_language_code", None) prefix = kwargs.pop("prefix", None) suffix = kwargs.pop("suffix", None) storage_type = kwargs.pop("storage_type", None) category_id = kwargs.pop("category_id", None) glossaries = kwargs.pop("glossaries", None) request = [ _BatchRequest( source=_SourceInput( source_url=source_url, filter=_DocumentFilter( prefix=prefix, suffix=suffix ), language=source_language_code, ), targets=[ _TargetInput( target_url=target_url, language=target_language_code, glossaries=[g._to_generated() for g in glossaries] # pylint: disable=protected-access if glossaries else None, category=category_id, ) ], storage_type=storage_type ) ] except (AttributeError, TypeError, IndexError): raise ValueError( "Pass 'inputs' for multiple inputs or 'source_url', 'target_url', " "and 'target_language_code' for a single input." ) return request def get_authentication_policy(credential): authentication_policy = None if credential is None: raise ValueError("Parameter 'credential' must not be None.") if isinstance(credential, AzureKeyCredential): authentication_policy = AzureKeyCredentialPolicy( name=COGNITIVE_KEY_HEADER, credential=credential ) elif credential is not None and not hasattr(credential, "get_token"): raise TypeError( "Unsupported credential: {}. 
Use an instance of AzureKeyCredential " "or a token credential from azure.identity".format(type(credential)) ) return authentication_policy def get_http_logging_policy(**kwargs): http_logging_policy = HttpLoggingPolicy(**kwargs) http_logging_policy.allowed_header_names.update( { "Operation-Location", "Content-Encoding", "Vary", "apim-request-id", "X-RequestId", "Set-Cookie", "X-Powered-By", "Strict-Transport-Security", "x-content-type-options", } ) http_logging_policy.allowed_query_params.update( { "$top", "$skip", "$maxpagesize", "ids", "statuses", "createdDateTimeUtcStart", "createdDateTimeUtcEnd", "$orderBy", } ) return http_logging_policy def convert_datetime(date_time): # type: (Union[str, datetime.datetime]) -> datetime.datetime if isinstance(date_time, datetime.datetime): return date_time if isinstance(date_time, six.string_types): try: return datetime.datetime.strptime(date_time, "%Y-%m-%d") except ValueError: try: return datetime.datetime.strptime(date_time, "%Y-%m-%dT%H:%M:%SZ") except ValueError: return datetime.datetime.strptime(date_time, "%Y-%m-%d %H:%M:%S") raise TypeError("Bad datetime type") def convert_order_by(order_by): # type: (Optional[List[str]]) -> Optional[List[str]] if order_by: order_by = [order.replace("created_on", "createdDateTimeUtc") for order in order_by] return order_by
reader/bidafv1/layers.py
wsdm/RCZoo
166
131506
#!/usr/bin/env python3 # Copyright 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. """Definitions of model layers/NN modules""" import ipdb import torch import torch.nn as nn import os from torch.nn.utils.rnn import pad_packed_sequence as unpack from torch.nn.utils.rnn import pack_padded_sequence as pack import torch.utils.model_zoo as model_zoo import torch.nn.functional as F from torch.autograd import Variable # ------------------------------------------------------------------------------ # Modules # ------------------------------------------------------------------------------ class Highway(nn.Module): def __init__(self, layer_num: int, size: int, gate_bias=-2): super().__init__() self.n = layer_num self.linear = nn.ModuleList([nn.Linear(size, size) for _ in range(self.n)]) self.gate = nn.ModuleList([nn.Linear(size, size) for _ in range(self.n)]) for i in range(self.n): self.gate[i].bias.data.fill_(gate_bias) def forward(self, x): for i in range(self.n): gate = F.sigmoid(self.gate[i](x)) nonlinear = F.relu(self.linear[i](x)) x = gate * nonlinear + (1 - gate) * x return x class StackedBRNN(nn.Module): """Stacked Bi-directional RNNs. Differs from standard PyTorch library in that it has the option to save and concat the hidden states between layers. (i.e. the output hidden size for each sequence input is num_layers * hidden_size). """ def __init__(self, input_size, hidden_size, num_layers, dropout_rate=0, dropout_output=False, rnn_type=nn.LSTM, concat_layers=False, padding=False): super(StackedBRNN, self).__init__() self.padding = padding self.dropout_output = dropout_output self.dropout_rate = dropout_rate self.num_layers = num_layers self.concat_layers = concat_layers self.rnns = nn.ModuleList() for i in range(num_layers): input_size = input_size if i == 0 else 2 * hidden_size self.rnns.append(rnn_type(input_size, hidden_size, num_layers=1, bidirectional=True)) def forward(self, x, x_mask): """Encode either padded or non-padded sequences. Can choose to either handle or ignore variable length sequences. Always handle padding in eval. Args: x: batch * len * hdim x_mask: batch * len (1 for padding, 0 for true) Output: x_encoded: batch * len * hdim_encoded """ if x_mask.data.sum() == 0: # No padding necessary. output = self._forward_unpadded(x, x_mask) elif self.padding or not self.training: # Pad if we care or if its during eval. output = self._forward_padded(x, x_mask) else: # We don't care. output = self._forward_unpadded(x, x_mask) return output.contiguous() def _forward_unpadded(self, x, x_mask): """Faster encoding that ignores any padding.""" # Transpose batch and sequence dims x = x.transpose(0, 1) # Encode all layers outputs = [x] for i in range(self.num_layers): rnn_input = outputs[-1] # Apply dropout to hidden input if self.dropout_rate > 0: rnn_input = F.dropout(rnn_input, p=self.dropout_rate, training=self.training) # Forward rnn_output = self.rnns[i](rnn_input)[0] outputs.append(rnn_output) # Concat hidden layers if self.concat_layers: output = torch.cat(outputs[1:], 2) else: output = outputs[-1] # Transpose back output = output.transpose(0, 1) # Dropout on output layer if self.dropout_output and self.dropout_rate > 0: output = F.dropout(output, p=self.dropout_rate, training=self.training) return output def _forward_padded(self, x, x_mask): """Slower (significantly), but more precise, encoding that handles padding. 
""" # Compute sorted sequence lengths lengths = x_mask.data.eq(0).long().sum(1).squeeze() _, idx_sort = torch.sort(lengths, dim=0, descending=True) _, idx_unsort = torch.sort(idx_sort, dim=0) lengths = list(lengths[idx_sort]) idx_sort = Variable(idx_sort) idx_unsort = Variable(idx_unsort) # Sort x x = x.index_select(0, idx_sort) # Transpose batch and sequence dims x = x.transpose(0, 1) # Pack it up rnn_input = nn.utils.rnn.pack_padded_sequence(x, lengths) # Encode all layers outputs = [rnn_input] for i in range(self.num_layers): rnn_input = outputs[-1] # Apply dropout to input if self.dropout_rate > 0: dropout_input = F.dropout(rnn_input.data, p=self.dropout_rate, training=self.training) rnn_input = nn.utils.rnn.PackedSequence(dropout_input, rnn_input.batch_sizes) outputs.append(self.rnns[i](rnn_input)[0]) # Unpack everything for i, o in enumerate(outputs[1:], 1): outputs[i] = nn.utils.rnn.pad_packed_sequence(o)[0] # Concat hidden layers or take final if self.concat_layers: output = torch.cat(outputs[1:], 2) else: output = outputs[-1] # Transpose and unsort output = output.transpose(0, 1) output = output.index_select(0, idx_unsort) # Pad up to original batch sequence length if output.size(1) != x_mask.size(1): padding = torch.zeros(output.size(0), x_mask.size(1) - output.size(1), output.size(2)).type(output.data.type()) output = torch.cat([output, Variable(padding)], 1) # Dropout on output layer if self.dropout_output and self.dropout_rate > 0: output = F.dropout(output, p=self.dropout_rate, training=self.training) return output model_urls = { 'wmt-lstm' : 'https://s3.amazonaws.com/research.metamind.io/cove/wmtlstm-b142a7f2.pth' } model_cache = os.path.join(os.path.dirname(os.path.realpath(__file__)), '.torch') class MTLSTM(nn.Module): def __init__(self, n_vocab=None, vectors=None, residual_embeddings=False): """Initialize an MTLSTM. Arguments: n_vocab (bool): If not None, initialize MTLSTM with an embedding matrix with n_vocab vectors vectors (Float Tensor): If not None, initialize embedding matrix with specified vectors residual_embedding (bool): If True, concatenate the input embeddings with MTLSTM outputs during forward """ super(MTLSTM, self).__init__() self.embed = False if n_vocab is not None: self.embed = True self.vectors = nn.Embedding(n_vocab, 300) if vectors is not None: self.vectors.weight.data = vectors self.rnn = nn.LSTM(300, 300, num_layers=2, bidirectional=True, batch_first=True) self.rnn.load_state_dict(model_zoo.load_url(model_urls['wmt-lstm'], model_dir=model_cache)) self.residual_embeddings = residual_embeddings def forward(self, inputs, lengths, hidden=None): """A pretrained MT-LSTM (McCann et. al. 2017). This LSTM was trained with 300d 840B GloVe on the WMT 2017 machine translation dataset. Arguments: inputs (Tensor): If MTLSTM handles embedding, a Long Tensor of size (batch_size, timesteps). Otherwise, a Float Tensor of size (batch_size, timesteps, features). 
lengths (Long Tensor): (batch_size, lengths) lenghts of each sequence for handling padding hidden (Float Tensor): initial hidden state of the LSTM """ if self.embed: inputs = self.vectors(inputs) lens, indices = torch.sort(lengths, 0, True) outputs, hidden_t = self.rnn(pack(inputs[indices], lens.tolist(), batch_first=True), hidden) outputs = unpack(outputs, batch_first=True)[0] _, _indices = torch.sort(indices, 0) outputs = outputs[_indices] if self.residual_embeddings: outputs = torch.cat([inputs, outputs], 2) return outputs class BiAttention(nn.Module): """ biattention in BiDAF model :param dim: hidden size """ def __init__(self, dim): super().__init__() self.linear = nn.Linear(3 * dim, 1) def forward(self, x1, x1_mask, x2, x2_mask): """ :param x1: b x n x d :param x2: b x m x d :param x1_mask: b x n :param x2_mask: b x m """ # bxnxmxd x1_aug = x1.unsqueeze(2).expand(x1.size(0), x1.size(1), x2.size(1), x1.size(2)) x2_aug = x2.unsqueeze(1).expand(x1.size(0), x1.size(1), x2.size(1), x2.size(2)) x_input = torch.cat([x1_aug, x2_aug, x1_aug * x2_aug], dim=3) similarity = self.linear(x_input).squeeze(3) # bxnxm x2_mask = x2_mask.unsqueeze(1).expand_as(similarity) similarity.data.masked_fill_(x2_mask.data, -2e20) # bxnxm # c -> q sim_row = F.softmax(similarity, dim=2) attn_a = sim_row.bmm(x2) # q -> c x1_mask = x1_mask.unsqueeze(2).expand_as(similarity) similarity.data.masked_fill_(x1_mask.data, -2e20) sim_col = F.softmax(similarity, dim=1) q2c = sim_col.transpose(1,2).bmm(x1) attn_b = sim_row.bmm(q2c) return attn_a, attn_b class DotAttention(nn.Module): """Given sequences X and Y, match sequence Y to each element in X. * o_i = sum(alpha_j * y_j) for i in X * alpha_j = softmax(y_j * x_i) """ def __init__(self, input_size, hidden): super(DotAttention, self).__init__() self.hidden = hidden self.input_size = input_size self.linear1 = nn.Linear(input_size, hidden) self.linear2 = nn.Linear(input_size, hidden) self.linear3 = nn.Linear(2*input_size, 2*input_size) def forward(self, x, y, y_mask): """ Args: x: batch * len1 * hdim y: batch * len2 * hdim y_mask: batch * len2 (1 for padding, 0 for true) Output: matched_seq: batch * len1 * hdim """ # Project vectors x_proj = F.relu(self.linear1(x)) y_proj = F.relu(self.linear2(y)) # Compute scores scores = x_proj.bmm(y_proj.transpose(2, 1)) / (self.hidden ** 0.5) # Mask padding y_mask = y_mask.unsqueeze(1).expand(scores.size()) scores.data.masked_fill_(y_mask.data, -float('inf')) # Normalize with softmax alpha_flat = F.softmax(scores.view(-1, y.size(1))) alpha = alpha_flat.view(-1, x.size(1), y.size(1)) # Take weighted average matched_seq = alpha.bmm(y) res = torch.cat([x, matched_seq], dim=2) res = F.dropout(res, p=0.2, training=self.training) # add gate gate = F.sigmoid(self.linear3(res)) return res * gate class SeqAttnMatch(nn.Module): """Given sequences X and Y, match sequence Y to each element in X. 
* o_i = sum(alpha_j * y_j) for i in X * alpha_j = softmax(y_j * x_i) """ def __init__(self, input_size, identity=False): super(SeqAttnMatch, self).__init__() if not identity: self.linear = nn.Linear(input_size, input_size) else: self.linear = None def forward(self, x, y, y_mask): """ Args: x: batch * len1 * hdim y: batch * len2 * hdim y_mask: batch * len2 (1 for padding, 0 for true) Output: matched_seq: batch * len1 * hdim """ # Project vectors if self.linear: x_proj = self.linear(x.view(-1, x.size(2))).view(x.size()) x_proj = F.relu(x_proj) y_proj = self.linear(y.view(-1, y.size(2))).view(y.size()) y_proj = F.relu(y_proj) else: x_proj = x y_proj = y # Compute scores scores = x_proj.bmm(y_proj.transpose(2, 1)) # Mask padding y_mask = y_mask.unsqueeze(1).expand(scores.size()) scores.data.masked_fill_(y_mask.data, -float('inf')) # Normalize with softmax alpha_flat = F.softmax(scores.view(-1, y.size(1)), dim=-1) alpha = alpha_flat.view(-1, x.size(1), y.size(1)) # Take weighted average matched_seq = alpha.bmm(y) return matched_seq class BilinearSeqAttn(nn.Module): """A bilinear attention layer over a sequence X w.r.t y: * o_i = softmax(x_i'Wy) for x_i in X. Optionally don't normalize output weights. """ def __init__(self, x_size, y_size, identity=False, normalize=True): super(BilinearSeqAttn, self).__init__() self.normalize = normalize # If identity is true, we just use a dot product without transformation. if not identity: self.linear = nn.Linear(y_size, x_size) else: self.linear = None def forward(self, x, y, x_mask): """ Args: x: batch * len * hdim1 y: batch * hdim2 x_mask: batch * len (1 for padding, 0 for true) Output: alpha = batch * len """ Wy = self.linear(y) if self.linear is not None else y xWy = x.bmm(Wy.unsqueeze(2)).squeeze(2) xWy.data.masked_fill_(x_mask.data, -float('inf')) if self.normalize: if self.training: # In training we output log-softmax for NLL alpha = F.log_softmax(xWy, dim=-1) else: # ...Otherwise 0-1 probabilities alpha = F.softmax(xWy, dim=-1) else: alpha = xWy.exp() return alpha class PtrNet(nn.Module): def __init__(self, in_size): super(PtrNet, self).__init__() self.linear1 = nn.Linear(in_size, 128) self.linear2 = nn.Linear(128, 1) def forward(self, doc, q_vec, x1_mask): """ :param p: B * N * H :param q_vec: B * H :param x1_mask: B * N :return res: B * H :return out: B * N """ out = torch.cat([doc, q_vec.unsqueeze(1).expand(q_vec.size(0), doc.size(1), q_vec.size(1))], dim=2) out = F.tanh(self.linear1(out)) out = self.linear2(out).squeeze(2) # B * N out.data.masked_fill_(x1_mask.data, -float('inf')) out = F.softmax(out) res = out.unsqueeze(1).bmm(doc).squeeze(1) # b*h return res, out class LinearSeqAttn(nn.Module): """Self attention over a sequence: * o_i = softmax(Wx_i) for x_i in X. """ def __init__(self, input_size): super(LinearSeqAttn, self).__init__() self.linear1 = nn.Linear(input_size, 64) self.linear2 = nn.Linear(64, 1) def forward(self, x, x_mask): """ Args: x: batch * len * hdim x_mask: batch * len (1 for padding, 0 for true) Output: alpha: batch * len """ scores = self.linear2(F.tanh(self.linear1(x))).squeeze(2) scores.data.masked_fill_(x_mask.data, -float('inf')) alpha = F.softmax(scores) res = alpha.unsqueeze(1).bmm(x).squeeze(1) return res # ------------------------------------------------------------------------------ # Functional # ------------------------------------------------------------------------------ def uniform_weights(x, x_mask): """Return uniform weights over non-masked x (a sequence of vectors). 
Args: x: batch * len * hdim x_mask: batch * len (1 for padding, 0 for true) Output: x_avg: batch * hdim """ alpha = Variable(torch.ones(x.size(0), x.size(1))) if x.data.is_cuda: alpha = alpha.cuda() alpha = alpha * x_mask.eq(0).float() alpha = alpha / alpha.sum(1).expand(alpha.size()) return alpha def weighted_avg(x, weights): """Return a weighted average of x (a sequence of vectors). Args: x: batch * len * hdim weights: batch * len, sum(dim = 1) = 1 Output: x_avg: batch * hdim """ return weights.unsqueeze(1).bmm(x).squeeze(1) class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__(self, embed_dim, num_heads, dropout=0., bias=True): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" self.scaling = self.head_dim**-0.5 self._mask = None self.in_proj_weight = Parameter(torch.Tensor(3*embed_dim, embed_dim)) if bias: self.in_proj_bias = Parameter(torch.Tensor(3*embed_dim)) else: self.register_parameter('in_proj_bias', None) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.in_proj_weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.in_proj_bias is not None: nn.init.constant_(self.in_proj_bias, 0.) nn.init.constant_(self.out_proj.bias, 0.) def forward(self, query, key, value, key_padding_mask=None, need_weights=True): """Input shape: Time x Batch x Channel Self-attention can be implemented by passing in the same arguments for query, key and value. Future timesteps can be masked with the `mask_future_timesteps` argument. Padding elements can be excluded from the key by passing a binary ByteTensor (`key_padding_mask`) with shape: batch x src_len, where padding elements are indicated by 1s. 
""" qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr() kv_same = key.data_ptr() == value.data_ptr() tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] assert key.size() == value.size() if qkv_same: # self-attention q, k, v = self.in_proj_qkv(query) elif kv_same: # encoder-decoder attention q = self.in_proj_q(query) if key is None: assert value is None # this will allow us to concat it with previous value and get # just get the previous value k = v = q.new(0) else: k, v = self.in_proj_kv(key) else: q = self.in_proj_q(query) k = self.in_proj_k(key) v = self.in_proj_v(value) q *= self.scaling src_len = k.size(0) if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len q = q.contiguous().view(tgt_len, bsz*self.num_heads, self.head_dim).transpose(0, 1) k = k.contiguous().view(src_len, bsz*self.num_heads, self.head_dim).transpose(0, 1) v = v.contiguous().view(src_len, bsz*self.num_heads, self.head_dim).transpose(0, 1) attn_weights = torch.bmm(q, k.transpose(1, 2)) assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.float().masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2), float('-inf'), ).type_as(attn_weights) # FP16 support: cast to float and back attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(attn_weights) attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training) attn = torch.bmm(attn_weights, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) if need_weights: # average attention weights over heads attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.sum(dim=1) / self.num_heads else: attn_weights = None return attn, attn_weights def in_proj_qkv(self, query): return self._in_proj(query).chunk(3, dim=-1) def in_proj_kv(self, key): return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1) def in_proj_q(self, query): return self._in_proj(query, end=self.embed_dim) def in_proj_k(self, key): return self._in_proj(key, start=self.embed_dim, end=2*self.embed_dim) def in_proj_v(self, value): return self._in_proj(value, start=2*self.embed_dim) def _in_proj(self, input, start=None, end=None): weight = self.in_proj_weight bias = self.in_proj_bias if end is not None: weight = weight[:end, :] if bias is not None: bias = bias[:end] if start is not None: weight = weight[start:, :] if bias is not None: bias = bias[start:] return F.linear(input, weight, bias)
tests/test_tine.py
mathiazom/recipe-scrapers
811
131527
from recipe_scrapers.tineno import TineNo
from tests import ScraperTest


class TestTineNoScraper(ScraperTest):

    scraper_class = TineNo

    def test_host(self):
        self.assertEqual("tine.no", self.harvester_class.host())

    def test_canonical_url(self):
        self.assertEqual(
            "https://www.tine.no/oppskrifter/middag-og-hovedretter/kylling-og-fjarkre/rask-kylling-tikka-masala",
            self.harvester_class.canonical_url(),
        )

    def test_title(self):
        self.assertEqual(self.harvester_class.title(), "Rask kylling tikka masala")

    def test_total_time(self):
        self.assertEqual(30, self.harvester_class.total_time())

    def test_yields(self):
        self.assertEqual("4", self.harvester_class.yields())

    def test_image(self):
        self.assertEqual(
            "https://www.tine.no/_/recipeimage/w_2880%2Ch_1620%2Cc_fill%2Cx_764%2Cy_430%2Cg_xy_center/recipeimage/yshftxnhdmojzhelrupo.png",
            self.harvester_class.image(),
        )

    def test_ingredients(self):
        self.assertCountEqual(
            [
                "Ris:",
                "4 dl basmatiris",
                "Tikka masala:",
                "400 g kyllingfileter",
                "1 ss TINE Meierismørtil steking",
                "1 stk paprika",
                "½ dl chili",
                "3 stk vårløk",
                "1 ts hvitløksfedd",
                "1 ss hakket, friskingefær",
                "½ dl hakket, friskkoriander",
                "2 ts garam masala",
                "3 dl TINE Lett Crème Fraîche 18 %",
                "3 ss tomatpuré",
                "½ ts salt",
                "¼ ts pepper",
                "Raita:",
                "½ dl slangeagurk",
                "3 dl TINE Yoghurt Naturell",
                "½ dl friskmynte",
                "1 ts hvitløksfedd",
                "½ ts salt",
                "¼ ts pepper",
            ],
            self.harvester_class.ingredients(),
        )

    def test_instructions(self):
        return self.assertEqual(
            "Kok ris etter anvisningen på pakken.\nTikka masala: Del kylling i biter. Brun kyllingen i smør i en stekepanne på middels varme. Rens og hakk paprika, chili, vårløk og hvitløk og ha det i stekepannen sammen med kyllingen. Rens og finhakk ingefær og frisk koriander. Krydre med garam masala, koriander og ingefær. Hell i crème fraîche og tomatpuré, og la småkoke i 5 minutter. Smak til med salt og pepper.\nRaita: Riv agurk og bland den med yoghurt. Hakk mynte og hvitløk og bland det i. Smak til med salt og pepper.",
            self.harvester_class.instructions(),
        )

    def test_ratings(self):
        self.assertEqual(3.9, self.harvester_class.ratings())

    def test_description(self):
        self.assertEqual(
            "En god og rask oppskrift på en kylling tikka masala. Dette er en rett med små smakseksplosjoner som sender tankene til India.",
            self.harvester_class.description(),
        )
checkov/cloudformation/checks/resource/base_resource_check.py
niradler/checkov
4,013
131533
<reponame>niradler/checkov
from abc import abstractmethod
from typing import List, Callable, Optional, Dict, Any

from checkov.cloudformation.checks.resource.registry import cfn_registry
from checkov.common.checks.base_check import BaseCheck
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.common.multi_signature import multi_signature


class BaseResourceCheck(BaseCheck):
    def __init__(self, name: str, id: str, categories: List[CheckCategories], supported_resources: List[str],
                 guideline=None) -> None:
        super().__init__(
            name=name,
            id=id,
            categories=categories,
            supported_entities=supported_resources,
            block_type="resource",
            guideline=guideline
        )
        self.supported_resources = supported_resources
        cfn_registry.register(self)

    def scan_entity_conf(self, conf: Dict[str, Any], entity_type: str) -> CheckResult:
        return self.scan_resource_conf(conf, entity_type)

    @multi_signature()
    @abstractmethod
    def scan_resource_conf(self, conf: Dict[str, Any], entity_type: str) -> CheckResult:
        raise NotImplementedError()

    @classmethod
    @scan_resource_conf.add_signature(args=["self", "conf"])
    def _scan_resource_conf_self_conf(cls, wrapped: Callable[..., CheckResult]) -> Callable[..., CheckResult]:
        def wrapper(self: BaseCheck, conf: Dict[str, Any], entity_type: Optional[str] = None) -> CheckResult:
            # keep default argument for entity_type so old code, that doesn't set it, will work.
            return wrapped(self, conf)

        return wrapper
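For reference, a concrete check built on this base class might look like the following sketch. It is an illustration only, not part of the original file: the check id, name, resource type, and the Properties lookup are invented for the example; the old-style two-argument `scan_resource_conf(self, conf)` signature is used, which the `multi_signature` wrapper above still accepts.

# Hypothetical example subclass (illustration only; id/name/logic are invented).
class ExampleBucketPropertiesCheck(BaseResourceCheck):
    def __init__(self):
        super().__init__(
            name="Example: ensure the resource declares a Properties block",
            id="CKV_EXAMPLE_1",
            categories=[CheckCategories.GENERAL_SECURITY],
            supported_resources=["AWS::S3::Bucket"],
        )

    def scan_resource_conf(self, conf):
        # Pass if the CloudFormation resource declares any Properties at all.
        if conf.get("Properties"):
            return CheckResult.PASSED
        return CheckResult.FAILED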
sdk/python/pulumi_azure/servicebus/get_namespace_authorization_rule.py
henriktao/pulumi-azure
109
131538
<filename>sdk/python/pulumi_azure/servicebus/get_namespace_authorization_rule.py # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = [ 'GetNamespaceAuthorizationRuleResult', 'AwaitableGetNamespaceAuthorizationRuleResult', 'get_namespace_authorization_rule', 'get_namespace_authorization_rule_output', ] @pulumi.output_type class GetNamespaceAuthorizationRuleResult: """ A collection of values returned by getNamespaceAuthorizationRule. """ def __init__(__self__, id=None, name=None, namespace_name=None, primary_connection_string=None, primary_connection_string_alias=None, primary_key=None, resource_group_name=None, secondary_connection_string=None, secondary_connection_string_alias=None, secondary_key=None): if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if namespace_name and not isinstance(namespace_name, str): raise TypeError("Expected argument 'namespace_name' to be a str") pulumi.set(__self__, "namespace_name", namespace_name) if primary_connection_string and not isinstance(primary_connection_string, str): raise TypeError("Expected argument 'primary_connection_string' to be a str") pulumi.set(__self__, "primary_connection_string", primary_connection_string) if primary_connection_string_alias and not isinstance(primary_connection_string_alias, str): raise TypeError("Expected argument 'primary_connection_string_alias' to be a str") pulumi.set(__self__, "primary_connection_string_alias", primary_connection_string_alias) if primary_key and not isinstance(primary_key, str): raise TypeError("Expected argument 'primary_key' to be a str") pulumi.set(__self__, "primary_key", primary_key) if resource_group_name and not isinstance(resource_group_name, str): raise TypeError("Expected argument 'resource_group_name' to be a str") pulumi.set(__self__, "resource_group_name", resource_group_name) if secondary_connection_string and not isinstance(secondary_connection_string, str): raise TypeError("Expected argument 'secondary_connection_string' to be a str") pulumi.set(__self__, "secondary_connection_string", secondary_connection_string) if secondary_connection_string_alias and not isinstance(secondary_connection_string_alias, str): raise TypeError("Expected argument 'secondary_connection_string_alias' to be a str") pulumi.set(__self__, "secondary_connection_string_alias", secondary_connection_string_alias) if secondary_key and not isinstance(secondary_key, str): raise TypeError("Expected argument 'secondary_key' to be a str") pulumi.set(__self__, "secondary_key", secondary_key) @property @pulumi.getter def id(self) -> str: """ The provider-assigned unique ID for this managed resource. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> str: return pulumi.get(self, "name") @property @pulumi.getter(name="namespaceName") def namespace_name(self) -> str: return pulumi.get(self, "namespace_name") @property @pulumi.getter(name="primaryConnectionString") def primary_connection_string(self) -> str: """ The primary connection string for the authorization rule. 
""" return pulumi.get(self, "primary_connection_string") @property @pulumi.getter(name="primaryConnectionStringAlias") def primary_connection_string_alias(self) -> str: """ The alias Primary Connection String for the ServiceBus Namespace, if the namespace is Geo DR paired. """ return pulumi.get(self, "primary_connection_string_alias") @property @pulumi.getter(name="primaryKey") def primary_key(self) -> str: """ The primary access key for the authorization rule. """ return pulumi.get(self, "primary_key") @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> str: return pulumi.get(self, "resource_group_name") @property @pulumi.getter(name="secondaryConnectionString") def secondary_connection_string(self) -> str: """ The secondary connection string for the authorization rule. """ return pulumi.get(self, "secondary_connection_string") @property @pulumi.getter(name="secondaryConnectionStringAlias") def secondary_connection_string_alias(self) -> str: """ The alias Secondary Connection String for the ServiceBus Namespace """ return pulumi.get(self, "secondary_connection_string_alias") @property @pulumi.getter(name="secondaryKey") def secondary_key(self) -> str: """ The secondary access key for the authorization rule. """ return pulumi.get(self, "secondary_key") class AwaitableGetNamespaceAuthorizationRuleResult(GetNamespaceAuthorizationRuleResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetNamespaceAuthorizationRuleResult( id=self.id, name=self.name, namespace_name=self.namespace_name, primary_connection_string=self.primary_connection_string, primary_connection_string_alias=self.primary_connection_string_alias, primary_key=self.primary_key, resource_group_name=self.resource_group_name, secondary_connection_string=self.secondary_connection_string, secondary_connection_string_alias=self.secondary_connection_string_alias, secondary_key=self.secondary_key) def get_namespace_authorization_rule(name: Optional[str] = None, namespace_name: Optional[str] = None, resource_group_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNamespaceAuthorizationRuleResult: """ Use this data source to access information about an existing ServiceBus Namespace Authorization Rule. ## Example Usage ```python import pulumi import pulumi_azure as azure example = azure.servicebus.get_namespace_authorization_rule(name="examplerule", namespace_name="examplenamespace", resource_group_name="example-resources") pulumi.export("ruleId", example.id) ``` :param str name: Specifies the name of the ServiceBus Namespace Authorization Rule. :param str namespace_name: Specifies the name of the ServiceBus Namespace. :param str resource_group_name: Specifies the name of the Resource Group where the ServiceBus Namespace exists. 
""" __args__ = dict() __args__['name'] = name __args__['namespaceName'] = namespace_name __args__['resourceGroupName'] = resource_group_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure:servicebus/getNamespaceAuthorizationRule:getNamespaceAuthorizationRule', __args__, opts=opts, typ=GetNamespaceAuthorizationRuleResult).value return AwaitableGetNamespaceAuthorizationRuleResult( id=__ret__.id, name=__ret__.name, namespace_name=__ret__.namespace_name, primary_connection_string=__ret__.primary_connection_string, primary_connection_string_alias=__ret__.primary_connection_string_alias, primary_key=__ret__.primary_key, resource_group_name=__ret__.resource_group_name, secondary_connection_string=__ret__.secondary_connection_string, secondary_connection_string_alias=__ret__.secondary_connection_string_alias, secondary_key=__ret__.secondary_key) @_utilities.lift_output_func(get_namespace_authorization_rule) def get_namespace_authorization_rule_output(name: Optional[pulumi.Input[str]] = None, namespace_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNamespaceAuthorizationRuleResult]: """ Use this data source to access information about an existing ServiceBus Namespace Authorization Rule. ## Example Usage ```python import pulumi import pulumi_azure as azure example = azure.servicebus.get_namespace_authorization_rule(name="examplerule", namespace_name="examplenamespace", resource_group_name="example-resources") pulumi.export("ruleId", example.id) ``` :param str name: Specifies the name of the ServiceBus Namespace Authorization Rule. :param str namespace_name: Specifies the name of the ServiceBus Namespace. :param str resource_group_name: Specifies the name of the Resource Group where the ServiceBus Namespace exists. """ ...
scripts/validate_sphinx.py
SamuelMarks/botorch
2,344
131568
<filename>scripts/validate_sphinx.py<gh_stars>1000+
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import annotations

import argparse
import os
import pkgutil
import re
from typing import Set


# Paths are relative to top-level botorch directory (passed as arg below)
SPHINX_RST_PATH = os.path.join("sphinx", "source")
BOTORCH_LIBRARY_PATH = "botorch"

# Regex for automodule directive used in Sphinx docs
AUTOMODULE_REGEX = re.compile(r"\.\. automodule:: ([\.\w]*)")

# The top-level modules in botorch not to be validated
EXCLUDED_MODULES = {"version"}


def parse_rst(rst_filename: str) -> Set[str]:
    """Extract automodule directives from rst."""
    ret = set()
    with open(rst_filename, "r") as f:
        lines = f.readlines()
    for line in lines:
        line = line.strip()
        name = AUTOMODULE_REGEX.findall(line)
        if name:
            ret.add(name[0])
    return ret


def validate_complete_sphinx(path_to_botorch: str) -> None:
    """Validate that Sphinx-based API documentation is complete.

    - Every top-level module (e.g., acquisition, models, etc.) should have a
      corresponding .rst sphinx source file in sphinx/source.
    - Every single non-package (i.e. py file) module should be included in an
      .rst file `automodule::` directive. Sphinx will then automatically
      include all members from the module in the documentation.

    Note: this function does not validate any documentation, only its presence.

    Args:
        path_to_botorch: the path to the top-level botorch directory (directory
            that includes botorch library, sphinx, website, etc.).
    """
    # Load top-level modules used in botorch (e.g., acquisition, models)
    # Exclude auxiliary packages
    modules = {
        modname
        for importer, modname, ispkg in pkgutil.walk_packages(
            path=[BOTORCH_LIBRARY_PATH], onerror=lambda x: None
        )
        if modname not in EXCLUDED_MODULES
    }

    # Load all rst files (these contain the documentation for Sphinx)
    rstpath = os.path.join(path_to_botorch, SPHINX_RST_PATH)
    rsts = {f.replace(".rst", "") for f in os.listdir(rstpath) if f.endswith(".rst")}

    # Verify that all top-level modules have a corresponding rst
    missing_rsts = modules.difference(rsts)
    if missing_rsts:
        raise RuntimeError(f"Not all modules have corresponding rst: {missing_rsts}")

    # Track all modules that are not in docs (so we can print all of them)
    modules_not_in_docs = []

    # Iterate over top-level modules
    for module in modules.intersection(rsts):
        # Parse rst and extract all modules referenced via automodule directives
        modules_in_rst = parse_rst(os.path.join(rstpath, module + ".rst"))

        # Extract all non-package modules
        for _importer, modname, ispkg in pkgutil.walk_packages(
            path=[
                os.path.join(BOTORCH_LIBRARY_PATH, module)
            ],  # botorch.__path__[0], module),
            prefix="botorch." + module + ".",
            onerror=lambda x: None,
        ):
            if not ispkg and ".tests" not in modname and modname not in modules_in_rst:
                modules_not_in_docs.append(modname)

    if modules_not_in_docs:
        raise RuntimeError(f"Not all modules are documented: {modules_not_in_docs}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Validate that Sphinx documentation is complete."
    )
    parser.add_argument(
        "-p",
        "--path",
        metavar="path",
        required=True,
        help="Path to the top-level botorch directory.",
    )
    args = parser.parse_args()
    validate_complete_sphinx(args.path)
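
# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition): shows the shape of data parse_rst()
# returns. It writes one hypothetical automodule directive to a temporary .rst
# file and parses it back; the expected result is a one-element set such as
# {"botorch.acquisition.analytic"}.
def _demo_parse_rst() -> Set[str]:
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".rst", delete=False) as f:
        f.write(".. automodule:: botorch.acquisition.analytic\n")
        rst_name = f.name
    try:
        return parse_rst(rst_name)
    finally:
        os.remove(rst_name)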
timer.py
njuFerret/python-snippets
555
131582
<reponame>njuFerret/python-snippets<filename>timer.py
"""
Homegrown timing tools for function calls.
Does total time, best-of time and best-of-totals time
"""

import time
import sys

# Use the high-resolution perf_counter on Python 3.3+; otherwise fall back to
# the best legacy clock for the platform.
if sys.version_info >= (3, 3):
    timer = time.perf_counter
else:
    timer = time.clock if sys.platform[:3] == 'win' else time.time


def total(reps, func, *pargs, **kargs):
    """
    Total time to run func() reps times.
    Return (total time, last result)
    """
    replist = list(range(reps))             # Hoist out, equalize 2.x, 3.x
    start = timer()
    for i in replist:
        ret = func(*pargs, **kargs)
    elapsed = timer() - start
    return (elapsed, ret)


def bestof(reps, func, *pargs, **kargs):
    """
    Quickest func() among reps runs.
    Return (best time, last result)
    """
    best = 2 ** 32                          # 136 years seems large enough
    for i in range(reps):
        start = timer()
        ret = func(*pargs, **kargs)
        elapsed = timer() - start
        if elapsed < best:
            best = elapsed
    return (best, ret)


def bestoftotal(reps1, reps2, func, *pargs, **kargs):
    """
    Best of totals:
    (best of reps1 runs of (total of reps2 runs of func))
    """
    return bestof(reps1, total, reps2, func, *pargs, **kargs)
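
# ---------------------------------------------------------------------------
# Illustrative usage (editorial addition): a quick self-check of the helpers
# above. The workload function below is a hypothetical stand-in for whatever
# you actually want to time; each helper returns a (time, result) tuple, so
# index [0] picks out the timing.
if __name__ == "__main__":
    def _work(n=100000):
        return sum(i * i for i in range(n))

    print('total       :', total(50, _work)[0])
    print('bestof      :', bestof(50, _work)[0])
    print('bestoftotal :', bestoftotal(5, 10, _work)[0])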
Lib/test/test_lzma.py
inging44/python3
1,872
131587
<gh_stars>1000+ from io import BytesIO, UnsupportedOperation import os import pickle import random import unittest from test.support import ( _4G, TESTFN, import_module, bigmemtest, run_unittest, unlink ) lzma = import_module("lzma") from lzma import LZMACompressor, LZMADecompressor, LZMAError, LZMAFile class CompressorDecompressorTestCase(unittest.TestCase): # Test error cases. def test_simple_bad_args(self): self.assertRaises(TypeError, LZMACompressor, []) self.assertRaises(TypeError, LZMACompressor, format=3.45) self.assertRaises(TypeError, LZMACompressor, check="") self.assertRaises(TypeError, LZMACompressor, preset="asdf") self.assertRaises(TypeError, LZMACompressor, filters=3) # Can't specify FORMAT_AUTO when compressing. self.assertRaises(ValueError, LZMACompressor, format=lzma.FORMAT_AUTO) # Can't specify a preset and a custom filter chain at the same time. with self.assertRaises(ValueError): LZMACompressor(preset=7, filters=[{"id": lzma.FILTER_LZMA2}]) self.assertRaises(TypeError, LZMADecompressor, ()) self.assertRaises(TypeError, LZMADecompressor, memlimit=b"qw") with self.assertRaises(TypeError): LZMADecompressor(lzma.FORMAT_RAW, filters="zzz") # Cannot specify a memory limit with FILTER_RAW. with self.assertRaises(ValueError): LZMADecompressor(lzma.FORMAT_RAW, memlimit=0x1000000) # Can only specify a custom filter chain with FILTER_RAW. self.assertRaises(ValueError, LZMADecompressor, filters=FILTERS_RAW_1) with self.assertRaises(ValueError): LZMADecompressor(format=lzma.FORMAT_XZ, filters=FILTERS_RAW_1) with self.assertRaises(ValueError): LZMADecompressor(format=lzma.FORMAT_ALONE, filters=FILTERS_RAW_1) lzc = LZMACompressor() self.assertRaises(TypeError, lzc.compress) self.assertRaises(TypeError, lzc.compress, b"foo", b"bar") self.assertRaises(TypeError, lzc.flush, b"blah") empty = lzc.flush() self.assertRaises(ValueError, lzc.compress, b"quux") self.assertRaises(ValueError, lzc.flush) lzd = LZMADecompressor() self.assertRaises(TypeError, lzd.decompress) self.assertRaises(TypeError, lzd.decompress, b"foo", b"bar") lzd.decompress(empty) self.assertRaises(EOFError, lzd.decompress, b"quux") def test_bad_filter_spec(self): self.assertRaises(TypeError, LZMACompressor, filters=[b"wobsite"]) self.assertRaises(ValueError, LZMACompressor, filters=[{"xyzzy": 3}]) self.assertRaises(ValueError, LZMACompressor, filters=[{"id": 98765}]) with self.assertRaises(ValueError): LZMACompressor(filters=[{"id": lzma.FILTER_LZMA2, "foo": 0}]) with self.assertRaises(ValueError): LZMACompressor(filters=[{"id": lzma.FILTER_DELTA, "foo": 0}]) with self.assertRaises(ValueError): LZMACompressor(filters=[{"id": lzma.FILTER_X86, "foo": 0}]) def test_decompressor_after_eof(self): lzd = LZMADecompressor() lzd.decompress(COMPRESSED_XZ) self.assertRaises(EOFError, lzd.decompress, b"nyan") def test_decompressor_memlimit(self): lzd = LZMADecompressor(memlimit=1024) self.assertRaises(LZMAError, lzd.decompress, COMPRESSED_XZ) lzd = LZMADecompressor(lzma.FORMAT_XZ, memlimit=1024) self.assertRaises(LZMAError, lzd.decompress, COMPRESSED_XZ) lzd = LZMADecompressor(lzma.FORMAT_ALONE, memlimit=1024) self.assertRaises(LZMAError, lzd.decompress, COMPRESSED_ALONE) # Test LZMADecompressor on known-good input data. 
def _test_decompressor(self, lzd, data, check, unused_data=b""): self.assertFalse(lzd.eof) out = lzd.decompress(data) self.assertEqual(out, INPUT) self.assertEqual(lzd.check, check) self.assertTrue(lzd.eof) self.assertEqual(lzd.unused_data, unused_data) def test_decompressor_auto(self): lzd = LZMADecompressor() self._test_decompressor(lzd, COMPRESSED_XZ, lzma.CHECK_CRC64) lzd = LZMADecompressor() self._test_decompressor(lzd, COMPRESSED_ALONE, lzma.CHECK_NONE) def test_decompressor_xz(self): lzd = LZMADecompressor(lzma.FORMAT_XZ) self._test_decompressor(lzd, COMPRESSED_XZ, lzma.CHECK_CRC64) def test_decompressor_alone(self): lzd = LZMADecompressor(lzma.FORMAT_ALONE) self._test_decompressor(lzd, COMPRESSED_ALONE, lzma.CHECK_NONE) def test_decompressor_raw_1(self): lzd = LZMADecompressor(lzma.FORMAT_RAW, filters=FILTERS_RAW_1) self._test_decompressor(lzd, COMPRESSED_RAW_1, lzma.CHECK_NONE) def test_decompressor_raw_2(self): lzd = LZMADecompressor(lzma.FORMAT_RAW, filters=FILTERS_RAW_2) self._test_decompressor(lzd, COMPRESSED_RAW_2, lzma.CHECK_NONE) def test_decompressor_raw_3(self): lzd = LZMADecompressor(lzma.FORMAT_RAW, filters=FILTERS_RAW_3) self._test_decompressor(lzd, COMPRESSED_RAW_3, lzma.CHECK_NONE) def test_decompressor_raw_4(self): lzd = LZMADecompressor(lzma.FORMAT_RAW, filters=FILTERS_RAW_4) self._test_decompressor(lzd, COMPRESSED_RAW_4, lzma.CHECK_NONE) def test_decompressor_chunks(self): lzd = LZMADecompressor() out = [] for i in range(0, len(COMPRESSED_XZ), 10): self.assertFalse(lzd.eof) out.append(lzd.decompress(COMPRESSED_XZ[i:i+10])) out = b"".join(out) self.assertEqual(out, INPUT) self.assertEqual(lzd.check, lzma.CHECK_CRC64) self.assertTrue(lzd.eof) self.assertEqual(lzd.unused_data, b"") def test_decompressor_unused_data(self): lzd = LZMADecompressor() extra = b"fooblibar" self._test_decompressor(lzd, COMPRESSED_XZ + extra, lzma.CHECK_CRC64, unused_data=extra) def test_decompressor_bad_input(self): lzd = LZMADecompressor() self.assertRaises(LZMAError, lzd.decompress, COMPRESSED_RAW_1) lzd = LZMADecompressor(lzma.FORMAT_XZ) self.assertRaises(LZMAError, lzd.decompress, COMPRESSED_ALONE) lzd = LZMADecompressor(lzma.FORMAT_ALONE) self.assertRaises(LZMAError, lzd.decompress, COMPRESSED_XZ) lzd = LZMADecompressor(lzma.FORMAT_RAW, filters=FILTERS_RAW_1) self.assertRaises(LZMAError, lzd.decompress, COMPRESSED_XZ) # Test that LZMACompressor->LZMADecompressor preserves the input data. def test_roundtrip_xz(self): lzc = LZMACompressor() cdata = lzc.compress(INPUT) + lzc.flush() lzd = LZMADecompressor() self._test_decompressor(lzd, cdata, lzma.CHECK_CRC64) def test_roundtrip_alone(self): lzc = LZMACompressor(lzma.FORMAT_ALONE) cdata = lzc.compress(INPUT) + lzc.flush() lzd = LZMADecompressor() self._test_decompressor(lzd, cdata, lzma.CHECK_NONE) def test_roundtrip_raw(self): lzc = LZMACompressor(lzma.FORMAT_RAW, filters=FILTERS_RAW_4) cdata = lzc.compress(INPUT) + lzc.flush() lzd = LZMADecompressor(lzma.FORMAT_RAW, filters=FILTERS_RAW_4) self._test_decompressor(lzd, cdata, lzma.CHECK_NONE) def test_roundtrip_chunks(self): lzc = LZMACompressor() cdata = [] for i in range(0, len(INPUT), 10): cdata.append(lzc.compress(INPUT[i:i+10])) cdata.append(lzc.flush()) cdata = b"".join(cdata) lzd = LZMADecompressor() self._test_decompressor(lzd, cdata, lzma.CHECK_CRC64) # LZMADecompressor intentionally does not handle concatenated streams. 
def test_decompressor_multistream(self): lzd = LZMADecompressor() self._test_decompressor(lzd, COMPRESSED_XZ + COMPRESSED_ALONE, lzma.CHECK_CRC64, unused_data=COMPRESSED_ALONE) # Test with inputs larger than 4GiB. @bigmemtest(size=_4G + 100, memuse=2) def test_compressor_bigmem(self, size): lzc = LZMACompressor() cdata = lzc.compress(b"x" * size) + lzc.flush() ddata = lzma.decompress(cdata) try: self.assertEqual(len(ddata), size) self.assertEqual(len(ddata.strip(b"x")), 0) finally: ddata = None @bigmemtest(size=_4G + 100, memuse=3) def test_decompressor_bigmem(self, size): lzd = LZMADecompressor() blocksize = 10 * 1024 * 1024 block = random.getrandbits(blocksize * 8).to_bytes(blocksize, "little") try: input = block * (size // blocksize + 1) cdata = lzma.compress(input) ddata = lzd.decompress(cdata) self.assertEqual(ddata, input) finally: input = cdata = ddata = None # Pickling raises an exception; there's no way to serialize an lzma_stream. def test_pickle(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.assertRaises(TypeError): pickle.dumps(LZMACompressor(), proto) with self.assertRaises(TypeError): pickle.dumps(LZMADecompressor(), proto) class CompressDecompressFunctionTestCase(unittest.TestCase): # Test error cases: def test_bad_args(self): self.assertRaises(TypeError, lzma.compress) self.assertRaises(TypeError, lzma.compress, []) self.assertRaises(TypeError, lzma.compress, b"", format="xz") self.assertRaises(TypeError, lzma.compress, b"", check="none") self.assertRaises(TypeError, lzma.compress, b"", preset="blah") self.assertRaises(TypeError, lzma.compress, b"", filters=1024) # Can't specify a preset and a custom filter chain at the same time. with self.assertRaises(ValueError): lzma.compress(b"", preset=3, filters=[{"id": lzma.FILTER_LZMA2}]) self.assertRaises(TypeError, lzma.decompress) self.assertRaises(TypeError, lzma.decompress, []) self.assertRaises(TypeError, lzma.decompress, b"", format="lzma") self.assertRaises(TypeError, lzma.decompress, b"", memlimit=7.3e9) with self.assertRaises(TypeError): lzma.decompress(b"", format=lzma.FORMAT_RAW, filters={}) # Cannot specify a memory limit with FILTER_RAW. with self.assertRaises(ValueError): lzma.decompress(b"", format=lzma.FORMAT_RAW, memlimit=0x1000000) # Can only specify a custom filter chain with FILTER_RAW. with self.assertRaises(ValueError): lzma.decompress(b"", filters=FILTERS_RAW_1) with self.assertRaises(ValueError): lzma.decompress(b"", format=lzma.FORMAT_XZ, filters=FILTERS_RAW_1) with self.assertRaises(ValueError): lzma.decompress( b"", format=lzma.FORMAT_ALONE, filters=FILTERS_RAW_1) def test_decompress_memlimit(self): with self.assertRaises(LZMAError): lzma.decompress(COMPRESSED_XZ, memlimit=1024) with self.assertRaises(LZMAError): lzma.decompress( COMPRESSED_XZ, format=lzma.FORMAT_XZ, memlimit=1024) with self.assertRaises(LZMAError): lzma.decompress( COMPRESSED_ALONE, format=lzma.FORMAT_ALONE, memlimit=1024) # Test LZMADecompressor on known-good input data. 
def test_decompress_good_input(self): ddata = lzma.decompress(COMPRESSED_XZ) self.assertEqual(ddata, INPUT) ddata = lzma.decompress(COMPRESSED_ALONE) self.assertEqual(ddata, INPUT) ddata = lzma.decompress(COMPRESSED_XZ, lzma.FORMAT_XZ) self.assertEqual(ddata, INPUT) ddata = lzma.decompress(COMPRESSED_ALONE, lzma.FORMAT_ALONE) self.assertEqual(ddata, INPUT) ddata = lzma.decompress( COMPRESSED_RAW_1, lzma.FORMAT_RAW, filters=FILTERS_RAW_1) self.assertEqual(ddata, INPUT) ddata = lzma.decompress( COMPRESSED_RAW_2, lzma.FORMAT_RAW, filters=FILTERS_RAW_2) self.assertEqual(ddata, INPUT) ddata = lzma.decompress( COMPRESSED_RAW_3, lzma.FORMAT_RAW, filters=FILTERS_RAW_3) self.assertEqual(ddata, INPUT) ddata = lzma.decompress( COMPRESSED_RAW_4, lzma.FORMAT_RAW, filters=FILTERS_RAW_4) self.assertEqual(ddata, INPUT) def test_decompress_incomplete_input(self): self.assertRaises(LZMAError, lzma.decompress, COMPRESSED_XZ[:128]) self.assertRaises(LZMAError, lzma.decompress, COMPRESSED_ALONE[:128]) self.assertRaises(LZMAError, lzma.decompress, COMPRESSED_RAW_1[:128], format=lzma.FORMAT_RAW, filters=FILTERS_RAW_1) self.assertRaises(LZMAError, lzma.decompress, COMPRESSED_RAW_2[:128], format=lzma.FORMAT_RAW, filters=FILTERS_RAW_2) self.assertRaises(LZMAError, lzma.decompress, COMPRESSED_RAW_3[:128], format=lzma.FORMAT_RAW, filters=FILTERS_RAW_3) self.assertRaises(LZMAError, lzma.decompress, COMPRESSED_RAW_4[:128], format=lzma.FORMAT_RAW, filters=FILTERS_RAW_4) def test_decompress_bad_input(self): with self.assertRaises(LZMAError): lzma.decompress(COMPRESSED_BOGUS) with self.assertRaises(LZMAError): lzma.decompress(COMPRESSED_RAW_1) with self.assertRaises(LZMAError): lzma.decompress(COMPRESSED_ALONE, format=lzma.FORMAT_XZ) with self.assertRaises(LZMAError): lzma.decompress(COMPRESSED_XZ, format=lzma.FORMAT_ALONE) with self.assertRaises(LZMAError): lzma.decompress(COMPRESSED_XZ, format=lzma.FORMAT_RAW, filters=FILTERS_RAW_1) # Test that compress()->decompress() preserves the input data. def test_roundtrip(self): cdata = lzma.compress(INPUT) ddata = lzma.decompress(cdata) self.assertEqual(ddata, INPUT) cdata = lzma.compress(INPUT, lzma.FORMAT_XZ) ddata = lzma.decompress(cdata) self.assertEqual(ddata, INPUT) cdata = lzma.compress(INPUT, lzma.FORMAT_ALONE) ddata = lzma.decompress(cdata) self.assertEqual(ddata, INPUT) cdata = lzma.compress(INPUT, lzma.FORMAT_RAW, filters=FILTERS_RAW_4) ddata = lzma.decompress(cdata, lzma.FORMAT_RAW, filters=FILTERS_RAW_4) self.assertEqual(ddata, INPUT) # Unlike LZMADecompressor, decompress() *does* handle concatenated streams. def test_decompress_multistream(self): ddata = lzma.decompress(COMPRESSED_XZ + COMPRESSED_ALONE) self.assertEqual(ddata, INPUT * 2) # Test robust handling of non-LZMA data following the compressed stream(s). 
def test_decompress_trailing_junk(self): ddata = lzma.decompress(COMPRESSED_XZ + COMPRESSED_BOGUS) self.assertEqual(ddata, INPUT) def test_decompress_multistream_trailing_junk(self): ddata = lzma.decompress(COMPRESSED_XZ * 3 + COMPRESSED_BOGUS) self.assertEqual(ddata, INPUT * 3) class TempFile: """Context manager - creates a file, and deletes it on __exit__.""" def __init__(self, filename, data=b""): self.filename = filename self.data = data def __enter__(self): with open(self.filename, "wb") as f: f.write(self.data) def __exit__(self, *args): unlink(self.filename) class FileTestCase(unittest.TestCase): def test_init(self): with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: pass with LZMAFile(BytesIO(), "w") as f: pass with LZMAFile(BytesIO(), "x") as f: pass with LZMAFile(BytesIO(), "a") as f: pass def test_init_with_filename(self): with TempFile(TESTFN, COMPRESSED_XZ): with LZMAFile(TESTFN) as f: pass with LZMAFile(TESTFN, "w") as f: pass with LZMAFile(TESTFN, "a") as f: pass def test_init_mode(self): with TempFile(TESTFN): with LZMAFile(TESTFN, "r"): pass with LZMAFile(TESTFN, "rb"): pass with LZMAFile(TESTFN, "w"): pass with LZMAFile(TESTFN, "wb"): pass with LZMAFile(TESTFN, "a"): pass with LZMAFile(TESTFN, "ab"): pass def test_init_with_x_mode(self): self.addCleanup(unlink, TESTFN) for mode in ("x", "xb"): unlink(TESTFN) with LZMAFile(TESTFN, mode): pass with self.assertRaises(FileExistsError): with LZMAFile(TESTFN, mode): pass def test_init_bad_mode(self): with self.assertRaises(ValueError): LZMAFile(BytesIO(COMPRESSED_XZ), (3, "x")) with self.assertRaises(ValueError): LZMAFile(BytesIO(COMPRESSED_XZ), "") with self.assertRaises(ValueError): LZMAFile(BytesIO(COMPRESSED_XZ), "xt") with self.assertRaises(ValueError): LZMAFile(BytesIO(COMPRESSED_XZ), "x+") with self.assertRaises(ValueError): LZMAFile(BytesIO(COMPRESSED_XZ), "rx") with self.assertRaises(ValueError): LZMAFile(BytesIO(COMPRESSED_XZ), "wx") with self.assertRaises(ValueError): LZMAFile(BytesIO(COMPRESSED_XZ), "rt") with self.assertRaises(ValueError): LZMAFile(BytesIO(COMPRESSED_XZ), "r+") with self.assertRaises(ValueError): LZMAFile(BytesIO(COMPRESSED_XZ), "wt") with self.assertRaises(ValueError): LZMAFile(BytesIO(COMPRESSED_XZ), "w+") with self.assertRaises(ValueError): LZMAFile(BytesIO(COMPRESSED_XZ), "rw") def test_init_bad_check(self): with self.assertRaises(TypeError): LZMAFile(BytesIO(), "w", check=b"asd") # CHECK_UNKNOWN and anything above CHECK_ID_MAX should be invalid. with self.assertRaises(LZMAError): LZMAFile(BytesIO(), "w", check=lzma.CHECK_UNKNOWN) with self.assertRaises(LZMAError): LZMAFile(BytesIO(), "w", check=lzma.CHECK_ID_MAX + 3) # Cannot specify a check with mode="r". 
with self.assertRaises(ValueError): LZMAFile(BytesIO(COMPRESSED_XZ), check=lzma.CHECK_NONE) with self.assertRaises(ValueError): LZMAFile(BytesIO(COMPRESSED_XZ), check=lzma.CHECK_CRC32) with self.assertRaises(ValueError): LZMAFile(BytesIO(COMPRESSED_XZ), check=lzma.CHECK_CRC64) with self.assertRaises(ValueError): LZMAFile(BytesIO(COMPRESSED_XZ), check=lzma.CHECK_SHA256) with self.assertRaises(ValueError): LZMAFile(BytesIO(COMPRESSED_XZ), check=lzma.CHECK_UNKNOWN) def test_init_bad_preset(self): with self.assertRaises(TypeError): LZMAFile(BytesIO(), "w", preset=4.39) with self.assertRaises(LZMAError): LZMAFile(BytesIO(), "w", preset=10) with self.assertRaises(LZMAError): LZMAFile(BytesIO(), "w", preset=23) with self.assertRaises(OverflowError): LZMAFile(BytesIO(), "w", preset=-1) with self.assertRaises(OverflowError): LZMAFile(BytesIO(), "w", preset=-7) with self.assertRaises(TypeError): LZMAFile(BytesIO(), "w", preset="foo") # Cannot specify a preset with mode="r". with self.assertRaises(ValueError): LZMAFile(BytesIO(COMPRESSED_XZ), preset=3) def test_init_bad_filter_spec(self): with self.assertRaises(TypeError): LZMAFile(BytesIO(), "w", filters=[b"wobsite"]) with self.assertRaises(ValueError): LZMAFile(BytesIO(), "w", filters=[{"xyzzy": 3}]) with self.assertRaises(ValueError): LZMAFile(BytesIO(), "w", filters=[{"id": 98765}]) with self.assertRaises(ValueError): LZMAFile(BytesIO(), "w", filters=[{"id": lzma.FILTER_LZMA2, "foo": 0}]) with self.assertRaises(ValueError): LZMAFile(BytesIO(), "w", filters=[{"id": lzma.FILTER_DELTA, "foo": 0}]) with self.assertRaises(ValueError): LZMAFile(BytesIO(), "w", filters=[{"id": lzma.FILTER_X86, "foo": 0}]) def test_init_with_preset_and_filters(self): with self.assertRaises(ValueError): LZMAFile(BytesIO(), "w", format=lzma.FORMAT_RAW, preset=6, filters=FILTERS_RAW_1) def test_close(self): with BytesIO(COMPRESSED_XZ) as src: f = LZMAFile(src) f.close() # LZMAFile.close() should not close the underlying file object. self.assertFalse(src.closed) # Try closing an already-closed LZMAFile. f.close() self.assertFalse(src.closed) # Test with a real file on disk, opened directly by LZMAFile. with TempFile(TESTFN, COMPRESSED_XZ): f = LZMAFile(TESTFN) fp = f._fp f.close() # Here, LZMAFile.close() *should* close the underlying file object. self.assertTrue(fp.closed) # Try closing an already-closed LZMAFile. 
f.close() def test_closed(self): f = LZMAFile(BytesIO(COMPRESSED_XZ)) try: self.assertFalse(f.closed) f.read() self.assertFalse(f.closed) finally: f.close() self.assertTrue(f.closed) f = LZMAFile(BytesIO(), "w") try: self.assertFalse(f.closed) finally: f.close() self.assertTrue(f.closed) def test_fileno(self): f = LZMAFile(BytesIO(COMPRESSED_XZ)) try: self.assertRaises(UnsupportedOperation, f.fileno) finally: f.close() self.assertRaises(ValueError, f.fileno) with TempFile(TESTFN, COMPRESSED_XZ): f = LZMAFile(TESTFN) try: self.assertEqual(f.fileno(), f._fp.fileno()) self.assertIsInstance(f.fileno(), int) finally: f.close() self.assertRaises(ValueError, f.fileno) def test_seekable(self): f = LZMAFile(BytesIO(COMPRESSED_XZ)) try: self.assertTrue(f.seekable()) f.read() self.assertTrue(f.seekable()) finally: f.close() self.assertRaises(ValueError, f.seekable) f = LZMAFile(BytesIO(), "w") try: self.assertFalse(f.seekable()) finally: f.close() self.assertRaises(ValueError, f.seekable) src = BytesIO(COMPRESSED_XZ) src.seekable = lambda: False f = LZMAFile(src) try: self.assertFalse(f.seekable()) finally: f.close() self.assertRaises(ValueError, f.seekable) def test_readable(self): f = LZMAFile(BytesIO(COMPRESSED_XZ)) try: self.assertTrue(f.readable()) f.read() self.assertTrue(f.readable()) finally: f.close() self.assertRaises(ValueError, f.readable) f = LZMAFile(BytesIO(), "w") try: self.assertFalse(f.readable()) finally: f.close() self.assertRaises(ValueError, f.readable) def test_writable(self): f = LZMAFile(BytesIO(COMPRESSED_XZ)) try: self.assertFalse(f.writable()) f.read() self.assertFalse(f.writable()) finally: f.close() self.assertRaises(ValueError, f.writable) f = LZMAFile(BytesIO(), "w") try: self.assertTrue(f.writable()) finally: f.close() self.assertRaises(ValueError, f.writable) def test_read(self): with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: self.assertEqual(f.read(), INPUT) self.assertEqual(f.read(), b"") with LZMAFile(BytesIO(COMPRESSED_ALONE)) as f: self.assertEqual(f.read(), INPUT) with LZMAFile(BytesIO(COMPRESSED_XZ), format=lzma.FORMAT_XZ) as f: self.assertEqual(f.read(), INPUT) self.assertEqual(f.read(), b"") with LZMAFile(BytesIO(COMPRESSED_ALONE), format=lzma.FORMAT_ALONE) as f: self.assertEqual(f.read(), INPUT) self.assertEqual(f.read(), b"") with LZMAFile(BytesIO(COMPRESSED_RAW_1), format=lzma.FORMAT_RAW, filters=FILTERS_RAW_1) as f: self.assertEqual(f.read(), INPUT) self.assertEqual(f.read(), b"") with LZMAFile(BytesIO(COMPRESSED_RAW_2), format=lzma.FORMAT_RAW, filters=FILTERS_RAW_2) as f: self.assertEqual(f.read(), INPUT) self.assertEqual(f.read(), b"") with LZMAFile(BytesIO(COMPRESSED_RAW_3), format=lzma.FORMAT_RAW, filters=FILTERS_RAW_3) as f: self.assertEqual(f.read(), INPUT) self.assertEqual(f.read(), b"") with LZMAFile(BytesIO(COMPRESSED_RAW_4), format=lzma.FORMAT_RAW, filters=FILTERS_RAW_4) as f: self.assertEqual(f.read(), INPUT) self.assertEqual(f.read(), b"") def test_read_0(self): with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: self.assertEqual(f.read(0), b"") with LZMAFile(BytesIO(COMPRESSED_ALONE)) as f: self.assertEqual(f.read(0), b"") with LZMAFile(BytesIO(COMPRESSED_XZ), format=lzma.FORMAT_XZ) as f: self.assertEqual(f.read(0), b"") with LZMAFile(BytesIO(COMPRESSED_ALONE), format=lzma.FORMAT_ALONE) as f: self.assertEqual(f.read(0), b"") def test_read_10(self): with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: chunks = [] while True: result = f.read(10) if not result: break self.assertLessEqual(len(result), 10) chunks.append(result) self.assertEqual(b"".join(chunks), 
INPUT) def test_read_multistream(self): with LZMAFile(BytesIO(COMPRESSED_XZ * 5)) as f: self.assertEqual(f.read(), INPUT * 5) with LZMAFile(BytesIO(COMPRESSED_XZ + COMPRESSED_ALONE)) as f: self.assertEqual(f.read(), INPUT * 2) with LZMAFile(BytesIO(COMPRESSED_RAW_3 * 4), format=lzma.FORMAT_RAW, filters=FILTERS_RAW_3) as f: self.assertEqual(f.read(), INPUT * 4) def test_read_multistream_buffer_size_aligned(self): # Test the case where a stream boundary coincides with the end # of the raw read buffer. saved_buffer_size = lzma._BUFFER_SIZE lzma._BUFFER_SIZE = len(COMPRESSED_XZ) try: with LZMAFile(BytesIO(COMPRESSED_XZ * 5)) as f: self.assertEqual(f.read(), INPUT * 5) finally: lzma._BUFFER_SIZE = saved_buffer_size def test_read_trailing_junk(self): with LZMAFile(BytesIO(COMPRESSED_XZ + COMPRESSED_BOGUS)) as f: self.assertEqual(f.read(), INPUT) def test_read_multistream_trailing_junk(self): with LZMAFile(BytesIO(COMPRESSED_XZ * 5 + COMPRESSED_BOGUS)) as f: self.assertEqual(f.read(), INPUT * 5) def test_read_from_file(self): with TempFile(TESTFN, COMPRESSED_XZ): with LZMAFile(TESTFN) as f: self.assertEqual(f.read(), INPUT) self.assertEqual(f.read(), b"") def test_read_from_file_with_bytes_filename(self): try: bytes_filename = TESTFN.encode("ascii") except UnicodeEncodeError: self.skipTest("Temporary file name needs to be ASCII") with TempFile(TESTFN, COMPRESSED_XZ): with LZMAFile(bytes_filename) as f: self.assertEqual(f.read(), INPUT) self.assertEqual(f.read(), b"") def test_read_incomplete(self): with LZMAFile(BytesIO(COMPRESSED_XZ[:128])) as f: self.assertRaises(EOFError, f.read) def test_read_truncated(self): # Drop stream footer: CRC (4 bytes), index size (4 bytes), # flags (2 bytes) and magic number (2 bytes). truncated = COMPRESSED_XZ[:-12] with LZMAFile(BytesIO(truncated)) as f: self.assertRaises(EOFError, f.read) with LZMAFile(BytesIO(truncated)) as f: self.assertEqual(f.read(len(INPUT)), INPUT) self.assertRaises(EOFError, f.read, 1) # Incomplete 12-byte header. 
for i in range(12): with LZMAFile(BytesIO(truncated[:i])) as f: self.assertRaises(EOFError, f.read, 1) def test_read_bad_args(self): f = LZMAFile(BytesIO(COMPRESSED_XZ)) f.close() self.assertRaises(ValueError, f.read) with LZMAFile(BytesIO(), "w") as f: self.assertRaises(ValueError, f.read) with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: self.assertRaises(TypeError, f.read, None) def test_read_bad_data(self): with LZMAFile(BytesIO(COMPRESSED_BOGUS)) as f: self.assertRaises(LZMAError, f.read) def test_read1(self): with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: blocks = [] while True: result = f.read1() if not result: break blocks.append(result) self.assertEqual(b"".join(blocks), INPUT) self.assertEqual(f.read1(), b"") def test_read1_0(self): with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: self.assertEqual(f.read1(0), b"") def test_read1_10(self): with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: blocks = [] while True: result = f.read1(10) if not result: break blocks.append(result) self.assertEqual(b"".join(blocks), INPUT) self.assertEqual(f.read1(), b"") def test_read1_multistream(self): with LZMAFile(BytesIO(COMPRESSED_XZ * 5)) as f: blocks = [] while True: result = f.read1() if not result: break blocks.append(result) self.assertEqual(b"".join(blocks), INPUT * 5) self.assertEqual(f.read1(), b"") def test_read1_bad_args(self): f = LZMAFile(BytesIO(COMPRESSED_XZ)) f.close() self.assertRaises(ValueError, f.read1) with LZMAFile(BytesIO(), "w") as f: self.assertRaises(ValueError, f.read1) with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: self.assertRaises(TypeError, f.read1, None) def test_peek(self): with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: result = f.peek() self.assertGreater(len(result), 0) self.assertTrue(INPUT.startswith(result)) self.assertEqual(f.read(), INPUT) with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: result = f.peek(10) self.assertGreater(len(result), 0) self.assertTrue(INPUT.startswith(result)) self.assertEqual(f.read(), INPUT) def test_peek_bad_args(self): with LZMAFile(BytesIO(), "w") as f: self.assertRaises(ValueError, f.peek) def test_iterator(self): with BytesIO(INPUT) as f: lines = f.readlines() with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: self.assertListEqual(list(iter(f)), lines) with LZMAFile(BytesIO(COMPRESSED_ALONE)) as f: self.assertListEqual(list(iter(f)), lines) with LZMAFile(BytesIO(COMPRESSED_XZ), format=lzma.FORMAT_XZ) as f: self.assertListEqual(list(iter(f)), lines) with LZMAFile(BytesIO(COMPRESSED_ALONE), format=lzma.FORMAT_ALONE) as f: self.assertListEqual(list(iter(f)), lines) with LZMAFile(BytesIO(COMPRESSED_RAW_2), format=lzma.FORMAT_RAW, filters=FILTERS_RAW_2) as f: self.assertListEqual(list(iter(f)), lines) def test_readline(self): with BytesIO(INPUT) as f: lines = f.readlines() with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: for line in lines: self.assertEqual(f.readline(), line) def test_readlines(self): with BytesIO(INPUT) as f: lines = f.readlines() with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: self.assertListEqual(f.readlines(), lines) def test_write(self): with BytesIO() as dst: with LZMAFile(dst, "w") as f: f.write(INPUT) expected = lzma.compress(INPUT) self.assertEqual(dst.getvalue(), expected) with BytesIO() as dst: with LZMAFile(dst, "w", format=lzma.FORMAT_XZ) as f: f.write(INPUT) expected = lzma.compress(INPUT, format=lzma.FORMAT_XZ) self.assertEqual(dst.getvalue(), expected) with BytesIO() as dst: with LZMAFile(dst, "w", format=lzma.FORMAT_ALONE) as f: f.write(INPUT) expected = lzma.compress(INPUT, format=lzma.FORMAT_ALONE) self.assertEqual(dst.getvalue(), expected) with 
BytesIO() as dst: with LZMAFile(dst, "w", format=lzma.FORMAT_RAW, filters=FILTERS_RAW_2) as f: f.write(INPUT) expected = lzma.compress(INPUT, format=lzma.FORMAT_RAW, filters=FILTERS_RAW_2) self.assertEqual(dst.getvalue(), expected) def test_write_10(self): with BytesIO() as dst: with LZMAFile(dst, "w") as f: for start in range(0, len(INPUT), 10): f.write(INPUT[start:start+10]) expected = lzma.compress(INPUT) self.assertEqual(dst.getvalue(), expected) def test_write_append(self): part1 = INPUT[:1024] part2 = INPUT[1024:1536] part3 = INPUT[1536:] expected = b"".join(lzma.compress(x) for x in (part1, part2, part3)) with BytesIO() as dst: with LZMAFile(dst, "w") as f: f.write(part1) with LZMAFile(dst, "a") as f: f.write(part2) with LZMAFile(dst, "a") as f: f.write(part3) self.assertEqual(dst.getvalue(), expected) def test_write_to_file(self): try: with LZMAFile(TESTFN, "w") as f: f.write(INPUT) expected = lzma.compress(INPUT) with open(TESTFN, "rb") as f: self.assertEqual(f.read(), expected) finally: unlink(TESTFN) def test_write_to_file_with_bytes_filename(self): try: bytes_filename = TESTFN.encode("ascii") except UnicodeEncodeError: self.skipTest("Temporary file name needs to be ASCII") try: with LZMAFile(bytes_filename, "w") as f: f.write(INPUT) expected = lzma.compress(INPUT) with open(TESTFN, "rb") as f: self.assertEqual(f.read(), expected) finally: unlink(TESTFN) def test_write_append_to_file(self): part1 = INPUT[:1024] part2 = INPUT[1024:1536] part3 = INPUT[1536:] expected = b"".join(lzma.compress(x) for x in (part1, part2, part3)) try: with LZMAFile(TESTFN, "w") as f: f.write(part1) with LZMAFile(TESTFN, "a") as f: f.write(part2) with LZMAFile(TESTFN, "a") as f: f.write(part3) with open(TESTFN, "rb") as f: self.assertEqual(f.read(), expected) finally: unlink(TESTFN) def test_write_bad_args(self): f = LZMAFile(BytesIO(), "w") f.close() self.assertRaises(ValueError, f.write, b"foo") with LZMAFile(BytesIO(COMPRESSED_XZ), "r") as f: self.assertRaises(ValueError, f.write, b"bar") with LZMAFile(BytesIO(), "w") as f: self.assertRaises(TypeError, f.write, None) self.assertRaises(TypeError, f.write, "text") self.assertRaises(TypeError, f.write, 789) def test_writelines(self): with BytesIO(INPUT) as f: lines = f.readlines() with BytesIO() as dst: with LZMAFile(dst, "w") as f: f.writelines(lines) expected = lzma.compress(INPUT) self.assertEqual(dst.getvalue(), expected) def test_seek_forward(self): with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: f.seek(555) self.assertEqual(f.read(), INPUT[555:]) def test_seek_forward_across_streams(self): with LZMAFile(BytesIO(COMPRESSED_XZ * 2)) as f: f.seek(len(INPUT) + 123) self.assertEqual(f.read(), INPUT[123:]) def test_seek_forward_relative_to_current(self): with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: f.read(100) f.seek(1236, 1) self.assertEqual(f.read(), INPUT[1336:]) def test_seek_forward_relative_to_end(self): with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: f.seek(-555, 2) self.assertEqual(f.read(), INPUT[-555:]) def test_seek_backward(self): with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: f.read(1001) f.seek(211) self.assertEqual(f.read(), INPUT[211:]) def test_seek_backward_across_streams(self): with LZMAFile(BytesIO(COMPRESSED_XZ * 2)) as f: f.read(len(INPUT) + 333) f.seek(737) self.assertEqual(f.read(), INPUT[737:] + INPUT) def test_seek_backward_relative_to_end(self): with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: f.seek(-150, 2) self.assertEqual(f.read(), INPUT[-150:]) def test_seek_past_end(self): with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: f.seek(len(INPUT) + 
9001) self.assertEqual(f.tell(), len(INPUT)) self.assertEqual(f.read(), b"") def test_seek_past_start(self): with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: f.seek(-88) self.assertEqual(f.tell(), 0) self.assertEqual(f.read(), INPUT) def test_seek_bad_args(self): f = LZMAFile(BytesIO(COMPRESSED_XZ)) f.close() self.assertRaises(ValueError, f.seek, 0) with LZMAFile(BytesIO(), "w") as f: self.assertRaises(ValueError, f.seek, 0) with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: self.assertRaises(ValueError, f.seek, 0, 3) self.assertRaises(ValueError, f.seek, 9, ()) self.assertRaises(TypeError, f.seek, None) self.assertRaises(TypeError, f.seek, b"derp") def test_tell(self): with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: pos = 0 while True: self.assertEqual(f.tell(), pos) result = f.read(183) if not result: break pos += len(result) self.assertEqual(f.tell(), len(INPUT)) with LZMAFile(BytesIO(), "w") as f: for pos in range(0, len(INPUT), 144): self.assertEqual(f.tell(), pos) f.write(INPUT[pos:pos+144]) self.assertEqual(f.tell(), len(INPUT)) def test_tell_bad_args(self): f = LZMAFile(BytesIO(COMPRESSED_XZ)) f.close() self.assertRaises(ValueError, f.tell) class OpenTestCase(unittest.TestCase): def test_binary_modes(self): with lzma.open(BytesIO(COMPRESSED_XZ), "rb") as f: self.assertEqual(f.read(), INPUT) with BytesIO() as bio: with lzma.open(bio, "wb") as f: f.write(INPUT) file_data = lzma.decompress(bio.getvalue()) self.assertEqual(file_data, INPUT) with lzma.open(bio, "ab") as f: f.write(INPUT) file_data = lzma.decompress(bio.getvalue()) self.assertEqual(file_data, INPUT * 2) def test_text_modes(self): uncompressed = INPUT.decode("ascii") uncompressed_raw = uncompressed.replace("\n", os.linesep) with lzma.open(BytesIO(COMPRESSED_XZ), "rt") as f: self.assertEqual(f.read(), uncompressed) with BytesIO() as bio: with lzma.open(bio, "wt") as f: f.write(uncompressed) file_data = lzma.decompress(bio.getvalue()).decode("ascii") self.assertEqual(file_data, uncompressed_raw) with lzma.open(bio, "at") as f: f.write(uncompressed) file_data = lzma.decompress(bio.getvalue()).decode("ascii") self.assertEqual(file_data, uncompressed_raw * 2) def test_filename(self): with TempFile(TESTFN): with lzma.open(TESTFN, "wb") as f: f.write(INPUT) with open(TESTFN, "rb") as f: file_data = lzma.decompress(f.read()) self.assertEqual(file_data, INPUT) with lzma.open(TESTFN, "rb") as f: self.assertEqual(f.read(), INPUT) with lzma.open(TESTFN, "ab") as f: f.write(INPUT) with lzma.open(TESTFN, "rb") as f: self.assertEqual(f.read(), INPUT * 2) def test_bad_params(self): # Test invalid parameter combinations. with self.assertRaises(ValueError): lzma.open(TESTFN, "") with self.assertRaises(ValueError): lzma.open(TESTFN, "rbt") with self.assertRaises(ValueError): lzma.open(TESTFN, "rb", encoding="utf-8") with self.assertRaises(ValueError): lzma.open(TESTFN, "rb", errors="ignore") with self.assertRaises(ValueError): lzma.open(TESTFN, "rb", newline="\n") def test_format_and_filters(self): # Test non-default format and filter chain. options = {"format": lzma.FORMAT_RAW, "filters": FILTERS_RAW_1} with lzma.open(BytesIO(COMPRESSED_RAW_1), "rb", **options) as f: self.assertEqual(f.read(), INPUT) with BytesIO() as bio: with lzma.open(bio, "wb", **options) as f: f.write(INPUT) file_data = lzma.decompress(bio.getvalue(), **options) self.assertEqual(file_data, INPUT) def test_encoding(self): # Test non-default encoding. 
uncompressed = INPUT.decode("ascii") uncompressed_raw = uncompressed.replace("\n", os.linesep) with BytesIO() as bio: with lzma.open(bio, "wt", encoding="utf-16-le") as f: f.write(uncompressed) file_data = lzma.decompress(bio.getvalue()).decode("utf-16-le") self.assertEqual(file_data, uncompressed_raw) bio.seek(0) with lzma.open(bio, "rt", encoding="utf-16-le") as f: self.assertEqual(f.read(), uncompressed) def test_encoding_error_handler(self): # Test wih non-default encoding error handler. with BytesIO(lzma.compress(b"foo\xffbar")) as bio: with lzma.open(bio, "rt", encoding="ascii", errors="ignore") as f: self.assertEqual(f.read(), "foobar") def test_newline(self): # Test with explicit newline (universal newline mode disabled). text = INPUT.decode("ascii") with BytesIO() as bio: with lzma.open(bio, "wt", newline="\n") as f: f.write(text) bio.seek(0) with lzma.open(bio, "rt", newline="\r") as f: self.assertEqual(f.readlines(), [text]) def test_x_mode(self): self.addCleanup(unlink, TESTFN) for mode in ("x", "xb", "xt"): unlink(TESTFN) with lzma.open(TESTFN, mode): pass with self.assertRaises(FileExistsError): with lzma.open(TESTFN, mode): pass class MiscellaneousTestCase(unittest.TestCase): def test_is_check_supported(self): # CHECK_NONE and CHECK_CRC32 should always be supported, # regardless of the options liblzma was compiled with. self.assertTrue(lzma.is_check_supported(lzma.CHECK_NONE)) self.assertTrue(lzma.is_check_supported(lzma.CHECK_CRC32)) # The .xz format spec cannot store check IDs above this value. self.assertFalse(lzma.is_check_supported(lzma.CHECK_ID_MAX + 1)) # This value should not be a valid check ID. self.assertFalse(lzma.is_check_supported(lzma.CHECK_UNKNOWN)) def test__encode_filter_properties(self): with self.assertRaises(TypeError): lzma._encode_filter_properties(b"not a dict") with self.assertRaises(ValueError): lzma._encode_filter_properties({"id": 0x100}) with self.assertRaises(ValueError): lzma._encode_filter_properties({"id": lzma.FILTER_LZMA2, "junk": 12}) with self.assertRaises(lzma.LZMAError): lzma._encode_filter_properties({"id": lzma.FILTER_DELTA, "dist": 9001}) # Test with parameters used by zipfile module. props = lzma._encode_filter_properties({ "id": lzma.FILTER_LZMA1, "pb": 2, "lp": 0, "lc": 3, "dict_size": 8 << 20, }) self.assertEqual(props, b"]\x00\x00\x80\x00") def test__decode_filter_properties(self): with self.assertRaises(TypeError): lzma._decode_filter_properties(lzma.FILTER_X86, {"should be": bytes}) with self.assertRaises(lzma.LZMAError): lzma._decode_filter_properties(lzma.FILTER_DELTA, b"too long") # Test with parameters used by zipfile module. filterspec = lzma._decode_filter_properties( lzma.FILTER_LZMA1, b"]\x00\x00\x80\x00") self.assertEqual(filterspec["id"], lzma.FILTER_LZMA1) self.assertEqual(filterspec["pb"], 2) self.assertEqual(filterspec["lp"], 0) self.assertEqual(filterspec["lc"], 3) self.assertEqual(filterspec["dict_size"], 8 << 20) def test_filter_properties_roundtrip(self): spec1 = lzma._decode_filter_properties( lzma.FILTER_LZMA1, b"]\x00\x00\x80\x00") reencoded = lzma._encode_filter_properties(spec1) spec2 = lzma._decode_filter_properties(lzma.FILTER_LZMA1, reencoded) self.assertEqual(spec1, spec2) # Test data: INPUT = b""" LAERTES O, fear me not. I stay too long: but here my father comes. Enter POLONIUS A double blessing is a double grace, Occasion smiles upon a second leave. LORD POLONIUS Yet here, Laertes! aboard, aboard, for shame! The wind sits in the shoulder of your sail, And you are stay'd for. 
There; my blessing with thee! And these few precepts in thy memory See thou character. Give thy thoughts no tongue, Nor any unproportioned thought his act. Be thou familiar, but by no means vulgar. Those friends thou hast, and their adoption tried, Grapple them to thy soul with hoops of steel; But do not dull thy palm with entertainment Of each new-hatch'd, unfledged comrade. Beware Of entrance to a quarrel, but being in, Bear't that the opposed may beware of thee. Give every man thy ear, but few thy voice; Take each man's censure, but reserve thy judgment. Costly thy habit as thy purse can buy, But not express'd in fancy; rich, not gaudy; For the apparel oft proclaims the man, And they in France of the best rank and station Are of a most select and generous chief in that. Neither a borrower nor a lender be; For loan oft loses both itself and friend, And borrowing dulls the edge of husbandry. This above all: to thine ownself be true, And it must follow, as the night the day, Thou canst not then be false to any man. Farewell: my blessing season this in thee! LAERTES Most humbly do I take my leave, my lord. <NAME> The time invites you; go; your servants tend. LAERTES Farewell, Ophelia; and remember well What I have said to you. OPHELIA 'Tis in my memory lock'd, And you yourself shall keep the key of it. LAERTES Farewell. """ COMPRESSED_BOGUS = b"this is not a valid lzma stream" COMPRESSED_XZ = ( b"\xfd7zXZ\x00\x00\x04\xe6\xd6\xb4F\x02\x00!\x01\x16\x00\x00\x00t/\xe5\xa3" b"\xe0\x07\x80\x03\xdf]\x00\x05\x14\x07bX\x19\xcd\xddn\x98\x15\xe4\xb4\x9d" b"o\x1d\xc4\xe5\n\x03\xcc2h\xc7\\\x86\xff\xf8\xe2\xfc\xe7\xd9\xfe6\xb8(" b"\xa8wd\xc2\"u.n\x1e\xc3\xf2\x8e\x8d\x8f\x02\x17/\xa6=\xf0\xa2\xdf/M\x89" b"\xbe\xde\xa7\x1cz\x18-]\xd5\xef\x13\x8frZ\x15\x80\x8c\xf8\x8do\xfa\x12" b"\x9b#z/\xef\xf0\xfaF\x01\x82\xa3M\x8e\xa1t\xca6 BF$\xe5Q\xa4\x98\xee\xde" b"l\xe8\x7f\xf0\x9d,bn\x0b\x13\xd4\xa8\x81\xe4N\xc8\x86\x153\xf5x2\xa2O" b"\x13@Q\xa1\x00/\xa5\xd0O\x97\xdco\xae\xf7z\xc4\xcdS\xb6t<\x16\xf2\x9cI#" b"\x89ud\xc66Y\xd9\xee\xe6\xce\x12]\xe5\xf0\xaa\x96-Pe\xade:\x04\t\x1b\xf7" b"\xdb7\n\x86\x1fp\xc8J\xba\xf4\xf0V\xa9\xdc\xf0\x02%G\xf9\xdf=?\x15\x1b" b"\xe1(\xce\x82=\xd6I\xac3\x12\x0cR\xb7\xae\r\xb1i\x03\x95\x01\xbd\xbe\xfa" b"\x02s\x01P\x9d\x96X\xb12j\xc8L\xa8\x84b\xf6\xc3\xd4c-H\x93oJl\xd0iQ\xe4k" b"\x84\x0b\xc1\xb7\xbc\xb1\x17\x88\xb1\xca?@\xf6\x07\xea\xe6x\xf1H12P\x0f" b"\x8a\xc9\xeauw\xe3\xbe\xaai\xa9W\xd0\x80\xcd#cb5\x99\xd8]\xa9d\x0c\xbd" b"\xa2\xdcWl\xedUG\xbf\x89yF\xf77\x81v\xbd5\x98\xbeh8\x18W\x08\xf0\x1b\x99" b"5:\x1a?rD\x96\xa1\x04\x0f\xae\xba\x85\xeb\x9d5@\xf5\x83\xd37\x83\x8ac" b"\x06\xd4\x97i\xcdt\x16S\x82k\xf6K\x01vy\x88\x91\x9b6T\xdae\r\xfd]:k\xbal" b"\xa9\xbba\xc34\xf9r\xeb}r\xdb\xc7\xdb*\x8f\x03z\xdc8h\xcc\xc9\xd3\xbcl" b"\xa5-\xcb\xeaK\xa2\xc5\x15\xc0\xe3\xc1\x86Z\xfb\xebL\xe13\xcf\x9c\xe3" b"\x1d\xc9\xed\xc2\x06\xcc\xce!\x92\xe5\xfe\x9c^\xa59w \x9bP\xa3PK\x08d" b"\xf9\xe2Z}\xa7\xbf\xed\xeb%$\x0c\x82\xb8/\xb0\x01\xa9&,\xf7qh{Q\x96)\xf2" b"q\x96\xc3\x80\xb4\x12\xb0\xba\xe6o\xf4!\xb4[\xd4\x8aw\x10\xf7t\x0c\xb3" b"\xd9\xd5\xc3`^\x81\x11??\\\xa4\x99\x85R\xd4\x8e\x83\xc9\x1eX\xbfa\xf1" b"\xac\xb0\xea\xea\xd7\xd0\xab\x18\xe2\xf2\xed\xe1\xb7\xc9\x18\xcbS\xe4>" b"\xc9\x95H\xe8\xcb\t\r%\xeb\xc7$.o\xf1\xf3R\x17\x1db\xbb\xd8U\xa5^\xccS" b"\x16\x01\x87\xf3/\x93\xd1\xf0v\xc0r\xd7\xcc\xa2Gkz\xca\x80\x0e\xfd\xd0" b"\x8b\xbb\xd2Ix\xb3\x1ey\xca-0\xe3z^\xd6\xd6\x8f_\xf1\x9dP\x9fi\xa7\xd1" b"\xe8\x90\x84\xdc\xbf\xcdky\x8e\xdc\x81\x7f\xa3\xb2+\xbf\x04\xef\xd8\\" 
b"\xc4\xdf\xe1\xb0\x01\xe9\x93\xe3Y\xf1\x1dY\xe8h\x81\xcf\xf1w\xcc\xb4\xef" b" \x8b|\x04\xea\x83ej\xbe\x1f\xd4z\x9c`\xd3\x1a\x92A\x06\xe5\x8f\xa9\x13" b"\t\x9e=\xfa\x1c\xe5_\x9f%v\x1bo\x11ZO\xd8\xf4\t\xddM\x16-\x04\xfc\x18<\"" b"CM\xddg~b\xf6\xef\x8e\x0c\xd0\xde|\xa0'\x8a\x0c\xd6x\xae!J\xa6F\x88\x15u" b"\x008\x17\xbc7y\xb3\xd8u\xac_\x85\x8d\xe7\xc1@\x9c\xecqc\xa3#\xad\xf1" b"\x935\xb5)_\r\xec3]\x0fo]5\xd0my\x07\x9b\xee\x81\xb5\x0f\xcfK+\x00\xc0" b"\xe4b\x10\xe4\x0c\x1a \x9b\xe0\x97t\xf6\xa1\x9e\x850\xba\x0c\x9a\x8d\xc8" b"\x8f\x07\xd7\xae\xc8\xf9+i\xdc\xb9k\xb0>f\x19\xb8\r\xa8\xf8\x1f$\xa5{p" b"\xc6\x880\xce\xdb\xcf\xca_\x86\xac\x88h6\x8bZ%'\xd0\n\xbf\x0f\x9c\"\xba" b"\xe5\x86\x9f\x0f7X=mNX[\xcc\x19FU\xc9\x860\xbc\x90a+* \xae_$\x03\x1e\xd3" b"\xcd_\xa0\x9c\xde\xaf46q\xa5\xc9\x92\xd7\xca\xe3`\x9d\x85}\xb4\xff\xb3" b"\x83\xfb\xb6\xca\xae`\x0bw\x7f\xfc\xd8\xacVe\x19\xc8\x17\x0bZ\xad\x88" b"\xeb#\x97\x03\x13\xb1d\x0f{\x0c\x04w\x07\r\x97\xbd\xd6\xc1\xc3B:\x95\x08" b"^\x10V\xaeaH\x02\xd9\xe3\n\\\x01X\xf6\x9c\x8a\x06u#%\xbe*\xa1\x18v\x85" b"\xec!\t4\x00\x00\x00\x00Vj?uLU\xf3\xa6\x00\x01\xfb\x07\x81\x0f\x00\x00tw" b"\x99P\xb1\xc4g\xfb\x02\x00\x00\x00\x00\x04YZ" ) COMPRESSED_ALONE = ( b"]\x00\x00\x80\x00\xff\xff\xff\xff\xff\xff\xff\xff\x00\x05\x14\x07bX\x19" b"\xcd\xddn\x98\x15\xe4\xb4\x9do\x1d\xc4\xe5\n\x03\xcc2h\xc7\\\x86\xff\xf8" b"\xe2\xfc\xe7\xd9\xfe6\xb8(\xa8wd\xc2\"u.n\x1e\xc3\xf2\x8e\x8d\x8f\x02" b"\x17/\xa6=\xf0\xa2\xdf/M\x89\xbe\xde\xa7\x1cz\x18-]\xd5\xef\x13\x8frZ" b"\x15\x80\x8c\xf8\x8do\xfa\x12\x9b#z/\xef\xf0\xfaF\x01\x82\xa3M\x8e\xa1t" b"\xca6 BF$\xe5Q\xa4\x98\xee\xdel\xe8\x7f\xf0\x9d,bn\x0b\x13\xd4\xa8\x81" b"\xe4N\xc8\x86\x153\xf5x2\xa2O\x13@Q\xa1\x00/\xa5\xd0O\x97\xdco\xae\xf7z" b"\xc4\xcdS\xb6t<\x16\xf2\x9cI#\x89ud\xc66Y\xd9\xee\xe6\xce\x12]\xe5\xf0" b"\xaa\x96-Pe\xade:\x04\t\x1b\xf7\xdb7\n\x86\x1fp\xc8J\xba\xf4\xf0V\xa9" b"\xdc\xf0\x02%G\xf9\xdf=?\x15\x1b\xe1(\xce\x82=\xd6I\xac3\x12\x0cR\xb7" b"\xae\r\xb1i\x03\x95\x01\xbd\xbe\xfa\x02s\x01P\x9d\x96X\xb12j\xc8L\xa8" b"\x84b\xf8\x1epl\xeajr\xd1=\t\x03\xdd\x13\x1b3!E\xf9vV\xdaF\xf3\xd7\xb4" b"\x0c\xa9P~\xec\xdeE\xe37\xf6\x1d\xc6\xbb\xddc%\xb6\x0fI\x07\xf0;\xaf\xe7" b"\xa0\x8b\xa7Z\x99(\xe9\xe2\xf0o\x18>`\xe1\xaa\xa8\xd9\xa1\xb2}\xe7\x8d" b"\x834T\xb6\xef\xc1\xde\xe3\x98\xbcD\x03MA@\xd8\xed\xdc\xc8\x93\x03\x1a" b"\x93\x0b\x7f\x94\x12\x0b\x02Sa\x18\xc9\xc5\x9bTJE}\xf6\xc8g\x17#ZV\x01" b"\xc9\x9dc\x83\x0e>0\x16\x90S\xb8/\x03y_\x18\xfa(\xd7\x0br\xa2\xb0\xba?" 
b"\x8c\xe6\x83@\x84\xdf\x02:\xc5z\x9e\xa6\x84\xc9\xf5BeyX\x83\x1a\xf1 :\t" b"\xf7\x19\xfexD\\&G\xf3\x85Y\xa2J\xf9\x0bv{\x89\xf6\xe7)A\xaf\x04o\x00" b"\x075\xd3\xe0\x7f\x97\x98F\x0f?v\x93\xedVtTf\xb5\x97\x83\xed\x19\xd7\x1a" b"'k\xd7\xd9\xc5\\Y\xd1\xdc\x07\x15|w\xbc\xacd\x87\x08d\xec\xa7\xf6\x82" b"\xfc\xb3\x93\xeb\xb9 \x8d\xbc ,\xb3X\xb0\xd2s\xd7\xd1\xffv\x05\xdf}\xa2" b"\x96\xfb%\n\xdf\xa2\x7f\x08.\xa16\n\xe0\x19\x93\x7fh\n\x1c\x8c\x0f \x11" b"\xc6Bl\x95\x19U}\xe4s\xb5\x10H\xea\x86pB\xe88\x95\xbe\x8cZ\xdb\xe4\x94A" b"\x92\xb9;z\xaa\xa7{\x1c5!\xc0\xaf\xc1A\xf9\xda\xf0$\xb0\x02qg\xc8\xc7/|" b"\xafr\x99^\x91\x88\xbf\x03\xd9=\xd7n\xda6{>8\n\xc7:\xa9'\xba.\x0b\xe2" b"\xb5\x1d\x0e\n\x9a\x8e\x06\x8f:\xdd\x82'[\xc3\"wD$\xa7w\xecq\x8c,1\x93" b"\xd0,\xae2w\x93\x12$Jd\x19mg\x02\x93\x9cA\x95\x9d&\xca8i\x9c\xb0;\xe7NQ" b"\x1frh\x8beL;\xb0m\xee\x07Q\x9b\xc6\xd8\x03\xb5\xdeN\xd4\xfe\x98\xd0\xdc" b"\x1a[\x04\xde\x1a\xf6\x91j\xf8EOli\x8eB^\x1d\x82\x07\xb2\xb5R]\xb7\xd7" b"\xe9\xa6\xc3.\xfb\xf0-\xb4e\x9b\xde\x03\x88\xc6\xc1iN\x0e\x84wbQ\xdf~" b"\xe9\xa4\x884\x96kM\xbc)T\xf3\x89\x97\x0f\x143\xe7)\xa0\xb3B\x00\xa8\xaf" b"\x82^\xcb\xc7..\xdb\xc7\t\x9dH\xee5\xe9#\xe6NV\x94\xcb$Kk\xe3\x7f\r\xe3t" b"\x12\xcf'\xefR\x8b\xf42\xcf-LH\xac\xe5\x1f0~?SO\xeb\xc1E\x1a\x1c]\xf2" b"\xc4<\x11\x02\x10Z0a*?\xe4r\xff\xfb\xff\xf6\x14nG\xead^\xd6\xef8\xb6uEI" b"\x99\nV\xe2\xb3\x95\x8e\x83\xf6i!\xb5&1F\xb1DP\xf4 SO3D!w\x99_G\x7f+\x90" b".\xab\xbb]\x91>\xc9#h;\x0f5J\x91K\xf4^-[\x9e\x8a\\\x94\xca\xaf\xf6\x19" b"\xd4\xa1\x9b\xc4\xb8p\xa1\xae\x15\xe9r\x84\xe0\xcar.l []\x8b\xaf+0\xf2g" b"\x01aKY\xdfI\xcf,\n\xe8\xf0\xe7V\x80_#\xb2\xf2\xa9\x06\x8c>w\xe2W,\xf4" b"\x8c\r\xf963\xf5J\xcc2\x05=kT\xeaUti\xe5_\xce\x1b\xfa\x8dl\x02h\xef\xa8" b"\xfbf\x7f\xff\xf0\x19\xeax" ) FILTERS_RAW_1 = [{"id": lzma.FILTER_LZMA2, "preset": 3}] COMPRESSED_RAW_1 = ( b"\xe0\x07\x80\x03\xfd]\x00\x05\x14\x07bX\x19\xcd\xddn\x96cyq\xa1\xdd\xee" b"\xf8\xfam\xe3'\x88\xd3\xff\xe4\x9e \xceQ\x91\xa4\x14I\xf6\xb9\x9dVL8\x15" b"_\x0e\x12\xc3\xeb\xbc\xa5\xcd\nW\x1d$=R;\x1d\xf8k8\t\xb1{\xd4\xc5+\x9d" b"\x87c\xe5\xef\x98\xb4\xd7S3\xcd\xcc\xd2\xed\xa4\x0em\xe5\xf4\xdd\xd0b" b"\xbe4*\xaa\x0b\xc5\x08\x10\x85+\x81.\x17\xaf9\xc9b\xeaZrA\xe20\x7fs\"r" b"\xdaG\x81\xde\x90cu\xa5\xdb\xa9.A\x08l\xb0<\xf6\x03\xddOi\xd0\xc5\xb4" b"\xec\xecg4t6\"\xa6\xb8o\xb5?\x18^}\xb6}\x03[:\xeb\x03\xa9\n[\x89l\x19g" b"\x16\xc82\xed\x0b\xfb\x86n\xa2\x857@\x93\xcd6T\xc3u\xb0\t\xf9\x1b\x918" b"\xfc[\x1b\x1e4\xb3\x14\x06PCV\xa8\"\xf5\x81x~\xe9\xb5N\x9cK\x9f\xc6\xc3%" b"\xc8k:{6\xe7\xf7\xbd\x05\x02\xb4\xc4\xc3\xd3\xfd\xc3\xa8\\\xfc@\xb1F_" b"\xc8\x90\xd9sU\x98\xad8\x05\x07\xde7J\x8bM\xd0\xb3;X\xec\x87\xef\xae\xb3" b"eO,\xb1z,d\x11y\xeejlB\x02\x1d\xf28\x1f#\x896\xce\x0b\xf0\xf5\xa9PK\x0f" b"\xb3\x13P\xd8\x88\xd2\xa1\x08\x04C?\xdb\x94_\x9a\"\xe9\xe3e\x1d\xde\x9b" b"\xa1\xe8>H\x98\x10;\xc5\x03#\xb5\x9d4\x01\xe7\xc5\xba%v\xa49\x97A\xe0\"" b"\x8c\xc22\xe3i\xc1\x9d\xab3\xdf\xbe\xfdDm7\x1b\x9d\xab\xb5\x15o:J\x92" b"\xdb\x816\x17\xc2O\x99\x1b\x0e\x8d\xf3\tQ\xed\x8e\x95S/\x16M\xb2S\x04" b"\x0f\xc3J\xc6\xc7\xe4\xcb\xc5\xf4\xe7d\x14\xe4=^B\xfb\xd3E\xd3\x1e\xcd" b"\x91\xa5\xd0G\x8f.\xf6\xf9\x0bb&\xd9\x9f\xc2\xfdj\xa2\x9e\xc4\\\x0e\x1dC" b"v\xe8\xd2\x8a?^H\xec\xae\xeb>\xfe\xb8\xab\xd4IqY\x8c\xd4K7\x11\xf4D\xd0W" b"\xa5\xbe\xeaO\xbf\xd0\x04\xfdl\x10\xae5\xd4U\x19\x06\xf9{\xaa\xe0\x81" b"\x0f\xcf\xa3k{\x95\xbd\x19\xa2\xf8\xe4\xa3\x08O*\xf1\xf1B-\xc7(\x0eR\xfd" b"@E\x9f\xd3\x1e:\xfdV\xb7\x04Y\x94\xeb]\x83\xc4\xa5\xd7\xc0gX\x98\xcf\x0f" b"\xcd3\x00]n\x17\xec\xbd\xa3Y\x86\xc5\xf3u\xf6*\xbdT\xedA$A\xd9A\xe7\x98" 
b"\xef\x14\x02\x9a\xfdiw\xec\xa0\x87\x11\xd9%\xc5\xeb\x8a=\xae\xc0\xc4\xc6" b"D\x80\x8f\xa8\xd1\xbbq\xb2\xc0\xa0\xf5Cqp\xeeL\xe3\xe5\xdc \x84\"\xe9" b"\x80t\x83\x05\xba\xf1\xc5~\x93\xc9\xf0\x01c\xceix\x9d\xed\xc5)l\x16)\xd1" b"\x03@l\x04\x7f\x87\xa5yn\x1b\x01D\xaa:\xd2\x96\xb4\xb3?\xb0\xf9\xce\x07" b"\xeb\x81\x00\xe4\xc3\xf5%_\xae\xd4\xf9\xeb\xe2\rh\xb2#\xd67Q\x16D\x82hn" b"\xd1\xa3_?q\xf0\xe2\xac\xf317\x9e\xd0_\x83|\xf1\xca\xb7\x95S\xabW\x12" b"\xff\xddt\xf69L\x01\xf2|\xdaW\xda\xees\x98L\x18\xb8_\xe8$\x82\xea\xd6" b"\xd1F\xd4\x0b\xcdk\x01vf\x88h\xc3\xae\xb91\xc7Q\x9f\xa5G\xd9\xcc\x1f\xe3" b"5\xb1\xdcy\x7fI\x8bcw\x8e\x10rIp\x02:\x19p_\xc8v\xcea\"\xc1\xd9\x91\x03" b"\xbfe\xbe\xa6\xb3\xa8\x14\x18\xc3\xabH*m}\xc2\xc1\x9a}>l%\xce\x84\x99" b"\xb3d\xaf\xd3\x82\x15\xdf\xc1\xfc5fOg\x9b\xfc\x8e^&\t@\xce\x9f\x06J\xb8" b"\xb5\x86\x1d\xda{\x9f\xae\xb0\xff\x02\x81r\x92z\x8cM\xb7ho\xc9^\x9c\xb6" b"\x9c\xae\xd1\xc9\xf4\xdfU7\xd6\\!\xea\x0b\x94k\xb9Ud~\x98\xe7\x86\x8az" b"\x10;\xe3\x1d\xe5PG\xf8\xa4\x12\x05w\x98^\xc4\xb1\xbb\xfb\xcf\xe0\x7f" b"\x033Sf\x0c \xb1\xf6@\x94\xe5\xa3\xb2\xa7\x10\x9a\xc0\x14\xc3s\xb5xRD" b"\xf4`W\xd9\xe5\xd3\xcf\x91\rTZ-X\xbe\xbf\xb5\xe2\xee|\x1a\xbf\xfb\x08" b"\x91\xe1\xfc\x9a\x18\xa3\x8b\xd6^\x89\xf5[\xef\x87\xd1\x06\x1c7\xd6\xa2" b"\t\tQ5/@S\xc05\xd2VhAK\x03VC\r\x9b\x93\xd6M\xf1xO\xaaO\xed\xb9<\x0c\xdae" b"*\xd0\x07Hk6\x9fG+\xa1)\xcd\x9cl\x87\xdb\xe1\xe7\xefK}\x875\xab\xa0\x19u" b"\xf6*F\xb32\x00\x00\x00" ) FILTERS_RAW_2 = [{"id": lzma.FILTER_DELTA, "dist": 2}, {"id": lzma.FILTER_LZMA2, "preset": lzma.PRESET_DEFAULT | lzma.PRESET_EXTREME}] COMPRESSED_RAW_2 = ( b"\xe0\x07\x80\x05\x91]\x00\x05\x14\x06-\xd4\xa8d?\xef\xbe\xafH\xee\x042" b"\xcb.\xb5g\x8f\xfb\x14\xab\xa5\x9f\x025z\xa4\xdd\xd8\t[}W\xf8\x0c\x1dmH" b"\xfa\x05\xfcg\xba\xe5\x01Q\x0b\x83R\xb6A\x885\xc0\xba\xee\n\x1cv~\xde:o" b"\x06:J\xa7\x11Cc\xea\xf7\xe5*o\xf7\x83\\l\xbdE\x19\x1f\r\xa8\x10\xb42" b"\x0caU{\xd7\xb8w\xdc\xbe\x1b\xfc8\xb4\xcc\xd38\\\xf6\x13\xf6\xe7\x98\xfa" b"\xc7[\x17_9\x86%\xa8\xf8\xaa\xb8\x8dfs#\x1e=\xed<\x92\x10\\t\xff\x86\xfb" b"=\x9e7\x18\x1dft\\\xb5\x01\x95Q\xc5\x19\xb38\xe0\xd4\xaa\x07\xc3\x7f\xd8" b"\xa2\x00>-\xd3\x8e\xa1#\xfa\x83ArAm\xdbJ~\x93\xa3B\x82\xe0\xc7\xcc(\x08`" b"WK\xad\x1b\x94kaj\x04 \xde\xfc\xe1\xed\xb0\x82\x91\xefS\x84%\x86\xfbi" b"\x99X\xf1B\xe7\x90;E\xfde\x98\xda\xca\xd6T\xb4bg\xa4\n\x9aj\xd1\x83\x9e]" b"\"\x7fM\xb5\x0fr\xd2\\\xa5j~P\x10GH\xbfN*Z\x10.\x81\tpE\x8a\x08\xbe1\xbd" b"\xcd\xa9\xe1\x8d\x1f\x04\xf9\x0eH\xb9\xae\xd6\xc3\xc1\xa5\xa9\x95P\xdc~" b"\xff\x01\x930\xa9\x04\xf6\x03\xfe\xb5JK\xc3]\xdd9\xb1\xd3\xd7F\xf5\xd1" b"\x1e\xa0\x1c_\xed[\x0c\xae\xd4\x8b\x946\xeb\xbf\xbb\xe3$kS{\xb5\x80,f:Sj" b"\x0f\x08z\x1c\xf5\xe8\xe6\xae\x98\xb0Q~r\x0f\xb0\x05?\xb6\x90\x19\x02&" b"\xcb\x80\t\xc4\xea\x9c|x\xce\x10\x9c\xc5|\xcbdhh+\x0c'\xc5\x81\xc33\xb5" b"\x14q\xd6\xc5\xe3`Z#\xdc\x8a\xab\xdd\xea\x08\xc2I\xe7\x02l{\xec\x196\x06" b"\x91\x8d\xdc\xd5\xb3x\xe1hz%\xd1\xf8\xa5\xdd\x98!\x8c\x1c\xc1\x17RUa\xbb" b"\x95\x0f\xe4X\xea1\x0c\xf1=R\xbe\xc60\xe3\xa4\x9a\x90bd\x97$]B\x01\xdd" b"\x1f\xe3h2c\x1e\xa0L`4\xc6x\xa3Z\x8a\r\x14]T^\xd8\x89\x1b\x92\r;\xedY" b"\x0c\xef\x8d9z\xf3o\xb6)f\xa9]$n\rp\x93\xd0\x10\xa4\x08\xb8\xb2\x8b\xb6" b"\x8f\x80\xae;\xdcQ\xf1\xfa\x9a\x06\x8e\xa5\x0e\x8cK\x9c @\xaa:UcX\n!\xc6" b"\x02\x12\xcb\x1b\"=\x16.\x1f\x176\xf2g=\xe1Wn\xe9\xe1\xd4\xf1O\xad\x15" b"\x86\xe9\xa3T\xaf\xa9\xd7D\xb5\xd1W3pnt\x11\xc7VOj\xb7M\xc4i\xa1\xf1$3" b"\xbb\xdc\x8af\xb0\xc5Y\r\xd1\xfb\xf2\xe7K\xe6\xc5hwO\xfe\x8c2^&\x07\xd5" b"\x1fV\x19\xfd\r\x14\xd2i=yZ\xe6o\xaf\xc6\xb6\x92\x9d\xc4\r\xb3\xafw\xac%" 
b"\xcfc\x1a\xf1`]\xf2\x1a\x9e\x808\xedm\xedQ\xb2\xfe\xe4h`[q\xae\xe0\x0f" b"\xba0g\xb6\"N\xc3\xfb\xcfR\x11\xc5\x18)(\xc40\\\xa3\x02\xd9G!\xce\x1b" b"\xc1\x96x\xb5\xc8z\x1f\x01\xb4\xaf\xde\xc2\xcd\x07\xe7H\xb3y\xa8M\n\\A\t" b"ar\xddM\x8b\x9a\xea\x84\x9b!\xf1\x8d\xb1\xf1~\x1e\r\xa5H\xba\xf1\x84o" b"\xda\x87\x01h\xe9\xa2\xbe\xbeqN\x9d\x84\x0b!WG\xda\xa1\xa5A\xb7\xc7`j" b"\x15\xf2\xe9\xdd?\x015B\xd2~E\x06\x11\xe0\x91!\x05^\x80\xdd\xa8y\x15}" b"\xa1)\xb1)\x81\x18\xf4\xf4\xf8\xc0\xefD\xe3\xdb2f\x1e\x12\xabu\xc9\x97" b"\xcd\x1e\xa7\x0c\x02x4_6\x03\xc4$t\xf39\x94\x1d=\xcb\xbfv\\\xf5\xa3\x1d" b"\x9d8jk\x95\x13)ff\xf9n\xc4\xa9\xe3\x01\xb8\xda\xfb\xab\xdfM\x99\xfb\x05" b"\xe0\xe9\xb0I\xf4E\xab\xe2\x15\xa3\x035\xe7\xdeT\xee\x82p\xb4\x88\xd3" b"\x893\x9c/\xc0\xd6\x8fou;\xf6\x95PR\xa9\xb2\xc1\xefFj\xe2\xa7$\xf7h\xf1" b"\xdfK(\xc9c\xba7\xe8\xe3)\xdd\xb2,\x83\xfb\x84\x18.y\x18Qi\x88\xf8`h-" b"\xef\xd5\xed\x8c\t\xd8\xc3^\x0f\x00\xb7\xd0[!\xafM\x9b\xd7.\x07\xd8\xfb" b"\xd9\xe2-S+\xaa8,\xa0\x03\x1b \xea\xa8\x00\xc3\xab~\xd0$e\xa5\x7f\xf7" b"\x95P]\x12\x19i\xd9\x7fo\x0c\xd8g^\rE\xa5\x80\x18\xc5\x01\x80\xaek`\xff~" b"\xb6y\xe7+\xe5\x11^D\xa7\x85\x18\"!\xd6\xd2\xa7\xf4\x1eT\xdb\x02\xe15" b"\x02Y\xbc\x174Z\xe7\x9cH\x1c\xbf\x0f\xc6\xe9f]\xcf\x8cx\xbc\xe5\x15\x94" b"\xfc3\xbc\xa7TUH\xf1\x84\x1b\xf7\xa9y\xc07\x84\xf8X\xd8\xef\xfc \x1c\xd8" b"( /\xf2\xb7\xec\xc1\\\x8c\xf6\x95\xa1\x03J\x83vP8\xe1\xe3\xbb~\xc24kA" b"\x98y\xa1\xf2P\xe9\x9d\xc9J\xf8N\x99\xb4\xceaO\xde\x16\x1e\xc2\x19\xa7" b"\x03\xd2\xe0\x8f:\x15\xf3\x84\x9e\xee\xe6e\xb8\x02q\xc7AC\x1emw\xfd\t" b"\x9a\x1eu\xc1\xa9\xcaCwUP\x00\xa5\xf78L4w!\x91L2 \x87\xd0\xf2\x06\x81j" b"\x80;\x03V\x06\x87\x92\xcb\x90lv@E\x8d\x8d\xa5\xa6\xe7Z[\xdf\xd6E\x03`>" b"\x8f\xde\xa1bZ\x84\xd0\xa9`\x05\x0e{\x80;\xe3\xbef\x8d\x1d\xebk1.\xe3" b"\xe9N\x15\xf7\xd4(\xfa\xbb\x15\xbdu\xf7\x7f\x86\xae!\x03L\x1d\xb5\xc1" b"\xb9\x11\xdb\xd0\x93\xe4\x02\xe1\xd2\xcbBjc_\xe8}d\xdb\xc3\xa0Y\xbe\xc9/" b"\x95\x01\xa3,\xe6bl@\x01\xdbp\xc2\xce\x14\x168\xc2q\xe3uH\x89X\xa4\xa9" b"\x19\x1d\xc1}\x7fOX\x19\x9f\xdd\xbe\x85\x83\xff\x96\x1ee\x82O`CF=K\xeb$I" b"\x17_\xefX\x8bJ'v\xde\x1f+\xd9.v\xf8Tv\x17\xf2\x9f5\x19\xe1\xb9\x91\xa8S" b"\x86\xbd\x1a\"(\xa5x\x8dC\x03X\x81\x91\xa8\x11\xc4pS\x13\xbc\xf2'J\xae!" 
b"\xef\xef\x84G\t\x8d\xc4\x10\x132\x00oS\x9e\xe0\xe4d\x8f\xb8y\xac\xa6\x9f" b",\xb8f\x87\r\xdf\x9eE\x0f\xe1\xd0\\L\x00\xb2\xe1h\x84\xef}\x98\xa8\x11" b"\xccW#\\\x83\x7fo\xbbz\x8f\x00" ) FILTERS_RAW_3 = [{"id": lzma.FILTER_IA64, "start_offset": 0x100}, {"id": lzma.FILTER_LZMA2}] COMPRESSED_RAW_3 = ( b"\xe0\x07\x80\x03\xdf]\x00\x05\x14\x07bX\x19\xcd\xddn\x98\x15\xe4\xb4\x9d" b"o\x1d\xc4\xe5\n\x03\xcc2h\xc7\\\x86\xff\xf8\xe2\xfc\xe7\xd9\xfe6\xb8(" b"\xa8wd\xc2\"u.n\x1e\xc3\xf2\x8e\x8d\x8f\x02\x17/\xa6=\xf0\xa2\xdf/M\x89" b"\xbe\xde\xa7\x1cz\x18-]\xd5\xef\x13\x8frZ\x15\x80\x8c\xf8\x8do\xfa\x12" b"\x9b#z/\xef\xf0\xfaF\x01\x82\xa3M\x8e\xa1t\xca6 BF$\xe5Q\xa4\x98\xee\xde" b"l\xe8\x7f\xf0\x9d,bn\x0b\x13\xd4\xa8\x81\xe4N\xc8\x86\x153\xf5x2\xa2O" b"\x13@Q\xa1\x00/\xa5\xd0O\x97\xdco\xae\xf7z\xc4\xcdS\xb6t<\x16\xf2\x9cI#" b"\x89ud\xc66Y\xd9\xee\xe6\xce\x12]\xe5\xf0\xaa\x96-Pe\xade:\x04\t\x1b\xf7" b"\xdb7\n\x86\x1fp\xc8J\xba\xf4\xf0V\xa9\xdc\xf0\x02%G\xf9\xdf=?\x15\x1b" b"\xe1(\xce\x82=\xd6I\xac3\x12\x0cR\xb7\xae\r\xb1i\x03\x95\x01\xbd\xbe\xfa" b"\x02s\x01P\x9d\x96X\xb12j\xc8L\xa8\x84b\xf6\xc3\xd4c-H\x93oJl\xd0iQ\xe4k" b"\x84\x0b\xc1\xb7\xbc\xb1\x17\x88\xb1\xca?@\xf6\x07\xea\xe6x\xf1H12P\x0f" b"\x8a\xc9\xeauw\xe3\xbe\xaai\xa9W\xd0\x80\xcd#cb5\x99\xd8]\xa9d\x0c\xbd" b"\xa2\xdcWl\xedUG\xbf\x89yF\xf77\x81v\xbd5\x98\xbeh8\x18W\x08\xf0\x1b\x99" b"5:\x1a?rD\x96\xa1\x04\x0f\xae\xba\x85\xeb\x9d5@\xf5\x83\xd37\x83\x8ac" b"\x06\xd4\x97i\xcdt\x16S\x82k\xf6K\x01vy\x88\x91\x9b6T\xdae\r\xfd]:k\xbal" b"\xa9\xbba\xc34\xf9r\xeb}r\xdb\xc7\xdb*\x8f\x03z\xdc8h\xcc\xc9\xd3\xbcl" b"\xa5-\xcb\xeaK\xa2\xc5\x15\xc0\xe3\xc1\x86Z\xfb\xebL\xe13\xcf\x9c\xe3" b"\x1d\xc9\xed\xc2\x06\xcc\xce!\x92\xe5\xfe\x9c^\xa59w \x9bP\xa3PK\x08d" b"\xf9\xe2Z}\xa7\xbf\xed\xeb%$\x0c\x82\xb8/\xb0\x01\xa9&,\xf7qh{Q\x96)\xf2" b"q\x96\xc3\x80\xb4\x12\xb0\xba\xe6o\xf4!\xb4[\xd4\x8aw\x10\xf7t\x0c\xb3" b"\xd9\xd5\xc3`^\x81\x11??\\\xa4\x99\x85R\xd4\x8e\x83\xc9\x1eX\xbfa\xf1" b"\xac\xb0\xea\xea\xd7\xd0\xab\x18\xe2\xf2\xed\xe1\xb7\xc9\x18\xcbS\xe4>" b"\xc9\x95H\xe8\xcb\t\r%\xeb\xc7$.o\xf1\xf3R\x17\x1db\xbb\xd8U\xa5^\xccS" b"\x16\x01\x87\xf3/\x93\xd1\xf0v\xc0r\xd7\xcc\xa2Gkz\xca\x80\x0e\xfd\xd0" b"\x8b\xbb\xd2Ix\xb3\x1ey\xca-0\xe3z^\xd6\xd6\x8f_\xf1\x9dP\x9fi\xa7\xd1" b"\xe8\x90\x84\xdc\xbf\xcdky\x8e\xdc\x81\x7f\xa3\xb2+\xbf\x04\xef\xd8\\" b"\xc4\xdf\xe1\xb0\x01\xe9\x93\xe3Y\xf1\x1dY\xe8h\x81\xcf\xf1w\xcc\xb4\xef" b" \x8b|\x04\xea\x83ej\xbe\x1f\xd4z\x9c`\xd3\x1a\x92A\x06\xe5\x8f\xa9\x13" b"\t\x9e=\xfa\x1c\xe5_\x9f%v\x1bo\x11ZO\xd8\xf4\t\xddM\x16-\x04\xfc\x18<\"" b"CM\xddg~b\xf6\xef\x8e\x0c\xd0\xde|\xa0'\x8a\x0c\xd6x\xae!J\xa6F\x88\x15u" b"\x008\x17\xbc7y\xb3\xd8u\xac_\x85\x8d\xe7\xc1@\x9c\xecqc\xa3#\xad\xf1" b"\x935\xb5)_\r\xec3]\x0fo]5\xd0my\x07\x9b\xee\x81\xb5\x0f\xcfK+\x00\xc0" b"\xe4b\x10\xe4\x0c\x1a \x9b\xe0\x97t\xf6\xa1\x9e\x850\xba\x0c\x9a\x8d\xc8" b"\x8f\x07\xd7\xae\xc8\xf9+i\xdc\xb9k\xb0>f\x19\xb8\r\xa8\xf8\x1f$\xa5{p" b"\xc6\x880\xce\xdb\xcf\xca_\x86\xac\x88h6\x8bZ%'\xd0\n\xbf\x0f\x9c\"\xba" b"\xe5\x86\x9f\x0f7X=mNX[\xcc\x19FU\xc9\x860\xbc\x90a+* \xae_$\x03\x1e\xd3" b"\xcd_\xa0\x9c\xde\xaf46q\xa5\xc9\x92\xd7\xca\xe3`\x9d\x85}\xb4\xff\xb3" b"\x83\xfb\xb6\xca\xae`\x0bw\x7f\xfc\xd8\xacVe\x19\xc8\x17\x0bZ\xad\x88" b"\xeb#\x97\x03\x13\xb1d\x0f{\x0c\x04w\x07\r\x97\xbd\xd6\xc1\xc3B:\x95\x08" b"^\x10V\xaeaH\x02\xd9\xe3\n\\\x01X\xf6\x9c\x8a\x06u#%\xbe*\xa1\x18v\x85" b"\xec!\t4\x00\x00\x00" ) FILTERS_RAW_4 = [{"id": lzma.FILTER_DELTA, "dist": 4}, {"id": lzma.FILTER_X86, "start_offset": 0x40}, {"id": lzma.FILTER_LZMA2, "preset": 4, "lc": 2}] COMPRESSED_RAW_4 = 
( b"\xe0\x07\x80\x06\x0e\\\x00\x05\x14\x07bW\xaah\xdd\x10\xdc'\xd6\x90,\xc6v" b"Jq \x14l\xb7\x83xB\x0b\x97f=&fx\xba\n>Tn\xbf\x8f\xfb\x1dF\xca\xc3v_\xca?" b"\xfbV<\x92#\xd4w\xa6\x8a\xeb\xf6\x03\xc0\x01\x94\xd8\x9e\x13\x12\x98\xd1" b"*\xfa]c\xe8\x1e~\xaf\xb5]Eg\xfb\x9e\x01\"8\xb2\x90\x06=~\xe9\x91W\xcd" b"\xecD\x12\xc7\xfa\xe1\x91\x06\xc7\x99\xb9\xe3\x901\x87\x19u\x0f\x869\xff" b"\xc1\xb0hw|\xb0\xdcl\xcck\xb16o7\x85\xee{Y_b\xbf\xbc$\xf3=\x8d\x8bw\xe5Z" b"\x08@\xc4kmE\xad\xfb\xf6*\xd8\xad\xa1\xfb\xc5{\xdej,)\x1emB\x1f<\xaeca" b"\x80(\xee\x07 \xdf\xe9\xf8\xeb\x0e-\x97\x86\x90c\xf9\xea'B\xf7`\xd7\xb0" b"\x92\xbd\xa0\x82]\xbd\x0e\x0eB\x19\xdc\x96\xc6\x19\xd86D\xf0\xd5\x831" b"\x03\xb7\x1c\xf7&5\x1a\x8f PZ&j\xf8\x98\x1bo\xcc\x86\x9bS\xd3\xa5\xcdu" b"\xf9$\xcc\x97o\xe5V~\xfb\x97\xb5\x0b\x17\x9c\xfdxW\x10\xfep4\x80\xdaHDY" b"\xfa)\xfet\xb5\"\xd4\xd3F\x81\xf4\x13\x1f\xec\xdf\xa5\x13\xfc\"\x91x\xb7" b"\x99\xce\xc8\x92\n\xeb[\x10l*Y\xd8\xb1@\x06\xc8o\x8d7r\xebu\xfd5\x0e\x7f" b"\xf1$U{\t}\x1fQ\xcfxN\x9d\x9fXX\xe9`\x83\xc1\x06\xf4\x87v-f\x11\xdb/\\" b"\x06\xff\xd7)B\xf3g\x06\x88#2\x1eB244\x7f4q\t\xc893?mPX\x95\xa6a\xfb)d" b"\x9b\xfc\x98\x9aj\x04\xae\x9b\x9d\x19w\xba\xf92\xfaA\x11\\\x17\x97C3\xa4" b"\xbc!\x88\xcdo[\xec:\x030\x91.\x85\xe0@\\4\x16\x12\x9d\xcaJv\x97\xb04" b"\xack\xcbkf\xa3ss\xfc\x16^\x8ce\x85a\xa5=&\xecr\xb3p\xd1E\xd5\x80y\xc7" b"\xda\xf6\xfek\xbcT\xbfH\xee\x15o\xc5\x8c\x830\xec\x1d\x01\xae\x0c-e\\" b"\x91\x90\x94\xb2\xf8\x88\x91\xe8\x0b\xae\xa7>\x98\xf6\x9ck\xd2\xc6\x08" b"\xe6\xab\t\x98\xf2!\xa0\x8c^\xacqA\x99<\x1cEG\x97\xc8\xf1\xb6\xb9\x82" b"\x8d\xf7\x08s\x98a\xff\xe3\xcc\x92\x0e\xd2\xb6U\xd7\xd9\x86\x7fa\xe5\x1c" b"\x8dTG@\t\x1e\x0e7*\xfc\xde\xbc]6N\xf7\xf1\x84\x9e\x9f\xcf\xe9\x1e\xb5'" b"\xf4<\xdf\x99sq\xd0\x9d\xbd\x99\x0b\xb4%p4\xbf{\xbb\x8a\xd2\x0b\xbc=M" b"\x94H:\xf5\xa8\xd6\xa4\xc90\xc2D\xb9\xd3\xa8\xb0S\x87 `\xa2\xeb\xf3W\xce" b" 7\xf9N#\r\xe6\xbe\t\x9d\xe7\x811\xf9\x10\xc1\xc2\x14\xf6\xfc\xcba\xb7" b"\xb1\x7f\x95l\xe4\tjA\xec:\x10\xe5\xfe\xc2\\=D\xe2\x0c\x0b3]\xf7\xc1\xf7" b"\xbceZ\xb1A\xea\x16\xe5\xfddgFQ\xed\xaf\x04\xa3\xd3\xf8\xa2q\x19B\xd4r" b"\xc5\x0c\x9a\x14\x94\xea\x91\xc4o\xe4\xbb\xb4\x99\xf4@\xd1\xe6\x0c\xe3" b"\xc6d\xa0Q\n\xf2/\xd8\xb8S5\x8a\x18:\xb5g\xac\x95D\xce\x17\x07\xd4z\xda" b"\x90\xe65\x07\x19H!\t\xfdu\x16\x8e\x0eR\x19\xf4\x8cl\x0c\xf9Q\xf1\x80" b"\xe3\xbf\xd7O\xf8\x8c\x18\x0b\x9c\xf1\x1fb\xe1\tR\xb2\xf1\xe1A\xea \xcf-" b"IGE\xf1\x14\x98$\x83\x15\xc9\xd8j\xbf\x19\x0f\xd5\xd1\xaa\xb3\xf3\xa5I2s" b"\x8d\x145\xca\xd5\xd93\x9c\xb8D0\xe6\xaa%\xd0\xc0P}JO^h\x8e\x08\xadlV." 
b"\x18\x88\x13\x05o\xb0\x07\xeaw\xe0\xb6\xa4\xd5*\xe4r\xef\x07G+\xc1\xbei[" b"w\xe8\xab@_\xef\x15y\xe5\x12\xc9W\x1b.\xad\x85-\xc2\xf7\xe3mU6g\x8eSA" b"\x01(\xd3\xdb\x16\x13=\xde\x92\xf9,D\xb8\x8a\xb2\xb4\xc9\xc3\xefnE\xe8\\" b"\xa6\xe2Y\xd2\xcf\xcb\x8c\xb6\xd5\xe9\x1d\x1e\x9a\x8b~\xe2\xa6\rE\x84uV" b"\xed\xc6\x99\xddm<\x10[\x0fu\x1f\xc1\x1d1\n\xcfw\xb2%!\xf0[\xce\x87\x83B" b"\x08\xaa,\x08%d\xcef\x94\"\xd9g.\xc83\xcbXY+4\xec\x85qA\n\x1d=9\xf0*\xb1" b"\x1f/\xf3s\xd61b\x7f@\xfb\x9d\xe3FQ\\\xbd\x82\x1e\x00\xf3\xce\xd3\xe1" b"\xca,E\xfd7[\xab\xb6\xb7\xac!mA}\xbd\x9d3R5\x9cF\xabH\xeb\x92)cc\x13\xd0" b"\xbd\xee\xe9n{\x1dIJB\xa5\xeb\x11\xe8`w&`\x8b}@Oxe\t\x8a\x07\x02\x95\xf2" b"\xed\xda|\xb1e\xbe\xaa\xbbg\x19@\xe1Y\x878\x84\x0f\x8c\xe3\xc98\xf2\x9e" b"\xd5N\xb5J\xef\xab!\xe2\x8dq\xe1\xe5q\xc5\xee\x11W\xb7\xe4k*\x027\xa0" b"\xa3J\xf4\xd8m\xd0q\x94\xcb\x07\n:\xb6`.\xe4\x9c\x15+\xc0)\xde\x80X\xd4" b"\xcfQm\x01\xc2cP\x1cA\x85'\xc9\xac\x8b\xe6\xb2)\xe6\x84t\x1c\x92\xe4Z" b"\x1cR\xb0\x9e\x96\xd1\xfb\x1c\xa6\x8b\xcb`\x10\x12]\xf2gR\x9bFT\xe0\xc8H" b"S\xfb\xac<\x04\xc7\xc1\xe8\xedP\xf4\x16\xdb\xc0\xd7e\xc2\x17J^\x1f\xab" b"\xff[\x08\x19\xb4\xf5\xfb\x19\xb4\x04\xe5c~']\xcb\xc2A\xec\x90\xd0\xed" b"\x06,\xc5K{\x86\x03\xb1\xcdMx\xdeQ\x8c3\xf9\x8a\xea=\x89\xaba\xd2\xc89a" b"\xd72\xf0\xc3\x19\x8a\xdfs\xd4\xfd\xbb\x81b\xeaE\"\xd8\xf4d\x0cD\xf7IJ!" b"\xe5d\xbbG\xe9\xcam\xaa\x0f_r\x95\x91NBq\xcaP\xce\xa7\xa9\xb5\x10\x94eP!" b"|\x856\xcd\xbfIir\xb8e\x9bjP\x97q\xabwS7\x1a\x0ehM\xe7\xca\x86?\xdeP}y~" b"\x0f\x95I\xfc\x13\xe1<Q\x1b\x868\x1d\x11\xdf\x94\xf4\x82>r\xa9k\x88\xcb" b"\xfd\xc3v\xe2\xb9\x8a\x02\x8eq\x92I\xf8\xf6\xf1\x03s\x9b\xb8\xe3\"\xe3" b"\xa9\xa5>D\xb8\x96;\xe7\x92\xd133\xe8\xdd'e\xc9.\xdc;\x17\x1f\xf5H\x13q" b"\xa4W\x0c\xdb~\x98\x01\xeb\xdf\xe32\x13\x0f\xddx\n6\xa0\t\x10\xb6\xbb" b"\xb0\xc3\x18\xb6;\x9fj[\xd9\xd5\xc9\x06\x8a\x87\xcd\xe5\xee\xfc\x9c-%@" b"\xee\xe0\xeb\xd2\xe3\xe8\xfb\xc0\x122\\\xc7\xaf\xc2\xa1Oth\xb3\x8f\x82" b"\xb3\x18\xa8\x07\xd5\xee_\xbe\xe0\x1cA\x1e_\r\x9a\xb0\x17W&\xa2D\x91\x94" b"\x1a\xb2\xef\xf2\xdc\x85;X\xb0,\xeb>-7S\xe5\xca\x07)\x1fp\x7f\xcaQBL\xca" b"\xf3\xb9d\xfc\xb5su\xb0\xc8\x95\x90\xeb*)\xa0v\xe4\x9a{FW\xf4l\xde\xcdj" b"\x00" ) def test_main(): run_unittest( CompressorDecompressorTestCase, CompressDecompressFunctionTestCase, FileTestCase, OpenTestCase, MiscellaneousTestCase, ) if __name__ == "__main__": test_main()
cell2location/models/simplified/_cell2location_v3_no_factorisation_module.py
nadavyayon/cell2location
127
131590
import numpy as np import pandas as pd import pyro import pyro.distributions as dist import torch from pyro.nn import PyroModule from scvi import _CONSTANTS from scvi.data._anndata import get_from_registry from scvi.nn import one_hot # class NegativeBinomial(TorchDistributionMixin, ScVINegativeBinomial): # pass class LocationModelMultiExperimentLocationBackgroundNormLevelGeneAlphaPyroModel(PyroModule): """ Cell2location models the elements of :math:`D` as Negative Binomial distributed, given an unobserved gene expression level (rate) :math:`mu` and a gene- and batch-specific over-dispersion parameter :math:`\alpha_{e,g}` which accounts for unexplained variance: .. math:: D_{s,g} \sim \mathtt{NB}(\mu_{s,g}, \alpha_{e,g}) The expression level of genes :math:`\mu_{s,g}` in the mRNA count space is modelled as a linear function of expression signatures of reference cell types :math:`g_{f,g}`: .. math:: \mu_{s,g} = (m_{g} \left (\sum_{f} {w_{s,f} \: g_{f,g}} \right) + s_{e,g}) y_{s} Here, :math:`w_{s,f}` denotes regression weight of each reference signature :math:`f` at location :math:`s`, which can be interpreted as the expected number of cells at location :math:`s` that express reference signature :math:`f`; :math:`g_{f,g}` denotes the reference signatures of cell types :math:`f` of each gene :math:`g`, `cell_state_df` input ; :math:`m_{g}` denotes a gene-specific scaling parameter which adjusts for global differences in sensitivity between technologies (platform effect); :math:`y_{s}` denotes a location/observation-specific scaling parameter which adjusts for differences in sensitivity between observations and batches; :math:`s_{e,g}` is additive component that account for gene- and location-specific shift, such as due to contaminating or free-floating RNA. To account for the similarity of location patterns across cell types, :math:`w_{s,f}` is modelled using another layer of decomposition (factorization) using :math:`r={1, .., R}` groups of cell types, that can be interpreted as cellular compartments or tissue zones. Unless stated otherwise, R is set to 50. Corresponding graphical model can be found in supplementary methods: https://www.biorxiv.org/content/10.1101/2020.11.15.378125v1.supplementary-material Approximate Variational Inference is used to estimate the posterior distribution of all model parameters. Estimation of absolute cell abundance `w_{s,f}` is guided using informed prior on the number of cells (argument called `N_cells_per_location`). It is a tissue-level global estimate, which can be derived from histology images (H&E or DAPI), ideally paired to the spatial expression data or at least representing the same tissue type. This parameter can be estimated by manually counting nuclei in a 10-20 locations in the histology image (e.g. using 10X Loupe browser), and computing the average cell abundance. An appropriate setting of this prior is essential to inform the estimation of absolute cell type abundance values, however, the model is robust to a range of similar values. In settings where suitable histology images are not available, the size of capture regions relative to the expected size of cells can be used to estimate `N_cells_per_location`. The prior on detection efficiency per location :math:`y_s` is selected to discourage over-normalisation, such that unless data has evidence of strong technical effect, the effect is assumed to be small and close to the mean sensitivity for each batch :math:`y_e`: .. 
math:: y_s ~ Gamma(detection_alpha, detection_alpha / y_e) where y_e is unknown/latent average detection efficiency in each batch/experiment: .. math:: y_e ~ Gamma(10, 10 / detection_mean) """ def __init__( self, n_obs, n_vars, n_factors, n_batch, cell_state_mat, n_groups: int = 50, detection_mean=1 / 2, detection_alpha=200.0, m_g_gene_level_prior={"mean": 1, "mean_var_ratio": 1.0, "alpha_mean": 3.0}, N_cells_per_location=8.0, A_factors_per_location=7.0, N_cells_mean_var_ratio=1.0, alpha_g_phi_hyp_prior={"alpha": 9.0, "beta": 3.0}, gene_add_alpha_hyp_prior={"alpha": 9.0, "beta": 3.0}, gene_add_mean_hyp_prior={ "alpha": 1.0, "beta": 100.0, }, detection_hyp_prior={"mean_alpha": 10.0}, w_sf_mean_var_ratio=5.0, ): super().__init__() self.n_obs = n_obs self.n_vars = n_vars self.n_factors = n_factors self.n_batch = n_batch self.n_groups = n_groups self.m_g_gene_level_prior = m_g_gene_level_prior self.alpha_g_phi_hyp_prior = alpha_g_phi_hyp_prior self.w_sf_mean_var_ratio = w_sf_mean_var_ratio self.gene_add_alpha_hyp_prior = gene_add_alpha_hyp_prior self.gene_add_mean_hyp_prior = gene_add_mean_hyp_prior detection_hyp_prior["mean"] = detection_mean detection_hyp_prior["alpha"] = detection_alpha self.detection_hyp_prior = detection_hyp_prior self.register_buffer( "detection_hyp_prior_alpha", torch.tensor(self.detection_hyp_prior["alpha"]), ) self.register_buffer( "detection_mean_hyp_prior_alpha", torch.tensor(self.detection_hyp_prior["mean_alpha"]), ) self.register_buffer( "detection_mean_hyp_prior_beta", torch.tensor(self.detection_hyp_prior["mean_alpha"] / self.detection_hyp_prior["mean"]), ) # compute hyperparameters from mean and sd self.register_buffer("m_g_mu_hyp", torch.tensor(self.m_g_gene_level_prior["mean"])) self.register_buffer( "m_g_mu_mean_var_ratio_hyp", torch.tensor(self.m_g_gene_level_prior["mean_var_ratio"]), ) self.register_buffer("m_g_alpha_hyp_mean", torch.tensor(self.m_g_gene_level_prior["alpha_mean"])) self.cell_state_mat = cell_state_mat self.register_buffer("cell_state", torch.tensor(cell_state_mat.T)) self.register_buffer("N_cells_per_location", torch.tensor(N_cells_per_location)) self.register_buffer("A_factors_per_location", torch.tensor(A_factors_per_location)) self.register_buffer("N_cells_mean_var_ratio", torch.tensor(N_cells_mean_var_ratio)) self.register_buffer( "alpha_g_phi_hyp_prior_alpha", torch.tensor(self.alpha_g_phi_hyp_prior["alpha"]), ) self.register_buffer( "alpha_g_phi_hyp_prior_beta", torch.tensor(self.alpha_g_phi_hyp_prior["beta"]), ) self.register_buffer( "gene_add_alpha_hyp_prior_alpha", torch.tensor(self.gene_add_alpha_hyp_prior["alpha"]), ) self.register_buffer( "gene_add_alpha_hyp_prior_beta", torch.tensor(self.gene_add_alpha_hyp_prior["beta"]), ) self.register_buffer( "gene_add_mean_hyp_prior_alpha", torch.tensor(self.gene_add_mean_hyp_prior["alpha"]), ) self.register_buffer( "gene_add_mean_hyp_prior_beta", torch.tensor(self.gene_add_mean_hyp_prior["beta"]), ) self.register_buffer("w_sf_mean_var_ratio_tensor", torch.tensor(self.w_sf_mean_var_ratio)) self.register_buffer("n_factors_tensor", torch.tensor(self.n_factors)) self.register_buffer("n_groups_tensor", torch.tensor(self.n_groups)) self.register_buffer("ones", torch.ones((1, 1))) self.register_buffer("ones_1_n_groups", torch.ones((1, self.n_groups))) self.register_buffer("ones_1_n_factors", torch.ones((1, self.n_factors))) self.register_buffer("ones_n_batch_1", torch.ones((self.n_batch, 1))) self.register_buffer("eps", torch.tensor(1e-8)) @staticmethod def _get_fn_args_from_batch(tensor_dict): 
x_data = tensor_dict[_CONSTANTS.X_KEY] ind_x = tensor_dict["ind_x"].long().squeeze() batch_index = tensor_dict[_CONSTANTS.BATCH_KEY] return (x_data, ind_x, batch_index), {} def create_plates(self, x_data, idx, batch_index): return pyro.plate("obs_plate", size=self.n_obs, dim=-2, subsample=idx) def list_obs_plate_vars(self): """Create a dictionary with: 1. "name" - the name of observation/minibatch plate; 2. "input" - indexes of model args to provide to encoder network when using amortised inference; 3. "sites" - dictionary with keys - names of variables that belong to the observation plate (used to recognise and merge posterior samples for minibatch variables) values - the dimensions in non-plate axis of each variable (used to construct output layer of encoder network when using amortised inference) """ return { "name": "obs_plate", "input": [0, 2], # expression data + (optional) batch index "input_transform": [ torch.log1p, lambda x: x, ], # how to transform input data before passing to NN "sites": { "w_sf": self.n_factors, "detection_y_s": 1, }, } def forward(self, x_data, idx, batch_index): obs2sample = one_hot(batch_index, self.n_batch) obs_plate = self.create_plates(x_data, idx, batch_index) # =====================Gene expression level scaling m_g======================= # # Explains difference in sensitivity for each gene between single cell and spatial technology m_g_mean = pyro.sample( "m_g_mean", dist.Gamma( self.m_g_mu_mean_var_ratio_hyp * self.m_g_mu_hyp, self.m_g_mu_mean_var_ratio_hyp, ) .expand([1, 1]) .to_event(2), ) # (1, 1) m_g_alpha_e_inv = pyro.sample( "m_g_alpha_e_inv", dist.Exponential(self.m_g_alpha_hyp_mean).expand([1, 1]).to_event(2), ) # (1, 1) m_g_alpha_e = self.ones / m_g_alpha_e_inv.pow(2) m_g = pyro.sample( "m_g", dist.Gamma(m_g_alpha_e, m_g_alpha_e / m_g_mean).expand([1, self.n_vars]).to_event(2), # self.m_g_mu_hyp) ) # (1, n_vars) # =====================Cell abundances w_sf======================= # # factorisation prior on w_sf models similarity in locations # across cell types f and reflects the absolute scale of w_sf n_cells_per_location = pyro.sample( "n_cells_per_location", dist.Gamma( self.N_cells_per_location * self.N_cells_mean_var_ratio, self.N_cells_mean_var_ratio, ), ) a_factors_per_location = pyro.sample( "a_factors_per_location", dist.Gamma(self.A_factors_per_location, self.ones), ) # cell group loadings shape = self.ones_1_n_factors * a_factors_per_location / self.n_factors_tensor rate = self.ones_1_n_factors / (n_cells_per_location / a_factors_per_location) with obs_plate: w_sf = pyro.sample( "w_sf", dist.Gamma( shape, rate, ), ) # (self.n_obs, self.n_factors) # =====================Location-specific detection efficiency ======================= # # y_s with hierarchical mean prior detection_mean_y_e = pyro.sample( "detection_mean_y_e", dist.Gamma( self.ones * self.detection_mean_hyp_prior_alpha, self.ones * self.detection_mean_hyp_prior_beta, ) .expand([self.n_batch, 1]) .to_event(2), ) detection_hyp_prior_alpha = pyro.deterministic( "detection_hyp_prior_alpha", self.ones_n_batch_1 * self.detection_hyp_prior_alpha, ) beta = (obs2sample @ detection_hyp_prior_alpha) / (obs2sample @ detection_mean_y_e) with obs_plate: detection_y_s = pyro.sample( "detection_y_s", dist.Gamma(obs2sample @ detection_hyp_prior_alpha, beta), ) # (self.n_obs, 1) # =====================Gene-specific additive component ======================= # # per gene molecule contribution that cannot be explained by # cell state signatures (e.g. 
background, free-floating RNA) s_g_gene_add_alpha_hyp = pyro.sample( "s_g_gene_add_alpha_hyp", dist.Gamma(self.gene_add_alpha_hyp_prior_alpha, self.gene_add_alpha_hyp_prior_beta), ) s_g_gene_add_mean = pyro.sample( "s_g_gene_add_mean", dist.Gamma( self.gene_add_mean_hyp_prior_alpha, self.gene_add_mean_hyp_prior_beta, ) .expand([self.n_batch, 1]) .to_event(2), ) # (self.n_batch) s_g_gene_add_alpha_e_inv = pyro.sample( "s_g_gene_add_alpha_e_inv", dist.Exponential(s_g_gene_add_alpha_hyp).expand([self.n_batch, 1]).to_event(2), ) # (self.n_batch) s_g_gene_add_alpha_e = self.ones / s_g_gene_add_alpha_e_inv.pow(2) s_g_gene_add = pyro.sample( "s_g_gene_add", dist.Gamma(s_g_gene_add_alpha_e, s_g_gene_add_alpha_e / s_g_gene_add_mean) .expand([self.n_batch, self.n_vars]) .to_event(2), ) # (self.n_batch, n_vars) # =====================Gene-specific overdispersion ======================= # alpha_g_phi_hyp = pyro.sample( "alpha_g_phi_hyp", dist.Gamma(self.alpha_g_phi_hyp_prior_alpha, self.alpha_g_phi_hyp_prior_beta), ) alpha_g_inverse = pyro.sample( "alpha_g_inverse", dist.Exponential(alpha_g_phi_hyp).expand([self.n_batch, self.n_vars]).to_event(2), ) # (self.n_batch, self.n_vars) # =====================Expected expression ======================= # # expected expression mu = ((w_sf @ self.cell_state) * m_g + (obs2sample @ s_g_gene_add)) * detection_y_s alpha = obs2sample @ (self.ones / alpha_g_inverse.pow(2)) # convert mean and overdispersion to total count and logits # total_count, logits = _convert_mean_disp_to_counts_logits( # mu, alpha, eps=self.eps # ) # =====================DATA likelihood ======================= # # Likelihood (sampling distribution) of data_target & add overdispersion via NegativeBinomial with obs_plate: pyro.sample( "data_target", dist.GammaPoisson(concentration=alpha, rate=alpha / mu), # dist.NegativeBinomial(total_count=total_count, logits=logits), obs=x_data, ) # =====================Compute mRNA count from each factor in locations ======================= # with obs_plate: mRNA = w_sf * (self.cell_state * m_g).sum(-1) pyro.deterministic("u_sf_mRNA_factors", mRNA) def compute_expected(self, samples, adata, ind_x=None): r"""Compute expected expression of each gene in each location. Useful for evaluating how well the model learned expression pattern of all genes in the data. """ if ind_x is None: ind_x = np.arange(adata.n_obs).astype(int) else: ind_x = ind_x.astype(int) obs2sample = get_from_registry(adata, _CONSTANTS.BATCH_KEY) obs2sample = pd.get_dummies(obs2sample.flatten()).values[ind_x, :] mu = ( np.dot(samples["w_sf"][ind_x, :], self.cell_state_mat.T) * samples["m_g"] + np.dot(obs2sample, samples["s_g_gene_add"]) ) * samples["detection_y_s"][ind_x, :] alpha = np.dot(obs2sample, 1 / np.power(samples["alpha_g_inverse"], 2)) return {"mu": mu, "alpha": alpha, "ind_x": ind_x}
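The docstring above defines the expected expression as mu_{s,g} = (m_g * sum_f w_{s,f} g_{f,g} + s_{e,g}) * y_s, and compute_expected evaluates the same expression with matrix products. A small numpy sketch with toy, randomly drawn arrays (illustrative shapes only, not model output) traces that composition:

import numpy as np

rng = np.random.default_rng(0)
n_obs, n_factors, n_vars = 5, 3, 10  # toy sizes

w_sf = rng.gamma(2.0, 1.0, size=(n_obs, n_factors))    # cell abundance per location
g_fg = rng.gamma(2.0, 1.0, size=(n_factors, n_vars))   # reference cell-type signatures
m_g = rng.gamma(5.0, 0.2, size=(1, n_vars))            # gene-level technology scaling
s_eg = rng.gamma(1.0, 0.01, size=(n_obs, n_vars))      # additive background, already mapped per observation
y_s = rng.gamma(10.0, 0.1, size=(n_obs, 1))            # per-location detection efficiency

# mu_{s,g} = (m_g * (w_sf @ g_fg) + s_eg) * y_s
mu = (w_sf @ g_fg * m_g + s_eg) * y_s
print(mu.shape)  # (5, 10)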
keras/layers/convolutional.py
averybigant/keras
250
131601
# -*- coding: utf-8 -*- from __future__ import absolute_import import theano import theano.tensor as T from .. import activations, initializations, regularizers, constraints from ..utils.theano_utils import shared_zeros from ..layers.core import Layer class Convolution1D(Layer): def __init__(self, input_dim, nb_filter, filter_length, init='uniform', activation='linear', weights=None, border_mode='valid', subsample_length=1, W_regularizer=None, b_regularizer=None, activity_regularizer=None, W_constraint=None, b_constraint=None): if border_mode not in {'valid', 'full'}: raise Exception('Invalid border mode for Convolution1D:', border_mode) super(Convolution1D, self).__init__() self.nb_filter = nb_filter self.input_dim = input_dim self.filter_length = filter_length self.subsample_length = subsample_length self.init = initializations.get(init) self.activation = activations.get(activation) self.subsample = (1, subsample_length) self.border_mode = border_mode self.input = T.tensor3() self.W_shape = (nb_filter, input_dim, filter_length, 1) self.W = self.init(self.W_shape) self.b = shared_zeros((nb_filter,)) self.params = [self.W, self.b] self.regularizers = [] self.W_regularizer = regularizers.get(W_regularizer) if self.W_regularizer: self.W_regularizer.set_param(self.W) self.regularizers.append(self.W_regularizer) self.b_regularizer = regularizers.get(b_regularizer) if self.b_regularizer: self.b_regularizer.set_param(self.b) self.regularizers.append(self.b_regularizer) self.activity_regularizer = regularizers.get(activity_regularizer) if self.activity_regularizer: self.activity_regularizer.set_layer(self) self.regularizers.append(self.activity_regularizer) self.W_constraint = constraints.get(W_constraint) self.b_constraint = constraints.get(b_constraint) self.constraints = [self.W_constraint, self.b_constraint] if weights is not None: self.set_weights(weights) def get_output(self, train): X = self.get_input(train) X = T.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 2, 1, 3) conv_out = T.nnet.conv.conv2d(X, self.W, border_mode=self.border_mode, subsample=self.subsample) output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x')) return T.reshape(output, (output.shape[0], output.shape[1], output.shape[2])).dimshuffle(0, 2, 1) def get_config(self): return {"name": self.__class__.__name__, "input_dim": self.input_dim, "nb_filter": self.nb_filter, "filter_length": self.filter_length, "init": self.init.__name__, "activation": self.activation.__name__, "border_mode": self.border_mode, "subsample_length": self.subsample_length, "W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None, "b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None, "activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None, "W_constraint": self.W_constraint.get_config() if self.W_constraint else None, "b_constraint": self.b_constraint.get_config() if self.b_constraint else None} class MaxPooling1D(Layer): def __init__(self, pool_length=2, stride=None, ignore_border=True): super(MaxPooling1D, self).__init__() self.pool_length = pool_length self.stride = stride if self.stride: self.st = (1, self.stride) else: self.st = None self.input = T.tensor3() self.poolsize = (1, pool_length) self.ignore_border = ignore_border def get_output(self, train): X = self.get_input(train) X = T.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 1, 3, 2) output = T.signal.downsample.max_pool_2d(X, 
ds=self.poolsize, st=self.st, ignore_border=self.ignore_border) output = output.dimshuffle(0, 1, 3, 2) return T.reshape(output, (output.shape[0], output.shape[1], output.shape[2])) def get_config(self): return {"name": self.__class__.__name__, "stride": self.stride, "pool_length": self.pool_length, "ignore_border": self.ignore_border, "subsample_length": self.subsample_length} class Convolution2D(Layer): def __init__(self, nb_filter, stack_size, nb_row, nb_col, init='glorot_uniform', activation='linear', weights=None, border_mode='valid', subsample=(1, 1), W_regularizer=None, b_regularizer=None, activity_regularizer=None, W_constraint=None, b_constraint=None): if border_mode not in {'valid', 'full', 'same'}: raise Exception('Invalid border mode for Convolution2D:', border_mode) super(Convolution2D, self).__init__() self.init = initializations.get(init) self.activation = activations.get(activation) self.subsample = subsample self.border_mode = border_mode self.nb_filter = nb_filter self.stack_size = stack_size self.nb_row = nb_row self.nb_col = nb_col self.input = T.tensor4() self.W_shape = (nb_filter, stack_size, nb_row, nb_col) self.W = self.init(self.W_shape) self.b = shared_zeros((nb_filter,)) self.params = [self.W, self.b] self.regularizers = [] self.W_regularizer = regularizers.get(W_regularizer) if self.W_regularizer: self.W_regularizer.set_param(self.W) self.regularizers.append(self.W_regularizer) self.b_regularizer = regularizers.get(b_regularizer) if self.b_regularizer: self.b_regularizer.set_param(self.b) self.regularizers.append(self.b_regularizer) self.activity_regularizer = regularizers.get(activity_regularizer) if self.activity_regularizer: self.activity_regularizer.set_layer(self) self.regularizers.append(self.activity_regularizer) self.W_constraint = constraints.get(W_constraint) self.b_constraint = constraints.get(b_constraint) self.constraints = [self.W_constraint, self.b_constraint] if weights is not None: self.set_weights(weights) def get_output(self, train): X = self.get_input(train) border_mode = self.border_mode if border_mode == 'same': border_mode = 'full' conv_out = T.nnet.conv.conv2d(X, self.W, border_mode=border_mode, subsample=self.subsample) if self.border_mode == 'same': shift_x = (self.nb_row - 1) // 2 shift_y = (self.nb_col - 1) // 2 conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, shift_y:X.shape[3] + shift_y] return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x')) def get_config(self): return {"name": self.__class__.__name__, "nb_filter": self.nb_filter, "stack_size": self.stack_size, "nb_row": self.nb_row, "nb_col": self.nb_col, "init": self.init.__name__, "activation": self.activation.__name__, "border_mode": self.border_mode, "subsample": self.subsample, "W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None, "b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None, "activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None, "W_constraint": self.W_constraint.get_config() if self.W_constraint else None, "b_constraint": self.b_constraint.get_config() if self.b_constraint else None} class MaxPooling2D(Layer): def __init__(self, poolsize=(2, 2), stride=None, ignore_border=True): super(MaxPooling2D, self).__init__() self.input = T.tensor4() self.poolsize = poolsize self.stride = stride self.ignore_border = ignore_border def get_output(self, train): X = self.get_input(train) output = T.signal.downsample.max_pool_2d(X, ds=self.poolsize, 
st=self.stride, ignore_border=self.ignore_border) return output def get_config(self): return {"name": self.__class__.__name__, "poolsize": self.poolsize, "ignore_border": self.ignore_border, "stride": self.stride} class ZeroPadding2D(Layer): def __init__(self, width=1): super(ZeroPadding2D, self).__init__() self.width = width self.input = T.tensor4() def get_output(self, train): X = self.get_input(train) width = self.width in_shape = X.shape out_shape = (in_shape[0], in_shape[1], in_shape[2] + 2 * width, in_shape[3] + 2 * width) out = T.zeros(out_shape) indices = (slice(None), slice(None), slice(width, in_shape[2] + width), slice(width, in_shape[3] + width)) return T.set_subtensor(out[indices], X) def get_config(self): return {"name": self.__class__.__name__, "width": self.width}
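Convolution1D above implements a 1D convolution by reshaping the (batch, steps, input_dim) input into a 4D tensor and running a 2D convolution over the time axis. A numpy sketch of just the shape bookkeeping (it mirrors the reshape/dimshuffle calls but performs no convolution) makes the trick easier to follow:

import numpy as np

batch, steps, input_dim = 2, 7, 4        # illustrative sizes
nb_filter, filter_length = 3, 3

X = np.zeros((batch, steps, input_dim))

# T.reshape(X, (b, steps, input_dim, 1)).dimshuffle(0, 2, 1, 3)
X4 = X.reshape(batch, steps, input_dim, 1).transpose(0, 2, 1, 3)
print(X4.shape)  # (2, 4, 7, 1): channels=input_dim, rows=steps, cols=1

# A 'valid' 2D convolution with W of shape (nb_filter, input_dim, filter_length, 1)
# would yield (batch, nb_filter, steps - filter_length + 1, 1).
out_rows = steps - filter_length + 1
conv_out = np.zeros((batch, nb_filter, out_rows, 1))

# Final reshape + dimshuffle back to (batch, new_steps, nb_filter)
out = conv_out.reshape(batch, nb_filter, out_rows).transpose(0, 2, 1)
print(out.shape)  # (2, 5, 3)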
simple_rl/mdp/StateClass.py
david-abel/mdps
230
131604
# Python imports from collections.abc import Sequence import numpy as np ''' StateClass.py: Contains the State Class. ''' class State(Sequence): ''' Abstract State class ''' def __init__(self, data=[], is_terminal=False): self.data = data self._is_terminal = is_terminal def features(self): ''' Summary Used by function approximators to represent the state. Override this method in State subclasses to have function approximators use a different set of features. Returns: (iterable) ''' return np.array(self.data).flatten() def get_data(self): return self.data def get_num_feats(self): return len(self.features()) def is_terminal(self): return self._is_terminal def set_terminal(self, is_term=True): self._is_terminal = is_term def __hash__(self): if type(self.data).__module__ == np.__name__: # Numpy arrays return hash(str(self.data)) elif self.data.__hash__ is None: return hash(tuple(self.data)) else: return hash(self.data) def __str__(self): return "s." + str(self.data) def __eq__(self, other): if isinstance(other, State): return self.data == other.data return False def __getitem__(self, index): return self.data[index] def __len__(self): return len(self.data)
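A short usage sketch of the State class above (it assumes the simple_rl package is installed so the module path shown in this record can be imported):

from simple_rl.mdp.StateClass import State

s1 = State(data=[1, 2, 3])
s2 = State(data=[1, 2, 3])

print(s1.features())            # array([1, 2, 3])
print(len(s1), s1[0])           # 3 1 -- Sequence protocol via __len__/__getitem__
print(s1 == s2)                 # True: __eq__ compares .data
print(hash(s1) == hash(s2))     # True: unhashable list data falls back to hash(tuple(...))
print(s1.is_terminal())         # False until set_terminal() is called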
week_8_serverless/lambda_handler.py
krishnakaushik25/MLOps-Basics
2,800
131618
<reponame>krishnakaushik25/MLOps-Basics<filename>week_8_serverless/lambda_handler.py """ Lambda wrapper """ import json from inference_onnx import ColaONNXPredictor inferencing_instance = ColaONNXPredictor("./models/model.onnx") def lambda_handler(event, context): """ Lambda function handler for predicting linguistic acceptability of the given sentence """ if "resource" in event.keys(): body = event["body"] body = json.loads(body) print(f"Got the input: {body['sentence']}") response = inferencing_instance.predict(body["sentence"]) return { "statusCode": 200, "headers": {}, "body": json.dumps(response) } else: return inferencing_instance.predict(event["sentence"]) if __name__ == "__main__": test = {"sentence": "this is a sample sentence"} lambda_handler(test, None)
homeassistant/components/onewire/onewirehub.py
learn-home-automation/core
22,481
131624
"""Hub for communication with 1-Wire server or mount_dir.""" from __future__ import annotations import logging import os from typing import TYPE_CHECKING from pi1wire import Pi1Wire from pyownet import protocol from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( ATTR_IDENTIFIERS, ATTR_MANUFACTURER, ATTR_MODEL, ATTR_NAME, ATTR_VIA_DEVICE, CONF_HOST, CONF_PORT, CONF_TYPE, ) from homeassistant.core import HomeAssistant from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers import device_registry as dr from homeassistant.helpers.entity import DeviceInfo from .const import ( CONF_MOUNT_DIR, CONF_TYPE_OWSERVER, CONF_TYPE_SYSBUS, DEVICE_SUPPORT_OWSERVER, DEVICE_SUPPORT_SYSBUS, DOMAIN, MANUFACTURER_EDS, MANUFACTURER_HOBBYBOARDS, MANUFACTURER_MAXIM, ) from .model import ( OWDeviceDescription, OWDirectDeviceDescription, OWServerDeviceDescription, ) DEVICE_COUPLERS = { # Family : [branches] "1F": ["aux", "main"] } DEVICE_MANUFACTURER = { "7E": MANUFACTURER_EDS, "EF": MANUFACTURER_HOBBYBOARDS, } _LOGGER = logging.getLogger(__name__) def _is_known_owserver_device(device_family: str, device_type: str) -> bool: """Check if device family/type is known to the library.""" if device_family in ("7E", "EF"): # EDS or HobbyBoard return device_type in DEVICE_SUPPORT_OWSERVER[device_family] return device_family in DEVICE_SUPPORT_OWSERVER class OneWireHub: """Hub to communicate with SysBus or OWServer.""" def __init__(self, hass: HomeAssistant) -> None: """Initialize.""" self.hass = hass self.type: str | None = None self.pi1proxy: Pi1Wire | None = None self.owproxy: protocol._Proxy | None = None self.devices: list[OWDeviceDescription] | None = None async def connect(self, host: str, port: int) -> None: """Connect to the owserver host.""" try: self.owproxy = await self.hass.async_add_executor_job( protocol.proxy, host, port ) except protocol.ConnError as exc: raise CannotConnect from exc async def check_mount_dir(self, mount_dir: str) -> None: """Test that the mount_dir is a valid path.""" if not await self.hass.async_add_executor_job(os.path.isdir, mount_dir): raise InvalidPath self.pi1proxy = Pi1Wire(mount_dir) async def initialize(self, config_entry: ConfigEntry) -> None: """Initialize a config entry.""" self.type = config_entry.data[CONF_TYPE] if self.type == CONF_TYPE_SYSBUS: mount_dir = config_entry.data[CONF_MOUNT_DIR] _LOGGER.debug("Initializing using SysBus %s", mount_dir) await self.check_mount_dir(mount_dir) elif self.type == CONF_TYPE_OWSERVER: host = config_entry.data[CONF_HOST] port = config_entry.data[CONF_PORT] _LOGGER.debug("Initializing using OWServer %s:%s", host, port) await self.connect(host, port) await self.discover_devices() if TYPE_CHECKING: assert self.devices # Register discovered devices on Hub device_registry = dr.async_get(self.hass) for device in self.devices: device_info: DeviceInfo = device.device_info device_registry.async_get_or_create( config_entry_id=config_entry.entry_id, identifiers=device_info[ATTR_IDENTIFIERS], manufacturer=device_info[ATTR_MANUFACTURER], model=device_info[ATTR_MODEL], name=device_info[ATTR_NAME], via_device=device_info.get(ATTR_VIA_DEVICE), ) async def discover_devices(self) -> None: """Discover all devices.""" if self.devices is None: if self.type == CONF_TYPE_SYSBUS: self.devices = await self.hass.async_add_executor_job( self._discover_devices_sysbus ) if self.type == CONF_TYPE_OWSERVER: self.devices = await self.hass.async_add_executor_job( self._discover_devices_owserver ) def 
_discover_devices_sysbus(self) -> list[OWDeviceDescription]: """Discover all sysbus devices.""" devices: list[OWDeviceDescription] = [] assert self.pi1proxy all_sensors = self.pi1proxy.find_all_sensors() if not all_sensors: _LOGGER.error( "No onewire sensor found. Check if dtoverlay=w1-gpio " "is in your /boot/config.txt. " "Check the mount_dir parameter if it's defined" ) for interface in all_sensors: device_family = interface.mac_address[:2] device_id = f"{device_family}-{interface.mac_address[2:]}" if device_family not in DEVICE_SUPPORT_SYSBUS: _LOGGER.warning( "Ignoring unknown device family (%s) found for device %s", device_family, device_id, ) continue device_info: DeviceInfo = { ATTR_IDENTIFIERS: {(DOMAIN, device_id)}, ATTR_MANUFACTURER: DEVICE_MANUFACTURER.get( device_family, MANUFACTURER_MAXIM ), ATTR_MODEL: device_family, ATTR_NAME: device_id, } device = OWDirectDeviceDescription( device_info=device_info, interface=interface, ) devices.append(device) return devices def _discover_devices_owserver( self, path: str = "/", parent_id: str | None = None ) -> list[OWDeviceDescription]: """Discover all owserver devices.""" devices: list[OWDeviceDescription] = [] assert self.owproxy for device_path in self.owproxy.dir(path): device_id = os.path.split(os.path.split(device_path)[0])[1] device_family = self.owproxy.read(f"{device_path}family").decode() _LOGGER.debug("read `%sfamily`: %s", device_path, device_family) device_type = self._get_device_type_owserver(device_path) if not _is_known_owserver_device(device_family, device_type): _LOGGER.warning( "Ignoring unknown device family/type (%s/%s) found for device %s", device_family, device_type, device_id, ) continue device_info: DeviceInfo = { ATTR_IDENTIFIERS: {(DOMAIN, device_id)}, ATTR_MANUFACTURER: DEVICE_MANUFACTURER.get( device_family, MANUFACTURER_MAXIM ), ATTR_MODEL: device_type, ATTR_NAME: device_id, } if parent_id: device_info[ATTR_VIA_DEVICE] = (DOMAIN, parent_id) device = OWServerDeviceDescription( device_info=device_info, id=device_id, family=device_family, path=device_path, type=device_type, ) devices.append(device) if device_branches := DEVICE_COUPLERS.get(device_family): for branch in device_branches: devices += self._discover_devices_owserver( f"{device_path}{branch}", device_id ) return devices def _get_device_type_owserver(self, device_path: str) -> str: """Get device model.""" if TYPE_CHECKING: assert self.owproxy device_type = self.owproxy.read(f"{device_path}type").decode() _LOGGER.debug("read `%stype`: %s", device_path, device_type) if device_type == "EDS": device_type = self.owproxy.read(f"{device_path}device_type").decode() _LOGGER.debug("read `%sdevice_type`: %s", device_path, device_type) if TYPE_CHECKING: assert isinstance(device_type, str) return device_type class CannotConnect(HomeAssistantError): """Error to indicate we cannot connect.""" class InvalidPath(HomeAssistantError): """Error to indicate the path is invalid."""
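In _discover_devices_sysbus above, the device family is the first two characters of the sensor's address and the device id re-inserts a dash between the family and the remainder. A standalone sketch of that derivation, using a made-up address:

mac_address = "2896A14D000080"  # made-up 1-Wire address, only to show the split

device_family = mac_address[:2]                    # "28", the DS18B20 temperature-sensor family
device_id = f"{device_family}-{mac_address[2:]}"   # "28-96A14D000080"

print(device_family, device_id)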
examples/single-pole-balancing/evolve-feedforward.py
HTalarmo/neat-python
1,129
131634
""" Single-pole balancing experiment using a feed-forward neural network. """ from __future__ import print_function import multiprocessing import os import pickle import neat import cart_pole import visualize runs_per_net = 5 simulation_seconds = 60.0 # Use the NN network phenotype and the discrete actuator force function. def eval_genome(genome, config): net = neat.nn.FeedForwardNetwork.create(genome, config) fitnesses = [] for runs in range(runs_per_net): sim = cart_pole.CartPole() # Run the given simulation for up to num_steps time steps. fitness = 0.0 while sim.t < simulation_seconds: inputs = sim.get_scaled_state() action = net.activate(inputs) # Apply action to the simulated cart-pole force = cart_pole.discrete_actuator_force(action) sim.step(force) # Stop if the network fails to keep the cart within the position or angle limits. # The per-run fitness is the number of time steps the network can balance the pole # without exceeding these limits. if abs(sim.x) >= sim.position_limit or abs(sim.theta) >= sim.angle_limit_radians: break fitness = sim.t fitnesses.append(fitness) # The genome's fitness is its worst performance across all runs. return min(fitnesses) def eval_genomes(genomes, config): for genome_id, genome in genomes: genome.fitness = eval_genome(genome, config) def run(): # Load the config file, which is assumed to live in # the same directory as this script. local_dir = os.path.dirname(__file__) config_path = os.path.join(local_dir, 'config-feedforward') config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet, neat.DefaultStagnation, config_path) pop = neat.Population(config) stats = neat.StatisticsReporter() pop.add_reporter(stats) pop.add_reporter(neat.StdOutReporter(True)) pe = neat.ParallelEvaluator(multiprocessing.cpu_count(), eval_genome) winner = pop.run(pe.evaluate) # Save the winner. with open('winner-feedforward', 'wb') as f: pickle.dump(winner, f) print(winner) visualize.plot_stats(stats, ylog=True, view=True, filename="feedforward-fitness.svg") visualize.plot_species(stats, view=True, filename="feedforward-speciation.svg") node_names = {-1: 'x', -2: 'dx', -3: 'theta', -4: 'dtheta', 0: 'control'} visualize.draw_net(config, winner, True, node_names=node_names) visualize.draw_net(config, winner, view=True, node_names=node_names, filename="winner-feedforward.gv") visualize.draw_net(config, winner, view=True, node_names=node_names, filename="winner-feedforward-enabled.gv", show_disabled=False) visualize.draw_net(config, winner, view=True, node_names=node_names, filename="winner-feedforward-enabled-pruned.gv", show_disabled=False, prune_unused=True) if __name__ == '__main__': run()
tests/torch/nn/parallel/tensor_parallel/_parallel_2d/test_wrapper_2d.py
lipovsek/oslo
249
131639
<gh_stars>100-1000 import torch.distributed as dist import wandb from datasets import load_dataset from torch.optim import Adam from torch.utils.data import DataLoader from transformers import AutoTokenizer, GPT2Config, GPT2LMHeadModel from oslo.torch.distributed import ParallelContext, ParallelMode from oslo.torch.nn.parallel.tensor_parallel import TensorParallel from oslo.torch.nn.parallel.utils import allocate_params # parallel context 생성 parallel_context = ParallelContext.from_torch( data_parallel_size=1, pipeline_parallel_size=1, tensor_parallel_size=4, tensor_parallel_mode=ParallelMode.TENSOR_2D, ) # 토크나이저 생성 tokenizer = AutoTokenizer.from_pretrained("gpt2") tokenizer.pad_token = tokenizer.eos_token # 모델 생성 및 병렬화 수행 model_no_tp = GPT2LMHeadModel(GPT2Config.from_pretrained("gpt2")).cuda() model_tp = GPT2LMHeadModel(GPT2Config.from_pretrained("gpt2")) wrapper_tp = TensorParallel(model_tp, parallel_context) allocate_params(wrapper_tp, parallel_context) # allocate_params 함수는 추후에 모든 페러렐 래퍼를 관장하는 클래스에서 처리될 예정 # https://github.com/tunib-ai/oslo/blob/307131bbd5ed995ea8dca8ac541bfbce9bfec29b/oslo/pytorch/model_parallelism/model_parallel_engine.py if dist.get_rank() == 0: print(wrapper_tp) # 옵티마이저 생성 optimizer_tp = Adam(wrapper_tp.parameters(), lr=3e-5) optimizer_no_tp = Adam(model_no_tp.parameters(), lr=3e-5) # 데이터셋 생성 datasets = load_dataset("squad").data["train"]["context"] datasets = [str(sample) for sample in datasets[:500]] dataloader = DataLoader(datasets, batch_size=4) # 모니터링 생성 if dist.get_rank() == 0: wandb.init(project="oslo", name="tp_exp") # 모니터링 생성 대기 dist.barrier() # 학습 시작 for data in dataloader: optimizer_tp.zero_grad() optimizer_no_tp.zero_grad() inputs = tokenizer( data, return_tensors="pt", padding=True, truncation=True, max_length=512, ).to("cuda") loss_tp = wrapper_tp(**inputs, labels=inputs["input_ids"]).loss loss_no_tp = model_no_tp(**inputs, labels=inputs["input_ids"]).loss if dist.get_rank() == 0: print(f"TP:{loss_tp}, NOTP:{loss_no_tp}") wandb.log({"tp": loss_tp, "notp": loss_no_tp}) loss_tp.backward() loss_no_tp.backward() optimizer_tp.step() optimizer_no_tp.step()
Examples/Image/Classification/ResNet/Python/resnet_models.py
burhandodhy/CNTK
17,702
131642
<gh_stars>1000+ # Copyright (c) Microsoft. All rights reserved. # # Licensed under the MIT license. See LICENSE.md file in the project root # for full license information. # ============================================================================== import numpy as np from cntk.initializer import he_normal, normal from cntk.layers import AveragePooling, MaxPooling, BatchNormalization, Convolution, Dense from cntk.ops import element_times, relu # # assembly components # def conv_bn(input, filter_size, num_filters, strides=(1, 1), init=he_normal(), bn_init_scale=1): c = Convolution(filter_size, num_filters, activation=None, init=init, pad=True, strides=strides, bias=False)(input) r = BatchNormalization(map_rank=1, normalization_time_constant=4096, use_cntk_engine=False, init_scale=bn_init_scale, disable_regularization=True)(c) return r def conv_bn_relu(input, filter_size, num_filters, strides=(1, 1), init=he_normal()): r = conv_bn(input, filter_size, num_filters, strides, init, 1) return relu(r) # # ResNet components # def resnet_basic(input, num_filters): c1 = conv_bn_relu(input, (3, 3), num_filters) c2 = conv_bn(c1, (3, 3), num_filters, bn_init_scale=1) p = c2 + input return relu(p) def resnet_basic_inc(input, num_filters, strides=(2, 2)): c1 = conv_bn_relu(input, (3, 3), num_filters, strides) c2 = conv_bn(c1, (3, 3), num_filters, bn_init_scale=1) s = conv_bn(input, (1, 1), num_filters, strides) # Shortcut p = c2 + s return relu(p) def resnet_basic_stack(input, num_stack_layers, num_filters): assert(num_stack_layers >= 0) l = input for _ in range(num_stack_layers): l = resnet_basic(l, num_filters) return l def resnet_bottleneck(input, out_num_filters, inter_out_num_filters): c1 = conv_bn_relu(input, (1, 1), inter_out_num_filters) c2 = conv_bn_relu(c1, (3, 3), inter_out_num_filters) c3 = conv_bn(c2, (1, 1), out_num_filters, bn_init_scale=0) p = c3 + input return relu(p) def resnet_bottleneck_inc(input, out_num_filters, inter_out_num_filters, stride1x1, stride3x3): c1 = conv_bn_relu(input, (1, 1), inter_out_num_filters, strides=stride1x1) c2 = conv_bn_relu(c1, (3, 3), inter_out_num_filters, strides=stride3x3) c3 = conv_bn(c2, (1, 1), out_num_filters, bn_init_scale=0) stride = np.multiply(stride1x1, stride3x3) s = conv_bn(input, (1, 1), out_num_filters, strides=stride) # Shortcut p = c3 + s return relu(p) def resnet_bottleneck_stack(input, num_stack_layers, out_num_filters, inter_out_num_filters): assert(num_stack_layers >= 0) l = input for _ in range(num_stack_layers): l = resnet_bottleneck(l, out_num_filters, inter_out_num_filters) return l # # Defines the residual network model for classifying images # def create_cifar10_model(input, num_stack_layers, num_classes): c_map = [16, 32, 64] conv = conv_bn_relu(input, (3, 3), c_map[0]) r1 = resnet_basic_stack(conv, num_stack_layers, c_map[0]) r2_1 = resnet_basic_inc(r1, c_map[1]) r2_2 = resnet_basic_stack(r2_1, num_stack_layers-1, c_map[1]) r3_1 = resnet_basic_inc(r2_2, c_map[2]) r3_2 = resnet_basic_stack(r3_1, num_stack_layers-1, c_map[2]) # Global average pooling and output pool = AveragePooling(filter_shape=(8, 8), name='final_avg_pooling')(r3_2) z = Dense(num_classes, init=normal(0.01))(pool) return z def create_imagenet_model_basic(input, num_stack_layers, num_classes): c_map = [64, 128, 256, 512] conv = conv_bn_relu(input, (7, 7), c_map[0], strides=(2, 2)) pool1 = MaxPooling((3, 3), strides=(2, 2), pad=True)(conv) r1 = resnet_basic_stack(pool1, num_stack_layers[0], c_map[0]) r2_1 = resnet_basic_inc(r1, c_map[1]) r2_2 = 
resnet_basic_stack(r2_1, num_stack_layers[1], c_map[1]) r3_1 = resnet_basic_inc(r2_2, c_map[2]) r3_2 = resnet_basic_stack(r3_1, num_stack_layers[2], c_map[2]) r4_1 = resnet_basic_inc(r3_2, c_map[3]) r4_2 = resnet_basic_stack(r4_1, num_stack_layers[3], c_map[3]) # Global average pooling and output pool = AveragePooling(filter_shape=(7, 7), name='final_avg_pooling')(r4_2) z = Dense(num_classes, init=normal(0.01))(pool) return z def create_imagenet_model_bottleneck(input, num_stack_layers, num_classes, stride1x1, stride3x3): c_map = [64, 128, 256, 512, 1024, 2048] # conv1 and max pooling conv1 = conv_bn_relu(input, (7, 7), c_map[0], strides=(2, 2)) pool1 = MaxPooling((3,3), strides=(2,2), pad=True)(conv1) # conv2_x r2_1 = resnet_bottleneck_inc(pool1, c_map[2], c_map[0], (1, 1), (1, 1)) r2_2 = resnet_bottleneck_stack(r2_1, num_stack_layers[0], c_map[2], c_map[0]) # conv3_x r3_1 = resnet_bottleneck_inc(r2_2, c_map[3], c_map[1], stride1x1, stride3x3) r3_2 = resnet_bottleneck_stack(r3_1, num_stack_layers[1], c_map[3], c_map[1]) # conv4_x r4_1 = resnet_bottleneck_inc(r3_2, c_map[4], c_map[2], stride1x1, stride3x3) r4_2 = resnet_bottleneck_stack(r4_1, num_stack_layers[2], c_map[4], c_map[2]) # conv5_x r5_1 = resnet_bottleneck_inc(r4_2, c_map[5], c_map[3], stride1x1, stride3x3) r5_2 = resnet_bottleneck_stack(r5_1, num_stack_layers[3], c_map[5], c_map[3]) # Global average pooling and output pool = AveragePooling(filter_shape=(7, 7), name='final_avg_pooling')(r5_2) z = Dense(num_classes, init=normal(0.01))(pool) return z
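In create_cifar10_model above, each basic block contributes two 3x3 convolutions, so the usual CIFAR ResNet depth count of 6n+2 weighted layers can be checked with a small helper (written for this note, not part of the file; shortcut 1x1 convolutions in the *_inc blocks are left out, as is conventional):

def cifar_resnet_depth(num_stack_layers):
    # stem conv + three stages of basic blocks (2 convs each) + final Dense
    n = num_stack_layers
    convs = 1                   # initial conv_bn_relu
    convs += 2 * n              # stage 1: n basic blocks
    convs += 2 + 2 * (n - 1)    # stage 2: one _inc block plus (n - 1) basic blocks
    convs += 2 + 2 * (n - 1)    # stage 3: same pattern
    return convs + 1            # final Dense layer

print(cifar_resnet_depth(3))  # 20 -> ResNet-20
print(cifar_resnet_depth(9))  # 56 -> ResNet-56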
extraPackages/matplotlib-3.0.3/examples/axes_grid1/demo_axes_grid2.py
dolboBobo/python3_ios
130
131661
<reponame>dolboBobo/python3_ios """ =============== Demo Axes Grid2 =============== Grid of images with shared xaxis and yaxis. """ import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import ImageGrid import numpy as np def get_demo_image(): from matplotlib.cbook import get_sample_data f = get_sample_data("axes_grid/bivariate_normal.npy", asfileobj=False) z = np.load(f) # z is a numpy array of 15x15 return z, (-3, 4, -4, 3) def add_inner_title(ax, title, loc, size=None, **kwargs): from matplotlib.offsetbox import AnchoredText from matplotlib.patheffects import withStroke if size is None: size = dict(size=plt.rcParams['legend.fontsize']) at = AnchoredText(title, loc=loc, prop=size, pad=0., borderpad=0.5, frameon=False, **kwargs) ax.add_artist(at) at.txt._text.set_path_effects([withStroke(foreground="w", linewidth=3)]) return at if 1: F = plt.figure(1, (6, 6)) F.clf() # prepare images Z, extent = get_demo_image() ZS = [Z[i::3, :] for i in range(3)] extent = extent[0], extent[1]/3., extent[2], extent[3] # demo 1 : colorbar at each axes grid = ImageGrid(F, 211, # similar to subplot(111) nrows_ncols=(1, 3), direction="row", axes_pad=0.05, add_all=True, label_mode="1", share_all=True, cbar_location="top", cbar_mode="each", cbar_size="7%", cbar_pad="1%", ) for ax, z in zip(grid, ZS): im = ax.imshow( z, origin="lower", extent=extent, interpolation="nearest") ax.cax.colorbar(im) for ax, im_title in zip(grid, ["Image 1", "Image 2", "Image 3"]): t = add_inner_title(ax, im_title, loc='lower left') t.patch.set_alpha(0.5) for ax, z in zip(grid, ZS): ax.cax.toggle_label(True) #axis = ax.cax.axis[ax.cax.orientation] #axis.label.set_text("counts s$^{-1}$") #axis.label.set_size(10) #axis.major_ticklabels.set_size(6) # changing the colorbar ticks grid[1].cax.set_xticks([-1, 0, 1]) grid[2].cax.set_xticks([-1, 0, 1]) grid[0].set_xticks([-2, 0]) grid[0].set_yticks([-2, 0, 2]) # demo 2 : shared colorbar grid2 = ImageGrid(F, 212, nrows_ncols=(1, 3), direction="row", axes_pad=0.05, add_all=True, label_mode="1", share_all=True, cbar_location="right", cbar_mode="single", cbar_size="10%", cbar_pad=0.05, ) grid2[0].set_xlabel("X") grid2[0].set_ylabel("Y") vmax, vmin = np.max(ZS), np.min(ZS) import matplotlib.colors norm = matplotlib.colors.Normalize(vmax=vmax, vmin=vmin) for ax, z in zip(grid2, ZS): im = ax.imshow(z, norm=norm, origin="lower", extent=extent, interpolation="nearest") # With cbar_mode="single", cax attribute of all axes are identical. ax.cax.colorbar(im) ax.cax.toggle_label(True) for ax, im_title in zip(grid2, ["(a)", "(b)", "(c)"]): t = add_inner_title(ax, im_title, loc='upper left') t.patch.set_ec("none") t.patch.set_alpha(0.5) grid2[0].set_xticks([-2, 0]) grid2[0].set_yticks([-2, 0, 2]) plt.show()
build_defs/intellij_plugin_debug_target.bzl
sitaktif/intellij
675
131670
"""IntelliJ plugin debug target rule used for debugging IntelliJ plugins. Creates a plugin target debuggable from IntelliJ. Any files in the 'deps' and 'javaagents' attribute are deployed to the plugin sandbox. Any files are stripped of their prefix and installed into <sandbox>/plugins. If you need structure, first put the files into //build_defs:build_defs%repackage_files. intellij_plugin_debug_targets can be nested. repackaged_files( name = "foo_files", srcs = [ ":my_plugin_jar", ":my_additional_plugin_files", ], prefix = "plugins/foo/lib", ) intellij_plugin_debug_target( name = "my_debug_target", deps = [ ":my_jar", ], javaagents = [ ":agent_deploy.jar", ], ) """ load("//build_defs:build_defs.bzl", "output_path", "repackaged_files_data") SUFFIX = ".intellij-plugin-debug-target-deploy-info" def _repackaged_deploy_file(f, repackaging_data): return struct( src = f, deploy_location = output_path(f, repackaging_data), ) def _flat_deploy_file(f): return struct( src = f, deploy_location = f.basename, ) def _intellij_plugin_debug_target_aspect_impl(target, ctx): aspect_intellij_plugin_deploy_info = None files = target.files if ctx.rule.kind == "intellij_plugin_debug_target": aspect_intellij_plugin_deploy_info = target.intellij_plugin_deploy_info elif ctx.rule.kind == "_repackaged_files": data = target[repackaged_files_data] aspect_intellij_plugin_deploy_info = struct( deploy_files = [_repackaged_deploy_file(f, data) for f in data.files.to_list()], java_agent_deploy_files = [], ) # TODO(brendandouglas): Remove when migrating to Bazel 0.5, when DefaultInfo # provider can be populated by '_repackaged_files' directly files = depset(transitive = [files, data.files]) else: aspect_intellij_plugin_deploy_info = struct( deploy_files = [_flat_deploy_file(f) for f in target.files.to_list()], java_agent_deploy_files = [], ) return struct( input_files = files, aspect_intellij_plugin_deploy_info = aspect_intellij_plugin_deploy_info, ) _intellij_plugin_debug_target_aspect = aspect( implementation = _intellij_plugin_debug_target_aspect_impl, ) def _build_deploy_info_file(deploy_file): return struct( execution_path = deploy_file.src.path, deploy_location = deploy_file.deploy_location, ) def _intellij_plugin_debug_target_impl(ctx): files = depset() deploy_files = [] java_agent_deploy_files = [] for target in ctx.attr.deps: files = depset(transitive = [files, target.input_files]) deploy_files.extend(target.aspect_intellij_plugin_deploy_info.deploy_files) java_agent_deploy_files.extend(target.aspect_intellij_plugin_deploy_info.java_agent_deploy_files) for target in ctx.attr.javaagents: files = depset(transitive = [files, target.input_files]) java_agent_deploy_files.extend(target.aspect_intellij_plugin_deploy_info.deploy_files) java_agent_deploy_files.extend(target.aspect_intellij_plugin_deploy_info.java_agent_deploy_files) deploy_info = struct( deploy_files = [_build_deploy_info_file(f) for f in deploy_files], java_agent_deploy_files = [_build_deploy_info_file(f) for f in java_agent_deploy_files], ) output = ctx.actions.declare_file(ctx.label.name + SUFFIX) ctx.actions.write(output, deploy_info.to_proto()) # We've already consumed any dependent intellij_plugin_debug_targets into our own, # do not build or report these files = depset([f for f in files.to_list() if not f.path.endswith(SUFFIX)]) files = depset([output], transitive = [files]) return struct( files = files, intellij_plugin_deploy_info = struct( deploy_files = deploy_files, java_agent_deploy_files = java_agent_deploy_files, ), ) 
intellij_plugin_debug_target = rule( implementation = _intellij_plugin_debug_target_impl, attrs = { "deps": attr.label_list(aspects = [_intellij_plugin_debug_target_aspect]), "javaagents": attr.label_list(aspects = [_intellij_plugin_debug_target_aspect]), }, )
firebase_admin/project_management.py
oppia/firebase-admin-python
771
131679
# Copyright 2018 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Firebase Project Management module. This module enables management of resources in Firebase projects, such as Android and iOS apps. """ import base64 import re import time import requests import firebase_admin from firebase_admin import exceptions from firebase_admin import _http_client from firebase_admin import _utils _PROJECT_MANAGEMENT_ATTRIBUTE = '_project_management' def _get_project_management_service(app): return _utils.get_app_service(app, _PROJECT_MANAGEMENT_ATTRIBUTE, _ProjectManagementService) def android_app(app_id, app=None): """Obtains a reference to an Android app in the associated Firebase project. Args: app_id: The app ID that identifies this Android app. app: An App instance (optional). Returns: AndroidApp: An ``AndroidApp`` instance. """ return AndroidApp(app_id=app_id, service=_get_project_management_service(app)) def ios_app(app_id, app=None): """Obtains a reference to an iOS app in the associated Firebase project. Args: app_id: The app ID that identifies this iOS app. app: An App instance (optional). Returns: IOSApp: An ``IOSApp`` instance. """ return IOSApp(app_id=app_id, service=_get_project_management_service(app)) def list_android_apps(app=None): """Lists all Android apps in the associated Firebase project. Args: app: An App instance (optional). Returns: list: a list of ``AndroidApp`` instances referring to each Android app in the Firebase project. """ return _get_project_management_service(app).list_android_apps() def list_ios_apps(app=None): """Lists all iOS apps in the associated Firebase project. Args: app: An App instance (optional). Returns: list: a list of ``IOSApp`` instances referring to each iOS app in the Firebase project. """ return _get_project_management_service(app).list_ios_apps() def create_android_app(package_name, display_name=None, app=None): """Creates a new Android app in the associated Firebase project. Args: package_name: The package name of the Android app to be created. display_name: A nickname for this Android app (optional). app: An App instance (optional). Returns: AndroidApp: An ``AndroidApp`` instance that is a reference to the newly created app. """ return _get_project_management_service(app).create_android_app(package_name, display_name) def create_ios_app(bundle_id, display_name=None, app=None): """Creates a new iOS app in the associated Firebase project. Args: bundle_id: The bundle ID of the iOS app to be created. display_name: A nickname for this iOS app (optional). app: An App instance (optional). Returns: IOSApp: An ``IOSApp`` instance that is a reference to the newly created app. 
""" return _get_project_management_service(app).create_ios_app(bundle_id, display_name) def _check_is_string_or_none(obj, field_name): if obj is None or isinstance(obj, str): return obj raise ValueError('{0} must be a string.'.format(field_name)) def _check_is_nonempty_string(obj, field_name): if isinstance(obj, str) and obj: return obj raise ValueError('{0} must be a non-empty string.'.format(field_name)) def _check_is_nonempty_string_or_none(obj, field_name): if obj is None: return None return _check_is_nonempty_string(obj, field_name) def _check_not_none(obj, field_name): if obj is None: raise ValueError('{0} cannot be None.'.format(field_name)) return obj class AndroidApp: """A reference to an Android app within a Firebase project. Note: Unless otherwise specified, all methods defined in this class make an RPC. Please use the module-level function ``android_app(app_id)`` to obtain instances of this class instead of instantiating it directly. """ def __init__(self, app_id, service): self._app_id = app_id self._service = service @property def app_id(self): """Returns the app ID of the Android app to which this instance refers. Note: This method does not make an RPC. Returns: string: The app ID of the Android app to which this instance refers. """ return self._app_id def get_metadata(self): """Retrieves detailed information about this Android app. Returns: AndroidAppMetadata: An ``AndroidAppMetadata`` instance. Raises: FirebaseError: If an error occurs while communicating with the Firebase Project Management Service. """ return self._service.get_android_app_metadata(self._app_id) def set_display_name(self, new_display_name): """Updates the display name attribute of this Android app to the one given. Args: new_display_name: The new display name for this Android app. Returns: NoneType: None. Raises: FirebaseError: If an error occurs while communicating with the Firebase Project Management Service. """ return self._service.set_android_app_display_name(self._app_id, new_display_name) def get_config(self): """Retrieves the configuration artifact associated with this Android app.""" return self._service.get_android_app_config(self._app_id) def get_sha_certificates(self): """Retrieves the entire list of SHA certificates associated with this Android app. Returns: list: A list of ``SHACertificate`` instances. Raises: FirebaseError: If an error occurs while communicating with the Firebase Project Management Service. """ return self._service.get_sha_certificates(self._app_id) def add_sha_certificate(self, certificate_to_add): """Adds a SHA certificate to this Android app. Args: certificate_to_add: The SHA certificate to add. Returns: NoneType: None. Raises: FirebaseError: If an error occurs while communicating with the Firebase Project Management Service. (For example, if the certificate_to_add already exists.) """ return self._service.add_sha_certificate(self._app_id, certificate_to_add) def delete_sha_certificate(self, certificate_to_delete): """Removes a SHA certificate from this Android app. Args: certificate_to_delete: The SHA certificate to delete. Returns: NoneType: None. Raises: FirebaseError: If an error occurs while communicating with the Firebase Project Management Service. (For example, if the certificate_to_delete is not found.) """ return self._service.delete_sha_certificate(certificate_to_delete) class IOSApp: """A reference to an iOS app within a Firebase project. Note: Unless otherwise specified, all methods defined in this class make an RPC. 
Please use the module-level function ``ios_app(app_id)`` to obtain instances of this class instead of instantiating it directly. """ def __init__(self, app_id, service): self._app_id = app_id self._service = service @property def app_id(self): """Returns the app ID of the iOS app to which this instance refers. Note: This method does not make an RPC. Returns: string: The app ID of the iOS app to which this instance refers. """ return self._app_id def get_metadata(self): """Retrieves detailed information about this iOS app. Returns: IOSAppMetadata: An ``IOSAppMetadata`` instance. Raises: FirebaseError: If an error occurs while communicating with the Firebase Project Management Service. """ return self._service.get_ios_app_metadata(self._app_id) def set_display_name(self, new_display_name): """Updates the display name attribute of this iOS app to the one given. Args: new_display_name: The new display name for this iOS app. Returns: NoneType: None. Raises: FirebaseError: If an error occurs while communicating with the Firebase Project Management Service. """ return self._service.set_ios_app_display_name(self._app_id, new_display_name) def get_config(self): """Retrieves the configuration artifact associated with this iOS app.""" return self._service.get_ios_app_config(self._app_id) class _AppMetadata: """Detailed information about a Firebase Android or iOS app.""" def __init__(self, name, app_id, display_name, project_id): # _name is the fully qualified resource name of this Android or iOS app; currently it is not # exposed to client code. self._name = _check_is_nonempty_string(name, 'name') self._app_id = _check_is_nonempty_string(app_id, 'app_id') self._display_name = _check_is_string_or_none(display_name, 'display_name') self._project_id = _check_is_nonempty_string(project_id, 'project_id') @property def app_id(self): """The globally unique, Firebase-assigned identifier of this Android or iOS app. This ID is unique even across apps of different platforms. """ return self._app_id @property def display_name(self): """The user-assigned display name of this Android or iOS app. 
Note that the display name can be None if it has never been set by the user.""" return self._display_name @property def project_id(self): """The permanent, globally unique, user-assigned ID of the parent Firebase project.""" return self._project_id def __eq__(self, other): if not isinstance(other, type(self)): return False # pylint: disable=protected-access return (self._name == other._name and self.app_id == other.app_id and self.display_name == other.display_name and self.project_id == other.project_id) # pylint: enable=protected-access class AndroidAppMetadata(_AppMetadata): """Android-specific information about an Android Firebase app.""" def __init__(self, package_name, name, app_id, display_name, project_id): """Clients should not instantiate this class directly.""" super(AndroidAppMetadata, self).__init__(name, app_id, display_name, project_id) self._package_name = _check_is_nonempty_string(package_name, 'package_name') @property def package_name(self): """The canonical package name of this Android app as it would appear in the Play Store.""" return self._package_name def __eq__(self, other): return (super(AndroidAppMetadata, self).__eq__(other) and self.package_name == other.package_name) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash( (self._name, self.app_id, self.display_name, self.project_id, self.package_name)) class IOSAppMetadata(_AppMetadata): """iOS-specific information about an iOS Firebase app.""" def __init__(self, bundle_id, name, app_id, display_name, project_id): """Clients should not instantiate this class directly.""" super(IOSAppMetadata, self).__init__(name, app_id, display_name, project_id) self._bundle_id = _check_is_nonempty_string(bundle_id, 'bundle_id') @property def bundle_id(self): """The canonical bundle ID of this iOS app as it would appear in the iOS AppStore.""" return self._bundle_id def __eq__(self, other): return super(IOSAppMetadata, self).__eq__(other) and self.bundle_id == other.bundle_id def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash((self._name, self.app_id, self.display_name, self.project_id, self.bundle_id)) class SHACertificate: """Represents a SHA-1 or SHA-256 certificate associated with an Android app.""" SHA_1 = 'SHA_1' SHA_256 = 'SHA_256' _SHA_1_RE = re.compile('^[0-9A-Fa-f]{40}$') _SHA_256_RE = re.compile('^[0-9A-Fa-f]{64}$') def __init__(self, sha_hash, name=None): """Creates a new SHACertificate instance. Args: sha_hash: A string; the certificate hash for the Android app. name: The fully qualified resource name of this certificate; note that this field should be omitted if the instance is being constructed for the purpose of calling the add_sha_certificate() method on an ``AndroidApp``. Raises: ValueError: If the sha_hash is not a valid SHA-1 or SHA-256 certificate hash. """ _check_is_nonempty_string(sha_hash, 'sha_hash') _check_is_nonempty_string_or_none(name, 'name') self._name = name self._sha_hash = sha_hash.lower() if SHACertificate._SHA_1_RE.match(sha_hash): self._cert_type = SHACertificate.SHA_1 elif SHACertificate._SHA_256_RE.match(sha_hash): self._cert_type = SHACertificate.SHA_256 else: raise ValueError( 'The supplied certificate hash is neither a valid SHA-1 nor SHA_256 hash.') @property def name(self): """Returns the fully qualified resource name of this certificate, if known. Returns: string: The fully qualified resource name of this certificate, if known; otherwise, the empty string. 
""" return self._name @property def sha_hash(self): """Returns the certificate hash. Returns: string: The certificate hash. """ return self._sha_hash @property def cert_type(self): """Returns the type of the SHA certificate encoded in the hash. Returns: string: One of 'SHA_1' or 'SHA_256'. """ return self._cert_type def __eq__(self, other): if not isinstance(other, SHACertificate): return False return (self.name == other.name and self.sha_hash == other.sha_hash and self.cert_type == other.cert_type) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash((self.name, self.sha_hash, self.cert_type)) class _ProjectManagementService: """Provides methods for interacting with the Firebase Project Management Service.""" BASE_URL = 'https://firebase.googleapis.com' MAXIMUM_LIST_APPS_PAGE_SIZE = 100 MAXIMUM_POLLING_ATTEMPTS = 8 POLL_BASE_WAIT_TIME_SECONDS = 0.5 POLL_EXPONENTIAL_BACKOFF_FACTOR = 1.5 ANDROID_APPS_RESOURCE_NAME = 'androidApps' ANDROID_APP_IDENTIFIER_NAME = 'packageName' IOS_APPS_RESOURCE_NAME = 'iosApps' IOS_APP_IDENTIFIER_NAME = 'bundleId' def __init__(self, app): project_id = app.project_id if not project_id: raise ValueError( 'Project ID is required to access the Firebase Project Management Service. Either ' 'set the projectId option, or use service account credentials. Alternatively, set ' 'the GOOGLE_CLOUD_PROJECT environment variable.') self._project_id = project_id version_header = 'Python/Admin/{0}'.format(firebase_admin.__version__) timeout = app.options.get('httpTimeout', _http_client.DEFAULT_TIMEOUT_SECONDS) self._client = _http_client.JsonHttpClient( credential=app.credential.get_credential(), base_url=_ProjectManagementService.BASE_URL, headers={'X-Client-Version': version_header}, timeout=timeout) def get_android_app_metadata(self, app_id): return self._get_app_metadata( platform_resource_name=_ProjectManagementService.ANDROID_APPS_RESOURCE_NAME, identifier_name=_ProjectManagementService.ANDROID_APP_IDENTIFIER_NAME, metadata_class=AndroidAppMetadata, app_id=app_id) def get_ios_app_metadata(self, app_id): return self._get_app_metadata( platform_resource_name=_ProjectManagementService.IOS_APPS_RESOURCE_NAME, identifier_name=_ProjectManagementService.IOS_APP_IDENTIFIER_NAME, metadata_class=IOSAppMetadata, app_id=app_id) def _get_app_metadata(self, platform_resource_name, identifier_name, metadata_class, app_id): """Retrieves detailed information about an Android or iOS app.""" _check_is_nonempty_string(app_id, 'app_id') path = '/v1beta1/projects/-/{0}/{1}'.format(platform_resource_name, app_id) response = self._make_request('get', path) return metadata_class( response[identifier_name], name=response['name'], app_id=response['appId'], display_name=response.get('displayName') or None, project_id=response['projectId']) def set_android_app_display_name(self, app_id, new_display_name): self._set_display_name( app_id=app_id, new_display_name=new_display_name, platform_resource_name=_ProjectManagementService.ANDROID_APPS_RESOURCE_NAME) def set_ios_app_display_name(self, app_id, new_display_name): self._set_display_name( app_id=app_id, new_display_name=new_display_name, platform_resource_name=_ProjectManagementService.IOS_APPS_RESOURCE_NAME) def _set_display_name(self, app_id, new_display_name, platform_resource_name): """Sets the display name of an Android or iOS app.""" path = '/v1beta1/projects/-/{0}/{1}?updateMask=displayName'.format( platform_resource_name, app_id) request_body = {'displayName': new_display_name} self._make_request('patch', path, 
json=request_body) def list_android_apps(self): return self._list_apps( platform_resource_name=_ProjectManagementService.ANDROID_APPS_RESOURCE_NAME, app_class=AndroidApp) def list_ios_apps(self): return self._list_apps( platform_resource_name=_ProjectManagementService.IOS_APPS_RESOURCE_NAME, app_class=IOSApp) def _list_apps(self, platform_resource_name, app_class): """Lists all the Android or iOS apps within the Firebase project.""" path = '/v1beta1/projects/{0}/{1}?pageSize={2}'.format( self._project_id, platform_resource_name, _ProjectManagementService.MAXIMUM_LIST_APPS_PAGE_SIZE) response = self._make_request('get', path) apps_list = [] while True: apps = response.get('apps') if not apps: break apps_list.extend(app_class(app_id=app['appId'], service=self) for app in apps) next_page_token = response.get('nextPageToken') if not next_page_token: break # Retrieve the next page of apps. path = '/v1beta1/projects/{0}/{1}?pageToken={2}&pageSize={3}'.format( self._project_id, platform_resource_name, next_page_token, _ProjectManagementService.MAXIMUM_LIST_APPS_PAGE_SIZE) response = self._make_request('get', path) return apps_list def create_android_app(self, package_name, display_name=None): return self._create_app( platform_resource_name=_ProjectManagementService.ANDROID_APPS_RESOURCE_NAME, identifier_name=_ProjectManagementService.ANDROID_APP_IDENTIFIER_NAME, identifier=package_name, display_name=display_name, app_class=AndroidApp) def create_ios_app(self, bundle_id, display_name=None): return self._create_app( platform_resource_name=_ProjectManagementService.IOS_APPS_RESOURCE_NAME, identifier_name=_ProjectManagementService.IOS_APP_IDENTIFIER_NAME, identifier=bundle_id, display_name=display_name, app_class=IOSApp) def _create_app( self, platform_resource_name, identifier_name, identifier, display_name, app_class): """Creates an Android or iOS app.""" _check_is_string_or_none(display_name, 'display_name') path = '/v1beta1/projects/{0}/{1}'.format(self._project_id, platform_resource_name) request_body = {identifier_name: identifier} if display_name: request_body['displayName'] = display_name response = self._make_request('post', path, json=request_body) operation_name = response['name'] poll_response = self._poll_app_creation(operation_name) return app_class(app_id=poll_response['appId'], service=self) def _poll_app_creation(self, operation_name): """Polls the Long-Running Operation repeatedly until it is done with exponential backoff.""" for current_attempt in range(_ProjectManagementService.MAXIMUM_POLLING_ATTEMPTS): delay_factor = pow( _ProjectManagementService.POLL_EXPONENTIAL_BACKOFF_FACTOR, current_attempt) wait_time_seconds = delay_factor * _ProjectManagementService.POLL_BASE_WAIT_TIME_SECONDS time.sleep(wait_time_seconds) path = '/v1/{0}'.format(operation_name) poll_response, http_response = self._body_and_response('get', path) done = poll_response.get('done') if done: response = poll_response.get('response') if response: return response raise exceptions.UnknownError( 'Polling finished, but the operation terminated in an error.', http_response=http_response) raise exceptions.DeadlineExceededError('Polling deadline exceeded.') def get_android_app_config(self, app_id): return self._get_app_config( platform_resource_name=_ProjectManagementService.ANDROID_APPS_RESOURCE_NAME, app_id=app_id) def get_ios_app_config(self, app_id): return self._get_app_config( platform_resource_name=_ProjectManagementService.IOS_APPS_RESOURCE_NAME, app_id=app_id) def _get_app_config(self, platform_resource_name, 
app_id): path = '/v1beta1/projects/-/{0}/{1}/config'.format(platform_resource_name, app_id) response = self._make_request('get', path) # In Python 2.7, the base64 module works with strings, while in Python 3, it works with # bytes objects. This line works in both versions. return base64.standard_b64decode(response['configFileContents']).decode(encoding='utf-8') def get_sha_certificates(self, app_id): path = '/v1beta1/projects/-/androidApps/{0}/sha'.format(app_id) response = self._make_request('get', path) cert_list = response.get('certificates') or [] return [SHACertificate(sha_hash=cert['shaHash'], name=cert['name']) for cert in cert_list] def add_sha_certificate(self, app_id, certificate_to_add): path = '/v1beta1/projects/-/androidApps/{0}/sha'.format(app_id) sha_hash = _check_not_none(certificate_to_add, 'certificate_to_add').sha_hash cert_type = certificate_to_add.cert_type request_body = {'shaHash': sha_hash, 'certType': cert_type} self._make_request('post', path, json=request_body) def delete_sha_certificate(self, certificate_to_delete): name = _check_not_none(certificate_to_delete, 'certificate_to_delete').name path = '/v1beta1/{0}'.format(name) self._make_request('delete', path) def _make_request(self, method, url, json=None): body, _ = self._body_and_response(method, url, json) return body def _body_and_response(self, method, url, json=None): try: return self._client.body_and_response(method=method, url=url, json=json) except requests.exceptions.RequestException as error: raise _utils.handle_platform_error_from_requests(error)
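# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged walk-through of the public functions defined above. It assumes
# the default app can be initialized with application-default credentials that are
# allowed to manage the Firebase project; the package name, display name, and the
# all-zero SHA-1 hash are hypothetical placeholders.
if __name__ == '__main__':
    import firebase_admin

    firebase_admin.initialize_app()

    # Create an Android app and attach a placeholder SHA-1 certificate.
    new_app = create_android_app(package_name='com.example.demo', display_name='Demo app')
    new_app.add_sha_certificate(SHACertificate(sha_hash='0' * 40))

    # Enumerate existing Android apps and fetch the config artifact of the new one.
    for existing_app in list_android_apps():
        print(existing_app.get_metadata().display_name)
    print(new_app.get_config())  # google-services.json contents as a string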
tensorflow/tools/common/traverse_test.py
abhaikollara/tensorflow
848
131718
<gh_stars>100-1000 # Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Python module traversal.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.platform import googletest from tensorflow.tools.common import test_module1 from tensorflow.tools.common import test_module2 from tensorflow.tools.common import traverse class TestVisitor(object): def __init__(self): self.call_log = [] def __call__(self, path, parent, children): self.call_log += [(path, parent, children)] class TraverseTest(googletest.TestCase): def test_cycle(self): class Cyclist(object): pass Cyclist.cycle = Cyclist visitor = TestVisitor() traverse.traverse(Cyclist, visitor) # We simply want to make sure we terminate. def test_module(self): visitor = TestVisitor() traverse.traverse(test_module1, visitor) called = [parent for _, parent, _ in visitor.call_log] self.assertIn(test_module1.ModuleClass1, called) self.assertIn(test_module2.ModuleClass2, called) def test_class(self): visitor = TestVisitor() traverse.traverse(TestVisitor, visitor) self.assertEqual(TestVisitor, visitor.call_log[0][1]) # There are a bunch of other members, but make sure that the ones we know # about are there. self.assertIn('__init__', [name for name, _ in visitor.call_log[0][2]]) self.assertIn('__call__', [name for name, _ in visitor.call_log[0][2]]) # There are more classes descended into, at least __class__ and # __class__.__base__, neither of which are interesting to us, and which may # change as part of Python version etc., so we don't test for them. def test_non_class(self): integer = 5 visitor = TestVisitor() traverse.traverse(integer, visitor) self.assertEqual([], visitor.call_log) if __name__ == '__main__': googletest.main()
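# --- Illustrative sketch (not part of the original test) ---
# A minimal custom visitor, relying only on the (path, parent, children) call
# protocol that TestVisitor exercises above; it prints every class encountered
# while walking test_module1.
class _PrintClassesVisitor(object):

  def __call__(self, path, parent, children):
    if isinstance(parent, type):
      print('%s -> %s' % (path, parent.__name__))


def _demo_traverse():
  traverse.traverse(test_module1, _PrintClassesVisitor())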
lib/dataset.py
Gofinge/SpatioTemporalSegmentation
212
131732
from abc import ABC from pathlib import Path from collections import defaultdict import random import numpy as np from enum import Enum import torch from torch.utils.data import Dataset, DataLoader import MinkowskiEngine as ME from plyfile import PlyData import lib.transforms as t from lib.dataloader import InfSampler from lib.voxelizer import Voxelizer class DatasetPhase(Enum): Train = 0 Val = 1 Val2 = 2 TrainVal = 3 Test = 4 def datasetphase_2str(arg): if arg == DatasetPhase.Train: return 'train' elif arg == DatasetPhase.Val: return 'val' elif arg == DatasetPhase.Val2: return 'val2' elif arg == DatasetPhase.TrainVal: return 'trainval' elif arg == DatasetPhase.Test: return 'test' else: raise ValueError('phase must be one of dataset enum.') def str2datasetphase_type(arg): if arg.upper() == 'TRAIN': return DatasetPhase.Train elif arg.upper() == 'VAL': return DatasetPhase.Val elif arg.upper() == 'VAL2': return DatasetPhase.Val2 elif arg.upper() == 'TRAINVAL': return DatasetPhase.TrainVal elif arg.upper() == 'TEST': return DatasetPhase.Test else: raise ValueError('phase must be one of train/val/test') def cache(func): def wrapper(self, *args, **kwargs): # Assume that args[0] is index index = args[0] if self.cache: if index not in self.cache_dict[func.__name__]: results = func(self, *args, **kwargs) self.cache_dict[func.__name__][index] = results return self.cache_dict[func.__name__][index] else: return func(self, *args, **kwargs) return wrapper class DictDataset(Dataset, ABC): IS_FULL_POINTCLOUD_EVAL = False def __init__(self, data_paths, prevoxel_transform=None, input_transform=None, target_transform=None, cache=False, data_root='/'): """ data_paths: list of lists, [[str_path_to_input, str_path_to_label], [...]] """ Dataset.__init__(self) # Allows easier path concatenation if not isinstance(data_root, Path): data_root = Path(data_root) self.data_root = data_root self.data_paths = sorted(data_paths) self.prevoxel_transform = prevoxel_transform self.input_transform = input_transform self.target_transform = target_transform # dictionary of input self.data_loader_dict = { 'input': (self.load_input, self.input_transform), 'target': (self.load_target, self.target_transform) } # For large dataset, do not cache self.cache = cache self.cache_dict = defaultdict(dict) self.loading_key_order = ['input', 'target'] def load_input(self, index): raise NotImplementedError def load_target(self, index): raise NotImplementedError def get_classnames(self): pass def reorder_result(self, result): return result def __getitem__(self, index): out_array = [] for k in self.loading_key_order: loader, transformer = self.data_loader_dict[k] v = loader(index) if transformer: v = transformer(v) out_array.append(v) return out_array def __len__(self): return len(self.data_paths) class VoxelizationDatasetBase(DictDataset, ABC): IS_TEMPORAL = False CLIP_BOUND = (-1000, -1000, -1000, 1000, 1000, 1000) ROTATION_AXIS = None NUM_IN_CHANNEL = None NUM_LABELS = -1 # Number of labels in the dataset, including all ignore classes IGNORE_LABELS = None # labels that are not evaluated def __init__(self, data_paths, prevoxel_transform=None, input_transform=None, target_transform=None, cache=False, data_root='/', ignore_mask=255, return_transformation=False, **kwargs): """ ignore_mask: label value for ignore class. It will not be used as a class in the loss or evaluation. 
""" DictDataset.__init__( self, data_paths, prevoxel_transform=prevoxel_transform, input_transform=input_transform, target_transform=target_transform, cache=cache, data_root=data_root) self.ignore_mask = ignore_mask self.return_transformation = return_transformation def __getitem__(self, index): raise NotImplementedError def load_ply(self, index): filepath = self.data_root / self.data_paths[index] plydata = PlyData.read(filepath) data = plydata.elements[0].data coords = np.array([data['x'], data['y'], data['z']], dtype=np.float32).T feats = np.array([data['red'], data['green'], data['blue']], dtype=np.float32).T labels = np.array(data['label'], dtype=np.int32) return coords, feats, labels, None def __len__(self): num_data = len(self.data_paths) return num_data class VoxelizationDataset(VoxelizationDatasetBase): """This dataset loads RGB point clouds and their labels as a list of points and voxelizes the pointcloud with sufficient data augmentation. """ # Voxelization arguments VOXEL_SIZE = 0.05 # 5cm # Coordinate Augmentation Arguments: Unlike feature augmentation, coordinate # augmentation has to be done before voxelization SCALE_AUGMENTATION_BOUND = (0.9, 1.1) ROTATION_AUGMENTATION_BOUND = ((-np.pi / 6, np.pi / 6), (-np.pi, np.pi), (-np.pi / 6, np.pi / 6)) TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.05, 0.05), (-0.2, 0.2)) ELASTIC_DISTORT_PARAMS = None # MISC. PREVOXELIZATION_VOXEL_SIZE = None # Augment coords to feats AUGMENT_COORDS_TO_FEATS = False def __init__(self, data_paths, prevoxel_transform=None, input_transform=None, target_transform=None, data_root='/', ignore_label=255, return_transformation=False, augment_data=False, config=None, **kwargs): self.augment_data = augment_data self.config = config VoxelizationDatasetBase.__init__( self, data_paths, prevoxel_transform=prevoxel_transform, input_transform=input_transform, target_transform=target_transform, cache=cache, data_root=data_root, ignore_mask=ignore_label, return_transformation=return_transformation) # Prevoxel transformations self.voxelizer = Voxelizer( voxel_size=self.VOXEL_SIZE, clip_bound=self.CLIP_BOUND, use_augmentation=augment_data, scale_augmentation_bound=self.SCALE_AUGMENTATION_BOUND, rotation_augmentation_bound=self.ROTATION_AUGMENTATION_BOUND, translation_augmentation_ratio_bound=self.TRANSLATION_AUGMENTATION_RATIO_BOUND, ignore_label=ignore_label) # map labels not evaluated to ignore_label label_map = {} n_used = 0 for l in range(self.NUM_LABELS): if l in self.IGNORE_LABELS: label_map[l] = self.ignore_mask else: label_map[l] = n_used n_used += 1 label_map[self.ignore_mask] = self.ignore_mask self.label_map = label_map self.NUM_LABELS -= len(self.IGNORE_LABELS) def _augment_coords_to_feats(self, coords, feats, labels=None): norm_coords = coords - coords.mean(0) # color must come first. 
if isinstance(coords, np.ndarray): feats = np.concatenate((feats, norm_coords), 1) else: feats = torch.cat((feats, norm_coords), 1) return coords, feats, labels def convert_mat2cfl(self, mat): # Generally, xyz,rgb,label return mat[:, :3], mat[:, 3:-1], mat[:, -1] def __getitem__(self, index): coords, feats, labels, center = self.load_ply(index) # Downsample the pointcloud with finer voxel size before transformation for memory and speed if self.PREVOXELIZATION_VOXEL_SIZE is not None: inds = ME.utils.sparse_quantize( coords / self.PREVOXELIZATION_VOXEL_SIZE, return_index=True) coords = coords[inds] feats = feats[inds] labels = labels[inds] # Prevoxel transformations if self.prevoxel_transform is not None: coords, feats, labels = self.prevoxel_transform(coords, feats, labels) coords, feats, labels, transformation = self.voxelizer.voxelize( coords, feats, labels, center=center) # map labels not used for evaluation to ignore_label if self.input_transform is not None: coords, feats, labels = self.input_transform(coords, feats, labels) if self.target_transform is not None: coords, feats, labels = self.target_transform(coords, feats, labels) if self.IGNORE_LABELS is not None: labels = np.array([self.label_map[x] for x in labels], dtype=np.int) # Use coordinate features if config is set if self.AUGMENT_COORDS_TO_FEATS: coords, feats, labels = self._augment_coords_to_feats(coords, feats, labels) return_args = [coords, feats, labels] if self.return_transformation: return_args.append(transformation.astype(np.float32)) return tuple(return_args) class TemporalVoxelizationDataset(VoxelizationDataset): IS_TEMPORAL = True def __init__(self, data_paths, prevoxel_transform=None, input_transform=None, target_transform=None, data_root='/', ignore_label=255, temporal_dilation=1, temporal_numseq=3, return_transformation=False, augment_data=False, config=None, **kwargs): VoxelizationDataset.__init__( self, data_paths, prevoxel_transform=prevoxel_transform, input_transform=input_transform, target_transform=target_transform, data_root=data_root, ignore_label=ignore_label, return_transformation=return_transformation, augment_data=augment_data, config=config, **kwargs) self.temporal_dilation = temporal_dilation self.temporal_numseq = temporal_numseq temporal_window = temporal_dilation * (temporal_numseq - 1) + 1 self.numels = [len(p) - temporal_window + 1 for p in self.data_paths] if any([numel <= 0 for numel in self.numels]): raise ValueError('Your temporal window configuration is too wide for ' 'this dataset. 
Please change the configuration.') def load_world_pointcloud(self, filename): raise NotImplementedError def __getitem__(self, index): for seq_idx, numel in enumerate(self.numels): if index >= numel: index -= numel else: break numseq = self.temporal_numseq if self.augment_data and self.config.temporal_rand_numseq: numseq = random.randrange(1, self.temporal_numseq + 1) dilations = [self.temporal_dilation for i in range(numseq - 1)] if self.augment_data and self.config.temporal_rand_dilation: dilations = [random.randrange(1, self.temporal_dilation + 1) for i in range(numseq - 1)] files = [self.data_paths[seq_idx][index + sum(dilations[:i])] for i in range(numseq)] world_pointclouds = [self.load_world_pointcloud(f) for f in files] ptcs, centers = zip(*world_pointclouds) # Downsample pointcloud for speed and memory if self.PREVOXELIZATION_VOXEL_SIZE is not None: new_ptcs = [] for ptc in ptcs: inds = ME.utils.sparse_quantize( ptc[:, :3] / self.PREVOXELIZATION_VOXEL_SIZE, return_index=True) new_ptcs.append(ptc[inds]) ptcs = new_ptcs # Apply prevoxel transformations ptcs = [self.prevoxel_transform(ptc) for ptc in ptcs] coords, feats, labels = zip(*ptcs) outs = self.voxelizer.voxelize_temporal( coords, feats, labels, centers=centers, return_transformation=self.return_transformation) if self.return_transformation: coords_t, feats_t, labels_t, transformation_t = outs else: coords_t, feats_t, labels_t = outs joint_coords = np.vstack([ np.hstack((coords, np.ones((coords.shape[0], 1)) * i)) for i, coords in enumerate(coords_t) ]) joint_feats = np.vstack(feats_t) joint_labels = np.hstack(labels_t) # map labels not used for evaluation to ignore_label if self.input_transform is not None: joint_coords, joint_feats, joint_labels = self.input_transform(joint_coords, joint_feats, joint_labels) if self.target_transform is not None: joint_coords, joint_feats, joint_labels = self.target_transform(joint_coords, joint_feats, joint_labels) if self.IGNORE_LABELS is not None: joint_labels = np.array([self.label_map[x] for x in joint_labels], dtype=np.int) return_args = [joint_coords, joint_feats, joint_labels] if self.return_transformation: pointclouds = np.vstack([ np.hstack((pointcloud[0][:, :6], np.ones((pointcloud[0].shape[0], 1)) * i)) for i, pointcloud in enumerate(world_pointclouds) ]) transformations = np.vstack( [np.hstack((transformation, [i])) for i, transformation in enumerate(transformation_t)]) return_args.extend([pointclouds.astype(np.float32), transformations.astype(np.float32)]) return tuple(return_args) def __len__(self): num_data = sum(self.numels) return num_data def initialize_data_loader(DatasetClass, config, phase, num_workers, shuffle, repeat, augment_data, batch_size, limit_numpoints, input_transform=None, target_transform=None): if isinstance(phase, str): phase = str2datasetphase_type(phase) if config.return_transformation: collate_fn = t.cflt_collate_fn_factory(limit_numpoints) else: collate_fn = t.cfl_collate_fn_factory(limit_numpoints) prevoxel_transform_train = [] if augment_data: prevoxel_transform_train.append(t.ElasticDistortion(DatasetClass.ELASTIC_DISTORT_PARAMS)) if len(prevoxel_transform_train) > 0: prevoxel_transforms = t.Compose(prevoxel_transform_train) else: prevoxel_transforms = None input_transforms = [] if input_transform is not None: input_transforms += input_transform if augment_data: input_transforms += [ t.RandomDropout(0.2), t.RandomHorizontalFlip(DatasetClass.ROTATION_AXIS, DatasetClass.IS_TEMPORAL), t.ChromaticAutoContrast(), 
t.ChromaticTranslation(config.data_aug_color_trans_ratio), t.ChromaticJitter(config.data_aug_color_jitter_std), # t.HueSaturationTranslation(config.data_aug_hue_max, config.data_aug_saturation_max), ] if len(input_transforms) > 0: input_transforms = t.Compose(input_transforms) else: input_transforms = None dataset = DatasetClass( config, prevoxel_transform=prevoxel_transforms, input_transform=input_transforms, target_transform=target_transform, cache=config.cache_data, augment_data=augment_data, phase=phase) data_args = { 'dataset': dataset, 'num_workers': num_workers, 'batch_size': batch_size, 'collate_fn': collate_fn, } if repeat: data_args['sampler'] = InfSampler(dataset, shuffle) else: data_args['shuffle'] = shuffle data_loader = DataLoader(**data_args) return data_loader
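# --- Illustrative sketch (not part of the original file) ---
# A minimal VoxelizationDataset subclass wired to a hypothetical pair of .ply scenes.
# NUM_LABELS counts every class including ignored ones, and IGNORE_LABELS are remapped
# to ignore_mask by the label_map built in VoxelizationDataset.__init__. The paths and
# data_root are placeholders; the cache/phase keyword arguments are accepted only so
# that initialize_data_loader can construct the class.
class ToyVoxelizationDataset(VoxelizationDataset):
  VOXEL_SIZE = 0.05
  NUM_LABELS = 21
  IGNORE_LABELS = (0,)  # e.g. treat label 0 as unannotated

  def __init__(self, config, prevoxel_transform=None, input_transform=None,
               target_transform=None, augment_data=True, cache=False,
               phase=DatasetPhase.Train, **kwargs):
    data_paths = ['scene0000.ply', 'scene0001.ply']  # hypothetical scene list
    VoxelizationDataset.__init__(
        self,
        data_paths,
        prevoxel_transform=prevoxel_transform,
        input_transform=input_transform,
        target_transform=target_transform,
        data_root='/data/toy',
        ignore_label=255,
        augment_data=augment_data,
        config=config)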
tests/test_paginator.py
XeryusTC/fastapi-pagination
315
131753
from fastapi import FastAPI from pytest import fixture from fastapi_pagination import LimitOffsetPage, Page, add_pagination, paginate from .base import BasePaginationTestCase, SafeTestClient, UserOut from .utils import faker app = FastAPI() entities = [UserOut(name=faker.name()) for _ in range(100)] @app.get("/default", response_model=Page[UserOut]) @app.get("/limit-offset", response_model=LimitOffsetPage[UserOut]) async def route(): return paginate(entities) add_pagination(app) class TestPaginationParams(BasePaginationTestCase): @fixture(scope="session") def entities(self): return entities @fixture(scope="session") def client(self): with SafeTestClient(app) as c: yield c
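# --- Illustrative sketch (not part of the original test) ---
# Querying the two routes defined above through the test client. The query parameter
# names (page/size and limit/offset) and the items/total response keys follow
# fastapi-pagination's defaults at the time of writing and may differ between versions.
def _demo_requests():
    with SafeTestClient(app) as client:
        first_page = client.get("/default", params={"page": 1, "size": 10}).json()
        window = client.get("/limit-offset", params={"limit": 10, "offset": 20}).json()
        return first_page["total"], len(first_page["items"]), len(window["items"])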
Validation/HGCalValidation/test/python/recHitClient_cfg.py
ckamtsikis/cmssw
852
131762
import FWCore.ParameterSet.Config as cms import os from Configuration.Eras.Era_Phase2C9_cff import Phase2C9 process = cms.Process('CLIENT',Phase2C9) process.load("Configuration.StandardSequences.Reconstruction_cff") process.load('Configuration.Geometry.GeometryExtended2026D46Reco_cff') process.load('Configuration.Geometry.GeometryExtended2026D46_cff') process.load('Configuration.StandardSequences.EndOfProcess_cff') process.load('FWCore.MessageService.MessageLogger_cfi') process.MessageLogger.cerr.FwkReport.reportEvery = 1 process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff') from Configuration.AlCa.autoCond import autoCond process.GlobalTag.globaltag = autoCond['upgradePLS3'] process.load("Validation.HGCalValidation.HGCalRecHitsClient_cfi") process.hgcalRecHitClientEE.Verbosity = 2 process.hgcalRecHitClientHEF = process.hgcalRecHitClientEE.clone( DetectorName = cms.string("HGCalHESiliconSensitive")) process.hgcalRecHitClientHEB = process.hgcalRecHitClientEE.clone( DetectorName = cms.string("HGCalHEScintillatorSensitive")) process.load("DQMServices.Core.DQM_cfg") process.DQM.collectorHost = '' # summary process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) ) ## process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) ) process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring('file:./test_output_rechitVal.root') ) process.load("Configuration.StandardSequences.EDMtoMEAtRunEnd_cff") process.dqmSaver.referenceHandling = cms.untracked.string('all') cmssw_version = os.environ.get('CMSSW_VERSION','CMSSW_X_Y_Z') Workflow = '/HGCalValidation/'+'Harvesting/'+str(cmssw_version) process.dqmSaver.workflow = Workflow process.load("Validation.HGCalValidation.HGCalRecHitsClient_cfi") process.p = cms.Path(process.EDMtoME * process.hgcalRecHitClientEE * process.hgcalRecHitClientHEF * process.hgcalRecHitClientHEB * process.dqmSaver)
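# --- Illustrative check (not part of the original configuration) ---
# Dumping the fully expanded configuration is a quick way to inspect the harvesting
# path wired up above; dumpPython() is the standard cms.Process inspection helper.
# This assumes the file is loaded inside a CMSSW environment.
if __name__ == '__main__':
    print(process.dumpPython())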
tests/parsers/test_parsers.py
ssato/python-anyconfig
213
131771
# # Copyright (C) 2012 - 2021 <NAME> <<EMAIL>> # SPDX-License-Identifier: MIT # # pylint: disable=missing-docstring, invalid-name import pathlib import unittest import anyconfig.backend.json import anyconfig.backend.json.default as JSON try: import anyconfig.backend.yaml.pyyaml as PYYAML except ImportError: PYYAML = None import anyconfig.parsers.parsers as TT import anyconfig.ioinfo from anyconfig.common import ( UnknownProcessorTypeError, UnknownFileTypeError ) from .. import base CNF_PATH = base.RES_DIR / 'base/basics/10/10.json' class Test(unittest.TestCase): def setUp(self): self.psrs = TT.Parsers() def test_10_json_parsers(self): jpsrs = self.psrs.findall(None, forced_type="json") self.assertTrue(isinstance(jpsrs[0], JSON.Parser)) def test_12_yaml_parsers(self): if PYYAML: ypsrs = self.psrs.findall(None, forced_type="yaml") self.assertTrue(isinstance(ypsrs[0], PYYAML.Parser)) def test_30_find__ng_cases(self): self.assertRaises(ValueError, self.psrs.find, None) self.assertRaises(UnknownProcessorTypeError, self.psrs.find, None, forced_type="_unkonw_type_") self.assertRaises(UnknownFileTypeError, self.psrs.find, "cnf.unknown_ext") def test_32_find__ng_cases(self): pcls = anyconfig.backend.json.Parser self.assertTrue(isinstance(self.psrs.find("x.conf", forced_type="json"), pcls)) self.assertTrue(isinstance(self.psrs.find("x.json"), pcls)) with open(CNF_PATH) as inp: self.assertTrue(isinstance(self.psrs.find(inp), pcls)) if pathlib is not None: inp = pathlib.Path("x.json") self.assertTrue(isinstance(self.psrs.find(inp), pcls)) def test_34_find__input_object(self): inp = anyconfig.ioinfo.make(CNF_PATH) psr = self.psrs.find(inp) self.assertTrue(isinstance(psr, anyconfig.backend.json.Parser)) # vim:sw=4:ts=4:et:
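# --- Illustrative sketch (not part of the original tests) ---
# Resolving a parser outside the unittest harness, mirroring what the assertions
# above exercise: selection by file extension versus an explicit forced_type.
# The file names are placeholders.
def _demo_parser_lookup():
    psrs = TT.Parsers()
    by_extension = psrs.find("settings.json")  # chosen from the .json suffix
    forced = psrs.find("settings.conf", forced_type="json")  # chosen explicitly
    return by_extension, forced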
econml/metalearners/__init__.py
imatiach-msft/EconML
1,846
131801
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from ._metalearners import (TLearner, SLearner, XLearner, DomainAdaptationLearner) __all__ = ["TLearner", "SLearner", "XLearner", "DomainAdaptationLearner"]
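# --- Illustrative sketch (not part of the original __init__) ---
# Fitting one of the exported metalearners on synthetic data. The TLearner
# constructor/fit/effect signatures used here reflect common EconML usage but are
# assumptions relative to this file and should be checked against the installed version.
def _demo_tlearner():
    import numpy as np
    from sklearn.ensemble import GradientBoostingRegressor

    rng = np.random.default_rng(0)
    X = rng.normal(size=(500, 5))
    T = rng.binomial(1, 0.5, size=500)
    Y = X[:, 0] + 2.0 * T + rng.normal(size=500)

    est = TLearner(models=GradientBoostingRegressor())
    est.fit(Y, T, X=X)
    return est.effect(X[:5])  # estimated treatment effects for the first rows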
unittests/tools/test_netsparker_parser.py
mtcolman/django-DefectDojo
249
131807
from ..dojo_test_case import DojoTestCase from dojo.models import Test from dojo.tools.netsparker.parser import NetsparkerParser class TestNetsparkerParser(DojoTestCase): def test_parse_file_with_one_finding(self): testfile = open("unittests/scans/netsparker/netsparker_one_finding.json") parser = NetsparkerParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) for finding in findings: for endpoint in finding.unsaved_endpoints: endpoint.clean() with self.subTest(i=0): finding = findings[0] self.assertEqual("Medium", finding.severity) self.assertEqual(16, finding.cwe) self.assertEqual("25/06/2021", finding.date.strftime("%d/%m/%Y")) self.assertIsNotNone(finding.description) self.assertGreater(len(finding.description), 0) self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:L/UI:R/S:U/C:H/I:N/A:N/E:H/RL:O/RC:C", finding.cvssv3) self.assertEqual(1, len(finding.unsaved_endpoints)) endpoint = finding.unsaved_endpoints[0] self.assertEqual(str(endpoint), "http://php.testsparker.com/auth/login.php") def test_parse_file_with_multiple_finding(self): testfile = open("unittests/scans/netsparker/netsparker_many_findings.json") parser = NetsparkerParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(16, len(findings)) for finding in findings: for endpoint in finding.unsaved_endpoints: endpoint.clean() with self.subTest(i=0): finding = findings[0] self.assertEqual("Medium", finding.severity) self.assertEqual(16, finding.cwe) self.assertEqual("25/06/2021", finding.date.strftime("%d/%m/%Y")) self.assertIsNotNone(finding.description) self.assertGreater(len(finding.description), 0) self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:L/UI:R/S:U/C:H/I:N/A:N/E:H/RL:O/RC:C", finding.cvssv3) self.assertEqual(1, len(finding.unsaved_endpoints)) endpoint = finding.unsaved_endpoints[0] self.assertEqual(str(endpoint), "http://php.testsparker.com/auth/login.php") with self.subTest(i=1): finding = findings[1] self.assertEqual("Critical", finding.severity) self.assertEqual(89, finding.cwe) self.assertEqual("25/06/2021", finding.date.strftime("%d/%m/%Y")) self.assertIsNotNone(finding.description) self.assertGreater(len(finding.description), 0) self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H", finding.cvssv3) self.assertEqual(1, len(finding.unsaved_endpoints)) endpoint = finding.unsaved_endpoints[0] self.assertEqual(str(endpoint), "http://php.testsparker.com/artist.php?id=-1%20OR%2017-7=10") with self.subTest(i=2): finding = findings[2] self.assertEqual("Medium", finding.severity) self.assertEqual(205, finding.cwe) self.assertEqual("25/06/2021", finding.date.strftime("%d/%m/%Y")) self.assertIsNotNone(finding.description) self.assertGreater(len(finding.description), 0) self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:L/A:N/E:H/RL:O/RC:C", finding.cvssv3) self.assertEqual(1, len(finding.unsaved_endpoints)) endpoint = finding.unsaved_endpoints[0] self.assertEqual(str(endpoint), "http://php.testsparker.com")
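# --- Illustrative sketch (not part of the original tests) ---
# Running the parser outside the unittest harness, using the same sample report the
# tests above rely on; a context manager is used so the file handle is closed.
def _demo_netsparker_parse():
    with open("unittests/scans/netsparker/netsparker_one_finding.json") as report:
        findings = NetsparkerParser().get_findings(report, Test())
    return [(finding.severity, finding.cwe) for finding in findings]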
noxfile.py
cansarigol/pdbr
222
131813
<gh_stars>100-1000
import nox

nox.options.stop_on_first_error = True

SOURCE_FILES = "pdbr", "tests", "noxfile.py"


# reuse_venv is a decorator option, not a session-function argument, so it is passed
# to @nox.session here instead of being left as an unused default parameter.
@nox.session(reuse_venv=True)
def lint(session):
    session.install("autoflake", "isort==5.*", "black>=20.8b1")
    session.run("autoflake", "--in-place", "--recursive", *SOURCE_FILES)
    session.run("black", *SOURCE_FILES)
    session.run("isort", *SOURCE_FILES)


@nox.session(reuse_venv=True)
def check(session):
    session.install("pre-commit")
    session.run("pre-commit", "run", "--all-files")


@nox.session(python=["3.6.13", "3.7", "3.8", "3.9", "3.10"], reuse_venv=True)
def test(session):
    session.install(
        "pytest", "pytest-cov", "rich", "icecream", "prompt_toolkit", "sqlparse"
    )
    session.run("pytest", "--cov-report", "term-missing", "--cov=pdbr", "tests")


@nox.session(reuse_venv=True)
@nox.parametrize("django", ["1.8", "1.11", "2.0", "2.2", "3.0"])
def django_test(session, django):
    session.install(f"django=={django}", "rich", "pytest", "sqlparse")
    session.run("python", "runtests.py")
leetcode/169.majority-element.py
geemaple/algorithm
177
131822
#
# @lc app=leetcode id=169 lang=python
#
# [169] Majority Element
#
# https://leetcode.com/problems/majority-element/description/
#
# algorithms
# Easy (56.44%)
# Total Accepted: 542.5K
# Total Submissions: 960.6K
# Testcase Example: '[3,2,3]'
#
# Given an array of size n, find the majority element. The majority element is
# the element that appears more than ⌊ n/2 ⌋ times.
#
# You may assume that the array is non-empty and the majority element always
# exists in the array.
#
# Example 1:
#
# Input: [3,2,3]
# Output: 3
#
# Example 2:
#
# Input: [2,2,1,1,1,2,2]
# Output: 2
#

# O(N) time, O(1) space: Boyer-Moore majority vote
class Solution(object):
    def majorityElement(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        candidate = 0
        counter = 0
        for val in nums:
            if counter == 0:
                candidate = val

            if candidate == val:
                counter += 1
            else:
                counter -= 1

        return candidate

# O(N) time, O(N) extra space: Counter builds a hash map in a single pass
from collections import Counter
class Solution2(object):
    def majorityElement(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        counter = Counter(nums)
        for key in counter:
            if counter[key] > len(nums)//2:
                return key

# O(NlogN): after sorting, the middle element is the majority
class Solution3(object):
    def majorityElement(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        nums.sort()
        mid = len(nums)//2
        return nums[mid]
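# Quick self-check (not part of the original solution file): the three approaches
# above agree on the problem's two examples; copies are passed so the in-place sort
# in Solution3 does not affect the other calls.
if __name__ == '__main__':
    for nums in ([3, 2, 3], [2, 2, 1, 1, 1, 2, 2]):
        assert Solution().majorityElement(list(nums)) \
            == Solution2().majorityElement(list(nums)) \
            == Solution3().majorityElement(list(nums))
    print('ok')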
cornac/models/bivaecf/recom_bivaecf.py
carmanzhang/cornac
597
131839
# Copyright 2018 The Cornac Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ import numpy as np from ..recommender import Recommender from ...utils.common import scale from ...exception import ScoreException class BiVAECF(Recommender): """Bilateral Variational AutoEncoder for Collaborative Filtering. Parameters ---------- k: int, optional, default: 10 The dimension of the stochastic user ``theta'' and item ``beta'' factors. encoder_structure: list, default: [20] The number of neurons per layer of the user and item encoders for BiVAE. For example, encoder_structure = [20], the user (item) encoder structure will be [num_items, 20, k] ([num_users, 20, k]). act_fn: str, default: 'tanh' Name of the activation function used between hidden layers of the auto-encoder. Supported functions: ['sigmoid', 'tanh', 'elu', 'relu', 'relu6'] likelihood: str, default: 'pois' The likelihood function used for modeling the observations. Supported choices: bern: Bernoulli likelihood gaus: Gaussian likelihood pois: Poisson likelihood n_epochs: int, optional, default: 100 The number of epochs for SGD. batch_size: int, optional, default: 100 The batch size. learning_rate: float, optional, default: 0.001 The learning rate for Adam. beta_kl: float, optional, default: 1.0 The weight of the KL terms as in beta-VAE. cap_priors: dict, optional, default: {"user":False, "item":False} When {"user":True, "item":True}, CAP priors are used (see BiVAE paper for details),\ otherwise the standard Normal is used as a Prior over the user and item latent variables. name: string, optional, default: 'BiVAECF' The name of the recommender model. trainable: boolean, optional, default: True When False, the model is not trained and Cornac assumes that the model is already \ pre-trained. verbose: boolean, optional, default: False When True, some running logs are displayed. seed: int, optional, default: None Random seed for parameters initialization. use_gpu: boolean, optional, default: True If True and your system supports CUDA then training is performed on GPUs. References ---------- * <NAME>, <NAME>, <NAME>. " Bilateral Variational Autoencoder for Collaborative Filtering." ACM International Conference on Web Search and Data Mining (WSDM). 2021. """ def __init__( self, name="BiVAECF", k=10, encoder_structure=[20], act_fn="tanh", likelihood="pois", n_epochs=100, batch_size=100, learning_rate=0.001, beta_kl=1.0, cap_priors={"user": False, "item": False}, trainable=True, verbose=False, seed=None, use_gpu=True, ): Recommender.__init__(self, name=name, trainable=trainable, verbose=verbose) self.k = k self.encoder_structure = encoder_structure self.act_fn = act_fn self.likelihood = likelihood self.batch_size = batch_size self.n_epochs = n_epochs self.learning_rate = learning_rate self.beta_kl = beta_kl self.cap_priors = cap_priors self.seed = seed self.use_gpu = use_gpu def fit(self, train_set, val_set=None): """Fit the model to observations. 
Parameters ---------- train_set: :obj:`cornac.data.Dataset`, required User-Item preference data as well as additional modalities. val_set: :obj:`cornac.data.Dataset`, optional, default: None User-Item preference data for model selection purposes (e.g., early stopping). Returns ------- self : object """ Recommender.fit(self, train_set, val_set) import torch from .bivae import BiVAE, learn self.device = ( torch.device("cuda:0") if (self.use_gpu and torch.cuda.is_available()) else torch.device("cpu") ) if self.trainable: feature_dim = {"user": None, "item": None} if self.cap_priors.get("user", False): if train_set.user_feature is None: raise ValueError( "CAP priors for users is set to True but no user features are provided" ) else: feature_dim["user"] = train_set.user_feature.feature_dim if self.cap_priors.get("item", False): if train_set.item_feature is None: raise ValueError( "CAP priors for items is set to True but no item features are provided" ) else: feature_dim["item"] = train_set.item_feature.feature_dim if self.seed is not None: torch.manual_seed(self.seed) torch.cuda.manual_seed(self.seed) if not hasattr(self, "bivaecf"): num_items = train_set.matrix.shape[1] num_users = train_set.matrix.shape[0] self.bivae = BiVAE( k=self.k, user_encoder_structure=[num_items] + self.encoder_structure, item_encoder_structure=[num_users] + self.encoder_structure, act_fn=self.act_fn, likelihood=self.likelihood, cap_priors=self.cap_priors, feature_dim=feature_dim, batch_size=self.batch_size, ).to(self.device) learn( self.bivae, self.train_set, n_epochs=self.n_epochs, batch_size=self.batch_size, learn_rate=self.learning_rate, beta_kl=self.beta_kl, verbose=self.verbose, device=self.device, ) elif self.verbose: print("%s is trained already (trainable = False)" % (self.name)) return self def score(self, user_idx, item_idx=None): """Predict the scores/ratings of a user for an item. Parameters ---------- user_idx: int, required The index of the user for whom to perform score prediction. item_idx: int, optional, default: None The index of the item for which to perform score prediction. If None, scores for all known items will be returned. Returns ------- res : A scalar or a Numpy array Relative scores that the user gives to the item or to all known items """ if item_idx is None: if self.train_set.is_unk_user(user_idx): raise ScoreException( "Can't make score prediction for (user_id=%d)" % user_idx ) theta_u = self.bivae.mu_theta[user_idx].view(1, -1) beta = self.bivae.mu_beta known_item_scores = ( self.bivae.decode_user(theta_u, beta).cpu().numpy().ravel() ) return known_item_scores else: if self.train_set.is_unk_user(user_idx) or self.train_set.is_unk_item( item_idx ): raise ScoreException( "Can't make score prediction for (user_id=%d, item_id=%d)" % (user_idx, item_idx) ) theta_u = self.bivae.mu_theta[user_idx].view(1, -1) beta_i = self.bivae.mu_beta[item_idx].view(1, -1) pred = self.bivae.decode_user(theta_u, beta_i).cpu().numpy().ravel() pred = scale( pred, self.train_set.min_rating, self.train_set.max_rating, 0.0, 1.0 ) return pred
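# --- Illustrative usage sketch (not part of the original file) ---
# Training BiVAECF inside a standard cornac experiment. The dataset loader,
# RatioSplit, Experiment, and ranking metrics come from cornac's usual public API
# and are assumptions relative to this file.
def _demo_bivaecf():
    import cornac
    from cornac.datasets import movielens
    from cornac.eval_methods import RatioSplit

    feedback = movielens.load_feedback(variant="100K")
    ratio_split = RatioSplit(data=feedback, test_size=0.2, exclude_unknowns=True, seed=123)
    bivae = BiVAECF(k=10, encoder_structure=[20], n_epochs=20, batch_size=128,
                    use_gpu=False, seed=123)
    cornac.Experiment(
        eval_method=ratio_split,
        models=[bivae],
        metrics=[cornac.metrics.NDCG(k=20), cornac.metrics.Recall(k=20)],
    ).run()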
instrumentation/opentelemetry-instrumentation-sklearn/src/opentelemetry/instrumentation/sklearn/__init__.py
benjaminsky/opentelemetry-python-contrib
208
131842
# Copyright 2020, OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The integration with sklearn supports the scikit-learn compatible libraries, it can be enabled by using ``SklearnInstrumentor``. .. sklearn: https://github.com/scikit-learn/scikit-learn Usage ----- Package instrumentation example: .. code-block:: python from opentelemetry.instrumentation.sklearn import SklearnInstrumentor # instrument the sklearn library SklearnInstrumentor().instrument() # instrument sklearn and other libraries SklearnInstrumentor( packages=["sklearn", "lightgbm", "xgboost"] ).instrument() Model intrumentation example: .. code-block:: python from opentelemetry.instrumentation.sklearn import SklearnInstrumentor from sklearn.datasets import load_iris from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split from sklearn.pipeline import Pipeline X, y = load_iris(return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y) model = Pipeline( [ ("class", RandomForestClassifier(n_estimators=10)), ] ) model.fit(X_train, y_train) SklearnInstrumentor().instrument_estimator(model) """ import logging import os from functools import wraps from importlib import import_module from inspect import isclass from pkgutil import iter_modules from typing import ( Callable, Collection, Dict, List, MutableMapping, Sequence, Type, Union, ) from sklearn.base import BaseEstimator from sklearn.pipeline import FeatureUnion, Pipeline from sklearn.tree import BaseDecisionTree from sklearn.utils.metaestimators import _IffHasAttrDescriptor from opentelemetry.instrumentation.instrumentor import BaseInstrumentor from opentelemetry.instrumentation.sklearn.package import _instruments from opentelemetry.instrumentation.sklearn.version import __version__ from opentelemetry.trace import get_tracer from opentelemetry.util.types import Attributes logger = logging.getLogger(__name__) def implement_span_estimator( func: Callable, estimator: Union[BaseEstimator, Type[BaseEstimator]], attributes: Attributes = None, ): """Wrap the method call with a span. Args: func: A callable to be wrapped in a span estimator: An instance or class of an estimator attributes: Attributes to apply to the span Returns: The passed function wrapped in a span. """ if isclass(estimator): name = estimator.__name__ else: name = estimator.__class__.__name__ logger.debug("Instrumenting: %s.%s", name, func.__name__) attributes = attributes or {} name = f"{name}.{func.__name__}" return implement_span_function(func, name, attributes) def implement_span_function(func: Callable, name: str, attributes: Attributes): """Wrap the function with a span. Args: func: A callable to be wrapped in a span name: The name of the span attributes: Attributes to apply to the span Returns: The passed function wrapped in a span. 
""" @wraps(func) def wrapper(*args, **kwargs): with get_tracer(__name__, __version__).start_as_current_span( name=name ) as span: if span.is_recording(): for key, val in attributes.items(): span.set_attribute(key, val) return func(*args, **kwargs) return wrapper def implement_span_delegator( obj: _IffHasAttrDescriptor, attributes: Attributes = None ): """Wrap the descriptor's fn with a span. Args: obj: An instance of _IffHasAttrDescriptor attributes: Attributes to apply to the span """ # Don't instrument inherited delegators if hasattr(obj, "_otel_original_fn"): logger.debug("Already instrumented: %s", obj.fn.__qualname__) return logger.debug("Instrumenting: %s", obj.fn.__qualname__) attributes = attributes or {} setattr(obj, "_otel_original_fn", getattr(obj, "fn")) setattr( obj, "fn", implement_span_function(obj.fn, obj.fn.__qualname__, attributes), ) def get_delegator( estimator: Type[BaseEstimator], method_name: str ) -> Union[_IffHasAttrDescriptor, None]: """Get the delegator from a class method or None. Args: estimator: A class derived from ``sklearn``'s ``BaseEstimator``. method_name (str): The method name of the estimator on which to check for delegation. Returns: The delegator, if one exists, otherwise None. """ class_attr = getattr(estimator, method_name) if getattr(class_attr, "__closure__", None) is not None: for cell in class_attr.__closure__: if isinstance(cell.cell_contents, _IffHasAttrDescriptor): return cell.cell_contents return None def get_base_estimators(packages: List[str]) -> Dict[str, Type[BaseEstimator]]: """Walk package hierarchies to get BaseEstimator-derived classes. Args: packages (list(str)): A list of package names to instrument. Returns: A dictionary of qualnames and classes inheriting from ``BaseEstimator``. """ klasses = {} for package_name in packages: lib = import_module(package_name) package_dir = os.path.dirname(lib.__file__) for (_, module_name, _) in iter_modules([package_dir]): # import the module and iterate through its attributes try: module = import_module(package_name + "." + module_name) except ImportError: logger.warning( "Unable to import %s.%s", package_name, module_name ) continue for attribute_name in dir(module): attrib = getattr(module, attribute_name) if isclass(attrib) and issubclass(attrib, BaseEstimator): klasses[ ".".join([package_name, module_name, attribute_name]) ] = attrib return klasses # Methods on which spans should be applied. DEFAULT_METHODS = [ "fit", "transform", "predict", "predict_proba", "_fit", "_transform", "_predict", "_predict_proba", ] # Classes and their attributes which contain a list of tupled estimators # through which we should walk recursively for estimators. DEFAULT_NAMEDTUPLE_ATTRIBS = { Pipeline: ["steps"], FeatureUnion: ["transformer_list"], } # Classes and their attributes which contain an estimator or sequence of # estimators through which we should walk recursively for estimators. DEFAULT_ATTRIBS = {} # Classes (including children) explicitly excluded from autoinstrumentation DEFAULT_EXCLUDE_CLASSES = [BaseDecisionTree] # Default packages for autoinstrumentation DEFAULT_PACKAGES = ["sklearn"] class SklearnInstrumentor(BaseInstrumentor): """Instrument a fitted sklearn model with opentelemetry spans. Instrument methods of ``BaseEstimator``-derived components in a sklearn model. The assumption is that a machine learning model ``Pipeline`` (or class descendent) is being instrumented with opentelemetry. Within a ``Pipeline`` is some hierarchy of estimators and transformers. 
The ``instrument_estimator`` method walks this hierarchy of estimators, implementing each of the defined methods with its own span. Certain estimators in the sklearn ecosystem contain other estimators as instance attributes. Support for walking this embedded sub-hierarchy is supported with ``recurse_attribs``. This argument is a dictionary with classes as keys, and a list of attributes representing embedded estimators as values. By default, ``recurse_attribs`` is empty. Similar to Pipelines, there are also estimators which have class attributes as a list of 2-tuples; for instance, the ``FeatureUnion`` and its attribute ``transformer_list``. Instrumenting estimators like this is also supported through the ``recurse_namedtuple_attribs`` argument. This argument is a dictionary with classes as keys, and a list of attribute names representing the namedtuple list(s). By default, the ``recurse_namedtuple_attribs`` dictionary supports ``Pipeline`` with ``steps``, and ``FeatureUnion`` with ``transformer_list``. Note that spans will not be generated for any child transformer whose parent transformer has ``n_jobs`` parameter set to anything besides ``None`` or ``1``. Package instrumentation example: .. code-block:: python from opentelemetry.instrumentation.sklearn import SklearnInstrumentor # instrument the sklearn library SklearnInstrumentor().instrument() # instrument several sklearn-compatible libraries packages = ["sklearn", "lightgbm", "xgboost"] SklearnInstrumentor(packages=packages).instrument() Model intrumentation example: .. code-block:: python from opentelemetry.instrumentation.sklearn import SklearnInstrumentor from sklearn.datasets import load_iris from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split from sklearn.pipeline import Pipeline X, y = load_iris(return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y) model = Pipeline( [ ("class", RandomForestClassifier(n_estimators=10)), ] ) model.fit(X_train, y_train) SklearnInstrumentor().instrument_estimator(model) Args: methods (list): A list of method names on which to instrument a span. This list of methods will be checked on all estimators in the model hierarchy. Used in package and model instrumentation recurse_attribs (dict): A dictionary of ``BaseEstimator``-derived sklearn classes as keys, with values being a list of attributes. Each attribute represents either an estimator or list of estimators on which to also implement spans. An example is ``RandomForestClassifier`` and its attribute ``estimators_``. Used in model instrumentation only. recurse_namedtuple_attribs (dict): A dictionary of ``BaseEstimator``- derived sklearn types as keys, with values being a list of attribute names. Each attribute represents a list of 2-tuples in which the first element is the estimator name, and the second element is the estimator. Defaults include sklearn's ``Pipeline`` and its attribute ``steps``, and the ``FeatureUnion`` and its attribute ``transformer_list``. Used in model instrumentation only. packages: A list of sklearn-compatible packages to instrument. Used with package instrumentation only. exclude_classes: A list of classes to exclude from instrumentation. Child classes are also excluded. Default is sklearn's ``[BaseDecisionTree]``. """ def __new__(cls, *args, **kwargs): """Override new. The base class' new method passes args and kwargs. 
We override because we init the class with configuration and Python raises TypeError when additional arguments are passed to the object.__new__() method. """ if cls._instance is None: cls._instance = object.__new__(cls) return cls._instance def __init__( self, methods: List[str] = None, recurse_attribs: Dict[Type[BaseEstimator], List[str]] = None, recurse_namedtuple_attribs: Dict[ Type[BaseEstimator], List[str] ] = None, packages: List[str] = None, exclude_classes: List[Type] = None, ): self.methods = methods or DEFAULT_METHODS self.recurse_attribs = recurse_attribs or DEFAULT_ATTRIBS self.recurse_namedtuple_attribs = ( recurse_namedtuple_attribs or DEFAULT_NAMEDTUPLE_ATTRIBS ) self.packages = packages or DEFAULT_PACKAGES if exclude_classes is None: self.exclude_classes = tuple(DEFAULT_EXCLUDE_CLASSES) else: self.exclude_classes = tuple(exclude_classes) def instrumentation_dependencies(self) -> Collection[str]: return _instruments def _instrument(self, **kwargs): """Instrument the library, and any additional specified on init.""" klasses = get_base_estimators(packages=self.packages) attributes = kwargs.get("attributes") for _, klass in klasses.items(): if issubclass(klass, self.exclude_classes): logger.debug("Not instrumenting (excluded): %s", str(klass)) else: logger.debug("Instrumenting: %s", str(klass)) for method_name in self.methods: if hasattr(klass, method_name): self._instrument_class_method( estimator=klass, method_name=method_name, attributes=attributes, ) def _uninstrument(self, **kwargs): """Uninstrument the library""" klasses = get_base_estimators(packages=self.packages) for _, klass in klasses.items(): logger.debug("Uninstrumenting: %s", str(klass)) for method_name in self.methods: if hasattr(klass, method_name): self._uninstrument_class_method( estimator=klass, method_name=method_name ) def instrument_estimator( self, estimator: BaseEstimator, attributes: Attributes = None ): """Instrument a fitted estimator and its hierarchy where configured. Args: estimator (sklearn.base.BaseEstimator): A fitted ``sklearn`` estimator, typically a ``Pipeline`` instance. attributes (dict): Attributes to attach to the spans. """ if isinstance(estimator, self.exclude_classes): logger.debug( "Not instrumenting (excluded): %s", estimator.__class__.__name__, ) return if isinstance( estimator, tuple(self.recurse_namedtuple_attribs.keys()) ): self._instrument_estimator_namedtuple( estimator=estimator, attributes=attributes ) if isinstance(estimator, tuple(self.recurse_attribs.keys())): self._instrument_estimator_attribute( estimator=estimator, attributes=attributes ) for method_name in self.methods: if hasattr(estimator, method_name): self._instrument_instance_method( estimator=estimator, method_name=method_name, attributes=attributes, ) def uninstrument_estimator(self, estimator: BaseEstimator): """Uninstrument a fitted estimator and its hierarchy where configured. Args: estimator (sklearn.base.BaseEstimator): A fitted ``sklearn`` estimator, typically a ``Pipeline`` instance. 
""" if isinstance(estimator, self.exclude_classes): logger.debug( "Not uninstrumenting (excluded): %s", estimator.__class__.__name__, ) return if isinstance( estimator, tuple(self.recurse_namedtuple_attribs.keys()) ): self._uninstrument_estimator_namedtuple(estimator=estimator) if isinstance(estimator, tuple(self.recurse_attribs.keys())): self._uninstrument_estimator_attribute(estimator=estimator) for method_name in self.methods: if hasattr(estimator, method_name): self._uninstrument_instance_method( estimator=estimator, method_name=method_name ) def _check_instrumented( self, estimator: Union[BaseEstimator, Type[BaseEstimator]], method_name: str, ) -> bool: """Check an estimator-method is instrumented. Args: estimator (BaseEstimator): A class or instance of an ``sklearn`` estimator. method_name (str): The method name of the estimator on which to check for instrumentation. """ orig_method_name = "_otel_original_" + method_name has_original = hasattr(estimator, orig_method_name) orig_class, orig_method = getattr( estimator, orig_method_name, (None, None) ) same_class = orig_class == estimator if has_original and same_class: class_method = self._unwrap_function( getattr(estimator, method_name) ) # if they match then the subclass doesn't override # if they don't then the overridden method needs instrumentation if class_method.__name__ == orig_method.__name__: return True return False def _uninstrument_class_method( self, estimator: Type[BaseEstimator], method_name: str ): """Uninstrument a class method. Replaces the patched method with the original, and deletes the attribute which stored the original method. Args: estimator (BaseEstimator): A class or instance of an ``sklearn`` estimator. method_name (str): The method name of the estimator on which to apply a span. """ orig_method_name = "_otel_original_" + method_name if isclass(estimator): qualname = estimator.__qualname__ else: qualname = estimator.__class__.__qualname__ delegator = get_delegator(estimator, method_name) if self._check_instrumented(estimator, method_name): logger.debug( "Uninstrumenting: %s.%s", qualname, method_name, ) _, orig_method = getattr(estimator, orig_method_name) setattr( estimator, method_name, orig_method, ) delattr(estimator, orig_method_name) elif delegator is not None: if not hasattr(delegator, "_otel_original_fn"): logger.debug( "Already uninstrumented: %s.%s", qualname, method_name, ) return setattr( delegator, "fn", getattr(delegator, "_otel_original_fn"), ) delattr(delegator, "_otel_original_fn") else: logger.debug( "Already uninstrumented: %s.%s", qualname, method_name, ) def _uninstrument_instance_method( self, estimator: BaseEstimator, method_name: str ): """Uninstrument an instance method. Replaces the patched method with the original, and deletes the attribute which stored the original method. Args: estimator (BaseEstimator): A class or instance of an ``sklearn`` estimator. method_name (str): The method name of the estimator on which to apply a span. 
""" orig_method_name = "_otel_original_" + method_name if isclass(estimator): qualname = estimator.__qualname__ else: qualname = estimator.__class__.__qualname__ if self._check_instrumented(estimator, method_name): logger.debug( "Uninstrumenting: %s.%s", qualname, method_name, ) _, orig_method = getattr(estimator, orig_method_name) setattr( estimator, method_name, orig_method, ) delattr(estimator, orig_method_name) else: logger.debug( "Already uninstrumented: %s.%s", qualname, method_name, ) def _instrument_class_method( self, estimator: Type[BaseEstimator], method_name: str, attributes: Attributes = None, ): """Instrument an estimator method with a span. When instrumenting we attach a tuple of (Class, method) to the attribute ``_otel_original_<method_name>`` for each method. This allows us to replace the patched with the original in uninstrumentation, but also allows proper instrumentation of child classes without instrumenting inherited methods twice. Args: estimator (BaseEstimator): A ``BaseEstimator``-derived class method_name (str): The method name of the estimator on which to apply a span. attributes (dict): Attributes to attach to the spans. """ if self._check_instrumented(estimator, method_name): logger.debug( "Already instrumented: %s.%s", estimator.__qualname__, method_name, ) return class_attr = getattr(estimator, method_name) delegator = get_delegator(estimator, method_name) if isinstance(class_attr, property): logger.debug( "Not instrumenting found property: %s.%s", estimator.__qualname__, method_name, ) elif delegator is not None: implement_span_delegator(delegator) else: setattr( estimator, "_otel_original_" + method_name, (estimator, class_attr), ) setattr( estimator, method_name, implement_span_estimator(class_attr, estimator, attributes), ) def _unwrap_function(self, function): """Fetch the function underlying any decorators""" if hasattr(function, "__wrapped__"): return self._unwrap_function(function.__wrapped__) return function def _instrument_instance_method( self, estimator: BaseEstimator, method_name: str, attributes: Attributes = None, ): """Instrument an estimator instance method with a span. When instrumenting we attach a tuple of (Class, method) to the attribute ``_otel_original_<method_name>`` for each method. This allows us to replace the patched with the original in unstrumentation. Args: estimator (BaseEstimator): A fitted ``sklearn`` estimator. method_name (str): The method name of the estimator on which to apply a span. attributes (dict): Attributes to attach to the spans. """ if self._check_instrumented(estimator, method_name): logger.debug( "Already instrumented: %s.%s", estimator.__class__.__qualname__, method_name, ) return class_attr = getattr(type(estimator), method_name, None) if isinstance(class_attr, property): logger.debug( "Not instrumenting found property: %s.%s", estimator.__class__.__qualname__, method_name, ) else: method = getattr(estimator, method_name) setattr( estimator, "_otel_original_" + method_name, (estimator, method) ) setattr( estimator, method_name, implement_span_estimator(method, estimator, attributes), ) def _instrument_estimator_attribute( self, estimator: BaseEstimator, attributes: Attributes = None ): """Instrument instance attributes which also contain estimators. Handle instance attributes which are also estimators, are a list (Sequence) of estimators, or are mappings (dictionary) in which the values are estimators. 
Examples include ``RandomForestClassifier`` and ``MultiOutputRegressor`` instances which have attributes ``estimators_`` attributes. Args: estimator (BaseEstimator): A fitted ``sklearn`` estimator, with an attribute which also contains an estimator or collection of estimators. attributes (dict): Attributes to attach to the spans. """ attribs = self.recurse_attribs.get(estimator.__class__, []) for attrib in attribs: attrib_value = getattr(estimator, attrib) if isinstance(attrib_value, Sequence): for value in attrib_value: self.instrument_estimator( estimator=value, attributes=attributes ) elif isinstance(attrib_value, MutableMapping): for value in attrib_value.values(): self.instrument_estimator( estimator=value, attributes=attributes ) else: self.instrument_estimator( estimator=attrib_value, attributes=attributes ) def _instrument_estimator_namedtuple( self, estimator: BaseEstimator, attributes: Attributes = None ): """Instrument attributes with (name, estimator) tupled components. Examples include Pipeline and FeatureUnion instances which have attributes steps and transformer_list, respectively. Args: estimator: A fitted sklearn estimator, with an attribute which also contains an estimator or collection of estimators. attributes (dict): Attributes to attach to the spans. """ attribs = self.recurse_namedtuple_attribs.get(estimator.__class__, []) for attrib in attribs: for _, est in getattr(estimator, attrib): self.instrument_estimator(estimator=est, attributes=attributes) def _uninstrument_estimator_attribute(self, estimator: BaseEstimator): """Uninstrument instance attributes which also contain estimators. Handle instance attributes which are also estimators, are a list (Sequence) of estimators, or are mappings (dictionary) in which the values are estimators. Examples include ``RandomForestClassifier`` and ``MultiOutputRegressor`` instances which have attributes ``estimators_`` attributes. Args: estimator (BaseEstimator): A fitted ``sklearn`` estimator, with an attribute which also contains an estimator or collection of estimators. """ attribs = self.recurse_attribs.get(estimator.__class__, []) for attrib in attribs: attrib_value = getattr(estimator, attrib) if isinstance(attrib_value, Sequence): for value in attrib_value: self.uninstrument_estimator(estimator=value) elif isinstance(attrib_value, MutableMapping): for value in attrib_value.values(): self.uninstrument_estimator(estimator=value) else: self.uninstrument_estimator(estimator=attrib_value) def _uninstrument_estimator_namedtuple(self, estimator: BaseEstimator): """Uninstrument attributes with (name, estimator) tupled components. Examples include Pipeline and FeatureUnion instances which have attributes steps and transformer_list, respectively. Args: estimator: A fitted sklearn estimator, with an attribute which also contains an estimator or collection of estimators. """ attribs = self.recurse_namedtuple_attribs.get(estimator.__class__, []) for attrib in attribs: for _, est in getattr(estimator, attrib): self.uninstrument_estimator(estimator=est)
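# A hedged usage sketch (not part of the module above): one way to make the spans that
# SklearnInstrumentor emits visible is to pair it with the OpenTelemetry SDK's console
# exporter. The SDK classes used here (TracerProvider, ConsoleSpanExporter,
# SimpleSpanProcessor) come from the separate opentelemetry-sdk package and have been
# renamed between releases (older SDKs call the processor SimpleExportSpanProcessor),
# so adjust the imports to the installed SDK version.
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
from opentelemetry.instrumentation.sklearn import SklearnInstrumentor
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline

# Export spans to stdout so the instrumentation is visible without a collector.
trace.set_tracer_provider(TracerProvider())
trace.get_tracer_provider().add_span_processor(
    SimpleSpanProcessor(ConsoleSpanExporter())
)

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y)
model = Pipeline([("class", RandomForestClassifier(n_estimators=10))])
model.fit(X_train, y_train)

# Attributes passed here are attached to every span generated for this model's methods.
SklearnInstrumentor().instrument_estimator(model, attributes={"model_name": "iris_rf"})
model.predict(X_test)  # spans for the instrumented Pipeline/estimator methods are printed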
preprocess.py
astricks/Voice-Conversion-GAN
104
131865
<gh_stars>100-1000 import librosa import numpy as np import os import pyworld from pprint import pprint import librosa.display import time def load_wavs(wav_dir, sr): wavs = list() for file in os.listdir(wav_dir): file_path = os.path.join(wav_dir, file) wav, _ = librosa.load(file_path, sr=sr, mono=True) # wav = wav.astype(np.float64) wavs.append(wav) return wavs def world_decompose(wav, fs, frame_period=5.0): # Decompose speech signal into f0, spectral envelope and aperiodicity using WORLD wav = wav.astype(np.float64) f0, timeaxis = pyworld.harvest( wav, fs, frame_period=frame_period, f0_floor=71.0, f0_ceil=800.0) # Finding Spectogram sp = pyworld.cheaptrick(wav, f0, timeaxis, fs) # Finding aperiodicity ap = pyworld.d4c(wav, f0, timeaxis, fs) # Use this in Ipython to see plot # librosa.display.specshow(np.log(sp).T, # sr=fs, # hop_length=int(0.001 * fs * frame_period), # x_axis="time", # y_axis="linear", # cmap="magma") # colorbar() return f0, timeaxis, sp, ap def world_encode_spectral_envelop(sp, fs, dim=24): # Get Mel-Cepstral coefficients (MCEPs) # sp = sp.astype(np.float64) coded_sp = pyworld.code_spectral_envelope(sp, fs, dim) return coded_sp def world_encode_data(wave, fs, frame_period=5.0, coded_dim=24): f0s = list() timeaxes = list() sps = list() aps = list() coded_sps = list() for wav in wave: f0, timeaxis, sp, ap = world_decompose(wav=wav, fs=fs, frame_period=frame_period) coded_sp = world_encode_spectral_envelop(sp=sp, fs=fs, dim=coded_dim) f0s.append(f0) timeaxes.append(timeaxis) sps.append(sp) aps.append(ap) coded_sps.append(coded_sp) return f0s, timeaxes, sps, aps, coded_sps def logf0_statistics(f0s): # Note: np.ma.log() calculating log on masked array (for incomplete or invalid entries in array) log_f0s_concatenated = np.ma.log(np.concatenate(f0s)) log_f0s_mean = log_f0s_concatenated.mean() log_f0s_std = log_f0s_concatenated.std() return log_f0s_mean, log_f0s_std def transpose_in_list(lst): transposed_lst = list() for array in lst: transposed_lst.append(array.T) return transposed_lst def coded_sps_normalization_fit_transform(coded_sps): coded_sps_concatenated = np.concatenate(coded_sps, axis=1) coded_sps_mean = np.mean(coded_sps_concatenated, axis=1, keepdims=True) coded_sps_std = np.std(coded_sps_concatenated, axis=1, keepdims=True) coded_sps_normalized = list() for coded_sp in coded_sps: coded_sps_normalized.append( (coded_sp - coded_sps_mean) / coded_sps_std) return coded_sps_normalized, coded_sps_mean, coded_sps_std def wav_padding(wav, sr, frame_period, multiple=4): assert wav.ndim == 1 num_frames = len(wav) num_frames_padded = int((np.ceil((np.floor(num_frames / (sr * frame_period / 1000)) + 1) / multiple + 1) * multiple - 1) * (sr * frame_period / 1000)) num_frames_diff = num_frames_padded - num_frames num_pad_left = num_frames_diff // 2 num_pad_right = num_frames_diff - num_pad_left wav_padded = np.pad(wav, (num_pad_left, num_pad_right), 'constant', constant_values=0) return wav_padded def pitch_conversion(f0, mean_log_src, std_log_src, mean_log_target, std_log_target): # Logarithm Gaussian Normalization for Pitch Conversions f0_converted = np.exp((np.log(f0) - mean_log_src) / std_log_src * std_log_target + mean_log_target) return f0_converted def world_decode_spectral_envelop(coded_sp, fs): fftlen = pyworld.get_cheaptrick_fft_size(fs) decoded_sp = pyworld.decode_spectral_envelope(coded_sp, fs, fftlen) return decoded_sp def world_speech_synthesis(f0, decoded_sp, ap, fs, frame_period): wav = pyworld.synthesize(f0, decoded_sp, ap, fs, frame_period) wav = 
wav.astype(np.float32) return wav def sample_train_data(dataset_A, dataset_B, n_frames=128): # Created Pytorch custom dataset instead num_samples = min(len(dataset_A), len(dataset_B)) train_data_A_idx = np.arange(len(dataset_A)) train_data_B_idx = np.arange(len(dataset_B)) np.random.shuffle(train_data_A_idx) np.random.shuffle(train_data_B_idx) train_data_A_idx_subset = train_data_A_idx[:num_samples] train_data_B_idx_subset = train_data_B_idx[:num_samples] train_data_A = list() train_data_B = list() for idx_A, idx_B in zip(train_data_A_idx_subset, train_data_B_idx_subset): data_A = dataset_A[idx_A] frames_A_total = data_A.shape[1] assert frames_A_total >= n_frames start_A = np.random.randint(frames_A_total - n_frames + 1) end_A = start_A + n_frames train_data_A.append(data_A[:, start_A:end_A]) data_B = dataset_B[idx_B] frames_B_total = data_B.shape[1] assert frames_B_total >= n_frames start_B = np.random.randint(frames_B_total - n_frames + 1) end_B = start_B + n_frames train_data_B.append(data_B[:, start_B:end_B]) train_data_A = np.array(train_data_A) train_data_B = np.array(train_data_B) return train_data_A, train_data_B if __name__ == '__main__': start_time = time.time() wavs = load_wavs("../data/vcc2016_training/SF1/", 16000) # pprint(wavs) f0, timeaxis, sp, ap = world_decompose(wavs[0], 16000, 5.0) print(f0.shape, timeaxis.shape, sp.shape, ap.shape) coded_sp = world_encode_spectral_envelop(sp, 16000, 24) print(coded_sp.shape) f0s, timeaxes, sps, aps, coded_sps = world_encode_data(wavs, 16000, 5, 24) # print(f0s) log_f0_mean, log_f0_std = logf0_statistics(f0s) # print(log_f0_mean) coded_sps_transposed = transpose_in_list(lst=coded_sps) # print(coded_sps_transposed) coded_sps_norm, coded_sps_mean, coded_sps_std = coded_sps_normalization_fit_transform( coded_sps=coded_sps_transposed) print( "Total time for preprcessing-> {:.4f}".format(time.time() - start_time)) print(len(coded_sps_norm), coded_sps_norm[0].shape) temp_A = np.random.randn(162, 24, 550) temp_B = np.random.randn(158, 24, 550) a, b = sample_train_data(temp_A, temp_B) print(a.shape, b.shape)
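# A small synthetic check of the helpers above (no audio files or WORLD analysis needed);
# the statistics and lengths used here are illustrative assumptions only.
import numpy as np

# Log-Gaussian normalized pitch transform: shift a source F0 contour onto the target
# speaker's log-F0 statistics. With equal standard deviations this doubles the pitch.
f0_src = np.array([100.0, 110.0, 121.0])
f0_conv = pitch_conversion(
    f0_src,
    mean_log_src=np.log(105.0), std_log_src=0.1,
    mean_log_target=np.log(210.0), std_log_target=0.1)
print(f0_conv)  # exactly one octave higher, matching the target statistics

# Padding so the resulting number of WORLD frames (hop = sr * frame_period / 1000 samples)
# is compatible with the "multiple of 4" frame requirement used downstream.
wav = np.random.randn(2 * 16000)
padded = wav_padding(wav, sr=16000, frame_period=5.0, multiple=4)
print(len(wav), len(padded))  # 32000 -> 32560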
examples/access-levels-write/access-levels-write.py
kylelaker/iam-floyd
360
131878
import iam_floyd as statement import importlib import os import sys import inspect currentdir = os.path.dirname(os.path.abspath( inspect.getfile(inspect.currentframe()))) helperDir = '%s/../../helper/python' % currentdir sys.path.insert(0, helperDir) test = importlib.import_module('python_test') out = getattr(test, 'out') deploy = getattr(test, 'deploy') s = ( # doc-start statement.Ec2() \ .allow() \ .all_write_actions() # doc-end ) all = [s] out(all) # deploy(all) disabled, bc exceeds policy size limit
tests/record/test_legacy.py
mauritsvdvijgh/aiokafka
731
131882
<gh_stars>100-1000 import struct from unittest import mock import pytest from aiokafka.record.legacy_records import ( LegacyRecordBatch, LegacyRecordBatchBuilder ) from aiokafka.errors import CorruptRecordException @pytest.mark.parametrize("magic", [0, 1]) @pytest.mark.parametrize("key,value,checksum", [ (b"test", b"Super", [278251978, -2095076219]), (b"test", None, [580701536, 164492157]), (None, b"Super", [2797021502, 3315209433]), (b"", b"Super", [1446809667, 890351012]), (b"test", b"", [4230475139, 3614888862]), ]) def test_read_write_serde_v0_v1_no_compression(magic, key, value, checksum): builder = LegacyRecordBatchBuilder( magic=magic, compression_type=0, batch_size=1024 * 1024) builder.append(0, timestamp=9999999, key=key, value=value) buffer = builder.build() batch = LegacyRecordBatch(buffer, magic) assert batch.validate_crc() assert batch.is_control_batch is False assert batch.is_transactional is False assert batch.producer_id is None assert batch.next_offset == 1 msgs = list(batch) assert len(msgs) == 1 msg = msgs[0] assert msg.offset == 0 assert msg.timestamp == (9999999 if magic else None) assert msg.timestamp_type == (0 if magic else None) assert msg.key == key assert msg.value == value assert msg.checksum == checksum[magic] & 0xffffffff @pytest.mark.parametrize("compression_type", [ LegacyRecordBatch.CODEC_GZIP, LegacyRecordBatch.CODEC_SNAPPY, LegacyRecordBatch.CODEC_LZ4 ]) @pytest.mark.parametrize("magic", [0, 1]) def test_read_write_serde_v0_v1_with_compression(compression_type, magic): builder = LegacyRecordBatchBuilder( magic=magic, compression_type=compression_type, batch_size=1024 * 1024) for offset in range(10): builder.append( offset, timestamp=9999999, key=b"test", value=b"Super") buffer = builder.build() # Broker will set the offset to a proper last offset value struct.pack_into(">q", buffer, 0, 9) batch = LegacyRecordBatch(buffer, magic) assert batch.validate_crc() assert batch.is_control_batch is False assert batch.is_transactional is False assert batch.producer_id is None assert batch.next_offset == 10 msgs = list(batch) for offset, msg in enumerate(msgs): assert msg.offset == offset assert msg.timestamp == (9999999 if magic else None) assert msg.timestamp_type == (0 if magic else None) assert msg.key == b"test" assert msg.value == b"Super" assert msg.checksum == (-2095076219 if magic else 278251978) & \ 0xffffffff @pytest.mark.parametrize("magic", [0, 1]) def test_written_bytes_equals_size_in_bytes(magic): key = b"test" value = b"Super" builder = LegacyRecordBatchBuilder( magic=magic, compression_type=0, batch_size=1024 * 1024) size_in_bytes = builder.size_in_bytes( 0, timestamp=9999999, key=key, value=value) pos = builder.size() builder.append(0, timestamp=9999999, key=key, value=value) assert builder.size() - pos == size_in_bytes @pytest.mark.parametrize("magic", [0, 1]) def test_legacy_batch_builder_validates_arguments(magic): builder = LegacyRecordBatchBuilder( magic=magic, compression_type=0, batch_size=1024 * 1024) # Key should not be str with pytest.raises(TypeError): builder.append( 0, timestamp=9999999, key="some string", value=None) # Value should not be str with pytest.raises(TypeError): builder.append( 0, timestamp=9999999, key=None, value="some string") # Timestamp should be of proper type (timestamp is ignored for magic == 0) if magic != 0: with pytest.raises(TypeError): builder.append( 0, timestamp="1243812793", key=None, value=b"some string") # Offset of invalid type with pytest.raises(TypeError): builder.append( "0", timestamp=9999999, 
key=None, value=b"some string") # Unknown struct errors are passed through. These are theoretical and # indicate a bug in the implementation. The C implementation locates # _encode_msg elsewhere and is less vulnerable to such bugs since it's # statically typed, so we skip the test there. if hasattr(builder, "_encode_msg"): with mock.patch.object(builder, "_encode_msg") as mocked: err = struct.error("test error") mocked.side_effect = err with pytest.raises(struct.error) as excinfo: builder.append( 0, timestamp=None, key=None, value=b"some string") assert excinfo.value == err # Ok to pass value as None builder.append( 0, timestamp=9999999, key=b"123", value=None) # Timestamp can be None builder.append( 1, timestamp=None, key=None, value=b"some string") # Ok to pass offsets in not incremental order. This should not happen thou builder.append( 5, timestamp=9999999, key=b"123", value=None) # in case error handling code fails to fix inner buffer in builder assert len(builder.build()) == 119 if magic else 95 @pytest.mark.parametrize("magic", [0, 1]) def test_legacy_correct_metadata_response(magic): builder = LegacyRecordBatchBuilder( magic=magic, compression_type=0, batch_size=1024 * 1024) meta = builder.append( 0, timestamp=9999999, key=b"test", value=b"Super") assert meta.offset == 0 assert meta.timestamp == (9999999 if magic else -1) assert meta.crc == (-2095076219 if magic else 278251978) & 0xffffffff assert repr(meta) == ( "LegacyRecordMetadata(offset=0, crc={}, size={}, " "timestamp={})".format(meta.crc, meta.size, meta.timestamp) ) @pytest.mark.parametrize("magic", [0, 1]) def test_legacy_batch_size_limit(magic): # First message can be added even if it's too big builder = LegacyRecordBatchBuilder( magic=magic, compression_type=0, batch_size=1024) meta = builder.append(0, timestamp=None, key=None, value=b"M" * 2000) assert meta.size > 0 assert meta.crc is not None assert meta.offset == 0 assert meta.timestamp is not None assert len(builder.build()) > 2000 builder = LegacyRecordBatchBuilder( magic=magic, compression_type=0, batch_size=1024) meta = builder.append(0, timestamp=None, key=None, value=b"M" * 700) assert meta is not None meta = builder.append(1, timestamp=None, key=None, value=b"M" * 700) assert meta is None meta = builder.append(2, timestamp=None, key=None, value=b"M" * 700) assert meta is None assert len(builder.build()) < 1000 ATTRIBUTES_OFFSET = 17 TIMESTAMP_OFFSET = 18 TIMESTAMP_TYPE_MASK = 0x08 def _make_compressed_batch(magic): builder = LegacyRecordBatchBuilder( magic=magic, compression_type=LegacyRecordBatch.CODEC_GZIP, batch_size=1024 * 1024) for offset in range(10): builder.append( offset, timestamp=9999999, key=b"test", value=b"Super") return builder.build() def test_read_log_append_time_v1(): buffer = _make_compressed_batch(1) # As Builder does not support creating data with `timestamp_type==1` we # patch the result manually buffer[ATTRIBUTES_OFFSET] |= TIMESTAMP_TYPE_MASK expected_timestamp = 10000000 struct.pack_into(">q", buffer, TIMESTAMP_OFFSET, expected_timestamp) batch = LegacyRecordBatch(buffer, 1) msgs = list(batch) for offset, msg in enumerate(msgs): assert msg.offset == offset assert msg.timestamp == expected_timestamp assert msg.timestamp_type == 1 @pytest.mark.parametrize("magic", [0, 1]) def test_reader_corrupt_record_v0_v1(magic): buffer = _make_compressed_batch(magic) len_offset = 8 # If the wrapper of compressed messages has a key it will just be ignored. 
key_offset = 26 if magic else 18 new_buffer = ( buffer[:key_offset] + b"\x00\x00\x00\x03123" + # Insert some KEY into wrapper buffer[key_offset + 4:] # Ignore the 4 byte -1 value for old KEY==None ) struct.pack_into(">i", new_buffer, len_offset, len(new_buffer) - 12) batch = LegacyRecordBatch(new_buffer, magic) msgs = list(batch) for offset, msg in enumerate(msgs): assert msg.offset == offset assert msg.timestamp == (9999999 if magic else None) assert msg.timestamp_type == (0 if magic else None) assert msg.key == b"test" assert msg.value == b"Super" assert msg.checksum == (-2095076219 if magic else 278251978) & \ 0xffffffff # If the wrapper does not contain a `value` it's corrupted value_offset = 30 if magic else 22 new_buffer = ( buffer[:value_offset] + b"\xff\xff\xff\xff" # Set `value` to None by altering size to -1 ) struct.pack_into(">i", new_buffer, len_offset, len(new_buffer) - 12) with pytest.raises( CorruptRecordException, match="Value of compressed message is None"): batch = LegacyRecordBatch(new_buffer, magic) list(batch) def test_record_overhead(): known = { 0: 14, 1: 22, } for magic, size in known.items(): assert LegacyRecordBatchBuilder.record_overhead(magic) == size
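# A standalone round trip distilled from the tests above (not an extra test case):
# every call used here — the builder constructor arguments, append(), build(),
# validate_crc() and the record attributes — appears verbatim in the tests.
from aiokafka.record.legacy_records import (
    LegacyRecordBatch, LegacyRecordBatchBuilder
)

builder = LegacyRecordBatchBuilder(magic=1, compression_type=0, batch_size=1024 * 1024)
builder.append(0, timestamp=9999999, key=b"key", value=b"value")
buffer = builder.build()

batch = LegacyRecordBatch(buffer, 1)
assert batch.validate_crc()
for record in batch:
    # For magic v1: offset=0, timestamp=9999999, timestamp_type=0
    print(record.offset, record.timestamp, record.key, record.value, record.checksum)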
manage_firmware/firmware_upgrade_manager.py
robegli/automation-scripts
274
131931
<filename>manage_firmware/firmware_upgrade_manager.py import meraki import datetime import time ''' Cisco Meraki Firmware Upgrade Manager <NAME> .:|:.:|:. https://github.com/TKIPisalegacycipher This script will pull network IDs from an org and then create asynchronous action batches. Each batch will contain, for each network, an action that will delay the upgrade datetime stamp by X days (configurable). Each batch can contain up to 100 actions, therefore, each batch can modify up to 100 networks. As always, you should read the docs before diving in. If you know how these features work, then it will be easier to understand and leverage this tool. Firmware upgrades endpoint: https://developer.cisco.com/meraki/api-v1/#!get-network-firmware-upgrades Action batches: https://developer.cisco.com/meraki/api-v1/#!action-batches-overview NB: Once you start the script, there are no confirmation prompts or previews, so test in a lab if necessary. NB: When the final batch has been submitted, depending on the batch size, it may take a few minutes to finish. Feeling creative? Then try extending this script (using existing code, for the most part) to confirm when the batches are complete. Feeling super creative? Wrap this behind a Flask frontend and have yourself a merry little GUI. ''' # init Meraki Python SDK session dashboard = meraki.DashboardAPI(suppress_logging=True, single_request_timeout=120) # Configurable options # Organization ID. Replace this with your actual organization ID. organization_id = 'YOUR ORG ID HERE' # Use your own organization ID. time_delta_in_days = 30 # Max is 1 month per the firmware upgrades endpoint docs actions_per_batch = 100 # Max number of actions to submit in a batch. 100 is the maximum. Bigger batches take longer. wait_factor = 0.33 # Wait factor for action batches when the action batch queue is full. # Firmware IDs; not needed for rescheduling, only for upgrading. If you plan to use this for upgrading, then you should # first GET the availableVersions IDs and use those here instead, since they have probably changed from the time this # was published. mx_new_firmware_id = 2128 # Did you update this to your actual FW ID by GETing your availableFirmwareVersions? mx_old_firmware_id = 2009 # Did you update this to your actual FW ID by GETing your availableFirmwareVersions? def time_formatter(date_time_stamp): # Basic time formatter to return strings that the API requires formatted_date_time_stamp = date_time_stamp.replace(microsecond=0).isoformat() + 'Z' return formatted_date_time_stamp # Time stamps utc_now = datetime.datetime.utcnow() utc_future = utc_now + datetime.timedelta(days=time_delta_in_days) utc_now_formatted = time_formatter(utc_now) utc_future_formatted = time_formatter(utc_future) action_reschedule_existing = { "products": { "appliance": { "nextUpgrade": { "time": utc_future_formatted } } } } # Use this action to schedule a new upgrade. If you do not provide a time param (as shown above), it will execute # immediately. IMPORTANT: See API docs for more info before using this. 
action_schedule_new_upgrade = { "products": { "appliance": { "nextUpgrade": { "time": utc_future_formatted, "toVersion": { "id": mx_new_firmware_id } } } } } # GET the network list networks_list = dashboard.organizations.getOrganizationNetworks( organizationId=organization_id ) def format_single_action(resource, operation, body): # Combine a single set of batch components into an action action = { "resource": resource, "operation": operation, "body": body } return action def create_single_upgrade_action(network_id): # Create a single upgrade action # AB component parts, rename action action_resource = f'/networks/{network_id}/firmwareUpgrades' action_operation = 'update' # Choose whether to reschedule an existing or start a new upgrade action_body = action_schedule_new_upgrade upgrade_action = format_single_action(action_resource, action_operation, action_body) return upgrade_action def run_an_action_batch(org_id, actions_list, synchronous=False): # Create and run an action batch batch_response = dashboard.organizations.createOrganizationActionBatch( organizationId=org_id, actions=actions_list, confirmed=True, synchronous=synchronous ) return batch_response def create_action_list(net_list): # Creates a list of actions and returns it # Iterate through the list of network IDs and create an action for each, then collect it list_of_actions = list() for network in net_list: # Create the action single_action = create_single_upgrade_action(network['id']) list_of_actions.append(single_action) return list_of_actions def batch_actions_splitter(batch_actions): # Split the list of actions into smaller lists of maximum 100 actions each # For each ID in range length of network_ids for i in range(0, len(batch_actions), actions_per_batch): # Create an index range for network_ids of 100 items: yield batch_actions[i:i + actions_per_batch] def action_batch_runner(batch_actions_lists, org_id): # Create an action batch for each list of actions # Store the responses responses = list() number_of_batches = len(batch_actions_lists) number_of_batches_submitted = 0 wait_seconds = int(30) # Make a batch for each list for batch_action_list in batch_actions_lists: action_batch_queue_checker(org_id) batch_response = run_an_action_batch(org_id, batch_action_list) responses.append(batch_response) number_of_batches_submitted += 1 # Inform user of progress. print(f'Submitted batch {number_of_batches_submitted} of {number_of_batches}.') return responses def action_batch_queue_checker(org_id): all_action_batches = dashboard.organizations.getOrganizationActionBatches(organizationId=org_id) running_action_batches = [batch for batch in all_action_batches if batch['status']['completed'] is False and batch['status']['failed'] is False] total_running_actions = 0 for batch in running_action_batches: batch_actions = len(batch['actions']) total_running_actions += batch_actions wait_seconds = total_running_actions * wait_factor while len(running_action_batches) > 4: print(f'There are already five action batches in progress with a total of {total_running_actions} running actions. 
Waiting {wait_seconds} seconds.') time.sleep(wait_seconds) print('Checking again.') all_action_batches = dashboard.organizations.getOrganizationActionBatches(organizationId=org_id) running_action_batches = [batch for batch in all_action_batches if batch['status']['completed'] is False and batch['status']['failed'] is False] total_running_actions = 0 for batch in running_action_batches: batch_actions = len(batch['actions']) total_running_actions += batch_actions wait_seconds = total_running_actions * wait_factor # Create a list of upgrade actions upgrade_actions_list = create_action_list(networks_list) # Split the list into multiple lists of max 100 items each upgrade_actions_lists = list(batch_actions_splitter(upgrade_actions_list)) # Run the action batches to clone the networks upgraded_networks_responses = action_batch_runner(upgrade_actions_lists, organization_id)
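# A dry-run sketch of the pure helpers above; it never calls the dashboard API. Note that
# the script itself builds `dashboard` and fetches `networks_list` at import time, so
# running it end to end still needs a valid API key and organization_id. The network IDs
# below are made up.
fake_networks = [{"id": "N_%d" % n} for n in range(250)]

actions = create_action_list(fake_networks)
print(len(actions))            # 250 actions, one per network
print(actions[0]["resource"])  # /networks/N_0/firmwareUpgrades

batches = list(batch_actions_splitter(actions))
print([len(b) for b in batches])  # [100, 100, 50] with actions_per_batch = 100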
payloadprocessor/sqlmap - percentage.py
knassar702/community-scripts
629
131939
<gh_stars>100-1000 import string def process(payload): retVal = payload if payload: retVal = "" i = 0 while i < len(payload): if payload[i] == '%' and (i < len(payload) - 2) and payload[i + 1:i + 2] in string.hexdigits and payload[i + 2:i + 3] in string.hexdigits: retVal += payload[i:i + 3] i += 3 elif payload[i] != ' ': retVal += '%%%s' % payload[i] i += 1 else: retVal += payload[i] i += 1 return retVal
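# Worked examples of the transformation (this mirrors sqlmap's "percentage" tamper):
# every non-space character that is not already part of a %xx escape gets a '%' prefix,
# which some ASP/IIS back ends strip before the query reaches the database.
print(process("SELECT FIELD FROM TABLE"))
# %S%E%L%E%C%T %F%I%E%L%D %F%R%O%M %T%A%B%L%E
print(process("%41BC"))
# %41%B%C   (an existing two-digit hex escape is copied through unchanged)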
anuga/structures/tests/test_inlet_operator.py
samcom12/anuga_core
136
131945
#!/usr/bin/env python from __future__ import division from past.utils import old_div import unittest import os.path import sys import numpy import anuga from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular_cross from anuga.shallow_water.shallow_water_domain import Domain from anuga.abstract_2d_finite_volumes.util import file_function from anuga.utilities.system_tools import get_pathname_from_package from anuga.structures.inlet_operator import Inlet_operator class Test_inlet_operator(unittest.TestCase): """ Test the boyd box operator, in particular the discharge_routine! """ def setUp(self): pass def tearDown(self): try: os.remove('Test_Outlet_Inlet.sww') except: pass def _create_domain(self,d_length, d_width, dx, dy, elevation_0, elevation_1, stage_0, stage_1): points, vertices, boundary = rectangular_cross(int(old_div(d_length,dx)), int(old_div(d_width,dy)), len1=d_length, len2=d_width) domain = Domain(points, vertices, boundary) domain.set_name('Test_Outlet_Inlet') # Output name domain.set_store() domain.set_default_order(2) domain.H0 = 0.01 domain.tight_slope_limiters = 1 #print 'Size', len(domain) #------------------------------------------------------------------------------ # Setup initial conditions #------------------------------------------------------------------------------ def elevation(x, y): """Set up a elevation """ z = numpy.zeros(x.shape,dtype='d') z[:] = elevation_0 numpy.putmask(z, x > old_div(d_length,2), elevation_1) return z def stage(x,y): """Set up stage """ z = numpy.zeros(x.shape,dtype='d') z[:] = stage_0 numpy.putmask(z, x > old_div(d_length,2), stage_1) return z #print 'Setting Quantities....' domain.set_quantity('elevation', elevation) # Use function for elevation domain.set_quantity('stage', stage) # Use function for elevation Br = anuga.Reflective_boundary(domain) domain.set_boundary({'left': Br, 'right': Br, 'top': Br, 'bottom': Br}) return domain def test_inlet_constant_Q(self): """test_inlet_Q This tests that the inlet operator adds the correct amount of water """ stage_0 = 11.0 stage_1 = 10.0 elevation_0 = 10.0 elevation_1 = 10.0 domain_length = 200.0 domain_width = 200.0 domain = self._create_domain(d_length=domain_length, d_width=domain_width, dx = 10.0, dy = 10.0, elevation_0 = elevation_0, elevation_1 = elevation_1, stage_0 = stage_0, stage_1 = stage_1) vol0 = domain.compute_total_volume() finaltime = 3.0 line1 = [[95.0, 10.0], [105.0, 10.0]] Q1 = 5.00 line2 = [[10.0, 90.0], [20.0, 90.0]] Q2 = 10.0 Inlet_operator(domain, line1, Q1, logging=False) Inlet_operator(domain, line2, Q2) for t in domain.evolve(yieldstep = 1.0, finaltime = finaltime): #domain.write_time() #print domain.volumetric_balance_statistics() pass vol1 = domain.compute_total_volume() assert numpy.allclose((Q1+Q2)*finaltime, vol1-vol0, rtol=1.0e-8) assert numpy.allclose((Q1+Q2)*finaltime, domain.fractional_step_volume_integral, rtol=1.0e-8) def test_inlet_constant_Q_polygon(self): """test_inlet_Q This tests that the inlet operator adds the correct amount of water """ stage_0 = 11.0 stage_1 = 10.0 elevation_0 = 10.0 elevation_1 = 10.0 domain_length = 200.0 domain_width = 200.0 domain = self._create_domain(d_length=domain_length, d_width=domain_width, dx = 10.0, dy = 10.0, elevation_0 = elevation_0, elevation_1 = elevation_1, stage_0 = stage_0, stage_1 = stage_1) vol0 = domain.compute_total_volume() finaltime = 3.0 poly1 = [[95.0, 10.0], [105.0, 10.0], [105, 20.0], [95.0, 20.0]] Q1 = 5.00 Inlet_operator(domain, poly1, Q1, logging=False) for t in domain.evolve(yieldstep = 
1.0, finaltime = finaltime): #domain.write_time() #print domain.volumetric_balance_statistics() pass vol1 = domain.compute_total_volume() assert numpy.allclose((Q1)*finaltime, vol1-vol0, rtol=1.0e-8) assert numpy.allclose((Q1)*finaltime, domain.fractional_step_volume_integral, rtol=1.0e-8) def test_inlet_variable_Q(self): """test_inlet_Q This tests that the inlet operator adds the correct amount of water """ stage_0 = 11.0 stage_1 = 10.0 elevation_0 = 10.0 elevation_1 = 10.0 domain_length = 200.0 domain_width = 200.0 domain = self._create_domain(d_length=domain_length, d_width=domain_width, dx = 10.0, dy = 10.0, elevation_0 = elevation_0, elevation_1 = elevation_1, stage_0 = stage_0, stage_1 = stage_1) vol0 = domain.compute_total_volume() finaltime = 3.0 #Make sure we are inthe right directory to find the #time series data for the inlets import os path = get_pathname_from_package('anuga.structures') filename1 = os.path.join(path, 'tests', 'data', 'inlet_operator_test1.tms') filename2 = os.path.join(path, 'tests', 'data', 'inlet_operator_test2.tms') line1 = [[95.0, 10.0], [105.0, 10.0]] Q1 = file_function(filename=filename1, quantities=['hydrograph']) line2 = [[10.0, 90.0], [20.0, 90.0]] Q2 = file_function(filename=filename2, quantities=['hydrograph']) Inlet_operator(domain, line1, Q1) Inlet_operator(domain, line2, Q2) for t in domain.evolve(yieldstep = 1.0, finaltime = finaltime): #domain.write_time() #print domain.volumetric_balance_statistics() pass vol1 = domain.compute_total_volume() #print vol1-vol0 assert numpy.allclose(13.5, vol1-vol0, rtol=1.0e-8) assert numpy.allclose(vol1-vol0, domain.fractional_step_volume_integral, rtol=1.0e-8) def test_inlet_variable_Q_default(self): """test_inlet_Q This tests that the inlet operator adds the correct amount of water """ stage_0 = 11.0 stage_1 = 10.0 elevation_0 = 10.0 elevation_1 = 10.0 domain_length = 200.0 domain_width = 200.0 domain = self._create_domain(d_length=domain_length, d_width=domain_width, dx = 10.0, dy = 10.0, elevation_0 = elevation_0, elevation_1 = elevation_1, stage_0 = stage_0, stage_1 = stage_1) vol0 = domain.compute_total_volume() finaltime = 5.0 #Make sure we are inthe right directory to find the #time series data for the inlets import os baseDir = os.getcwd() path = get_pathname_from_package('anuga.structures') filename1 = os.path.join(path, 'tests', 'data', 'inlet_operator_test1.tms') filename2 = os.path.join(path, 'tests', 'data', 'inlet_operator_test2.tms') line1 = [[95.0, 10.0], [105.0, 10.0]] Q1 = file_function(filename=filename1, quantities=['hydrograph']) line2 = [[10.0, 90.0], [20.0, 90.0]] Q2 = file_function(filename=filename2, quantities=['hydrograph']) os.chdir(baseDir) import warnings warnings.simplefilter("ignore") Inlet_operator(domain, line1, Q1, default=6) Inlet_operator(domain, line2, Q2, default=3) for t in domain.evolve(yieldstep = 1.0, finaltime = finaltime): #domain.write_time() #print domain.volumetric_balance_statistics() pass warnings.simplefilter("default") vol1 = domain.compute_total_volume() #print vol1-vol0 assert numpy.allclose(31.5, vol1-vol0, rtol=1.0e-8) assert numpy.allclose(vol1-vol0, domain.fractional_step_volume_integral, rtol=1.0e-8) # ========================================================================= if __name__ == "__main__": suite = unittest.makeSuite(Test_inlet_operator, 'test') runner = unittest.TextTestRunner() runner.run(suite)
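# A condensed standalone sketch of what the tests above verify (illustrative mesh size
# and inflow rate; assumes anuga and its dependencies are installed): the volume added
# by an Inlet_operator with constant Q should equal Q * finaltime.
import numpy
import anuga
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular_cross
from anuga.shallow_water.shallow_water_domain import Domain
from anuga.structures.inlet_operator import Inlet_operator

points, vertices, boundary = rectangular_cross(20, 20, len1=200.0, len2=200.0)
domain = Domain(points, vertices, boundary)
domain.set_quantity('elevation', 10.0)
domain.set_quantity('stage', 11.0)
Br = anuga.Reflective_boundary(domain)
domain.set_boundary({'left': Br, 'right': Br, 'top': Br, 'bottom': Br})

vol0 = domain.compute_total_volume()
Q = 5.0
Inlet_operator(domain, [[95.0, 10.0], [105.0, 10.0]], Q)

for t in domain.evolve(yieldstep=1.0, finaltime=3.0):
    pass

assert numpy.allclose(Q * 3.0, domain.compute_total_volume() - vol0, rtol=1.0e-8)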
typescript/libs/service_proxy.py
bertilnilsson/TypeScript-Sublime-Plugin
1,688
131964
<filename>typescript/libs/service_proxy.py<gh_stars>1000+ import sublime from . import json_helpers from .global_vars import IS_ST2 from .node_client import CommClient from .text_helpers import Location class ServiceProxy: def __init__(self, worker_client=CommClient(), server_client=CommClient()): self.__comm = server_client self.__worker_comm = worker_client self.seq = 1 def increase_seq(self): temp = self.seq self.seq += 1 return temp def exit(self): req_dict = self.create_req_dict("exit") json_str = json_helpers.encode(req_dict) self.__comm.postCmd(json_str) if self.__worker_comm.started(): self.__worker_comm.postCmd(json_str) def stop_worker(self): req_dict = self.create_req_dict("exit") json_str = json_helpers.encode(req_dict) if self.__worker_comm.started(): self.__worker_comm.postCmd(json_str) def configure(self, host_info="Sublime Text", file=None, format_options=None): args = {"hostInfo": host_info, "formatOptions": format_options, "file": file} req_dict = self.create_req_dict("configure", args) json_str = json_helpers.encode(req_dict) response_dict = self.__comm.postCmd(json_str) if self.__worker_comm.started(): self.__worker_comm.postCmd(json_str) self.set_inferred_project_compiler_options() def set_inferred_project_compiler_options(self): """ Add full type support for compilers running in file scope mode """ compiler_options = { "target": "ESNext", # enable all es-next features "allowJs": True, # enable javascript support "jsx": "Preserve", # enable jsx support "noEmit": True # do not emit outputs } args = { "options": compiler_options } req_dict = self.create_req_dict("compilerOptionsForInferredProjects", args) json_str = json_helpers.encode(req_dict) self.__comm.postCmd(json_str) if self.__worker_comm.started(): self.__worker_comm.postCmd(json_str) def change(self, path, begin_location=Location(1, 1), end_location=Location(1, 1), insertString=""): args = { "file": path, "line": begin_location.line, "offset": begin_location.offset, "endLine": end_location.line, "endOffset": end_location.offset, "insertString": insertString } req_dict = self.create_req_dict("change", args) json_str = json_helpers.encode(req_dict) self.__comm.postCmd(json_str) if self.__worker_comm.started(): self.__worker_comm.postCmd(json_str) def completions(self, path, location=Location(1, 1), prefix="", on_completed=None): args = {"file": path, "line": location.line, "offset": location.offset, "prefix": prefix} req_dict = self.create_req_dict("completions", args) json_str = json_helpers.encode(req_dict) self.__comm.sendCmd( json_str, lambda response_dict: None if on_completed is None else on_completed(response_dict), req_dict["seq"] ) def async_completions(self, path, location=Location(1, 1), prefix="", on_completed=None): args = {"file": path, "line": location.line, "offset": location.offset, "prefix": prefix} req_dict = self.create_req_dict("completions", args) json_str = json_helpers.encode(req_dict) self.__comm.sendCmdAsync(json_str, on_completed, req_dict["seq"]) def signature_help(self, path, location=Location(1, 1), prefix="", on_completed=None): args = {"file": path, "line": location.line, "offset": location.offset, "prefix": prefix} req_dict = self.create_req_dict("signatureHelp", args) json_str = json_helpers.encode(req_dict) self.__comm.sendCmd( json_str, lambda response_dict: None if on_completed is None else on_completed(response_dict), req_dict["seq"] ) def async_signature_help(self, path, location=Location(1, 1), prefix="", on_completed=None): args = {"file": path, "line": location.line, 
"offset": location.offset, "prefix": prefix} req_dict = self.create_req_dict("signatureHelp", args) json_str = json_helpers.encode(req_dict) self.__comm.sendCmdAsync(json_str, on_completed, req_dict["seq"]) def definition(self, path, location=Location(1, 1)): args = {"file": path, "line": location.line, "offset": location.offset} req_dict = self.create_req_dict("definition", args) json_str = json_helpers.encode(req_dict) response_dict = self.__comm.sendCmdSync(json_str, req_dict["seq"]) return response_dict def type_definition(self, path, location=Location(1, 1)): args = {"file": path, "line": location.line, "offset": location.offset} req_dict = self.create_req_dict("typeDefinition", args) json_str = json_helpers.encode(req_dict) response_dict = self.__comm.sendCmdSync(json_str, req_dict["seq"]) return response_dict def format(self, path, begin_location=Location(1, 1), end_location=Location(1, 1)): args = { "file": path, "line": begin_location.line, "offset": begin_location.offset, "endLine": end_location.line, "endOffset": end_location.offset } req_dict = self.create_req_dict("format", args) json_str = json_helpers.encode(req_dict) response_dict = self.__comm.sendCmdSync(json_str, req_dict["seq"]) if self.__worker_comm.started(): self.__worker_comm.sendCmdSync(json_str, req_dict["seq"]) return response_dict def format_on_key(self, path, location=Location(1, 1), key=""): args = {"file": path, "line": location.line, "offset": location.offset, "key": key} req_dict = self.create_req_dict("formatonkey", args) json_str = json_helpers.encode(req_dict) response_dict = self.__comm.sendCmdSync(json_str, req_dict["seq"]) if self.__worker_comm.started(): self.__worker_comm.sendCmdSync(json_str, req_dict["seq"]) return response_dict def organize_imports(self, path): args = { "scope": { "type": "file", "args": { "file": path } }, } req_dict = self.create_req_dict("organizeImports", args) json_str = json_helpers.encode(req_dict) response_dict = self.__comm.sendCmdSync(json_str, req_dict["seq"]) if self.__worker_comm.started(): self.__worker_comm.sendCmdSync(json_str, req_dict["seq"]) return response_dict def open(self, path): args = {"file": path} req_dict = self.create_req_dict("open", args) json_str = json_helpers.encode(req_dict) self.__comm.postCmd(json_str) if self.__worker_comm.started(): self.__worker_comm.postCmd(json_str) def open_on_worker(self, path): args = {"file": path} req_dict = self.create_req_dict("open", args) json_str = json_helpers.encode(req_dict) if self.__worker_comm.started(): self.__worker_comm.postCmd(json_str) def close(self, path): args = {"file": path} req_dict = self.create_req_dict("close", args) json_str = json_helpers.encode(req_dict) self.__comm.postCmd(json_str) if self.__worker_comm.started(): self.__worker_comm.postCmd(json_str) def references(self, path, location=Location(1, 1)): args = {"file": path, "line": location.line, "offset": location.offset} req_dict = self.create_req_dict("references", args) json_str = json_helpers.encode(req_dict) response_dict = self.__comm.sendCmdSync(json_str, req_dict["seq"]) return response_dict def reload(self, path, alternate_path): args = {"file": path, "tmpfile": alternate_path} req_dict = self.create_req_dict("reload", args) json_str = json_helpers.encode(req_dict) response_dict = self.__comm.sendCmdSync(json_str, req_dict["seq"]) if self.__worker_comm.started(): self.__worker_comm.sendCmdSync(json_str, req_dict["seq"]) return response_dict def reload_on_worker(self, path, alternate_path): args = {"file": path, "tmpfile": 
alternate_path} req_dict = self.create_req_dict("reload", args) json_str = json_helpers.encode(req_dict) if self.__worker_comm.started(): response_dict = self.__worker_comm.sendCmdSync(json_str, req_dict["seq"]) return response_dict def reload_async(self, path, alternate_path, on_completed): args = {"file": path, "tmpfile": alternate_path} req_dict = self.create_req_dict("reload", args) json_str = json_helpers.encode(req_dict) self.__comm.sendCmdAsync(json_str, on_completed, req_dict["seq"]) if self.__worker_comm.started(): self.__worker_comm.sendCmdAsync(json_str, None, req_dict["seq"]) def reload_async_on_worker(self, path, alternate_path, on_completed): args = {"file": path, "tmpfile": alternate_path} req_dict = self.create_req_dict("reload", args) json_str = json_helpers.encode(req_dict) if self.__worker_comm.started(): self.__worker_comm.sendCmdAsync(json_str, None, req_dict["seq"]) def rename(self, path, location=Location(1, 1)): args = {"file": path, "line": location.line, "offset": location.offset} req_dict = self.create_req_dict("rename", args) json_str = json_helpers.encode(req_dict) response_dict = self.__comm.sendCmdSync(json_str, req_dict["seq"]) if self.__worker_comm.started(): self.__worker_comm.sendCmdSync(json_str, req_dict["seq"]) return response_dict def get_applicable_refactors_async(self, path, start_loc, end_loc, on_completed): args = { "file": path, "startLine": start_loc.line, "startOffset": start_loc.offset, "endLine": end_loc.line, "endOffset": end_loc.offset, } req_dict = self.create_req_dict("getApplicableRefactors", args) json_str = json_helpers.encode(req_dict) self.__comm.sendCmdAsync(json_str, on_completed, req_dict["seq"]) def get_edits_for_refactor_async(self, path, refactor_name, action_name, start_loc, end_loc, on_completed): args = { "file": path, "startLine": start_loc.line, "startOffset": start_loc.offset, "endLine": end_loc.line, "endOffset": end_loc.offset, "refactor": refactor_name, "action": action_name, } req_dict = self.create_req_dict("getEditsForRefactor", args) json_str = json_helpers.encode(req_dict) response_dict = self.__comm.sendCmdAsync(json_str, on_completed, req_dict["seq"]) #on_completed(response_dict) #return response_dict def request_get_err(self, delay=0, pathList=[]): args = {"files": pathList, "delay": delay} req_dict = self.create_req_dict("geterr", args) json_str = json_helpers.encode(req_dict) self.__comm.postCmd(json_str) def request_get_err_for_project(self, delay=0, path=""): args = {"file": path, "delay": delay} req_dict = self.create_req_dict("geterrForProject", args) json_str = json_helpers.encode(req_dict) if self.__worker_comm.started(): self.__worker_comm.postCmd(json_str) def type(self, path, location=Location(1, 1)): args = {"file": path, "line": location.line, "offset": location.offset} req_dict = self.create_req_dict("type", args) json_str = json_helpers.encode(req_dict) response_dict = self.__comm.sendCmdSync(json_str, req_dict["seq"]) return response_dict def quick_info(self, path, location=Location(1, 1), on_completed=None): args = {"file": path, "line": location.line, "offset": location.offset} req_dict = self.create_req_dict("quickinfo", args) json_str = json_helpers.encode(req_dict) callback = on_completed or (lambda: None) if not IS_ST2: self.__comm.sendCmdAsync( json_str, callback, req_dict["seq"] ) else: self.__comm.sendCmd( json_str, callback, req_dict["seq"] ) def quick_info_full(self, path, location=Location(1, 1), on_completed=None): args = {"file": path, "line": location.line, "offset": 
location.offset} req_dict = self.create_req_dict("quickinfo-full", args) json_str = json_helpers.encode(req_dict) callback = on_completed or (lambda: None) if not IS_ST2: self.__comm.sendCmdAsync( json_str, callback, req_dict["seq"] ) else: self.__comm.sendCmd( json_str, callback, req_dict["seq"] ) def save_to(self, path, alternatePath): args = {"file": path, "tmpfile": alternatePath} req_dict = self.create_req_dict("saveto", args) json_str = json_helpers.encode(req_dict) self.__comm.postCmd(json_str) def nav_to(self, search_text, file_name): args = {"searchValue": search_text, "file": file_name, "maxResultCount": 20} req_dict = self.create_req_dict("navto", args) json_str = json_helpers.encode(req_dict) response_dict = self.__comm.sendCmdSync(json_str, req_dict["seq"]) return response_dict def project_info(self, file_name, need_file_name_list=False): args = {"file": file_name, "needFileNameList": need_file_name_list} req_dict = self.create_req_dict("projectInfo", args) json_str = json_helpers.encode(req_dict) return self.__comm.sendCmdSync(json_str, req_dict["seq"]) def async_document_highlights(self, path, location, on_completed=None): args = {"line": location.line, "offset": location.offset, "file": path, "filesToSearch": [path]} req_dict = self.create_req_dict("documentHighlights", args) json_str = json_helpers.encode(req_dict) self.__comm.sendCmdAsync(json_str, on_completed, req_dict["seq"]) def add_event_handler(self, event_name, cb): self.__comm.add_event_handler(event_name, cb) def add_event_handler_for_worker(self, event_name, cb): self.__worker_comm.add_event_handler(event_name, cb) def create_req_dict(self, command_name, args=None): req_dict = { "command": command_name, "seq": self.increase_seq(), "type": "request" } if args: req_dict["arguments"] = args return req_dict def get_semantic_errors(self, path): args = { "file": path } req_dict = self.create_req_dict("semanticDiagnosticsSync", args) json_str = json_helpers.encode(req_dict) response_dict = self.__comm.sendCmdSync(json_str, req_dict["seq"]) return response_dict def get_syntactic_errors(self, path): args = { "file": path } req_dict = self.create_req_dict("syntacticDiagnosticsSync", args) json_str = json_helpers.encode(req_dict) response_dict = self.__comm.sendCmdSync(json_str, req_dict["seq"]) return response_dict def get_code_fixes(self, path, startLine, startOffset, endLine, endOffset, errorCodes): args = { "file": path, "startLine": startLine, "startOffset": startOffset, "endLine": endLine, "endOffset": endOffset, "errorCodes": errorCodes } req_dict = self.create_req_dict("getCodeFixes", args) json_str = json_helpers.encode(req_dict) response_dict = self.__comm.sendCmdSync(json_str, req_dict["seq"]) return response_dict
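# A small illustration of the request envelope the proxy produces (this only runs inside
# Sublime's plugin host, since the module imports `sublime`); the file path and position
# are made-up examples, and nothing is actually sent to tsserver here.
proxy = ServiceProxy()  # default CommClient instances; no server process is started
loc = Location(10, 4)
req = proxy.create_req_dict(
    "quickinfo",
    {"file": "/tmp/example.ts", "line": loc.line, "offset": loc.offset},
)
print(json_helpers.encode(req))
# {"command": "quickinfo", "seq": 1, "type": "request", "arguments": {...}}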
designate-8.0.0/designate/loggingutils.py
scottwedge/OpenStack-Stein
145
131975
<filename>designate-8.0.0/designate/loggingutils.py
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect

LOG = {}


def log_rpc_call(func, rpcapi, logger):
    def wrapped(*args, **kwargs):
        logger.debug("Calling designate.%(rpcapi)s.%(function)s() "
                     "over RPC", {'function': func.__name__,
                                  'rpcapi': rpcapi})
        return func(*args, **kwargs)
    return wrapped


LOGGING_BLACKLIST = ['get_instance', '__init__']


def rpc_logging(logger, rpcapi):
    def wrapper(cls):
        CLASS_BLACKLIST = getattr(cls, 'LOGGING_BLACKLIST', [])
        BLACKLIST = CLASS_BLACKLIST + LOGGING_BLACKLIST
        for name, m in inspect.getmembers(cls, inspect.ismethod):
            if name not in BLACKLIST:
                setattr(cls, name, log_rpc_call(m, rpcapi, logger))
        return cls
    return wrapper
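For context, a minimal sketch of how the rpc_logging decorator above is intended to be applied; the class, method, and logger names below are illustrative and not taken from designate.

import logging

from designate.loggingutils import rpc_logging

LOG = logging.getLogger(__name__)


@rpc_logging(LOG, 'central')
class CentralAPI(object):
    # Methods listed here are skipped by the decorator, in addition to the
    # module-level LOGGING_BLACKLIST ('get_instance', '__init__').
    LOGGING_BLACKLIST = ['get_instance']

    def find_domains(self, context, criterion=None):
        # A hypothetical RPC-facing method; calls to it are meant to be
        # logged by the wrapped log_rpc_call() closure.
        return []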
autobahn/wamp/gen/wamp/proto/CalleeFeatures.py
rapyuta-robotics/autobahn-python
1,670
131976
# automatically generated by the FlatBuffers compiler, do not modify # namespace: proto import flatbuffers class CalleeFeatures(object): __slots__ = ['_tab'] @classmethod def GetRootAsCalleeFeatures(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = CalleeFeatures() x.Init(buf, n + offset) return x # CalleeFeatures def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # CalleeFeatures def CallerIdentification(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False # CalleeFeatures def CallTrustlevels(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False # CalleeFeatures def CallTimeout(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False # CalleeFeatures def CallCanceling(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False # CalleeFeatures def ProgressiveCallResults(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False # CalleeFeatures def RegistrationRevocation(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False # CalleeFeatures def PatternBasedRegistration(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False # CalleeFeatures def SharedRegistration(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False # CalleeFeatures def PayloadTransparency(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False # CalleeFeatures def PayloadEncryptionCryptobox(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22)) if o != 0: return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) return False def CalleeFeaturesStart(builder): builder.StartObject(10) def CalleeFeaturesAddCallerIdentification(builder, callerIdentification): builder.PrependBoolSlot(0, callerIdentification, 0) def CalleeFeaturesAddCallTrustlevels(builder, callTrustlevels): builder.PrependBoolSlot(1, callTrustlevels, 0) def CalleeFeaturesAddCallTimeout(builder, callTimeout): builder.PrependBoolSlot(2, callTimeout, 0) def CalleeFeaturesAddCallCanceling(builder, callCanceling): builder.PrependBoolSlot(3, callCanceling, 0) def CalleeFeaturesAddProgressiveCallResults(builder, progressiveCallResults): builder.PrependBoolSlot(4, progressiveCallResults, 0) def CalleeFeaturesAddRegistrationRevocation(builder, registrationRevocation): builder.PrependBoolSlot(5, registrationRevocation, 0) def CalleeFeaturesAddPatternBasedRegistration(builder, patternBasedRegistration): 
builder.PrependBoolSlot(6, patternBasedRegistration, 0) def CalleeFeaturesAddSharedRegistration(builder, sharedRegistration): builder.PrependBoolSlot(7, sharedRegistration, 0) def CalleeFeaturesAddPayloadTransparency(builder, payloadTransparency): builder.PrependBoolSlot(8, payloadTransparency, 0) def CalleeFeaturesAddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox): builder.PrependBoolSlot(9, payloadEncryptionCryptobox, 0) def CalleeFeaturesEnd(builder): return builder.EndObject()
biostar/forum/migrations/0012_delete_sync.py
tangibleai/biostar-central
477
131979
<reponame>tangibleai/biostar-central<gh_stars>100-1000
# Generated by Django 3.1 on 2021-02-13 21:36

from django.db import migrations
from django.template.defaultfilters import slugify


def badge_uids(apps, schema_editor):
    """
    Change badge uids to it's name
    """
    Badge = apps.get_model('forum', 'Badge')
    badges = Badge.objects.all()
    for badge in badges:
        badge.uid = slugify(badge.name)
        badge.save()


class Migration(migrations.Migration):

    dependencies = [
        ('forum', '0011_spam_index'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Sync',
        ),
        migrations.RunPython(badge_uids),
    ]
lib/spack/spack/test/git_fetch.py
LiamBindle/spack
2,360
131984
<gh_stars>1000+ # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import copy import os import shutil import pytest from llnl.util.filesystem import mkdirp, touch, working_dir import spack.config import spack.repo from spack.fetch_strategy import GitFetchStrategy from spack.spec import Spec from spack.stage import Stage from spack.util.executable import which from spack.version import ver pytestmark = pytest.mark.skipif( not which('git'), reason='requires git to be installed') _mock_transport_error = 'Mock HTTP transport error' @pytest.fixture(params=[None, '1.8.5.2', '1.8.5.1', '1.7.10', '1.7.1', '1.7.0']) def git_version(request, monkeypatch): """Tests GitFetchStrategy behavior for different git versions. GitFetchStrategy tries to optimize using features of newer git versions, but needs to work with older git versions. To ensure code paths for old versions still work, we fake it out here and make it use the backward-compatibility code paths with newer git versions. """ git = which('git', required=True) real_git_version = ( spack.fetch_strategy.GitFetchStrategy.version_from_git(git)) if request.param is None: # Don't patch; run with the real git_version method. yield real_git_version else: test_git_version = ver(request.param) if test_git_version > real_git_version: pytest.skip("Can't test clone logic for newer version of git.") # Patch the fetch strategy to think it's using a lower git version. # we use this to test what we'd need to do with older git versions # using a newer git installation. monkeypatch.setattr(GitFetchStrategy, 'git_version', test_git_version) yield test_git_version @pytest.fixture def mock_bad_git(monkeypatch): """ Test GitFetchStrategy behavior with a bad git command for git >= 1.7.1 to trigger a SpackError. """ def bad_git(*args, **kwargs): """Raise a SpackError with the transport message.""" raise spack.error.SpackError(_mock_transport_error) # Patch the fetch strategy to think it's using a git version that # will error out when git is called. monkeypatch.setattr(GitFetchStrategy, 'git', bad_git) monkeypatch.setattr(GitFetchStrategy, 'git_version', ver('1.7.1')) yield def test_bad_git(tmpdir, mock_bad_git): """Trigger a SpackError when attempt a fetch with a bad git.""" testpath = str(tmpdir) with pytest.raises(spack.error.SpackError): fetcher = GitFetchStrategy(git='file:///not-a-real-git-repo') with Stage(fetcher, path=testpath): fetcher.fetch() @pytest.mark.parametrize("type_of_test", ['master', 'branch', 'tag', 'commit']) @pytest.mark.parametrize("secure", [True, False]) def test_fetch(type_of_test, secure, mock_git_repository, config, mutable_mock_repo, git_version): """Tries to: 1. Fetch the repo using a fetch strategy constructed with supplied args (they depend on type_of_test). 2. Check if the test_file is in the checked out repository. 3. Assert that the repository is at the revision supplied. 4. Add and remove some files, then reset the repo, and ensure it's all there again. 
""" # Retrieve the right test parameters t = mock_git_repository.checks[type_of_test] h = mock_git_repository.hash # Construct the package under test spec = Spec('git-test') spec.concretize() pkg = spack.repo.get(spec) pkg.versions[ver('git')] = t.args # Enter the stage directory and check some properties with pkg.stage: with spack.config.override('config:verify_ssl', secure): pkg.do_stage() with working_dir(pkg.stage.source_path): assert h('HEAD') == h(t.revision) file_path = os.path.join(pkg.stage.source_path, t.file) assert os.path.isdir(pkg.stage.source_path) assert os.path.isfile(file_path) os.unlink(file_path) assert not os.path.isfile(file_path) untracked_file = 'foobarbaz' touch(untracked_file) assert os.path.isfile(untracked_file) pkg.do_restage() assert not os.path.isfile(untracked_file) assert os.path.isdir(pkg.stage.source_path) assert os.path.isfile(file_path) assert h('HEAD') == h(t.revision) @pytest.mark.parametrize("type_of_test", ['branch', 'commit']) def test_debug_fetch(mock_packages, type_of_test, mock_git_repository, config): """Fetch the repo with debug enabled.""" # Retrieve the right test parameters t = mock_git_repository.checks[type_of_test] # Construct the package under test spec = Spec('git-test') spec.concretize() pkg = spack.repo.get(spec) pkg.versions[ver('git')] = t.args # Fetch then ensure source path exists with pkg.stage: with spack.config.override('config:debug', True): pkg.do_fetch() assert os.path.isdir(pkg.stage.source_path) def test_git_extra_fetch(tmpdir): """Ensure a fetch after 'expanding' is effectively a no-op.""" testpath = str(tmpdir) fetcher = GitFetchStrategy(git='file:///not-a-real-git-repo') with Stage(fetcher, path=testpath) as stage: mkdirp(stage.source_path) fetcher.fetch() # Use fetcher to fetch for code coverage shutil.rmtree(stage.source_path) def test_needs_stage(): """Trigger a NoStageError when attempt a fetch without a stage.""" with pytest.raises(spack.fetch_strategy.NoStageError, match=r"set_stage.*before calling fetch"): fetcher = GitFetchStrategy(git='file:///not-a-real-git-repo') fetcher.fetch() @pytest.mark.parametrize("get_full_repo", [True, False]) def test_get_full_repo(get_full_repo, git_version, mock_git_repository, config, mutable_mock_repo): """Ensure that we can clone a full repository.""" if git_version < ver('1.7.1'): pytest.skip('Not testing get_full_repo for older git {0}'. 
format(git_version)) secure = True type_of_test = 'tag-branch' t = mock_git_repository.checks[type_of_test] spec = Spec('git-test') spec.concretize() pkg = spack.repo.get(spec) args = copy.copy(t.args) args['get_full_repo'] = get_full_repo pkg.versions[ver('git')] = args with pkg.stage: with spack.config.override('config:verify_ssl', secure): pkg.do_stage() with working_dir(pkg.stage.source_path): branches\ = mock_git_repository.git_exe('branch', '-a', output=str).splitlines() nbranches = len(branches) commits\ = mock_git_repository.\ git_exe('log', '--graph', '--pretty=format:%h -%d %s (%ci) <%an>', '--abbrev-commit', output=str).splitlines() ncommits = len(commits) if get_full_repo: assert(nbranches >= 5) assert(ncommits == 2) else: assert(nbranches == 2) assert(ncommits == 1) @pytest.mark.disable_clean_stage_check @pytest.mark.parametrize("submodules", [True, False]) def test_gitsubmodule(submodules, mock_git_repository, config, mutable_mock_repo): """ Test GitFetchStrategy behavior with submodules """ type_of_test = 'tag-branch' t = mock_git_repository.checks[type_of_test] # Construct the package under test spec = Spec('git-test') spec.concretize() pkg = spack.repo.get(spec) args = copy.copy(t.args) args['submodules'] = submodules pkg.versions[ver('git')] = args pkg.do_stage() with working_dir(pkg.stage.source_path): for submodule_count in range(2): file_path = os.path.join(pkg.stage.source_path, 'third_party/submodule{0}/r0_file_{0}' .format(submodule_count)) if submodules: assert os.path.isfile(file_path) else: assert not os.path.isfile(file_path) @pytest.mark.disable_clean_stage_check def test_gitsubmodules_delete(mock_git_repository, config, mutable_mock_repo): """ Test GitFetchStrategy behavior with submodules_delete """ type_of_test = 'tag-branch' t = mock_git_repository.checks[type_of_test] # Construct the package under test spec = Spec('git-test') spec.concretize() pkg = spack.repo.get(spec) args = copy.copy(t.args) args['submodules'] = True args['submodules_delete'] = ['third_party/submodule0', 'third_party/submodule1'] pkg.versions[ver('git')] = args pkg.do_stage() with working_dir(pkg.stage.source_path): file_path = os.path.join(pkg.stage.source_path, 'third_party/submodule0') assert not os.path.isdir(file_path) file_path = os.path.join(pkg.stage.source_path, 'third_party/submodule1') assert not os.path.isdir(file_path)
tests/unittests/utils/test_immutabletypes.py
buddwm/hubble
363
131986
# -*- coding: utf-8 -*-
'''
    :codeauthor: <NAME> (<EMAIL>)

    tests.unit.utils.immutabletypes
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Test hubblestack.utils.immutabletypes
'''

from tests.support.unit import TestCase

import hubblestack.utils.immutabletypes as immutabletypes


class ImmutableTypesTestCase(TestCase):

    def test_immutablelist_sum(self):
        lst = [4, 5, 6]
        imt = immutabletypes.ImmutableList([1, 2, 3])
        __add__ = imt + lst
        self.assertEqual(__add__, [1, 2, 3, 4, 5, 6])
        __radd__ = lst + imt
        self.assertEqual(__radd__, [4, 5, 6, 1, 2, 3])

    def test_freeze_list_sum(self):
        lst = [4, 5, 6]
        imt = immutabletypes.freeze([1, 2, 3])
        __add__ = imt + lst
        self.assertEqual(__add__, [1, 2, 3, 4, 5, 6])
        __radd__ = lst + imt
        self.assertEqual(__radd__, [4, 5, 6, 1, 2, 3])

    def test_immutablelist_imutability(self):
        frozen = immutabletypes.freeze([1, 2, 3])
        with self.assertRaises(TypeError):
            frozen[1] = 2
        with self.assertRaises(TypeError):
            frozen[1:-1] = 5

    def test_immutabledict_imutability(self):
        data = {
            1: 1,
            2: 2,
            3: {
                3.1: 3.1,
                3.2: 3.2,
                3.3: {
                    3.31: 3.33,
                    3.32: 3.34,
                    3.33: [3.331, 3.332, 3.333]
                }
            },
            4: [4.1, 4.2, 4.3]
        }
        frozen = immutabletypes.freeze(data)
        with self.assertRaises(TypeError):
            frozen[1] = 2
        with self.assertRaises(TypeError):
            fdict = frozen[3]
            fdict[3.1] = 5
        with self.assertRaises(TypeError):
            fdict = frozen[3]
            fdict[3.4] = 3.4
        with self.assertRaises(TypeError):
            frozen[3][3.3][3.32] = 3.99
        with self.assertRaises(TypeError):
            frozen[3][3.3][3.33][0] = 5
        with self.assertRaises(TypeError):
            flist = frozen[4]
            flist[0] = 5
lldb/test/API/lang/cpp/typeof/TestTypeOfDeclTypeExpr.py
mkinsner/llvm
2,338
131991
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil


class TestCase(TestBase):

    mydir = TestBase.compute_mydir(__file__)

    @no_debug_info_test
    def test(self):
        self.expect_expr("int i; __typeof__(i) j = 1; j",
                         result_type="typeof (i)",
                         result_value="1")
        self.expect_expr("int i; typeof(i) j = 1; j",
                         result_type="typeof (i)",
                         result_value="1")
        self.expect_expr("int i; decltype(i) j = 1; j",
                         result_type="decltype(i)",
                         result_value="1")
tests/data/config/g.py
jinliwei1997/mmcv
3,748
131995
<filename>tests/data/config/g.py
filename = 'reserved.py'
tools/python/airmaps/dags/build_maps.py
smartyw/organicmaps
3,062
131996
import logging from datetime import timedelta from airflow import DAG from airflow.operators.python_operator import PythonOperator from airflow.utils.dates import days_ago from airmaps.instruments import settings from airmaps.instruments import storage from airmaps.instruments.utils import make_rm_build_task from airmaps.instruments.utils import run_generation_from_first_stage from maps_generator.generator import stages_declaration as sd from maps_generator.generator.env import Env from maps_generator.generator.env import PathProvider from maps_generator.generator.env import get_all_countries_list from maps_generator.maps_generator import run_generation logger = logging.getLogger("airmaps") MAPS_STORAGE_PATH = f"{settings.STORAGE_PREFIX}/maps" class MapsGenerationDAG(DAG): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) build_prolog_task = PythonOperator( task_id="Build_prolog_task", provide_context=True, python_callable=MapsGenerationDAG.build_prolog, dag=self, ) build_epilog_task = PythonOperator( task_id="Build_epilog_task", provide_context=True, python_callable=MapsGenerationDAG.build_epilog, dag=self, ) publish_maps_task = PythonOperator( task_id="Publish_maps_task", provide_context=True, python_callable=MapsGenerationDAG.publish_maps, dag=self, ) rm_build_task = make_rm_build_task(self) build_epilog_task >> publish_maps_task >> rm_build_task for country in get_all_countries_list(PathProvider.borders_path()): build_prolog_task >> self.make_mwm_operator(country) >> build_epilog_task @staticmethod def get_params(namespace="env", **kwargs): return kwargs.get("params", {}).get(namespace, {}) @staticmethod def build_prolog(**kwargs): params = MapsGenerationDAG.get_params(**kwargs) env = Env(**params) kwargs["ti"].xcom_push(key="build_name", value=env.build_name) run_generation( env, ( sd.StageDownloadAndConvertPlanet(), sd.StageCoastline(), sd.StagePreprocess(), sd.StageFeatures(), sd.StageDownloadDescriptions(), ), ) @staticmethod def make_build_mwm_func(country): def build_mwm(**kwargs): build_name = kwargs["ti"].xcom_pull(key="build_name") params = MapsGenerationDAG.get_params(**kwargs) params.update({"build_name": build_name, "countries": [country,]}) env = Env(**params) # We need to check existing of mwm.tmp. It is needed if we want to # build mwms from part of planet. 
tmp_mwm_name = env.get_tmp_mwm_names() assert len(tmp_mwm_name) <= 1 if not tmp_mwm_name: logger.warning(f"mwm.tmp does not exist for {country}.") return run_generation_from_first_stage(env, (sd.StageMwm(),), build_lock=False) return build_mwm @staticmethod def build_epilog(**kwargs): build_name = kwargs["ti"].xcom_pull(key="build_name") params = MapsGenerationDAG.get_params(**kwargs) params.update({"build_name": build_name}) env = Env(**params) run_generation_from_first_stage( env, ( sd.StageCountriesTxt(), sd.StageExternalResources(), sd.StageLocalAds(), sd.StageStatistics(), sd.StageCleanup(), ), ) env.finish() @staticmethod def publish_maps(**kwargs): build_name = kwargs["ti"].xcom_pull(key="build_name") params = MapsGenerationDAG.get_params(**kwargs) params.update({"build_name": build_name}) env = Env(**params) subdir = MapsGenerationDAG.get_params(namespace="storage", **kwargs)["subdir"] storage_path = f"{MAPS_STORAGE_PATH}/{subdir}" storage.wd_publish(env.paths.mwm_path, f"{storage_path}/{env.mwm_version}/") def make_mwm_operator(self, country): normalized_name = "__".join(country.lower().split()) return PythonOperator( task_id=f"Build_country_{normalized_name}_task", provide_context=True, python_callable=MapsGenerationDAG.make_build_mwm_func(country), dag=self, ) PARAMS = {"storage": {"subdir": "open_source"}} if settings.DEBUG: PARAMS["env"] = { # The planet file in debug mode does not contain Russia_Moscow territory. # It is needed for testing. "countries": ["Cuba", "Haiti", "Jamaica", "Cayman Islands", "Russia_Moscow"] } OPEN_SOURCE_MAPS_GENERATION_DAG = MapsGenerationDAG( "Generate_open_source_maps", schedule_interval=timedelta(days=7), default_args={ "owner": "OMaps", "depends_on_past": True, "start_date": days_ago(0), "email": settings.EMAILS, "email_on_failure": True, "email_on_retry": False, "retries": 0, "retry_delay": timedelta(minutes=5), "priority_weight": 1, "params": PARAMS, }, )
Chapter15/lib/common.py
feiwang20/DRLHandsOn-Playground
2,497
132009
import numpy as np
import torch

import ptan


def unpack_batch_a2c(batch, net, last_val_gamma, device="cpu"):
    """
    Convert batch into training tensors
    :param batch:
    :param net:
    :return: states variable, actions tensor, reference values variable
    """
    states = []
    actions = []
    rewards = []
    not_done_idx = []
    last_states = []
    for idx, exp in enumerate(batch):
        states.append(exp.state)
        actions.append(exp.action)
        rewards.append(exp.reward)
        if exp.last_state is not None:
            not_done_idx.append(idx)
            last_states.append(exp.last_state)
    states_v = ptan.agent.float32_preprocessor(states).to(device)
    actions_v = torch.FloatTensor(actions).to(device)

    # handle rewards
    rewards_np = np.array(rewards, dtype=np.float32)
    if not_done_idx:
        last_states_v = ptan.agent.float32_preprocessor(last_states).to(device)
        last_vals_v = net(last_states_v)
        last_vals_np = last_vals_v.data.cpu().numpy()[:, 0]
        rewards_np[not_done_idx] += last_val_gamma * last_vals_np

    ref_vals_v = torch.FloatTensor(rewards_np).to(device)
    return states_v, actions_v, ref_vals_v
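A hedged usage sketch of unpack_batch_a2c with a hand-built batch and a dummy critic; the Exp namedtuple stands in for ptan's ExperienceFirstLast and everything below is invented for illustration.

import collections

import torch.nn as nn

# Field names match the attributes unpack_batch_a2c reads from each item.
Exp = collections.namedtuple("Exp", ["state", "action", "reward", "last_state"])

critic = nn.Linear(4, 1)  # dummy value network: 4-dim observation -> V(s)
batch = [
    Exp(state=[0.0, 0.0, 0.0, 0.0], action=[0.1], reward=1.0,
        last_state=[1.0, 0.0, 0.0, 0.0]),
    Exp(state=[0.5, 0.5, 0.5, 0.5], action=[0.2], reward=0.5,
        last_state=None),  # terminal transition: no bootstrapped value
]
states_v, actions_v, ref_vals_v = unpack_batch_a2c(
    batch, critic, last_val_gamma=0.99 ** 4)
print(states_v.shape, actions_v.shape, ref_vals_v.shape)  # [2, 4], [2, 1], [2]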
ipypublish/convert/config_manager.py
parmentelat/ipypublish
220
132020
import os import glob import importlib import logging from six import string_types from jinja2 import DictLoader import jsonschema import nbconvert # noqa: F401 from ipypublish.utils import ( pathlib, handle_error, get_module_path, read_file_from_directory, read_file_from_module, ) from ipypublish import export_plugins from ipypublish import schema from ipypublish.templates.create_template import create_template _TEMPLATE_KEY = "new_template" _EXPORT_SCHEMA_FILE = "export_config.schema.json" _EXPORT_SCHEMA = None logger = logging.getLogger("configuration") def get_export_config_path(export_key, config_folder_paths=()): # type (string, Tuple[str]) -> Union[string, None] """we search for a plugin name, which matches the supplied plugin name """ for name, jsonpath in iter_all_export_paths(config_folder_paths): if name == export_key: return pathlib.Path(jsonpath) return None def iter_all_export_paths(config_folder_paths=(), regex="*.json"): """we iterate through all json files in the supplied plugin_folder_paths, and then in the `export_plugins` folder """ for plugin_folder_path in config_folder_paths: for jsonpath in glob.glob(os.path.join(plugin_folder_path, regex)): name = os.path.splitext(os.path.basename(jsonpath))[0] yield name, pathlib.Path(jsonpath) module_path = get_module_path(export_plugins) for jsonpath in glob.glob(os.path.join(str(module_path), regex)): name = os.path.splitext(os.path.basename(jsonpath))[0] yield name, pathlib.Path(jsonpath) def load_export_config(export_config_path): """load the export configuration""" if isinstance(export_config_path, string_types): export_config_path = pathlib.Path(export_config_path) data = read_file_from_directory( export_config_path.parent, export_config_path.name, "export configuration", logger, interp_ext=True, ) # validate against schema global _EXPORT_SCHEMA if _EXPORT_SCHEMA is None: # lazy load schema once _EXPORT_SCHEMA = read_file_from_directory( get_module_path(schema), _EXPORT_SCHEMA_FILE, "export configuration schema", logger, interp_ext=True, ) try: jsonschema.validate(data, _EXPORT_SCHEMA) except jsonschema.ValidationError as err: handle_error( "validation of export config {} failed against {}: {}".format( export_config_path, _EXPORT_SCHEMA_FILE, err.message ), jsonschema.ValidationError, logger=logger, ) return data def iter_all_export_infos(config_folder_paths=(), regex="*.json", get_mime=False): """iterate through all export configuration and yield a dict of info""" for name, path in iter_all_export_paths(config_folder_paths, regex): data = load_export_config(path) info = dict( [ ("key", str(name)), ("class", data["exporter"]["class"]), ("path", str(path)), ("description", data["description"]), ] ) if get_mime: info["mime_type"] = create_exporter_cls( data["exporter"]["class"] ).output_mimetype yield info def create_exporter_cls(class_str): # type: (str) -> nbconvert.exporters.Exporter """dynamically load export class""" export_class_path = class_str.split(".") module_path = ".".join(export_class_path[0:-1]) class_name = export_class_path[-1] try: export_module = importlib.import_module(module_path) except ModuleNotFoundError: # noqa: F821 handle_error( "module {} containing exporter class {} not found".format( module_path, class_name ), ModuleNotFoundError, logger=logger, ) # noqa: F821 if hasattr(export_module, class_name): export_class = getattr(export_module, class_name) else: handle_error( "module {} does not contain class {}".format(module_path, class_name), ImportError, logger=logger, ) return export_class def 
get_export_extension(export_config_path): """return the file extension of the exporter class""" data = load_export_config(export_config_path) exporter_cls = create_exporter_cls(data["exporter"]["class"]) return exporter_cls.file_extension def str_to_jinja(template_str, template_key="jinja_template"): return DictLoader({template_key: template_str}) def load_template(template_key, template_dict): if template_dict is None: return None if "directory" in template_dict["outline"]: outline_template = read_file_from_directory( template_dict["outline"]["directory"], template_dict["outline"]["file"], "template outline", logger, interp_ext=False, ) outline_name = "{0}/{1}".format( template_dict["outline"]["directory"], template_dict["outline"]["file"] ) else: outline_template = read_file_from_module( template_dict["outline"]["module"], template_dict["outline"]["file"], "template outline", logger, interp_ext=False, ) outline_name = "{0}/{1}".format( template_dict["outline"]["module"], template_dict["outline"]["file"] ) segments = [] for snum, segment in enumerate(template_dict.get("segments", [])): if "file" not in segment: handle_error("'file' expected in segment {}".format(snum), KeyError, logger) if "directory" in segment: seg_data = read_file_from_directory( segment["directory"], segment["file"], "template segment", logger, interp_ext=True, ) elif "module" in segment: seg_data = read_file_from_module( segment["module"], segment["file"], "template segment", logger, interp_ext=True, ) else: handle_error( "'directory' or 'module' expected in segment {}".format(snum), KeyError, logger, ) segments.append(seg_data) template_str = create_template(outline_template, outline_name, segments) return str_to_jinja(template_str, template_key)
Tests/pens/pointPen_test.py
twardoch/fonttools-py27
240
132029
<filename>Tests/pens/pointPen_test.py from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * from fontTools.misc.loggingTools import CapturingLogHandler import unittest from fontTools.pens.basePen import AbstractPen from fontTools.pens.pointPen import AbstractPointPen, PointToSegmentPen, \ SegmentToPointPen, GuessSmoothPointPen, ReverseContourPointPen class _TestSegmentPen(AbstractPen): def __init__(self): self._commands = [] def __repr__(self): return " ".join(self._commands) def moveTo(self, pt): self._commands.append("%s %s moveto" % (pt[0], pt[1])) def lineTo(self, pt): self._commands.append("%s %s lineto" % (pt[0], pt[1])) def curveTo(self, *pts): pts = ["%s %s" % pt for pt in pts] self._commands.append("%s curveto" % " ".join(pts)) def qCurveTo(self, *pts): pts = ["%s %s" % pt if pt is not None else "None" for pt in pts] self._commands.append("%s qcurveto" % " ".join(pts)) def closePath(self): self._commands.append("closepath") def endPath(self): self._commands.append("endpath") def addComponent(self, glyphName, transformation): self._commands.append("'%s' %s addcomponent" % (glyphName, transformation)) def _reprKwargs(kwargs): items = [] for key in sorted(kwargs): value = kwargs[key] if isinstance(value, basestring): items.append("%s='%s'" % (key, value)) else: items.append("%s=%s" % (key, value)) return items class _TestPointPen(AbstractPointPen): def __init__(self): self._commands = [] def __repr__(self): return " ".join(self._commands) def beginPath(self, identifier=None, **kwargs): items = [] if identifier is not None: items.append("identifier='%s'" % identifier) items.extend(_reprKwargs(kwargs)) self._commands.append("beginPath(%s)" % ", ".join(items)) def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs): items = ["%s" % (pt,)] if segmentType is not None: items.append("segmentType='%s'" % segmentType) if smooth: items.append("smooth=True") if name is not None: items.append("name='%s'" % name) if identifier is not None: items.append("identifier='%s'" % identifier) items.extend(_reprKwargs(kwargs)) self._commands.append("addPoint(%s)" % ", ".join(items)) def endPath(self): self._commands.append("endPath()") def addComponent(self, glyphName, transform, identifier=None, **kwargs): items = ["'%s'" % glyphName, "%s" % transform] if identifier is not None: items.append("identifier='%s'" % identifier) items.extend(_reprKwargs(kwargs)) self._commands.append("addComponent(%s)" % ", ".join(items)) class PointToSegmentPenTest(unittest.TestCase): def test_open(self): pen = _TestSegmentPen() ppen = PointToSegmentPen(pen) ppen.beginPath() ppen.addPoint((10, 10), "move") ppen.addPoint((10, 20), "line") ppen.endPath() self.assertEqual("10 10 moveto 10 20 lineto endpath", repr(pen)) def test_closed(self): pen = _TestSegmentPen() ppen = PointToSegmentPen(pen) ppen.beginPath() ppen.addPoint((10, 10), "line") ppen.addPoint((10, 20), "line") ppen.addPoint((20, 20), "line") ppen.endPath() self.assertEqual("10 10 moveto 10 20 lineto 20 20 lineto closepath", repr(pen)) def test_cubic(self): pen = _TestSegmentPen() ppen = PointToSegmentPen(pen) ppen.beginPath() ppen.addPoint((10, 10), "line") ppen.addPoint((10, 20)) ppen.addPoint((20, 20)) ppen.addPoint((20, 40), "curve") ppen.endPath() self.assertEqual("10 10 moveto 10 20 20 20 20 40 curveto closepath", repr(pen)) def test_quad(self): pen = _TestSegmentPen() ppen = PointToSegmentPen(pen) ppen.beginPath(identifier='foo') ppen.addPoint((10, 10), "line") ppen.addPoint((10, 
40)) ppen.addPoint((40, 40)) ppen.addPoint((10, 40), "qcurve") ppen.endPath() self.assertEqual("10 10 moveto 10 40 40 40 10 40 qcurveto closepath", repr(pen)) def test_quad_onlyOffCurvePoints(self): pen = _TestSegmentPen() ppen = PointToSegmentPen(pen) ppen.beginPath() ppen.addPoint((10, 10)) ppen.addPoint((10, 40)) ppen.addPoint((40, 40)) ppen.endPath() self.assertEqual("10 10 10 40 40 40 None qcurveto closepath", repr(pen)) def test_roundTrip1(self): tpen = _TestPointPen() ppen = PointToSegmentPen(SegmentToPointPen(tpen)) ppen.beginPath() ppen.addPoint((10, 10), "line") ppen.addPoint((10, 20)) ppen.addPoint((20, 20)) ppen.addPoint((20, 40), "curve") ppen.endPath() self.assertEqual("beginPath() addPoint((10, 10), segmentType='line') addPoint((10, 20)) " "addPoint((20, 20)) addPoint((20, 40), segmentType='curve') endPath()", repr(tpen)) class TestSegmentToPointPen(unittest.TestCase): def test_move(self): tpen = _TestPointPen() pen = SegmentToPointPen(tpen) pen.moveTo((10, 10)) pen.endPath() self.assertEqual("beginPath() addPoint((10, 10), segmentType='move') endPath()", repr(tpen)) def test_poly(self): tpen = _TestPointPen() pen = SegmentToPointPen(tpen) pen.moveTo((10, 10)) pen.lineTo((10, 20)) pen.lineTo((20, 20)) pen.closePath() self.assertEqual("beginPath() addPoint((10, 10), segmentType='line') " "addPoint((10, 20), segmentType='line') " "addPoint((20, 20), segmentType='line') endPath()", repr(tpen)) def test_cubic(self): tpen = _TestPointPen() pen = SegmentToPointPen(tpen) pen.moveTo((10, 10)) pen.curveTo((10, 20), (20, 20), (20, 10)) pen.closePath() self.assertEqual("beginPath() addPoint((10, 10), segmentType='line') " "addPoint((10, 20)) addPoint((20, 20)) addPoint((20, 10), " "segmentType='curve') endPath()", repr(tpen)) def test_quad(self): tpen = _TestPointPen() pen = SegmentToPointPen(tpen) pen.moveTo((10, 10)) pen.qCurveTo((10, 20), (20, 20), (20, 10)) pen.closePath() self.assertEqual("beginPath() addPoint((10, 10), segmentType='line') " "addPoint((10, 20)) addPoint((20, 20)) " "addPoint((20, 10), segmentType=qcurve) endPath()", repr(tpen)) def test_quad(self): tpen = _TestPointPen() pen = SegmentToPointPen(tpen) pen.qCurveTo((10, 20), (20, 20), (20, 10), (10, 10), None) pen.closePath() self.assertEqual("beginPath() addPoint((10, 20)) addPoint((20, 20)) " "addPoint((20, 10)) addPoint((10, 10)) endPath()", repr(tpen)) def test_roundTrip1(self): spen = _TestSegmentPen() pen = SegmentToPointPen(PointToSegmentPen(spen)) pen.moveTo((10, 10)) pen.lineTo((10, 20)) pen.lineTo((20, 20)) pen.closePath() self.assertEqual("10 10 moveto 10 20 lineto 20 20 lineto closepath", repr(spen)) def test_roundTrip2(self): spen = _TestSegmentPen() pen = SegmentToPointPen(PointToSegmentPen(spen)) pen.qCurveTo((10, 20), (20, 20), (20, 10), (10, 10), None) pen.closePath() pen.addComponent('base', [1, 0, 0, 1, 0, 0]) self.assertEqual("10 20 20 20 20 10 10 10 None qcurveto closepath " "'base' [1, 0, 0, 1, 0, 0] addcomponent", repr(spen)) class TestGuessSmoothPointPen(unittest.TestCase): def test_guessSmooth_exact(self): tpen = _TestPointPen() pen = GuessSmoothPointPen(tpen) pen.beginPath(identifier="foo") pen.addPoint((0, 100), segmentType="curve") pen.addPoint((0, 200)) pen.addPoint((400, 200), identifier='bar') pen.addPoint((400, 100), segmentType="curve") pen.addPoint((400, 0)) pen.addPoint((0, 0)) pen.endPath() self.assertEqual("beginPath(identifier='foo') " "addPoint((0, 100), segmentType='curve', smooth=True) " "addPoint((0, 200)) addPoint((400, 200), identifier='bar') " "addPoint((400, 100), 
segmentType='curve', smooth=True) " "addPoint((400, 0)) addPoint((0, 0)) endPath()", repr(tpen)) def test_guessSmooth_almost(self): tpen = _TestPointPen() pen = GuessSmoothPointPen(tpen) pen.beginPath() pen.addPoint((0, 100), segmentType="curve") pen.addPoint((1, 200)) pen.addPoint((395, 200)) pen.addPoint((400, 100), segmentType="curve") pen.addPoint((400, 0)) pen.addPoint((0, 0)) pen.endPath() self.assertEqual("beginPath() addPoint((0, 100), segmentType='curve', smooth=True) " "addPoint((1, 200)) addPoint((395, 200)) " "addPoint((400, 100), segmentType='curve', smooth=True) " "addPoint((400, 0)) addPoint((0, 0)) endPath()", repr(tpen)) def test_guessSmooth_tangent(self): tpen = _TestPointPen() pen = GuessSmoothPointPen(tpen) pen.beginPath() pen.addPoint((0, 0), segmentType="move") pen.addPoint((0, 100), segmentType="line") pen.addPoint((3, 200)) pen.addPoint((300, 200)) pen.addPoint((400, 200), segmentType="curve") pen.endPath() self.assertEqual("beginPath() addPoint((0, 0), segmentType='move') " "addPoint((0, 100), segmentType='line', smooth=True) " "addPoint((3, 200)) addPoint((300, 200)) " "addPoint((400, 200), segmentType='curve') endPath()", repr(tpen)) class TestReverseContourPointPen(unittest.TestCase): def test_singlePoint(self): tpen = _TestPointPen() pen = ReverseContourPointPen(tpen) pen.beginPath() pen.addPoint((0, 0), segmentType="move") pen.endPath() self.assertEqual("beginPath() " "addPoint((0, 0), segmentType='move') " "endPath()", repr(tpen)) def test_line(self): tpen = _TestPointPen() pen = ReverseContourPointPen(tpen) pen.beginPath() pen.addPoint((0, 0), segmentType="move") pen.addPoint((0, 100), segmentType="line") pen.endPath() self.assertEqual("beginPath() " "addPoint((0, 100), segmentType='move') " "addPoint((0, 0), segmentType='line') " "endPath()", repr(tpen)) def test_triangle(self): tpen = _TestPointPen() pen = ReverseContourPointPen(tpen) pen.beginPath() pen.addPoint((0, 0), segmentType="line") pen.addPoint((0, 100), segmentType="line") pen.addPoint((100, 100), segmentType="line") pen.endPath() self.assertEqual("beginPath() " "addPoint((0, 0), segmentType='line') " "addPoint((100, 100), segmentType='line') " "addPoint((0, 100), segmentType='line') " "endPath()", repr(tpen)) def test_cubicOpen(self): tpen = _TestPointPen() pen = ReverseContourPointPen(tpen) pen.beginPath() pen.addPoint((0, 0), segmentType="move") pen.addPoint((0, 100)) pen.addPoint((100, 200)) pen.addPoint((200, 200), segmentType="curve") pen.endPath() self.assertEqual("beginPath() " "addPoint((200, 200), segmentType='move') " "addPoint((100, 200)) " "addPoint((0, 100)) " "addPoint((0, 0), segmentType='curve') " "endPath()", repr(tpen)) def test_quadOpen(self): tpen = _TestPointPen() pen = ReverseContourPointPen(tpen) pen.beginPath() pen.addPoint((0, 0), segmentType="move") pen.addPoint((0, 100)) pen.addPoint((100, 200)) pen.addPoint((200, 200), segmentType="qcurve") pen.endPath() self.assertEqual("beginPath() " "addPoint((200, 200), segmentType='move') " "addPoint((100, 200)) " "addPoint((0, 100)) " "addPoint((0, 0), segmentType='qcurve') " "endPath()", repr(tpen)) def test_cubicClosed(self): tpen = _TestPointPen() pen = ReverseContourPointPen(tpen) pen.beginPath() pen.addPoint((0, 0), segmentType="line") pen.addPoint((0, 100)) pen.addPoint((100, 200)) pen.addPoint((200, 200), segmentType="curve") pen.endPath() self.assertEqual("beginPath() " "addPoint((0, 0), segmentType='curve') " "addPoint((200, 200), segmentType='line') " "addPoint((100, 200)) " "addPoint((0, 100)) " "endPath()", 
repr(tpen)) def test_quadClosedOffCurveStart(self): tpen = _TestPointPen() pen = ReverseContourPointPen(tpen) pen.beginPath() pen.addPoint((100, 200)) pen.addPoint((200, 200), segmentType="qcurve") pen.addPoint((0, 0), segmentType="line") pen.addPoint((0, 100)) pen.endPath() self.assertEqual("beginPath() " "addPoint((100, 200)) " "addPoint((0, 100)) " "addPoint((0, 0), segmentType='qcurve') " "addPoint((200, 200), segmentType='line') " "endPath()", repr(tpen)) def test_quadNoOnCurve(self): tpen = _TestPointPen() pen = ReverseContourPointPen(tpen) pen.beginPath(identifier='bar') pen.addPoint((0, 0)) pen.addPoint((0, 100), identifier='foo', arbitrary='foo') pen.addPoint((100, 200), arbitrary=123) pen.addPoint((200, 200)) pen.endPath() pen.addComponent("base", [1, 0, 0, 1, 0, 0], identifier='foo') self.assertEqual("beginPath(identifier='bar') " "addPoint((0, 0)) " "addPoint((200, 200)) " "addPoint((100, 200), arbitrary=123) " "addPoint((0, 100), identifier='foo', arbitrary='foo') " "endPath() " "addComponent('base', [1, 0, 0, 1, 0, 0], identifier='foo')", repr(tpen))
tools/accuracy_checker/openvino/tools/accuracy_checker/metrics/question_answering.py
alpkn/open_model_zoo
2,201
132126
""" Copyright (c) 2018-2022 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import re from collections import Counter import string import numpy from ..representation import QuestionAnsweringAnnotation, QuestionAnsweringPrediction from ..representation import QuestionAnsweringEmbeddingAnnotation, QuestionAnsweringEmbeddingPrediction from ..representation import QuestionAnsweringBiDAFAnnotation from .metric import PerImageEvaluationMetric, FullDatasetEvaluationMetric from ..config import NumberField def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" def remove_articles(text): regex = re.compile(r"\b(a|an|the)\b", re.UNICODE) return re.sub(regex, " ", text) def white_space_fix(text): return " ".join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def get_tokens(s): if not s: return [] return normalize_answer(s).split() class ScoreF1(PerImageEvaluationMetric): __provider__ = 'f1' annotation_types = (QuestionAnsweringAnnotation,) prediction_types = (QuestionAnsweringPrediction,) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.per_question_results = {} def update(self, annotation, prediction): gold_answers = [answer["text"] for answer in annotation.orig_answer_text if normalize_answer(answer["text"])] if not gold_answers: gold_answers = [''] prediction_answer = prediction.tokens[0] if prediction.tokens else '' max_f1_score = max(self.compute_f1(a, prediction_answer) for a in gold_answers) current_max_f1_score = self.per_question_results.get(annotation.question_id, 0) self.per_question_results[annotation.question_id] = max(max_f1_score, current_max_f1_score) return max_f1_score @staticmethod def compute_f1(a_gold, a_pred): gold_toks = get_tokens(a_gold) pred_toks = get_tokens(a_pred) common = Counter(gold_toks) & Counter(pred_toks) num_same = sum(common.values()) if len(gold_toks) == 0 or len(pred_toks) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks) if num_same == 0: return 0 precision = 1.0 * num_same / len(pred_toks) recall = 1.0 * num_same / len(gold_toks) f1 = (2 * precision * recall) / (precision + recall) return f1 def evaluate(self, annotations, predictions): return sum(self.per_question_results.values()) / len(self.per_question_results) def reset(self): del self.per_question_results self.per_question_results = {} class ExactMatchScore(PerImageEvaluationMetric): __provider__ = 'exact_match' annotation_types = (QuestionAnsweringAnnotation, QuestionAnsweringBiDAFAnnotation, ) prediction_types = (QuestionAnsweringPrediction, ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.per_question_results = {} def update(self, annotation, prediction): gold_answers = [answer["text"] for answer in annotation.orig_answer_text if normalize_answer(answer["text"])] if not gold_answers: 
gold_answers = [''] pred_answer = prediction.tokens[0] if prediction.tokens else '' max_exact_match = max(self.compute_exact(a_gold, pred_answer) for a_gold in gold_answers) self.per_question_results[annotation.question_id] = max( max_exact_match, self.per_question_results.get(annotation.question_id, 0) ) return max_exact_match @staticmethod def compute_exact(a_gold, a_pred): return int(normalize_answer(a_gold) == normalize_answer(a_pred)) def evaluate(self, annotations, predictions): return sum(self.per_question_results.values()) / len(self.per_question_results) def reset(self): del self.per_question_results self.per_question_results = {} class QuestionAnsweringEmbeddingAccuracy(FullDatasetEvaluationMetric): __provider__ = 'qa_embedding_accuracy' annotation_types = (QuestionAnsweringEmbeddingAnnotation,) prediction_types = (QuestionAnsweringEmbeddingPrediction,) @classmethod def parameters(cls): parameters = super().parameters() parameters.update({ 'top_k': NumberField( value_type=int, min_value=1, max_value=1000, default=5, optional=True, description='Specifies the number of closest context embeddings to check.' ), }) return parameters def configure(self): self.top_k = self.get_value_from_config('top_k') def evaluate(self, annotations, predictions): ap_pairs = list(zip(annotations, predictions)) #check data alignment assert all( a.identifier is p.identifier if not isinstance(p.identifier, tuple) else p.identifier.values for a, p in ap_pairs), "annotations and predictions are not aligned" q_pairs = [(a, p) for a, p in ap_pairs if a.context_pos_indetifier is not None] c_pairs = [(a, p) for a, p in ap_pairs if a.context_pos_indetifier is None] c_data_identifiers = [a.identifier for a, p in c_pairs] c_vecs = numpy.array([p.embedding for a, p in c_pairs]) # calc distances from each question to all contexts and check if top_k has true positives true_pos = 0 for q_a, q_p in q_pairs: #calc distance between question embedding with all context embeddings d = c_vecs - q_p.embedding[None, :] dist = numpy.linalg.norm(d, ord=2, axis=1) index = dist.argsort() #check that right context in the list of top_k c_pos_index = c_data_identifiers.index(q_a.context_pos_indetifier) if c_pos_index in index[:self.top_k]: true_pos += 1 return [true_pos/len(q_pairs)] if q_pairs else 0
dynamo/__init__.py
xing-lab-pitt/dynamo-release
236
132136
<filename>dynamo/__init__.py<gh_stars>100-1000
"""Mapping Vector Field of Single Cells
"""

from .get_version import get_version

__version__ = get_version(__file__)
del get_version

from . import pp
from . import est
from . import tl
from . import vf
from . import pd
from . import pl
from . import mv
from . import sim
from .data_io import *
from . import sample_data
from . import configuration
from . import ext
from .get_version import get_all_dependencies_version
from .dynamo_logger import (
    Logger,
    LoggerManager,
    main_tqdm,
    main_info,
    main_warning,
    main_critical,
    main_exception,
)

# alias
config = configuration
tests/providers/test_profile.py
jacksmith15/faker
12,077
132179
<reponame>jacksmith15/faker<gh_stars>1000+
import unittest

from faker import Faker


class TestProfileProvider(unittest.TestCase):
    """Test profile provider methods"""

    num_samples = 10

    def setUp(self):
        self.fake = Faker()
        Faker.seed(0)

    def test_simple_profile(self):
        for _ in range(self.num_samples):
            profile = self.fake.simple_profile()
            assert isinstance(profile, dict)
            assert len(profile["username"]) >= 1
            assert profile["sex"] in ["F", "M"]

        profile = self.fake.simple_profile(sex="F")
        assert profile["sex"] == "F"

        profile = self.fake.simple_profile(sex="M")
        assert profile["sex"] == "M"

    def test_profile(self):
        for _ in range(self.num_samples):
            profile = self.fake.profile()
            assert isinstance(profile, dict)
            assert len(profile["username"]) >= 1
            assert profile["sex"] in ["F", "M"]
            assert "website" in profile.keys()

        profile = self.fake.profile(sex="F")
        assert profile["sex"] == "F"

        profile = self.fake.profile(sex="M")
        assert profile["sex"] == "M"

        profile = self.fake.profile(fields=["ssn", "name"])
        assert len(profile) == 2
        assert "ssn" in profile.keys()
        assert "name" in profile.keys()

        profile = self.fake.profile(fields=[])
        assert len(profile) > 0
        assert "ssn" in profile.keys()

        profile = self.fake.profile(fields=["secret_org"])
        assert len(profile) == 0
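For reference, the provider exercised by this test can also be called directly; a minimal sketch (the exact output values depend on locale and seed).

from faker import Faker

fake = Faker()
Faker.seed(0)

simple = fake.simple_profile(sex="F")        # small dict: username, name, sex, ...
full = fake.profile(fields=["ssn", "name"])  # restrict the returned keys
print(simple["sex"], sorted(full.keys()))    # F ['name', 'ssn']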
utils/distributed_utils.py
hellog2n/deep-generative-prior
395
132182
import math import multiprocessing as mp import os import torch import torch.distributed as dist from torch.nn import Module from torch.utils.data import Sampler class DistModule(Module): def __init__(self, module): super(DistModule, self).__init__() self.module = module broadcast_params(self.module) def forward(self, *inputs, **kwargs): return self.module(*inputs, **kwargs) def train(self, mode=True): super(DistModule, self).train(mode) self.module.train(mode) def average_gradients(model): """ average gradients """ for param in model.parameters(): if param.requires_grad and param.grad is not None: dist.all_reduce(param.grad.data) def broadcast_params(model): """ broadcast model parameters """ for p in model.state_dict().values(): dist.broadcast(p, 0) def average_params(model): """ broadcast model parameters """ worldsize = dist.get_world_size() for p in model.state_dict().values(): dist.all_reduce(p) p /= worldsize def dist_init(port): if mp.get_start_method(allow_none=True) != 'spawn': mp.set_start_method('spawn') proc_id = int(os.environ['SLURM_PROCID']) ntasks = int(os.environ['SLURM_NTASKS']) node_list = os.environ['SLURM_NODELIST'] num_gpus = torch.cuda.device_count() torch.cuda.set_device(proc_id % num_gpus) if '[' in node_list: beg = node_list.find('[') pos1 = node_list.find('-', beg) if pos1 < 0: pos1 = 1000 pos2 = node_list.find(',', beg) if pos2 < 0: pos2 = 1000 node_list = node_list[:min(pos1, pos2)].replace('[', '') addr = node_list[8:].replace('-', '.') print(addr) os.environ['MASTER_PORT'] = port os.environ['MASTER_ADDR'] = addr os.environ['WORLD_SIZE'] = str(ntasks) os.environ['RANK'] = str(proc_id) dist.init_process_group(backend='nccl') rank = dist.get_rank() world_size = dist.get_world_size() return rank, world_size class DistributedSampler(Sampler): """Sampler that restricts data loading to a subset of the dataset. It is especially useful in conjunction with :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each process can pass a DistributedSampler instance as a DataLoader sampler, and load a subset of the original dataset that is exclusive to it. .. note:: Dataset is assumed to be of constant size. Arguments: dataset: Dataset used for sampling. num_replicas (optional): Number of processes participating in distributed training. rank (optional): Rank of the current process within num_replicas. """ def __init__(self, dataset, num_replicas=None, rank=None): if num_replicas is None: if not dist.is_available(): raise RuntimeError( "Requires distributed package to be available") num_replicas = dist.get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError( "Requires distributed package to be available") rank = dist.get_rank() self.dataset = dataset self.num_replicas = num_replicas self.rank = rank self.epoch = 0 self.num_samples = int( math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) self.total_size = self.num_samples * self.num_replicas def __iter__(self): # deterministically shuffle based on epoch indices = [i for i in range(len(self.dataset))] # add extra samples to make it evenly divisible indices += indices[:(self.total_size - len(indices))] assert len(indices) == self.total_size # subsample indices = indices[self.rank * self.num_samples:(self.rank + 1) * self.num_samples] assert len(indices) == self.num_samples return iter(indices) def __len__(self): return self.num_samples def set_epoch(self, epoch): self.epoch = epoch
omnizart/vocal_contour/inference.py
nicolasanjoran/omnizart
1,145
132184
import numpy as np
from scipy.special import expit
from librosa.core import midi_to_hz

from omnizart.constants.midi import LOWEST_MIDI_NOTE


def inference(feature, model, timestep=128, batch_size=10, feature_num=384):
    assert len(feature.shape) == 2

    # Padding
    total_samples = len(feature)
    pad_bottom = (feature_num - feature.shape[1]) // 2
    pad_top = feature_num - feature.shape[1] - pad_bottom
    pad_len = timestep - 1
    feature = np.pad(feature, ((pad_len, pad_len), (pad_bottom, pad_top)))

    # Prepare for prediction
    output = np.zeros(feature.shape + (2,))
    total_batches = int(np.ceil(total_samples / batch_size))
    last_batch_idx = len(feature) - pad_len

    for bidx in range(total_batches):
        print(f"batch: {bidx+1}/{total_batches}", end="\r")

        # Collect batch feature
        start_idx = bidx * batch_size
        end_idx = min(start_idx + batch_size, last_batch_idx)
        batch = np.array([feature[idx:idx+timestep] for idx in range(start_idx, end_idx)])  # noqa: E226
        batch = np.expand_dims(batch, axis=3)

        # Predict contour
        batch_pred = model.predict(batch)
        batch_pred = 1 / (1 + np.exp(-expit(batch_pred)))

        # Add the batch results to the output container.
        for idx, pred in enumerate(batch_pred):
            slice_start = start_idx + idx
            slice_end = slice_start + timestep
            output[slice_start:slice_end] += pred

    output = output[pad_len:-pad_len, pad_bottom:-pad_top, 1]  # Remove padding

    # Filter values
    avg_max_val = np.mean(np.max(output, axis=1))
    output = np.where(output > avg_max_val, output, 0)

    # Generate final output F0
    f0 = []  # pylint: disable=invalid-name
    for pitches in output:
        if np.sum(pitches) > 0:
            pidx = np.argmax(pitches)
            f0.append(midi_to_hz(pidx / 4 + LOWEST_MIDI_NOTE))
        else:
            f0.append(0)

    return np.array(f0)
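A hedged sketch of driving the inference function above with a random feature matrix and a stub model; both are invented here purely to show the expected input/output shapes.

import numpy as np


class _StubModel:
    """Mimics the contour model's predict(): (N, T, F, 1) -> (N, T, F, 2)."""

    def predict(self, batch):
        return np.zeros(batch.shape[:3] + (2,))


feature = np.random.rand(1000, 174)  # (frames, frequency bins), bins <= feature_num
f0 = inference(feature, _StubModel(), timestep=128, batch_size=10)
print(f0.shape)  # (1000,) -- one F0 value (Hz, or 0 for unvoiced) per frame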
main_pcl.py
HCY123902/PCL
309
132226
<gh_stars>100-1000 import argparse import builtins import math import os import random import shutil import time import warnings from tqdm import tqdm import numpy as np import faiss import torch import torch.nn as nn import torch.nn.parallel import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.optim import torch.multiprocessing as mp import torch.utils.data import torch.utils.data.distributed import torchvision.transforms as transforms import torchvision.datasets as datasets import torchvision.models as models import pcl.loader import pcl.builder model_names = sorted(name for name in models.__dict__ if name.islower() and not name.startswith("__") and callable(models.__dict__[name])) parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') parser.add_argument('data', metavar='DIR', help='path to dataset') parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50', choices=model_names, help='model architecture: ' + ' | '.join(model_names) + ' (default: resnet50)') parser.add_argument('-j', '--workers', default=32, type=int, metavar='N', help='number of data loading workers (default: 32)') parser.add_argument('--epochs', default=200, type=int, metavar='N', help='number of total epochs to run') parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)') parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256), this is the total ' 'batch size of all GPUs on the current node when ' 'using Data Parallel or Distributed Data Parallel') parser.add_argument('--lr', '--learning-rate', default=0.03, type=float, metavar='LR', help='initial learning rate', dest='lr') parser.add_argument('--schedule', default=[120, 160], nargs='*', type=int, help='learning rate schedule (when to drop lr by 10x)') parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum of SGD solver') parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)', dest='weight_decay') parser.add_argument('-p', '--print-freq', default=100, type=int, metavar='N', help='print frequency (default: 10)') parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('--world-size', default=-1, type=int, help='number of nodes for distributed training') parser.add_argument('--rank', default=-1, type=int, help='node rank for distributed training') parser.add_argument('--dist-url', default='tcp://172.16.17.32:23456', type=str, help='url used to set up distributed training') parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend') parser.add_argument('--seed', default=None, type=int, help='seed for initializing training. ') parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.') parser.add_argument('--multiprocessing-distributed', action='store_true', help='Use multi-processing distributed training to launch ' 'N processes per node, which has N GPUs. 
This is the ' 'fastest way to use PyTorch for either single node or ' 'multi node data parallel training') parser.add_argument('--low-dim', default=128, type=int, help='feature dimension (default: 128)') parser.add_argument('--pcl-r', default=16384, type=int, help='queue size; number of negative pairs; needs to be smaller than num_cluster (default: 16384)') parser.add_argument('--moco-m', default=0.999, type=float, help='moco momentum of updating key encoder (default: 0.999)') parser.add_argument('--temperature', default=0.2, type=float, help='softmax temperature') parser.add_argument('--mlp', action='store_true', help='use mlp head') parser.add_argument('--aug-plus', action='store_true', help='use moco-v2/SimCLR data augmentation') parser.add_argument('--cos', action='store_true', help='use cosine lr schedule') parser.add_argument('--num-cluster', default='25000,50000,100000', type=str, help='number of clusters') parser.add_argument('--warmup-epoch', default=20, type=int, help='number of warm-up epochs to only train with InfoNCE loss') parser.add_argument('--exp-dir', default='experiment_pcl', type=str, help='experiment directory') def main(): args = parser.parse_args() if args.seed is not None: random.seed(args.seed) torch.manual_seed(args.seed) cudnn.deterministic = True warnings.warn('You have chosen to seed training. ' 'This will turn on the CUDNN deterministic setting, ' 'which can slow down your training considerably! ' 'You may see unexpected behavior when restarting ' 'from checkpoints.') if args.gpu is not None: warnings.warn('You have chosen a specific GPU. This will completely ' 'disable data parallelism.') if args.dist_url == "env://" and args.world_size == -1: args.world_size = int(os.environ["WORLD_SIZE"]) args.distributed = args.world_size > 1 or args.multiprocessing_distributed args.num_cluster = args.num_cluster.split(',') if not os.path.exists(args.exp_dir): os.mkdir(args.exp_dir) ngpus_per_node = torch.cuda.device_count() if args.multiprocessing_distributed: # Since we have ngpus_per_node processes per node, the total world_size # needs to be adjusted accordingly args.world_size = ngpus_per_node * args.world_size # Use torch.multiprocessing.spawn to launch distributed processes: the # main_worker process function mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args)) else: # Simply call main_worker function main_worker(args.gpu, ngpus_per_node, args) def main_worker(gpu, ngpus_per_node, args): args.gpu = gpu if args.gpu is not None: print("Use GPU: {} for training".format(args.gpu)) # suppress printing if not master if args.multiprocessing_distributed and args.gpu != 0: def print_pass(*args): pass builtins.print = print_pass if args.distributed: if args.dist_url == "env://" and args.rank == -1: args.rank = int(os.environ["RANK"]) if args.multiprocessing_distributed: # For multiprocessing distributed training, rank needs to be the # global rank among all the processes args.rank = args.rank * ngpus_per_node + gpu dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank) # create model print("=> creating model '{}'".format(args.arch)) model = pcl.builder.MoCo( models.__dict__[args.arch], args.low_dim, args.pcl_r, args.moco_m, args.temperature, args.mlp) print(model) if args.distributed: # For multiprocessing distributed, DistributedDataParallel constructor # should always set the single device scope, otherwise, # DistributedDataParallel will use all available devices. 
if args.gpu is not None: torch.cuda.set_device(args.gpu) model.cuda(args.gpu) # When using a single GPU per process and per # DistributedDataParallel, we need to divide the batch size # ourselves based on the total number of GPUs we have args.batch_size = int(args.batch_size / ngpus_per_node) args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node) model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) else: model.cuda() # DistributedDataParallel will divide and allocate batch_size to all # available GPUs if device_ids are not set model = torch.nn.parallel.DistributedDataParallel(model) elif args.gpu is not None: torch.cuda.set_device(args.gpu) model = model.cuda(args.gpu) # comment out the following line for debugging raise NotImplementedError("Only DistributedDataParallel is supported.") else: # AllGather implementation (batch shuffle, queue update, etc.) in # this code only supports DistributedDataParallel. raise NotImplementedError("Only DistributedDataParallel is supported.") # define loss function (criterion) and optimizer criterion = nn.CrossEntropyLoss().cuda(args.gpu) optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay) # optionally resume from a checkpoint if args.resume: if os.path.isfile(args.resume): print("=> loading checkpoint '{}'".format(args.resume)) if args.gpu is None: checkpoint = torch.load(args.resume) else: # Map model to be loaded to specified single gpu. loc = 'cuda:{}'.format(args.gpu) checkpoint = torch.load(args.resume, map_location=loc) args.start_epoch = checkpoint['epoch'] model.load_state_dict(checkpoint['state_dict']) optimizer.load_state_dict(checkpoint['optimizer']) print("=> loaded checkpoint '{}' (epoch {})" .format(args.resume, checkpoint['epoch'])) else: print("=> no checkpoint found at '{}'".format(args.resume)) cudnn.benchmark = True # Data loading code traindir = os.path.join(args.data, 'train') normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) if args.aug_plus: # MoCo v2's aug: similar to SimCLR https://arxiv.org/abs/2002.05709 augmentation = [ transforms.RandomResizedCrop(224, scale=(0.2, 1.)), transforms.RandomApply([ transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) # not strengthened ], p=0.8), transforms.RandomGrayscale(p=0.2), transforms.RandomApply([pcl.loader.GaussianBlur([.1, 2.])], p=0.5), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize ] else: # MoCo v1's aug: same as InstDisc https://arxiv.org/abs/1805.01978 augmentation = [ transforms.RandomResizedCrop(224, scale=(0.2, 1.)), transforms.RandomGrayscale(p=0.2), transforms.ColorJitter(0.4, 0.4, 0.4, 0.4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize ] # center-crop augmentation eval_augmentation = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize ]) train_dataset = pcl.loader.ImageFolderInstance( traindir, pcl.loader.TwoCropsTransform(transforms.Compose(augmentation))) eval_dataset = pcl.loader.ImageFolderInstance( traindir, eval_augmentation) if args.distributed: train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) eval_sampler = torch.utils.data.distributed.DistributedSampler(eval_dataset,shuffle=False) else: train_sampler = None eval_sampler = None train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler, 
drop_last=True) # dataloader for center-cropped images, use larger batch size to increase speed eval_loader = torch.utils.data.DataLoader( eval_dataset, batch_size=args.batch_size*5, shuffle=False, sampler=eval_sampler, num_workers=args.workers, pin_memory=True) for epoch in range(args.start_epoch, args.epochs): cluster_result = None if epoch>=args.warmup_epoch: # compute momentum features for center-cropped images features = compute_features(eval_loader, model, args) # placeholder for clustering result cluster_result = {'im2cluster':[],'centroids':[],'density':[]} for num_cluster in args.num_cluster: cluster_result['im2cluster'].append(torch.zeros(len(eval_dataset),dtype=torch.long).cuda()) cluster_result['centroids'].append(torch.zeros(int(num_cluster),args.low_dim).cuda()) cluster_result['density'].append(torch.zeros(int(num_cluster)).cuda()) if args.gpu == 0: features[torch.norm(features,dim=1)>1.5] /= 2 #account for the few samples that are computed twice features = features.numpy() cluster_result = run_kmeans(features,args) #run kmeans clustering on master node # save the clustering result # torch.save(cluster_result,os.path.join(args.exp_dir, 'clusters_%d'%epoch)) dist.barrier() # broadcast clustering result for k, data_list in cluster_result.items(): for data_tensor in data_list: dist.broadcast(data_tensor, 0, async_op=False) if args.distributed: train_sampler.set_epoch(epoch) adjust_learning_rate(optimizer, epoch, args) # train for one epoch train(train_loader, model, criterion, optimizer, epoch, args, cluster_result) if (epoch+1)%5==0 and (not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0)): save_checkpoint({ 'epoch': epoch + 1, 'arch': args.arch, 'state_dict': model.state_dict(), 'optimizer' : optimizer.state_dict(), }, is_best=False, filename='{}/checkpoint_{:04d}.pth.tar'.format(args.exp_dir,epoch)) def train(train_loader, model, criterion, optimizer, epoch, args, cluster_result=None): batch_time = AverageMeter('Time', ':6.3f') data_time = AverageMeter('Data', ':6.3f') losses = AverageMeter('Loss', ':.4e') acc_inst = AverageMeter('Acc@Inst', ':6.2f') acc_proto = AverageMeter('Acc@Proto', ':6.2f') progress = ProgressMeter( len(train_loader), [batch_time, data_time, losses, acc_inst, acc_proto], prefix="Epoch: [{}]".format(epoch)) # switch to train mode model.train() end = time.time() for i, (images, index) in enumerate(train_loader): # measure data loading time data_time.update(time.time() - end) if args.gpu is not None: images[0] = images[0].cuda(args.gpu, non_blocking=True) images[1] = images[1].cuda(args.gpu, non_blocking=True) # compute output output, target, output_proto, target_proto = model(im_q=images[0], im_k=images[1], cluster_result=cluster_result, index=index) # InfoNCE loss loss = criterion(output, target) # ProtoNCE loss if output_proto is not None: loss_proto = 0 for proto_out,proto_target in zip(output_proto, target_proto): loss_proto += criterion(proto_out, proto_target) accp = accuracy(proto_out, proto_target)[0] acc_proto.update(accp[0], images[0].size(0)) # average loss across all sets of prototypes loss_proto /= len(args.num_cluster) loss += loss_proto losses.update(loss.item(), images[0].size(0)) acc = accuracy(output, target)[0] acc_inst.update(acc[0], images[0].size(0)) # compute gradient and do SGD step optimizer.zero_grad() loss.backward() optimizer.step() # measure elapsed time batch_time.update(time.time() - end) end = time.time() if i % args.print_freq == 0: progress.display(i) def 
compute_features(eval_loader, model, args): print('Computing features...') model.eval() features = torch.zeros(len(eval_loader.dataset),args.low_dim).cuda() for i, (images, index) in enumerate(tqdm(eval_loader)): with torch.no_grad(): images = images.cuda(non_blocking=True) feat = model(images,is_eval=True) features[index] = feat dist.barrier() dist.all_reduce(features, op=dist.ReduceOp.SUM) return features.cpu() def run_kmeans(x, args): """ Args: x: data to be clustered """ print('performing kmeans clustering') results = {'im2cluster':[],'centroids':[],'density':[]} for seed, num_cluster in enumerate(args.num_cluster): # intialize faiss clustering parameters d = x.shape[1] k = int(num_cluster) clus = faiss.Clustering(d, k) clus.verbose = True clus.niter = 20 clus.nredo = 5 clus.seed = seed clus.max_points_per_centroid = 1000 clus.min_points_per_centroid = 10 res = faiss.StandardGpuResources() cfg = faiss.GpuIndexFlatConfig() cfg.useFloat16 = False cfg.device = args.gpu index = faiss.GpuIndexFlatL2(res, d, cfg) clus.train(x, index) D, I = index.search(x, 1) # for each sample, find cluster distance and assignments im2cluster = [int(n[0]) for n in I] # get cluster centroids centroids = faiss.vector_to_array(clus.centroids).reshape(k,d) # sample-to-centroid distances for each cluster Dcluster = [[] for c in range(k)] for im,i in enumerate(im2cluster): Dcluster[i].append(D[im][0]) # concentration estimation (phi) density = np.zeros(k) for i,dist in enumerate(Dcluster): if len(dist)>1: d = (np.asarray(dist)**0.5).mean()/np.log(len(dist)+10) density[i] = d #if cluster only has one point, use the max to estimate its concentration dmax = density.max() for i,dist in enumerate(Dcluster): if len(dist)<=1: density[i] = dmax density = density.clip(np.percentile(density,10),np.percentile(density,90)) #clamp extreme values for stability density = args.temperature*density/density.mean() #scale the mean to temperature # convert to cuda Tensors for broadcast centroids = torch.Tensor(centroids).cuda() centroids = nn.functional.normalize(centroids, p=2, dim=1) im2cluster = torch.LongTensor(im2cluster).cuda() density = torch.Tensor(density).cuda() results['centroids'].append(centroids) results['density'].append(density) results['im2cluster'].append(im2cluster) return results def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'): torch.save(state, filename) if is_best: shutil.copyfile(filename, 'model_best.pth.tar') class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self, name, fmt=':f'): self.name = name self.fmt = fmt self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def __str__(self): fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' return fmtstr.format(**self.__dict__) class ProgressMeter(object): def __init__(self, num_batches, meters, prefix=""): self.batch_fmtstr = self._get_batch_fmtstr(num_batches) self.meters = meters self.prefix = prefix def display(self, batch): entries = [self.prefix + self.batch_fmtstr.format(batch)] entries += [str(meter) for meter in self.meters] print('\t'.join(entries)) def _get_batch_fmtstr(self, num_batches): num_digits = len(str(num_batches // 1)) fmt = '{:' + str(num_digits) + 'd}' return '[' + fmt + '/' + fmt.format(num_batches) + ']' def adjust_learning_rate(optimizer, epoch, args): """Decay the learning rate based on schedule""" lr = args.lr if args.cos: 
# cosine lr schedule lr *= 0.5 * (1. + math.cos(math.pi * epoch / args.epochs)) else: # stepwise lr schedule for milestone in args.schedule: lr *= 0.1 if epoch >= milestone else 1. for param_group in optimizer.param_groups: param_group['lr'] = lr def accuracy(output, target, topk=(1,)): """Computes the accuracy over the k top predictions for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res if __name__ == '__main__': main()
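As an illustrative aside (not part of the original script), the concentration estimate that run_kmeans attaches to each prototype can be reproduced on plain lists without faiss or a GPU; the function name and the toy distances below are made up.

import numpy as np

def cluster_concentration(dists_per_cluster, temperature=0.2):
    # dists_per_cluster[i] holds the squared L2 distances of the samples
    # assigned to cluster i (as produced by the faiss index.search call above).
    k = len(dists_per_cluster)
    density = np.zeros(k)
    for i, d in enumerate(dists_per_cluster):
        if len(d) > 1:
            # mean distance (sqrt of the squared distance), damped by cluster size
            density[i] = (np.asarray(d) ** 0.5).mean() / np.log(len(d) + 10)
    dmax = density.max()
    for i, d in enumerate(dists_per_cluster):
        if len(d) <= 1:
            density[i] = dmax  # single-point clusters borrow the largest estimate
    # clamp extreme values, then scale so the mean equals the temperature
    density = density.clip(np.percentile(density, 10), np.percentile(density, 90))
    return temperature * density / density.mean()

# toy example: three clusters with different spreads
print(cluster_concentration([[0.1, 0.2, 0.15], [1.0, 2.0, 1.5], [0.05]]))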
ch46-机器学习-K近邻/knn-find_nearest.py
makelove/OpenCV-Python-Tutorial
2,875
132267
<gh_stars>1000+
# -*- coding: utf-8 -*-
# @Time    : 2017/8/8 12:33
# @Author  : play4fun
# @File    : knn-find_nearest.py
# @Software: PyCharm

"""
knn-find_nearest.py:

http://www.bogotobogo.com/python/OpenCV_Python/python_opencv3_Machine_Learning_Classification_K-nearest_neighbors_k-NN.php
"""

import cv2
import numpy as np
import matplotlib.pyplot as plt

# Feature set containing (x,y) values of 25 known/training data
trainData = np.random.randint(0, 100, (25, 2)).astype(np.float32)

# Labels each one either Red or Blue with numbers 0 and 1
responses = np.random.randint(0, 2, (25, 1)).astype(np.float32)

# plot Reds
red = trainData[responses.ravel() == 0]
plt.scatter(red[:, 0], red[:, 1], 80, 'r', '^')

# plot Blues
blue = trainData[responses.ravel() == 1]
plt.scatter(blue[:, 0], blue[:, 1], 80, 'b', 's')

# CvKNearest instance
# knn = cv2.KNearest()
knn = cv2.ml.KNearest_create()

# trains the model
# OpenCV 3 expects the sample layout (cv2.ml.ROW_SAMPLE) as the second argument;
# passing the responses directly raises a TypeError
knn.train(trainData, cv2.ml.ROW_SAMPLE, responses)

# New sample : (x,y)
newcomer = np.random.randint(0, 100, (1, 2)).astype(np.float32)
plt.scatter(newcomer[:, 0], newcomer[:, 1], 80, 'g', 'o')

# Finds the 3 nearest neighbors and predicts responses for input vectors
ret, results, neighbours, dist = knn.findNearest(newcomer, 3)

print("result: ", results, "\n")
print("neighbours: ", neighbours, "\n")
print("distance: ", dist)

plt.show()
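For reference, the decision findNearest makes can be mimicked with a few lines of NumPy; this sketch and its toy data are illustrative additions, not part of the tutorial file.

import numpy as np

def knn_vote(train, labels, query, k=3):
    # squared Euclidean distance from the query to every training sample
    d2 = ((train - query) ** 2).sum(axis=1)
    nearest = np.argsort(d2)[:k]                 # indices of the k closest samples
    votes = labels[nearest].ravel().astype(int)
    return np.bincount(votes).argmax(), votes    # majority label and the raw votes

train = np.array([[10, 10], [12, 9], [90, 95], [88, 92]], dtype=np.float32)
labels = np.array([[0], [0], [1], [1]], dtype=np.float32)
print(knn_vote(train, labels, np.array([11, 11], dtype=np.float32)))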
imaginaire/third_party/bias_act/setup.py
hw07216/imaginaire
3,308
132287
<reponame>hw07216/imaginaire
# flake8: noqa
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
import os

cuda_version = os.getenv('CUDA_VERSION')
print('CUDA_VERSION: {}'.format(cuda_version))

nvcc_args = list()
# nvcc_args.append('-gencode')
# nvcc_args.append('arch=compute_50,code=sm_50')
# nvcc_args.append('-gencode')
# nvcc_args.append('arch=compute_52,code=sm_52')
# nvcc_args.append('-gencode')
# nvcc_args.append('arch=compute_60,code=sm_60')
# nvcc_args.append('-gencode')
# nvcc_args.append('arch=compute_61,code=sm_61')
nvcc_args.append('-gencode')
nvcc_args.append('arch=compute_70,code=sm_70')
nvcc_args.append('-gencode')
nvcc_args.append('arch=compute_75,code=sm_75')
if cuda_version is not None:
    # compare the major version numerically; a plain string comparison would
    # order e.g. '9.2' above '11.0'
    if int(cuda_version.split('.')[0]) >= 11:
        nvcc_args.append('-gencode')
        nvcc_args.append('arch=compute_80,code=sm_80')
nvcc_args.append('-Xcompiler')
nvcc_args.append('-Wall')
nvcc_args.append('-std=c++14')

setup(
    name='bias_act_cuda',
    py_modules=['bias_act'],
    ext_modules=[
        CUDAExtension('bias_act_cuda', [
            './src/bias_act_cuda.cc',
            './src/bias_act_cuda_kernel.cu'
        ], extra_compile_args={'cxx': ['-Wall', '-std=c++14'],
                               'nvcc': nvcc_args})
    ],
    cmdclass={
        'build_ext': BuildExtension
    })
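As a hedged aside, the same sources can also be compiled just-in-time instead of through setup.py; the sketch below uses torch.utils.cpp_extension.load with the paths from the CUDAExtension above, and it only works in an environment where CUDA and those source files are actually present.

# Hypothetical JIT alternative to `python setup.py install`; the paths are taken
# from the CUDAExtension definition above and assume the same working directory.
from torch.utils.cpp_extension import load

bias_act_cuda = load(
    name='bias_act_cuda',
    sources=['./src/bias_act_cuda.cc', './src/bias_act_cuda_kernel.cu'],
    extra_cuda_cflags=['-gencode', 'arch=compute_70,code=sm_70'],
    verbose=True,
)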
shhh/api/encryption.py
olaoladapo/shhh
243
132312
import secrets from base64 import urlsafe_b64decode, urlsafe_b64encode from cryptography.fernet import Fernet from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC class Secret: """Encrypt and decrypt secrets.""" __slots__ = ("secret", "passphrase") def __init__(self, secret: bytes, passphrase: str): self.secret = secret self.passphrase = passphrase def _derive_key(self, salt: bytes, iterations: int) -> bytes: """Derive a secret key from a given passphrase and salt.""" kdf = PBKDF2HMAC( algorithm=hashes.SHA256(), length=32, salt=salt, iterations=iterations, backend=default_backend(), ) return urlsafe_b64encode(kdf.derive(self.passphrase.encode())) def encrypt(self, iterations: int = 100_000) -> bytes: """Encrypt secret.""" salt = secrets.token_bytes(16) key = self._derive_key(salt, iterations) return urlsafe_b64encode( b"%b%b%b" % ( salt, iterations.to_bytes(4, "big"), urlsafe_b64decode(Fernet(key).encrypt(self.secret)), ) ) def decrypt(self) -> str: """Decrypt secret.""" decoded = urlsafe_b64decode(self.secret) salt, iteration, message = ( decoded[:16], decoded[16:20], urlsafe_b64encode(decoded[20:]), ) iterations = int.from_bytes(iteration, "big") key = self._derive_key(salt, iterations) return Fernet(key).decrypt(message).decode("utf-8")
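A minimal round-trip with the Secret class above; the import path is inferred from the file location, and the passphrase and message are made up.

from shhh.api.encryption import Secret  # import path inferred from the file location above

token = Secret(b"meet me at noon", "correct horse battery staple").encrypt()
# decryption builds a new Secret whose `secret` field is the encrypted token
plain = Secret(token, "correct horse battery staple").decrypt()
assert plain == "meet me at noon"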
py/mazebase/utils/creationutils.py
fakeNetflix/facebook-repo-MazeBase
263
132327
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from random import shuffle, random from collections import defaultdict import itertools import mazebase.items as mi from .mazeutils import MazeException def sprinkle(game, tiles, tilemask=None): ''' Sprinkles blocks into a map. Tiles is given in the format like: [(MazeItem, float), ...] ex. [(Block, .5)] where we sprinkle MazeItem with the percent chance given by the second arg. Defaults to generating on empty tiles, but you can override this with tilemask and specify a list of locations. Returns list of item ids ''' if tilemask is None: tilemask = empty_locations(game) ids = [] for (x, y) in tilemask: shuffle(tiles) for tile, pct in tiles: if random() < pct: ids.append(game._add_item(tile(location=(x, y)))) break return ids def empty_locations(game, bad_blocks=None, mask=lambda x, y: True): '''By default, finds empty locations in the map. If bad_blocks is not none, then finds locations without any bad_blocks, but maybe with other block types mask is a function that provides valid coordinates ''' empties = [] for x, y in itertools.product(range(game.width), range(game.height)): if not mask(x, y): continue itemlst = game._map[x][y] if bad_blocks is None and itemlst == []: empties.append((x, y)) elif bad_blocks is not None and not any( isinstance(item, typ) for item, typ in itertools.product(itemlst, bad_blocks)): empties.append((x, y)) return empties def dijkstra(game, initial, movefunc, weighted=False): ''' Accepts: game initial: (x, y) tuple of start location movefunc: f(loc) determines the locations you can move to from loc weighted: use the _approx_reward_map instead of # of moves Returns: visited: dictionary of {location: distance} pairs path: dictionary of {location: previous_location} pairs ''' visited = defaultdict(lambda: 1e309) visited[initial] = 0 path = {} nodes = set(itertools.product(range(game.width), range(game.height))) while nodes: current = nodes.intersection(visited.keys()) if not current: break min_node = min(current, key=visited.get) nodes.remove(min_node) current_weight = visited[min_node] x, y = min_node for edge in movefunc(game, min_node): # Maximize reward by minimizing "distance = - reward" w = -game._approx_reward_map[edge[0]][edge[1]] if weighted else 1 weight = current_weight + w if edge not in visited or weight < visited[edge]: visited[edge] = weight path[edge] = min_node return visited, path def __movefunc_helper(game, loc, movefunc_helper): res = [] x, y = loc for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]: nx, ny = x + dx, y + dy if not game._in_bounds((nx, ny)): continue if movefunc_helper(game, loc, (dx, dy)): res.append((nx, ny)) return res def agent_movefunc(game, loc): ''' Can move to non-block spaces ''' def helper(game, loc, dloc): x, y = loc dx, dy = dloc nx, ny = x + dx, y + dy return game._tile_get_block((nx, ny), mi.Block) is None return __movefunc_helper(game, loc, helper) def pushblock_movefunc(game, loc): ''' Can move if tile behind and in front are not blocked (so agent can push from behind) ''' def helper(game, loc, dloc): x, y = loc dx, dy = dloc tx, ty = x - dx, y - dy nx, ny = x + dx, y + dy return (game._in_bounds((tx, ty)) and game._tile_get_block((nx, ny), mi.Block) is None and game._tile_get_block((tx, ty), mi.Block) is None) return __movefunc_helper(game, loc, helper)
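dijkstra above returns a predecessor map rather than an ordered route; a small helper along the lines below (not part of the module) turns that map into a path. The toy predecessor dictionary is hypothetical.

def extract_route(path, start, target):
    """Walk the {location: previous_location} map returned by dijkstra back to start."""
    route = [target]
    while route[-1] != start:
        route.append(path[route[-1]])  # raises KeyError if target was unreachable
    return list(reversed(route))

# toy predecessor map for a 3-step walk from (0, 0) to (2, 1)
toy_path = {(1, 0): (0, 0), (2, 0): (1, 0), (2, 1): (2, 0)}
print(extract_route(toy_path, (0, 0), (2, 1)))  # [(0, 0), (1, 0), (2, 0), (2, 1)]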
src/api-engine/api/common/response.py
MicroMetaverse/cello
865
132334
"""Class represents response format. { status, successful/fail data, response msg, error messages } """ def ok(data): return { "data": data, "msg": None, "status": "successful" } def err(msg): return { "data": None, "msg": msg, "status": "fail" }
docs/examples/dns/pointdns/instantiate_driver.py
dupontz/libcloud
1,435
132335
from libcloud.dns.types import Provider from libcloud.dns.providers import get_driver cls = get_driver(Provider.POINTDNS) driver = cls('username', 'apikey')
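Once the driver is instantiated as above, the generic libcloud DNS calls apply; the zone and record values below are placeholders, and Point DNS specifics may differ.

from libcloud.dns.types import RecordType

zones = driver.list_zones()               # all zones on the account
records = driver.list_records(zones[0])   # records of the first zone
driver.create_record(name='www', zone=zones[0],
                     type=RecordType.A, data='192.0.2.10')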
src/winforms/toga_winforms/widgets/textinput.py
luizoti/toga
1,261
132336
from ctypes import c_uint from ctypes.wintypes import HWND, WPARAM from travertino.size import at_least from travertino.constants import TRANSPARENT from toga_winforms.colors import native_color from toga_winforms.libs import HorizontalTextAlignment, WinForms, user32 from .base import Widget class TextInput(Widget): def create(self): self.native = WinForms.TextBox() self.native.Multiline = False self.native.DoubleClick += self.winforms_double_click self.native.TextChanged += self.winforms_text_changed self.native.Validated += self.winforms_validated self.native.GotFocus += self.winforms_got_focus self.native.LostFocus += self.winforms_lost_focus self.error_provider = WinForms.ErrorProvider() self.error_provider.SetIconAlignment( self.native, WinForms.ErrorIconAlignment.MiddleRight ) self.error_provider.SetIconPadding(self.native, -20) self.error_provider.BlinkStyle = WinForms.ErrorBlinkStyle.NeverBlink def set_readonly(self, value): self.native.ReadOnly = value def set_placeholder(self, value): # This solution is based on https://stackoverflow.com/questions/4902565/watermark-textbox-in-winforms if self.interface.placeholder: # Message Code for setting Cue Banner (Placeholder) EM_SETCUEBANNER = c_uint(0x1501) # value 0 means placeholder is hidden as soon the input gets focus # value 1 means placeholder is hidden only after something is typed into input show_placeholder_on_focus = WPARAM(1) window_handle = HWND(self.native.Handle.ToInt32()) user32.SendMessageW(window_handle, EM_SETCUEBANNER, show_placeholder_on_focus, self.interface.placeholder) def get_value(self): return self.native.Text def set_value(self, value): self.native.Text = value def set_alignment(self, value): self.native.TextAlign = HorizontalTextAlignment(value) def set_font(self, font): if font: self.native.Font = font.bind(self.interface.factory).native def set_color(self, color): if color: self.native.ForeColor = native_color(color) else: self.native.ForeColor = self.native.DefaultForeColor def set_background_color(self, value): if value: self.native.BackColor = native_color(value) else: self.native.BackColor = native_color(TRANSPARENT) def rehint(self): # Height of a text input is known and fixed. # Width must be > 100 # print("REHINT TextInput", self, self.native.PreferredSize) self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH) self.interface.intrinsic.height = self.native.PreferredSize.Height def set_on_change(self, handler): # No special handling required pass def set_on_gain_focus(self, handler): # No special handling required pass def set_on_lose_focus(self, handler): # No special handling required pass def winforms_text_changed(self, sender, event): if self.interface._on_change: self.interface.on_change(self.interface) def winforms_validated(self, sender, event): self.interface.validate() def winforms_got_focus(self, sender, event): if self.container and self.interface.on_gain_focus: self.interface.on_gain_focus(self.interface) def winforms_lost_focus(self, sender, event): if self.container and self.interface.on_lose_focus: self.interface.on_lose_focus(self.interface) def clear_error(self): self.error_provider.SetError(self.native, "") def set_error(self, error_message): self.error_provider.SetError(self.native, error_message) def winforms_double_click(self, sender, event): self.native.SelectAll()
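For context, this backend is driven by the platform-independent toga.TextInput widget; the sketch below shows that interface side. Keyword names such as placeholder and on_change are believed to match the toga API of this era but may vary between releases, so treat them as indicative.

import toga

def build(app):
    # on_change ultimately fires through the winforms_text_changed handler above
    name = toga.TextInput(placeholder='Your name',
                          on_change=lambda widget: print(widget.value))
    return toga.Box(children=[name])

# toga.App('Demo', 'org.example.demo', startup=build).main_loop()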
codechef/july18a/jerrytom.py
Ashindustry007/competitive-programming
506
132338
#!/usr/bin/env python3 # https://www.codechef.com/JULY18A/problems/JERRYTOM def max_clique(g): n = 0 for x in g: n = max(n, len(x)) l = [set() for _ in range(n + 1)] s = [0] * len(g) for i, x in enumerate(g): ll = len(x) l[ll].add(i) s[i] = ll m = 0 for _ in range(len(g)): for i in range(n + 1): if len(l[i]) > 0: x = l[i].pop() m = max(m, i) s[x] = 0 for k in g[x]: if s[k] > 0: l[s[k]].remove(k) s[k] -= 1 l[s[k]].add(k) break return m + 1 def dfs(s, b, u): s.add(u) b[u] = True for v in g[u]: if b[v]: continue dfs(s, b, v) for _ in range(int(input())): n, m = map(int, input().split()) g = [list() for _ in range(n)] for _ in range(m): u, v = map(int, input().split()) g[u-1].append(v-1) g[v-1].append(u-1) print(max_clique(g))
benchmarks/baseline_classifiers.py
zhhengcs/sunny-side-up
581
132341
import os, sys, logging import json import numpy as np import random from collections import defaultdict, Counter import cPickle as pickle import cProfile, pstats import threading import time import multiprocessing import math from sklearn import metrics from sklearn import svm from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from src.datasets import data_utils from src.datasets.data_utils import timed, TextTooShortException, DataSampler, WordVectorBuilder from src.datasets.imdb import IMDB from src.datasets.sentiment140 import Sentiment140 from src.datasets.amazon_reviews import AmazonReviews from src.datasets.open_weiboscope import OpenWeibo from src.datasets.arabic_twitter import ArabicTwitter from src.datasets.word_vector_embedder import WordVectorEmbedder data_fraction_test = 0.20 data_fraction_train = 0.80 num_threads = multiprocessing.cpu_count() threadLock = threading.Lock() # setup logging logger = data_utils.syslogger(__name__) # set output directory dir_data = "/data" try: dir_results = os.path.join(dir_data, os.path.dirname(os.path.realpath(__file__)), 'results') except NameError: dir_results = os.path.join(dir_data, 'results') # data inputs datasets = [ # { 'sentiment140': { # 'class': Sentiment140, # 'path': os.path.join(dir_data, 'sentiment140.csv'), # 'args': { 'load': { 'rng_seed': 13337 }, # 'embed': { 'type': 'averaged' }, # 'normalize': { 'min_length': 70, # 'max_length': 150, # 'reverse': False, # 'pad_out': False # }, # 'shuffle_after_load': False, # 'models': [ # 'glove', # 'word2vec' # ] # } # } # }, # { 'imdb': { # 'class': IMDB, # 'path': os.path.join(dir_data, 'imdb'), # 'args': { 'load': { 'rng_seed': 13337 }, # 'embed': { 'type': 'averaged' }, # 'normalize': { 'encoding': None, # 'reverse': False, # 'pad_out': False, # 'min_length': 0, # 'max_length': 9999999 # }, # 'shuffle_after_load': False, # 'models': [ # 'glove', # 'word2vec' # ] # } # } # }, # { 'amazon': { # 'class': AmazonReviews, # 'path': os.path.join(dir_data, 'amazonreviews.gz'), # 'args': { 'load': { 'rng_seed': 13337 }, # 'embed': { 'type': 'averaged' }, # 'normalize': { 'encoding': None, # 'reverse': False, # 'min_length': 0, # 'max_length': 9999999, # 'pad_out': False # }, # 'shuffle_after_load': True, # 'models': [ # 'glove', # 'word2vec', # { # 'word2vec': { 'model': '/data/amazon/amazon_800000.bin' } # } # ] # } # } # }, # { 'openweibo': { # 'class': OpenWeibo, # 'path': os.path.join(dir_data, 'openweibo'), # 'args': { 'load': { 'rng_seed': 13337 }, # 'embed': { 'type': 'averaged' }, # 'shuffle_after_load': True, # 'models': [ # 'glove', # 'word2vec', # { # 'word2vec': { 'model': '/data/openweibo/openweibo_800000.bin' } # } # ] # } # } # }, # { 'openweibo': { # 'class': OpenWeibo, # 'path': os.path.join(dir_data, 'openweibocensored'), # 'args': { 'load': { 'form': 'hanzi', # 'rng_seed': 13337, # 'label_type': 'denied' # }, # 'embed': { 'type': 'averaged' }, # 'shuffle_after_load': True, # 'models': [ # 'glove', # 'word2vec', # { # 'word2vec': { 'model': '/data/openweibo/openweibo_fullset_hanzi_CLEAN_vocab31357747.bin' } # } # ] # } # } # }, # { 'openweibo': { # 'class': OpenWeibo, # 'path': os.path.join(dir_data, 'openweibo'), # 'args': { 'load': { 'form': 'hanzi', # 'rng_seed': 13337 # }, # 'embed': { 'type': 'averaged' }, # 'shuffle_after_load': True, # 'models': [ # 'glove', # 'word2vec', # { # 'word2vec': { 'model': '/data/openweibo/openweibo_fullset_hanzi_CLEAN_vocab31357747.bin' } # } # ] 
# } # } # }, # { 'openweibo': { # 'class': OpenWeibo, # 'path': os.path.join(dir_data, 'openweibo'), # 'args': { 'load': { 'form': 'hanzi', # 'rng_seed': 13337 # }, # 'embed': { 'type': 'averaged' }, # 'shuffle_after_load': True, # 'models': [ # { # 'word2vec': { 'model': '/data/openweibo/openweibo_fullset_min10_hanzi_vocab2548911_binary_CLEAN.bin', # 'train': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_train.bin', # 'test': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_test.bin', # 'args': { 'binary': 'True' } # } # }, # { # 'word2vec': { 'model': '/data/GoogleNews-vectors-negative300.bin.gz', # 'train': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_train.bin', # 'test': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_test.bin' # } # }, # { # 'glove': { # 'train': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_train.bin', # 'test': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_test.bin' # } # }, # { # 'word2vec': { # 'model': '/data/sentiment140_800000.bin', # 'train': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_train.bin', # 'test': '/data/openweibo/openweibo_hanzi_deleted_800000_samples_test.bin' # } # } # ] # } # } # }, # { 'openweibo': { # 'class': OpenWeibo, # 'path': os.path.join(dir_data, 'openweibo'), # 'args': { 'load': { 'form': 'hanzi', # 'rng_seed': 13337, # 'label_type': 'denied' # }, # 'embed': { 'type': 'averaged' }, # 'shuffle_after_load': True, # 'models': [ # { # 'word2vec': { 'model': '/data/openweibo/openweibo_fullset_min10_hanzi_vocab2548911_binary_CLEAN.bin', # 'train': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_train.bin', # 'test': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_test.bin', # 'args': { 'binary': 'True' } # } # }, # { # 'word2vec': { 'model': '/data/GoogleNews-vectors-negative300.bin.gz', # 'train': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_train.bin', # 'test': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_test.bin', # 'args': { 'binary': 'True' } # } # }, # { # 'glove': { # 'train': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_train.bin', # 'test': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_test.bin', # } # }, # { # 'word2vec': { # 'model': '/data/sentiment140_800000.bin', # 'train': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_train.bin', # 'test': '/data/openweibocensored/openweibo_hanzi_censored_27622_samples_test.bin', # } # } # ] # } # } # }, { 'arabic_twitter': { 'class': ArabicTwitter, 'path': os.path.join(dir_data, 'arabic_twitter'), 'args': { 'load': { 'form': 'arabic', 'rng_seed': 13337 }, 'embed': { 'type': 'averaged' }, 'shuffle_after_load': True, 'models': [ # { # 'word2vec': { 'model': '/data/arabic_tweets/arabic_tweets_min10vocab_vocab1520226.bin', # 'train': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_train.bin', # 'test': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_test.bin', # 'args': { 'binary': 'True' } # } # }, { 'word2vec': { 'model': '/data/GoogleNews-vectors-negative300.bin.gz', 'train': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_train.bin', 'test': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_test.bin', 'args': { 'binary': 'True' } } }, { 'glove': { 'train': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_train.bin', 'test': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_test.bin', } }, { 'word2vec': { 'model': '/data/sentiment140_800000.bin', 
'train': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_train.bin', 'test': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_test.bin', } }, { 'word2vec': { 'model': '/data/arabic_tweets/arabic_tweets_NLTK_min10vocab_vocab981429.bin', 'train': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_train.bin', 'test': '/data/arabic_tweets/arabic_twitter_emojis_767203_samples_test.bin', 'args': { 'binary': 'True' } } } ] } } } ] def classifiers(): """ Returns a list of classifier tuples (name, model) for use in training """ return [("LogisticRegression", LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, penalty='l2', random_state=None, tol=0.0001)), ("RandomForests", RandomForestClassifier(n_jobs=-1, n_estimators = 15, max_features = 'sqrt')), ("Gaussian NaiveBayes", GaussianNB())] #, #("LinearSVM", svm.LinearSVC())] # profiled methods @timed def timed_training(classifier, values, labels): return classifier.fit(values, labels) @timed def timed_testing(classifier, values): return classifier.predict(values) @timed def timed_dataload(loader, data, args, embedder, values, labels): # use separate counter to account for invalid input along the way counter = 0 for text,sentiment in data: try: if (counter % 10000 == 0): print("Loading at {}".format(counter)) # normalize and tokenize if necessary if args.has_key('normalize'): text_normalized = data_utils.normalize(text, **args['normalize']) else: text_normalized = text # tokenize if args.get('load', {}).get('form', None) == 'hanzi': tokens = data_utils.tokenize_hanzi(text_normalized) elif args.get('load', {}).get('form', None) == 'arabic': text_stripped = loader.twitter_strip(text_normalized) tokens = loader.tokenize_arabic(text_stripped) else: tokens = data_utils.tokenize(text_normalized) # choose embedding type vector = None if args['embed']['type'] == 'concatenated': vector = embedder.embed_words_into_vectors_concatenated(tokens, **self.args['embed']) elif args['embed']['type'] == 'averaged': vector = embedder.embed_words_into_vectors_averaged(tokens) else: pass # data labeled by sentiment score (thread-safe with lock) if vector is not None: values.append(vector) labels.append(sentiment) counter += 1 except TextTooShortException as e: pass # iterate all datasources for dataset in datasets: for data_source, data_params in dataset.iteritems(): # prepare data loader klass = data_params['class'] loader = klass(data_params['path']) data_args = data_params['args'] load_args = data_args.get('load', {}) data = loader.load_data(**load_args) # test all vector models for embedder_model in data_args['models']: # identify prebuilt model if exists if isinstance(embedder_model, dict): # initialize word vector embedder embedder_model, prebuilt_model_params = embedder_model.items().pop() prebuilt_path_model = prebuilt_model_params.get('model', None) model_args = prebuilt_model_params.get('args', {}) embedder = WordVectorEmbedder(embedder_model, model_fullpath=prebuilt_path_model, model_args=model_args) # update embedder parameters if prebuilt_path_model: model_path_dir, model_path_filename, model_path_filext = WordVectorBuilder.filename_components(prebuilt_path_model) embedder.model_subset = model_path_filename # training data (custom or default) if prebuilt_model_params.get('train', None): prebuilt_path_train = prebuilt_model_params.get('train') else: prebuilt_path_train = WordVectorBuilder.filename_train(prebuilt_path_model) with open(prebuilt_path_train, 'rb') as f: data_train = 
pickle.load(f) # testing data (custom or default) if prebuilt_model_params.get('test', None): prebuilt_path_test = prebuilt_model_params.get('test') else: prebuilt_path_test = WordVectorBuilder.filename_test(prebuilt_path_model) with open(prebuilt_path_test, 'rb') as f: data_test = pickle.load(f) # initialize lists (will be converted later into numpy arrays) values_train = [] labels_train = [] values_test = [] labels_test = [] # initialize timer seconds_loading = 0 logger.info("processing {} samples from {}...".format(len(data_train)+len(data_test), prebuilt_path_model)) # load training dataset profile_results = timed_dataload(loader, data_train, data_args, embedder, values_train, labels_train) seconds_loading += profile_results.timer.total_tt # load training dataset profile_results = timed_dataload(loader, data_test, data_args, embedder, values_test, labels_test) seconds_loading += profile_results.timer.total_tt # shuffle if necessary if data_args['shuffle_after_load']: # store new lists values_train_shuffled = [] labels_train_shuffled = [] values_test_shuffled = [] labels_test_shuffled = [] # generate subsample of random indices out of total available random.seed(data_args.get('load', {}).get('rng_seed', None)) indices_train = range(len(values_train)) indices_test = range(len(values_test)) random.shuffle(indices_train) random.shuffle(indices_test) # keep entries at those random indices for i in indices_train: values_train_shuffled.append(values_train[i]) labels_train_shuffled.append(labels_train[i]) for i in indices_test: values_test_shuffled.append(values_test[i]) labels_test_shuffled.append(labels_test[i]) # keep shuffled lists values_train = values_train_shuffled labels_train = labels_train_shuffled values_test = values_test_shuffled labels_test = labels_test_shuffled # create numpy arrays for classifier input values_train = np.array(values_train, dtype='float32') labels_train = np.array(labels_train, dtype='float32') values_test = np.array(values_test, dtype='float32') labels_test = np.array(labels_test, dtype='float32') else: # initialize word vector embedder embedder = WordVectorEmbedder(embedder_model) # initialize lists (will be converted later into numpy arrays) values = [] labels = [] # get equal-sized subsets of each class data_sampler = DataSampler(klass, file_path=data_params['path'], num_classes=2) data = data_sampler.sample_balanced(min_samples=data_args.get('min_samples', None), rng_seed=data_args.get('load', {}).get('rng_seed', None)) # load dataset logger.info("processing {} samples from {}...".format(len(data), data_params['path'])) profile_results = timed_dataload(loader, data, data_args, embedder, values, labels) # store loading time seconds_loading = profile_results.timer.total_tt # shuffle if necessary if data_args['shuffle_after_load']: # store new lists values_shuffled = [] labels_shuffled = [] # generate subsample of random indices out of total available random.seed(data_args.get('load', {}).get('rng_seed', None)) indices = range(len(values)) random.shuffle(indices) # keep entries at those random indices for i in indices: values_shuffled.append(values[i]) labels_shuffled.append(labels[i]) # keep shuffled lists values = values_shuffled labels = labels_shuffled # convert into nparray for sklearn values = np.nan_to_num(np.array(values, dtype="float32")) labels = np.nan_to_num(np.array(labels, dtype="float32")) logger.info("Loaded {} samples...".format(len(values))) # split into training and test data logger.info("splitting dataset into training and testing 
sets...") labels_train, labels_dev, labels_test = data_utils.split_data(labels, train=data_fraction_train, dev=0, test=data_fraction_test) values_train, values_dev, values_test = data_utils.split_data(values, train=data_fraction_train, dev=0, test=data_fraction_test) # calculate distribution dist = Counter() dist.update(labels_test) # setup classifier logger.info("Training on {}, Testing on {}...".format(len(values_train), len(values_test))) for classifier_name,classifier in classifiers(): # profiled training logger.info("Training %s classifier..." % classifier.__class__.__name__) profile_results = timed_training(classifier, values_train, labels_train) seconds_training = profile_results.timer.total_tt # profiled testing logger.info("Testing %s classifier..." % classifier.__class__.__name__) profile_results = timed_testing(classifier, values_test) predictions = profile_results.results seconds_testing = profile_results.timer.total_tt # calculate metrics data_size = len(labels_test) data_positive = np.sum(labels_test) data_negative = data_size - data_positive confusion_matrix = metrics.confusion_matrix(labels_test, predictions) TN = confusion_matrix[0][0] FP = confusion_matrix[0][1] FN = confusion_matrix[1][0] TP = confusion_matrix[1][1] accuracy = metrics.accuracy_score(labels_test, predictions) precision = metrics.precision_score(labels_test, predictions) recall = metrics.recall_score(labels_test, predictions) f1 = metrics.f1_score(labels_test, predictions) # build results object results = { 'classifier': str(classifier.__class__.__name__), 'data': { 'source': str(data_source), 'testsize': str(data_size), 'positive': str(data_positive), 'negative': str(data_negative), 'time_in_seconds_loading': str(seconds_loading) }, 'embedding': { 'model': str(embedder_model), 'subset': str(embedder.model_subset) }, 'data_args': data_args, 'metrics': { 'TP': str(TP), 'FP': str(FP), 'TN': str(TN), 'FN': str(FN), 'accuracy': str(accuracy), 'precision': str(precision), 'recall': str(recall), 'f1': str(f1), 'time_in_seconds_training': str(seconds_training), 'time_in_seconds_testing': str(seconds_testing) } } # ensure output directory exists if not os.path.isdir(dir_results): data_utils.mkdir_p(dir_results) # save json file filename_results = "{}_{}_{}.json".format(data_source, embedder_model, classifier.__class__.__name__) logger.info("Saving results to {}...".format(filename_results)) with open(os.path.join(dir_results,filename_results), 'a') as outfile: json.dump(results, outfile, sort_keys=True, indent=4, separators=(',', ': ')) outfile.write('\n')
Chapter 3/restful_python_chapter_03_04/users_test_01.py
Mohamed2011-bit/Building-RESTful-Python-Web-Services
116
132375
""" Book: Building RESTful Python Web Services Chapter 3: Improving and adding authentication to an API with Django Author: <NAME> - Twitter.com/gastonhillar Publisher: Packt Publishing Ltd. - http://www.packtpub.com """ from django.contrib.auth.models import User user = User.objects.create_user('kevin', '<EMAIL>', '<PASSWORD>') user.save()
jorldy/test/core/network/test_rnd_network.py
Kyushik/JORLDY
300
132390
<reponame>Kyushik/JORLDY import torch from core.network.rnd import RND_MLP, RND_CNN, RND_Multi def test_rnd_mlp_call(): D_in, D_out, D_hidden = 2, 3, 4 num_workers, gamma_i = 2, 0.99 net = RND_MLP( D_in=D_in, D_out=D_out, D_hidden=D_hidden, num_workers=num_workers, gamma_i=gamma_i, ) batch_size = 5 mock_input = [ torch.rand((batch_size * num_workers, D_in)), ] out = net(*mock_input, update_ri=True) assert out.shape == (batch_size * num_workers, 1) mock_input = [ torch.rand((batch_size, D_in)), ] out = net(*mock_input, update_ri=False) assert out.shape == (batch_size, 1) def test_rnd_cnn_call(): D_in, D_out, D_hidden = [3, 36, 36], 3, 4 num_workers, gamma_i = 2, 0.99 net = RND_CNN( D_in=D_in, D_out=D_out, D_hidden=D_hidden, num_workers=num_workers, gamma_i=gamma_i, ) batch_size = 5 mock_input = [ torch.rand((batch_size * num_workers, *D_in)), ] out = net(*mock_input, update_ri=True) assert out.shape == (batch_size * num_workers, 1) mock_input = [ torch.rand((batch_size, *D_in)), ] out = net(*mock_input, update_ri=False) assert out.shape == (batch_size, 1) def test_rnd_multi_call(): D_in, D_out, D_hidden = [[3, 36, 36], 2], 3, 4 num_workers, gamma_i = 2, 0.99 net = RND_Multi( D_in=D_in, D_out=D_out, D_hidden=D_hidden, num_workers=num_workers, gamma_i=gamma_i, ) batch_size = 5 mock_input = [ [ torch.rand((batch_size * num_workers, *D_in[0])), torch.rand((batch_size * num_workers, D_in[1])), ], ] out = net(*mock_input, update_ri=True) assert out.shape == (batch_size * num_workers, 1) mock_input = [ [torch.rand((batch_size, *D_in[0])), torch.rand((batch_size, D_in[1]))], ] out = net(*mock_input, update_ri=False) assert out.shape == (batch_size, 1)
lingvo/tasks/asr/tools/simple_wer_v2.py
allenwang28/lingvo
2,611
132399
# Lint as: python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The new version script to evalute the word error rate (WER) for ASR tasks. Tensorflow and Lingvo are not required to run this script. Example of Usage: a) `python simple_wer_v2.py file_hypothesis file_reference` b) `python simple_wer_v2.py file_hypothesis file_reference file_keyphrases` where `file_hypothesis` is the filename for hypothesis text, `file_reference` is the filename for reference text, and `file_keyphrases` is the optional filename for important phrases (one phrase per line). Note that the program will also generate a html to diagnose the errors, and the html filename is `{$file_hypothesis}_diagnois.html`. Another way is to use this file as a stand-alone library, by calling class SimpleWER with the following member functions: - AddHypRef(hyp, ref): Updates the evaluation for each (hyp,ref) pair. - GetWER(): Computes word error rate (WER) for all the added hyp-ref pairs. - GetSummaries(): Generates strings to summarize word and key phrase errors. - GetKeyPhraseStats(): Measures stats for key phrases. Stats include: (1) Jaccard similarity: https://en.wikipedia.org/wiki/Jaccard_index. (2) F1 score: https://en.wikipedia.org/wiki/Precision_and_recall. """ import re import sys def TxtPreprocess(txt): """Preprocess text before WER caculation.""" # Lowercase, remove \t and new line. txt = re.sub(r'[\t\n]', ' ', txt.lower()) # Remove punctuation before space. txt = re.sub(r'[,.\?!]+ ', ' ', txt) # Remove punctuation before end. txt = re.sub(r'[,.\?!]+$', ' ', txt) # Remove punctuation after space. txt = re.sub(r' [,.\?!]+', ' ', txt) # Remove quotes, [, ], ( and ). txt = re.sub(r'["\(\)\[\]]', '', txt) # Remove extra space. txt = re.sub(' +', ' ', txt.strip()) return txt def RemoveCommentTxtPreprocess(txt): """Preprocess text and remove comments in the brancket, such as [comments].""" # Remove comments surrounded by box brackets: txt = re.sub(r'\[\w+\]', '', txt) return TxtPreprocess(txt) def HighlightAlignedHtml(hyp, ref, err_type): """Generate a html element to highlight the difference between hyp and ref. Args: hyp: Hypothesis string. ref: Reference string. err_type: one of 'none', 'sub', 'del', 'ins'. Returns: a html string where disagreements are highlighted. Note `hyp` is highlighted in green, and marked with <del> </del> `ref` is highlighted in yellow. If you want html with nother styles, consider to write your own function. Raises: ValueError: if err_type is not among ['none', 'sub', 'del', 'ins']. 
or if when err_type == 'none', hyp != ref """ highlighted_html = '' if err_type == 'none': if hyp != ref: raise ValueError('hyp (%s) does not match ref (%s) for none error' % (hyp, ref)) highlighted_html += '%s ' % hyp elif err_type == 'sub': highlighted_html += """<span style="background-color: yellow"> <del>%s</del></span><span style="background-color: yellow"> %s </span> """ % (hyp, ref) elif err_type == 'del': highlighted_html += """<span style="background-color: red"> %s </span> """ % ( ref) elif err_type == 'ins': highlighted_html += """<span style="background-color: green"> <del>%s</del> </span> """ % ( hyp) else: raise ValueError('unknown err_type ' + err_type) return highlighted_html def ComputeEditDistanceMatrix(hyp_words, ref_words): """Compute edit distance between two list of strings. Args: hyp_words: the list of words in the hypothesis sentence ref_words: the list of words in the reference sentence Returns: Edit distance matrix (in the format of list of lists), where the first index is the reference and the second index is the hypothesis. """ reference_length_plus = len(ref_words) + 1 hypothesis_length_plus = len(hyp_words) + 1 edit_dist_mat = [[]] * reference_length_plus # Initialization. for i in range(reference_length_plus): edit_dist_mat[i] = [0] * hypothesis_length_plus for j in range(hypothesis_length_plus): if i == 0: edit_dist_mat[0][j] = j elif j == 0: edit_dist_mat[i][0] = i # Do dynamic programming. for i in range(1, reference_length_plus): for j in range(1, hypothesis_length_plus): if ref_words[i - 1] == hyp_words[j - 1]: edit_dist_mat[i][j] = edit_dist_mat[i - 1][j - 1] else: tmp0 = edit_dist_mat[i - 1][j - 1] + 1 tmp1 = edit_dist_mat[i][j - 1] + 1 tmp2 = edit_dist_mat[i - 1][j] + 1 edit_dist_mat[i][j] = min(tmp0, tmp1, tmp2) return edit_dist_mat class SimpleWER: """Compute word error rates after the alignment. Attributes: key_phrases: list of important phrases. aligned_htmls: list of diagnois htmls, each of which corresponding to a pair of hypothesis and reference. hyp_keyphrase_counts: dict. `hyp_keyphrase_counts[w]` counts how often a key phrases `w` appear in the hypotheses. ref_keyphrase_counts: dict. `ref_keyphrase_counts[w]` counts how often a key phrases `w` appear in the references. matched_keyphrase_counts: dict. `matched_keyphrase_counts[w]` counts how often a key phrase `w` appear in the aligned transcripts when the reference and hyp_keyphrase match. wer_info: dict with four keys: 'sub' (substitution error), 'ins' (insersion error), 'del' (deletion error), 'nw' (number of words). We can use wer_info to compute word error rate (WER) as (wer_info['sub']+wer_info['ins']+wer_info['del'])*100.0/wer_info['nw'] """ def __init__(self, key_phrases=None, html_handler=HighlightAlignedHtml, preprocess_handler=RemoveCommentTxtPreprocess): """Initialize SimpleWER object. Args: key_phrases: list of strings as important phrases. If key_phrases is None, no key_phrases related metric will be computed. html_handler: function to generate a string with html tags. preprocess_handler: function to preprocess text before computing WER. 
""" self._preprocess_handler = preprocess_handler self._html_handler = html_handler self.key_phrases = key_phrases self.aligned_htmls = [] self.wer_info = {'sub': 0, 'ins': 0, 'del': 0, 'nw': 0} if key_phrases: # Pre-process key_phrase list if self._preprocess_handler: self.key_phrases = \ [self._preprocess_handler(k) for k in self.key_phrases] # Init keyphrase_counts for every key phrase self.ref_keyphrase_counts = {} self.hyp_keyphrase_counts = {} self.matched_keyphrase_counts = {} for k in self.key_phrases: self.ref_keyphrase_counts[k] = 0 self.hyp_keyphrase_counts[k] = 0 self.matched_keyphrase_counts[k] = 0 else: self.ref_keyphrase_counts = None self.hyp_keyphrase_counts = None self.matched_keyphrase_counts = None def AddHypRef(self, hypothesis, reference): """Update WER when adding one pair of strings: (hypothesis, reference). Args: hypothesis: Hypothesis string. reference: Reference string. Raises: ValueError: when the program fails to parse edit distance matrix. """ if self._preprocess_handler: hypothesis = self._preprocess_handler(hypothesis) reference = self._preprocess_handler(reference) # Compute edit distance. hyp_words = hypothesis.split() ref_words = reference.split() distmat = ComputeEditDistanceMatrix(hyp_words, ref_words) # Back trace, to distinguish different errors: ins, del, sub. pos_hyp, pos_ref = len(hyp_words), len(ref_words) wer_info = {'sub': 0, 'ins': 0, 'del': 0, 'nw': len(ref_words)} aligned_html = '' matched_ref = '' while pos_hyp > 0 or pos_ref > 0: err_type = '' # Distinguish error type by back tracking if pos_ref == 0: err_type = 'ins' elif pos_hyp == 0: err_type = 'del' else: if hyp_words[pos_hyp - 1] == ref_words[pos_ref - 1]: err_type = 'none' # correct error elif distmat[pos_ref][pos_hyp] == distmat[pos_ref - 1][pos_hyp - 1] + 1: err_type = 'sub' # substitute error elif distmat[pos_ref][pos_hyp] == distmat[pos_ref - 1][pos_hyp] + 1: err_type = 'del' # deletion error elif distmat[pos_ref][pos_hyp] == distmat[pos_ref][pos_hyp - 1] + 1: err_type = 'ins' # insersion error else: raise ValueError('fail to parse edit distance matrix.') # Generate aligned_html if self._html_handler: if pos_hyp == 0 or not hyp_words: tmph = ' ' else: tmph = hyp_words[pos_hyp - 1] if pos_ref == 0 or not ref_words: tmpr = ' ' else: tmpr = ref_words[pos_ref - 1] aligned_html = self._html_handler(tmph, tmpr, err_type) + aligned_html # If no error, go to previous ref and hyp. if err_type == 'none': matched_ref = hyp_words[pos_hyp - 1] + ' ' + matched_ref pos_hyp, pos_ref = pos_hyp - 1, pos_ref - 1 continue # Update error. wer_info[err_type] += 1 # Adjust position of ref and hyp. if err_type == 'del': pos_ref = pos_ref - 1 elif err_type == 'ins': pos_hyp = pos_hyp - 1 else: # err_type == 'sub' pos_hyp, pos_ref = pos_hyp - 1, pos_ref - 1 # Verify the computation of edit distance finishes assert distmat[-1][-1] == wer_info['ins'] + \ wer_info['del'] + wer_info['sub'] # Accumulate err_info before the next (hyp, ref). for k in wer_info: self.wer_info[k] += wer_info[k] # Collect aligned_htmls. if self._html_handler: self.aligned_htmls += [aligned_html] # Update key phrase info. if self.key_phrases: for w in self.key_phrases: self.ref_keyphrase_counts[w] += reference.count(w) self.hyp_keyphrase_counts[w] += hypothesis.count(w) self.matched_keyphrase_counts[w] += matched_ref.count(w) def GetWER(self): """Compute Word Error Rate (WER). Note WER can be larger than 100.0, esp when there are many insertion errors. 
Returns: WER as percentage number, usually between 0.0 to 100.0 """ nref = self.wer_info['nw'] nref = max(1, nref) # non_zero value for division total_error = self.wer_info['ins'] \ + self.wer_info['del'] + self.wer_info['sub'] return total_error * 100.0 / nref def GetBreakdownWER(self): """Compute breakdown WER. Returns: A dictionary with del/ins/sub as key, and the error rates in percentage number as value. """ nref = self.wer_info['nw'] nref = max(1, nref) # non_zero value for division wer_breakdown = dict() wer_breakdown['ins'] = self.wer_info['ins'] * 100.0 / nref wer_breakdown['del'] = self.wer_info['del'] * 100.0 / nref wer_breakdown['sub'] = self.wer_info['sub'] * 100.0 / nref return wer_breakdown def GetKeyPhraseStats(self): """Measure the Jaccard similarity of key phrases between hyps and refs. Returns: jaccard_similarity: jaccard similarity, between 0.0 and 1.0 F1_keyphrase: F1 score (=2/(1/prec + 1/recall)), between 0.0 and 1.0 matched_keyphrases: num of matched key phrases. ref_keyphrases: num of key phrases in the reference strings. hyp_keyphrases: num of key phrases in the hypothesis strings. """ matched_k = sum(self.matched_keyphrase_counts.values()) ref_k = sum(self.ref_keyphrase_counts.values()) hyp_k = sum(self.hyp_keyphrase_counts.values()) joined_k = ref_k + hyp_k - matched_k joined_k = max(1, joined_k) # non_zero value for division jaccard_similarity = matched_k * 1.0 / joined_k f1_k = 2.0 * matched_k / max(ref_k + hyp_k, 1.0) return (jaccard_similarity, f1_k, matched_k, ref_k, hyp_k) def GetSummaries(self): """Generate strings to summarize word errors and key phrase errors. Returns: str_sum: string summarizing total error, total word and WER. str_details: string breaking down three error types: del, ins, sub. str_str_keyphrases_info: string summarizing kerphrase information. 
""" nref = self.wer_info['nw'] total_error = self.wer_info['ins'] \ + self.wer_info['del'] + self.wer_info['sub'] str_sum = 'total WER = %d, total word = %d, wer = %.2f%%' % ( total_error, nref, self.GetWER()) str_details = 'Error breakdown: del = %.2f%%, ins=%.2f%%, sub=%.2f%%' % ( self.wer_info['del'] * 100.0 / nref, self.wer_info['ins'] * 100.0 / nref, self.wer_info['sub'] * 100.0 / nref) str_keyphrases_info = '' if self.key_phrases: jaccard_p, f1_p, matched_p, ref_p, hyp_p = self.GetKeyPhraseStats() str_keyphrases_info = ('matched %d key phrases (%d in ref, %d in hyp), ' 'jaccard similarity=%.2f, F1=%.2f') % \ (matched_p, ref_p, hyp_p, jaccard_p, f1_p) return str_sum, str_details, str_keyphrases_info def main(argv): hypothesis = open(argv[1], 'r').read() reference = open(argv[2], 'r').read() if len(argv) == 4: phrase_lines = open(argv[3]).readlines() keyphrases = [line.strip() for line in phrase_lines] else: keyphrases = None wer_obj = SimpleWER( key_phrases=keyphrases, html_handler=HighlightAlignedHtml, preprocess_handler=RemoveCommentTxtPreprocess) wer_obj.AddHypRef(hypothesis, reference) str_summary, str_details, str_keyphrases_info = wer_obj.GetSummaries() print(str_summary) print(str_details) print(str_keyphrases_info) try: fn_output = argv[1] + '_diagnosis.html' aligned_html = '<br>'.join(wer_obj.aligned_htmls) with open(fn_output, 'wt') as fp: fp.write('<body><html>') fp.write('<div>%s</div>' % aligned_html) fp.write('</body></html>') except IOError: print('failed to write diagnosis html') if __name__ == '__main__': if len(sys.argv) < 3 or len(sys.argv) > 4: print(""" Example of Usage: python simple_wer_v2.py file_hypothesis file_reference or python simple_wer_v2.py file_hypothesis file_reference file_keyphrases where file_hypothesis is the file name for hypothesis text file_reference is the file name for reference text. file_keyphrases (optional) is the filename of key phrases over which you want to measure accuracy. Or you can use this file as a library, and call class SimpleWER .AddHypRef(hyp, ref): add one pair of hypothesis/reference. You can call this function multiple times. .GetWER(): get the Word Error Rate (WER). .GetBreakdownWER(): get the del/ins/sub breakdown WER. .GetKeyPhraseStats(): get stats for key phrases. The first value is Jaccard Similarity of key phrases. .GetSummaries(): generate strings to summarize word error and key phrase errors. """) sys.exit(1) main(sys.argv)
src/onedrivesdk/error.py
meson800/onedrive-sdk-python
912
132409
<filename>src/onedrivesdk/error.py
'''
------------------------------------------------------------------------------
 Copyright (c) 2015 Microsoft Corporation

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 copies of the Software, and to permit persons to whom the Software is
 furnished to do so, subject to the following conditions:

 The above copyright notice and this permission notice shall be included in
 all copies or substantial portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 IN THE SOFTWARE.
------------------------------------------------------------------------------
'''
from __future__ import unicode_literals


class OneDriveError(Exception):

    def __init__(self, prop_dict, status_code):
        """Initialize a OneDriveError given the JSON error response
        dictionary and the HTTP status code.

        Args:
            prop_dict (dict): A dictionary containing the response
                from OneDrive
            status_code (int): The HTTP status code (ex. 200, 201, etc.)
        """
        # Fall back to a generic "malformed" error if the response does not
        # carry the expected fields.
        if "code" not in prop_dict or "message" not in prop_dict:
            prop_dict["code"] = ErrorCode.Malformed
            prop_dict["message"] = "The received response was malformed"

        super(OneDriveError, self).__init__(
            prop_dict["code"] + " - " + prop_dict["message"])
        self._prop_dict = prop_dict
        self._status_code = status_code

    @property
    def status_code(self):
        """The HTTP status code.

        Returns:
            int: The HTTP status code
        """
        return self._status_code

    @property
    def code(self):
        """The OneDrive error code sent back in the response. Possible
        codes can be found in the :class:`ErrorCode` enum.

        Returns:
            str: The error code
        """
        return self._prop_dict["code"]

    @property
    def inner_error(self):
        """Creates a OneDriveError object from the specified inner error
        within the response.

        Returns:
            :class:`OneDriveError`: Error from within the inner response
        """
        return (OneDriveError(self._prop_dict["innererror"], self.status_code)
                if "innererror" in self._prop_dict else None)

    def matches(self, code):
        """Recursively searches the :class:`OneDriveError` to find whether
        the specified error code was returned.

        Args:
            code (str): The error code to search for

        Returns:
            bool: True if the error code was found, False otherwise
        """
        if self.code == code:
            return True
        return False if self.inner_error is None else self.inner_error.matches(code)


class ErrorCode(object):
    #: Access was denied to the resource
    AccessDenied = "accessDenied"
    #: The activity limit has been reached
    ActivityLimitReached = "activityLimitReached"
    #: A general exception occurred
    GeneralException = "generalException"
    #: An invalid range was provided
    InvalidRange = "invalidRange"
    #: An invalid request was provided
    InvalidRequest = "invalidRequest"
    #: The requested resource was not found
    ItemNotFound = "itemNotFound"
    #: Malware was detected in the resource
    MalwareDetected = "malwareDetected"
    #: The name already exists
    NameAlreadyExists = "nameAlreadyExists"
    #: The action was not allowed
    NotAllowed = "notAllowed"
    #: The action was not supported
    NotSupported = "notSupported"
    #: The resource was modified
    ResourceModified = "resourceModified"
    #: A resync is required
    ResyncRequired = "resyncRequired"
    #: The OneDrive service is not available
    ServiceNotAvailable = "serviceNotAvailable"
    #: The quota for this OneDrive has been reached
    QuotaLimitReached = "quotaLimitReached"
    #: The user is unauthenticated
    Unauthenticated = "unauthenticated"
    #: The response was malformed
    Malformed = "malformed"
senet_train.py
sanghuynh1501/speech_denoising_tensorflow
184
132452
from model import *
from data_import import *
import sys, getopt

# Explicit imports for names used below; the wildcard imports above may
# already provide them, but listing them keeps the script self-contained.
import numpy as np
import tensorflow as tf
from tqdm import tqdm

# SPEECH ENHANCEMENT NETWORK
SE_LAYERS = 13       # NUMBER OF INTERNAL LAYERS
SE_CHANNELS = 64     # NUMBER OF FEATURE CHANNELS PER LAYER
SE_LOSS_LAYERS = 6   # NUMBER OF FEATURE LOSS LAYERS
SE_NORM = "NM"       # TYPE OF LAYER NORMALIZATION (NM, SBN or None)
SE_LOSS_TYPE = "FL"  # TYPE OF TRAINING LOSS (L1, L2 or FL)

# FEATURE LOSS NETWORK
LOSS_LAYERS = 14         # NUMBER OF INTERNAL LAYERS
LOSS_BASE_CHANNELS = 32  # NUMBER OF FEATURE CHANNELS PER LAYER IN FIRST LAYER
LOSS_BLK_CHANNELS = 5    # NUMBER OF LAYERS BETWEEN CHANNEL NUMBER UPDATES
LOSS_NORM = "SBN"        # TYPE OF LAYER NORMALIZATION (NM, SBN or None)

SET_WEIGHT_EPOCH = 10  # NUMBER OF EPOCHS BEFORE FEATURE LOSS BALANCE
SAVE_EPOCHS = 10       # NUMBER OF EPOCHS BETWEEN MODEL SAVES

log_file = open("logfile.txt", 'w+')

# COMMAND LINE OPTIONS
datafolder = "dataset"
modfolder = "models"
outfolder = "."

try:
    # Long option names are given as a list and match the handlers below.
    opts, args = getopt.getopt(sys.argv[1:], "hd:l:o:",
                               ["datafolder=", "lossfolder=", "outfolder="])
except getopt.GetoptError:
    print('Usage: python senet_train.py -d <datafolder> -l <lossfolder> -o <outfolder>')
    sys.exit(2)
for opt, arg in opts:
    if opt == '-h':
        print('Usage: python senet_train.py -d <datafolder> -l <lossfolder> -o <outfolder>')
        sys.exit()
    elif opt in ("-d", "--datafolder"):
        datafolder = arg
    elif opt in ("-l", "--lossfolder"):
        modfolder = arg
    elif opt in ("-o", "--outfolder"):
        outfolder = arg

print('Data folder is "' + datafolder + '/"')
print('Loss model folder is "' + modfolder + '/"')
print('Output model folder is "' + outfolder + '/"')

# SET LOSS FUNCTIONS AND PLACEHOLDERS
with tf.variable_scope(tf.get_variable_scope()):
    input = tf.placeholder(tf.float32, shape=[None, 1, None, 1])
    clean = tf.placeholder(tf.float32, shape=[None, 1, None, 1])

    enhanced = senet(input, n_layers=SE_LAYERS, norm_type=SE_NORM,
                     n_channels=SE_CHANNELS)

    if SE_LOSS_TYPE == "L1":  # L1 LOSS
        loss_weights = tf.placeholder(tf.float32, shape=[])
        loss_fn = l1_loss(clean, enhanced)
    elif SE_LOSS_TYPE == "L2":  # L2 LOSS
        loss_weights = tf.placeholder(tf.float32, shape=[])
        loss_fn = l2_loss(clean, enhanced)
    else:  # FEATURE LOSS
        loss_weights = tf.placeholder(tf.float32, shape=[SE_LOSS_LAYERS])
        loss_fn = featureloss(clean, enhanced, loss_weights,
                              loss_layers=SE_LOSS_LAYERS,
                              n_layers=LOSS_LAYERS, norm_type=LOSS_NORM,
                              base_channels=LOSS_BASE_CHANNELS,
                              blk_channels=LOSS_BLK_CHANNELS)

# LOAD DATA
trainset, valset = load_full_data_list(datafolder=datafolder)
trainset, valset = load_full_data(trainset, valset)

# TRAINING OPTIMIZER
opt = tf.train.AdamOptimizer(learning_rate=1e-4).\
    minimize(loss_fn[0], var_list=[var for var in tf.trainable_variables()
                                   if var.name.startswith("se_")])

# BEGIN SCRIPT #################################################################

# INITIALIZE GPU CONFIG
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)

print("Config ready")

sess.run(tf.global_variables_initializer())

print("Session initialized")

# LOAD FEATURE LOSS
if SE_LOSS_TYPE == "FL":
    loss_saver = tf.train.Saver([var for var in tf.trainable_variables()
                                 if var.name.startswith("loss_")])
    loss_saver.restore(sess, "./%s/loss_model.ckpt" % modfolder)

Nepochs = 320
saver = tf.train.Saver([var for var in tf.trainable_variables()
                        if var.name.startswith("se_")])

################################################################################

if SE_LOSS_TYPE == "FL":
    loss_train = np.zeros((len(trainset["innames"]), SE_LOSS_LAYERS + 1))
    loss_val = np.zeros((len(valset["innames"]), SE_LOSS_LAYERS + 1))
else:
    loss_train = np.zeros((len(trainset["innames"]), 1))
    loss_val = np.zeros((len(valset["innames"]), 1))

if SE_LOSS_TYPE == "FL":
    loss_w = np.ones(SE_LOSS_LAYERS)
else:
    loss_w = []

################################################################################

for epoch in range(1, Nepochs + 1):

    print("Epoch no.%d" % epoch)

    # TRAINING EPOCH ###########################################################

    ids = np.random.permutation(len(trainset["innames"]))  # RANDOM FILE ORDER

    for id in tqdm(range(0, len(ids)), file=sys.stdout):

        i = ids[id]                           # RANDOMIZED ITERATION INDEX
        inputData = trainset["inaudio"][i]    # LOAD DEGRADED INPUT
        outputData = trainset["outaudio"][i]  # LOAD GROUND TRUTH

        # TRAINING ITERATION
        _, loss_vec = sess.run([opt, loss_fn],
                               feed_dict={input: inputData,
                                          clean: outputData,
                                          loss_weights: loss_w})

        # SAVE ITERATION LOSS
        loss_train[id, 0] = loss_vec[0]
        if SE_LOSS_TYPE == "FL":
            for j in range(SE_LOSS_LAYERS):
                loss_train[id, j + 1] = loss_vec[j + 1]

    # PRINT EPOCH TRAINING LOSS AVERAGE
    log_str = "T: %d\t " % (epoch)
    if SE_LOSS_TYPE == "FL":
        for j in range(SE_LOSS_LAYERS + 1):
            log_str += ", %10.6e" % (np.mean(loss_train, axis=0)[j])
    else:
        log_str += ", %10.6e" % (np.mean(loss_train, axis=0)[0])
    log_file.write(log_str + "\n")
    log_file.flush()

    # SET WEIGHTS AFTER M EPOCHS
    if SE_LOSS_TYPE == "FL" and epoch == SET_WEIGHT_EPOCH:
        loss_w = np.mean(loss_train, axis=0)[1:]

    # SAVE MODEL EVERY N EPOCHS
    if epoch % SAVE_EPOCHS != 0:
        continue
    saver.save(sess, outfolder + "/se_model.ckpt")

    # VALIDATION EPOCH #########################################################

    print("Validation epoch")

    for id in tqdm(range(0, len(valset["innames"])), file=sys.stdout):

        i = id                              # NON-RANDOMIZED ITERATION INDEX
        inputData = valset["inaudio"][i]    # LOAD DEGRADED INPUT
        outputData = valset["outaudio"][i]  # LOAD GROUND TRUTH

        # VALIDATION ITERATION
        output, loss_vec = sess.run([enhanced, loss_fn],
                                    feed_dict={input: inputData,
                                               clean: outputData,
                                               loss_weights: loss_w})

        # SAVE ITERATION LOSS
        loss_val[id, 0] = loss_vec[0]
        if SE_LOSS_TYPE == "FL":
            for j in range(SE_LOSS_LAYERS):
                loss_val[id, j + 1] = loss_vec[j + 1]

    # PRINT VALIDATION EPOCH LOSS AVERAGE
    log_str = "V: %d " % (epoch)
    if SE_LOSS_TYPE == "FL":
        for j in range(SE_LOSS_LAYERS + 1):
            log_str += ", %10.6e" % (np.mean(loss_val, axis=0)[j] * 1e9)
    else:
        log_str += ", %10.6e" % (np.mean(loss_val, axis=0)[0] * 1e9)
    log_file.write(log_str + "\n")
    log_file.flush()

log_file.close()