content: string (length 0 – 894k)
type: string (2 classes)
import os
from os.path import basename

from pandas import read_csv

from NaiveBayes import *
from DecisionTrees import *
from KNN import *
from K_Means import *
from Evaluator import *
from PickleFiles import *


def run():
    try:
        os.mkdir(os.path.join("", "myFiles"))
    except FileExistsError:
        pass

    ask_to_load = input("Restore a recently created model?\n1) Yes\n2) No\nYour choice: ")
    if ask_to_load == '1':
        pickle_file = input("Enter dump file destination: ")
        file_dump = loadData(pickle_file)
        analysis(file_dump)
    if ask_to_load == '2':
        discretization_mode = {'1': 'equal-width', '2': 'equal-frequency', '3': 'entropy'}
        train_path = input("Please enter training file location: ")
        test_path = input("Please enter testing file location: ")
        user_bins = int(input("\nEnter amount of bins: "))
        bin_mode = input("\nEnter discretization mode:\n1) Equal-Width\n2) Equal-Frequency\n3) Entropy\nYour choice: ")
        user_algorithm = input("\nEnter algorithm mode:\n"
                               "1) Decision Tree\n"
                               "2) SKLearn Decision Tree\n"
                               "3) Naive Bayes\n"
                               "4) SKLearn Naive Bayes\n"
                               "5) KNN\n"
                               "6) K-Means\n"
                               "Your choice: ")
        bin_mode = discretization_mode[bin_mode]
        train = read_csv(filepath_or_buffer=train_path, delimiter=',')
        test = read_csv(filepath_or_buffer=test_path, delimiter=',')

        if user_algorithm == '1':
            decision_tree = DecisionTree(train, test, basename(train_path), basename(test_path),
                                         0.001, user_bins, bin_mode)
            decision_tree.run()
            storeData(decision_tree)
            analysis(decision_tree)
        if user_algorithm == '2':
            decision_tree_sk = DecisionTreeSKLearn(train, test, 10, 10,
                                                   basename(train_path), basename(test_path))
            decision_tree_sk.run()
            storeData(decision_tree_sk)
            analysis(decision_tree_sk)
        if user_algorithm == '3':
            naive_bayes = NaiveBayes(train, test, basename(train_path), basename(test_path),
                                     user_bins, bin_mode)
            naive_bayes.run()
            storeData(naive_bayes)
            analysis(naive_bayes)
        if user_algorithm == '4':
            naive_bayes_sk = NaiveBayes_SKLearn(train, test, basename(train_path), basename(test_path))
            naive_bayes_sk.run()
            storeData(naive_bayes_sk)
            analysis(naive_bayes_sk)
        if user_algorithm == '5':
            knn = KNN(train, test, int(input("How many K clusters?\nYour choice: ")),
                      basename(train_path), basename(test_path))
            knn.run()
            storeData(knn)
            analysis(knn)
        if user_algorithm == '6':
            k_means = KMeans(train, int(input("How many K clusters?\nYour choice: ")), 100, 30)
            k_means.run()
            storeData(k_means)
            analysis(k_means)


repeated = True
while repeated:
    run()
    if input("\n\nRun Again?\n1) Yes\n2) No\nYour choice: ") == '2':
        repeated = False
python
import pickle import pandas as pd import nltk import re from nltk.corpus import wordnet as ewn import numpy as np def load_dataset(path,train): train_data = np.load(path, allow_pickle=True) ########if(not train): #train_data = train_data[()] embeddings = train_data['embeddings'] labels = train_data['labels'] sense_keys = train_data['synsets'] synsets = [sc2ss(sensekey) for sensekey in sense_keys] print('loaded BERT embeddings') return embeddings, labels, synsets def sc2ss(sensekey): '''Look up a synset given the information from SemCor''' ### Assuming it is the same WN version (e.g. 3.0) # TO DO: Need a better way of extracting string synset = str(ewn.lemma_from_key(sensekey).synset())[8:-2] #print(synset) return synset count = 0 def get_neg_sampling(data_loc,loc,save_loc): print(data_loc) print(loc) embeddings, labels, synsets = load_dataset(data_loc,True) df = pd.read_csv(loc,sep='\t') def get_key(sent): return sent.split()[0] df['key'] = df['gloss'].apply(get_key) print('keys done') def sc2ss(sensekey): '''Look up a synset given the information from SemCor''' ### Assuming it is the same WN version (e.g. 3.0) # TO DO: Need a better way of extracting string synset = str(ewn.lemma_from_key(sensekey).synset())[8:-2] #print(synset) return synset def get_wordnet_pos(treebank_tag): if treebank_tag.startswith('J'): return 's' elif treebank_tag.startswith('V'): return 'v' elif treebank_tag.startswith('N'): return 'n' elif treebank_tag.startswith('R'): return 'r' else: return None def sensekey_2_syn(x): syn = sc2ss(x).split('.')[1] return syn df['syn'] = df['sense_key'].apply(sensekey_2_syn) print('got syn') def get_tag(x): sent = x['sentence'] #key = x['gloss'].split()[0] key = x['key'] #sense = x['sense_key'] global count count+=1 if(count%2000==0): print('We are at line ',count) #syn = sc2ss(sense).split('.')[1] syn = x['syn'] #sent is a single sentence tokens = nltk.word_tokenize(sent) tokens = [t for t in tokens if not re.search(r'[^\w\d\s]',t)] tags = nltk.pos_tag(tokens) for i in range(len(tokens)): if tokens[i]==key: val = get_wordnet_pos(tags[i][1]) if val==syn: return 1 else: return 0 return 0 print('done') df['pos'] = df.apply(get_tag,axis=1) out = df['pos'].to_numpy() #print(df['pos'].head()) #print(df['pos'].sum()) #np.save('mask_train_pos.npy',out) embeddings = embeddings[out==1] labels = labels[out==1] synsets = np.array(synsets)[out==1] dataset = {} dataset['embeddings'] = embeddings dataset['labels'] = labels dataset['synsets'] = synsets with open(save_loc, 'wb') as handle: pickle.dump(out, handle, protocol=pickle.HIGHEST_PROTOCOL) return dataset import argparse if __name__ =='__main__': parser = argparse.ArgumentParser() parser.add_argument("--embeddings_loc",default=None,type=str,help="Location to embeddings of numpy") parser.add_argument("--csv_loc",default=None,type=str,help="Location to the csv") parser.add_argument("--save_location",default=None,type=str,help="Location for the final dataset") args = parser.parse_args() d = get_neg_sampling(data_loc=args.embeddings_loc,loc=args.csv_loc,save_loc = args.save_location) # d = get_neg_sampling(data_loc='combined.npy',loc= '/home/pratyushgarg11/data/bert-n-graph-embeddings/GlossBert-GraphEmbeddings/Training_Corpora/SemCor/semcor_train_sent_cls_ws.csv') ''' count= 0 def count_zeros(word): global count if not word: count+=1 return 0 _ = words.apply(count_zeros) print(count) print(words.head()) '''
python
import io import json import time import errno import socket import struct import threading from . import logs from . import utils TIMEOUT = 0.1 BACKLOG = socket.SOMAXCONN CHUNK_SIZE = io.DEFAULT_BUFFER_SIZE error = socket.error timeout = socket.timeout log = logs.get(__name__) def start_client(address, handler, stop=None, retry_limit=-1, retry_interval=1): stop = stop or threading.Event() t = utils.start_thread(client_loop, address, handler, stop, retry_limit, retry_interval) return (StoppableThread(t, stop), address) def client_loop(address, handler, stop, retry_limit, retry_interval): count = 0 timeout = TIMEOUT while not stop.is_set(): try: with connect(address, timeout) as sock: sock.sendinit() handler(sock) except socket.error as e: log.error('connection error: %s', e) if stop.is_set(): break count += 1 if retry_limit != -1 and count > retry_limit: log.warning('retry limit reached (attempt #%s)', count) break time.sleep(retry_interval) log.warning('retrying connection (attempt #%s)', count) def start_server(address, handler, stop=None, backlog=None): stop = stop or threading.Event() sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind(address) sock.listen(backlog or BACKLOG) host, port = sock.getsockname() log.info('listening: %s:%s', host, port) t = utils.start_thread(server_loop, sock, handler, stop) return (StoppableThread(t, stop), (host, port)) def server_loop(server_sock, handler, stop): timeout = TIMEOUT server_sock.settimeout(timeout) while not stop.is_set(): try: s, addr = server_sock.accept() except socket.timeout: continue log.info('connected: %s:%s', *addr) with SockIO(s) as sock: sock.recvinit() sock.settimeout(timeout) handler(sock) def connect(address, timeout=None): log.debug('connecting: %s:%s', *address) sock = socket.create_connection(address, timeout) log.info('connected: %s:%s', *address) return SockIO(sock) class SockIO(object): def __init__(self, sock, chunk_size=None): self._sock = sock self._chunk_size = chunk_size or CHUNK_SIZE def sendinit(self): log.debug('sendinit') self.sendmsg({'cmd': 'init'}) def recvinit(self): msg = self.recvmsg() log.debug('recvinit: %s', msg) try: if msg['cmd'] == 'init': return except Exception: pass raise InvalidInitialization() def sendmsg(self, msg): data = json.dumps(msg).encode('utf8') self.send(data) def recvmsg(self): data = self.recv() return json.loads(data.decode('utf8')) def send(self, data): data_len = len(data) size = struct.pack('>I', data_len) self._sock.sendall(size) self._sock.sendall(data) def recv(self): return b''.join(self.recviter()) def recviter(self): buf = b''.join(self.recvsize(4)) data_len = struct.unpack('>I', buf)[0] for chunk in self.recvsize(data_len): yield chunk def recvsize(self, size): sock = self._sock pos = 0 chunk_size = min(size, self._chunk_size) while pos < size: chunk = sock.recv(min(size-pos, chunk_size)) if not chunk: raise ReceiveInterrupted() pos += len(chunk) yield chunk def settimeout(self, t): self._sock.settimeout(t) def close(self): try: self._sock.shutdown(socket.SHUT_RDWR) except (OSError, socket.error) as e: # ignore if not connected if e.errno not in (errno.ENOTCONN,): raise self._sock.close() def __enter__(self): return self def __exit__(self, etype, evalue, etb): self.close() class StoppableThread(object): def __init__(self, thread, stop): self._thread = thread self._stop = stop def stop(self): self._stop.set() def join(self): self._thread.join() class SockIOError(Exception): pass class 
InvalidInitialization(SockIOError): pass class ReceiveInterrupted(SockIOError, error): pass
python
from flask import Flask, request, render_template from flask_cors import cross_origin import pickle app = Flask(__name__) model = open('car.pkl','rb') regressor = pickle.load(model) @app.route("/") @cross_origin() def home(): return render_template('car.html') @app.route("/predict", methods=["GET","POST"]) @cross_origin() def predict(): #CAR BRAND AMBASSADOR=0 AUDI=0 BENTLEY=0 BMW=0 CHEVROLET=0 DATSUN=0 FIAT=0 FORCE=0 FORD=0 HONDA=0 HYUNDAI=0 ISUZU=0 JAGUAR=0 JEEP=0 LAMBORGHINI=0 LAND=0 MAHINDRA=0 MARUTI=0 MERCEDES=0 MINI=0 MITSUBISHI=0 NISSAN=0 PORSCHE=0 RENAULT=0 SKODA=0 TATA=0 TOYOTA=0 VOLKSWAGEN=0 VOLVO=0 #LOCATION Ahmedabad=0 Bangalore=0 Chennai=0 Pune=0 Mumbai=0 Coimbatore=0 Hyderabad=0 Jaipur=0 Kochi=0 Kolkata=0 Delhi=0 #FUEL Diesel=0 LPG=0 Petrol=0 CNG=0 #TRANSMISSION Manual=0 if request.method == 'POST': name = request.form['Brand'] if name == 'AUDI': AUDI=1 elif name == 'BENTLEY': BENTLEY=1 elif name == 'BMW': BMW=1 elif name == 'CHEVROLET': CHEVROLET=1 elif name == 'DATSUN': DATSUN=1 elif name == 'FIAT': FIAT=1 elif name == 'FORCE': FORCE=1 elif name == 'FORD': FORD=1 elif name == 'HONDA': HONDA=1 elif name == 'HYUNDAI': HYUNDAI=1 elif name == 'ISUZU': ISUZU=1 elif name == 'JAGUAR': JAGUAR=1 elif name == 'JEEP': JEEP=1 elif name == 'LAMBORGHINI': LAMBORGHINI=1 elif name == 'LAND': LAND=1 elif name == 'MAHINDRA': MAHINDRA=1 elif name == 'MARUTI': MARUTI=1 elif name == 'MERCEDES-BENZ': MERCEDES=1 elif name == 'MINI': MINI=1 elif name == 'MITSUBUSHI': MITSUBISHI=1 elif name == 'NISSAN': NISSAN=1 elif name == 'PORSCHE': PORSCHE=1 elif name == 'RENAULT': RENAULT=1 elif name == 'SKODA': SKODA=1 elif name == 'TATA': TATA=1 elif name == 'TOYOTA': TOYOTA=1 elif name == 'VOLKSWAGEN': VOLKSWAGEN=1 elif name == 'VOLVO': VOLVO=1 else: AMBASSADOR=1 loc = request.form['Location'] if loc=='Bangalore': Bangalore=1 elif loc=='Chennai': Chennai=1 elif loc=='Pune': Pune=1 elif loc=='Mumbai': Mumbai=1 elif loc=='Coimbatore': Coimbatore=1 elif loc=='Hyderabad': Hyderabad=1 elif loc=='Jaipur': Jaipur=1 elif loc=='Kochi': Kochi=1 elif loc=='Kolkata': Kolkata=1 elif loc=='Delhi': Delhi=1 else: Ahmedabad=1 fuel = request.form['Fuel'] if fuel=='Diesel': Diesel=1 elif fuel=='Petrol': Petrol=1 elif fuel=='LPG': LPG=1 else: CNG=1 trans = request.form['Transmission'] if trans == 'Manual': Manual=1 Year = request.form['Year'] Kms = request.form['Kms'] Own = request.form['Owner'] Mileage = request.form['Mileage'] Engine = request.form['Engine'] Power = request.form['Power'] Seat = request.form['Seats'] #PREDICTION Price = regressor.predict([[ Year,Kms,Own,Mileage,Engine,Power,Seat,AUDI,BENTLEY,BMW,CHEVROLET,DATSUN,FIAT,FORCE,FORD,HONDA, HYUNDAI,ISUZU,JAGUAR,JEEP,LAMBORGHINI,LAND,MAHINDRA,MARUTI,MERCEDES,MINI,MITSUBISHI,NISSAN, PORSCHE,RENAULT,SKODA,TATA,TOYOTA,VOLKSWAGEN,VOLVO,Bangalore,Chennai,Coimbatore,Delhi,Hyderabad, Jaipur,Kochi,Kolkata,Mumbai,Pune,Diesel,LPG,Petrol,Manual ]]) output=round(Price[0],2) return render_template('car.html',prediction_text="Your car's price should be Rs. {} lakhs. This price may change depending on the condition of the car.".format(output)) return render_template("car.html") if __name__ == "__main__": app.run(debug=True)
python
"""Qgroupbox module.""" # -*- coding: utf-8 -*- from PyQt6 import QtWidgets, QtCore # type: ignore[import] from pineboolib.core import decorators from pineboolib.core import settings from pineboolib import logging from . import qwidget from typing import Any logger = logging.get_logger(__name__) class QGroupBox(QtWidgets.QGroupBox, qwidget.QWidget): # type: ignore [misc] # noqa: F821 """QGroupBox class.""" # style_str: str # _line_width: int presset = QtCore.pyqtSignal(int) selectedId: int line_width: int = 1 def __init__(self, *args, **kwargs) -> None: """Inicialize.""" if len(args): name = None parent = None if isinstance(args[0], str): name = args[0] else: parent = args[0] if len(args) > 1: if isinstance(args[1], str): name = args[1] else: parent = args[1] if parent is not None: super().__init__(parent, **kwargs) else: super().__init__(**kwargs) if name is not None: self.setObjectName(name) else: super().__init__() if not settings.CONFIG.value("ebcomportamiento/spacerLegacy", False): self.setSizePolicy( QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Preferred ) self.setContentsMargins(0, 2, 0, 2) def setLayout(self, layout: QtWidgets.QLayout) -> None: """Set layout to QGroupBox.""" # layout.setContentsMargins(0, 0, 0, 0) # layout.setSpacing(0) super().setLayout(layout) def setLineWidth(self, width: int) -> None: """Set line width.""" style_ = ( "QGroupBox#%s { border: %spx solid gray; margin-top: 20px; border-radius: 3px;}" % (self.objectName(), width) ) self.line_width = width self.setStyleSheet(style_) def setTitle(self, title: str) -> None: """Set title.""" if self.line_width == 0: title = "" if title == "": self.setLineWidth(0) super().setTitle(title) def get_enabled(self) -> bool: """Return if enabled.""" return self.isEnabled() def set_enabled(self, value: bool) -> None: """Set enabled.""" self.setDisabled(not value) @decorators.pyqt_slot(bool) def setShown(self, value: bool) -> None: """Set shown.""" self.setVisible(value) def __setattr__(self, name: str, value: Any) -> None: """Set an attribute especified by name.""" if name == "title": self.setTitle(str(value)) else: super().__setattr__(name, value) @decorators.not_implemented_warn def setFrameShadow(self, frame_shadow: None) -> None: """Set frame shadow.""" pass @decorators.not_implemented_warn def setFrameShape(self, frame_shape: None) -> None: """Set frame shape.""" pass @decorators.not_implemented_warn def newColumn(self) -> None: """Create a new column.""" pass enabled = property(get_enabled, set_enabled)
python
from datetime import timedelta

from django.test import TestCase
from django.utils.timezone import now

from core.models.route import Route
from core.models.station import Station
from core.models.tender import Tender
from core.models.workshop import Workshop

TEST_WORKSHOP = 'Bw Hagen'
TEST_ROUTE = 'KBS 100 Hamburg - Rostock'
TEST_DESCRIPTION = """Die Stadt Hamburg, die Nahverkehrsgesellschaft Schleswig-Holstein und das Verkehrsministerium Mecklenburg-Vorpommern schreiben aus."""


class TenderModelTest(TestCase):
    def setUp(self):
        Route.objects.create(name=TEST_ROUTE, type=Route.LOCAL)
        Station.objects.create(name='Hagen Hbf')
        Workshop.objects.create(name='Bw Hagen', station=Station.objects.get(name="Hagen Hbf"))

    @staticmethod
    def test_create_valid_min():
        Tender.objects.create(route=Route.objects.get(name=TEST_ROUTE))

    @staticmethod
    def test_create_valid_full():
        Tender.objects.create(route=Route.objects.get(name=TEST_ROUTE),
                              text=TEST_DESCRIPTION,
                              start_date=now(),
                              end_date=now() + timedelta(days=2 * 365))

    @staticmethod
    def test_add_workshop():
        tender = Tender.objects.create(route=Route.objects.get(name=TEST_ROUTE))
        tender.workshops.add(Workshop.objects.get(name=TEST_WORKSHOP))

    def test_to_string(self):
        tender = Tender.objects.create(route=Route.objects.get(name=TEST_ROUTE),
                                       text=TEST_DESCRIPTION,
                                       start_date=now(),
                                       end_date=now() + timedelta(days=2 * 365))
        self.assertEqual(tender.__str__(), TEST_ROUTE)
python
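The entry above ships only the tests, not the models they exercise. Below is a minimal, hypothetical sketch of the `Tender` model those assertions imply (route foreign key, optional text and dates, a workshops many-to-many, and `__str__` returning the route name); the app label, field options, and related models are assumptions, not the original source.

```python
# Hypothetical core/models/tender.py reconstructed from the tests above;
# field options (blank/null, on_delete) are guesses, not the original code.
from django.db import models


class Tender(models.Model):
    route = models.ForeignKey('core.Route', on_delete=models.CASCADE)
    text = models.TextField(blank=True, default='')
    start_date = models.DateTimeField(null=True, blank=True)
    end_date = models.DateTimeField(null=True, blank=True)
    workshops = models.ManyToManyField('core.Workshop', blank=True)

    def __str__(self):
        # test_to_string expects the route name back
        return self.route.name
```
python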
from io import StringIO from .. import * from bfg9000 import path from bfg9000 import safe_str from bfg9000.shell.syntax import * class my_safe_str(safe_str.safe_string): pass class TestWriteString(TestCase): def test_variable(self): out = Writer(StringIO()) out.write('foo', Syntax.variable) out.write('$bar', Syntax.variable) self.assertEqual(out.stream.getvalue(), 'foo$bar') def test_shell(self): out = Writer(StringIO()) out.write('foo', Syntax.shell) out.write('$bar', Syntax.shell) self.assertEqual(out.stream.getvalue(), "foo'$bar'") class TestWriteLiteral(TestCase): def test_variable(self): out = Writer(StringIO()) out.write(safe_str.literal('$foo'), Syntax.variable) self.assertEqual(out.stream.getvalue(), '$foo') def test_shell(self): out = Writer(StringIO()) out.write(safe_str.literal('$foo'), Syntax.shell) self.assertEqual(out.stream.getvalue(), '$foo') class TestWriteJbos(TestCase): def test_variable(self): out = Writer(StringIO()) s = safe_str.jbos('$foo', safe_str.literal('bar')) out.write(s, Syntax.variable) self.assertEqual(out.stream.getvalue(), '$foobar') def test_shell(self): out = Writer(StringIO()) s = safe_str.jbos('$foo', safe_str.literal('bar')) out.write(s, Syntax.shell) self.assertEqual(out.stream.getvalue(), "'$foo'bar") class TestWritePath(PathTestCase): def test_variable(self): out = Writer(StringIO()) out.write(self.Path('foo', path.InstallRoot.bindir), Syntax.variable) self.assertEqual(out.stream.getvalue(), self.ospath.join('${bindir}', 'foo')) def test_shell(self): out = Writer(StringIO()) out.write(self.Path('foo', path.InstallRoot.bindir), Syntax.shell) self.assertEqual(out.stream.getvalue(), "'" + self.ospath.join('${bindir}', 'foo') + "'") class TestWriteInvalid(TestCase): def test_invalid(self): out = Writer(StringIO()) with self.assertRaises(TypeError): out.write(my_safe_str(), Syntax.variable) class TestWriteEach(TestCase): def test_basic(self): out = Writer(StringIO()) out.write_each(['foo', 'bar'], Syntax.variable) self.assertEqual(out.stream.getvalue(), 'foo bar') def test_delims(self): out = Writer(StringIO()) out.write_each(['foo', 'bar'], Syntax.variable, ',', '[', ']') self.assertEqual(out.stream.getvalue(), '[foo,bar]') class TestVariable(TestCase): def test_equality(self): self.assertTrue(Variable('foo') == Variable('foo')) self.assertFalse(Variable('foo') != Variable('foo')) self.assertFalse(Variable('foo') == Variable('bar')) self.assertTrue(Variable('foo') != Variable('bar')) def test_concat_str(self): self.assertEqual(Variable('foo') + 'bar', safe_str.jbos( safe_str.literal('${foo}'), 'bar' )) self.assertEqual('foo' + Variable('bar'), safe_str.jbos( 'foo', safe_str.literal('${bar}') )) def test_concat_path(self): self.assertEqual(Variable('foo') + path.Path('bar'), safe_str.jbos( safe_str.literal('${foo}'), path.Path('bar') )) self.assertEqual(path.Path('foo') + Variable('bar'), safe_str.jbos( path.Path('foo'), safe_str.literal('${bar}') )) def test_concat_var(self): self.assertEqual(Variable('foo') + Variable('bar'), safe_str.jbos( safe_str.literal('${foo}'), safe_str.literal('${bar}') )) def test_hash(self): self.assertEqual(hash(Variable('foo')), hash(Variable('foo')))
python
""" A collection of utilities for working with observation dictionaries and different kinds of modalities such as images. """ import numpy as np from copy import deepcopy from collections import OrderedDict import torch import torch.nn.functional as F import robomimic.utils.tensor_utils as TU # DO NOT MODIFY THIS! # This keeps track of observation types - and is populated on call to @initialize_obs_utils_with_obs_specs. # This will be a dictionary that maps observation type (e.g. low_dim, image) to a list of observation # modalities under that observation type. OBS_TYPE_TO_MODALITIES = None def initialize_obs_utils_with_obs_specs(obs_modality_specs): """ This function should be called before using any modality-specific functions in this file, in order to make sure that all utility functions are aware of the observation types (e.g. which ones are low-dimensional, and which ones are images). It constructs a dictionary that map observation type (e.g. low_dim, image) to a list of observation modalities under that type. Input should be a nested dictionary (or list of such dicts) with the following structure: obs_variant (str): obs_type (str): modalities (list) ... ... Example: { "obs": { "low_dim": ["robot0_eef_pos", "robot0_eef_quat"], "image": ["agentview_image", "robot0_eye_in_hand"], } "goal": { "low_dim": ["robot0_eef_pos"], "image": ["agentview_image"] } } In the example, raw observations consist of low-dim and image types, with the robot end effector pose under low-dim, and the agentview and wrist camera images under image, while goal observations also consist of low-dim and image types, with a subset of the raw observation modalities per type. Args: obs_modality_specs (dict or list): A nested dictionary (see docstring above for an example) or a list of nested dictionaries. Accepting a list as input makes it convenient for situations where multiple modules may each have their own modality spec. """ global OBS_TYPE_TO_MODALITIES # accept one or more spec dictionaries - if it's just one, account for this if isinstance(obs_modality_specs, dict): obs_modality_spec_list = [obs_modality_specs] else: obs_modality_spec_list = obs_modality_specs # iterates over observation specs obs_type_mapping = {} for obs_modality_spec in obs_modality_spec_list: # iterates over observation variants (e.g. observations, goals, subgoals) for obs_variant in obs_modality_spec: for obs_type in obs_modality_spec[obs_variant]: # add all modalities for each obs-type to the corresponding list in obs_type_mapping if obs_type not in obs_type_mapping: obs_type_mapping[obs_type] = [] obs_type_mapping[obs_type] += obs_modality_spec[obs_variant][obs_type] # remove duplicate entries and store in global mapping OBS_TYPE_TO_MODALITIES = { obs_type : list(set(obs_type_mapping[obs_type])) for obs_type in obs_type_mapping } print("\n============= Initialized Observation Utils with Obs Spec =============\n") for obs_type in OBS_TYPE_TO_MODALITIES: print("using obs type: {} with modalities: {}".format(obs_type, OBS_TYPE_TO_MODALITIES[obs_type])) def initialize_obs_utils_with_config(config): """ Utility function to parse config and call @initialize_obs_utils_with_obs_specs with the correct arguments. 
Args: config (BaseConfig instance): config object """ if config.algo_name == "hbc": obs_modality_specs = [ config.observation.planner.modalities, config.observation.actor.modalities, ] elif config.algo_name == "iris": obs_modality_specs = [ config.observation.value_planner.planner.modalities, config.observation.value_planner.value.modalities, config.observation.actor.modalities, ] else: obs_modality_specs = [config.observation.modalities] initialize_obs_utils_with_obs_specs(obs_modality_specs=obs_modality_specs) def key_is_obs_type(key, obs_type): """ Check if observation key corresponds to a type @obs_type. Args: key (str): modality name to check obs_type (str): observation type - usually one of "low_dim" or "image" """ assert OBS_TYPE_TO_MODALITIES is not None, "error: must call ObsUtils.initialize_obs_utils_with_obs_config first" return (key in OBS_TYPE_TO_MODALITIES[obs_type]) def key_is_image(key): """ Check if observation key corresponds to image observation. """ return key_is_obs_type(key, obs_type="image") def center_crop(im, t_h, t_w): """ Takes a center crop of an image. Args: im (np.array or torch.Tensor): image of shape (..., height, width, channel) t_h (int): height of crop t_w (int): width of crop Returns: im (np.array or torch.Tensor): center cropped image """ assert(im.shape[-3] >= t_h and im.shape[-2] >= t_w) assert(im.shape[-1] in [1, 3]) crop_h = int((im.shape[-3] - t_h) / 2) crop_w = int((im.shape[-2] - t_w) / 2) return im[..., crop_h:crop_h + t_h, crop_w:crop_w + t_w, :] def batch_image_hwc_to_chw(im): """ Channel swap for images - useful for preparing images for torch training. Args: im (np.array or torch.Tensor): image of shape (batch, height, width, channel) or (height, width, channel) Returns: im (np.array or torch.Tensor): image of shape (batch, channel, height, width) or (channel, height, width) """ start_dims = np.arange(len(im.shape) - 3).tolist() s = start_dims[-1] if len(start_dims) > 0 else -1 if isinstance(im, np.ndarray): return im.transpose(start_dims + [s + 3, s + 1, s + 2]) else: return im.permute(start_dims + [s + 3, s + 1, s + 2]) def batch_image_chw_to_hwc(im): """ Inverse of channel swap in @batch_image_hwc_to_chw. Args: im (np.array or torch.Tensor): image of shape (batch, channel, height, width) or (channel, height, width) Returns: im (np.array or torch.Tensor): image of shape (batch, height, width, channel) or (height, width, channel) """ start_dims = np.arange(len(im.shape) - 3).tolist() s = start_dims[-1] if len(start_dims) > 0 else -1 if isinstance(im, np.ndarray): return im.transpose(start_dims + [s + 2, s + 3, s + 1]) else: return im.permute(start_dims + [s + 2, s + 3, s + 1]) def process_obs(obs_dict): """ Process image observations in observation dictionary to prepare for network input. Args: obs_dict (dict): dictionary mappping observation modality to np.array or torch.Tensor. Leading batch dimensions are optional. Returns: new_dict (dict): dictionary where image modalities have been processsed by @process_image """ new_dict = { k : obs_dict[k] for k in obs_dict } # shallow copy for k in new_dict: if key_is_image(k): new_dict[k] = process_image(new_dict[k]) return new_dict def process_image(image): """ Given image fetched from dataset, process for network input. Converts array to float (from uint8), normalizes pixels to [0, 1], and channel swaps from (H, W, C) to (C, H, W). 
Args: image (np.array or torch.Tensor): image array Returns: processed_image (np.array or torch.Tensor): processed image """ assert image.shape[-1] == 3 # check for channel dimensions image = TU.to_float(image) image /= 255. image = batch_image_hwc_to_chw(image) return image def unprocess_obs(obs_dict): """ Prepare processed image observations for saving to dataset. Inverse of @process_obs. Args: obs_dict (dict): dictionary mappping observation modality to np.array or torch.Tensor. Leading batch dimensions are optional. Returns: new_dict (dict): dictionary where image modalities have been processsed by @unprocess_image """ new_dict = { k : obs_dict[k] for k in obs_dict } # shallow copy for k in new_dict: if key_is_image(k): new_dict[k] = unprocess_image(new_dict[k]) return new_dict def unprocess_image(image): """ Given image prepared for network input, prepare for saving to dataset. Inverse of @process_image. Args: image (np.array or torch.Tensor): image array Returns: unprocessed_image (np.array or torch.Tensor): image passed through inverse operation of @process_image """ assert image.shape[-3] == 3 # check for channel dimension image = batch_image_chw_to_hwc(image) image *= 255. image = TU.to_uint8(image) return image def process_image_shape(image_shape): """ Given image shape in dataset, infer the network input shape. This accounts for the channel swap to prepare images for torch training (see @process_image). Args: image_shape (tuple or list): tuple or list of size 3 or 4, corresponding to the image shape to process Returns: processed_image_shape (tuple): image shape that would result from the output of @process_image """ if len(image_shape) == 3: return image_shape[2], image_shape[0], image_shape[1] elif len(image_shape) == 4: return image_shape[0], image_shape[3], image_shape[1], image_shape[2] else: raise ValueError("cannot handle image shape {}".format(image_shape)) def normalize_obs(obs_dict, obs_normalization_stats): """ Normalize observations using the provided "mean" and "std" entries for each observation modality. The observation dictionary will be modified in-place. Args: obs_dict (dict): dictionary mappping observation modality to np.array or torch.Tensor. Leading batch dimensions are optional. obs_normalization_stats (dict): this should map observation modality keys to dicts with a "mean" and "std" of shape (1, ...) where ... is the default shape for the observation. Returns: obs_dict (dict): obs dict with normalized observation arrays """ # ensure we have statistics for each modality key in the observation assert set(obs_dict.keys()).issubset(obs_normalization_stats) for m in obs_dict: mean = obs_normalization_stats[m]["mean"] std = obs_normalization_stats[m]["std"] # check shape consistency shape_len_diff = len(mean.shape) - len(obs_dict[m].shape) assert shape_len_diff in [0, 1], "shape length mismatch in @normalize_obs" assert mean.shape[shape_len_diff:] == obs_dict[m].shape, "shape mismatch in @normalize obs" # handle case where obs dict is not batched by removing stats batch dimension if shape_len_diff == 1: mean = mean[0] std = std[0] obs_dict[m] = (obs_dict[m] - mean) / std return obs_dict def has_image(obs_keys): """ Returns True if image modalities are present in the list of modalities. 
Args: obs_key (list): list of modalities """ for k in obs_keys: if key_is_image(k): return True return False def repeat_and_stack_observation(obs_dict, n): """ Given an observation dictionary and a desired repeat value @n, this function will return a new observation dictionary where each modality is repeated @n times and the copies are stacked in the first dimension. For example, if a batch of 3 observations comes in, and n is 2, the output will look like [ob1; ob1; ob2; ob2; ob3; ob3] in each modality. Args: obs_dict (dict): dictionary mappping observation modality to np.array or torch.Tensor. Leading batch dimensions are optional. n (int): number to repeat by Returns: repeat_obs_dict (dict): repeated obs dict """ return TU.repeat_by_expand_at(obs_dict, repeats=n, dim=0) def crop_image_from_indices(images, crop_indices, crop_height, crop_width): """ Crops images at the locations specified by @crop_indices. Crops will be taken across all channels. Args: images (torch.Tensor): batch of images of shape [..., C, H, W] crop_indices (torch.Tensor): batch of indices of shape [..., N, 2] where N is the number of crops to take per image and each entry corresponds to the pixel height and width of where to take the crop. Note that the indices can also be of shape [..., 2] if only 1 crop should be taken per image. Leading dimensions must be consistent with @images argument. Each index specifies the top left of the crop. Values must be in range [0, H - CH - 1] x [0, W - CW - 1] where H and W are the height and width of @images and CH and CW are @crop_height and @crop_width. crop_height (int): height of crop to take crop_width (int): width of crop to take Returns: crops (torch.Tesnor): cropped images of shape [..., C, @crop_height, @crop_width] """ # make sure length of input shapes is consistent assert crop_indices.shape[-1] == 2 ndim_im_shape = len(images.shape) ndim_indices_shape = len(crop_indices.shape) assert (ndim_im_shape == ndim_indices_shape + 1) or (ndim_im_shape == ndim_indices_shape + 2) # maybe pad so that @crop_indices is shape [..., N, 2] is_padded = False if ndim_im_shape == ndim_indices_shape + 2: crop_indices = crop_indices.unsqueeze(-2) is_padded = True # make sure leading dimensions between images and indices are consistent assert images.shape[:-3] == crop_indices.shape[:-2] device = images.device image_c, image_h, image_w = images.shape[-3:] num_crops = crop_indices.shape[-2] # make sure @crop_indices are in valid range assert (crop_indices[..., 0] >= 0).all().item() assert (crop_indices[..., 0] < (image_h - crop_height)).all().item() assert (crop_indices[..., 1] >= 0).all().item() assert (crop_indices[..., 1] < (image_w - crop_width)).all().item() # convert each crop index (ch, cw) into a list of pixel indices that correspond to the entire window. # 2D index array with columns [0, 1, ..., CH - 1] and shape [CH, CW] crop_ind_grid_h = torch.arange(crop_height).to(device) crop_ind_grid_h = TU.unsqueeze_expand_at(crop_ind_grid_h, size=crop_width, dim=-1) # 2D index array with rows [0, 1, ..., CW - 1] and shape [CH, CW] crop_ind_grid_w = torch.arange(crop_width).to(device) crop_ind_grid_w = TU.unsqueeze_expand_at(crop_ind_grid_w, size=crop_height, dim=0) # combine into shape [CH, CW, 2] crop_in_grid = torch.cat((crop_ind_grid_h.unsqueeze(-1), crop_ind_grid_w.unsqueeze(-1)), dim=-1) # Add above grid with the offset index of each sampled crop to get 2d indices for each crop. 
# After broadcasting, this will be shape [..., N, CH, CW, 2] and each crop has a [CH, CW, 2] # shape array that tells us which pixels from the corresponding source image to grab. grid_reshape = [1] * len(crop_indices.shape[:-1]) + [crop_height, crop_width, 2] all_crop_inds = crop_indices.unsqueeze(-2).unsqueeze(-2) + crop_in_grid.reshape(grid_reshape) # For using @torch.gather, convert to flat indices from 2D indices, and also # repeat across the channel dimension. To get flat index of each pixel to grab for # each sampled crop, we just use the mapping: ind = h_ind * @image_w + w_ind all_crop_inds = all_crop_inds[..., 0] * image_w + all_crop_inds[..., 1] # shape [..., N, CH, CW] all_crop_inds = TU.unsqueeze_expand_at(all_crop_inds, size=image_c, dim=-3) # shape [..., N, C, CH, CW] all_crop_inds = TU.flatten(all_crop_inds, begin_axis=-2) # shape [..., N, C, CH * CW] # Repeat and flatten the source images -> [..., N, C, H * W] and then use gather to index with crop pixel inds images_to_crop = TU.unsqueeze_expand_at(images, size=num_crops, dim=-4) images_to_crop = TU.flatten(images_to_crop, begin_axis=-2) crops = torch.gather(images_to_crop, dim=-1, index=all_crop_inds) # [..., N, C, CH * CW] -> [..., N, C, CH, CW] reshape_axis = len(crops.shape) - 1 crops = TU.reshape_dimensions(crops, begin_axis=reshape_axis, end_axis=reshape_axis, target_dims=(crop_height, crop_width)) if is_padded: # undo padding -> [..., C, CH, CW] crops = crops.squeeze(-4) return crops def sample_random_image_crops(images, crop_height, crop_width, num_crops, pos_enc=False): """ For each image, randomly sample @num_crops crops of size (@crop_height, @crop_width), from @images. Args: images (torch.Tensor): batch of images of shape [..., C, H, W] crop_height (int): height of crop to take crop_width (int): width of crop to take num_crops (n): number of crops to sample pos_enc (bool): if True, also add 2 channels to the outputs that gives a spatial encoding of the original source pixel locations. This means that the output crops will contain information about where in the source image it was sampled from. Returns: crops (torch.Tensor): crops of shape (..., @num_crops, C, @crop_height, @crop_width) if @pos_enc is False, otherwise (..., @num_crops, C + 2, @crop_height, @crop_width) crop_inds (torch.Tensor): sampled crop indices of shape (..., N, 2) """ device = images.device # maybe add 2 channels of spatial encoding to the source image source_im = images if pos_enc: # spatial encoding [y, x] in [0, 1] h, w = source_im.shape[-2:] pos_y, pos_x = torch.meshgrid(torch.arange(h), torch.arange(w)) pos_y = pos_y.float().to(device) / float(h) pos_x = pos_x.float().to(device) / float(w) position_enc = torch.stack((pos_y, pos_x)) # shape [C, H, W] # unsqueeze and expand to match leading dimensions -> shape [..., C, H, W] leading_shape = source_im.shape[:-3] position_enc = position_enc[(None,) * len(leading_shape)] position_enc = position_enc.expand(*leading_shape, -1, -1, -1) # concat across channel dimension with input source_im = torch.cat((source_im, position_enc), dim=-3) # make sure sample boundaries ensure crops are fully within the images image_c, image_h, image_w = source_im.shape[-3:] max_sample_h = image_h - crop_height max_sample_w = image_w - crop_width # Sample crop locations for all tensor dimensions up to the last 3, which are [C, H, W]. 
# Each gets @num_crops samples - typically this will just be the batch dimension (B), so # we will sample [B, N] indices, but this supports having more than one leading dimension, # or possibly no leading dimension. # # Trick: sample in [0, 1) with rand, then re-scale to [0, M) and convert to long to get sampled ints crop_inds_h = (max_sample_h * torch.rand(*source_im.shape[:-3], num_crops).to(device)).long() crop_inds_w = (max_sample_w * torch.rand(*source_im.shape[:-3], num_crops).to(device)).long() crop_inds = torch.cat((crop_inds_h.unsqueeze(-1), crop_inds_w.unsqueeze(-1)), dim=-1) # shape [..., N, 2] crops = crop_image_from_indices( images=source_im, crop_indices=crop_inds, crop_height=crop_height, crop_width=crop_width, ) return crops, crop_inds
python
# -*- coding: utf-8 -*-
"""
Tests for the salt-run command
"""

from __future__ import absolute_import, print_function, unicode_literals

import logging

import pytest

from tests.support.case import ShellCase
from tests.support.helpers import slowTest

log = logging.getLogger(__name__)


@pytest.mark.usefixtures("salt_sub_minion")
class CacheTest(ShellCase):
    """
    Test the cache runner.
    """

    @slowTest
    def test_cache(self):
        """
        Store, list, fetch, then flush data
        """
        # Store the data
        ret = self.run_run_plus(
            "cache.store",
            bank="cachetest/runner",
            key="test_cache",
            data="The time has come the walrus said",
        )
        # Make sure we can see the new key
        ret = self.run_run_plus("cache.list", bank="cachetest/runner")
        self.assertIn("test_cache", ret["return"])
        # Make sure we can see the new data
        ret = self.run_run_plus(
            "cache.fetch", bank="cachetest/runner", key="test_cache"
        )
        self.assertIn("The time has come the walrus said", ret["return"])
        # Make sure we can delete the data
        ret = self.run_run_plus(
            "cache.flush", bank="cachetest/runner", key="test_cache"
        )
        ret = self.run_run_plus("cache.list", bank="cachetest/runner")
        self.assertNotIn("test_cache", ret["return"])

    @slowTest
    def test_cache_invalid(self):
        """
        cache.store with no arguments should report invalid arguments
        """
        ret = self.run_run_plus("cache.store")
        expected = "Passed invalid arguments:"
        self.assertIn(expected, ret["return"])

    @slowTest
    def test_grains(self):
        """
        Test cache.grains
        """
        ret = self.run_run_plus("cache.grains", tgt="minion")
        self.assertIn("minion", ret["return"])

    @slowTest
    def test_pillar(self):
        """
        Test cache.pillar
        """
        ret = self.run_run_plus("cache.pillar", tgt="minion")
        assert "minion" in ret["return"]
        assert "sub_minion" not in ret["return"]

    @slowTest
    def test_pillar_no_tgt(self):
        """
        Test cache.pillar when no tgt is supplied. This should return pillar
        data for all minions
        """
        ret = self.run_run_plus("cache.pillar")
        assert all(x in ret["return"] for x in ["minion", "sub_minion"])

    @slowTest
    def test_pillar_minion_noexist(self):
        """
        Test cache.pillar when the target does not exist
        """
        ret = self.run_run_plus("cache.pillar", tgt="doesnotexist")
        assert "minion" not in ret["return"]
        assert "sub_minion" not in ret["return"]

    @slowTest
    def test_pillar_minion_tgt_type_pillar(self):
        """
        Test cache.pillar when the target exists and tgt_type is pillar
        """
        ret = self.run_run_plus("cache.pillar", tgt="monty:python", tgt_type="pillar")
        assert all(x in ret["return"] for x in ["minion", "sub_minion"])

    @slowTest
    def test_mine(self):
        """
        Test cache.mine
        """
        ret = self.run_run_plus("cache.mine", tgt="minion")
        self.assertIn("minion", ret["return"])
python
from os import listdir, path import random import csv import re import natsort import numpy import theano from skimage.io import imread from block_designer import BlockDesigner from sampler import Sampler import pdb class ImageFlipOracle(object): """ *_flip methods should take an image_name """ def __init__(self, flip_mode): self.noise = 0 if re.search('\.csv', flip_mode): self.image_name_to_flip_coord = {} with open(flip_mode, 'rb') as csvfile: reader = csv.reader(csvfile) next(reader, None) for row in reader: image_name = row[0] flip_coords = [int(row[1]), int(row[2])] self.image_name_to_flip_coord[image_name] = flip_coords def get_flip_lambda(self, flip_mode, deterministic=False): if re.search('\.csv', flip_mode): if deterministic: return self.align_flip else: return self.noisy_align_flip else: return { "no_flip": self.no_flip, "rand_flip": self.rand_flip, "align_flip": self.align_flip, "noisy_align_flip": self.noisy_align_flip }[flip_mode] def no_flip(self, image_name): return numpy.zeros(2) def rand_flip(self, image_name): return numpy.array([int(round(random.random())), int(round(random.random()))]) def align_flip(self, image_name): return numpy.array(self.image_name_to_flip_coord[image_name]) def noisy_align_flip(self, image_name): """ :param noise: float (0,1) where 1 is fully noise and 0 is fully deterministic. If greater than 0, predetermined correct flips will be swapped with a random flip with Pr(noise) """ if random.random() < self.noise: return ((self.align_flip(image_name) + self.rand_flip(image_name)) % 2) else: return self.align_flip(image_name) def reset_noise(self, level): assert(level >= 0 and level <= 1) self.noise = level class CropOracle(object): def __init__(self, out_dim): self.out_dim = out_dim def bottom_right_crop(self, img): h,w,c = img.shape max_t = h - self.out_dim max_l = w - self.out_dim return(max_t,h, max_l,w) def center_crop(self, img): max_t,h, max_l,w = self.bottom_right_crop(img) center_t = max_t / 2 center_l = max_l / 2 return(center_t, center_t + self.out_dim, center_l, center_l + self.out_dim) def uniform_crop(self, img): max_t,h, max_l,w = self.bottom_right_crop(img) rand_t = random.randint(0, max_t) rand_l = random.randint(0, max_l) return(rand_t, rand_t + self.out_dim, rand_l, rand_l + self.out_dim) def get_crop_lambda(self, mode): return { "center_crop": self.center_crop, "uniform_crop": self.uniform_crop }[mode] class ColorCastOracle(object): def __init__(self, n_channels, color_cast_range): self.n_channels = n_channels self.color_cast_range = color_cast_range def no_cast(self): return numpy.zeros(self.n_channels) def baidu_cast(self): # http://arxiv.org/abs/1501.02876v3 s = self.color_cast_range / 3.0 # 99.73% of values within 3 std deviations casts = [] mask = [] while len(casts) < self.n_channels: casts.append(numpy.random.normal(scale=s)) mask.append(round(random.random())) return(numpy.array(casts, dtype=int) * numpy.array(mask, dtype=int)) def get_color_cast_lambda(self, mode): return { "no_cast": self.no_cast, "baidu_cast": self.baidu_cast }[mode] class DataStream(object): """ Provides an interface for easily filling and replacing GPU cache of images """ def __init__(self, train_image_dir="data/train/centered_crop/", train_labels_csv_path="data/train/trainLabels.csv", image_shape=(128, 128, 3), batch_size=128, cache_size_factor=8, center=0, normalize=0, amplify=1, train_flip='no_flip', shuffle=1, test_image_dir=None, random_seed=None, valid_dataset_size=4864, valid_flip='no_flip', test_flip='no_flip', sample_class=None, 
custom_distribution=None, train_color_cast='no_cast', valid_color_cast='no_cast', test_color_cast='no_cast', color_cast_range=20, pre_train_crop='center_crop', train_crop='uniform_crop', valid_test_crop='center_crop', image_extension='.png'): self.train_image_dir = train_image_dir self.test_image_dir = test_image_dir self.image_shape = image_shape self.batch_size = batch_size self.cache_size = (self.batch_size * cache_size_factor) # size in images self.center = center self.mean = None self.normalize = normalize self.std = None self.amplify = amplify self.train_set_flipper = ImageFlipOracle(train_flip) test_set_flipper = ImageFlipOracle(test_flip) self.train_flip_lambda = self.train_set_flipper.get_flip_lambda(train_flip) self.valid_flip_lambda = self.train_set_flipper.get_flip_lambda(valid_flip, deterministic=True) self.test_flip_lambda = test_set_flipper.get_flip_lambda(test_flip, deterministic=True) self.valid_dataset_size = valid_dataset_size self.random_seed = random_seed self.sample_class = sample_class self.custom_distribution = custom_distribution color_cast_oracle = ColorCastOracle(self.image_shape[-1], color_cast_range) self.train_color_cast_lambda = color_cast_oracle.get_color_cast_lambda(train_color_cast) self.valid_color_cast_lambda = color_cast_oracle.get_color_cast_lambda(valid_color_cast) self.test_color_cast_lambda = color_cast_oracle.get_color_cast_lambda(test_color_cast) crop_oracle = CropOracle(self.image_shape[0]) self.pre_train_crop_lambda = crop_oracle.get_crop_lambda(pre_train_crop) self.train_crop_lambda = crop_oracle.get_crop_lambda(train_crop) self.valid_test_crop_lambda = crop_oracle.get_crop_lambda(valid_test_crop) self.image_extension = image_extension bd = BlockDesigner(train_labels_csv_path, seed=self.random_seed) self.K = bd.K valid_examples = bd.break_off_block(self.valid_dataset_size) self.train_examples = bd.remainder() self.n_train_batches = int(bd.size() / self.batch_size) self.valid_dataset = self.setup_valid_dataset(valid_examples) self.train_dataset = None if shuffle else self.setup_train_dataset() self.test_dataset = self.setup_test_dataset() self.n_test_examples = len(self.test_dataset["X"]) if self.sample_class: self.n_train_batches = int(len(self.train_dataset["X"]) / self.batch_size) # override in case Sampler is used (TODO make this neater) self.train_dataset_size = self.n_train_batches * self.batch_size if self.center == 1 or self.normalize == 1: self.calc_mean_std_image() def valid_set(self): all_val_images = numpy.zeros(((len(self.valid_dataset["y"]),) + self.image_shape), dtype=theano.config.floatX) for i, image in enumerate(self.valid_dataset["X"]): all_val_images[i, ...] 
= self.feed_image(image, self.train_image_dir, self.valid_test_crop_lambda, self.valid_flip_lambda, self.valid_color_cast_lambda) # b01c, Theano: bc01 CudaConvnet: c01b return numpy.rollaxis(all_val_images, 3, 1), numpy.array(self.valid_dataset["y"], dtype='int32') def train_buffer(self, new_flip_noise=None): """ Yields a x_cache_block, has a size that is a multiple of training batches """ if new_flip_noise: self.train_set_flipper.reset_noise(new_flip_noise) train_dataset = self.train_dataset or self.setup_train_dataset() x_cache_block = numpy.zeros(((self.cache_size,) + self.image_shape), dtype=theano.config.floatX) n_cache_blocks = int(len(train_dataset["y"]) / float(self.cache_size)) # rounding down skips the leftovers if not n_cache_blocks: raise ValueError("Train dataset length %i is too small for cache size %i" % (len(train_dataset["y"]), self.cache_size)) for ith_cache_block in xrange(n_cache_blocks): ith_cache_block_end = (ith_cache_block + 1) * self.cache_size ith_cache_block_slice = slice(ith_cache_block * self.cache_size, ith_cache_block_end) for i, image in enumerate(train_dataset["X"][ith_cache_block_slice]): x_cache_block[i, ...] = self.feed_image(image, self.train_image_dir, self.train_crop_lambda, self.train_flip_lambda, self.train_color_cast_lambda) yield numpy.rollaxis(x_cache_block, 3, 1), numpy.array(train_dataset["y"][ith_cache_block_slice], dtype='int32') def test_buffer(self): """ Yields a x_cache_block, has a size that is a multiple of training batches """ x_cache_block = numpy.zeros(((self.cache_size,) + self.image_shape), dtype=theano.config.floatX) n_full_cache_blocks, n_leftovers = divmod(len(self.test_dataset["X"]), self.cache_size) if not n_full_cache_blocks: raise ValueError("Test dataset length %i is too small for cache size %i" % (len(self.test_dataset["X"]), self.cache_size)) for ith_cache_block in xrange(n_full_cache_blocks): ith_cache_block_end = (ith_cache_block + 1) * self.cache_size ith_cache_block_slice = slice(ith_cache_block * self.cache_size, ith_cache_block_end) idxs_to_full_dataset = list(range(ith_cache_block * self.cache_size, ith_cache_block_end)) for i, image in enumerate(self.test_dataset["X"][ith_cache_block_slice]): x_cache_block[i, ...] = self.feed_image(image, self.test_image_dir, self.valid_test_crop_lambda, self.test_flip_lambda, self.test_color_cast_lambda) yield numpy.rollaxis(x_cache_block, 3, 1), numpy.array(idxs_to_full_dataset, dtype='int32') # sneak the leftovers out, padded by the previous full cache block if n_leftovers: leftover_slice = slice(ith_cache_block_end, ith_cache_block_end + n_leftovers) for i, image in enumerate(self.test_dataset["X"][leftover_slice]): idxs_to_full_dataset[i] = ith_cache_block_end + i x_cache_block[i, ...] = self.feed_image(image, self.test_image_dir, self.valid_test_crop_lambda, self.test_flip_lambda, self.test_color_cast_lambda) yield numpy.rollaxis(x_cache_block, 3, 1), numpy.array(idxs_to_full_dataset, dtype='int32') def read_image(self, image_name, image_dir, crop_lambda, extension): """ :type image: string """ as_grey = True if self.image_shape[2] == 1 else False img = imread(image_dir + image_name + extension, as_grey=as_grey) img = self.crop_image(img, crop_lambda) if crop_lambda else img img = img / 255. if len(img.shape) == 2: return img.reshape(img.shape + (1,)) # when grey, img might lack dimension else: return img def preprocess_image(self, image, flip_coords, color_cast): """ Important, use with read_image. 
This method assumes image is already standardized to have [0,1] pixel values """ image = self.flip_image(image, flip_coords) image = self.color_cast_image(image, color_cast) if not self.mean == None: image = image - self.mean if not self.std == None: image = image / (self.std + 1e-5) return self.amplify * image def crop_image(self, img, crop_lambda): t,b,l,r = crop_lambda(img) assert(b-t == self.image_shape[0]) assert(r-l == self.image_shape[1]) return img[t:b, l:r, :] def color_cast_image(self, image, color_cast, masked=False): if masked: # Observed to perform much worse coloring = numpy.zeros(image.shape) + color_cast mask = (image > 0) / 255. return(image + (mask * coloring)) else: return(image + (color_cast/255.0)) def flip_image(self, image, flip_coords): assert(len(flip_coords) == 2) assert(max(flip_coords) <= 1) assert(min(flip_coords) >= 0) if flip_coords[0] == 1: image = numpy.flipud(image) if flip_coords[1] == 1: image = numpy.fliplr(image) return image def feed_image(self, image_name, image_dir, crop_lambda=None, flip_lambda=None, color_cast_lambda=None): img = self.read_image(image_name, image_dir, crop_lambda, self.image_extension) flip_coords = flip_lambda(image_name) if flip_lambda else numpy.zeros(2) color_cast = color_cast_lambda() if color_cast_lambda else numpy.zeros(self.image_shape[-1]) return self.preprocess_image(img, flip_coords, color_cast) def calc_mean_std_image(self): """ Streaming variance calc: http://math.stackexchange.com/questions/20593/calculate-variance-from-a-stream-of-sample-values Will not look at the validation set images """ print("Calculating mean and std dev image...") mean = numpy.zeros(self.image_shape, dtype=theano.config.floatX) mean_sqr = numpy.zeros(self.image_shape, dtype=theano.config.floatX) N = sum([len(ids) for y, ids in self.train_examples.items()]) # self.train_dataset_size + remainders for y, ids in self.train_examples.items(): for image in ids: img = self.read_image(image, self.train_image_dir, self.pre_train_crop_lambda, self.image_extension) mean += img mean_sqr += numpy.square(img) self.mean = mean / N self.std = numpy.sqrt(numpy.abs(mean_sqr / N - numpy.square(self.mean))) def setup_valid_dataset(self, block): images = [] labels = [] for y, ids in block.items(): for id in ids: images.append(id) labels.append(y) return {"X": images, "y": labels} def setup_train_dataset(self): """ Each self.batch_size of examples follows the same distribution """ bd = BlockDesigner(self.train_examples) if self.sample_class: samp = Sampler(bd.remainder(), seed=self.random_seed) images, labels = samp.custom_distribution(self.sample_class, self.batch_size, self.custom_distribution) return {"X": images, "y": labels} else: blocks = bd.break_off_multiple_blocks(self.n_train_batches, self.batch_size) images = [] labels = [] for block in blocks: for y, ids in block.items(): for id in ids: images.append(id) labels.append(y) return {"X": images, "y": labels} def setup_test_dataset(self): if self.test_image_dir: images = numpy.array([path.splitext(f)[0] for f in listdir(self.test_image_dir) if re.search('\.(jpeg|jpg|png)', f, flags=re.IGNORECASE)]) else: images = [] return {"X": natsort.natsorted(images)}
python
from g2net.input import extract_dict_from_df import pandas as pd import pytest @pytest.mark.parametrize( 'data_dict, key_col, val_col, expected_dict', ( pytest.param( { 'col1': [1, 2, 5], 'col2': [3, 4, 6] }, 'col1', 'col2', { 1: 3, 2: 4, 5: 6 }, id='2-columns-only'), pytest.param( { 'col1': [1, 2, 5], 'col2': [3, 4, 6], 'col3': [-1, -2, -3] }, 'col3', 'col1', { -1: 1, -2: 2, -3: 5 }, id='3-columns'), ) ) def test_extract_dict_from_df(data_dict, key_col, val_col, expected_dict): # Given source_df = pd.DataFrame(data=data_dict) # When result_dict = extract_dict_from_df(source_df, key_col, val_col) # Then assert expected_dict == result_dict
python
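The test above only exercises `extract_dict_from_df`; the helper itself is not part of this entry. A minimal sketch that would satisfy these parametrized cases is shown below — the real `g2net.input` implementation may differ.

```python
# Hypothetical implementation satisfying the test above: map one DataFrame
# column to another, row by row. Not the actual g2net.input source.
import pandas as pd


def extract_dict_from_df(df: pd.DataFrame, key_col: str, val_col: str) -> dict:
    # Zip the two columns into {key_col value: val_col value}
    return dict(zip(df[key_col], df[val_col]))
```
python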
#
# Copyright (c) 2020 by Philipp Scheer. All Rights Reserved.
#
# usage: nlu.py [-h] [--config CONFIG]
#
# Natural language understanding engine using spaCy and RASA
# Convert spoken language into a command (skill) and arguments
#
# optional arguments:
#   -h, --help       show this help message and exit
#   --config CONFIG  Path to jarvis configuration file

## input:  jarvis/stt -> command:[words]
## output: jarvis/nlu -> (started|stopped|error|intent:[intent]:probability:[probability]:slots:[slots])

## import global packages
import io, os, sys, time, json, argparse, configparser
import urllib.parse as urlparse
from http.server import BaseHTTPRequestHandler, HTTPServer

## import local packages
import lib.helper as helper
import snips_nlu

## set port for webserver
port = 1885


class Handler(BaseHTTPRequestHandler):
    def do_GET(self):
        global dataset
        self.send_response(200)
        self.send_header('Content-type', 'text/json')
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        path = self.path.split("?")[0]
        arguments = urlparse.parse_qs((urlparse.urlparse(self.path)).query)
        if path == "/execute":
            try:
                cmd = arguments["command"][0]
                self.wfile.write(json.dumps({"success": True, "message": nlu.parse(cmd)}).encode())
            except KeyError:
                self.wfile.write(json.dumps({"success": False, "message": "need to set 'command' url argument"}).encode())
        if path == "/info":
            try:
                self.wfile.write(json.dumps({"success": True, "message": dataset}).encode())
            except KeyError:
                self.wfile.write(json.dumps({"success": False, "message": "something went wrong"}).encode())


# this function is being called when the stt engine detects a command
def handler(client, userdata, message):
    global nlu, mqtt
    data = message.payload.decode()
    if data.startswith("command:"):
        command = data.split(":")[1]
        parsed = nlu.parse(command)
        mqtt.publish("jarvis/nlu", json.dumps(parsed))


# add a description and parse arguments
parser = argparse.ArgumentParser(
    description="Natural language understanding engine using snips-nlu\n"
                "Convert spoken language into a command (skill) and arguments",
    formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--config", type=str, help="Path to jarvis configuration file", default="../jarvis.conf")
parser.add_argument("--message", type=str, help="A string to run against the NLU (Might take several seconds)")
args = parser.parse_args()

# get the config file from argparse and read it
config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
config.read(args.config)
config = config["nlu"]

# a --message argument can only be answered once the engine is trained,
# so it is handled after the fit() call below

# initialize mqtt/webserver instance
mqtt = helper.MQTT(client_id="nlu.py")
mqtt.on_message(handler)
mqtt.subscribe("jarvis/stt")
server = HTTPServer(('', port), Handler)

# mark as started
mqtt.publish("jarvis/nlu", "started")

# start snips instance
with io.open(config["dataset"]) as f:
    dataset = json.load(f)
    dataset = helper.transform_dataset(dataset)

# log messages
helper.log("nlu", "training nlu engine")
start = time.time()
nlu = snips_nlu.SnipsNLUEngine(dataset)
nlu = nlu.fit(dataset)
helper.log("nlu", "finished training (took {:.2f}s)".format(time.time() - start))

if args.message is not None:
    parsed = nlu.parse(args.message)
    print(json.dumps(parsed))
    exit(0)

# mainloop
while True:
    server.handle_request()

mqtt.publish("jarvis/nlu", "stopped")
python
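# Example client for the /execute endpoint exposed by the Handler above.
# This is only a sketch: it assumes the NLU service above is running locally,
# and the port (1885), path, query parameter and response shape are taken
# directly from that script. The command text itself is illustrative.
import json
import urllib.parse
import urllib.request

query = urllib.parse.urlencode({"command": "turn on the lights"})
with urllib.request.urlopen(f"http://127.0.0.1:1885/execute?{query}") as resp:
    result = json.loads(resp.read().decode())

print(result["success"])   # True on success
print(result["message"])   # the parsed intent/slots dict returned by nlu.parse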
import pickle import pytest from routrie import Router def test_routing() -> None: router = Router( routes={ "/": 0, "/users": 1, "/users/:id": 2, "/users/:id/:org": 3, "/users/:user_id/repos": 4, "/users/:user_id/repos/:id": 5, "/users/:user_id/repos/:id/*any": 6, "/:username": 7, "/*any": 8, "/about": 9, "/about/": 10, "/about/us": 11, "/users/repos/*any": 12, } ) # Matched "/" node = router.find("/") assert node is not None match, params = node assert match == 0 assert params == [] # Matched "/:username" node = router.find("/username") assert node is not None match, params = node assert match == 7 assert params == [("username", "username")] # Matched "/*any" node = router.find("/user/s") assert node is not None match, params = node assert match == 8 assert params == [("any", "user/s")] def test_no_match() -> None: router = Router(routes={"/": 0}) # No match node = router.find("/noway-jose") assert node is None def test_serialization() -> None: router = Router({"/": 0}) router: Router[int] = pickle.loads(pickle.dumps(router)) # No match node = router.find("/noway-jose") assert node is None # Match node = router.find("/") assert node is not None match, params = node assert match == 0 assert params == [] def test_duplicate_route() -> None: router = Router( routes=dict( [ ("/", 0), ("/", 1), ] ) ) # No match node = router.find("/") assert node is not None match, params = node assert match == 1 assert params == [] if __name__ == "__main__": pytest.main()
python
import os import sys sys.path.append(f'{os.getcwd()}/example/bpapi/vendor')
python
# Generated by Django 2.2.11 on 2020-04-09 13:49 from django.db import migrations, models import django.db.models.deletion import wagtail.core.fields class Migration(migrations.Migration): dependencies = [ ('wagtailcore', '0041_group_collection_permissions_verbose_name_plural'), ('contentPages', '0015_auto_20200408_1435'), ] operations = [ migrations.CreateModel( name='ReusableContent', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')), ('name', models.CharField(max_length=100)), ('content_body', wagtail.core.fields.RichTextField(default='')), ], options={ 'verbose_name': 'Content Title', 'verbose_name_plural': 'Content Titles', }, bases=('wagtailcore.page',), ), ]
python
import urllib.parse


def assert_urls_match(u1, u2):
    p1 = urllib.parse.urlparse(u1)
    p2 = urllib.parse.urlparse(u2)
    assert p1.scheme == p2.scheme
    assert p1.netloc == p2.netloc
    assert p1.path == p2.path
    assert urllib.parse.parse_qs(p1.query) == urllib.parse.parse_qs(p2.query)


class FakeResponse:
    def __init__(self, text='', status_code=200, url=None):
        self.text = text
        self.status_code = status_code
        self.content = text and bytes(text, 'utf8')
        self.url = url
        self.headers = {'content-type': 'text/html'}

    def __repr__(self):
        return 'FakeResponse(status={}, text={}, url={})'.format(
            self.status_code, self.text, self.url)

    def raise_for_status(self):
        pass


class FakeUrlOpen:
    def __init__(self, url=None, info=None):
        self.url_ = url
        self.info_ = info

    def __repr__(self):
        return 'FakeUrlOpenResponse(url={})'.format(self.url_)

    def geturl(self):
        return self.url_

    def info(self):
        return self.info_


class FakeUrlMetadata:
    def __init__(self, content_type, content_length):
        self.content_type = content_type
        self.content_length = content_length

    def get(self, prop):
        if prop.lower() == 'content-length':
            return self.content_length
        if prop.lower() == 'content-type':
            return self.content_type

    def get_content_maintype(self):
        return self.content_type.split('/')[0]
python
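# Usage sketch for the helpers above, assuming they are in scope (e.g. the
# module has been imported): query-string order is ignored by
# assert_urls_match, and FakeResponse stands in for a requests-style response.
assert_urls_match("https://example.com/a?x=1&y=2",
                  "https://example.com/a?y=2&x=1")

resp = FakeResponse(text="<html></html>", status_code=200,
                    url="https://example.com/a")
resp.raise_for_status()  # no-op, mirroring requests.Response on a 2xx status
print(resp)              # FakeResponse(status=200, text=<html></html>, url=https://example.com/a)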
# -*- coding: utf-8 -*-
"""Defines `json.JSONEncoder` subclass that makes parsed objects (including
bytes and bitarray) JSON-serializable
"""

import bitarray
import json


class JSONEncoder(json.JSONEncoder):
    """JSON encoder with additional support for bytes and bitarray

    Examples:

        >>> JSONEncoder().encode({"field1": 123})
        '{"field1": 123}'

        >>> JSONEncoder().encode({"field1": b'\x12\x34'})
        '{"field1": "1234"}'

        >>> JSONEncoder().encode({"field1": bitarray.bitarray('01010')})
        '{"field1": "01010"}'

        >>> JSONEncoder(compact_bitarray=True).encode({"field1": bitarray.bitarray('01010')})
        '{"field1": {"value": "50", "length": 5}}'

        >>> JSONEncoder().encode({"field1": {"Type": 567}})
        '{"field1": {"Type": 567}}'
    """

    def __init__(self, compact_bitarray=False, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._compact_bitarray = bool(compact_bitarray)

    def default(self, o):
        if isinstance(o, (bytes, bytearray)):
            return o.hex()
        elif isinstance(o, bitarray.bitarray):
            if self._compact_bitarray:
                return {'value': o.tobytes().hex(), 'length': len(o)}
            else:
                return o.to01()
        else:
            # propagate to the base class so unsupported types raise TypeError
            # instead of silently serializing as null
            return super().default(o)
python
from os.path import getsize from .constants import ATTACHMENT_CONTENT_TYPES from .errors import FastScoreError class Attachment(object): """ Represents a model attachment. An attachment can be created directly but it must (ultimately) associated with the model: >>> att = fastscore.Attachment('att-1', datafile='/tmp/att1.zip') >>> model = mm.models['model-1'] >>> att.upload(model) :param atype: An attachment type. Guessed from the data file name if omitted. :param datafile: The data file. :param model: The model instance. """ def __init__(self, name, atype=None, datafile=None, datasize=None, model=None): self._name = name if atype == None and datafile != None: atype = guess_type(datafile) self._atype = atype if datasize == None and datafile != None: datasize = getsize(datafile) self._datasize = datasize self._datafile = datafile self._model = model @property def name(self): """ An attachment name. """ return self._name @property def atype(self): """ An attachment type. * **zip** A ZIP archive. * **tgz** A gzipped tarball. """ return self._atype @atype.setter def atype(self, atype): assert atype in ATTACHMENT_CONTENT_TYPES self._atype = atype @property def datafile(self): """ A name of the file that contains the attachment data. The attachment is downloaded when this property is first accessed. """ if self._datafile == None: self._datafile = self._model.download_attachment(self._name) return self._datafile @datafile.setter def datafile(self, datafile): self._datafile = datafile if datafile: self._datasize = getsize(datafile) else: self._datasize = None @property def datasize(self): """ The size of the attachment. Checking the attachment size does NOT trigger the download. """ return self._datasize def upload(self, model=None): """ Adds the attachment to the model. :param model: The model instance. Can be None if the model instance has been provided when the attachemnet was created. """ if model == None and self._model == None: raise FastScoreError("Attachment '%s' not associated with a model" % self.name) if self._model == None: self._model = model self._model.save_attachment(self) def guess_type(datafile): if datafile.endswith('.zip'): return 'zip' elif datafile.endswith('.tar.gz'): return 'tgz' elif datafile.endswith('.tgz'): return 'tgz' else: raise FastScoreError("Unable to guess attachment type for '%s'" % datafile)
python
# The Path class represents paths on a graph and records the total path cost class Path: def __init__(self): self.length = 0 self.cost = 0 self.nodes = [] # adds a node to the end of the path def add_node(self, node_label, cost): self.length += 1 self.cost += cost self.nodes.append(node_label) # reverses the path (this is useful when building Paths from child to parent) def reverse(self): self.nodes.reverse() def __str__(self): return " -> ".join(self.nodes) + "\t (Cost: %s)" % self.cost
python
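# Usage sketch for the Path class above: paths are often built child-to-parent
# while walking back up a search tree, then reversed for display.
p = Path()
p.add_node("C", cost=4)
p.add_node("B", cost=2)
p.add_node("A", cost=0)
p.reverse()
print(p)  # A -> B -> C    (Cost: 6)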
######################################################### # 2020-01-28 13:15:09 # AI # ins: MOV @Ri, A ######################################################### import random from .. import testutil as u from ..sim51util import SIMRAM from ..asmconst import * p = u.create_test() ram = SIMRAM() def test_rs(rs, psw_rs, p): p += ";; set rs" p += atl.move(SFR_PSW, atl.I(psw_rs)) ram.set_direct(SFR_PSW.x, psw_rs) def test_ri(RI, p): indirect = random.getrandbits(8) a = random.getrandbits(8) p += atl.move(atl.D(RI.addr), atl.I(indirect)) p += atl.move(SFR_A, atl.I(a)) p += f'MOV {RI}, A' ram.set_iram(RI.addr, indirect) ram.set_direct(SFR_A.x, a) ram.set_iram(indirect, ram.get_direct(SFR_A.x)) p += atl.aste(RI, atl.I(ram.get_iram(ram.get_direct(RI.addr)))) for x in range(486): p.iter_ri(test_rs, test_ri)
python
import sys, re, hashlib, json, random import GenePredBasics, SequenceBasics from SerializeBasics import encode_64, decode_64 # Transcriptome is a set of genepred entries # with the corresponding fasta file. # alternatively, you can read in a serialized transcriptome. # # You can further define a transcriptome file with an expression file # This file can be of the form of a TSV # class Transcriptome: def __init__(self): self.transcripts = {} self.expression = None self.ref_hash = None def get_serialized(self): a = {} a['transcripts'] = self.transcripts if self.expression: a['expression'] = self.expression.get_serialized() else: a['expression'] = None a['ref_hash'] = self.ref_hash return encode_64(a) def read_serialized(self,input): a = decode_64(input) self.transcripts = a['transcripts'] if a['expression']: self.expression = IsoformExpression() self.expression.read_serialized(a['expression']) else: self.expression = a['expression'] self.ref_hash = a['ref_hash'] def set_reference_genome_dictionary(self,indict): self.ref_hash = indict return # Adds an expression value and updates the rng data def add_expression(self,inname,exp): if not self.expression: self.expression = IsoformExpression() for name in self.transcripts: self.expression.add_expression(name,0) self.expression.add_expression(inname,exp) self.expression.update_expression() # Add an expression value, but you'll have to update it yourself. def add_expression_no_update(self,inname,exp): if not self.expression: self.expression = IsoformExpression() for name in self.transcripts: self.expression.add_expression(name,0) self.expression.add_expression(inname,exp) def update_expression(self): if self.expression: self.expression.update_expression() else: sys.stderr.write("WARNING: expression was not set yet. nothing to update\n") def add_genepred_line(self,inline): if not self.ref_hash: sys.stderr.write("ERROR: Must assign a reference genome dictionary first\n") sys.exit() gpd = GenePredBasics.GenePredEntry(inline) if gpd.value('name') in self.transcripts: sys.stderr.write("WARNING: "+inline+" transcript was already set\n") seq = '' for i in range(0,gpd.value('exonCount')): seq += self.ref_hash[gpd.value('chrom')][gpd.value('exonStarts')[i]:gpd.value('exonEnds')[i]].upper() if gpd.value('strand') == '-': seq = SequenceBasics.rc(seq) self.transcripts[gpd.value('name')] = seq return # This is depreciated #def read_from_fasta_and_genepred(self,genomefastafile,genepredfile): # # read in our genome # seen_names = {} # seen_coords = {} # genepred = {} # with open(genepredfile) as inf: # for line in inf: # if re.match('^#',line): continue # e = GenePredBasics.line_to_entry(line) # hexcoord = hashlib.sha1(e['chrom']+"\t"+e['strand'] + "\t" + str(e['exonStarts'])+"\t" + str(e['exonEnds'])).hexdigest() # dupname = 0 # dupcoord = 0 # if hexcoord in seen_coords: # sys.stderr.write("Warning "+ e['name'] + " " + e['gene_name'] + " exists at identical coordinates as another entry\n") # dupcoord = 1 # seen_coords[hexcoord] = 1 # currname = e['name'] # if e['name'] in seen_names: # if dupcoord == 1: # sys.stderr.write("skipping perfect duplicate of "+e['name']+"\n") # continue # newname = e['name'] + "."+str(len(seen_names[e['name']])+1) # currname = newname # seen_names[e['name']].append(newname) # sys.stderr.write("Warning "+ e['name'] + " " + e['gene_name'] + " is a duplicate name.. 
renaming to "+newname+ "\n") # dupname = 1 # else: # seen_names[e['name']] = [] # seen_names[e['name']].append(e['name']) # genepred[currname] = e # # #print "reading names and locs" # ref = SequenceBasics.read_fasta_into_hash(genomefastafile) # #print "converting sequences" # for transcript in genepred: # e = genepred[transcript] # if e['chrom'] in ref: # seq = '' # self.transcript_names[transcript] = genepred[transcript]['name'] # for i in range(0,e['exonCount']): # seq += ref[e['chrom']][e['exonStarts'][i]:e['exonEnds'][i]] # if e['strand'] == '-': seq = SequenceBasics.rc(seq) # self.transcripts[transcript] = seq.upper() # self.gpds[transcript] = e # Pre: Expression must have been set # Post: Returns a random transcript name def get_random_by_expression(self): return self.expression.get_random_by_expression() def get_uniform_random(self): tnames = self.transcripts.keys() tnum = len(tnames) rnum = random.randint(0,tnum-1) return tnames[rnum] # Default to random by expression if its set def get_random(self): if self.expression: return self.get_random_by_expression() return self.get_uniform_random() def get_sequence(self,name): if name not in self.transcripts: sys.stderr.write("ERROR: "+name+" not in transcripts\n") sys.exit() return self.transcripts('name') # Class holds the isoform names and expression values # And also has functions for randomly getting an isoform name # either by uniform distribution or class IsoformExpression: def __init__(self): self.expression = {} self.total_expression = None self.names = None return # Pre: TSV with <transcript name> <expression level> def read_tsv(self,filename): with open(filename) as inf: for line in inf: f = line.rstrip().split("\t") self.expression[f[0]]=float(f[1]) self.update_expression() def get_expression(self,transcript_name): if transcript_name not in self.expression: sys.stderr.write("ERROR: "+transcript_name+" not in expression") sys.exit() return self.expression[transcript_name] # Add a single expression value, you need to update_expression in order to set rng things def add_expression(self,transcript_name,expression): self.expression[transcript_name] = expression def read_serialized(self,instring): self.expression = decode_64(instring) self.update_expression() def get_serialized(self): return encode_64(self.expression) def get_random_by_expression(self): rnum = random.random() total = 0 for name in self.names: total += self.expression[name]/self.total_expression if rnum < total: return name return name def get_uniform_random(self): rnum = random.randint(0,len(self.names)-1) return self.names[rnum] def update_expression(self): self.names = sorted(self.expression.keys()) self.total_expression = sum([self.expression[x] for x in self.expression])
python
from cnnlevelset.pascalvoc_util import PascalVOC from cnnlevelset.localizer import Localizer from cnnlevelset import config as cfg from collections import defaultdict import tensorflow as tf import keras.backend as K import numpy as np import matplotlib.pyplot as plt import sys import time tf.python.control_flow_ops = tf pascal = PascalVOC(cfg.PASCAL_PATH) X_img_test, X_test, y_test, y_seg = pascal.get_test_data(10000, False) cls_y_test = y_test[:, :, 0] N = float(X_img_test.shape[0]) localizer = Localizer(model_path=cfg.MODEL_PATH) start = time.time() cls_preds, bbox_preds = localizer.predict(X_test) end = time.time() print('CNN time: {:.4f}'.format(end - start)) print('Average: {:.4f}'.format((end - start) / N)) cls_acc = np.mean(np.argmax(cls_preds, axis=1) == np.argmax(cls_y_test, axis=1)) print(cls_acc) K.clear_session() from cnnlevelset.segmenter import * if len(sys.argv) > 1 and sys.argv[1] == 'show': show = True else: show = False bbox_res, border_res, cnn_res = defaultdict(list), defaultdict(list), defaultdict(list) i = 0 for img, y, cls_pred, bbox_pred, ys in zip(X_img_test, y_test, cls_preds, bbox_preds, y_seg): if show: label = pascal.idx2label[np.argmax(cls_pred)] print(label) img = img.reshape(224, 224, 3) plt.imshow(pascal.draw_bbox(img, bbox_pred)) plt.show() phi = phi_from_bbox(img, bbox_pred) levelset_segment_theano(img, phi=phi, sigma=5, v=1, alpha=100000, n_iter=80, print_after=80) input() else: start = time.time() phi = phi_from_bbox(img, bbox_pred) mask = (phi < 0) end = time.time() bbox_res['time'].append(end - start) bbox_res['accuracy'].append(pascal.segmentation_accuracy(mask, ys)) p, r, f1 = pascal.segmentation_prec_rec_f1(mask, ys) bbox_res['precision'].append(p) bbox_res['recall'].append(r) bbox_res['f1'].append(f1) start = time.time() phi = default_phi(img) mask = levelset_segment_theano(img, phi=phi, sigma=5, v=1, alpha=100000, n_iter=80) end = time.time() border_res['time'].append(end - start) border_res['accuracy'].append(pascal.segmentation_accuracy(mask, ys)) p, r, f1 = pascal.segmentation_prec_rec_f1(mask, ys) border_res['precision'].append(p) border_res['recall'].append(r) border_res['f1'].append(f1) start = time.time() phi = phi_from_bbox(img, bbox_pred) mask = levelset_segment_theano(img, phi=phi, sigma=5, v=1, alpha=100000, n_iter=80) end = time.time() cnn_res['time'].append(end - start) cnn_res['accuracy'].append(pascal.segmentation_accuracy(mask, ys)) p, r, f1 = pascal.segmentation_prec_rec_f1(mask, ys) cnn_res['precision'].append(p) cnn_res['recall'].append(r) cnn_res['f1'].append(f1) i += 1 print(i) if not show: for metric in ['accuracy', 'precision', 'recall', 'f1']: print(metric) print('----------------') print('Bbox: {}'.format(np.mean(bbox_res[metric]))) print('Border: {}'.format(np.mean(border_res[metric]))) print('CNN: {}'.format(np.mean(cnn_res[metric]))) print() print('Time') print('---------------------') print('Bbox: {}'.format(np.mean(bbox_res['time']))) print('Border: {}'.format(np.mean(border_res['time']))) print('CNN: {}'.format(np.mean(cnn_res['time']))) print()
python
a''' Created on 6-feb-2017 Modified the 20170321, by EP @author: roncolato ''' import numpy as np import scipy.interpolate as interpol from sherpa.training.step1 import from7to28 as f7 from sherpa.training.step1 import quant as q from sherpa.training.step1 import EquaPrec as ep from sherpa.training import EquaIndic as ei from sherpa.training.step1 import nlparci as nlpa from sherpa.training.step1 import InvDistN_opt_prec as inv from sherpa.training.step1 import nlinfit as nlin def step1_potency(conf): prctileVec1=np.array([35, 35, 35, 35, 35]); prctileVec2=np.array([70, 70, 70, 70, 70]); categories=np.array([1,2,3]) #convert from 28 to 7 km Prec = f7.from7to28(conf.Prec); ny = int(conf.ny/4); nx = int(conf.nx/4); rad = conf.radStep1; nPrec = conf.nPrec; rf = conf.rf; flagRegioMat = np.copy(conf.flagRegioMat); #pad Prec with zeros around initial matrix, to perform matrix products later on Prec2 = np.zeros((ny+rad*2,nx+rad*2,Prec.shape[2],Prec.shape[3])); Prec2[rad:-rad,rad:-rad,:,:] = Prec[:,:,:,:]; Prec=Prec2; #convert from 28 to 7 km Indic = f7.from7to28(conf.Indic); flagRegioMat = f7.from7to28(flagRegioMat); #initialize variables omega = np.full([ny,nx,nPrec],np.nan); alpha = np.full([ny,nx,nPrec],np.nan); ci2 = np.empty((categories.size,nPrec), dtype=object); CovB2 = np.empty((categories.size,nPrec), dtype=object); alphaTmp = np.zeros((categories.size)); omegaTmp = np.zeros((categories.size)); #define training scenarios; note scenarios number is +1 if checking DoE...as in line 74 it is -1 if conf.domain == 'emep10km': if conf.aqi == 'SURF_ug_PM25_rh50-Yea': IdeVec = (np.array([1, 1]),np.array([1, 2]),np.array([1, 3]),np.array([1, 5]),np.array([1, 6])); elif conf.aqi == 'SURF_ug_PM10_rh50-Yea': IdeVec = (np.array([1, 1]),np.array([1, 2]),np.array([1, 3]),np.array([1, 4]),np.array([1, 6])); elif conf.domain == 'ineris7km': IdeVec = (np.array([1, 8]),np.array([1, 9]),np.array([1, 10]),np.array([1, 11]),np.array([1, 12])); #loop over precursors for precursor in range(0, nPrec): PREC = precursor; Ide = IdeVec[precursor]; icel = 0; #intialize variables PrecPatch = np.zeros((nx*ny,(rad*2+1)**2)); IndicEq = np.zeros((nx*ny,1)); indexUsed = np.full((nx*ny,1),np.nan);#np.zeros((nx*ny,1)); potency=np.full((ny,nx),np.nan);#np.zeros((ny,nx)); print('precursor: '+str(PREC)); #loop over cells to create groups for ic in range(0, nx): #print(PREC, ic); for ir in range(0, ny): if flagRegioMat[ir,ic]>0: #create data for omega calculation nSc = Ide.shape[0]-1;# size(Ide,2)-1 tmpPrec = ep.EquaPrec(ic,ir,rf,nx,ny,nSc,Prec.shape[3],Prec[:,:,Ide[1],PREC],rad); # patches tmpInde = ei.EquaIndic(ic,ir,rf,nx,ny,nSc,Indic[:,:,Ide[1]]); # indicator x0=np.array([1, 2]); [inp2_aggemi]= inv.InvDistN_opt_prec(x0,tmpPrec,rad); #store data for omega calculation potency[ir,ic]=tmpInde/inp2_aggemi; prc1=np.percentile(potency[np.isfinite(potency)],prctileVec1[precursor]); prc9=np.percentile(potency[np.isfinite(potency)],prctileVec2[precursor]); speed=potency.copy(); speed[np.isnan(speed)]=0 potency[speed<prc1]=1; potency[(speed>=prc1) & (speed<prc9)]=2; potency[speed>=prc9]=3; val=categories; for ic in range(0, nx): #print(PREC, ic); for ir in range(0, ny): if flagRegioMat[ir,ic]>0: #variable to store which group ot be considered indexUsed[icel] = np.where(val==potency[ir,ic]); #create data for omega calculation nSc = Ide.shape[0]-1;# size(Ide,2)-1 tmpPrec = ep.EquaPrec(ic,ir,rf,nx,ny,nSc,Prec.shape[3],Prec[:,:,Ide[1],PREC],rad); # patches tmpInde = ei.EquaIndic(ic,ir,rf,nx,ny,nSc,Indic[:,:,Ide[1]]); # indicator #store data 
for omega calculation PrecPatch[icel,:] = tmpPrec; #np.squeeze(tmpPrec) IndicEq[icel] = tmpInde; icel = icel+1; indexUsedLin = np.reshape(indexUsed, -1, order='F'); #compute omega for each group of cells, given precursor p for i in range(val.size): x0 = [1, 2]; ind = np.where(indexUsedLin==i)[0]; inp1 = PrecPatch[ind,:]; inp2 = IndicEq[ind]; iop = lambda inp1,beta1,beta2: inv.InvDistN_opt_prec([beta1,beta2],inp1,rad); [mdl,r,J,CovB] = nlin.nlinfit(iop,inp1,inp2.ravel(),x0); ci2[i,PREC] = nlpa.nlparci(r,J); CovB2[i,PREC] = CovB; alphaTmp[i] = mdl[0]; omegaTmp[i] = mdl[1]; #repeat result for each belonging to a given group for ic in range(0, nx): for ir in range(0, ny): if flagRegioMat[ir,ic]>0: indexUsed = np.where(val==potency[ir,ic])[0]; alpha[ir,ic,PREC] = alphaTmp[indexUsed]; omega[ir,ic,PREC] = omegaTmp[indexUsed]; del(PrecPatch,IndicEq,indexUsed,potency,speed) #rescale to initial spatial resolution, through nearest interpolation #initialize variable omegaFinal = np.zeros((conf.Prec.shape[0],conf.Prec.shape[1],5)); #loop on precursors for i in range(0, nPrec): #define interpolator object xgv = np.arange(1., conf.Prec.shape[0]/4+1); ygv = np.arange(1., conf.Prec.shape[1]/4+1); F=interpol.RegularGridInterpolator((xgv, ygv), omega[:,:,i],method='nearest',bounds_error=False, fill_value=None); #interpolate Xq = np.arange(1., conf.Prec.shape[0]/4+1, 1/4); Yq = np.arange(1., conf.Prec.shape[1]/4+1, 1/4); [Y2,X2] = np.meshgrid(Yq, Xq); pts=((X2.flatten(),Y2.flatten())) omegaFinal[:,:,i] = F(pts).reshape(conf.Prec.shape[0],conf.Prec.shape[1]) print('precursor interpolated: '+str(i)); #store final results conf.omegaFinalStep1 = omegaFinal; conf.ci2Step1 = ci2; conf.CovB2Step1 = CovB2;
python
"""ibc client module data objects.""" from __future__ import annotations import attr from terra_proto.ibc.core.client.v1 import Height as Height_pb from terra_sdk.util.json import JSONSerializable __all__ = ["Height"] @attr.s class Height(JSONSerializable): revision_number: int = attr.ib(default=0, converter=int) revision_height: int = attr.ib(default=0, converter=int) def to_amino(self) -> dict: return { "revision_number": self.revision_number, "revision_height": self.revision_height } @classmethod def from_data(cls, data: dict) -> Height: return cls( revision_number=data["revision_number"], revision_height=data["revision_height"], ) @classmethod def from_proto(cls, proto: Height_pb) -> Height: return cls( revision_number=proto.revision_number, revision_height=proto.revision_height, ) def to_proto(self) -> Height_pb: return Height_pb( revision_number=self.revision_number, revision_height=self.revision_height )
python
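# Usage sketch for Height above: amino/data round trip. Importing the module
# requires terra_proto and terra_sdk to be installed; the values here are
# illustrative only.
h = Height(revision_number=1, revision_height=123456)
assert h.to_amino() == {"revision_number": 1, "revision_height": 123456}
assert Height.from_data(h.to_amino()) == h  # attrs-generated equality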
import numpy as np import scipy as scp from numpy.linalg import norm ############################################# # Add the one-folder-up-path import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), '../')) ############################################# from envs.blocking_env import BlockingEnv def test_create_environment(): x = 5 assert x == 5, 'test failed' # # env_info = { # 'agent_count_red': 4, # 'agent_count_blue': 4 # } # env = BlockingEnv(env_info)
python
from annotation_utils.ndds.structs import NDDS_Dataset dataset = NDDS_Dataset.load_from_dir('/home/clayton/workspace/prj/data_keep/data/ndds/measure_kume_map3_1_200', show_pbar=True) dataset.save_to_dir('save_test', show_pbar=True)
python
from datetime import date def run_example(): march_2020_15 = date(year=2020, month=3, day=15) print("march_2020_15.toordinal():", march_2020_15.toordinal()) print("march_2020_15.isocalendar():", march_2020_15.isocalendar()) if __name__ == "__main__": run_example()
python
from typing import List


class Solution:
    def XXX(self, nums: List[int]) -> int:
        # Kadane-style scan: nums[i] becomes the best subarray sum ending at i,
        # so the answer is the maximum over the mutated array.
        length = len(nums)
        if length <= 1:
            return nums[0]
        for i in range(1, length):
            sum_ = nums[i-1] + nums[i]
            if sum_ > nums[i]:
                nums[i] = sum_
        return max(nums)
python
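# Quick check of the maximum-subarray routine above (XXX is the scrubbed
# method name from the original snippet). Note the method mutates its input
# list with running prefix maxima.
s = Solution()
print(s.XXX([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 6  (subarray [4, -1, 2, 1])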
import itertools # Have the function ArrayAdditionI(arr) take the array of numbers stored in arr and return the string true if any combination of numbers in the array # (excluding the largest number) can be added up to equal the largest number in the array, otherwise return the string false. # For example: if arr contains [4, 6, 23, 10, 1, 3] the output should return true because 4 + 6 + 10 + 3 = 23. # The array will not be empty, will not contain all the same elements, and may contain negative numbers. def ArrayAdditionI(arr): # find max value in arr _max = max(arr) arr.remove(_max) _comb = [] # for i in range(1, len(arr)): # if arr[i] > _max: # _max = arr[i] for i in range(len(arr)+1): for cb in itertools.combinations(arr, i): _comb.append(cb) for i in _comb: if sum(int(x) for x in i) == _max: return True return False test = [3,5,-1,8,12] print(ArrayAdditionI(test))
python
# -*- coding: utf-8 -*- from aliyun.api.rest import * from aliyun.api.base import FileItem
python
total_pf = {{packing_fraction}} poly_coeff = {{polynomial_triso}}
python
# Declare Variables name = input() # Seller's name salary = float(input()) # Seller's salary sales = float(input()) # Sale's total made by the seller in the month # Calculate salary with bonus total = salary + (sales * .15) # Show result print("Total = R$ {:.2f}".format(total))
python
from contextlib import contextmanager
import sys


@contextmanager
def stdout_translator(stream):
    # Temporarily redirect stdout to the given stream, restoring it on exit.
    old_stdout = sys.stdout
    sys.stdout = stream
    try:
        yield
    finally:
        sys.stdout = old_stdout


def read_translation(stream):
    out = stream.getvalue()
    outs = out.split('\n')
    # Use enumerate rather than list.index so duplicate lines are handled
    # correctly; skip the trailing empty element left by the final newline.
    for i, item in enumerate(outs):
        if i + 1 != len(outs):
            if 'coverage' in item:
                item = item.replace('coverage', 'covfefe')
            else:
                item += ' covfefe'
            print(item)
python
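# Usage sketch for the helpers above: capture stdout into a StringIO, then run
# the captured text through read_translation.
import io

buffer = io.StringIO()
with stdout_translator(buffer):
    print("coverage report")
    print("all tests passed")

read_translation(buffer)
# prints:
#   covfefe report
#   all tests passed covfefe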
#!/usr/bin/env python3 import os import redis import json from flask import Flask, render_template, redirect, request, url_for, make_response #r = redis.Redis(host='123.12.148.95', port='15379', password='ABCDEFG1231LQ4L') if 'VCAP_SERVICES' in os.environ: VCAP_SERVICES = json.loads(os.environ['VCAP_SERVICES']) CREDENTIALS = VCAP_SERVICES["rediscloud"][0]["credentials"] r = redis.Redis(host=CREDENTIALS["hostname"], port=CREDENTIALS["port"], password=CREDENTIALS["password"]) else: r = redis.Redis(host='127.0.0.1', port='6379') app = Flask(__name__) @app.route('/') def survey(): resp = make_response(render_template('survey.html')) return resp @app.route('/suthankyou.html', methods=['POST']) def suthankyou(): ## This is how you grab the contents from the form f = request.form['feedback'] ## Now you can now do someting with variable "f" print ("The feedback received was:") print (f) resp = """ <h3> - THANKS FOR TAKING THE SURVEY - </h3> <a href="/"><h3>Back to main menu</h3></a> """ return resp if __name__ == "__main__": app.run(debug=False, host='0.0.0.0', \ port=int(os.getenv('PORT', '5000')), threaded=True)
python
import battlecode as bc import sys import traceback import time import pathFinding #TODO: remove random and use intelligent pathing import random totalTime = 0 start = time.time() #build my environment gc = bc.GameController() directions = list(bc.Direction) #get the starting map myMap = gc.starting_map(gc.planet()) #get my team name my_team = gc.team() #get the details of the orbit orbit = gc.orbit_pattern() #TOTO:research, currently only gets the first level of rockets gc.queue_research(bc.UnitType.Rocket) #count my starting units, and find out where the enemy spawned enemyx = 0 enemyy = 0 friendlyx = 0 friendlyy = 0 myStartingUnits = 0 #TODO:account for starting off world for unit in myMap.initial_units: if unit.team != my_team: enemyLocation = unit.location enemyx = enemyLocation.map_location().x enemyy = enemyLocation.map_location().y continue if unit.team == my_team: myStartingUnits += 1 friendlyx = unit.location.map_location().x friendlyy = unit.location.map_location().y continue #processes the map into an int field thisMap = pathFinding.pathPlanetMap(myMap) resourcesMap = pathFinding.miningMap(thisMap,myMap) #enemyx,enemyy is the starting locations of(at least one) of the enemies bots #I am making the assumption that they stay near there #start = time.time() #if we are mars, figure out 1 safe landing spot for each wholy blocked off zone #and send it to earth #TODO: a 50*50 map with a full grid of 1*1 accessable squares may exceed the num of team array slots, should cap at ~10 if gc.planet() == bc.Planet.Mars: print("we on mars") landingZones = pathFinding.landingZone(thisMap) for zone in range(0,len(landingZones)): gc.write_team_array(zone*2,landingZones[zone][0]) gc.write_team_array(zone*2+1,landingZones[zone][1]) if gc.planet() == bc.Planet.Earth: landingZones = [] #TODO:map testing #TODO: generalize map again, multiple destinations(one for each enemy bot, store the targets so i can recalculate the field every x turns? 
myMap = pathFinding.pathMap(thisMap, enemyx, enemyy) #reverseMap = pathFinding.pathMap(myMap, friendlyx, friendlyy) #end = time.time() #print("did the map thing in:") #print(end-start) #print(myMap.initial_units) #unit counters init numFactories = 0 numRockets = 0 numWorkers = 0 numKnights = 0 numRangers = 0 numMages = 0 numHealers = 0 factoryCount = 0 rocketCount = 0 workerCount = myStartingUnits knightCount = 0 rangerCount = 0 mageCount = 0 healerCount = 0 end = time.time() totalTime+= end-start #logic for each unit type def factoryLogic(): #TODO: build order/rations ect if gc.can_produce_robot(unit.id, bc.UnitType.Ranger) and numRangers < (5*numHealers+5):#make this a ratio gc.produce_robot(unit.id, bc.UnitType.Ranger) if gc.can_produce_robot(unit.id, bc.UnitType.Healer) and numRangers *5 > numHealers: gc.produce_robot(unit.id, bc.UnitType.Healer) if len(unit.structure_garrison()) > 0: myDirections = pathFinding.whereShouldIGo(myMap, unit.location.map_location().x, unit.location.map_location().y) for d in myDirections: if gc.can_unload(unit.id, d): gc.unload(unit.id, d) return def workerLogic(): #If i am on a map if unit.location.is_on_map():#TODO: testing rockets and maps things, remove False #get valid directions around me myDirections = pathFinding.whereShouldIGo(myMap, unit.location.map_location().x, unit.location.map_location().y) #find out what else is near me nearby = gc.sense_nearby_units(unit.location.map_location(), 50) nearbyWorkers = 0 for other in nearby: if gc.can_build(unit.id, other.id):#if its something I can build, then I should gc.build(unit.id, other.id) continue if other.unit_type == unit.unit_type and other.team == unit.team:#note, this unit shows up here, so +1 nearbyWorkers +=1#we cound the number of other workers we can see if other.unit_type == bc.UnitType.Rocket and other.team == unit.team: print(len(other.structure_garrison())) if len(other.structure_garrison()) == 0: #distanceTo = unit.location.map_location().distance_squared_to(other.location.map_location()) #print(distanceTo) if gc.can_load(other.id, unit.id): gc.load(other.id, unit.id) else: me = unit.location.map_location() them = other.location.map_location() directionToThem = me.direction_to(them) if gc.is_move_ready(unit.id) and gc.can_move(unit.id, directionToThem): gc.move_robot(unit.id, directionToThem) if numWorkers < 5:#if there arent enough, we build more workers for d in reversed(myDirections):#we want to buid the worker as far from the enemy as possible without moving if gc.can_replicate(unit.id, d): gc.replicate(unit.id, d) #TODO:factories on again """ if numFactories < 5:#if their arent many factories reporting in if gc.karbonite() > bc.UnitType.Factory.blueprint_cost():#can we afford it for d in myDirections:#furthest from the enemy again if gc.can_blueprint(unit.id, bc.UnitType.Factory, d):#if the direction is valid for building print("built factory") gc.blueprint(unit.id, bc.UnitType.Factory, d) """ #if numFactories > 3 and numWorkers > 5: if numWorkers > 5: if gc.karbonite() > bc.UnitType.Rocket.blueprint_cost() and gc.research_info().get_level(bc.UnitType.Rocket) > 0: for d in reversed(myDirections): if gc.can_blueprint(unit.id, bc.UnitType.Rocket, d): gc.blueprint(unit.id, bc.UnitType.Rocket, d) #next we want to harvest all the kryponite, we also want to track if we have harvested any #TODO: harvest and/or move at all haveHarvested = 0 for direction in myDirections: if gc.can_harvest(unit.id, direction): haveHarvested = 1 #print("found dirt") gc.harvest(unit.id, direction) #TODO:spread 
out to make sure we harvest all kryptonite on the map if haveHarvested == 0: #print("no dirt") for d in reversed(myDirections): if gc.is_move_ready(unit.id) and gc.can_move(unit.id, d): #print(d) gc.move_robot(unit.id, d) #basicly do a fill, if i cant see another worker, make one, gather any kryponite i can see, then move slowly out from my corner """ #TODO: be picky about building placement if unit.location.is_on_map(): # and unit.location.is_on_planet(bc.Planet.Earth): nearby = gc.sense_nearby_units(unit.location.map_location(), 2) for other in nearby: if gc.can_build(unit.id, other.id): gc.build(unit.id, other.id) continue if gc.can_load(other.id, unit.id): gc.load(other.id, unit.id) else: if numRockets < 1: if gc.karbonite() > bc.UnitType.Rocket.blueprint_cost() and gc.can_blueprint(unit.id, bc.UnitType.Rocket, d) and gc.research_info().get_level(bc.UnitType.Rocket) > 0: #numRockets+=1#because we just built one, saves us making many at a time#makes numRockets local, breaks functionality print("built rocket") gc.blueprint(unit.id, bc.UnitType.Rocket, d) if numFactories < 5: if gc.karbonite() > bc.UnitType.Factory.blueprint_cost() and gc.can_blueprint(unit.id, bc.UnitType.Factory, d): print("built factory") gc.blueprint(unit.id, bc.UnitType.Factory, d) """ return def rocketLogic(): if unit.location.is_on_planet(bc.Planet.Mars): myDirections = pathFinding.whereShouldIGo(myMap, unit.location.map_location().x, unit.location.map_location().y) for d in myDirections: if gc.can_unload(unit.id, d): gc.unload(unit.id, d) elif unit.location.is_on_planet(bc.Planet.Earth): #TODO:wait until has someone in before launch garrison = len(unit.structure_garrison()) #print("waitin on friends") if garrison > 0: if len(landingZones)>0: myx = landingZones[0][0] myy = landingZones[0][1] print("im going where im told") else: myx = unit.location.map_location().x myy = unit.location.map_location().y print("we lazy") destination = bc.MapLocation(bc.Planet.Mars, myx, myy) print("we takin off boys") #TODO:make sure destination is a valid landing zone, currently keeps x,y from earth if gc.can_launch_rocket(unit.id, destination): del landingZones[0] gc.launch_rocket(unit.id, destination) return def knightLogic(): #TODO: movement and attack logic if unit.location.is_on_map(): nearby = gc.sense_nearby_units(unit.location.map_location(), unit.vision_range) myDirections = pathFinding.whereShouldIGo(myMap, unit.location.map_location().x, unit.location.map_location().y) knightsNearby = 0 for other in nearby: if other.unit_type == unit.unit_type and other.team == unit.team: knightsNearby+=1 if other.team != unit.team and gc.is_attack_ready(unit.id) and gc.can_attack(unit.id, other.id): gc.attack(unit.id, other.id) if other.team != unit.team: me = unit.location.map_location() them = other.location.map_location() directionToThem = me.direction_to(them) if gc.is_move_ready(unit.id) and gc.can_move(unit.id, directionToThem): gc.move_robot(unit.id, directionToThem) #print(myDirections) for d in myDirections: if gc.is_move_ready(unit.id) and gc.can_move(unit.id, d): #print(d) gc.move_robot(unit.id, d) return def rangerLogic(): #TODO: movement and attack logic #print("i'm alive") #TODO: dont move into my minimum range if unit.location.is_on_map(): nearby = gc.sense_nearby_units(unit.location.map_location(), unit.vision_range) myDirections = pathFinding.whereShouldIGo(myMap, unit.location.map_location().x, unit.location.map_location().y) rangersNearby = 0 for other in nearby: if other.unit_type == unit.unit_type and other.team == 
unit.team: rangersNearby+=1 if other.team != unit.team and gc.is_attack_ready(unit.id) and gc.can_attack(unit.id, other.id): gc.attack(unit.id, other.id) if other.team != unit.team: distanceTo = unit.location.map_location().distance_squared_to(other.location.map_location()) myRange = unit.attack_range() if distanceTo < myRange: #move away for d in reversed(myDirections): if gc.is_move_ready(unit.id) and gc.can_move(unit.id,d): gc.move_robot(unit.id,d) else: me = unit.location.map_location() them = other.location.map_location() directionToThem = me.direction_to(them) if gc.is_move_ready(unit.id) and gc.can_move(unit.id, directionToThem): gc.move_robot(unit.id, directionToThem) #outside range, inside view range, move closer #print(myDirections) for d in myDirections: if gc.is_move_ready(unit.id) and gc.can_move(unit.id, d): #print(d) gc.move_robot(unit.id, d) #since I have moved, check again if there is anything to shoot for other in nearby: if other.team != unit.team and gc.is_attack_ready(unit.id) and gc.can_attack(unit.id, other.id): gc.attack(unit.id, other.id) #TODO: wait for friends #TODO: once i dont have enemies, full map search #if there are 3? other rangers nearme, then move toward target return def mageLogic(): #TODO: movement and attack logic if unit.location.is_on_map(): nearby = gc.sense_nearby_units(unit.location.map_location(), unit.vision_range) myDirections = pathFinding.whereShouldIGo(myMap, unit.location.map_location().x, unit.location.map_location().y) magesNearby = 0 for other in nearby: if other.unit_type == unit.unit_type and other.team == unit.team: magesNearby+=1 if other.team != unit.team and gc.is_attack_ready(unit.id) and gc.can_attack(unit.id, other.id): gc.attack(unit.id, other.id) if other.team != unit.team: distanceTo = unit.location.map_location().distance_squared_to(other.location.map_location()) myRange = unit.attack_range() if distanceTo < myRange: #move away for d in reversed(myDirections): if gc.is_move_ready(unit.id) and gc.can_move(unit.id,d): gc.move_robot(unit.id,d) else: me = unit.location.map_location() them = other.location.map_location() directionToThem = me.direction_to(them) if gc.is_move_ready(unit.id) and gc.can_move(unit.id, directionToThem): gc.move_robot(unit.id, directionToThem) #outside range, inside view range, move closer #print(myDirections) for d in myDirections: if gc.is_move_ready(unit.id) and gc.can_move(unit.id, d): #print(d) gc.move_robot(unit.id, d) return def healerLogic(): #TODO: movement and heal logic if unit.location.is_on_map(): nearby = gc.sense_nearby_units(unit.location.map_location(), unit.vision_range) for other in nearby:#find the nearest ranger and follow them if other.unit_type == bc.UnitType.Ranger: me = unit.location.map_location() them = other.location.map_location() directionToThem = me.direction_to(them) if gc.is_move_ready(unit.id) and gc.can_move(unit.id, directionToThem): gc.move_robot(unit.id, directionToThem) return #turn loop while True: try: start = time.time() #TODO:testing communications delay and potential offloading work to mars #communications delay is 50 if gc.planet() == bc.Planet.Earth and gc.round() == 52: commArray = gc.get_team_array(bc.Planet.Mars) for i in range(0,10,2): x=commArray[i] y=commArray[i+1] landingZones.append([x,y]) #print("Recieved:", gc.round()) #print(landingZones) """ if gc.planet() == bc.Planet.Mars: index = 0 value = 1 gc.write_team_array(index,value) """ #print(gc.karbonite())#proves karbonite is shared accross planets #unit counters numFactories = factoryCount 
numWorkers = workerCount numRockets = rocketCount numKnights = knightCount numRangers = rangerCount numMages = mageCount numHealers = healerCount factoryCount = 0 rocketCount = 0 workerCount = 0 knightCount = 0 rangerCount = 0 mageCount = 0 healerCount = 0 #turn logic goes here, #we seperate into a function for each unit type, #and count the number of each unit we have #so we can have build ratios and limits for unit in gc.my_units(): if unit.unit_type == bc.UnitType.Factory: factoryCount+=1 factoryLogic() continue if unit.unit_type == bc.UnitType.Rocket: rocketCount+=1 rocketLogic() continue if unit.unit_type == bc.UnitType.Worker: if unit.location.is_on_map(): workerCount+=1 workerLogic() continue if unit.unit_type == bc.UnitType.Knight: knightCount+=1 knightLogic() continue if unit.unit_type == bc.UnitType.Ranger: rangerCount+=1 rangerLogic() continue if unit.unit_type == bc.UnitType.Mage: mageCount+=1 mageLogic() continue if unit.unit_type == bc.UnitType.Healer: healerCount+=1 healerLogic() continue #TODO: remove time keeping end = time.time() totalTime+= end-start #print(totalTime) except Exception as e: print('Error:', e) # use this to show where the error was traceback.print_exc() # send the actions we've performed, and wait for our next turn. gc.next_turn() # these lines are not strictly necessary, but it helps make the logs make more sense. # it forces everything we've written this turn to be written to the manager. sys.stdout.flush() sys.stderr.flush()
python
#!/usr/bin/env python import numpy as np # For efficient utilization of array import cv2 # Computer vision library import os # Here this package is used writing CLI commands import vlc_ctrl import time import pandas as pd import os # package used for controlling vlc media player import subprocess import tkinter as tk import math from tkinter import filedialog as fd from tkinter import messagebox from tkinter import Canvas from tkinter import * from PIL import Image, ImageTk root = tk.Tk() root.configure(background="#426999") load = Image.open("bg.png") render = ImageTk.PhotoImage(load) img = Label(image=render) img.image = render img.place(x=0, y=0) root.title('Vision Based Media Player') def write_slogan(): global filename filename = fd.askopenfilename() def play(): cap = cv2.VideoCapture(0) try: os.system("vlc-ctrl play -p "+filename) # Frontal face classifier is imported here face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') #LOADING HAND CASCADE hand_cascaderr = cv2.CascadeClassifier('Hand_haar_cascade.xml') hand_cascade = cv2.CascadeClassifier('hand.xml') count = 0 # Flag is used to pause and play the video [ if flag is 1 then the video plays else it doesn't ] Pauseflag = 0 try: while True: ret , img = cap.read() # For caturing the frame blur = cv2.GaussianBlur(img,(5,5),0) # BLURRING IMAGE TO SMOOTHEN EDGES grayc = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) hands = hand_cascade.detectMultiScale(grayc, 1.5, 2) contour = hands contour = np.array(contour) if count==0: if len(contour)==2: cv2.putText(img=img, text='Your engine started', org=(int(100 / 2 - 20), int(100 / 2)), fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=1, color=(0, 255, 0)) for (x, y, w, h) in hands: cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2) if count>0: if len(contour)>=2: pass elif len(contour)==1: subprocess.Popen(['vlc-ctrl', 'volume', '-0.1']) elif len(contour)==0: pass count+=1 grayh = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY) # BGR -> GRAY CONVERSION retval2,thresh1 = cv2.threshold(grayh,70,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) # THRESHOLDING IMAGE hand = hand_cascaderr.detectMultiScale(thresh1, 1.3, 5) # DETECTING HAND IN THE THRESHOLDE IMAGE mask = np.zeros(thresh1.shape, dtype = "uint8") # CREATING MASK for (x,y,w,h) in hand: # MARKING THE DETECTED ROI cv2.rectangle(img,(x,y),(x+w,y+h), (122,122,0), 2) cv2.rectangle(mask, (x,y),(x+w,y+h),255,-1) img2 = cv2.bitwise_and(thresh1, mask) final = cv2.GaussianBlur(img2,(7,7),0) contours, hierarchy = cv2.findContours(final, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) cv2.drawContours(img, contours, 0, (255,255,0), 3) cv2.drawContours(final, contours, 0, (255,255,0), 3) if len(contours) > 0: cnt=contours[0] hull = cv2.convexHull(cnt, returnPoints=False) # finding convexity defects defects = cv2.convexityDefects(cnt, hull) count_defects = 0 # applying Cosine Rule to find angle for all defects (between fingers) # with angle > 90 degrees and ignore defect if not (defects is None): for i in range(defects.shape[0]): p,q,r,s = defects[i,0] finger1 = tuple(cnt[p][0]) finger2 = tuple(cnt[q][0]) dip = tuple(cnt[r][0]) # find length of all sides of triangle a = math.sqrt((finger2[0] - finger1[0])**2 + (finger2[1] - finger1[1])**2) b = math.sqrt((dip[0] - finger1[0])**2 + (dip[1] - finger1[1])**2) c = math.sqrt((finger2[0] - dip[0])**2 + (finger2[1] - dip[1])**2) # apply cosine rule here angle = math.acos((b**2 + c**2 - a**2)/(2*b*c)) * 57.29 # ignore angles > 90 and highlight rest with red dots if angle <= 90: count_defects += 1 # define actions 
required if count_defects == 1: print("2") subprocess.Popen(['vlc-ctrl', 'volume', '+10%']) #cv2.putText(img,"THIS IS 2", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2) elif count_defects == 2: print("3") subprocess.Popen(['vlc-ctrl', 'volume', '+10%']) #cv2.putText(img, "THIS IS 3", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2) elif count_defects == 3: print("4") subprocess.Popen(['vlc-ctrl', 'volume', '+10%']) #cv2.putText(img,"This is 4", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2) elif count_defects == 4: print("5") subprocess.Popen(['vlc-ctrl', 'volume', '+10%']) #cv2.putText(img,"THIS IS 5", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2) # face detection section gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.3, 5) # Gets the x and y coordinates of the face as well the width and height of the face if detected for (x, y, w, h) in faces: print ("Face is facing front") os.system("vlc-ctrl play") time.sleep(0.2) Pauseflag = 1 # Face is detected hence play the video continuesly if Pauseflag == 0: # Face is not facing front hence pause the video print ("Face is not facing front") ti=time.asctime() m=ti[14:16] s=ti[17:19] mi=int(m) si=int(s) print(mi,si) os.system("vlc-ctrl pause") if mi==59: mi=00 else: co=mi+1 cs=si if mi==co and si==cs: os.system("systemct1 suspend") Pauseflag = 0 except KeyboardInterrupt: print ("Closing the application!!! [Interrupted]") cap.release() except: messagebox.showerror("warning", "upload the video") def fun(): messagebox.showinfo("Instructoions", "step1 : upload the video \n \nstep2 : Click the play Button \n\n step3 : If face fronts the camera then video will play else it will pause \n \nstep4 : Closed fist will decrease the volume opened hand will increase the volume") tk.Entry(root, width = 100).grid(row=0, column=0) tk.Button(root, text = "Upload",command=write_slogan, height = 2, width=8,fg = "black",activeforeground = "white",activebackground = "black").grid(row=1, column=0, pady = (40,50)) tk.Button(root, text = "How to use",command=fun).grid(row=4, column=0, pady = (180,50)) tk.Button(root, text = "play",command=play).grid(row=2, column=0, pady = (180,50)) tk.Entry(root, width = 100).grid(row=5, column=0) root.mainloop()
python
# -*- coding: utf-8 -*- import logging from dku_model_accessor.constants import DkuModelAccessorConstants from dku_model_accessor.preprocessing import Preprocessor from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor logger = logging.getLogger(__name__) class SurrogateModel(object): """ In case the chosen saved model uses a non-tree based algorithm (and thus does not have feature importance), we fit this surrogate model on top of the prediction of the former one to be able to retrieve the feature importance information. """ def __init__(self, prediction_type): self.check(prediction_type) self.feature_names = None self.target = None self.prediction_type = prediction_type # TODO should we define some params of RF to avoid long computation ? if prediction_type == DkuModelAccessorConstants.CLASSIFICATION_TYPE: self.clf = RandomForestClassifier(random_state=1407) else: self.clf = RandomForestRegressor(random_state=1407) def check(self, prediction_type): if prediction_type not in [DkuModelAccessorConstants.CLASSIFICATION_TYPE, DkuModelAccessorConstants.REGRRSSION_TYPE]: raise ValueError('Prediction type must either be CLASSIFICATION or REGRESSION.') def get_features(self): return self.feature_names def fit(self, df, target): preprocessor = Preprocessor(df, target) train, test = preprocessor.get_processed_train_test() train_X = train.drop(target, axis=1) train_Y = train[target] self.clf.fit(train_X, train_Y) self.feature_names = train_X.columns
python
import yaml try: # use faster C loader if available from yaml import CLoader as Loader except ImportError: from yaml import Loader # follows similar logic to cwrap, ignores !inc, and just looks for [[]] def parse(filename): with open(filename, 'r') as file: declaration_lines = [] declarations = [] in_declaration = False for line in file.readlines(): line = line.rstrip() if line == '[[': declaration_lines = [] in_declaration = True elif line == ']]': in_declaration = False declaration = yaml.load('\n'.join(declaration_lines), Loader=Loader) declarations.append(declaration) elif in_declaration: declaration_lines.append(line) return declarations
python
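# Usage sketch for parse() above, assuming the module is in scope: declarations
# are YAML blocks delimited by lines containing only "[[" and "]]". The
# declaration keys below are illustrative only.
import tempfile

sample = "[[\nname: add\narguments:\n  - THTensor* self\n]]\n"
with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as f:
    f.write(sample)

decls = parse(f.name)
print(decls[0]["name"])       # add
print(decls[0]["arguments"])  # ['THTensor* self']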
import re class Graph: def __init__(self, nodes, numWorkers=5): self.graph = {} for asciiCode in range(65, 91): self.graph[chr(asciiCode)] = [] # populate link nodes for node in nodes: if node.pre in self.graph: self.graph[node.pre].append(node.post) # sort link nodes into descending alphabetical order for key,val in self.graph.items(): val.sort() # visited nodes - initially empty self.visitedNodes = [] # available nodes - initially those nodes with no predecessors self.initialiseAvailableNodes() # PART 2 - add workers self.workers = [Worker(workerId+1) for workerId in range(0,numWorkers)] def outputVisitedNodes(self): output = '' for node in self.visitedNodes: output = f'{output}{node}' return output def initialiseAvailableNodes(self): self.availableNodes = [] for node in self.graph: predecessors = self.predecessors(node) if len(predecessors) == 0: self.availableNodes.append(node) self.availableNodes.sort() # list all the predecessors of given node i.e. all nodes that link to given node def predecessors(self, node): predecessors = [] for key, val in self.graph.items(): if node in val: predecessors.append(key) predecessors.sort() return predecessors def predecessorsAllVisited(self, node): # predecessors all visited if all preceeding nodes in visited nodes allVisited = True predecessors = self.predecessors(node) for predecessor in predecessors: if predecessor not in self.visitedNodes: allVisited = False break return allVisited def updateAvailableNodes(self, node): # update available nodes to: # 1. Include all successor nodes of given node # 2. Remove given node # Available nodes must not contain duplicated and must always be sorted in alphabetical order newAvailableNodes = self.graph[node] for newAvailableNode in newAvailableNodes: if not newAvailableNode in self.availableNodes: self.availableNodes.append(newAvailableNode) if node in self.availableNodes: self.availableNodes.remove(node) self.availableNodes.sort() def stepOrder(self): # while there are available nodes: # check each available node in order. # First node where all predecessors have been visited should be added to visited nodes #  Available nods are then updated to include all successors of just visited node (do not allow duplicates to be added to available nodes) and remove just visited node # Note: Available nodes must remain in alphabetical order #  Break and repeat self.visitedNodes = [] self.initialiseAvailableNodes() while len(self.availableNodes) > 0: for node in self.availableNodes: if self.predecessorsAllVisited(node): self.visitedNodes.append(node) self.updateAvailableNodes(node) break def starters(self, currentTime): # get all available nodes and workers # assign available nodes to available workers availableWorkers = [worker for worker in self.workers if worker.available()] availableNodesWithPre = [node for node in self.availableNodes if self.predecessorsAllVisited(node)] availableWorkerIndex = len(availableWorkers) - 1 for currNode in availableNodesWithPre: if availableWorkerIndex >= 0: avWorker = availableWorkers[availableWorkerIndex] avWorker.workingOn = currNode avWorker.finishTime = currentTime + (ord(currNode) - ord('A') + 1) + 60 self.availableNodes.remove(currNode) availableWorkerIndex -= 1 def finishers(self, currentTime): # any workers finishing at currentTime? 
for worker in self.workers: if worker.finishTime == currentTime: node = worker.workingOn worker.workingOn = None worker.finishTime = None self.visitedNodes.append(node) self.updateAvailableNodes(node) def workersAllAvailable(self): return len([worker for worker in self.workers if worker.available()]) == len(self.workers) def timeToCompleteSteps(self): # Part 2 currentTime = 1 self.visitedNodes = [] self.initialiseAvailableNodes() complete = False while not complete: self.finishers(currentTime) # check if complete if len(self.availableNodes) == 0: complete = self.workersAllAvailable() if not complete: self.starters(currentTime) currentTime += 1 return currentTime-1 class Worker: def __init__(self,workerId): self.workerId = workerId self.workingOn = None self.finishTime = None def available(self): return self.workingOn == None def unavailable(self): return not self.available() class Node: def __init__(self, pre, post): self.pre = pre self.post = post def processFile(filename): with open(filename, "r") as input: nodes = [Node(line.strip()[5], line.strip()[-12]) for line in input] return nodes # solve puzzle nodes = processFile("day7.txt") graph = Graph(nodes) # Part 1 - Work out order in which steps should be completed graph.stepOrder() print(f'Step Order: {graph.outputVisitedNodes()}') # Part 2 - Time to complete all steps # 5 workers available each step takes 60 seconds plus number of seconds corresponding to its letter A=1 (61), B=2 (62), .. Z=26 (86) # Available steps can begin simultaneously but where multiple steps are available they must still begin alphabetically time = graph.timeToCompleteSteps() print(f'Time to complete steps: {time} seconds. Step order: {graph.outputVisitedNodes()}')
python
# sql/expression.py # Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: https://www.opensource.org/licenses/mit-license.php """Defines the public namespace for SQL expression constructs. """ from ._dml_constructors import delete as delete from ._dml_constructors import insert as insert from ._dml_constructors import update as update from ._elements_constructors import all_ as all_ from ._elements_constructors import and_ as and_ from ._elements_constructors import any_ as any_ from ._elements_constructors import asc as asc from ._elements_constructors import between as between from ._elements_constructors import bindparam as bindparam from ._elements_constructors import case as case from ._elements_constructors import cast as cast from ._elements_constructors import collate as collate from ._elements_constructors import column as column from ._elements_constructors import desc as desc from ._elements_constructors import distinct as distinct from ._elements_constructors import extract as extract from ._elements_constructors import false as false from ._elements_constructors import funcfilter as funcfilter from ._elements_constructors import label as label from ._elements_constructors import not_ as not_ from ._elements_constructors import null as null from ._elements_constructors import nulls_first as nulls_first from ._elements_constructors import nulls_last as nulls_last from ._elements_constructors import or_ as or_ from ._elements_constructors import outparam as outparam from ._elements_constructors import over as over from ._elements_constructors import text as text from ._elements_constructors import true as true from ._elements_constructors import tuple_ as tuple_ from ._elements_constructors import type_coerce as type_coerce from ._elements_constructors import typing as typing from ._elements_constructors import within_group as within_group from ._selectable_constructors import alias as alias from ._selectable_constructors import cte as cte from ._selectable_constructors import except_ as except_ from ._selectable_constructors import except_all as except_all from ._selectable_constructors import exists as exists from ._selectable_constructors import intersect as intersect from ._selectable_constructors import intersect_all as intersect_all from ._selectable_constructors import join as join from ._selectable_constructors import lateral as lateral from ._selectable_constructors import outerjoin as outerjoin from ._selectable_constructors import select as select from ._selectable_constructors import table as table from ._selectable_constructors import tablesample as tablesample from ._selectable_constructors import union as union from ._selectable_constructors import union_all as union_all from ._selectable_constructors import values as values from .base import _from_objects as _from_objects from .base import _select_iterables as _select_iterables from .base import ColumnCollection as ColumnCollection from .base import Executable as Executable from .cache_key import CacheKey as CacheKey from .dml import Delete as Delete from .dml import Insert as Insert from .dml import Update as Update from .dml import UpdateBase as UpdateBase from .dml import ValuesBase as ValuesBase from .elements import _truncated_label as _truncated_label from .elements import BinaryExpression as BinaryExpression from .elements import BindParameter as BindParameter from .elements import 
BooleanClauseList as BooleanClauseList from .elements import Case as Case from .elements import Cast as Cast from .elements import ClauseElement as ClauseElement from .elements import ClauseList as ClauseList from .elements import CollectionAggregate as CollectionAggregate from .elements import ColumnClause as ColumnClause from .elements import ColumnElement as ColumnElement from .elements import Extract as Extract from .elements import False_ as False_ from .elements import FunctionFilter as FunctionFilter from .elements import Grouping as Grouping from .elements import Label as Label from .elements import literal as literal from .elements import literal_column as literal_column from .elements import Null as Null from .elements import Over as Over from .elements import quoted_name as quoted_name from .elements import ReleaseSavepointClause as ReleaseSavepointClause from .elements import RollbackToSavepointClause as RollbackToSavepointClause from .elements import SavepointClause as SavepointClause from .elements import TextClause as TextClause from .elements import True_ as True_ from .elements import Tuple as Tuple from .elements import TypeClause as TypeClause from .elements import TypeCoerce as TypeCoerce from .elements import UnaryExpression as UnaryExpression from .elements import WithinGroup as WithinGroup from .functions import func as func from .functions import Function as Function from .functions import FunctionElement as FunctionElement from .functions import modifier as modifier from .lambdas import lambda_stmt as lambda_stmt from .lambdas import LambdaElement as LambdaElement from .lambdas import StatementLambdaElement as StatementLambdaElement from .operators import ColumnOperators as ColumnOperators from .operators import custom_op as custom_op from .operators import Operators as Operators from .selectable import Alias as Alias from .selectable import AliasedReturnsRows as AliasedReturnsRows from .selectable import CompoundSelect as CompoundSelect from .selectable import CTE as CTE from .selectable import Exists as Exists from .selectable import FromClause as FromClause from .selectable import FromGrouping as FromGrouping from .selectable import GenerativeSelect as GenerativeSelect from .selectable import HasCTE as HasCTE from .selectable import HasPrefixes as HasPrefixes from .selectable import HasSuffixes as HasSuffixes from .selectable import Join as Join from .selectable import LABEL_STYLE_DEFAULT as LABEL_STYLE_DEFAULT from .selectable import ( LABEL_STYLE_DISAMBIGUATE_ONLY as LABEL_STYLE_DISAMBIGUATE_ONLY, ) from .selectable import LABEL_STYLE_NONE as LABEL_STYLE_NONE from .selectable import ( LABEL_STYLE_TABLENAME_PLUS_COL as LABEL_STYLE_TABLENAME_PLUS_COL, ) from .selectable import Lateral as Lateral from .selectable import ReturnsRows as ReturnsRows from .selectable import ScalarSelect as ScalarSelect from .selectable import Select as Select from .selectable import Selectable as Selectable from .selectable import SelectBase as SelectBase from .selectable import Subquery as Subquery from .selectable import TableClause as TableClause from .selectable import TableSample as TableSample from .selectable import TableValuedAlias as TableValuedAlias from .selectable import TextAsFrom as TextAsFrom from .selectable import TextualSelect as TextualSelect from .selectable import Values as Values from .visitors import Visitable as Visitable nullsfirst = nulls_first nullslast = nulls_last
python
import argparse from pathlib import Path import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.utils.data as data from PIL import Image, ImageFile from torchvision import transforms from tqdm import tqdm from template import imagenet_templates import fast_stylenet from sampler import InfiniteSamplerWrapper import clip from template import imagenet_templates import torch.nn.functional as F from torchvision.utils import save_image from torchvision.transforms.functional import adjust_contrast cudnn.benchmark = True Image.MAX_IMAGE_PIXELS = None ImageFile.LOAD_TRUNCATED_IMAGES = True import time def test_transform(): transform_list = [ transforms.Resize(size=(512, 512)), transforms.ToTensor() ] return transforms.Compose(transform_list) def hr_transform(): transform_list = [ transforms.ToTensor() ] return transforms.Compose(transform_list) class FlatFolderDataset(data.Dataset): def __init__(self, root, transform): super(FlatFolderDataset, self).__init__() self.root = root self.paths = list(Path(self.root).glob('*')) self.transform = transform def __getitem__(self, index): path = self.paths[index] img = Image.open(str(path)).convert('RGB') img = self.transform(img) return img def __len__(self): return len(self.paths) def name(self): return 'FlatFolderDataset' parser = argparse.ArgumentParser() parser.add_argument('--test_dir', type=str, default ='./test_set') parser.add_argument('--hr_dir', type=str) parser.add_argument('--vgg', type=str, default='models/vgg_normalised.pth') # training options parser.add_argument('--n_threads', type=int, default=16) parser.add_argument('--num_test', type=int, default=16) parser.add_argument('--decoder', type=str, default='./experiments/clip_decoder_pencil.pth.tar') args = parser.parse_args() device = torch.device('cuda') decoder = fast_stylenet.decoder vgg = fast_stylenet.vgg vgg.load_state_dict(torch.load(args.vgg)) vgg = nn.Sequential(*list(vgg.children())[:31]) decoder.load_state_dict(torch.load(args.decoder)) network = net.Net(vgg, decoder) network.eval() network.to(device) test_tf = test_transform() test_dataset = FlatFolderDataset(args.test_dir, test_tf) test_iter = iter(data.DataLoader( test_dataset, batch_size=args.num_test, num_workers=args.n_threads)) test_images1 = next(test_iter) test_images1 = test_images1.cuda() if args.hr_dir is not None: hr_tf = hr_transform() hr_dataset = FlatFolderDataset(args.hr_dir, hr_tf) hr_iter = iter(data.DataLoader( hr_dataset, batch_size=1, num_workers=args.n_threads)) hr_images = next(hr_iter) hr_images = hr_images.cuda() with torch.no_grad(): _, test_out1 = network( test_images1) test_out1 = adjust_contrast(test_out1,1.5) output_test = torch.cat([test_images1,test_out1],dim=0) output_name = './output_test/test.png' save_image(output_test, str(output_name),nrow=test_out1.size(0),normalize=True,scale_each=True) if args.hr_dir is not None: _, test_out = network(hr_images) test_out = adjust_contrast(test_out,1.5) output_name = './output_test/hr.png' save_image(test_out, str(output_name),nrow=test_out.size(0),normalize=True,scale_each=True)
python
# import os # import json # # target_dirs = [ 'home_1', 'home_2', 'home_3', 'real_v0', 'real_v1', 'real_v2', 'real_v3', 'human_label_kobeF2', 'victor_1'] # target_file = './data/' # for target_dir in target_dirs: # target_file += target_dir + '_' # target_file += 'output.json' # # output_images = {} # output_annotations = {} # # for idx, target_dir in enumerate(target_dirs): # target_json = os.path.join('./data', target_dir, 'annotations', 'output.json') # labels = json.load(open(target_json)) # if idx == 0: # output_images = labels['images'] # output_annotations = labels['annotations'] # for i in range(len(output_images)): # output_images[i]['file_name'] = os.path.join(target_dir, 'images', output_images[i]['file_name']) # output_images[i]['id'] = int(output_images[i]['id']) # for i in range(len(output_annotations)): # output_annotations[i]['image_id'] = int(output_annotations[i]['image_id']) # print(len(output_images)) # print(len(output_annotations)) # else: # temp_images = labels['images'] # temp_annotations = labels['annotations'] # for i in range(len(temp_images)): # temp_images[i]['file_name'] = os.path.join(target_dir, 'images', temp_images[i]['file_name']) # temp_images[i]['id'] = int(temp_images[i]['id']) + len(output_images) # for i in range(len(temp_annotations)): # temp_annotations[i]['image_id'] = int(temp_annotations[i]['image_id']) + len(output_images) # temp_annotations[i]['id'] = len(output_images) + i # # temp_annotations[i]['id'] = int(temp_annotations[i]['id']) + len(output_annotations) # # output_images.extend(temp_images) # output_annotations.extend(temp_annotations) # print(len(output_images)) # print(len(output_annotations)) # output_json = { # 'images': output_images, # 'annotations': output_annotations # } # # with open(target_file, 'w') as f: # json.dump(output_json, f) import os import json import datetime import numpy as np IsPrivacy = True if IsPrivacy: Privacyname = 'images_privacy' else: Privacyname = 'images' target_dirs = ['real_v0', 'real_v1', 'real_v2', 'real_v3', 'home_1', 'home_2', 'home_3', 'human_label_kobeF2', 'Virtual_V7', 'Virtual_V7_2', 'Virtual_V7_3', 'Virtual_V8_1', 'Virtual_victor_v1'] target_file = './data/' target_file = target_file + Privacyname + '_' for target_dir in target_dirs: target_file += target_dir + '_' target_file += 'keypoint.json' output_images = {} output_annotations = {} INFO = { "description": "Dataset", "url": "", "version": "0.1.0", "year": 2019, "contributor": "", "date_created": datetime.datetime.utcnow().isoformat(' ') } LICENSES = [ { "id": 1, "name": "", "url": "" } ] CATEGORIES = [ { 'id': 1, 'name': 'human', 'supercategory': 'human', 'keypoints': ["nose", "left_eye", "right_eye", "left_ear", "right_ear", "left_shoulder", "right_shoulder", "left_elbow", "right_elbow", "left_wrist", "right_wrist", "left_hip", "right_hip", "left_knee", "right_knee", "left_ankle", "right_ankle"], 'skeleton': [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]], } ] temp_id = 0 anotation_id = 0 for idx, target_dir in enumerate(target_dirs): target_json = os.path.join('./data', target_dir, 'annotations', 'output.json') labels = json.load(open(target_json)) if idx == 0: max_id = 0 output_images = labels['images'] output_annotations = labels['annotations'] for i in range(len(output_images)): output_images[i]['file_name'] = os.path.join(target_dir, Privacyname, output_images[i]['file_name']) output_images[i]['id'] = 
int(output_images[i]['id']) if output_images[i]['id'] > max_id: max_id = output_images[i]['id'] for i in range(len(output_annotations)): output_annotations[i]['image_id'] = int(output_annotations[i]['image_id']) output_annotations[i]['id'] = '{}'.format(anotation_id) anotation_id = anotation_id + 1 temp_id += max_id else: max_id = 0 temp_images = labels['images'] temp_annotations = labels['annotations'] for i in range(len(temp_images)): temp_images[i]['file_name'] = os.path.join(target_dir, Privacyname, temp_images[i]['file_name']) temp_images[i]['id'] = int(temp_images[i]['id']) + temp_id if temp_images[i]['id'] > max_id: max_id = temp_images[i]['id'] for i in range(len(temp_annotations)): temp_annotations[i]['image_id'] = int(temp_annotations[i]['image_id']) + temp_id temp_annotations[i]['id'] = '{}'.format(anotation_id) anotation_id = anotation_id + 1 # temp_annotations[i]['id'] = int(temp_annotations[i]['id']) + len(output_annotations) output_images.extend(temp_images) output_annotations.extend(temp_annotations) temp_id += max_id # check id is unique image_ids = [] annotation_ids = [] for i in range(len(output_images)): image_ids.append(output_images[i]['id']) for i in range(len(output_annotations)): annotation_ids.append(output_annotations[i]['id']) image_ids = np.array(image_ids) annotation_ids = np.array(annotation_ids) unique = False if len(image_ids) == len(np.unique(image_ids)): print('image_id is unique!') if len(annotation_ids) == len(np.unique(annotation_ids)): print('annotation_id is unique!') unique = True # save file output_json = { 'info': INFO, 'licenses': LICENSES, 'categories': CATEGORIES, 'images': output_images, 'annotations': output_annotations } if unique: with open(target_file, 'w') as f: json.dump(output_json, f) print('save annotation!')
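# Quick standalone illustration (hypothetical data, not from the datasets above) of
# the id-offsetting scheme used by this merge script: image ids from each additional
# directory are shifted by the running maximum so they stay unique, and annotation
# ids are simply renumbered sequentially in the same string format as above.
first = {'images': [{'id': 1}, {'id': 2}], 'annotations': [{'image_id': 1}]}
second = {'images': [{'id': 1}], 'annotations': [{'image_id': 1}]}

offset = max(img['id'] for img in first['images'])  # 2
merged_images = first['images'] + [
    {**img, 'id': img['id'] + offset} for img in second['images']]
merged_annotations = first['annotations'] + [
    {**ann, 'image_id': ann['image_id'] + offset} for ann in second['annotations']]
for new_id, ann in enumerate(merged_annotations):
    ann['id'] = '{}'.format(new_id)

assert [img['id'] for img in merged_images] == [1, 2, 3]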
python
# ================================================================================================== # Copyright 2013 Twitter, Inc. # -------------------------------------------------------------------------------------------------- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this work except in compliance with the License. # You may obtain a copy of the License in the LICENSE file, or at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ================================================================================================== from abc import ABCMeta from numbers import Integral, Real from sys import version_info as sys_version_info # TODO(wickman) Since the io package is available in 2.6.x, use that instead of # cStringIO/StringIO try: # CPython 2.x from cStringIO import StringIO except ImportError: try: # Python 2.x from StringIO import StringIO except: # Python 3.x from io import StringIO from io import BytesIO AbstractClass = ABCMeta('AbstractClass', (object,), {}) PY2 = sys_version_info[0] == 2 PY3 = sys_version_info[0] == 3 StringIO = StringIO BytesIO = BytesIO if PY3 else StringIO integer = (Integral,) real = (Real,) numeric = integer + real string = (str,) if PY3 else (str, unicode) bytes = (bytes,) if PY2: def to_bytes(st): if isinstance(st, unicode): return st.encode('utf-8') else: return str(st) else: def to_bytes(st): return st.encode('utf-8') if PY3: def exec_function(ast, globals_map): locals_map = globals_map exec(ast, globals_map, locals_map) return locals_map else: eval(compile( """ def exec_function(ast, globals_map): locals_map = globals_map exec ast in globals_map, locals_map return locals_map """, "<exec_function>", "exec")) if PY3: from contextlib import contextmanager, ExitStack @contextmanager def nested(*context_managers): enters = [] with ExitStack() as stack: for manager in context_managers: enters.append(stack.enter_context(manager)) yield tuple(enters) else: from contextlib import nested __all__ = ( 'AbstractClass', 'BytesIO', 'PY2', 'PY3', 'StringIO', 'bytes', 'exec_function', 'nested', 'string', 'to_bytes', )
python
__author__ = 'Kalyan'

from placeholders import *

# For most of these tests use the interpreter to fill up the blanks.
# type(object) -> returns the object's type.

def test_numbers_types():
    assert "int" == type(7).__name__
    assert "float" == type(7.5).__name__
    assert "long" == type(10L).__name__

def test_numbers_int_arithmetic_operations():
    assert 30 == 10 + 20
    assert 200 == 10 * 20
    assert 32 == 2 ** 5
    assert -10 == 10 - 20
    assert 2 == 7/3

def test_numbers_string_to_int():
    """hint: execute print int.__doc__ in python console to find out what int(..) does"""
    assert 255 == int("FF", 16)
    assert 63 == int("77", 8)

def test_numbers_int_to_string():
    assert "012" == oct(10)
    assert "0x64" == hex(100)
    assert "0b11111111" == bin(255)

def test_numbers_long():
    """Long is not the long in c"""
    assert 1606938044258990275541962092341162602522202993782792835301376L == 2 ** 200

# Being comfortable with number bases mentally is important and it is routinely asked
# in interviews as a quick test of a candidate.
#
# Replace the __ with the correct string representation by working it out on paper
# (don't use any code or console).
#
# Read the following links:
# http://courses.cs.vt.edu/~cs1104/number_conversion/convexp.html
# https://docs.python.org/2/library/functions.html#int
def test_numbers_base():
    assert 255 == int("11111111", 2)
    assert 254 == int("FE", 16)
    assert 121 == int("232", 7)
    assert 675 == int("pp", 26)

three_things_i_learnt = """
-base conversions
-length function
-difference between type and instanceof
"""
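# Worked example for the last assertion above (not part of the original koan): in
# base 26 the digit 'p' has value 25 (digits run 0-9, then a=10 ... p=25), so
# "pp" = 25 * 26 + 25 = 675. The same check in code:
digit = 'abcdefghijklmnopqrstuvwxyz'.index('p') + 10  # 25
assert digit * 26 + digit == 675 == int("pp", 26)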
python
# This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. import bpy from mathutils import Vector class AMTH_NODE_OT_AddTemplateVignette(bpy.types.Operator): bl_idname = "node.template_add_vignette" bl_label = "Add Vignette" bl_description = "Add a vignette effect" bl_options = {"REGISTER", "UNDO"} @classmethod def poll(cls, context): space = context.space_data return space.type == "NODE_EDITOR" \ and space.node_tree is not None \ and space.tree_type == "CompositorNodeTree" # used as reference the setup scene script from master nazgul def _setupNodes(self, context): scene = context.scene space = context.space_data tree = scene.node_tree has_act = True if tree.nodes.active else False bpy.ops.node.select_all(action="DESELECT") ellipse = tree.nodes.new(type="CompositorNodeEllipseMask") ellipse.width = 0.8 ellipse.height = 0.4 blur = tree.nodes.new(type="CompositorNodeBlur") blur.use_relative = True blur.factor_x = 30 blur.factor_y = 50 ramp = tree.nodes.new(type="CompositorNodeValToRGB") ramp.color_ramp.interpolation = "B_SPLINE" ramp.color_ramp.elements[1].color = (0.6, 0.6, 0.6, 1) overlay = tree.nodes.new(type="CompositorNodeMixRGB") overlay.blend_type = "OVERLAY" overlay.inputs[0].default_value = 0.8 overlay.inputs[1].default_value = (0.5, 0.5, 0.5, 1) tree.links.new(ellipse.outputs["Mask"], blur.inputs["Image"]) tree.links.new(blur.outputs["Image"], ramp.inputs[0]) tree.links.new(ramp.outputs["Image"], overlay.inputs[2]) if has_act: tree.links.new(tree.nodes.active.outputs[0], overlay.inputs[1]) if has_act: overlay.location = tree.nodes.active.location overlay.location += Vector((350.0, 0.0)) else: overlay.location += Vector( (space.cursor_location[0], space.cursor_location[1])) ellipse.location = overlay.location ellipse.location += Vector((-715.0, -400)) ellipse.inputs[0].hide = True ellipse.inputs[1].hide = True blur.location = ellipse.location blur.location += Vector((300.0, 0.0)) blur.inputs["Size"].hide = True ramp.location = blur.location ramp.location += Vector((175.0, 0)) ramp.outputs["Alpha"].hide = True for node in (ellipse, blur, ramp, overlay): node.select = True node.show_preview = False bpy.ops.node.join() frame = ellipse.parent frame.label = "Vignette" frame.use_custom_color = True frame.color = (0.1, 0.1, 0.1) overlay.parent = None overlay.label = "Vignette Overlay" def execute(self, context): self._setupNodes(context) return {"FINISHED"}
python
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Training Script for STFTGAN on a waveform dataset. Follows the same setup as SpecPhaseGAN, but generates STFTs instead of Magnitude and Instantaneous Frequency. """ import os import tensorflow as tf from audio_synthesis.structures import spec_gan from audio_synthesis.models import wgan from audio_synthesis.datasets import waveform_dataset from audio_synthesis.utils import waveform_save_helper as save_helper from audio_synthesis.utils import spectral # Setup Paramaters D_UPDATES_PER_G = 5 Z_DIM = 64 BATCH_SIZE = 64 EPOCHS = 1800 SAMPLING_RATE = 16000 FFT_FRAME_LENGTH = 512 FFT_FRAME_STEP = 128 Z_IN_SHAPE = [4, 8, 1024] SPECTOGRAM_IMAGE_SHAPE = [-1, 128, 256, 2] CHECKPOINT_DIR = '_results/representation_study/SpeechMNIST/STFTGAN_HR/training_checkpoints/' RESULT_DIR = '_results/representation_study/SpeechMNIST/STFTGAN_HR/audio/' DATASET_PATH = 'data/SpeechMNIST_1850.npz' def main(): os.environ['CUDA_VISIBLE_DEVICES'] = '0' print('Num GPUs Available: ', len(tf.config.experimental.list_physical_devices('GPU'))) raw_dataset = waveform_dataset.get_stft_dataset( DATASET_PATH, frame_length=FFT_FRAME_LENGTH, frame_step=FFT_FRAME_STEP ) generator = spec_gan.Generator(channels=2, in_shape=Z_IN_SHAPE) discriminator = spec_gan.Discriminator(input_shape=SPECTOGRAM_IMAGE_SHAPE) generator_optimizer = tf.keras.optimizers.Adam(1e-4, beta_1=0.5, beta_2=0.9) discriminator_optimizer = tf.keras.optimizers.Adam(1e-4, beta_1=0.5, beta_2=0.9) get_waveform = lambda stft:\ spectral.stft_2_waveform( stft, FFT_FRAME_LENGTH, FFT_FRAME_STEP )[0] save_examples = lambda epoch, real, generated:\ save_helper.save_wav_data( epoch, real, generated, SAMPLING_RATE, RESULT_DIR, get_waveform ) stft_gan_model = wgan.WGAN( raw_dataset, generator, [discriminator], Z_DIM, generator_optimizer, discriminator_optimizer, discriminator_training_ratio=D_UPDATES_PER_G, batch_size=BATCH_SIZE, epochs=EPOCHS, checkpoint_dir=CHECKPOINT_DIR, fn_save_examples=save_examples ) stft_gan_model.restore('ckpt-100', 1000) stft_gan_model.train() if __name__ == '__main__': main()
python
# File to explore the difference between the error function relying on Hoeffding's bound and the one relying on the # bound of Maurer and Pontil. import os import sys import configparser import numpy as np directory = os.path.dirname(os.path.dirname(os.path.expanduser(__file__))) sys.path.append(directory) path_config = configparser.ConfigParser() path_config.read(os.path.join(directory, 'paths.ini')) spibb_path = path_config['PATHS']['spibb_path'] sys.path.append(spibb_path) from wet_chicken_discrete.dynamics import WetChicken from wet_chicken_discrete.baseline_policy import WetChickenBaselinePolicy from batch_rl_algorithms.soft_spibb import ApproxSoftSPIBB import spibb_utils if __name__ == '__main__': nb_iterations = 50 seed = 1602421836 seed = 1 np.random.seed(seed) log = True # ratio = 0.9 epsilon = 0.1 delta = 1 gamma = 0.95 length = 5 width = 5 max_turbulence = 3.5 max_velocity = 3 nb_states = length * width nb_actions = 5 learning_rate = 0.5 max_nb_it = 10 ** 5 epsilon_baseline = 0.1 order_epsilon = np.inf order_learning_rate = 3 episodic = False results = [] wet_chicken = WetChicken(length=length, width=width, max_turbulence=max_turbulence, max_velocity=max_velocity) pi_baseline = WetChickenBaselinePolicy(env=wet_chicken, gamma=gamma, method='heuristic', order_epsilon=order_epsilon, learning_rate=learning_rate, max_nb_it=max_nb_it, epsilon=epsilon_baseline, order_learning_rate=order_learning_rate) pi_b = pi_baseline.pi P = wet_chicken.get_transition_function() R = wet_chicken.get_reward_function() r_reshaped = spibb_utils.get_reward_model(P, R) length_trajectory = 10000 trajectory = spibb_utils.generate_batch_wet_chicken(length_trajectory, wet_chicken, pi_b) approx_soft_spibb = ApproxSoftSPIBB(pi_b=pi_b, gamma=gamma, nb_states=nb_states, nb_actions=nb_actions, data=trajectory, R=R, delta=delta, epsilon=epsilon, error_kind='hoeffding', episodic=episodic, checks=False) e_hoeffding = np.nan_to_num(approx_soft_spibb.errors, nan=0, posinf=0) approx_soft_spibb = ApproxSoftSPIBB(pi_b=pi_b, gamma=gamma, nb_states=nb_states, nb_actions=nb_actions, data=trajectory, R=R, delta=delta, epsilon=epsilon, error_kind='mpeb', episodic=episodic, checks=False, g_max=40) e_mpeb = np.nan_to_num(approx_soft_spibb.errors, nan=0, posinf=0) print(f'L1 distance (interpreted as long vector instead of matrix) : {np.sum(np.abs(e_hoeffding - e_mpeb))}') # count_state_action = approx_soft_spibb.count_state_action print(f'Hi')
python
import keras from keras.preprocessing.image import ImageDataGenerator from keras.models import Sequential from keras.layers import Conv2D, MaxPooling2D, ConvLSTM2D from keras.layers import Activation, Dropout, Flatten, Dense, LeakyReLU from keras.layers import LSTM, TimeDistributed, Lambda, BatchNormalization from keras import optimizers from keras import backend as K import tensorflow as tf from matplotlib import pyplot as plt from IPython.display import clear_output img_width, img_height = 4101, 247 train_data_dir = '/training' validation_data_dir = 'validation' multiplier = 1 num_classes = 9 nb_train_samples = multiplier*num_classes*70 nb_validation_samples = multiplier*num_classes*20 epochs = 50 batch_size = 10 if K.image_data_format() == 'channels_first': input_shape = (3, img_width, img_height) else: input_shape = (img_width, img_height, 3) class PlotLearning(keras.callbacks.Callback): def on_train_begin(self, logs={}): self.i = 0 self.x = [] self.losses = [] self.val_losses = [] self.acc = [] self.val_acc = [] self.fig = plt.figure() self.logs = [] def on_epoch_end(self, epoch, logs={}): self.logs.append(logs) self.x.append(self.i) self.losses.append(logs.get('loss')) self.val_losses.append(logs.get('val_loss')) self.acc.append(logs.get('categorical_accuracy')) self.val_acc.append(logs.get('val_categorical_accuracy')) self.i += 1 clear_output(wait=True) color1 = 'tab:red' color2 = 'tab:blue' fig, ax1 = plt.subplots(figsize=(10, 6)) ax1.set_xlabel('Epoch',size=24) ax1.set_ylabel('Loss',color=color1,size=24) ax1.plot(self.x, self.losses, label="tr_loss",color=color1,linestyle='dashed') ax1.plot(self.x, self.val_losses, label="val_loss",color=color1) ax1.tick_params(axis='x', labelsize = 16) ax1.tick_params(axis='y_train', labelcolor=color1, labelsize = 14) ax1.legend(loc='center right',fontsize=16,bbox_to_anchor=(0.4, 1.1),ncol = 2) ax2 = ax1.twinx() ax2.set_ylabel('Accuracy',color=color2,size=24) ax2.plot(self.x, self.acc, label="tr_accuracy",color=color2,linestyle='dashed') ax2.plot(self.x, self.val_acc, label="val_accuracy",color=color2) ax2.tick_params(axis='y_train', labelcolor=color2, labelsize = 16) ax2.legend(loc='center right',fontsize=16, bbox_to_anchor=(1.1, 1.1),ncol = 2) fig.tight_layout() plt.show(); plot_losses = PlotLearning() model = Sequential() #CNN: model.add(Conv2D(8, (3, 3), input_shape=input_shape)) model.add(LeakyReLU(alpha=0.01)) model.add(MaxPooling2D(pool_size=(2, 2), padding = 'same')) model.add(Dropout(0.5)) model.add(Conv2D(16, (3, 3), padding = 'same')) model.add(LeakyReLU(alpha=0.01)) model.add(MaxPooling2D(pool_size=(2, 2), padding = 'same')) model.add(Dropout(0.5)) model.add(Conv2D(32, (3, 3), padding = 'same')) model.add(LeakyReLU(alpha=0.01)) model.add(MaxPooling2D(pool_size=(2, 2), padding = 'same')) model.add(Dropout(0.5)) model.add(Flatten()) #MLP: model.add(Dense(128)) model.add(LeakyReLU(alpha=0.01)) model.add(Dropout(0.5)) model.add(Dense(16)) model.add(LeakyReLU(alpha=0.01)) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation='softmax')) adam = optimizers.Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8, decay = 1e-6, amsgrad = False) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy']) model.summary() train_datagen = ImageDataGenerator(rescale = 1. / 255) test_datagen = ImageDataGenerator(rescale = 1. 
/ 255) train_generator = train_datagen.flow_from_directory( train_data_dir, target_size=(img_width, img_height), batch_size=batch_size, color_mode='rgb', class_mode='categorical') validation_generator = test_datagen.flow_from_directory( validation_data_dir, target_size=(img_width, img_height), batch_size=batch_size, color_mode='rgb', class_mode='categorical') model.fit_generator( train_generator, steps_per_epoch=nb_train_samples // batch_size, epochs=epochs, callbacks=[plot_losses], validation_data=validation_generator, validation_steps=nb_validation_samples // batch_size) model_json = model.to_json() with open("conv_lstm_model.json", "w") as json_file: json_file.write(model_json) model.save("predictor.h5") print("Saved conv_lstm_model to disk")
python
# -*- coding: utf-8 -*- from nseta.analytics.model import * from nseta.common.history import historicaldata from nseta.common.log import tracelog, default_logger from nseta.plots.plots import * from nseta.cli.inputs import * from nseta.archives.archiver import * import click from datetime import datetime __all__ = ['create_cdl_model'] @click.command(help='Create candlestick model.Plot uncovered patterns') @click.option('--symbol', '-S', help='Security code') @click.option('--start', '-s', help='Start date in yyyy-mm-dd format') @click.option('--end', '-e', help='End date in yyyy-mm-dd format') @click.option('--file', '-o', 'file_name', help='Output file name. Default is {symbol}.csv') @click.option('--steps/--no-steps', default=False, help='--steps for saving intermediate steps in output file') @click.option('--clear', '-c', default=False, is_flag=True, help='Clears the cached data for the given options.') @click.option('--format', '-f', default='csv', type=click.Choice(['csv', 'pkl']), help='Output format, pkl - to save as Pickel and csv - to save as csv') @tracelog def create_cdl_model(symbol, start, end, file_name, steps, clear, format): if not validate_inputs(start, end, symbol): print_help_msg(create_cdl_model) return sd = datetime.strptime(start, '%Y-%m-%d').date() ed = datetime.strptime(end, '%Y-%m-%d').date() try: if clear: arch = archiver() arch.clearcache(response_type=ResponseType.History, force_clear=False) historyinstance = historicaldata() df = historyinstance.daily_ohlc_history(symbol, sd, ed, type=ResponseType.History) df = df.sort_values(by='Date',ascending=True) df.set_index('Date', inplace=True) df = model_candlestick(df, steps) click.echo('\n{}\n'.format(df.to_string(index=False))) except Exception as e: default_logger().debug(e, exc_info=True) click.secho('Failed to create candlestick model', fg='red', nl=True) return except SystemExit: pass if not file_name: file_name = symbol + '.' + format if format == 'csv': df.to_csv(file_name) else: df.to_pickle(file_name) default_logger().debug('Model saved to: {}'.format(file_name)) default_logger().debug('Candlestick pattern model plot saved to: {}'.format(symbol +'_candles.html')) click.secho('Model saved to: {}'.format(file_name), fg='green', nl=True) try: plot_candlestick(df, symbol, 'Candlestick Pattern Model Recognition for ' + symbol) click.secho('Candlestick pattern model plot saved to: {}'.format(symbol +'_candles.html'), fg='green', nl=True) except Exception as e: default_logger().debug(e, exc_info=True) click.secho('Failed to plot candlestick pattern for the model', fg='red', nl=True) return except SystemExit: pass
python
import socket

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.bind(('127.0.0.1', 50007))
    s.listen(1)
    while True:
        conn, addr = s.accept()
        with conn:
            while True:
                data = conn.recv(1024)
                if not data:
                    break
                print('data: {}, addr: {}'.format(data, addr))
                conn.sendall(b'Received: ' + data)
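# A minimal client sketch (an assumption, not part of the original snippet): save it
# as a separate script and run it while the server above is listening on
# 127.0.0.1:50007.
#
# import socket
# with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as c:
#     c.connect(('127.0.0.1', 50007))
#     c.sendall(b'hello')
#     print(c.recv(1024))   # expected: b'Received: hello'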
python
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# ganben: MontyLemmatiser port of montylingua 2.1
import re
# think about import


class MontyLemmatiser:
    # the original implementation read an `.mdf` file as its db;
    # a bigger db file is obviously needed here
    xtag_morph_zhcn_corpus = ''  # add a real source
    exceptions_source = ''  # file or db, see LEMMAEXCEPTION.MDF
    regular_any = []
    regular_verb = []
    regular_noun = []
    regular_adj = []
    regular_operators = []  # operator/operands concept
    # regular can be the default option
    irregular_re_any = []
    irregular_re_verb = []
    # irregular can be a tf model/db
    irregular_nouns = []  # additional irregular nouns (was `+= []`, a NameError)

    def __init__(self):
        # TODO: load the six rule tables (regular any/verb/noun, irregular
        # any/verb/noun) from the .mdf sources; empty for now.
        filename_str = []
        if filename_str:
            (self.regular_any, self.regular_verb, self.regular_noun,
             self.irregular_re_any, self.irregular_re_verbs,
             self.irregular_re_nouns) = [
                [[re.compile('^' + rule[0].lower() + '$')] + rule[1:]
                 for rule in table]
                for table in filename_str]

    def lemmatise_untagged_sentence(self, untagged):
        pass  # TODO: not implemented in this port

    def lemmatise_tagged_sentence(self, tagged):
        pass  # TODO: not implemented in this port

    def lemmatise_word(self, word, pos=""):
        pass  # TODO: not implemented in this port

    def verify_lemmatiser(self):
        pass  # TODO: not implemented in this port

    def make_verification_dictionary(self):
        return  # TODO: not implemented in this port

    def fix_case(self, word1, word2):
        return  # TODO: not implemented in this port

    def _re_match_helper(self, re_kb, word):
        return  # TODO: not implemented in this port

    def find_irregular_morph(self, word, pos=""):
        a1 = self._re_match_helper
        groupnames1 = self.find_irregular_morph
        cron_dictr = a1(self.irregular_re_any, word)
        return
python
import logging
from typing import List, Tuple, Dict

import psycopg2

from src.tools.utils import read_config


class Postgres:
    def __init__(self, config: Dict = None):
        self.connection = None
        self.cursor = None
        self.connect(config)

    def connect(self, config: Dict = None) -> None:
        config = config or read_config()
        pg_con_params = config.get('postgresql')
        assert pg_con_params
        self.connection = psycopg2.connect(**pg_con_params)
        logging.info("Database connection established")
        self.cursor = self.connection.cursor()

    def execute(self, req: str) -> None:
        self.cursor.execute(req)

    def fetch_data(self) -> List[Tuple]:
        return self.cursor.fetchall()

    def commit(self) -> None:
        self.connection.commit()

    def exec_file(self, filepath: str):
        with open(filepath, 'r') as f:
            self.cursor.execute(f.read())
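# Usage sketch (an assumption, not part of the original module): it presumes the
# config returned by read_config() has a "postgresql" section with valid connection
# parameters (host/port/dbname/user/password) for a reachable database.
if __name__ == '__main__':
    db = Postgres()          # connects using read_config()
    db.execute('SELECT 1')
    print(db.fetch_data())   # [(1,)]
    db.commit()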
python
import os from typing import Any, Dict, Literal import wandb from wicker.core.config import get_config from wicker.core.definitions import DatasetID from wicker.core.storage import S3PathFactory def version_dataset( dataset_name: str, dataset_version: str, entity: str, metadata: Dict[str, Any], dataset_backend: Literal["s3"] = "s3", ) -> None: """ Version the dataset on Weights and Biases using the config parameters defined in wickerconfig.json. Args: dataset_name: The name of the dataset to be versioned dataset_version: The version of the dataset to be versioned entity: Who the run will belong to metadata: The metadata to be logged as an artifact, enforces dataclass for metadata documentation dataset_backend: The backend where the dataset is stored, currently only supports s3 """ # needs to first acquire and set wandb creds # WANDB_API_KEY, WANDB_BASE_URL # _set_wandb_credentials() # needs to init the wandb run, this is going to be a 'data' run dataset_run = wandb.init(project="dataset_curation", name=f"{dataset_name}_{dataset_version}", entity=entity) # grab the uri of the dataset to be versioned dataset_uri = _identify_s3_url_for_dataset_version(dataset_name, dataset_version, dataset_backend) # establish the artifact and save the dir/s3_url to the artifact data_artifact = wandb.Artifact(f"{dataset_name}_{dataset_version}", type="dataset") data_artifact.add_reference(dataset_uri, name="dataset") # save metadata dict to the artifact data_artifact.metadata["version"] = dataset_version data_artifact.metadata["s3_uri"] = dataset_uri for key, value in metadata.items(): data_artifact.metadata[key] = value # save the artifact to the run dataset_run.log_artifact(data_artifact) # type: ignore dataset_run.finish() # type: ignore def _set_wandb_credentials() -> None: """ Acquire the weights and biases credentials and load them into the environment. This load the variables into the environment as ENV Variables for WandB to use, this function overrides the previously set wandb env variables with the ones specified in the wicker config if they exist. """ # load the config config = get_config() # if the keys are present in the config add them to the env wandb_config = config.wandb_config for field in wandb_config.__dataclass_fields__: # type: ignore attr = wandb_config.__getattribute__(field) if attr is not None: os.environ[str(field).upper()] = attr else: if str(field).upper() not in os.environ: raise EnvironmentError( f"Cannot use W&B without setting {str(field.upper())}. " f"Specify in either ENV or through wicker config file." ) def _identify_s3_url_for_dataset_version( dataset_name: str, dataset_version: str, dataset_backend: Literal["s3"] = "s3", ) -> str: """ Identify and return the s3 url for the dataset and version specified in the backend. Args: dataset_name: name of the dataset to retrieve url dataset_version: version of the dataset to retrieve url dataset_backend: backend of the dataset to retrieve url Returns: The url pointing to the dataset on storage """ schema_path = "" if dataset_backend == "s3": # needs to do the parsing work to fetch the correct s3 uri schema_path = S3PathFactory().get_dataset_assets_path(DatasetID(name=dataset_name, version=dataset_version)) return schema_path
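# Usage sketch with hypothetical names (not values taken from the original module):
# the dataset name, version, entity, and metadata keys below are placeholders, and
# the call assumes W&B credentials plus an s3-backed wicker dataset are configured.
if __name__ == "__main__":
    version_dataset(
        dataset_name="my_dataset",      # hypothetical
        dataset_version="0.0.1",        # hypothetical
        entity="my_team",               # hypothetical
        metadata={"description": "example run"},
        dataset_backend="s3",
    )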
python
from .munger import *  # noqa
from .munger_link_only import *  # noqa
python
#!/usr/bin/env python # encoding: utf-8 """ Test _extend_kb_with_fixed_labels from core """ import pyqms import sys import unittest TESTS = [ # { # 'in' : { # 'params' : { # 'molecules' : ['KLEINERTEST'], # 'charges' : [2, ], # 'fixed_labels' : { # 'R' : ['C(-6) 13C(6) N(-4) 15N(4)'] # }, # } # }, # 'out' : { # 'formated_molecule' : ['KLEINER0TEST'], # } # }, { "in": { "params": { "molecules": ["KLEINERTEST"], "charges": [2], "fixed_labels": { "R": ["", "C(-6) 13C(6) N(-4) 15N(4)"], "K": ["", "C(-6) 13C(6) N(-4) 15N(4)"], }, } }, "out": { "formated_molecule": sorted( ["K0LEINER0TEST", "K1LEINER0TEST", "K1LEINER1TEST", "K0LEINER1TEST"] ) }, }, { "in": { "params": { "molecules": ["KLEINERTEST"], "charges": [2], "fixed_labels": { "R": ["", "C(-6) 13C(6) N(-4) 15N(4)"], "K": ["", "C(-6) 13C(6) N(-4) 15N(4)"], }, "params": {"SILAC_AAS_LOCKED_IN_EXPERIMENT": ["K", "R"]}, } }, "out": {"formated_molecule": sorted(["K0LEINER0TEST", "K1LEINER1TEST"])}, }, { "in": { "params": { "molecules": ["KLEINERTEST"], "charges": [2], "fixed_labels": { "R": ["", "C(-6) 13C(6) N(-4) 15N(4)"], "K": ["", "C(-6) 13C(6) N(-4) 15N(4)"], "I": ["FOCK"], }, "params": {"SILAC_AAS_LOCKED_IN_EXPERIMENT": ["K", "R"]}, } }, "out": {"formated_molecule": sorted(["K0LEI0NER0TEST", "K1LEI0NER1TEST"])}, }, { "in": { "params": { "molecules": ["KLEINERTEST"], "charges": [2], "fixed_labels": { "R": ["", "C(-6) 13C(6) N(-4) 15N(4)"], "K": ["", "C(-6) 13C(6) N(-4) 15N(4)"], "I": ["FOCK", ""], }, "params": {"SILAC_AAS_LOCKED_IN_EXPERIMENT": ["K", "R"]}, } }, "out": { "formated_molecule": sorted( ["K0LEI0NER0TEST", "K1LEI0NER1TEST", "K0LEI1NER0TEST", "K1LEI1NER1TEST"] ) }, }, { "in": { "params": { "molecules": ["KLEINERTEST"], "charges": [2], "fixed_labels": { "R": ["", "C(-6) 13C(6) N(-4) 15N(4)"], "K": ["", "C(-6) 13C(6) N(-4) 15N(4)"], "I": ["FOCK", ""], "L": ["Noo", "Way"], }, "params": {"SILAC_AAS_LOCKED_IN_EXPERIMENT": ["K", "R"]}, } }, "out": { "formated_molecule": sorted( [ "K0L0EI0NER0TEST", "K1L0EI0NER1TEST", "K0L0EI1NER0TEST", "K1L0EI1NER1TEST", "K0L1EI0NER0TEST", "K1L1EI0NER1TEST", "K0L1EI1NER0TEST", "K1L1EI1NER1TEST", ] ) }, }, ] # 2 isotope element (N,nitrogen) CRASH_TESTS = { "in": { "params": { "molecules": ["KLEINERTEST"], "charges": [2], "fixed_labels": { # non existing aa "U": ["C(-6) 13C(6) N(-4) 15N(4)"] }, } }, "out": { # 'formated_molecule' : ['KLEINER0TEST'], }, } def extend_kb_with_fixed_labels_test(): for test_id, test_dict in enumerate(TESTS): yield _extend_kb_with_fixed_labels, test_id, test_dict def _extend_kb_with_fixed_labels(test_id, test_dict): lib_1 = pyqms.IsotopologueLibrary(**test_dict["in"]["params"]) print(lib_1.lookup["molecule fixed label variations"]) formula_1 = list(lib_1.keys())[0] # __oOo__ lookup_key = test_dict["in"]["params"]["molecules"][0] for label_percentile in lib_1[formula_1]["env"].keys(): assert ( sorted(list(lib_1.lookup["molecule fixed label variations"][lookup_key])) == test_dict["out"]["formated_molecule"] ) class TestResults(unittest.TestCase): def setUp(self): pass def crash_test(self): """ Check if a key error is raised when using a non existent amino acid """ with self.assertRaises(SystemExit) as system_exit_check: pyqms.IsotopologueLibrary(**CRASH_TESTS["in"]["params"]) self.assertEqual(system_exit_check.exception.code, 1) if __name__ == "__main__": pass
python
'''
EXERCISE 015: Car Rental
Write a program that asks how many km a rented car was driven and for how many
days it was rented. Compute the price to pay, knowing that the car costs
R$ 60 per day plus R$ 0.15 per km driven.
'''
from time import sleep


def menu_inicial():
    print('=' * 15, 'By Portela', '=' * 15, '\n')
    print('-' * 15, 'Aluguel de carros', '-' * 15)


def valor_pago():
    t = (d * 60) + (r * 0.15)
    print(f'Total a pagar é de R${t:.2f}.')


def lin():
    print('-' * 29)


def fim():
    for contagem in range(0, 1):
        print('Saindo...')
        sleep(6)
    print('Muito obrigado, volte sempre.')


if __name__ == '__main__':
    menu_inicial()
    n = 'N'
    while n == 'N':
        lin()
        d = float(input('Quantos dias alugados? '))
        lin()
        lin()
        r = float(input('Quantos Km rodados? '))
        lin()
        valor_pago()
        lin()
        n = str(input('Deseja sair do programa? ')).upper()
        lin()
    fim()
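# Worked example of the pricing rule above (illustrative, not part of the exercise):
# 3 days and 200 km -> 3 * 60 + 200 * 0.15 = 180.00 + 30.00 = R$ 210.00.
total = 3 * 60 + 200 * 0.15
assert abs(total - 210.0) < 1e-9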
python
#!/usr/bin/env python3
# coding: utf8

"""
Day 5: Alchemical Reduction part 2
https://adventofcode.com/2018/day/5
"""

from string import ascii_lowercase


def reactPolymer(polymer):
    pats = []
    pats += [c + c.upper() for c in ascii_lowercase]
    pats += [c.upper() + c for c in ascii_lowercase]

    reactedPolymer = polymer
    while True:
        for pat in pats:
            reactedPolymer = reactedPolymer.replace(pat, '')
        if polymer == reactedPolymer:
            break
        else:
            polymer = reactedPolymer

    return reactedPolymer


def main():
    with open('day05input.txt') as f:
        line = f.readline()
    line = line.strip()

    polymers = [line] * 26
    for i, c in enumerate(ascii_lowercase):
        polymers[i] = polymers[i].replace(c, '').replace(c.upper(), '')

    print(min([len(reactPolymer(x)) for x in polymers]))


if __name__ == '__main__':
    main()
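# Small sanity checks for reactPolymer (illustrative additions, not from the original
# script): adjacent units of the same letter with opposite case annihilate until
# nothing more reacts.
assert reactPolymer('aA') == ''
assert reactPolymer('abBA') == ''        # 'bB' reacts first, then the exposed 'aA'
assert reactPolymer('abAB') == 'abAB'    # only same-letter, opposite-case pairs react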
python
from pathlib import Path import os import random import json import itertools import copy import torch from torch.utils.data import Dataset, DataLoader, BatchSampler, RandomSampler, \ SequentialSampler from torchvision import transforms import numpy as np import cv2 import PIL import scipy.io import glob from . import utils default_data_dir = Path(__file__).resolve().parent.parent / "data" # Set default paths if "DReye" not in os.environ: os.environ["DReye_DATA_DIR"] = str(default_data_dir / "New_DReye") if "DADA2000_DATA_DIR" not in os.environ: os.environ["DADA2000_DATA_DIR"] = str(default_data_dir / "DADA") if "DT16_DATA_DIR" not in os.environ: os.environ["DT16_DATA_DIR"] = str(default_data_dir / "DT16") if "BDDA_DATA_DIR" not in os.environ: os.environ["BDDA_DATA_DIR"] = str(default_data_dir / "BDDA") config_path = Path(__file__).resolve().parent / "cache" # os.environ["DADA2000_DATA_DIR"] = "/media/acl/7A4A85A74A85612D/01_Driver_Gaze/TASED_Net_DADA/data" def get_dataloader(src='DHF1K'): if src in ('MIT1003',): return ImgSizeDataLoader return DataLoader class ImgSizeBatchSampler: def __init__(self, dataset, batch_size=1, shuffle=False, drop_last=False): assert(isinstance(dataset, MIT1003Dataset)) self.batch_size = batch_size self.shuffle = shuffle self.drop_last = drop_last out_size_array = [ dataset.size_dict[img_idx]['out_size'] for img_idx in dataset.samples] self.out_size_set = sorted(list(set(out_size_array))) self.sample_idx_dict = { out_size: [] for out_size in self.out_size_set} for sample_idx, img_idx in enumerate(dataset.samples): self.sample_idx_dict[dataset.size_dict[img_idx]['out_size']].append( sample_idx) self.len = 0 self.n_batches_dict = {} for out_size, sample_idx_array in self.sample_idx_dict.items(): this_n_batches = len(sample_idx_array) // self.batch_size self.len += this_n_batches self.n_batches_dict[out_size] = this_n_batches def __iter__(self): batch_array = list(itertools.chain.from_iterable( [out_size for _ in range(n_batches)] for out_size, n_batches in self.n_batches_dict.items())) if not self.shuffle: random.seed(27) random.shuffle(batch_array) this_sample_idx_dict = copy.deepcopy(self.sample_idx_dict) for sample_idx_array in this_sample_idx_dict.values(): random.shuffle(sample_idx_array) for out_size in batch_array: this_indices = this_sample_idx_dict[out_size][:self.batch_size] del this_sample_idx_dict[out_size][:self.batch_size] yield this_indices def __len__(self): return self.len class ImgSizeDataLoader(DataLoader): def __init__(self, dataset, batch_size=1, shuffle=False, drop_last=False, **kwargs): if batch_size == 1: if shuffle: sampler = RandomSampler(dataset) else: sampler = SequentialSampler(dataset) batch_sampler = BatchSampler(sampler, batch_size, drop_last) else: batch_sampler = ImgSizeBatchSampler( dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last) super().__init__(dataset, batch_sampler=batch_sampler, **kwargs) def get_optimal_out_size(img_size): ar = img_size[0] / img_size[1] min_prod = 100 max_prod = 120 ar_array = [] size_array = [] for n1 in range(7, 14): for n2 in range(7, 14): if min_prod <= n1 * n2 <= max_prod: this_ar = n1 / n2 this_ar_ratio = min((ar, this_ar)) / max((ar, this_ar)) ar_array.append(this_ar_ratio) size_array.append((n1, n2)) max_ar_ratio_idx = np.argmax(np.array(ar_array)).item() bn_size = size_array[max_ar_ratio_idx] out_size = tuple(r * 32 for r in bn_size) return out_size class FolderVideoDataset(Dataset): def __init__(self, images_path, frame_modulo=None, source=None): self.images_path = 
images_path self.frame_modulo = frame_modulo or 5 self.preproc_cfg = { 'rgb_mean': (0.485, 0.456, 0.406), 'rgb_std': (0.229, 0.224, 0.225), } frame_files = sorted(list(images_path.glob("*"))) frame_files = [file for file in frame_files if file.suffix in ('.png', '.jpg', '.jpeg')] self.frame_files = frame_files self.vid_nr_array = [0] self.n_images_dict = {0: len(frame_files)} img = cv2.imread(str(frame_files[0])) img_size = tuple(img.shape[:2]) self.target_size_dict = {0: img_size} if source == 'DHF1K' and img_size == (360, 640): self.out_size = (224, 384) elif source == 'Hollywood': self.out_size = (224, 416) elif source == 'UCFSports': self.out_size = (256, 384) else: self.out_size = get_optimal_out_size(img_size) def load_frame(self, f_nr): frame_file = self.frame_files[f_nr - 1] frame = cv2.imread(str(frame_file)) if frame is None: raise FileNotFoundError(frame_file) frame = np.ascontiguousarray(frame[:, :, ::-1]) return frame def preprocess_sequence(self, frame_seq): transformations = [] transformations.append(transforms.ToPILImage()) transformations.append(transforms.Resize( self.out_size, interpolation=PIL.Image.LANCZOS)) transformations.append(transforms.ToTensor()) if 'rgb_mean' in self.preproc_cfg: transformations.append( transforms.Normalize( self.preproc_cfg['rgb_mean'], self.preproc_cfg['rgb_std'])) processing = transforms.Compose(transformations) tensor = [processing(img) for img in frame_seq] tensor = torch.stack(tensor) return tensor def get_data(self, vid_nr, start): n_images = self.n_images_dict[vid_nr] frame_nrs = list(range(start, n_images + 1, self.frame_modulo)) frame_seq = [self.load_frame(f_nr) for f_nr in frame_nrs] frame_seq = self.preprocess_sequence(frame_seq) target_size = self.target_size_dict[vid_nr] return frame_nrs, frame_seq, target_size def __len__(self): return len(self.samples) def __getitem__(self, item): return self.get_data(item, 0) class FolderImageDataset(Dataset): def __init__(self, images_path): self.images_path = images_path self.frame_modulo = 1 self.preproc_cfg = { 'rgb_mean': (0.485, 0.456, 0.406), 'rgb_std': (0.229, 0.224, 0.225), } image_files = sorted(list(images_path.glob("*"))) image_files = [file for file in image_files if file.suffix in ('.png', '.jpg', '.jpeg')] self.image_files = image_files self.n_images_dict = { img_idx: 1 for img_idx in range(len(self.image_files))} self.target_size_dict = {} self.out_size_dict = {} for img_idx, file in enumerate(image_files): img = cv2.imread(str(file)) img_size = tuple(img.shape[:2]) self.target_size_dict[img_idx] = img_size self.out_size_dict[img_idx] = get_optimal_out_size(img_size) def load_image(self, img_idx): image_file = self.image_files[img_idx] image = cv2.imread(str(image_file)) if image is None: raise FileNotFoundError(image_file) image = np.ascontiguousarray(image[:, :, ::-1]) return image def preprocess(self, img, out_size): transformations = [ transforms.ToPILImage(), transforms.Resize( out_size, interpolation=PIL.Image.LANCZOS), transforms.ToTensor(), ] if 'rgb_mean' in self.preproc_cfg: transformations.append( transforms.Normalize( self.preproc_cfg['rgb_mean'], self.preproc_cfg['rgb_std'])) processing = transforms.Compose(transformations) tensor = processing(img) return tensor def get_data(self, img_idx): file = self.image_files[img_idx] img = cv2.imread(str(file)) assert (img is not None) img = np.ascontiguousarray(img[:, :, ::-1]) out_size = self.out_size_dict[img_idx] img = self.preprocess(img, out_size) return [1], img, self.target_size_dict[img_idx] def __len__(self): return 
len(self.image_files) def __getitem__(self, item): return self.get_data(item, 0) ### class DReyeDataset(Dataset, utils.KwConfigClass): img_channels = 1 n_train_val_videos = 405 # 570 test_vid_nrs = (406, 780) #1110 frame_rate = 24 # note video 25fps and modify frame_modulo=4 source = 'DReye' dynamic = True def __init__(self, seq_len=12, frame_modulo=4, max_seq_len=1e6, preproc_cfg=None, out_size=(224, 384), phase='train', target_size=(360, 640), debug=False, val_size=27, n_x_val=3, x_val_step=2, x_val_seed=0, seq_per_vid=1, subset=None, verbose=1, n_images_file='DReye_n_images.dat', seq_per_vid_val=2, sal_offset=None): self.phase = phase self.train = phase == 'train' if not self.train: preproc_cfg = {} elif preproc_cfg is None: preproc_cfg = {} preproc_cfg.update({ 'rgb_mean': (0.485, 0.456, 0.406), 'rgb_std': (0.229, 0.224, 0.225), }) self.preproc_cfg = preproc_cfg self.out_size = out_size self.debug = debug self.val_size = val_size self.n_x_val = n_x_val self.x_val_step = x_val_step self.x_val_seed = x_val_seed self.seq_len = seq_len self.seq_per_vid = seq_per_vid self.seq_per_vid_val = seq_per_vid_val self.frame_modulo = frame_modulo self.clip_len = seq_len * frame_modulo self.subset = subset self.verbose = verbose self.n_images_file = n_images_file self.target_size = target_size self.sal_offset = sal_offset self.max_seq_len = max_seq_len self._dir = None self._n_images_dict = None self.vid_nr_array = None # Evaluation if phase in ('eval', 'test'): self.seq_len = int(1e6) if self.phase in ('test',): self.vid_nr_array = list(range( self.test_vid_nrs[0], self.test_vid_nrs[1] + 1)) self.samples, self.target_size_dict = self.prepare_samples() return # Cross-validation split n_videos = self.n_train_val_videos assert(self.val_size <= n_videos // self.n_x_val) assert(self.x_val_step < self.n_x_val) vid_nr_array = np.arange(1, n_videos + 1) if self.x_val_seed > 0: np.random.seed(self.x_val_seed) np.random.shuffle(vid_nr_array) val_start = (len(vid_nr_array) - self.val_size) //\ (self.n_x_val - 1) * self.x_val_step vid_nr_array = vid_nr_array.tolist() if not self.train: self.vid_nr_array =\ vid_nr_array[val_start:val_start + self.val_size] else: del vid_nr_array[val_start:val_start + self.val_size] self.vid_nr_array = vid_nr_array if self.subset is not None: self.vid_nr_array =\ self.vid_nr_array[:int(len(self.vid_nr_array) * self.subset)] self.samples, self.target_size_dict = self.prepare_samples() @property def n_images_dict(self): if self._n_images_dict is None: with open(config_path.parent / self.n_images_file, 'r') as f: self._n_images_dict = { idx + 1: int(line) for idx, line in enumerate(f) if idx + 1 in self.vid_nr_array} return self._n_images_dict @property def dir(self): if self._dir is None: self._dir = Path(os.environ["DReye_DATA_DIR"]) return self._dir @property def n_samples(self): return len(self.vid_nr_array) def __len__(self): return len(self.samples) def prepare_samples(self): samples = [] too_short = 0 too_long = 0 for vid_nr, n_images in self.n_images_dict.items(): if self.phase in ('eval', 'test'): samples += [ (vid_nr, offset + 1) for offset in range(self.frame_modulo)] continue # 帧数过小多大直接跳过 if n_images < self.clip_len: too_short += 1 continue if n_images // self.frame_modulo > self.max_seq_len: too_long += 1 continue # if self.phase == 'train': samples += [(vid_nr, None)] * self.seq_per_vid continue elif self.phase == 'valid': x = n_images // (self.seq_per_vid_val * 2) - self.clip_len // 2 start = max(1, x) end = min(n_images - self.clip_len, n_images - x) samples += [ 
(vid_nr, int(start)) for start in np.linspace(start, end, self.seq_per_vid_val)] continue # 打印数据集加载的基本信息 if self.phase not in ('eval', 'test') and self.n_images_dict: n_loaded = len(self.n_images_dict) - too_short - too_long print(f"{n_loaded} videos loaded " f"({n_loaded / len(self.n_images_dict) * 100:.1f}%)") print(f"{too_short} videos are too short " f"({too_short / len(self.n_images_dict) * 100:.1f}%)") print(f"{too_long} videos are too long " f"({too_long / len(self.n_images_dict) * 100:.1f}%)") target_size_dict = { vid_nr: self.target_size for vid_nr in self.n_images_dict.keys()} return samples, target_size_dict def get_frame_nrs(self, vid_nr, start): n_images = self.n_images_dict[vid_nr] if self.phase in ('eval', 'test'): return list(range(start, n_images + 1, self.frame_modulo)) return list(range(start, start + self.clip_len, self.frame_modulo)) def get_data_file(self, vid_nr, f_nr, dkey): if dkey == 'frame': folder = 'images' elif dkey == 'sal': folder = 'new_maps' elif dkey == 'fix': folder = 'fixation' else: raise ValueError(f'Unknown data key {dkey}') ### img_path = str(self.dir / f'{vid_nr:04d}' / folder/ f'{f_nr:04d}.png') return img_path def load_data(self, vid_nr, f_nr, dkey): read_flag = None if dkey == 'frame' else cv2.IMREAD_GRAYSCALE data_file = self.get_data_file(vid_nr, f_nr, dkey) if read_flag is not None: data = cv2.imread(str(data_file), read_flag) else: data = cv2.imread(str(data_file)) if data is None: raise FileNotFoundError(data_file) if dkey == 'frame': data = np.ascontiguousarray(data[:, :, ::-1]) if dkey == 'sal' and self.train and self.sal_offset is not None: data += self.sal_offset data[0, 0] = 0 return data def preprocess_sequence(self, frame_seq, dkey, vid_nr): transformations = [] if dkey == 'frame': transformations.append(transforms.ToPILImage()) transformations.append(transforms.Resize( self.out_size, interpolation=PIL.Image.LANCZOS)) transformations.append(transforms.ToTensor()) if dkey == 'frame' and 'rgb_mean' in self.preproc_cfg: transformations.append( transforms.Normalize( self.preproc_cfg['rgb_mean'], self.preproc_cfg['rgb_std'])) elif dkey == 'sal': transformations.append(transforms.Lambda(utils.normalize_tensor)) # elif dkey == 'fix': # transformations.append( # transforms.Lambda(lambda fix: torch.gt(fix, 0.5))) ##! 
processing = transforms.Compose(transformations) tensor = [processing(img) for img in frame_seq] tensor = torch.stack(tensor) return tensor def get_seq(self, vid_nr, frame_nrs, dkey): data_seq = [self.load_data(vid_nr, f_nr, dkey) for f_nr in frame_nrs] return self.preprocess_sequence(data_seq, dkey, vid_nr) def get_data(self, vid_nr, start): if start is None: max_start = self.n_images_dict[vid_nr] - self.clip_len + 1 if max_start == 1: start = max_start else: start = np.random.randint(1, max_start) frame_nrs = self.get_frame_nrs(vid_nr, start) frame_seq = self.get_seq(vid_nr, frame_nrs, 'frame') target_size = self.target_size_dict[vid_nr] # if self.phase == 'test' and self.source in ('DReye',): # return frame_nrs, frame_seq, target_size sal_seq = self.get_seq(vid_nr, frame_nrs, 'sal') fix_seq = torch.full(self.target_size, 0, dtype=torch.bool) # fix used for nss aucj and aucs # fix_seq = self.get_seq(vid_nr, frame_nrs, 'fix') # 用 sal_seq替换fix_seq return frame_nrs, frame_seq, sal_seq, fix_seq, target_size def __getitem__(self, item): vid_nr, start = self.samples[item] data = self.get_data(vid_nr, start) return data class DADA2000Dataset(Dataset, utils.KwConfigClass): img_channels = 1 n_train_val_videos = 797 test_vid_nrs = (798, 1013) frame_rate = 30 source = 'DADA200' dynamic = True def __init__(self, seq_len=12, frame_modulo=5, max_seq_len=1e6, preproc_cfg=None, out_size=(224, 538), phase='train', target_size=(224, 538), debug=False, val_size=100, n_x_val=3, x_val_step=2, x_val_seed=0, seq_per_vid=1, subset=None, verbose=1, n_images_file='DADA_n_images.dat', seq_per_vid_val=2, sal_offset=None): self.phase = phase self.train = phase == 'train' if not self.train: preproc_cfg = {} elif preproc_cfg is None: preproc_cfg = {} preproc_cfg.update({ 'rgb_mean': (0.485, 0.456, 0.406), 'rgb_std': (0.229, 0.224, 0.225), }) self.preproc_cfg = preproc_cfg self.out_size = out_size self.debug = debug self.val_size = val_size self.n_x_val = n_x_val self.x_val_step = x_val_step self.x_val_seed = x_val_seed self.seq_len = seq_len self.seq_per_vid = seq_per_vid self.seq_per_vid_val = seq_per_vid_val self.frame_modulo = frame_modulo self.clip_len = seq_len * frame_modulo self.subset = subset self.verbose = verbose self.n_images_file = n_images_file self.target_size = target_size self.sal_offset = sal_offset self.max_seq_len = max_seq_len self._dir = None self._n_images_dict = None self.vid_nr_array = None # Evaluation if phase in ('eval', 'test'): self.seq_len = int(1e6) if self.phase in ('test',): self.vid_nr_array = list(range( self.test_vid_nrs[0], self.test_vid_nrs[1] + 1)) self.samples, self.target_size_dict = self.prepare_samples() return # Cross-validation split n_videos = self.n_train_val_videos assert(self.val_size <= n_videos // self.n_x_val) assert(self.x_val_step < self.n_x_val) vid_nr_array = np.arange(1, n_videos + 1) if self.x_val_seed > 0: np.random.seed(self.x_val_seed) np.random.shuffle(vid_nr_array) val_start = (len(vid_nr_array) - self.val_size) //\ (self.n_x_val - 1) * self.x_val_step vid_nr_array = vid_nr_array.tolist() if not self.train: self.vid_nr_array =\ vid_nr_array[val_start:val_start + self.val_size] else: del vid_nr_array[val_start:val_start + self.val_size] self.vid_nr_array = vid_nr_array if self.subset is not None: self.vid_nr_array =\ self.vid_nr_array[:int(len(self.vid_nr_array) * self.subset)] self.samples, self.target_size_dict = self.prepare_samples() @property def n_images_dict(self): if self._n_images_dict is None: with open(config_path.parent / self.n_images_file, 
'r') as f: self._n_images_dict = { idx + 1: int(line) for idx, line in enumerate(f) if idx + 1 in self.vid_nr_array} return self._n_images_dict @property def dir(self): if self._dir is None: self._dir = Path(os.environ["DADA2000_DATA_DIR"]) return self._dir @property def n_samples(self): return len(self.vid_nr_array) def __len__(self): return len(self.samples) def prepare_samples(self): samples = [] too_short = 0 too_long = 0 for vid_nr, n_images in self.n_images_dict.items(): if self.phase in ('eval', 'test'): samples += [ (vid_nr, offset + 1) for offset in range(self.frame_modulo)] continue # 帧数过小多大直接跳过 if n_images < self.clip_len: too_short += 1 continue if n_images // self.frame_modulo > self.max_seq_len: too_long += 1 continue # if self.phase == 'train': samples += [(vid_nr, None)] * self.seq_per_vid continue elif self.phase == 'valid': x = n_images // (self.seq_per_vid_val * 2) - self.clip_len // 2 start = max(1, x) end = min(n_images - self.clip_len, n_images - x) samples += [ (vid_nr, int(start)) for start in np.linspace(start, end, self.seq_per_vid_val)] continue # 打印数据集加载的基本信息 if self.phase not in ('eval', 'test') and self.n_images_dict: n_loaded = len(self.n_images_dict) - too_short - too_long print(f"{n_loaded} videos loaded " f"({n_loaded / len(self.n_images_dict) * 100:.1f}%)") print(f"{too_short} videos are too short " f"({too_short / len(self.n_images_dict) * 100:.1f}%)") print(f"{too_long} videos are too long " f"({too_long / len(self.n_images_dict) * 100:.1f}%)") target_size_dict = { vid_nr: self.target_size for vid_nr in self.n_images_dict.keys()} return samples, target_size_dict def get_frame_nrs(self, vid_nr, start): n_images = self.n_images_dict[vid_nr] if self.phase in ('eval', 'test'): return list(range(start, n_images + 1, self.frame_modulo)) return list(range(start, start + self.clip_len, self.frame_modulo)) def get_data_file(self, vid_nr, f_nr, dkey): if dkey == 'frame': folder = 'images' elif dkey == 'sal': folder = 'maps' elif dkey == 'fix': folder = 'fixation' else: raise ValueError(f'Unknown data key {dkey}') ### img_path = str(self.dir / f'{vid_nr:04d}' / folder/ f'{f_nr:04d}.png') return img_path def load_data(self, vid_nr, f_nr, dkey): read_flag = None if dkey == 'frame' else cv2.IMREAD_GRAYSCALE data_file = self.get_data_file(vid_nr, f_nr, dkey) if read_flag is not None: data = cv2.imread(str(data_file), read_flag) else: data = cv2.imread(str(data_file)) if data is None: raise FileNotFoundError(data_file) if dkey == 'frame': data = np.ascontiguousarray(data[:, :, ::-1]) if dkey == 'sal' and self.train and self.sal_offset is not None: data += self.sal_offset data[0, 0] = 0 return data def preprocess_sequence(self, frame_seq, dkey, vid_nr): transformations = [] if dkey == 'frame': transformations.append(transforms.ToPILImage()) transformations.append(transforms.Resize( self.out_size, interpolation=PIL.Image.LANCZOS)) transformations.append(transforms.ToTensor()) if dkey == 'frame' and 'rgb_mean' in self.preproc_cfg: transformations.append( transforms.Normalize( self.preproc_cfg['rgb_mean'], self.preproc_cfg['rgb_std'])) elif dkey == 'sal': transformations.append(transforms.ToPILImage()) transformations.append(transforms.Resize( self.out_size, interpolation=PIL.Image.LANCZOS)) transformations.append(transforms.ToTensor()) transformations.append(transforms.Lambda(utils.normalize_tensor)) # elif dkey == 'fix': # transformations.append( # transforms.Lambda(lambda fix: torch.gt(fix, 0.5))) ##! 
processing = transforms.Compose(transformations) tensor = [processing(img) for img in frame_seq] tensor = torch.stack(tensor) return tensor def get_seq(self, vid_nr, frame_nrs, dkey): data_seq = [self.load_data(vid_nr, f_nr, dkey) for f_nr in frame_nrs] return self.preprocess_sequence(data_seq, dkey, vid_nr) def get_data(self, vid_nr, start): if start is None: max_start = self.n_images_dict[vid_nr] - self.clip_len + 1 if max_start == 1: start = max_start else: start = np.random.randint(1, max_start) frame_nrs = self.get_frame_nrs(vid_nr, start) frame_seq = self.get_seq(vid_nr, frame_nrs, 'frame') target_size = self.target_size_dict[vid_nr] # if self.phase == 'test' and self.source in ('DADA2000',): # return frame_nrs, frame_seq, target_size sal_seq = self.get_seq(vid_nr, frame_nrs, 'sal') fix_seq = torch.full(self.target_size, 0, dtype=torch.bool) # fix used for nss aucj and aucs # fix_seq = self.get_seq(vid_nr, frame_nrs, 'fix') # 用 sal_seq替换fix_seq return frame_nrs, frame_seq, sal_seq, fix_seq, target_size def __getitem__(self, item): vid_nr, start = self.samples[item] data = self.get_data(vid_nr, start) return data class DT16Dataset(Dataset, utils.KwConfigClass): img_channels = 1 n_train_val_videos = 115 test_vid_nrs = (115, 153) #1110 frame_rate = 24 source = 'DT16' dynamic = True def __init__(self, seq_len=12, frame_modulo=4, max_seq_len=1e6, preproc_cfg=None, out_size=(224, 384), phase='train', target_size=(360, 640), debug=False, val_size=19, n_x_val=3, x_val_step=2, x_val_seed=0, seq_per_vid=1, subset=None, verbose=1, n_images_file='DT16_n_images.dat', seq_per_vid_val=2, sal_offset=None): self.phase = phase self.train = phase == 'train' if not self.train: preproc_cfg = {} elif preproc_cfg is None: preproc_cfg = {} preproc_cfg.update({ 'rgb_mean': (0.485, 0.456, 0.406), 'rgb_std': (0.229, 0.224, 0.225), }) self.preproc_cfg = preproc_cfg self.out_size = out_size self.debug = debug self.val_size = val_size self.n_x_val = n_x_val self.x_val_step = x_val_step self.x_val_seed = x_val_seed self.seq_len = seq_len self.seq_per_vid = seq_per_vid self.seq_per_vid_val = seq_per_vid_val self.frame_modulo = frame_modulo self.clip_len = seq_len * frame_modulo self.subset = subset self.verbose = verbose self.n_images_file = n_images_file self.target_size = target_size self.sal_offset = sal_offset self.max_seq_len = max_seq_len self._dir = None self._n_images_dict = None self.vid_nr_array = None # Evaluation if phase in ('eval', 'test'): self.seq_len = int(1e6) if self.phase in ('test',): self.vid_nr_array = list(range( self.test_vid_nrs[0], self.test_vid_nrs[1] + 1)) self.samples, self.target_size_dict = self.prepare_samples() return # Cross-validation split n_videos = self.n_train_val_videos assert(self.val_size <= n_videos // self.n_x_val) assert(self.x_val_step < self.n_x_val) vid_nr_array = np.arange(1, n_videos + 1) if self.x_val_seed > 0: np.random.seed(self.x_val_seed) np.random.shuffle(vid_nr_array) val_start = (len(vid_nr_array) - self.val_size) //\ (self.n_x_val - 1) * self.x_val_step vid_nr_array = vid_nr_array.tolist() if not self.train: self.vid_nr_array =\ vid_nr_array[val_start:val_start + self.val_size] else: del vid_nr_array[val_start:val_start + self.val_size] self.vid_nr_array = vid_nr_array if self.subset is not None: self.vid_nr_array =\ self.vid_nr_array[:int(len(self.vid_nr_array) * self.subset)] self.samples, self.target_size_dict = self.prepare_samples() @property def n_images_dict(self): if self._n_images_dict is None: with open(config_path.parent / self.n_images_file, 
'r') as f: self._n_images_dict = { idx + 1: int(line) for idx, line in enumerate(f) if idx + 1 in self.vid_nr_array} return self._n_images_dict @property def dir(self): if self._dir is None: self._dir = Path(os.environ["DT16_DATA_DIR"]) return self._dir @property def n_samples(self): return len(self.vid_nr_array) def __len__(self): return len(self.samples) def prepare_samples(self): samples = [] too_short = 0 too_long = 0 for vid_nr, n_images in self.n_images_dict.items(): if self.phase in ('eval', 'test'): samples += [ (vid_nr, offset + 1) for offset in range(self.frame_modulo)] continue # 帧数过小多大直接跳过 if n_images < self.clip_len: too_short += 1 continue if n_images // self.frame_modulo > self.max_seq_len: too_long += 1 continue # if self.phase == 'train': samples += [(vid_nr, None)] * self.seq_per_vid continue elif self.phase == 'valid': x = n_images // (self.seq_per_vid_val * 2) - self.clip_len // 2 start = max(1, x) end = min(n_images - self.clip_len, n_images - x) samples += [ (vid_nr, int(start)) for start in np.linspace(start, end, self.seq_per_vid_val)] continue # 打印数据集加载的基本信息 if self.phase not in ('eval', 'test') and self.n_images_dict: n_loaded = len(self.n_images_dict) - too_short - too_long print(f"{n_loaded} videos loaded " f"({n_loaded / len(self.n_images_dict) * 100:.1f}%)") print(f"{too_short} videos are too short " f"({too_short / len(self.n_images_dict) * 100:.1f}%)") print(f"{too_long} videos are too long " f"({too_long / len(self.n_images_dict) * 100:.1f}%)") target_size_dict = { vid_nr: self.target_size for vid_nr in self.n_images_dict.keys()} return samples, target_size_dict def get_frame_nrs(self, vid_nr, start): n_images = self.n_images_dict[vid_nr] if self.phase in ('eval', 'test'): return list(range(start, n_images + 1, self.frame_modulo)) return list(range(start, start + self.clip_len, self.frame_modulo)) def get_data_file(self, vid_nr, f_nr, dkey): if dkey == 'frame': folder = 'images' elif dkey == 'sal': folder = 'maps' elif dkey == 'fix': folder = 'fixation' else: raise ValueError(f'Unknown data key {dkey}') ### img_path = str(self.dir / f'{vid_nr:04d}' / folder/ f'{f_nr:04d}.png') return img_path def load_data(self, vid_nr, f_nr, dkey): read_flag = None if dkey == 'frame' else cv2.IMREAD_GRAYSCALE data_file = self.get_data_file(vid_nr, f_nr, dkey) if read_flag is not None: data = cv2.imread(str(data_file), read_flag) else: data = cv2.imread(str(data_file)) if data is None: raise FileNotFoundError(data_file) if dkey == 'frame': data = np.ascontiguousarray(data[:, :, ::-1]) if dkey == 'sal' and self.train and self.sal_offset is not None: data += self.sal_offset data[0, 0] = 0 return data def preprocess_sequence(self, frame_seq, dkey, vid_nr): transformations = [] if dkey == 'frame': transformations.append(transforms.ToPILImage()) transformations.append(transforms.Resize( self.out_size, interpolation=PIL.Image.LANCZOS)) transformations.append(transforms.ToTensor()) if dkey == 'frame' and 'rgb_mean' in self.preproc_cfg: transformations.append( transforms.Normalize( self.preproc_cfg['rgb_mean'], self.preproc_cfg['rgb_std'])) elif dkey == 'sal': transformations.append(transforms.Lambda(utils.normalize_tensor)) # elif dkey == 'fix': # transformations.append( # transforms.Lambda(lambda fix: torch.gt(fix, 0.5))) ##! 
processing = transforms.Compose(transformations) tensor = [processing(img) for img in frame_seq] tensor = torch.stack(tensor) return tensor def get_seq(self, vid_nr, frame_nrs, dkey): data_seq = [self.load_data(vid_nr, f_nr, dkey) for f_nr in frame_nrs] return self.preprocess_sequence(data_seq, dkey, vid_nr) def get_data(self, vid_nr, start): if start is None: max_start = self.n_images_dict[vid_nr] - self.clip_len + 1 if max_start == 1: start = max_start else: start = np.random.randint(1, max_start) # print('vid_nr:', vid_nr, '\t start:', start) frame_nrs = self.get_frame_nrs(vid_nr, start) frame_seq = self.get_seq(vid_nr, frame_nrs, 'frame') target_size = self.target_size_dict[vid_nr] # if self.phase == 'test' and self.source in ('DReye',): # return frame_nrs, frame_seq, target_size sal_seq = self.get_seq(vid_nr, frame_nrs, 'sal') fix_seq = torch.full(self.target_size, 0, dtype=torch.bool) # fix used for nss aucj and aucs # fix_seq = self.get_seq(vid_nr, frame_nrs, 'fix') # 用 sal_seq替换fix_seq return frame_nrs, frame_seq, sal_seq, fix_seq, target_size def __getitem__(self, item): vid_nr, start = self.samples[item] data = self.get_data(vid_nr, start) return data class BDDADataset(Dataset, utils.KwConfigClass): img_channels = 1 n_train_val_videos = 926 test_vid_nrs = (1127, 1429) #1110 frame_rate = 30 source = 'BDDA' dynamic = True def __init__(self, seq_len=12, frame_modulo=5, max_seq_len=1e6, preproc_cfg=None, out_size=(224, 384), phase='train', target_size=(360, 640), debug=False, val_size=200, n_x_val=3, x_val_step=2, x_val_seed=0, seq_per_vid=1, subset=None, verbose=1, n_images_file='BDDA_n_images.dat', seq_per_vid_val=2, sal_offset=None): self.phase = phase self.train = phase == 'train' if not self.train: preproc_cfg = {} elif preproc_cfg is None: preproc_cfg = {} preproc_cfg.update({ 'rgb_mean': (0.485, 0.456, 0.406), 'rgb_std': (0.229, 0.224, 0.225), }) self.preproc_cfg = preproc_cfg self.out_size = out_size self.debug = debug self.val_size = val_size self.n_x_val = n_x_val self.x_val_step = x_val_step self.x_val_seed = x_val_seed self.seq_len = seq_len self.seq_per_vid = seq_per_vid self.seq_per_vid_val = seq_per_vid_val self.frame_modulo = frame_modulo self.clip_len = seq_len * frame_modulo self.subset = subset self.verbose = verbose self.n_images_file = n_images_file self.target_size = target_size self.sal_offset = sal_offset self.max_seq_len = max_seq_len self._dir = None self._n_images_dict = None self.vid_nr_array = None # Evaluation if phase in ('eval', 'test'): self.seq_len = int(1e6) if self.phase in ('test',): self.vid_nr_array = list(range( self.test_vid_nrs[0], self.test_vid_nrs[1] + 1)) self.samples, self.target_size_dict = self.prepare_samples() return # Cross-validation split n_videos = self.n_train_val_videos assert(self.val_size <= n_videos // self.n_x_val) assert(self.x_val_step < self.n_x_val) vid_nr_array = np.arange(1, n_videos + 1) if self.x_val_seed > 0: np.random.seed(self.x_val_seed) np.random.shuffle(vid_nr_array) val_start = (len(vid_nr_array) - self.val_size) //\ (self.n_x_val - 1) * self.x_val_step vid_nr_array = vid_nr_array.tolist() if not self.train: self.vid_nr_array =\ vid_nr_array[val_start:val_start + self.val_size] else: del vid_nr_array[val_start:val_start + self.val_size] self.vid_nr_array = vid_nr_array if self.subset is not None: self.vid_nr_array =\ self.vid_nr_array[:int(len(self.vid_nr_array) * self.subset)] self.samples, self.target_size_dict = self.prepare_samples() @property def n_images_dict(self): if self._n_images_dict is None: with 
open(config_path.parent / self.n_images_file, 'r') as f: self._n_images_dict = { idx + 1: int(line) for idx, line in enumerate(f) if idx + 1 in self.vid_nr_array} return self._n_images_dict @property def dir(self): if self._dir is None: self._dir = Path(os.environ["BDDA_DATA_DIR"]) return self._dir @property def n_samples(self): return len(self.vid_nr_array) def __len__(self): return len(self.samples) def prepare_samples(self): samples = [] too_short = 0 too_long = 0 for vid_nr, n_images in self.n_images_dict.items(): if self.phase in ('eval', 'test'): samples += [ (vid_nr, offset + 1) for offset in range(self.frame_modulo)] continue # 帧数过小多大直接跳过 if n_images < self.clip_len: too_short += 1 continue if n_images // self.frame_modulo > self.max_seq_len: too_long += 1 continue # if self.phase == 'train': samples += [(vid_nr, None)] * self.seq_per_vid continue elif self.phase == 'valid': x = n_images // (self.seq_per_vid_val * 2) - self.clip_len // 2 start = max(1, x) end = min(n_images - self.clip_len, n_images - x) samples += [ (vid_nr, int(start)) for start in np.linspace(start, end, self.seq_per_vid_val)] continue # 打印数据集加载的基本信息 if self.phase not in ('eval', 'test') and self.n_images_dict: n_loaded = len(self.n_images_dict) - too_short - too_long print(f"{n_loaded} videos loaded " f"({n_loaded / len(self.n_images_dict) * 100:.1f}%)") print(f"{too_short} videos are too short " f"({too_short / len(self.n_images_dict) * 100:.1f}%)") print(f"{too_long} videos are too long " f"({too_long / len(self.n_images_dict) * 100:.1f}%)") target_size_dict = { vid_nr: self.target_size for vid_nr in self.n_images_dict.keys()} return samples, target_size_dict def get_frame_nrs(self, vid_nr, start): n_images = self.n_images_dict[vid_nr] if self.phase in ('eval', 'test'): return list(range(start, n_images + 1, self.frame_modulo)) return list(range(start, start + self.clip_len, self.frame_modulo)) def get_data_file(self, vid_nr, f_nr, dkey): if dkey == 'frame': folder = 'images' elif dkey == 'sal': folder = 'new_maps' elif dkey == 'fix': folder = 'fixation' else: raise ValueError(f'Unknown data key {dkey}') ### img_path = str(self.dir / f'{vid_nr:04d}' / folder/ f'{f_nr:04d}.png') return img_path def load_data(self, vid_nr, f_nr, dkey): read_flag = None if dkey == 'frame' else cv2.IMREAD_GRAYSCALE data_file = self.get_data_file(vid_nr, f_nr, dkey) if read_flag is not None: data = cv2.imread(str(data_file), read_flag) else: data = cv2.imread(str(data_file)) if data is None: raise FileNotFoundError(data_file) if dkey == 'frame': data = np.ascontiguousarray(data[:, :, ::-1]) if dkey == 'sal' and self.train and self.sal_offset is not None: data += self.sal_offset data[0, 0] = 0 return data def preprocess_sequence(self, frame_seq, dkey, vid_nr): transformations = [] if dkey == 'frame': transformations.append(transforms.ToPILImage()) transformations.append(transforms.Resize( self.out_size, interpolation=PIL.Image.LANCZOS)) transformations.append(transforms.ToTensor()) if dkey == 'frame' and 'rgb_mean' in self.preproc_cfg: transformations.append( transforms.Normalize( self.preproc_cfg['rgb_mean'], self.preproc_cfg['rgb_std'])) elif dkey == 'sal': transformations.append(transforms.Lambda(utils.normalize_tensor)) # elif dkey == 'fix': # transformations.append( # transforms.Lambda(lambda fix: torch.gt(fix, 0.5))) ##! 
processing = transforms.Compose(transformations) tensor = [processing(img) for img in frame_seq] tensor = torch.stack(tensor) return tensor def get_seq(self, vid_nr, frame_nrs, dkey): data_seq = [self.load_data(vid_nr, f_nr, dkey) for f_nr in frame_nrs] return self.preprocess_sequence(data_seq, dkey, vid_nr) def get_data(self, vid_nr, start): if start is None: max_start = self.n_images_dict[vid_nr] - self.clip_len + 1 if max_start == 1: start = max_start else: start = np.random.randint(1, max_start) frame_nrs = self.get_frame_nrs(vid_nr, start) frame_seq = self.get_seq(vid_nr, frame_nrs, 'frame') target_size = self.target_size_dict[vid_nr] # if self.phase == 'test' and self.source in ('DReye',): # return frame_nrs, frame_seq, target_size sal_seq = self.get_seq(vid_nr, frame_nrs, 'sal') fix_seq = torch.full(self.target_size, 0, dtype=torch.bool) # fix used for nss aucj and aucs # fix_seq = self.get_seq(vid_nr, frame_nrs, 'fix') # 用 sal_seq替换fix_seq return frame_nrs, frame_seq, sal_seq, fix_seq, target_size def __getitem__(self, item): vid_nr, start = self.samples[item] data = self.get_data(vid_nr, start) return data
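# Hypothetical usage sketch, assuming BDDA_DATA_DIR points at the extracted dataset
# and BDDA_n_images.dat sits next to the config file referenced by `config_path`:
# wrap the dataset above in a standard PyTorch DataLoader.
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    train_set = BDDADataset(phase='train', seq_len=12, frame_modulo=5)
    loader = DataLoader(train_set, batch_size=4, shuffle=True, num_workers=2)

    # Each sample is the tuple returned by get_data():
    # (frame_nrs, frame_seq, sal_seq, fix_seq, target_size)
    frame_nrs, frame_seq, sal_seq, fix_seq, target_size = next(iter(loader))
    print(frame_seq.shape, sal_seq.shape)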
python
import numpy as np
from scipy import signal


def SY_PeriodVital(x):
    """Spectral statistics of a vital-sign-like series, assuming 60 Hz sampling."""
    f1 = 1
    f2 = 6
    z = np.diff(x)
    # Power spectrogram of the differenced signal
    F, t, p = signal.spectrogram(z, fs=60)
    # Keep only the 1-6 Hz band
    band = np.logical_and(F >= f1, F <= f2)
    p = p[band]
    F = F[band]
    Pmean = np.mean(p)
    Pmax = np.max(p)
    # Frequency bin of the global maximum (p is 2-D: frequency x time)
    ff = np.unravel_index(np.argmax(p), p.shape)[0]
    Pf = F[ff] if ff < len(F) else np.nan
    Pr = Pmax / Pmean
    Pstat = np.log(Pr)
    return {'Pstat': Pstat, 'Pmax': Pmax, 'Pmean': Pmean, 'Pf': Pf}
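# Hypothetical usage sketch, assuming a synthetic 60 Hz trace with a dominant 2 Hz
# component; the 60 Hz sampling rate matches the value hard-coded in SY_PeriodVital.
if __name__ == '__main__':
    t = np.arange(0, 30, 1 / 60)                       # 30 s sampled at 60 Hz
    x = np.sin(2 * np.pi * 2.0 * t) + 0.1 * np.random.randn(t.size)
    print(SY_PeriodVital(x))                           # Pf should be close to 2 Hz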
python
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Modified from espnet(https://github.com/espnet/espnet) import math from typing import Optional from typing import Tuple import paddle import paddle.nn.functional as F from paddle import nn class ResidualBlock(nn.Layer): """Residual block module in WaveNet.""" def __init__( self, kernel_size: int=3, residual_channels: int=64, gate_channels: int=128, skip_channels: int=64, aux_channels: int=80, global_channels: int=-1, dropout_rate: float=0.0, dilation: int=1, bias: bool=True, scale_residual: bool=False, ): """Initialize ResidualBlock module. Args: kernel_size (int): Kernel size of dilation convolution layer. residual_channels (int): Number of channels for residual connection. skip_channels (int): Number of channels for skip connection. aux_channels (int): Number of local conditioning channels. dropout (float): Dropout probability. dilation (int): Dilation factor. bias (bool): Whether to add bias parameter in convolution layers. scale_residual (bool): Whether to scale the residual outputs. """ super().__init__() self.dropout_rate = dropout_rate self.residual_channels = residual_channels self.skip_channels = skip_channels self.scale_residual = scale_residual # check assert ( kernel_size - 1) % 2 == 0, "Not support even number kernel size." assert gate_channels % 2 == 0 # dilation conv padding = (kernel_size - 1) // 2 * dilation self.conv = nn.Conv1D( residual_channels, gate_channels, kernel_size, padding=padding, dilation=dilation, bias_attr=bias, ) # local conditioning if aux_channels > 0: self.conv1x1_aux = nn.Conv1D( aux_channels, gate_channels, kernel_size=1, bias_attr=False) else: self.conv1x1_aux = None # global conditioning if global_channels > 0: self.conv1x1_glo = nn.Conv1D( global_channels, gate_channels, kernel_size=1, bias_attr=False) else: self.conv1x1_glo = None # conv output is split into two groups gate_out_channels = gate_channels // 2 # NOTE: concat two convs into a single conv for the efficiency # (integrate res 1x1 + skip 1x1 convs) self.conv1x1_out = nn.Conv1D( gate_out_channels, residual_channels + skip_channels, kernel_size=1, bias_attr=bias) def forward( self, x: paddle.Tensor, x_mask: Optional[paddle.Tensor]=None, c: Optional[paddle.Tensor]=None, g: Optional[paddle.Tensor]=None, ) -> Tuple[paddle.Tensor, paddle.Tensor]: """Calculate forward propagation. Args: x (Tensor): Input tensor (B, residual_channels, T). x_mask Optional[paddle.Tensor]: Mask tensor (B, 1, T). c (Optional[Tensor]): Local conditioning tensor (B, aux_channels, T). g (Optional[Tensor]): Global conditioning tensor (B, global_channels, 1). Returns: Tensor: Output tensor for residual connection (B, residual_channels, T). Tensor: Output tensor for skip connection (B, skip_channels, T). 
""" residual = x x = F.dropout(x, p=self.dropout_rate, training=self.training) x = self.conv(x) # split into two part for gated activation splitdim = 1 xa, xb = paddle.split(x, 2, axis=splitdim) # local conditioning if c is not None: c = self.conv1x1_aux(c) ca, cb = paddle.split(c, 2, axis=splitdim) xa, xb = xa + ca, xb + cb # global conditioning if g is not None: g = self.conv1x1_glo(g) ga, gb = paddle.split(g, 2, axis=splitdim) xa, xb = xa + ga, xb + gb x = paddle.tanh(xa) * F.sigmoid(xb) # residual + skip 1x1 conv x = self.conv1x1_out(x) if x_mask is not None: x = x * x_mask # split integrated conv results x, s = paddle.split( x, [self.residual_channels, self.skip_channels], axis=1) # for residual connection x = x + residual if self.scale_residual: x = x * math.sqrt(0.5) return x, s
python
import random from collections import deque from mesh.generic.nodeConfig import NodeConfig from mesh.generic.formationClock import FormationClock from mesh.generic.nodeState import NodeState, LinkStatus from mesh.generic.cmdDict import CmdDict class NodeParams(): def __init__(self, configFile=[], config=[]): if configFile: self.config = NodeConfig(configFile) elif config: self.config = config # Configuration update holder self.newConfig = None # Mesh status self.restartTime = None self.restartRequested = False self.restartConfirmed = False self.setupParams() def setupParams(self): self.configConfirmed = False #self.commStartTime = None #self.cmdRelayBuffer = [] self.cmdHistory = deque(maxlen=100) # FIFO list of last commands received self.cmdResponse = dict() # Initialize node status self.initNodeStatus() # Formation clock self.clock = FormationClock() def initNodeStatus(self): # Node status self.nodeStatus = [NodeState(node+1) for node in range(self.config.maxNumNodes)] # Comm link status self.linkStatus = [[LinkStatus.NoLink for i in range(self.config.maxNumNodes)] for j in range(self.config.maxNumNodes)] def get_cmdCounter(self): #if self.commStartTime: # time-based counter # return int((self.clock.getTime() - self.commStartTime)*1000) #else: # random counter cmdCounter = random.randint(1, 65536) # Add counter value to history self.cmdHistory.append(cmdCounter) return cmdCounter def loadConfig(self, newConfig, hashValue): '''Verify and queue new configuration for loading.''' # Convert from protobuf to json jsonConfig = NodeConfig.fromProtoBuf(newConfig) jsonConfig['node']['nodeId'] = self.config.nodeId # Don't overwrite node id via update # Create, verify, and store new configuration newConfig = NodeConfig(configData=jsonConfig) if (newConfig.calculateHash() == hashValue and newConfig.loadSuccess): # configuration verified #self.newConfig = newConfig return [True, newConfig] else: #self.newConfig = None return [False, None] def updateConfig(self): retValue = False if (self.newConfig and self.newConfig.loadSuccess): # load pending configuration update print("Node " + str(self.config.nodeId) + ": Updating to new configuration") self.config = self.newConfig retValue = True self.newConfig = None return retValue def updateStatus(self): """Update status information.""" self.nodeStatus[self.config.nodeId-1].status = 0 if (self.configConfirmed == True): self.nodeStatus[self.config.nodeId-1].status += 64 # bit 6 def checkNodeLinks(self): """Checks status of links to other nodes.""" thisNode = self.config.nodeId - 1 for i in range(self.config.maxNumNodes): # Check for direct link if (self.nodeStatus[i].present and (self.clock.getTime() - self.nodeStatus[i].lastMsgRcvdTime) < self.config.commConfig['linkTimeout']): self.linkStatus[thisNode][i] = LinkStatus.GoodLink # Check for indirect link elif (self.nodeStatus[i].updating == True): # state data is updating, so at least an indirect link self.linkStatus[thisNode][i] = LinkStatus.IndirectLink else: # no link self.linkStatus[thisNode][i] = LinkStatus.NoLink def addCmdResponse(self, cmdCounter, cmdResponse, sourceId): if (cmdCounter in self.cmdResponse): # update existing responses self.cmdResponse[cmdCounter][sourceId] = cmdResponse else: # add new command response self.cmdResponse[cmdCounter] = dict() self.cmdResponse[cmdCounter][sourceId] = cmdResponse
python
""" Reference : https://github.com/mattalcock/blog/blob/master/2012/12/5/python-spell-checker.rst """ import re import collections class SpellCorrect: def __init__(self, text=None, files=[], initialize=True): self.NWORDS = collections.defaultdict(lambda: 1) self.alphabet = 'abcdefghijklmnopqrstuvwxyz' if initialize: self.initialize(text, files) def initialize(self, text, files): for f in files: self.train(self.words(open(f, encoding='utf-8').read())) if isinstance(text, str): self.train(self.words(text)) def words(self, text): return re.findall('[a-z0-9]+', text.lower()) def train(self, features): for f in features: self.NWORDS[f] += 1 def edits1(self, word): s = [(word[:i], word[i:]) for i in range(len(word) + 1)] deletes = [a + b[1:] for a, b in s if b] transposes = [a + b[1] + b[0] + b[2:] for a, b in s if len(b) > 1] replaces = [a + c + b[1:] for a, b in s for c in self.alphabet if b] inserts = [a + c + b for a, b in s for c in self.alphabet] return set(deletes + transposes + replaces + inserts) def known_edits2(self, word): return set(e2 for e1 in self.edits1(word) for e2 in self.edits1(e1) if e2 in self.NWORDS) def known(self, words): return set(w for w in words if w in self.NWORDS) def correct(self, word): candidates = self.known([word]) or\ self.known(self.edits1(word)) or\ self.known_edits2(word) or\ [word] return max(candidates, key=self.NWORDS.get) def sentence_correct(self, sentence, joined=True, ignore_case=True): if ignore_case: sentence = sentence.lower() if joined: sentence = sentence.split() sent = [word.lower() if word.isupper() else self.correct(word.lower()) for word in sentence] return " ".join(sent)
python
ERROR_CODES = {
    0: "EFW_SUCCESS",
    1: "EFW_ERROR_INVALID_INDEX",
    3: "EFW_ERROR_INVALID_ID",
    4: "EFW_ERROR_INVALID_VALUE",
    5: "EFW_ERROR_REMOVED",        # failed to find the filter wheel, maybe it has been removed
    6: "EFW_ERROR_MOVING",         # filter wheel is moving
    7: "EFW_ERROR_ERROR_STATE",    # filter wheel is in error state
    8: "EFW_ERROR_GENERAL_ERROR",  # other error
    9: "EFW_ERROR_NOT_SUPPORTED",
    10: "EFW_ERROR_CLOSED",
    -1: "EFW_ERROR_END",
}


class EFWError(IOError):
    def __init__(self, errno):
        self.errno = errno

    def __str__(self):
        return f'EFWError {self.errno}: {ERROR_CODES[self.errno]}'

    @classmethod
    def from_errno(cls, errno):
        return cls(errno)


def check_error(errno):
    if errno != 0:
        raise EFWError.from_errno(errno)
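# Hypothetical usage sketch: check_error() wraps the integer return codes of the
# filter-wheel SDK; the code below fakes such a return value instead of calling it.
if __name__ == '__main__':
    fake_return_code = 5                 # pretend the SDK reported EFW_ERROR_REMOVED
    try:
        check_error(fake_return_code)
    except EFWError as exc:
        print(exc)                       # EFWError 5: EFW_ERROR_REMOVED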
python
from django.http import Http404 from django.shortcuts import render_to_response from django.template import RequestContext from seaserv import get_repo, is_passwd_set from winguhub.utils import check_and_get_org_by_repo, check_and_get_org_by_group def sys_staff_required(func): """ Decorator for views that checks the user is system staff. """ def _decorated(request, *args, **kwargs): if request.user.is_staff: return func(request, *args, **kwargs) raise Http404 return _decorated # def ctx_switch_required(func): # """ # Decorator for views to change navigation bar automatically that render # same template when both in org context and personal context. # """ # def _decorated(request, *args, **kwargs): # if not request.cloud_mode: # # no need to switch context when `CLOUD_MODE` is false # request.user.org = None # request.base_template = 'myhome_base.html' # return func(request, *args, **kwargs) # repo_id = kwargs.get('repo_id', '') # group_id = kwargs.get('group_id', '') # if repo_id and group_id: # return func(request, *args, **kwargs) # if not repo_id and not group_id: # return func(request, *args, **kwargs) # user = request.user.username # if repo_id: # org, base_template = check_and_get_org_by_repo(repo_id, user) # if group_id: # org, base_template = check_and_get_org_by_group(int(group_id), user) # if org: # request.user.org = org._dict # else: # request.user.org = None # request.base_template = base_template # return func(request, *args, **kwargs) # return _decorated def repo_passwd_set_required(func): """ Decorator for views to redirect user to repo decryption page if repo is encrypt and password is not set by user. """ def _decorated(request, *args, **kwargs): repo_id = kwargs.get('repo_id', None) if not repo_id: raise Exception, 'Repo id is not found in url.' repo = get_repo(repo_id) if not repo: raise Http404 username = request.user.username if repo.encrypted and not is_passwd_set(repo_id, username): # Redirect uesr to decrypt repo page. return render_to_response('decrypt_repo_form.html', { 'repo': repo, 'next': request.get_full_path(), }, context_instance=RequestContext(request)) return func(request, *args, **kwargs) return _decorated
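# Hypothetical usage sketch: example view functions (names and bodies are made up)
# showing how the decorators above are applied.
from django.http import HttpResponse

@sys_staff_required
def example_staff_view(request):
    return HttpResponse('staff only')

@repo_passwd_set_required
def example_repo_view(request, repo_id):
    return HttpResponse('repo %s is readable' % repo_id)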
python
from dataclasses import dataclass @dataclass class CheckpointCallback: _target_: str = "pytorch_lightning.callbacks.ModelCheckpoint" monitor: str = "loss/Validation" save_top_k: int = 1 save_last: bool = True mode: str = "min" verbose: bool = False dirpath: str = "./logs/checkpoints/" # use relative path, so it can be adjusted by hydra filename: str = "{epoch:02d}" @dataclass class GPUMonitur: _target_: str = "pytorch_lightning.callbacks.DeviceStatsMonitor" @dataclass class EarlyStoppingCallback: _target_: str = "pytorch_lightning.callbacks.EarlyStopping" monitor: str = "Accuracy/Validation" min_delta: float = 0.00 patience: int = 20 verbose: bool = True mode: str = "max" @dataclass class LRMonitor: _target_: str = "pytorch_lightning.callbacks.lr_monitor.LearningRateMonitor" logging_interval: str = "step"
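# Hypothetical usage sketch, assuming Hydra/OmegaConf are installed: the `_target_`
# fields follow Hydra's instantiation convention, so a config dataclass can be turned
# into the actual Lightning callback object.
if __name__ == "__main__":
    from hydra.utils import instantiate
    from omegaconf import OmegaConf

    cfg = OmegaConf.structured(CheckpointCallback())
    checkpoint_cb = instantiate(cfg)   # pytorch_lightning.callbacks.ModelCheckpoint
    print(type(checkpoint_cb))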
python
import redis from twisted.python import log def open_redis(config): global redis_pool, redis_info host = config.get("redis", "host") port = int(config.get("redis", "port")) socket = config.get("redis", "socket") redis_info = ( host, port, socket ) if socket != "": redis_pool = redis.ConnectionPool( connection_class = redis.connection.UnixDomainSocketConnection, path = socket ) else: redis_pool = redis.ConnectionPool( host = host, port = port, db = 0 ) def get_redis(): global redis_pool return redis.StrictRedis(connection_pool = redis_pool) def get_redis_pubsub(): global redis_info host, port, socket = redis_info if socket != "": conn = redis.StrictRedis( connection_class = redis.connection.UnixDomainSocketConnection, path = socket ) else: conn = redis.StrictRedis( host = host, port = port, db = 0 ) return conn.pubsub()
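# Hypothetical usage sketch, assuming a Redis server is reachable at localhost:6379:
# builds a minimal in-memory config with the [redis] keys read by open_redis().
if __name__ == "__main__":
    import configparser

    config = configparser.ConfigParser()
    config.read_dict({"redis": {"host": "localhost", "port": "6379", "socket": ""}})

    open_redis(config)
    r = get_redis()
    r.set("example_key", "example_value")
    print(r.get("example_key"))          # b'example_value'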
python
#!/usr/bin/python # # Copyright 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """App Engine data model (schema) definition for Quiz.""" # Python imports import base64 import logging import md5 import operator import os import re import time # AppEngine imports from google.appengine.ext import db from google.appengine.api import memcache class QuizBaseModel(db.Model): """Base class for quiz models.""" class QuizTrunkModel(QuizBaseModel): """Maintains trunk for quiz model. Attributes: head: Maintians the head of a quiz. """ head = db.StringProperty() class QuizRevisionModel(QuizBaseModel): """Maintains list of revisions for a quiz. Quiz trunk associated with the revision is made parent of the model. Attributes: quiz_id: Id (key) for particular version of the quiz. time_stamp: Time_stamp for a new revision. commit_message: Commit message associated with new version. """ quiz_id = db.StringProperty() time_stamp = db.DateTimeProperty(auto_now=True) commit_message = db.StringProperty(default='Commiting a new version') class QuizPropertyModel(QuizBaseModel): """Defines various properties for a quiz. Attributes: shuffle_questions: If set questions are presented in random order. min_options: minimum number of options to be presented. max_options: maximum number of options to be presented. min_questions: minimum number of questions required to complete the quiz. Used to track the progress. repeat_questions: If set questions are repeated. repeat_wrongly_answered_questions: If set wrongly answered questions are repeated. """ shuffle_questions = db.BooleanProperty(default=True) min_options = db.IntegerProperty(default=2) max_options = db.IntegerProperty(default=10) # 0 implies all min_questions = db.IntegerProperty(default=0) # 0 implies all repeat_questions = db.BooleanProperty(default=False) repeat_wrongly_answered_questions = db.BooleanProperty(default=False) class QuizModel(QuizBaseModel): """Represents a quiz. Attributes: difficulty_level: Difficulty level for the quiz (range 0-10). quiz_property: Reference to property asscociated with quiz. title: Title of the quiz. tags: Associated tags with quiz. trunk: Reference to asscociated trunk with the quiz. introduction: Introduction text to be shown on the start page for quiz. """ # implicit id difficulty_level = db.RatingProperty(default=5) quiz_property = db.ReferenceProperty(QuizPropertyModel) title = db.StringProperty() tags = db.ListProperty(db.Category) trunk = db.ReferenceProperty(QuizTrunkModel) introduction = db.StringProperty() class ChoiceModel(QuizBaseModel): """Represents a choice/option provided to user for a question model. Attributes: body: Body of the choice. message: Message to be displayed when choice is selected. May act like a hint. is_correct: If the choice selected is correct. 
""" # implicit id body = db.TextProperty() message = db.StringProperty() is_correct = db.BooleanProperty(default=False) def dump_to_dict(self): """Dumps choice to a dictionary for passing around as JSON object.""" data_dict = {'body': self.body, 'id': str(self.key())} return data_dict class QuestionModel(QuizBaseModel): """Represents a question. Attributes: body: Text asscociated with quiz. choices: List of possible choices. shuffle_choices: If set choices are randomly shuffled. hints: Ordered list of progressive hints """ # implicit id body = db.TextProperty() choices = db.ListProperty(db.Key) shuffle_choices = db.BooleanProperty(default=True) hints = db.StringListProperty() def dump_to_dict(self): """Dumps the question model to a dictionary for passing around as JSON object.""" data_dict = {'id': str(self.key()), 'body': self.body, 'hints': self.hints, 'choices': [db.get(el).dump_to_dict() for el in self.choices] } if self.shuffle_choices and data_dict['choices']: data_dict['choices'] = random.shuffle(data_dict['choices']) return data_dict class QuizQuestionListModel(QuizBaseModel): """Maintains a list of question with its quiz id. This is necessary because questions may be shared between different quizes. Attributes: quiz: Reference to quiz object. question: Reference to question object asscociated with quiz. time_stamp: Time stamp. """ quiz = db.ReferenceProperty(QuizModel) question = db.ReferenceProperty(QuestionModel) time_stamp = db.DateTimeProperty(auto_now_add=True) class ResponseModel(QuizBaseModel): """Stores response data required for producing next question. Attributes: session_id: Session Identifier. answered_correctly: Set if the response resulted in correct answer. question: Reference to question being answered. quiz: Reference to associated quiz. quiz_trunk: Reference to associated quiz trunk. time_stamp: Time stamp of the response attempts: Number of attempts so far, useful for scoring. """ session_id = db.StringProperty(required=True) answered_correctly = db.BooleanProperty(db.Key) question = db.ReferenceProperty(QuestionModel) quiz = db.ReferenceProperty(QuizModel) quiz_trunk = db.ReferenceProperty(QuizTrunkModel) time_stamp = db.DateTimeProperty(auto_now=True) attempts = db.IntegerProperty(default=0) class QuizScoreModel(QuizBaseModel): """Stores progress status associated with a quiz and session. Both score and progress are out of 100. Attributes: session_id: Session Identifier. quiz: Reference to associated quiz. quiz_trunk: Reference to associated quiz trunk. score: Current score. progress: Current progress status questions_attempted: Number of questions attempted so far. """ quiz_trunk = db.ReferenceProperty(QuizTrunkModel) session_id = db.StringProperty(required=True) quiz = db.ReferenceProperty(QuizModel) score = db.FloatProperty(default=0.0) progress = db.FloatProperty(default=0.0) questions_attempted = db.IntegerProperty(default=0)
python
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Standard set of plugins.""" import base64 import datetime import os import sys import netaddr from oslo_config import cfg from oslo_utils import units import six from ironic_inspector.common.i18n import _, _LC, _LE, _LI, _LW from ironic_inspector import conf from ironic_inspector.plugins import base from ironic_inspector import utils CONF = cfg.CONF LOG = utils.getProcessingLogger('ironic_inspector.plugins.standard') class RootDiskSelectionHook(base.ProcessingHook): """Smarter root disk selection using Ironic root device hints. This hook must always go before SchedulerHook, otherwise root_disk field might not be updated. """ def before_update(self, introspection_data, node_info, **kwargs): """Detect root disk from root device hints and IPA inventory.""" hints = node_info.node().properties.get('root_device') if not hints: LOG.debug('Root device hints are not provided', node_info=node_info, data=introspection_data) return inventory = introspection_data.get('inventory') if not inventory: raise utils.Error( _('Root device selection requires ironic-python-agent ' 'as an inspection ramdisk'), node_info=node_info, data=introspection_data) disks = inventory.get('disks', []) if not disks: raise utils.Error(_('No disks found'), node_info=node_info, data=introspection_data) for disk in disks: properties = disk.copy() # Root device hints are in GiB, data from IPA is in bytes properties['size'] //= units.Gi for name, value in hints.items(): actual = properties.get(name) if actual != value: LOG.debug('Disk %(disk)s does not satisfy hint ' '%(name)s=%(value)s, actual value is %(actual)s', {'disk': disk.get('name'), 'name': name, 'value': value, 'actual': actual}, node_info=node_info, data=introspection_data) break else: LOG.debug('Disk %(disk)s of size %(size)s satisfies ' 'root device hints', {'disk': disk.get('name'), 'size': disk['size']}, node_info=node_info, data=introspection_data) introspection_data['root_disk'] = disk return raise utils.Error(_('No disks satisfied root device hints'), node_info=node_info, data=introspection_data) class SchedulerHook(base.ProcessingHook): """Nova scheduler required properties.""" KEYS = ('cpus', 'cpu_arch', 'memory_mb', 'local_gb') def before_update(self, introspection_data, node_info, **kwargs): """Update node with scheduler properties.""" inventory = introspection_data.get('inventory') errors = [] root_disk = introspection_data.get('root_disk') if root_disk: introspection_data['local_gb'] = root_disk['size'] // units.Gi if CONF.processing.disk_partitioning_spacing: introspection_data['local_gb'] -= 1 elif inventory: errors.append(_('root disk is not supplied by the ramdisk and ' 'root_disk_selection hook is not enabled')) if inventory: try: introspection_data['cpus'] = int(inventory['cpu']['count']) introspection_data['cpu_arch'] = six.text_type( inventory['cpu']['architecture']) except (KeyError, ValueError, TypeError): errors.append(_('malformed or missing CPU information: %s') % inventory.get('cpu')) try: 
introspection_data['memory_mb'] = int( inventory['memory']['physical_mb']) except (KeyError, ValueError, TypeError): errors.append(_('malformed or missing memory information: %s; ' 'introspection requires physical memory size ' 'from dmidecode') % inventory.get('memory')) else: LOG.warning(_LW('No inventory provided: using old bash ramdisk ' 'is deprecated, please switch to ' 'ironic-python-agent'), node_info=node_info, data=introspection_data) missing = [key for key in self.KEYS if not introspection_data.get(key)] if missing: raise utils.Error( _('The following required parameters are missing: %s') % missing, node_info=node_info, data=introspection_data) if errors: raise utils.Error(_('The following problems encountered: %s') % '; '.join(errors), node_info=node_info, data=introspection_data) LOG.info(_LI('Discovered data: CPUs: %(cpus)s %(cpu_arch)s, ' 'memory %(memory_mb)s MiB, disk %(local_gb)s GiB'), {key: introspection_data.get(key) for key in self.KEYS}, node_info=node_info, data=introspection_data) overwrite = CONF.processing.overwrite_existing properties = {key: str(introspection_data[key]) for key in self.KEYS if overwrite or not node_info.node().properties.get(key)} node_info.update_properties(**properties) class ValidateInterfacesHook(base.ProcessingHook): """Hook to validate network interfaces.""" def __init__(self): if CONF.processing.add_ports not in conf.VALID_ADD_PORTS_VALUES: LOG.critical(_LC('Accepted values for [processing]add_ports are ' '%(valid)s, got %(actual)s'), {'valid': conf.VALID_ADD_PORTS_VALUES, 'actual': CONF.processing.add_ports}) sys.exit(1) if CONF.processing.keep_ports not in conf.VALID_KEEP_PORTS_VALUES: LOG.critical(_LC('Accepted values for [processing]keep_ports are ' '%(valid)s, got %(actual)s'), {'valid': conf.VALID_KEEP_PORTS_VALUES, 'actual': CONF.processing.keep_ports}) sys.exit(1) def _get_interfaces(self, data=None): """Convert inventory to a dict with interfaces. :return: dict interface name -> dict with keys 'mac' and 'ip' """ result = {} inventory = data.get('inventory', {}) if inventory: for iface in inventory.get('interfaces', ()): name = iface.get('name') mac = iface.get('mac_address') ip = iface.get('ipv4_address') if not name: LOG.error(_LE('Malformed interface record: %s'), iface, data=data) continue LOG.debug('Found interface %(name)s with MAC "%(mac)s" and ' 'IP address "%(ip)s"', {'name': name, 'mac': mac, 'ip': ip}, data=data) result[name] = {'ip': ip, 'mac': mac} else: LOG.warning(_LW('No inventory provided: using old bash ramdisk ' 'is deprecated, please switch to ' 'ironic-python-agent'), data=data) result = data.get('interfaces') return result def _validate_interfaces(self, interfaces, data=None): """Validate interfaces on correctness and suitability. 
:return: dict interface name -> dict with keys 'mac' and 'ip' """ if not interfaces: raise utils.Error(_('No interfaces supplied by the ramdisk'), data=data) pxe_mac = utils.get_pxe_mac(data) if not pxe_mac and CONF.processing.add_ports == 'pxe': LOG.warning(_LW('No boot interface provided in the introspection ' 'data, will add all ports with IP addresses')) result = {} for name, iface in interfaces.items(): mac = iface.get('mac') ip = iface.get('ip') if not mac: LOG.debug('Skipping interface %s without link information', name, data=data) continue if not utils.is_valid_mac(mac): LOG.warning(_LW('MAC %(mac)s for interface %(name)s is not ' 'valid, skipping'), {'mac': mac, 'name': name}, data=data) continue mac = mac.lower() if name == 'lo' or (ip and netaddr.IPAddress(ip).is_loopback()): LOG.debug('Skipping local interface %s', name, data=data) continue if (CONF.processing.add_ports == 'pxe' and pxe_mac and mac != pxe_mac): LOG.debug('Skipping interface %s as it was not PXE booting', name, data=data) continue elif CONF.processing.add_ports != 'all' and not ip: LOG.debug('Skipping interface %s as it did not have ' 'an IP address assigned during the ramdisk run', name, data=data) continue result[name] = {'ip': ip, 'mac': mac.lower()} if not result: raise utils.Error(_('No suitable interfaces found in %s') % interfaces, data=data) return result def before_processing(self, introspection_data, **kwargs): """Validate information about network interfaces.""" bmc_address = utils.get_ipmi_address_from_data(introspection_data) if bmc_address: introspection_data['ipmi_address'] = bmc_address else: LOG.debug('No BMC address provided in introspection data, ' 'assuming virtual environment', data=introspection_data) all_interfaces = self._get_interfaces(introspection_data) interfaces = self._validate_interfaces(all_interfaces, introspection_data) LOG.info(_LI('Using network interface(s): %s'), ', '.join('%s %s' % (name, items) for (name, items) in interfaces.items()), data=introspection_data) introspection_data['all_interfaces'] = all_interfaces introspection_data['interfaces'] = interfaces valid_macs = [iface['mac'] for iface in interfaces.values()] introspection_data['macs'] = valid_macs def before_update(self, introspection_data, node_info, **kwargs): """Drop ports that are not present in the data.""" if CONF.processing.keep_ports == 'present': expected_macs = { iface['mac'] for iface in introspection_data['all_interfaces'].values() } elif CONF.processing.keep_ports == 'added': expected_macs = set(introspection_data['macs']) else: return # list is required as we modify underlying dict for port in list(node_info.ports().values()): if port.address not in expected_macs: LOG.info(_LI("Deleting port %(port)s as its MAC %(mac)s is " "not in expected MAC list %(expected)s"), {'port': port.uuid, 'mac': port.address, 'expected': list(sorted(expected_macs))}, node_info=node_info, data=introspection_data) node_info.delete_port(port) class RamdiskErrorHook(base.ProcessingHook): """Hook to process error send from the ramdisk.""" DATETIME_FORMAT = '%Y.%m.%d_%H.%M.%S_%f' def before_processing(self, introspection_data, **kwargs): error = introspection_data.get('error') logs = introspection_data.get('logs') if error or CONF.processing.always_store_ramdisk_logs: if logs: self._store_logs(logs, introspection_data) else: LOG.debug('No logs received from the ramdisk', data=introspection_data) if error: raise utils.Error(_('Ramdisk reported error: %s') % error, data=introspection_data) def _store_logs(self, logs, 
introspection_data): if not CONF.processing.ramdisk_logs_dir: LOG.warning( _LW('Failed to store logs received from the ramdisk ' 'because ramdisk_logs_dir configuration option ' 'is not set'), data=introspection_data) return if not os.path.exists(CONF.processing.ramdisk_logs_dir): os.makedirs(CONF.processing.ramdisk_logs_dir) time_fmt = datetime.datetime.utcnow().strftime(self.DATETIME_FORMAT) bmc_address = introspection_data.get('ipmi_address', 'unknown') file_name = 'bmc_%s_%s' % (bmc_address, time_fmt) with open(os.path.join(CONF.processing.ramdisk_logs_dir, file_name), 'wb') as fp: fp.write(base64.b64decode(logs)) LOG.info(_LI('Ramdisk logs stored in file %s'), file_name, data=introspection_data)
python
def content_length_check(content, allow_short=False):
    """Raise if the content is longer than 40000 characters, or shorter than 2
    characters unless allow_short is set (empty content is always rejected)."""
    maxlen = 40000
    if len(content) > maxlen:
        raise Exception('content too long {}/{}'.format(len(content), maxlen))
    if (len(content) < 2 and not allow_short) or len(content) == 0:
        raise Exception('content too short')


def title_length_check(title):
    """Raise if the title is not between 2 and 140 characters long."""
    if len(title) > 140:
        raise Exception('title too long')
    if len(title) < 2:
        raise Exception('title too short')
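# Hypothetical usage sketch: valid inputs pass silently, an over-long title raises.
if __name__ == '__main__':
    content_length_check('A perfectly reasonable post body.')
    title_length_check('A short title')
    try:
        title_length_check('x' * 200)
    except Exception as e:
        print(e)                         # title too long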
python
# -*- coding: utf-8 -*- """ Created on Sun Jul 14 17:36:13 2019 @author: Mangifera """ import seaborn as sns import pandas as pd from scipy import stats def is_it_random(filename): with open(filename, "r") as text_file: demon = text_file.read() demon = [int(x) for x in demon.split('\n')] occurrence = {} for i in demon: if i in occurrence: occurrence[i] += 1 else: occurrence[i] = 1 return occurrence def make_df(filename_ctrl, filename_sample): # occurrence_ctrl = is_it_random(filename_ctrl) # occurrences_ctrl = pd.DataFrame.from_dict(occurrence_ctrl, orient = "index", columns=['rolls_ctrl']) # occurrences_ctrl = occurrences_ctrl.reset_index() # occurrences_ctrl = occurrences_ctrl.rename(index=str, columns={"index": "die_side"}) occurrence_samp = is_it_random(filename_sample) occurrences_samp = pd.DataFrame.from_dict(occurrence_samp, orient = "index", columns=['rolls_samp']) occurrences_samp = occurrences_samp.reset_index() occurrences_samp = occurrences_samp.rename(index=str, columns={"index": "die_side"}) # occurrences = pd.merge(occurrences_ctrl, occurrences_samp, on='die_side') max_die_no = max(occurrences_samp['die_side']) total_rolls = sum(occurrence_samp.values()) uniform_prediction = total_rolls/max_die_no occurrences = occurrences_samp.set_index("die_side") occurrences['uniform_dist'] = pd.Series(uniform_prediction, index=occurrences.index) sns.set(style="whitegrid") ax = sns.barplot(x=occurrences.index, y="rolls_samp", data=occurrences) chi2 = stats.chi2_contingency(occurrences) chi_square_stat = chi2[0] p_value = chi2[1] degrees_of_freedom = chi2[2] print (f"chi_square_stat: {chi_square_stat}, p-value: {p_value}, degrees_of_freedom: {degrees_of_freedom}") filename_sample = "actual_data_yeenoghu.txt" filename_ctrl = "yeenoghu_my_pc.txt" z = make_df(filename_ctrl, filename_sample)
python
import warnings warnings.filterwarnings('ignore') import numpy as np import pandas as pd import matplotlib.pyplot as plt import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, inspect from sqlalchemy import func, desc from matplotlib.ticker import NullFormatter import matplotlib.dates as mdates from datetime import datetime, timedelta import seaborn as sns from flask import Flask, jsonify import datetime as dt engine = create_engine("sqlite:///belly_button_biodiversity.sqlite", echo=False) Base = automap_base() Base.prepare(engine, reflect=True) Base.classes.keys() Otu = Base.classes.otu Samples = Base.classes.samples Samples_MD = Base.classes.samples_metadata session = Session(engine) def get_sample_names(): samples_query = session.query(Samples) samples_df = pd.read_sql(samples_query.statement, samples_query.session.bind) return list(samples_df.columns[1:]) def otu_descriptions(): otu_query = session.query(Otu) otu_df = pd.read_sql(otu_query.statement, otu_query.session.bind) return list(otu_df['lowest_taxonomic_unit_found'].values)
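# Hypothetical usage sketch, assuming belly_button_biodiversity.sqlite is present in
# the working directory (it is already required by the engine created above).
if __name__ == '__main__':
    print(get_sample_names()[:5])
    print(otu_descriptions()[:5])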
python
import sys
import socket
import threading


class Server:

    def __init__(self, hostname='localhost', port=8080):
        self.host = hostname
        self.port = port
        self.clients = []

        # create a TCP socket
        self.socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)

        # bind the socket to the server address and port
        self.socket.bind((self.host, self.port))

        # wait for client connections
        self.socket.listen(10)

        # put the socket in non-blocking mode
        self.socket.setblocking(False)

        # create the threads that accept and process connections
        self.create_threads()

        # main thread
        while True:
            message = input('=> ')
            if message == 'exit':
                # close the connection
                self.socket.close()
                sys.exit()

    def create_threads(self):
        '''
        Creates the threads that accept and process connections.
        '''
        accept_connection_thread = threading.Thread(target=self.accept_connection)
        process_connection_thread = threading.Thread(target=self.process_connection)

        accept_connection_thread.daemon = True
        accept_connection_thread.start()

        process_connection_thread.daemon = True
        process_connection_thread.start()

    def message_to_all(self, message, client):
        '''
        Sends a message to every connected client except the sender.
        '''
        for _client in self.clients:
            try:
                if _client != client:
                    _client.send(message)
            except:
                self.clients.remove(_client)

    def accept_connection(self):
        '''
        Accepts client connections and stores them.
        '''
        while True:
            try:
                connection, address = self.socket.accept()
                connection.setblocking(False)
                self.clients.append(connection)
            except:
                pass

    def process_connection(self):
        '''
        Polls the list of clients to detect incoming messages.
        '''
        while True:
            if len(self.clients) > 0:
                for client in self.clients:
                    try:
                        data = client.recv(1024)
                        if data:
                            self.message_to_all(data, client)
                    except:
                        pass


def main():
    if len(sys.argv) == 3:
        hostname = str(sys.argv[1])
        port = int(sys.argv[2])
        server = Server(hostname, port)
    elif len(sys.argv) == 1:
        server = Server()
    else:
        print('You must provide the server address and port')


if __name__ == '__main__':
    main()
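# Hypothetical companion sketch: a tiny client that could talk to the Server above.
# It would normally live in its own module and is not called automatically here;
# host and port must match the running server instance.
def example_client(hostname='localhost', port=8080, message='hola'):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((hostname, port))
    client.send(message.encode('utf-8'))
    client.close()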
python
from Server.models.business.ListenThread import ListenThread listenThread = ListenThread() listenThread.main_execution()
python
''' Description on how to produce metadata file. ''' input_filter = None treename = 'deepntuplizer/tree' reweight_events = -1 reweight_bins = [list(range(200, 2051, 50)), [-10000, 10000]] metadata_events = 1000000 selection = '''jet_tightId \ && ( !label_H_cc )''' # && ( (sample_isQCD && fj_isQCD) || (!sample_isQCD && !fj_isQCD)) \ var_groups = { # 'group_name': ( ('regex1', 'regex2', ...), list_length ) 'fjvars': (('fj_sdmass',), None), } var_blacklist = [ 'fj_gen_pt', 'fj_gen_eta', ] var_no_transform_branches = [ 'fj_labelJMAR', 'fjJMAR_gen_pt', 'fjJMAR_gen_eta', 'fjJMAR_gen_pdgid', 'fj_label', 'fj_isQCD', 'fj_isTop', 'fj_isW', 'fj_isZ', 'fj_isH', 'npv', 'n_pfcands', 'n_tracks', 'n_sv', 'fj_pt', 'fj_eta', 'fj_phi', 'fj_mass', 'fj_n_sdsubjets', 'fjPuppi_tau21', 'fjPuppi_tau32', 'fjPuppi_corrsdmass', 'fj_doubleb', 'pfCombinedInclusiveSecondaryVertexV2BJetTags', "fj_tau21", "fj_tau32", "fj_sdmass", "fj_sdsj1_pt", "fj_sdsj1_eta", "fj_sdsj1_phi", "fj_sdsj1_mass", "fj_sdsj1_csv", "fj_sdsj1_ptD", "fj_sdsj1_axis1", "fj_sdsj1_axis2", "fj_sdsj1_mult", "fj_sdsj2_pt", "fj_sdsj2_eta", "fj_sdsj2_phi", "fj_sdsj2_mass", "fj_sdsj2_csv", "fj_sdsj2_ptD", "fj_sdsj2_axis1", "fj_sdsj2_axis2", "fj_sdsj2_mult", "fj_ptDR", "fj_relptdiff", "fj_sdn2", 'fj_z_ratio', 'fj_trackSipdSig_3', 'fj_trackSipdSig_2', 'fj_trackSipdSig_1', 'fj_trackSipdSig_0', 'fj_trackSipdSig_1_0', 'fj_trackSipdSig_0_0', 'fj_trackSipdSig_1_1', 'fj_trackSipdSig_0_1', 'fj_trackSip2dSigAboveCharm_0', 'fj_trackSip2dSigAboveBottom_0', 'fj_trackSip2dSigAboveBottom_1', 'fj_tau1_trackEtaRel_0', 'fj_tau1_trackEtaRel_1', 'fj_tau1_trackEtaRel_2', 'fj_tau0_trackEtaRel_0', 'fj_tau0_trackEtaRel_1', 'fj_tau0_trackEtaRel_2', 'fj_tau_vertexMass_0', 'fj_tau_vertexEnergyRatio_0', 'fj_tau_vertexDeltaR_0', 'fj_tau_flightDistance2dSig_0', 'fj_tau_vertexMass_1', 'fj_tau_vertexEnergyRatio_1', 'fj_tau_flightDistance2dSig_1', 'fj_jetNTracks', 'fj_nSV', ] # label_list = ['fj_isQCD', 'fj_isTop', 'fj_isW', 'fj_isZ', 'fj_isH'] label_list = ['label_Top_bcq', 'label_Top_bqq', 'label_Top_bc', 'label_Top_bq', 'label_W_cq', 'label_W_qq', 'label_Z_bb', 'label_Z_cc', 'label_Z_qq', 'label_H_bb', 'label_H_qqqq', 'label_QCD_bb', 'label_QCD_cc', 'label_QCD_b', 'label_QCD_c', 'label_QCD_others', ] reweight_var = ['fj_pt', 'fj_sdmass'] reweight_classes = ['fj_isTop', 'fj_isW', 'fj_isZ', 'fj_isH', 'fj_isQCD'] reweight_method = 'flat' var_img = None var_pos = None n_pixels = None img_ranges = None
python
from argparse import ArgumentTypeError

import numpy as np
from PIL import Image

from convolution_functions import apply_filter, filters

debug_mode = False

"""
List of functions usable by this image-editing program.
To add a function, write it here, add it to action_dict (the list of usable
functions) and then make it CLI callable in Main via add_argument.
"""


def read_image(file_name: str) -> np.array:
    """
    Helper function for loading an image.
    :param file_name: path to the file
    :return: numpy array, ready to be edited by our functions
    """
    return np.asarray(Image.open(file_name), dtype=np.int32)


def save_image(array, file_path):
    """
    Helper function for saving an image; it converts the array from int32
    to uint8 on its own and saves it.
    :param array:
    :param file_path:
    :return:
    """
    out = array.astype("uint8")
    Image.fromarray(out).save(file_path)


def percentage(val):
    """
    Custom data type for argparse; it only checks that the user entered a
    number greater than zero.
    :param val: input from argparse
    :return: int in the range 0 - 100 (without adjustment)
    """
    try:
        n = int(val)
        if 0 <= n:
            return n
        else:
            msg = "The number cannot be less than zero"
            raise ArgumentTypeError(msg)
    except ValueError:
        msg = 'The given input could not be converted to a number!'
        raise ArgumentTypeError(msg)


"""
image edit functions
"""


def do_rotate(np_image, args=None):
    out = np.rot90(np_image)
    if debug_mode:
        print("a do_rotate")
    return out


def do_mirror(np_image, args=None):
    assert np_image.ndim > 1
    out = np_image[::, ::-1]
    if debug_mode:
        print("a do_mirror")
    return out


def do_inverse(np_image, args=None):
    """
    Colour-inversion function (black becomes white and so on).
    :param np_image: numpy image we want to edit
    :param args: not needed here, kept only for compatibility
    :return: edited image as a numpy array
    """
    if len(np_image.shape) > 2:
        out = np.abs(np_image[::, ::, 0:min(np_image.shape[2], 3)] - 255)
    else:
        out = np.abs(np_image - 255)
    if debug_mode:
        print("a do_inverse")
    return out


def do_bw(np_image, args=None):
    """
    Greyscale conversion function; we use the ITU-R 601-2 luma formula.
    :param np_image: numpy image we want to edit
    :param args: not needed here, kept only for compatibility
    :return: edited image as a numpy array
    """
    if np_image.ndim != 3:
        # the image is already greyscale, so there is no need to convert it again
        print("Already in greyscale, redundant --bw")
        return np_image
    result_red = (np_image[::, ::, 0] * 0.299)
    result_green = (np_image[::, ::, 1] * 0.587)
    result_blue = (np_image[::, ::, 2] * 0.114)
    final = (result_red + result_green + result_blue)
    if debug_mode:
        print("a do_bw")
    return final


def do_lighten(np_image, args):
    """
    Function that lightens all pixels by the given percentage.
    :param np_image: numpy image we want to edit
    :param args: takes the lighten value from the argparser
    :return: edited image as a numpy array
    """
    if args is None:
        raise ValueError
    value = args.lighten.pop(0)
    # we know that 100% = 1, 50% = 0.5, so we multiply and add 1 so that we do not darken the image by mistake
    percentil_value = (value * 0.01) + 1
    if len(np_image.shape) > 2:
        out = np.minimum(np_image[::, ::, 0:min(np_image.shape[2], 3)] * percentil_value, 255)
    else:
        out = np.minimum(np_image * percentil_value, 255)
    if debug_mode:
        print("a do_lighten")
    return out


def do_darken(np_image, args):
    """
    Function that darkens all pixels by the given percentage.
    :param np_image: numpy image we want to edit
    :param args: takes the darken value from the argparser
    :return: edited image as a numpy array
    """
    if args is None:
        raise ValueError
    value = args.darken.pop(0)
    if len(np_image.shape) > 2:
        out = np_image[::, ::, 0:min(np_image.shape[2], 3)] * (value * 0.01)
    else:
        out = (np_image * (value * 0.01))
    if debug_mode:
        print("a do_darken")
    return out


def do_sharpen(np_image, args=None):
    """
    Sharpening function; calls the convolution method with the given filter and returns the result.
    :param np_image: numpy image we want to edit
    :param args: not needed here, kept only for compatibility
    :return: edited image as a numpy array
    """
    out = apply_filter(np_image, filters["Sharpening"])
    if debug_mode:
        print("a do_sharpen")
    return out


def do_blur_3x3(np_image, args=None):
    """
    Blur function; calls the convolution method with the given filter and returns the result.
    :param np_image: numpy image we want to edit
    :param args: not needed here, kept only for compatibility
    :return: edited image as a numpy array
    """
    out = apply_filter(np_image, filters['Gaussian blur 3x3 (approx)'])
    if debug_mode:
        print("a do_blur_3x3")
    return out


def do_blur_5x5(np_image, args=None):
    """
    Blur function with a larger neighbourhood; calls the convolution method with the given filter and returns the result.
    :param np_image: numpy image we want to edit
    :param args: not needed here, kept only for compatibility
    :return: edited image as a numpy array
    """
    out = apply_filter(np_image, filters['Gaussian blur 5x5 (approx)'])
    if debug_mode:
        print("a do_blur_5x5")
    return out


def do_edge_detection(np_image, args=None):
    """
    Edge-detection function; calls the convolution method with the given filter and returns the result.
    :param np_image: numpy image we want to edit
    :param args: not needed here, pouze for compatibility
    :return: edited image as a numpy array
    """
    out = apply_filter(np_image, filters['Edge detection'])
    if debug_mode:
        print("a do_edge_detection")
    return out


def do_embossing(np_image, args=None):
    """
    Embossing function; calls the convolution method with the given filter and returns the result.
    :param np_image: numpy image we want to edit
    :param args: not needed here, kept only for compatibility
    :return: edited image as a numpy array
    """
    out = apply_filter(np_image, filters['Embossing'])
    if debug_mode:
        print("a do_embossing")
    return out


"""
Dictionary of all possible image edits; it is used for argparse parsing and by this program.
To add a new function, write it in funcions.py and then add it here.
"""
action_dict = {
    "--rotate": do_rotate,
    "--mirror": do_mirror,
    "--inverse": do_inverse,
    "--bw": do_bw,
    "--lighten": do_lighten,
    "--darken": do_darken,
    "--sharpen": do_sharpen,
    "--blur_3x3": do_blur_3x3,
    "--blur_5x5": do_blur_5x5,
    "--edge_detection": do_edge_detection,
    "--embossing": do_embossing
}
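A hypothetical sketch of the Main wiring that the docstrings refer to. The module name `functions` and the whole argparse setup below are assumptions about how action_dict might be hooked up, not the project's actual main module.

import argparse

from functions import action_dict, read_image, save_image, percentage


def build_parser():
    parser = argparse.ArgumentParser(description='Simple image editor')
    parser.add_argument('input', help='path to the source image')
    parser.add_argument('output', help='path where the edited image is saved')
    # most edits are simple on/off flags; --lighten/--darken take a percentage value
    for flag in action_dict:
        if flag in ('--lighten', '--darken'):
            parser.add_argument(flag, type=percentage, action='append')
        else:
            parser.add_argument(flag, action='store_true')
    return parser


if __name__ == '__main__':
    args = build_parser().parse_args()
    image = read_image(args.input)
    for flag, func in action_dict.items():
        # argparse stores '--blur_3x3' under the attribute 'blur_3x3'
        if getattr(args, flag.lstrip('-'), None):
            image = func(image, args)
    save_image(image, args.output)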
python
"""Tests for the auth providers."""
python
# Generated by Django 2.2.1 on 2020-05-07 07:05

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('PropelRapp', '0009_auto_20200506_0627'),
    ]

    operations = [
        migrations.AddField(
            model_name='menu',
            name='is_deleted',
            field=models.CharField(choices=[('N', 'NO'), ('Y', 'YES')], default='N', max_length=1),
        ),
        migrations.AddField(
            model_name='role',
            name='is_deleted',
            field=models.CharField(choices=[('N', 'NO'), ('Y', 'YES')], default='N', max_length=1),
        ),
        migrations.AddField(
            model_name='submenu',
            name='is_deleted',
            field=models.CharField(choices=[('N', 'NO'), ('Y', 'YES')], default='N', max_length=1),
        ),
    ]
python
'''
Text Media Matching interface
'''
from summarization.text_media_matching.text_media_matching_helper import \
    TextMediaMatchingHelper
from summarization.text_media_matching.text_media_matching_preprocessor import \
    TextMediaMatchingPreprocessor  # noqa


class TextMediaMatcher:
    '''Class to integrate the TextMediaMatching utilities'''

    def __init__(self, text_contents, media_contents,
                 distance_metric_type="absolute-difference"):
        self.text_contents = text_contents
        self.media_contents = media_contents
        self.distance_metric_type = distance_metric_type

    def _get_matched_and_unmatched_contents(self):
        if len(self.text_contents) == 0 or len(self.media_contents) == 0:
            return {
                "matched_contents": [],
                "unused_contents": self.text_contents if len(
                    self.text_contents) != 0 else self.media_contents,
                "unused_content_type": "text" if len(
                    self.text_contents) != 0 else "media"}

        preprocessor = TextMediaMatchingPreprocessor(
            self.text_contents,
            self.media_contents
        )
        preprocessed_contents_dict = preprocessor.get_formatted_content()

        text_for_matching = preprocessed_contents_dict["sentences"]
        media_for_matching = preprocessed_contents_dict["media"]
        unused_contents \
            = preprocessed_contents_dict["content_unused_for_matching"]
        unused_content_type = preprocessed_contents_dict["unused_content_type"]

        matcher = TextMediaMatchingHelper(
            text_for_matching, media_for_matching, self.distance_metric_type)
        matched_contents = matcher.get_text_media_matching()

        return {
            "matched_contents": matched_contents,
            "unused_contents": unused_contents,
            "unused_content_type": unused_content_type
        }
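A brief usage sketch for the matcher defined above, exercising only the short-circuit branch whose behaviour is visible in this file; the exact shape of real text/media content objects is left to the surrounding summarization pipeline and is not assumed here.

# Sketch: with no media available, the matcher hands the text back untouched.
matcher = TextMediaMatcher(text_contents=["sentence one", "sentence two"],
                           media_contents=[])
result = matcher._get_matched_and_unmatched_contents()

assert result["matched_contents"] == []
assert result["unused_content_type"] == "text"
assert result["unused_contents"] == ["sentence one", "sentence two"]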
python
""" # MIT LICENSE # # Copyright 1997 - 2020 by IXIA Keysight # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. *Created with Breaking Point build : EB 9.10v9.10.110.25 -- ENGINEERING BUILD""" import requests import json import pprint import base64 from requests.adapters import HTTPAdapter from requests.packages.urllib3.poolmanager import PoolManager import ssl import logging bps_api_log = logging.getLogger(__name__) requests.packages.urllib3.disable_warnings() pp = pprint.PrettyPrinter(indent=1).pprint class TlsAdapter(HTTPAdapter): def init_poolmanager(self, connections, maxsize, block): self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, block=block) ### this BPS REST API wrapper is generated for version: 9.10.110.25 class BPS(object): def __init__(self, host, user, password): self.host = host self.user = user self.password = password self.sessionId = None self.session = requests.Session() self.session.mount('https://', TlsAdapter()) self.evasionProfile = DataModelProxy(wrapper=self, name='evasionProfile') self.reports = DataModelProxy(wrapper=self, name='reports') self.capture = DataModelProxy(wrapper=self, name='capture') self.network = DataModelProxy(wrapper=self, name='network') self.topology = DataModelProxy(wrapper=self, name='topology') self.superflow = DataModelProxy(wrapper=self, name='superflow') self.testmodel = DataModelProxy(wrapper=self, name='testmodel') self.administration = DataModelProxy(wrapper=self, name='administration') self.results = DataModelProxy(wrapper=self, name='results') self.statistics = DataModelProxy(wrapper=self, name='statistics') self.appProfile = DataModelProxy(wrapper=self, name='appProfile') self.strikes = DataModelProxy(wrapper=self, name='strikes') self.loadProfile = DataModelProxy(wrapper=self, name='loadProfile') self.strikeList = DataModelProxy(wrapper=self, name='strikeList') def disablePrints(self,disable=True): if disable: log=bps_api_log.parent log.setLevel(logging.CRITICAL) logging.getLogger("requests").setLevel(logging.CRITICAL) logging.getLogger("urllib3").setLevel(logging.CRITICAL) else: log=bps_api_log.parent log.setLevel(logging.INFO) logging.getLogger("requests").setLevel(logging.ERROR) logging.getLogger("urllib3").setLevel(logging.ERROR) ### connect to the system def __connect(self): r = self.session.post(url='https://' + self.host + '/bps/api/v1/auth/session', data=json.dumps({'username': self.user, 'password': self.password}), headers={'content-type': 'application/json'}, verify=False) jsonContent = r.content is not None and 
(r.content.startswith(b'{') or r.content.startswith(b'[')) if(r.status_code == 200): self.sessionId = r.json().get('sessionId') self.session.headers['sessionId'] = r.json().get('sessionId') self.session.headers['X-API-KEY'] = r.json().get('apiKey') bps_api_log.info('Successfully connected to %s.' % self.host) else: raise Exception('Failed connecting to %s: (%s, %s)' % (self.host, r.status_code, r.content)) ### disconnect from the system def __disconnect(self): r = self.session.delete(url='https://' + self.host + '/bps/api/v1/auth/session', verify=False) jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'[')) if(r.status_code == 204): self.sessionId = None if 'sessionId' in self.session.headers: del self.session.headers['sessionId'] del self.session.headers['X-API-KEY'] bps_api_log.info('Successfully disconnected from %s.' % self.host) else: raise Exception('Failed disconnecting from %s: (%s, %s)' % (self.host, r.status_code, r.content)) ### login into the bps system def login(self): self.__connect() r = self.session.post(url='https://' + self.host + '/bps/api/v2/core/auth/login', data=json.dumps({'username': self.user, 'password': self.password, 'sessionId': self.sessionId}), headers={'content-type': 'application/json'}, verify=False) jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'[')) if(r.status_code == 200): bps_api_log.info('Login successful.\nWelcome %s. \nYour session id is %s' % (self.user, self.sessionId)) else: raise Exception('Login failed.\ncode:%s, content:%s' % (r.status_code, r.content)) ### logout from the bps system def logout(self): r = self.session.post(url='https://' + self.host + '/bps/api/v2/core/auth/logout', data=json.dumps({'username': self.user, 'password': self.password, 'sessionId': self.sessionId}), headers={'content-type': 'application/json'}, verify=False) jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'[')) if(r.status_code == 200): bps_api_log.info('Logout successful. \nBye %s.' 
% self.user) self.__disconnect() else: raise Exception('Logout failed: (%s, %s)' % (r.status_code, r.content)) ### Get from data model def _get(self, path, responseDepth=None, **kwargs): requestUrl = 'https://%s/bps/api/v2/core%s%s' % (self.host, path, '?responseDepth=%s' % responseDepth if responseDepth else '') for key, value in kwargs.items(): requestUrl = requestUrl + "&%s=%s" % (key, value) headers = {'content-type': 'application/json'} r = self.session.get(url=requestUrl, headers=headers, verify=False) jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'[')) if(r.status_code in [200, 204]): return json.loads(r.content) if jsonContent else r.content raise Exception({'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}) ### Get from data model def _patch(self, path, value): r = self.session.patch(url='https://' + self.host + '/bps/api/v2/core/' + path, headers={'content-type': 'application/json'}, data=json.dumps(value), verify=False) jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'[')) if(r.status_code != 204): raise Exception({'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}) ### Get from data model def _put(self, path, value): r = self.session.put(url='https://' + self.host + '/bps/api/v2/core/' + path, headers={'content-type': 'application/json'}, data=json.dumps(value), verify=False) jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'[')) if(r.status_code != 204): raise Exception({'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}) ### Get from data model def _delete(self, path): requestUrl = 'https://' + self.host + '/bps/api/v2/core/'+ path headers = {'content-type': 'application/json'} r = self.session.delete(url=requestUrl, headers=headers, verify=False) if(r.status_code == 400): methodCall = '%s'%path.replace('/', '.').replace('.operations', '') content_message = r.content + ' Execute: help(<BPS session name>%s) for more information about the method.'%methodCall raise Exception({'status_code': r.status_code, 'content': content_message}) jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'[')) if(r.status_code in [200, 204]): return json.loads(r.content) if jsonContent else r.content raise Exception({'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}) ### OPTIONS request def _options(self, path): r = self.session.options('https://' + self.host + '/bps/api/v2/core/'+ path) if(r.status_code == 400): methodCall = '%s'%path.replace('/', '.').replace('.operations', '') content_message = r.content + ' Execute: help(<BPS session name>%s) for more information about the method.'%methodCall raise Exception({'status_code': r.status_code, 'content': content_message}) jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'[')) if(r.status_code in [200]): return json.loads(r.content) if jsonContent else r.content raise Exception({'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}) ### generic post operation def _post(self, path, **kwargs): requestUrl = 'https://' + self.host + '/bps/api/v2/core/' + path r = self.session.post(url=requestUrl, headers={'content-type': 'application/json'}, data=json.dumps(kwargs), verify=False) if(r.status_code == 400): methodCall = 
'%s'%path.replace('/', '.').replace('.operations', '') content_message = r.content + ' Execute: help(<BPS session name>%s) for more information about the method.'%methodCall raise Exception({'status_code': r.status_code, 'content': content_message}) jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'[')) if(r.status_code in [200, 204, 202]): return json.loads(r.content) if jsonContent else r.content raise Exception({'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}) ### generic import operation def _import(self, path, filename, **kwargs): requestUrl = 'https://' + self.host + '/bps/api/v2/core/' + path files = {'file': (kwargs['name'], open(filename, 'rb'), 'application/xml')} r = self.session.post(url=requestUrl, files=files, data={'fileInfo':str(kwargs)}, verify=False) if(r.status_code == 400): methodCall = '%s'%path.replace('/', '.').replace('.operations', '') content_message = r.content + ' Execute: help(<BPS session name>%s) for more information about the method.'%methodCall raise Exception({'status_code': r.status_code, 'content': content_message}) jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'[')) if(r.status_code in [200, 204]): return json.loads(r.content) if jsonContent else r.content raise Exception({'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}) ### generic post operation def _export(self, path, **kwargs): requestUrl = 'https://' + self.host + '/bps/api/v2/core/' + path r = self.session.post(url=requestUrl, headers={'content-type': 'application/json'}, data=json.dumps(kwargs), verify=False) if(r.status_code == 400): methodCall = '%s'%path.replace('/', '.').replace('.operations', '') content_message = r.content + ' Execute: help(<BPS session name>%s) for more information about the method.'%methodCall raise Exception({'status_code': r.status_code, 'content': content_message}) jsonContent = r.content is not None and (r.content.startswith(b'{') or r.content.startswith(b'[')) if(r.status_code == 200) or r.status_code == 204: get_url = 'https://' + self.host + r.content get_req = self.session.get(url = get_url, verify = False) with open(kwargs['filepath'], 'wb') as fd: for chunk in get_req.iter_content(chunk_size=1024): fd.write(chunk) fd.close() get_req.close() return {'status_code': r.status_code, 'content': 'success'} else: raise Exception({'status_code': r.status_code, 'content': json.loads(r.content) if jsonContent else r.content}) ### null @staticmethod def _topology_operations_unreserve(self, unreservation): """ :param unreservation (list): list of object with fields slot (number): port (number): """ return self._wrapper._post('/topology/operations/unreserve', **{'unreservation': unreservation}) ### Deletes a given Evasion Profile from the database. @staticmethod def _evasionProfile_operations_delete(self, name): """ Deletes a given Evasion Profile from the database. :param name (string): The name of the profile to delete. """ return self._wrapper._post('/evasionProfile/operations/delete', **{'name': name}) ### Clones a component in the current working Test Model @staticmethod def _testmodel_operations_clone(self, template, type, active): """ Clones a component in the current working Test Model :param template (string): The ID of the test component to clone. :param type (string): Component Type: appsim, sesionsender .. 
:param active (bool): Set component enable (by default is active) or disable """ return self._wrapper._post('/testmodel/operations/clone', **{'template': template, 'type': type, 'active': active}) ### null @staticmethod def _loadProfile_operations_load(self, template): """ :param template (string): """ return self._wrapper._post('/loadProfile/operations/load', **{'template': template}) ### Sets the card mode of a board. @staticmethod def _topology_operations_setCardMode(self, board, mode): """ Sets the card mode of a board. :param board (number): Slot ID. :param mode (number): The new mode: 10(BPS-L23), 7(BPS L4-7), 3(IxLoad), 11(BPS QT L2-3), 12(BPS QT L4-7) """ return self._wrapper._post('/topology/operations/setCardMode', **{'board': board, 'mode': mode}) ### Sets the card speed of a board @staticmethod def _topology_operations_setCardSpeed(self, board, speed): """ Sets the card speed of a board :param board (number): Slot ID. :param speed (number): The new speed.(the int value for 1G is 1000, 10G(10000), 40G(40000)) """ return self._wrapper._post('/topology/operations/setCardSpeed', **{'board': board, 'speed': speed}) ### Sets the card fanout of a board @staticmethod def _topology_operations_setCardFanout(self, board, fanid): """ Sets the card fanout of a board :param board (number): Slot ID. :param fanid (number): The fan type represented by an integer id. For CloudStorm: 0(100G), 1(40G), 2(25G), 3(10G), 4(50G). For PerfectStorm 40G: 0(40G), 1(10G). For PerfectStorm 100G: 0(100G), 1(40G), 2(10G) """ return self._wrapper._post('/topology/operations/setCardFanout', **{'board': board, 'fanid': fanid}) ### Enables/Disables the performance acceleration for a BPS VE blade. @staticmethod def _topology_operations_setPerfAcc(self, board, perfacc): """ Enables/Disables the performance acceleration for a BPS VE blade. :param board (number): Slot ID. :param perfacc (bool): Boolean value: 'True' to enable the performance Acceleration and 'False' otherwise. """ return self._wrapper._post('/topology/operations/setPerfAcc', **{'board': board, 'perfacc': perfacc}) ### Deletes a given Application Profile from the database. @staticmethod def _appProfile_operations_delete(self, name): """ Deletes a given Application Profile from the database. :param name (string): The name of the Application Profiles. """ return self._wrapper._post('/appProfile/operations/delete', **{'name': name}) ### Saves the current working Test Model under specified name. @staticmethod def _evasionProfile_operations_saveAs(self, name, force): """ Saves the current working Test Model under specified name. :param name (string): The new name given for the current working Evasion Profile :param force (bool): Force to save the working Evasion Profile using a new name. """ return self._wrapper._post('/evasionProfile/operations/saveAs', **{'name': name, 'force': force}) ### Saves the working Test Model using the current name. No need to configure. The current name is used. @staticmethod def _evasionProfile_operations_save(self, name=None, force=True): """ Saves the working Test Model using the current name. No need to configure. The current name is used. :param name (string): This argument should be empty for saving the profile using it's actual name. :param force (bool): Force to save the working profile with the same name. """ return self._wrapper._post('/evasionProfile/operations/save', **{'name': name, 'force': force}) ### Imports a test model, given as a file. 
This operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. @staticmethod def _testmodel_operations_importModel(self, name, filename, force): """ Imports a test model, given as a file. This operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. :param name (string): The name of the object being imported :param filename (string): The file containing the object :param force (bool): Force to import the file and the object having the same name will be replaced. """ return self._wrapper._import('/testmodel/operations/importModel', **{'name': name, 'filename': filename, 'force': force}) ### Imports an application profile, given as a file. This operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. @staticmethod def _appProfile_operations_importAppProfile(self, name, filename, force): """ Imports an application profile, given as a file. This operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. :param name (string): The name of the object being imported :param filename (string): The file containing the object :param force (bool): Force to import the file and the object having the same name will be replaced. """ return self._wrapper._import('/appProfile/operations/importAppProfile', **{'name': name, 'filename': filename, 'force': force}) ### Imports a network neighborhood model, given as a file.This operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. @staticmethod def _network_operations_importNetwork(self, name, filename, force): """ Imports a network neighborhood model, given as a file.This operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. :param name (string): The name of the object being imported :param filename (string): The file containing the object :param force (bool): Force to import the file and replace the object having the same name. """ return self._wrapper._import('/network/operations/importNetwork', **{'name': name, 'filename': filename, 'force': force}) ### null @staticmethod def _superflow_operations_search(self, searchString, limit, sort, sortorder): """ :param searchString (string): Search Super Flow name matching the string given. :param limit (string): The limit of rows to return :param sort (string): Parameter to sort by. :param sortorder (string): The sort order (ascending/descending) """ return self._wrapper._post('/superflow/operations/search', **{'searchString': searchString, 'limit': limit, 'sort': sort, 'sortorder': sortorder}) ### Adds a new test component to the current working test model @staticmethod def _testmodel_operations_add(self, name, component, type, active): """ Adds a new test component to the current working test model :param name (string): Component Name :param component (string): Component template, preset. :param type (string): Component Type: appsim, sesionsender .. 
:param active (bool): Set component enable (by default is active) or disable """ return self._wrapper._post('/testmodel/operations/add', **{'name': name, 'component': component, 'type': type, 'active': active}) ### Add a host to the current working Superflow @staticmethod def _superflow_operations_addHost(self, hostParams, force): """ Add a host to the current working Superflow :param hostParams (object): object of object with fields name (string): The host name. hostname (string): The NickName of the host. iface (string): The traffic direction.Values can be: 'origin'(means client) and 'target'(means server) :param force (bool): The flow id. """ return self._wrapper._post('/superflow/operations/addHost', **{'hostParams': hostParams, 'force': force}) ### Stops the test run. @staticmethod def _testmodel_operations_stopRun(self, runid): """ Stops the test run. :param runid (number): Test RUN ID """ return self._wrapper._post('/testmodel/operations/stopRun', **{'runid': runid}) ### Stops the test run. @staticmethod def _topology_operations_stopRun(self, runid): """ Stops the test run. :param runid (number): Test RUN ID """ return self._wrapper._post('/topology/operations/stopRun', **{'runid': runid}) ### null @staticmethod def _superflow_actions_operations_getActionChoices(self, id): """ :param id (number): the flow id """ return self._wrapper._post('/superflow/actions/' + self._name + '/operations/getActionChoices', **{'id': id}) ### Recompute percentages in the current working Application Profile @staticmethod def _appProfile_operations_recompute(self): """ Recompute percentages in the current working Application Profile """ return self._wrapper._post('/appProfile/operations/recompute', **{}) ### null @staticmethod def _evasionProfile_operations_search(self, searchString, limit, sort, sortorder): """ :param searchString (string): Search evasion profile name matching the string given. :param limit (string): The limit of rows to return :param sort (string): Parameter to sort by. (name/createdBy ...) :param sortorder (string): The sort order (ascending/descending) :return results (list): list of object with fields name (string): label (string): createdBy (string): revision (number): description (string): """ return self._wrapper._post('/evasionProfile/operations/search', **{'searchString': searchString, 'limit': limit, 'sort': sort, 'sortorder': sortorder}) ### Searches a strike inside all BPS strike database.To list all the available strikes, leave the arguments empty. @staticmethod def _strikes_operations_search(self, searchString='', limit=10, sort='name', sortorder='ascending', offset=0): """ Searches a strike inside all BPS strike database.To list all the available strikes, leave the arguments empty. :param searchString (string): The string used as a criteria to search a strike by.Example: 'strike_name', 'year:2019', 'path:strikes/xml..' :param limit (number): The limit of rows to return. Use empty string or empty box to get all the available strikes. :param sort (string): Parameter to sort by. :param sortorder (string): The sort order (ascending/descending) :param offset (number): The offset to begin from. Default is 0. 
:return results (list): list of object with fields id (string): protocol (string): category (string): direction (string): keyword (string): name (string): path (string): variants (number): severity (string): reference (string): fileSize (string): fileExtension (string): year (string): """ return self._wrapper._post('/strikes/operations/search', **{'searchString': searchString, 'limit': limit, 'sort': sort, 'sortorder': sortorder, 'offset': offset}) ### Loads an existing network config by name. @staticmethod def _network_operations_load(self, template): """ Loads an existing network config by name. :param template (string): The name of the network neighborhood template """ return self._wrapper._post('/network/operations/load', **{'template': template}) ### Creates a new Network Neighborhood configuration with no name. The template value must remain empty. @staticmethod def _network_operations_new(self, template=None): """ Creates a new Network Neighborhood configuration with no name. The template value must remain empty. :param template (string): The name of the template. In this case will be empty. No need to configure. """ return self._wrapper._post('/network/operations/new', **{'template': template}) ### Removes a flow from the current working SuperFlow. @staticmethod def _superflow_operations_removeFlow(self, id): """ Removes a flow from the current working SuperFlow. :param id (number): The flow ID. """ return self._wrapper._post('/superflow/operations/removeFlow', **{'id': id}) ### Lists all the component presets names. @staticmethod def _testmodel_component_operations_getComponentPresetNames(self, type='None'): """ Lists all the component presets names. :param type (string): The Component type. All the component types are listed under the node testComponentTypesDescription. If this argument is not set, all the presets will be listed. :return result (list): list of object with fields id (string): label (string): type (string): description (string): """ return self._wrapper._post('/testmodel/component/' + self._name + '/operations/getComponentPresetNames', **{'type': type}) ### Adds a list of strikes to the current working Strike List.([{id: 'b/b/v/f'}, {id: 'aa/f/h'}]) @staticmethod def _strikeList_operations_add(self, strike): """ Adds a list of strikes to the current working Strike List.([{id: 'b/b/v/f'}, {id: 'aa/f/h'}]) :param strike (list): The list of strikes to add. list of object with fields id (string): Strike path. """ return self._wrapper._post('/strikeList/operations/add', **{'strike': strike}) ### null @staticmethod def _superflow_flows_operations_getFlowChoices(self, id, name): """ :param id (number): The flow id. :param name (string): The flow type/name. :return result (list): """ return self._wrapper._post('/superflow/flows/' + self._name + '/operations/getFlowChoices', **{'id': id, 'name': name}) ### Runs a Test. @staticmethod def _testmodel_operations_run(self, modelname, group, allowMalware=False): """ Runs a Test. :param modelname (string): Test Name to run :param group (number): Group to run :param allowMalware (bool): Enable this option to allow malware in test. """ return self._wrapper._post('/testmodel/operations/run', **{'modelname': modelname, 'group': group, 'allowMalware': allowMalware}) ### Runs a Test. @staticmethod def _topology_operations_run(self, modelname, group, allowMalware=False): """ Runs a Test. :param modelname (string): Test Name to run :param group (number): Group to run :param allowMalware (bool): Enable this option to allow malware in test. 
""" return self._wrapper._post('/topology/operations/run', **{'modelname': modelname, 'group': group, 'allowMalware': allowMalware}) ### Deletes a Test Report from the database. @staticmethod def _reports_operations_delete(self, runid): """ Deletes a Test Report from the database. :param runid (number): The test run id that generated the report you want to delete. """ return self._wrapper._post('/reports/operations/delete', **{'runid': runid}) ### Create a new custom Load Profile. @staticmethod def _loadProfile_operations_createNewCustom(self, loadProfile): """ Create a new custom Load Profile. :param loadProfile (string): The Name of The load profile object to create. """ return self._wrapper._post('/loadProfile/operations/createNewCustom', **{'loadProfile': loadProfile}) ### Saves the current working Test Model under specified name. @staticmethod def _testmodel_operations_saveAs(self, name, force): """ Saves the current working Test Model under specified name. :param name (string): The new name given for the current working Test Model :param force (bool): Force to save the working Test Model using a new name. """ return self._wrapper._post('/testmodel/operations/saveAs', **{'name': name, 'force': force}) ### Saves the working Test Model using the current name. No need to configure. The current name is used. @staticmethod def _testmodel_operations_save(self, name=None, force=True): """ Saves the working Test Model using the current name. No need to configure. The current name is used. :param name (string): The name of the template that should be empty. :param force (bool): Force to save the working Test Model with the same name. """ return self._wrapper._post('/testmodel/operations/save', **{'name': name, 'force': force}) ### Deletes a given Test Model from the database. @staticmethod def _testmodel_operations_delete(self, name): """ Deletes a given Test Model from the database. :param name (string): The name of the Test Model. """ return self._wrapper._post('/testmodel/operations/delete', **{'name': name}) ### Load an existing Application Profile and sets it as the current one. @staticmethod def _appProfile_operations_load(self, template): """ Load an existing Application Profile and sets it as the current one. :param template (string): The name of the template application profile """ return self._wrapper._post('/appProfile/operations/load', **{'template': template}) ### Creates a new Application Profile. @staticmethod def _appProfile_operations_new(self, template=None): """ Creates a new Application Profile. :param template (string): This argument must remain unset. Do not set any value for it. """ return self._wrapper._post('/appProfile/operations/new', **{'template': template}) ### Saves the current working Strike List and gives it a new name. @staticmethod def _strikeList_operations_saveAs(self, name, force): """ Saves the current working Strike List and gives it a new name. :param name (string): The new name given for the current working Strike List :param force (bool): Force to save the working Strike List using the given name. """ return self._wrapper._post('/strikeList/operations/saveAs', **{'name': name, 'force': force}) ### Saves the current working Strike List using the current name @staticmethod def _strikeList_operations_save(self, name=None, force=True): """ Saves the current working Strike List using the current name :param name (string): The name of the template. Default is empty. :param force (bool): Force to save the working Strike List with the same name. 
""" return self._wrapper._post('/strikeList/operations/save', **{'name': name, 'force': force}) ### null @staticmethod def _testmodel_operations_search(self, searchString, limit, sort, sortorder): """ :param searchString (string): Search test name matching the string given. :param limit (string): The limit of rows to return :param sort (string): Parameter to sort by: 'createdOn'/'timestamp'/'bandwidth'/'result'/'lastrunby'/'createdBy'/'interfaces'/'testLabType' :param sortorder (string): The sort order: ascending/descending :return results (list): list of object with fields name (string): label (string): createdBy (string): network (string): duration (number): description (string): """ return self._wrapper._post('/testmodel/operations/search', **{'searchString': searchString, 'limit': limit, 'sort': sort, 'sortorder': sortorder}) ### Adds a list of SuperFlow to the current working Application Profile. ([{'superflow':'adadad', 'weight':'20'},{..}]) @staticmethod def _appProfile_operations_add(self, add): """ Adds a list of SuperFlow to the current working Application Profile. ([{'superflow':'adadad', 'weight':'20'},{..}]) :param add (list): list of object with fields superflow (string): The name of the super flow weight (string): The weight of the super flow """ return self._wrapper._post('/appProfile/operations/add', **{'add': add}) ### Sets a User Preference. @staticmethod def _administration_userSettings_operations_changeUserSetting(self, name, value): """ Sets a User Preference. :param name (string): The setting name. :param value (string): The new value for setting. """ return self._wrapper._post('/administration/userSettings/' + self._name + '/operations/changeUserSetting', **{'name': name, 'value': value}) ### Imports an ATI License file (.lic) on a hardware platform. This operation is NOT recommended to be used on BPS Virtual platforms. @staticmethod def _administration_atiLicensing_operations_importAtiLicense(self, filename, name): """ Imports an ATI License file (.lic) on a hardware platform. This operation is NOT recommended to be used on BPS Virtual platforms. :param filename (string): import file path :param name (string): the name of the license file """ return self._wrapper._import('/administration/atiLicensing/operations/importAtiLicense', **{'filename': filename, 'name': name}) ### null @staticmethod def _strikeList_operations_search(self, searchString='', limit=10, sort='name', sortorder='ascending'): """ :param searchString (string): Search strike list name matching the string given. :param limit (number): The limit of rows to return :param sort (string): Parameter to sort by. Default is by name. :param sortorder (string): The sort order (ascending/descending). Default is ascending. """ return self._wrapper._post('/strikeList/operations/search', **{'searchString': searchString, 'limit': limit, 'sort': sort, 'sortorder': sortorder}) ### Deletes a given Network Neighborhood Config from the database. @staticmethod def _network_operations_delete(self, name): """ Deletes a given Network Neighborhood Config from the database. :param name (string): The name of the Network Neighborhood Config. """ return self._wrapper._post('/network/operations/delete', **{'name': name}) ### Removes a SuperFlow from the current working Application Profile. @staticmethod def _appProfile_operations_remove(self, superflow): """ Removes a SuperFlow from the current working Application Profile. :param superflow (string): The name of the super flow. 
""" return self._wrapper._post('/appProfile/operations/remove', **{'superflow': superflow}) ### Returns stats series for a given component group stat output for a given timestamp @staticmethod def _results_operations_getHistoricalSeries(self, runid, componentid, dataindex, group): """ Returns stats series for a given component group stat output for a given timestamp :param runid (number): The test identifier :param componentid (string): The component identifier. Each component has an id and can be get loading the testand checking it's components info :param dataindex (number): The table index, equivalent with timestamp. :param group (string): The data group or one of the BPS component main groups. The group name can be get by executing the operation 'getGroups' from results node. :return results (list): list of object with fields name (string): content (string): datasetvals (string): """ return self._wrapper._post('/results/' + self._name + '/operations/getHistoricalSeries', **{'runid': runid, 'componentid': componentid, 'dataindex': dataindex, 'group': group}) ### Returns main groups of statistics for a single BPS Test Component. These groups can be used then in requesting statistics values from the history of a test run. @staticmethod def _results_operations_getGroups(self, name, dynamicEnums=True, includeOutputs=True): """ Returns main groups of statistics for a single BPS Test Component. These groups can be used then in requesting statistics values from the history of a test run. :param name (string): BPS Component name. This argument is actually the component type which can be get from 'statistics' table :param dynamicEnums (bool): :param includeOutputs (bool): :return results (object): object of object with fields name (string): label (string): groups (object): """ return self._wrapper._post('/results/' + self._name + '/operations/getGroups', **{'name': name, 'dynamicEnums': dynamicEnums, 'includeOutputs': includeOutputs}) ### Adds an action to the current working SuperFlow @staticmethod def _superflow_operations_addAction(self, flowid, type, actionid, source): """ Adds an action to the current working SuperFlow :param flowid (number): The flow id. :param type (string): The type of the action definition. :param actionid (number): The new action id. :param source (string): The action source. """ return self._wrapper._post('/superflow/operations/addAction', **{'flowid': flowid, 'type': type, 'actionid': actionid, 'source': source}) ### Exports a wanted test model by giving its name or its test run id.This operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. @staticmethod def _testmodel_operations_exportModel(self, name, attachments, filepath, runid=None): """ Exports a wanted test model by giving its name or its test run id.This operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. :param name (string): The name of the test model to be exported. :param attachments (bool): True if object attachments are needed. :param filepath (string): The local path where to save the exported object. :param runid (number): Test RUN ID """ return self._wrapper._export('/testmodel/operations/exportModel', **{'name': name, 'attachments': attachments, 'filepath': filepath, 'runid': runid}) ### Load an existing test model template. @staticmethod def _testmodel_operations_load(self, template): """ Load an existing test model template. 
:param template (string): The name of the template testmodel """ return self._wrapper._post('/testmodel/operations/load', **{'template': template}) ### Creates a new Test Model @staticmethod def _testmodel_operations_new(self, template=None): """ Creates a new Test Model :param template (string): The name of the template. In this case will be empty. """ return self._wrapper._post('/testmodel/operations/new', **{'template': template}) ### Saves the current working Application Profiles and gives it a new name. @staticmethod def _superflow_operations_saveAs(self, name, force): """ Saves the current working Application Profiles and gives it a new name. :param name (string): The new name given for the current working Super Flow :param force (bool): Force to save the working Super Flow using the given name. """ return self._wrapper._post('/superflow/operations/saveAs', **{'name': name, 'force': force}) ### Saves the working Super Flow using the current name @staticmethod def _superflow_operations_save(self, name=None, force=True): """ Saves the working Super Flow using the current name :param name (string): The name of the template that should be empty. :param force (bool): Force to save the working Super Flow with the same name. """ return self._wrapper._post('/superflow/operations/save', **{'name': name, 'force': force}) ### Exports an Application profile and all of its dependencies.This operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. @staticmethod def _appProfile_operations_exportAppProfile(self, name, attachments, filepath): """ Exports an Application profile and all of its dependencies.This operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. :param name (string): The name of the test model to be exported. :param attachments (bool): True if object attachments are needed. :param filepath (string): The local path where to save the exported object. """ return self._wrapper._export('/appProfile/operations/exportAppProfile', **{'name': name, 'attachments': attachments, 'filepath': filepath}) ### Exports the Strike List identified by its name and all of its dependenciesThis operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. @staticmethod def _strikeList_operations_exportStrikeList(self, name, filepath): """ Exports the Strike List identified by its name and all of its dependenciesThis operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. :param name (string): The name of the strike list to be exported. :param filepath (string): The local path where to save the exported object. 
The file should have .bap extension """ return self._wrapper._export('/strikeList/operations/exportStrikeList', **{'name': name, 'filepath': filepath}) ### null @staticmethod def _administration_operations_logs(self, error=False, messages=False, web=False, all=False, audit=False, info=False, system=False, lines=20, drop=0): """ :param error (bool): :param messages (bool): :param web (bool): :param all (bool): :param audit (bool): :param info (bool): :param system (bool): :param lines (number): number lines to return :param drop (number): number lines to drop """ return self._wrapper._post('/administration/operations/logs', **{'error': error, 'messages': messages, 'web': web, 'all': all, 'audit': audit, 'info': info, 'system': system, 'lines': lines, 'drop': drop}) ### null @staticmethod def _reports_operations_search(self, searchString, limit, sort, sortorder): """ :param searchString (string): Search test name matching the string given. :param limit (string): The limit of rows to return :param sort (string): Parameter to sort by: 'name'/'endTime'/'duration'/'result'/'startTime'/'iteration'/'network'/'dut'/'user'/'size' :param sortorder (string): The sort order: ascending/descending """ return self._wrapper._post('/reports/operations/search', **{'searchString': searchString, 'limit': limit, 'sort': sort, 'sortorder': sortorder}) ### Load an existing Super Flow and sets it as the current one. @staticmethod def _superflow_operations_load(self, template): """ Load an existing Super Flow and sets it as the current one. :param template (string): The name of the existing Super Flow template """ return self._wrapper._post('/superflow/operations/load', **{'template': template}) ### Creates a new Super Flow. @staticmethod def _superflow_operations_new(self, template=None): """ Creates a new Super Flow. :param template (string): The name of the template. In this case will be empty. """ return self._wrapper._post('/superflow/operations/new', **{'template': template}) ### Deletes a given Strike List from the database. @staticmethod def _strikeList_operations_delete(self, name): """ Deletes a given Strike List from the database. :param name (string): The name of the Strike List to be deleted. """ return self._wrapper._post('/strikeList/operations/delete', **{'name': name}) ### Gives abbreviated information about all Canned Flow Names. @staticmethod def _superflow_flows_operations_getCannedFlows(self): """ Gives abbreviated information about all Canned Flow Names. :return results (list): list of object with fields name (string): label (string): """ return self._wrapper._post('/superflow/flows/' + self._name + '/operations/getCannedFlows', **{}) ### Deletes a given Super Flow from the database. @staticmethod def _superflow_operations_delete(self, name): """ Deletes a given Super Flow from the database. :param name (string): The name of the Super Flow. """ return self._wrapper._post('/superflow/operations/delete', **{'name': name}) ### null @staticmethod def _results_operations_getHistoricalResultSize(self, runid, componentid, group): """ :param runid (number): The test run id :param componentid (string): The component identifier :param group (string): The data group or one of the BPS component main groups. The group name can be get by executing the operation 'getGroups' from results node :return result (string): """ return self._wrapper._post('/results/' + self._name + '/operations/getHistoricalResultSize', **{'runid': runid, 'componentid': componentid, 'group': group}) ### Adds a note to given port. 
@staticmethod def _topology_operations_addPortNote(self, interface, note): """ Adds a note to given port. :param interface (object): Slot and Port ID. object of object with fields slot (number): port (number): :param note (string): Note info. """ return self._wrapper._post('/topology/operations/addPortNote', **{'interface': interface, 'note': note}) ### Search Networks. @staticmethod def _network_operations_search(self, searchString, userid, clazz, sortorder, sort, limit, offset): """ Search Networks. :param searchString (string): Search networks matching the string given. :param userid (string): The owner to search for :param clazz (string): The 'class' of the object (usually 'canned' or 'custom') :param sortorder (string): The order in which to sort: ascending/descending :param sort (string): Parameter to sort by: 'name'/'class'/'createdBy'/'interfaces'/'timestamp' :param limit (number): The limit of network elements to return :param offset (number): The offset to begin from. :return results (list): list of object with fields name (string): label (string): createdBy (string): revision (number): description (string): """ return self._wrapper._post('/network/operations/search', **{'searchString': searchString, 'userid': userid, 'clazz': clazz, 'sortorder': sortorder, 'sort': sort, 'limit': limit, 'offset': offset}) ### Retrieves the real time statistics for the running test, by giving the run id. @staticmethod def _testmodel_operations_realTimeStats(self, runid, rtsgroup, numSeconds, numDataPoints=1): """ Retrieves the real time statistics for the running test, by giving the run id. :param runid (number): Test RUN ID :param rtsgroup (string): Real Time Stats group name. Values for this can be get from 'statistics' node, inside 'statNames' from each component at 'realtime Group' key/column. Examples: l7STats, all, bpslite, summary, clientStats etc. :param numSeconds (number): The number of seconds. If negative, means from the end :param numDataPoints (number): The number of data points, the default is 1. :return result (object): object of object with fields testStuck (bool): time (number): progress (number): values (string): """ return self._wrapper._post('/testmodel/operations/realTimeStats', **{'runid': runid, 'rtsgroup': rtsgroup, 'numSeconds': numSeconds, 'numDataPoints': numDataPoints}) ### Imports a capture file to the systemThis operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. @staticmethod def _capture_operations_importCapture(self, name, filename, force): """ Imports a capture file to the systemThis operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. :param name (string): The name of the capture being imported :param filename (string): The file containing the capture object :param force (bool): Force to import the file and the object having the same name will be replaced. """ return self._wrapper._import('/capture/operations/importCapture', **{'name': name, 'filename': filename, 'force': force}) ### Reboots the card. Only available for PerfectStorm and CloudStorm cards. @staticmethod def _topology_operations_reboot(self, board): """ Reboots the card. Only available for PerfectStorm and CloudStorm cards. :param board (number): """ return self._wrapper._post('/topology/operations/reboot', **{'board': board}) ### Saves the current working Application Profiles and gives it a new name. 
@staticmethod def _appProfile_operations_saveAs(self, name, force): """ Saves the current working Application Profiles and gives it a new name. :param name (string): The new name given for the current working Application Profile :param force (bool): Force to save the working Application Profile using the given name. """ return self._wrapper._post('/appProfile/operations/saveAs', **{'name': name, 'force': force}) ### Saves the current working application profile using the current name. No need to use any parameter. @staticmethod def _appProfile_operations_save(self, name=None, force=True): """ Saves the current working application profile using the current name. No need to use any parameter. :param name (string): The name of the template. No need to configure. The current name is used. :param force (bool): Force to save the working Application Profile with the same name. No need to configure. The default is used. """ return self._wrapper._post('/appProfile/operations/save', **{'name': name, 'force': force}) ### Get information about an action in the current working Superflow, retrieving also the choices for each action setting. @staticmethod def _superflow_actions_operations_getActionInfo(self, id): """ Get information about an action in the current working Superflow, retrieving also the choices for each action setting. :param id (number): The action id :return result (list): list of object with fields label (string): name (string): description (string): choice (object): """ return self._wrapper._post('/superflow/actions/' + self._name + '/operations/getActionInfo', **{'id': id}) ### null @staticmethod def _topology_operations_reserve(self, reservation, force=False): """ :param reservation (list): list of object with fields group (number): slot (number): port (number): capture (bool): :param force (bool): """ return self._wrapper._post('/topology/operations/reserve', **{'reservation': reservation, 'force': force}) ### Removes an action from the current working SuperFlow. @staticmethod def _superflow_operations_removeAction(self, id): """ Removes an action from the current working SuperFlow. :param id (number): The action ID. """ return self._wrapper._post('/superflow/operations/removeAction', **{'id': id}) ### Adds a flow to the current working SuperFlow @staticmethod def _superflow_operations_addFlow(self, flowParams): """ Adds a flow to the current working SuperFlow :param flowParams (object): The flow object to add. object of object with fields name (string): The name of the flow from (string): Traffic initiator. to (string): Traffic responder. """ return self._wrapper._post('/superflow/operations/addFlow', **{'flowParams': flowParams}) ### Imports a list of strikes residing in a file. @staticmethod def _strikeList_operations_importStrikeList(self, name, filename, force): """ Imports a list of strikes residing in a file. :param name (string): The name of the object being imported :param filename (string): The file containing the object to be imported. :param force (bool): Force to import the file and the object having the same name will be replaced. 
""" return self._wrapper._import('/strikeList/operations/importStrikeList', **{'name': name, 'filename': filename, 'force': force}) ### null @staticmethod def _network_operations_list(self, userid, clazz, sortorder, sort, limit, offset): """ :param userid (string): :param clazz (string): :param sortorder (string): :param sort (string): :param limit (number): :param offset (number): :return returnArg (list): list of object with fields name (string): type (string): author (string): createdOn (string): """ return self._wrapper._post('/network/operations/list', **{'userid': userid, 'clazz': clazz, 'sortorder': sortorder, 'sort': sort, 'limit': limit, 'offset': offset}) ### Exports everything including test models, network configurations and others from system.This operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. @staticmethod def _administration_operations_exportAllTests(self, filepath): """ Exports everything including test models, network configurations and others from system.This operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. :param filepath (string): The local path where to save the compressed file with all the models. The path must contain the file name and extension (.tar.gz): '/d/c/f/AllTests.tar.gz' """ return self._wrapper._export('/administration/operations/exportAllTests', **{'filepath': filepath}) ### Retrieves all the security options @staticmethod def _evasionProfile_StrikeOptions_operations_getStrikeOptions(self): """ Retrieves all the security options :return result (list): """ return self._wrapper._post('/evasionProfile/StrikeOptions/operations/getStrikeOptions', **{}) ### Saves the working network config and gives it a new name. @staticmethod def _network_operations_saveAs(self, name, regenerateOldStyle=True, force=False): """ Saves the working network config and gives it a new name. :param name (string): The new name given for the current working network config :param regenerateOldStyle (bool): Force to apply the changes made on the loaded network configuration. Force to generate a network from the old one. :param force (bool): Force to save the network config. It replaces a pre-existing config having the same name. """ return self._wrapper._post('/network/operations/saveAs', **{'name': name, 'regenerateOldStyle': regenerateOldStyle, 'force': force}) ### Save the current working network config. @staticmethod def _network_operations_save(self, name=None, regenerateOldStyle=True, force=True): """ Save the current working network config. :param name (string): The new name given for the current working network config. No need to configure. The current name is used. :param regenerateOldStyle (bool): No need to configure. The default is used. :param force (bool): No need to configure. The default is used. """ return self._wrapper._post('/network/operations/save', **{'name': name, 'regenerateOldStyle': regenerateOldStyle, 'force': force}) ### null @staticmethod def _appProfile_operations_search(self, searchString, limit, sort, sortorder): """ :param searchString (string): Search application profile name matching the string given. :param limit (string): The limit of rows to return :param sort (string): Parameter to sort by. 
:param sortorder (string): The sort order (ascending/descending) :return results (list): list of object with fields name (string): label (string): createdBy (string): revision (number): description (string): """ return self._wrapper._post('/appProfile/operations/search', **{'searchString': searchString, 'limit': limit, 'sort': sort, 'sortorder': sortorder}) ### Removes a strike from the current working Strike List.([{id: 'bb/c/d'}, {id: 'aa/f/g'}]) @staticmethod def _strikeList_operations_remove(self, strike): """ Removes a strike from the current working Strike List.([{id: 'bb/c/d'}, {id: 'aa/f/g'}]) :param strike (list): The list of strike ids to remove. The strike id is in fact the it's path. list of object with fields id (string): """ return self._wrapper._post('/strikeList/operations/remove', **{'strike': strike}) ### Load an existing Evasion Profile and sets it as the current one. @staticmethod def _evasionProfile_operations_load(self, template): """ Load an existing Evasion Profile and sets it as the current one. :param template (string): The name of an Evasion profile template. """ return self._wrapper._post('/evasionProfile/operations/load', **{'template': template}) ### Creates a new Evasion Profile. @staticmethod def _evasionProfile_operations_new(self, template=None): """ Creates a new Evasion Profile. :param template (string): The name should be empty to create a new object. """ return self._wrapper._post('/evasionProfile/operations/new', **{'template': template}) ### Removes a component from the current working Test Model. @staticmethod def _testmodel_operations_remove(self, id): """ Removes a component from the current working Test Model. :param id (string): The component id. """ return self._wrapper._post('/testmodel/operations/remove', **{'id': id}) ### Exports the result report of a test, identified by its run id and all of its dependenciesThis operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. @staticmethod def _reports_operations_exportReport(self, filepath, runid, reportType, sectionIds='', dataType='ALL'): """ Exports the result report of a test, identified by its run id and all of its dependenciesThis operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. :param filepath (string): The local path where to export the report, including the report name. :param runid (number): Test RUN ID :param reportType (string): Report file format to be exported in. :param sectionIds (string): Chapter Ids. Can be extracted a chapter or many, a sub-chapter or many or the entire report: (sectionIds='6' / sectionIds='5,6,7' / sectionIds='7.4,8.5.2,8.6.3.1' / sectionIds=''(to export the entire report)) :param dataType (string): Report content data type to export. Default value is 'all data'. For tabular only use 'TABLE' and for graphs only use 'CHARTS'. """ return self._wrapper._export('/reports/operations/exportReport', **{'filepath': filepath, 'runid': runid, 'reportType': reportType, 'sectionIds': sectionIds, 'dataType': dataType}) ### Exports a port capture from a test run.This operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. @staticmethod def _topology_operations_exportCapture(self, filepath, args): """ Exports a port capture from a test run.This operation can not be executed from the RESTApi Browser, it needs to be executed from a remote system through a REST call. 
:param filepath (string): The local path where to save the exported object. :param args (object): Export filters. The Possible values for: 'dir'(direction) are 'tx','rx','both';for 'sizetype' and 'starttype'(units for size and start) are 'megabytes' or 'frames' object of object with fields port (number): Port number slot (number): Slot number dir (string): Capturing direction (rx, tx, both) size (number): The size of the capture to be exported. start (number): Start at point. sizetype (string): The size unit: megabytes or frames. starttype (string): The start unit: megabytes or frames. """ return self._wrapper._export('/topology/operations/exportCapture', **{'filepath': filepath, 'args': args}) ### Returns the report Table of Contents using the test run id. @staticmethod def _reports_operations_getReportContents(self, runid, getTableOfContents=True): """ Returns the report Table of Contents using the test run id. :param runid (number): The test run id. :param getTableOfContents (bool): Boolean value having the default value set on 'True'. To obtain the Table Contents this value should remain on 'True'. :return results (list): list of object with fields Section Name (string): Section ID (string): """ return self._wrapper._post('/reports/operations/getReportContents', **{'runid': runid, 'getTableOfContents': getTableOfContents}) ### Returns the section of a report @staticmethod def _reports_operations_getReportTable(self, runid, sectionId): """ Returns the section of a report :param runid (number): The test run id. :param sectionId (string): The section id of the table desired to extract. :return results (object): """ return self._wrapper._post('/reports/operations/getReportTable', **{'runid': runid, 'sectionId': sectionId}) ### null @staticmethod def _loadProfile_operations_save(self): return self._wrapper._post('/loadProfile/operations/save', **{}) ### Save the active editing LoadProfile under specified name @staticmethod def _loadProfile_operations_saveAs(self, name): """ Save the active editing LoadProfile under specified name :param name (string): """ return self._wrapper._post('/loadProfile/operations/saveAs', **{'name': name}) ### Deletes a specified load profile from the database. @staticmethod def _loadProfile_operations_delete(self, name): """ Deletes a specified load profile from the database. :param name (string): The name of the loadProfile object to delete. """ return self._wrapper._post('/loadProfile/operations/delete', **{'name': name}) ### null @staticmethod def _capture_operations_search(self, searchString, limit, sort, sortorder): """ :param searchString (string): Search capture name matching the string given. :param limit (string): The limit of rows to return :param sort (string): Parameter to sort by. :param sortorder (string): The sort order (ascending/descending) :return results (list): list of object with fields name (string): totalPackets (string): duration (string): ipv4Packets (string): ipv6Packets (string): avgPacketSize (string): udpPackets (string): contentType (string): pcapFilesize (string): tcpPackets (string): avgFlowLength (string): """ return self._wrapper._post('/capture/operations/search', **{'searchString': searchString, 'limit': limit, 'sort': sort, 'sortorder': sortorder}) ### Load an existing Strike List and sets it as the current one. @staticmethod def _strikeList_operations_load(self, template): """ Load an existing Strike List and sets it as the current one. 
:param template (string): The name of the Strike List template """ return self._wrapper._post('/strikeList/operations/load', **{'template': template}) ### Creates a new Strike List. @staticmethod def _strikeList_operations_new(self, template=None): """ Creates a new Strike List. :param template (string): The name of the template. In this case will be empty. """ return self._wrapper._post('/strikeList/operations/new', **{'template': template}) class DataModelMeta(type): _dataModel = { 'evasionProfile': { 'lockedBy': { }, 'createdBy': { }, 'author': { }, 'name': { }, 'description': { }, 'label': { }, 'StrikeOptions': { 'TCP': { 'DuplicateBadSyn': { }, 'DuplicateBadChecksum': { }, 'SneakAckHandshake': { }, 'AcknowledgeAllSegments': { }, 'DuplicateBadSeq': { }, 'SkipHandshake': { }, 'SourcePort': { }, 'MaxSegmentSize': { }, 'DestinationPort': { }, 'DuplicateBadReset': { }, 'DestinationPortType': { }, 'DuplicateLastSegment': { }, 'DuplicateNullFlags': { }, 'SegmentOrder': { }, 'SourcePortType': { } }, 'JAVASCRIPT': { 'Obfuscate': { }, 'Encoding': { } }, 'FTP': { 'PadCommandWhitespace': { }, 'Username': { }, 'FTPEvasionLevel': { }, 'AuthenticationType': { }, 'Password': { } }, 'IPv6': { 'TC': { } }, 'DCERPC': { 'MultiContextBindHead': { }, 'MultiContextBind': { }, 'MultiContextBindTail': { }, 'MaxFragmentSize': { }, 'UseObjectID': { } }, 'RTF': { 'FictitiousCW': { }, 'ASCII_Escaping': { }, 'MixedCase': { }, 'WhiteSpace': { } }, 'POP3': { 'PadCommandWhitespace': { }, 'Username': { }, 'POP3UseProxyMode': { }, 'AuthenticationType': { }, 'Password': { } }, 'Variations': { 'Subset': { }, 'Shuffle': { }, 'VariantTesting': { }, 'Limit': { }, 'TestType': { } }, 'OLE': { 'RefragmentData': { } }, 'HTML': { 'HTMLUnicodeUTF8EncodingMode': { }, 'HTMLUnicodeUTF8EncodingSize': { }, 'HTMLUnicodeEncoding': { }, 'HTMLUnicodeUTF7EncodingMode': { } }, 'EMAIL': { 'EnvelopeType': { }, 'ShuffleHeaders': { }, 'To': { }, 'From': { } }, 'Global': { 'FalsePositives': { }, 'IOTimeout': { }, 'AllowDeprecated': { }, 'BehaviorOnTimeout': { }, 'MaxTimeoutPerStrike': { }, 'CachePoisoning': { } }, 'MS_Exchange_Ports': { 'SystemAttendant': { } }, 'PDF': { 'HexEncodeNames': { }, 'ShortFilterNames': { }, 'RandomizeDictKeyOrder': { }, 'Version': { }, 'PreHeaderData': { } }, 'SNMP': { 'CommunityString': { } }, 'COMMAND': { 'PadCommandWhitespace': { }, 'PadPathSlashes': { }, 'Malicious': { } }, 'ICMP': { 'DoEcho': { } }, 'UDP': { 'DestinationPortType': { }, 'SourcePort': { }, 'SourcePortType': { }, 'DestinationPort': { } }, 'IP': { 'ReadWriteWindowSize': { }, 'RFC3128FakePort': { }, 'FragEvasion': { }, 'RFC3128': { }, 'TTL': { }, 'MaxReadSize': { }, 'RFC3514': { }, 'FragPolicy': { }, 'MaxFragSize': { }, 'FragOrder': { }, 'TOS': { }, 'IPEvasionsOnBothSides': { }, 'MaxWriteSize': { } }, 'SMB': { 'Username': { }, 'RandomPipeOffset': { }, 'MaxReadSize': { }, 'MaxWriteSize': { }, 'AuthenticationType': { }, 'Password': { } }, 'IMAP4': { 'Username': { }, 'IMAPUseProxyMode': { }, 'AuthenticationType': { }, 'Password': { } }, 'HTTP': { 'ClientChunkedTransferSize': { }, 'EncodeUnicodeBareByte': { }, 'VirtualHostname': { }, 'EncodeUnicodePercentU': { }, 'GetParameterRandomPrepend': { }, 'EncodeSecondNibbleHex': { }, 'EncodeUnicodeInvalid': { }, 'ServerChunkedTransferSize': { }, 'VersionRandomizeCase': { }, 'URIRandomizeCase': { }, 'AuthenticationType': { }, 'ServerCompression': { }, 'VirtualHostnameType': { }, 'URIPrependAltSpaces': { }, 'URIPrependAltSpacesSize': { }, 'EncodeFirstNibbleHex': { }, 'MethodRandomInvalid': { }, 
'VersionRandomInvalid': { }, 'ServerChunkedTransfer': { }, 'EncodeDoublePercentHex': { }, 'URIAppendAltSpacesSize': { }, 'EncodeHexRandom': { }, 'DirectorySelfReference': { }, 'EndRequestFakeHTTPHeader': { }, 'EncodeUnicodeAll': { }, 'EncodeUnicodeRandom': { }, 'Base64EncodePOSTData': { }, 'IgnoreHeaders': { }, 'RequestFullURL': { }, 'HTTPTransportMethods': { }, 'Password': { }, 'MethodRandomizeCase': { }, 'MethodURISpaces': { }, 'ShuffleHeaders': { }, 'DirectoryFakeRelative': { }, 'URIAppendAltSpaces': { }, 'MethodURITabs': { }, 'RequireLeadingSlash': { }, 'EncodeDoubleNibbleHex': { }, 'ForwardToBackSlashes': { }, 'PadHTTPPost': { }, 'MethodURINull': { }, 'Username': { }, 'VersionUse0_9': { }, 'EncodeHexAll': { }, 'PostParameterRandomPrepend': { }, 'ClientChunkedTransfer': { }, 'HTTPServerProfile': { } }, 'SELF': { 'ApplicationPings': { }, 'TraversalVirtualDirectory': { }, 'AppSimUseNewTuple': { }, 'StartingFuzzerOffset': { }, 'URI': { }, 'FileTransferRandCase': { }, 'UnicodeTraversalWindowsDirectory': { }, 'AREA-ID': { }, 'AppSimAppProfile': { }, 'Repetitions': { }, 'FileTransferExtension': { }, 'Password': { }, 'AppSimSmartflow': { }, 'HTMLPadding': { }, 'MaximumIterations': { }, 'FileTransferFile': { }, 'AS-ID': { }, 'AppSimSuperflow': { }, 'EndingFuzzerOffset': { }, 'ReportCLSIDs': { }, 'DelaySeconds': { }, 'Username': { }, 'UnicodeTraversalVirtualDirectory': { }, 'TraversalWindowsDirectory': { }, 'FileTransferName': { }, 'MaximumRuntime': { }, 'ROUTER-ID': { }, 'TraversalRequestFilename': { } }, 'SHELLCODE': { 'RandomNops': { } }, 'SSL': { 'ClientCertificateFile': { }, 'EnableOnAllTCP': { }, 'SecurityProtocol': { }, 'DestPortOverride': { }, 'ServerCertificateFile': { }, 'ServerKeyFile': { }, 'EnableOnAllHTTP': { }, 'ClientKeyFile': { }, 'Cipher': { }, 'DisableDefaultStrikeSSL': { } }, 'SUNRPC': { 'OneFragmentMultipleTCPSegmentsCount': { }, 'RPCFragmentTCPSegmentDistribution': { }, 'TCPFragmentSize': { }, 'NullCredentialPadding': { } }, 'FILETRANSFER': { 'SmtpEncoding': { }, 'CompressionMethod': { }, 'FtpTransferMethod': { }, 'TransportProtocol': { }, 'Imap4Encoding': { }, 'Pop3Encoding': { } }, 'UNIX': { 'PadCommandWhitespace': { }, 'PadPathSlashes': { } }, 'SMTP': { 'SMTPUseProxyMode': { }, 'PadCommandWhitespace': { }, 'ShuffleHeaders': { } }, 'Ethernet': { 'MTU': { } }, 'MALWARE': { 'FilenameInsertEnvVar': { }, 'SmtpEncoding': { }, 'CompressionMethod': { }, 'FtpTransferMethod': { }, 'TransportProtocol': { }, 'Imap4Encoding': { }, 'Pop3Encoding': { } }, 'SIP': { 'EnvelopeType': { }, 'CompactHeaders': { }, 'PadHeadersWhitespace': { }, 'RandomizeCase': { }, 'ShuffleHeaders': { }, 'To': { }, 'From': { }, 'PadHeadersLineBreak': { } }, 'operations': { 'getStrikeOptions': [{ 'name': { }, 'description': { }, 'realtimeGroup': { }, 'label': { }, 'units': { }, 'choice': [{ 'name': { }, 'description': { }, 'label': { } }] }] } }, 'createdOn': { }, 'contentType': { }, 'revision': { }, 'operations': { 'delete': [{ }], 'saveAs': [{ }], 'save': [{ }], 'search': [{ }], 'load': [{ }], 'new': [{ }] } }, 'reports': { 'endtime': { }, 'starttime': { }, 'label': { }, 'testname': { }, 'network': { }, 'duration': { }, 'result': { }, 'size': { }, 'isPartOfResiliency': { }, 'name': { }, 'iteration': { }, 'testid': { }, 'user': { }, 'operations': { 'delete': [{ }], 'search': [{ }], 'exportReport': [{ }], 'getReportContents': [{ }], 'getReportTable': [{ }] } }, 'capture': { 'pcapFilesize': { }, 'avgPacketSize': { }, 'author': { }, 'udpPackets': { }, 'description': { }, 'label': { }, 'createdOn': { }, 'name': { 
}, 'revision': { }, 'duration': { }, 'ipv4Packets': { }, 'ipv6Packets': { }, 'lockedBy': { }, 'tcpPackets': { }, 'createdBy': { }, 'avgFlowLength': { }, 'totalPackets': { }, 'contentType': { }, 'operations': { 'importCapture': [{ }], 'search': [{ }] } }, 'network': { 'lockedBy': { }, 'createdBy': { }, 'author': { }, 'name': { }, 'interfaceCount': { }, 'description': { }, 'label': { }, 'networkModel': { 'enodeb': [{ 'dns': { }, 'plmn': { }, 'psn': { }, 'psn_netmask': { }, 'sctp_over_udp': { }, 'enodebs': [{ 'mme_ip_address': { }, 'enodebCount': { }, 'ip_address': { } }], 'gateway_ip_address': { }, 'netmask': { }, 'default_container': { }, 'id': { }, 'sctp_sport': { } }], 'ip_router': [{ 'gateway_ip_address': { }, 'netmask': { }, 'default_container': { }, 'id': { }, 'ip_address': { } }], 'ip6_router': [{ 'hosts_ip_alloc_container': { }, 'gateway_ip_address': { }, 'default_container': { }, 'id': { }, 'ip_address': { }, 'prefix_length': { } }], 'ue_info': [{ 'imsi_base': { }, 'secret_key_step': { }, 'count': { }, 'operator_variant': { }, 'secret_key': { }, 'imei_base': { }, 'msisdn_base': { }, 'maxmbps_per_ue': { }, 'mobility_session_infos': [{ 'id': { }, 'value': { } }], 'id': { } }], 'ip_ldap_server': [{ 'auth_timeout': { }, 'ldap_username_start_tag': { }, 'ldap_user_min': { }, 'ldap_user_count': { }, 'authentication_rate': { }, 'ldap_password_start_tag': { }, 'ldap_user_max': { }, 'id': { }, 'ldap_server_address': { }, 'dn_fixed_val': { } }], 'mme_sgw_pgw6': [{ 'ue_info': { }, 'max_sessions': { }, 'lease_address': { }, 'dns': { }, 'plmn': { }, 'ip_address': { }, 'sgw_advertised_sgw': { }, 'sgw_advertised_pgw': { }, 'lease_address_v6': { }, 'gateway_ip_address': { }, 'default_container': { }, 'id': { }, 'prefix_length': { } }], 'mobility_session_info': [{ 'password': { }, 'bearers': [{ 'qci_label': { } }], 'id': { }, 'access_point_name': { }, 'username': { }, 'initiated_dedicated_bearers': { } }], 'ggsn6': [{ 'lease_address': { }, 'count': { }, 'dns': { }, 'ggsn_advertised_control_ip_address': { }, 'ip_address': { }, 'ggsn_advertised_data_ip_address': { }, 'lease_address_v6': { }, 'gateway_ip_address': { }, 'default_container': { }, 'id': { }, 'prefix_length': { } }], 'ip_external_hosts': [{ 'proxy': { }, 'count': { }, 'id': { }, 'ip_address': { }, 'behind_snapt': { }, 'tags': { } }], 'ip_static_hosts': [{ 'mpls_list': [{ 'id': { }, 'value': { } }], 'ip_selection_type': { }, 'count': { }, 'dns': { }, 'psn': { }, 'psn_netmask': { }, 'ip_address': { }, 'tags': { }, 'proxy': { }, 'maxmbps_per_host': { }, 'gateway_ip_address': { }, 'netmask': { }, 'ldap': { }, 'default_container': { }, 'id': { }, 'dns_proxy': { }, 'behind_snapt': { }, 'enable_stats': { } }], 'ggsn': [{ 'lease_address': { }, 'count': { }, 'dns': { }, 'ggsn_advertised_control_ip_address': { }, 'ip_address': { }, 'ggsn_advertised_data_ip_address': { }, 'lease_address_v6': { }, 'gateway_ip_address': { }, 'netmask': { }, 'default_container': { }, 'id': { } }], 'interface': [{ 'ignore_pause_frames': { }, 'duplicate_mac_address': { }, 'description': { }, 'packet_filter': { 'not_dest_port': { }, 'not_src_ip': { }, 'filter': { }, 'src_ip': { }, 'src_port': { }, 'vlan': { }, 'not_vlan': { }, 'dest_ip': { }, 'not_dest_ip': { }, 'dest_port': { }, 'not_src_port': { } }, 'impairments': { 'drop': { }, 'corrupt_lt64': { }, 'rate': { }, 'corrupt_lt256': { }, 'corrupt_rand': { }, 'corrupt_chksum': { }, 'corrupt_gt256': { }, 'frack': { } }, 'mtu': { }, 'vlan_key': { }, 'number': { }, 'use_vnic_mac_address': { }, 'mac_address': { }, 'id': { } }], 
'ds_lite_b4': [{ 'aftr_addr': { }, 'count': { }, 'ip_address': { }, 'host_ip_base_addr': { }, 'ipv6_addr_alloc_mode': { }, 'gateway_ip_address': { }, 'default_container': { }, 'aftr_count': { }, 'hosts_ip_increment': { }, 'id': { }, 'prefix_length': { }, 'host_ip_addr_alloc_mode': { } }], 'ue': [{ 'allocation_rate': { }, 'mobility_interval_ms': { }, 'ue_info': { }, 'dns': { }, 'mobility_action': { }, 'tags': { }, 'proxy': { }, 'default_container': { }, 'mobility_with_traffic': { }, 'id': { }, 'behind_snapt': { }, 'request_ipv6': { }, 'enable_stats': { } }], 'ip_dns_proxy': [{ 'dns_proxy_ip_count': { }, 'dns_proxy_src_ip_base': { }, 'id': { }, 'dns_proxy_ip_base': { }, 'dns_proxy_src_ip_count': { } }], 'enodeb_mme_sgw6': [{ 'dns': { }, 'plmn': { }, 'ip_allocation_mode': { }, 'mme_ip_address': { }, 'pgw_ip_address': { }, 'ue_address': { }, 'gateway_ip_address': { }, 'default_container': { }, 'id': { }, 'prefix_length': { } }], 'ip6_dns_proxy': [{ 'dns_proxy_ip_count': { }, 'dns_proxy_src_ip_base': { }, 'id': { }, 'dns_proxy_ip_base': { }, 'dns_proxy_src_ip_count': { } }], 'vlan': [{ 'tpid': { }, 'duplicate_mac_address': { }, 'description': { }, 'mtu': { }, 'outer_vlan': { }, 'inner_vlan': { }, 'mac_address': { }, 'default_container': { }, 'id': { } }], 'mme_sgw_pgw': [{ 'ue_info': { }, 'max_sessions': { }, 'lease_address': { }, 'dns': { }, 'plmn': { }, 'ip_address': { }, 'sgw_advertised_sgw': { }, 'sgw_advertised_pgw': { }, 'lease_address_v6': { }, 'gateway_ip_address': { }, 'netmask': { }, 'default_container': { }, 'id': { } }], 'ds_lite_aftr': [{ 'count': { }, 'ip_address': { }, 'ipv6_addr_alloc_mode': { }, 'gateway_ip_address': { }, 'default_container': { }, 'b4_count': { }, 'b4_ip_address': { }, 'id': { }, 'prefix_length': { } }], 'ipsec_router': [{ 'gateway_ip_address': { }, 'netmask': { }, 'ipsec': { }, 'default_container': { }, 'id': { }, 'ip_address': { }, 'ike_peer_ip_address': { } }], 'dhcpv6c_req_opts_cfg': [{ 'dhcpv6v_req_preference': { }, 'dhcpv6v_req_dns_list': { }, 'dhcpv6v_req_dns_resolvers': { }, 'dhcpv6v_req_server_id': { }, 'id': { } }], 'sgsn': [{ 'gateway_ip_address': { }, 'netmask': { }, 'default_container': { }, 'ggsn_ip_address': { }, 'id': { }, 'ip_address': { } }], 'path_advanced': [{ 'destination_port_count': { }, 'destination_port_base': { }, 'source_port_base': { }, 'tags': { }, 'enable_external_file': { }, 'source_container': { }, 'source_port_algorithm': { }, 'tuple_limit': { }, 'file': { }, 'destination_port_algorithm': { }, 'destination_container': { }, 'source_port_count': { }, 'xor_bits': { }, 'stream_group': { }, 'id': { } }], 'path_basic': [{ 'source_container': { }, 'destination_container': { }, 'id': { } }], 'enodeb_mme6': [{ 'dns': { }, 'plmn': { }, 'ip_allocation_mode': { }, 'enodebs': [{ 'gateway_ip_address': { }, 'default_container': { }, 'enodebCount': { }, 'ip_address': { }, 'prefix_length': { } }], 'mme_ip_address': { }, 'pgw_ip_address': { }, 'ue_address': { }, 'gateway_ip_address': { }, 'default_container': { }, 'sgw_ip_address': { }, 'id': { }, 'prefix_length': { } }], 'pgw': [{ 'max_sessions': { }, 'lease_address': { }, 'dns': { }, 'plmn': { }, 'ip_address': { }, 'lease_address_v6': { }, 'gateway_ip_address': { }, 'netmask': { }, 'default_container': { }, 'id': { } }], 'pgw6': [{ 'max_sessions': { }, 'lease_address': { }, 'dns': { }, 'plmn': { }, 'ip_address': { }, 'lease_address_v6': { }, 'gateway_ip_address': { }, 'default_container': { }, 'id': { }, 'prefix_length': { } }], 'sgsn6': [{ 'gateway_ip_address': { }, 'default_container': { }, 
'ggsn_ip_address': { }, 'id': { }, 'ip_address': { }, 'prefix_length': { } }], 'ip6_static_hosts': [{ 'mpls_list': [{ 'id': { }, 'value': { } }], 'ip_alloc_container': { }, 'ip_selection_type': { }, 'count': { }, 'dns': { }, 'ip_address': { }, 'tags': { }, 'proxy': { }, 'maxmbps_per_host': { }, 'gateway_ip_address': { }, 'default_container': { }, 'id': { }, 'host_ipv6_addr_alloc_mode': { }, 'prefix_length': { }, 'dns_proxy': { }, 'behind_snapt': { }, 'enable_stats': { } }], 'plmn': [{ 'mnc': { }, 'description': { }, 'id': { }, 'mcc': { } }], 'enodeb_mme_sgw': [{ 'dns': { }, 'plmn': { }, 'ip_allocation_mode': { }, 'mme_ip_address': { }, 'pgw_ip_address': { }, 'ue_address': { }, 'gateway_ip_address': { }, 'netmask': { }, 'default_container': { }, 'id': { } }], 'sgw_pgw': [{ 'max_sessions': { }, 'lease_address': { }, 'dns': { }, 'plmn': { }, 'ip_address': { }, 'sgw_advertised_sgw': { }, 'sgw_advertised_pgw': { }, 'lease_address_v6': { }, 'gateway_ip_address': { }, 'netmask': { }, 'default_container': { }, 'id': { } }], 'ip6_dhcp_server': [{ 'ia_type': { }, 'pool_size': { }, 'ip_address': { }, 'pool_prefix_length': { }, 'offer_lifetime': { }, 'max_lease_time': { }, 'gateway_ip_address': { }, 'default_container': { }, 'pool_base_address': { }, 'default_lease_time': { }, 'pool_dns_address1': { }, 'id': { }, 'prefix_length': { }, 'pool_dns_address2': { } }], 'enodeb6': [{ 'dns': { }, 'plmn': { }, 'sctp_over_udp': { }, 'enodebs': [{ 'mme_ip_address': { }, 'enodebCount': { }, 'ip_address': { } }], 'gateway_ip_address': { }, 'default_container': { }, 'id': { }, 'prefix_length': { }, 'sctp_sport': { } }], 'slaac_cfg': [{ 'use_rand_addr': { }, 'enable_dad': { }, 'id': { }, 'stateless_dhcpv6c_cfg': { }, 'fallback_ip_address': { } }], 'ip6_external_hosts': [{ 'proxy': { }, 'count': { }, 'id': { }, 'ip_address': { }, 'behind_snapt': { }, 'tags': { } }], 'ip_dns_config': [{ 'dns_domain': { }, 'id': { }, 'dns_server_address': { } }], 'dhcpv6c_tout_and_retr_cfg': [{ 'dhcp6c_inforeq_attempts': { }, 'dhcp6c_initial_rebind_tout': { }, 'dhcp6c_sol_attempts': { }, 'dhcp6c_max_rebind_tout': { }, 'dhcp6c_release_attempts': { }, 'dhcp6c_initial_release_tout': { }, 'dhcp6c_req_attempts': { }, 'dhcp6c_max_req_tout': { }, 'dhcp6c_max_renew_tout': { }, 'dhcp6c_max_sol_tout': { }, 'dhcp6c_initial_req_tout': { }, 'dhcp6c_max_inforeq_tout': { }, 'dhcp6c_initial_sol_tout': { }, 'dhcp6c_initial_renew_tout': { }, 'dhcp6c_initial_inforeq_tout': { }, 'id': { } }], 'ip_dhcp_server': [{ 'lease_address': { }, 'count': { }, 'dns': { }, 'ip_address': { }, 'gateway_ip_address': { }, 'netmask': { }, 'lease_time': { }, 'default_container': { }, 'id': { }, 'accept_local_requests_only': { } }], 'ip6_dns_config': [{ 'dns_domain': { }, 'id': { }, 'dns_server_address': { } }], 'sgw_pgw6': [{ 'max_sessions': { }, 'lease_address': { }, 'dns': { }, 'plmn': { }, 'ip_address': { }, 'sgw_advertised_sgw': { }, 'sgw_advertised_pgw': { }, 'lease_address_v6': { }, 'gateway_ip_address': { }, 'default_container': { }, 'id': { }, 'prefix_length': { } }], 'mpls_settings': [{ 'mpls_tags': [{ 'mpls_ttl': { }, 'mpls_label': { }, 'mpls_exp': { } }], 'id': { } }], 'ipsec_config': [{ 'ike_dh': { }, 'ipsec_lifetime': { }, 'ike_pfs': { }, 'ike_mode': { }, 'ike_1to1': { }, 'nat_traversal': { }, 'xauth_username': { }, 'ike_encr_alg': { }, 'psk': { }, 'dpd_enabled': { }, 'dpd_timeout': { }, 'init_rate': { }, 'setup_timeout': { }, 'esp_encr_alg': { }, 'ike_lifetime': { }, 'ike_version': { }, 'id': { }, 'left_id': { }, 'ike_prf_alg': { }, 'esp_auth_alg': { }, 
'dpd_delay': { }, 'xauth_password': { }, 'initial_contact': { }, 'debug_log': { }, 'wildcard_tsr': { }, 'rekey_margin': { }, 'ike_auth_alg': { }, 'right_id': { }, 'max_outstanding': { }, 'retrans_interval': { }, 'enable_xauth': { } }], 'dhcpv6c_cfg': [{ 'dhcp6c_max_outstanding': { }, 'dhcp6c_duid_type': { }, 'dhcp6c_ia_type': { }, 'dhcp6c_req_opts_config': { }, 'dhcp6c_tout_and_retr_config': { }, 'dhcp6c_renew_timer': { }, 'dhcp6c_ia_t2': { }, 'id': { }, 'dhcp6c_ia_t1': { }, 'dhcp6c_initial_srate': { } }], 'sixrd_ce': [{ 'sixrd_prefix': { }, 'count': { }, 'dns': { }, 'sixrd_prefix_length': { }, 'ip_address': { }, 'tags': { }, 'br_ip_address': { }, 'gateway_ip_address': { }, 'netmask': { }, 'default_container': { }, 'hosts_per_ce': { }, 'ip4_mask_length': { }, 'id': { }, 'enable_stats': { } }], 'ip_dhcp_hosts': [{ 'allocation_rate': { }, 'count': { }, 'tags': { }, 'proxy': { }, 'ldap': { }, 'default_container': { }, 'accept_local_offers_only': { }, 'id': { }, 'behind_snapt': { }, 'dns_proxy': { }, 'enable_stats': { } }], 'enodeb_mme': [{ 'dns': { }, 'plmn': { }, 'ip_allocation_mode': { }, 'enodebs': [{ 'gateway_ip_address': { }, 'netmask': { }, 'default_container': { }, 'enodebCount': { }, 'ip_address': { } }], 'mme_ip_address': { }, 'pgw_ip_address': { }, 'ue_address': { }, 'gateway_ip_address': { }, 'netmask': { }, 'default_container': { }, 'sgw_ip_address': { }, 'id': { } }] }, 'createdOn': { }, 'contentType': { }, 'revision': { }, 'operations': { 'importNetwork': [{ }], 'load': [{ }], 'new': [{ }], 'delete': [{ }], 'search': [{ }], 'list': [{ }], 'saveAs': [{ }], 'save': [{ }] } }, 'topology': { 'ixoslicensed': { }, 'ixos': { }, 'runningTest': [{ 'phase': { }, 'timeRemaining': { }, 'runtime': { }, 'label': { }, 'completed': { }, 'initProgress': { }, 'result': { }, 'port': [{ }], 'capturing': { }, 'progress': { }, 'testid': { }, 'state': { }, 'user': { }, 'currentTest': { } }], 'model': { }, 'slot': [{ 'port': [{ 'owner': { }, 'number': { }, 'note': { }, 'exportProgress': { }, 'reservedBy': { }, 'capturing': { }, 'model': { }, 'id': { }, 'group': { }, 'link': { }, 'state': { }, 'speed': { } }], 'mode': { }, 'model': { }, 'state': { }, 'id': { }, 'serialNumber': { } }], 'serialNumber': { }, 'operations': { 'unreserve': [{ }], 'setCardMode': [{ }], 'setCardSpeed': [{ }], 'setCardFanout': [{ }], 'setPerfAcc': [{ }], 'stopRun': [{ }], 'run': [{ }], 'addPortNote': [{ }], 'reboot': [{ }], 'reserve': [{ }], 'exportCapture': [{ }] } }, 'superflow': { 'percentFlows': { }, 'seed': { }, 'hosts': [{ 'iface': { }, 'hostname': { }, 'ip': { 'type': { } }, 'id': { } }], 'author': { }, 'estimate_bytes': { }, 'estimate_flows': { }, 'weight': { }, 'description': { }, 'label': { }, 'createdOn': { }, 'revision': { }, 'lockedBy': { }, 'flows': [{ 'singleNP': { }, 'name': { }, 'from': { }, 'label': { }, 'id': { }, 'to': { }, 'params': { }, 'flowcount': { }, 'operations': { 'getFlowChoices': [{ 'lockedBy': { }, 'createdBy': { }, 'author': { }, 'description': { }, 'label': { }, 'createdOn': { }, 'contentType': { }, 'revision': { } }], 'getCannedFlows': [{ }] } }], 'generated': { }, 'createdBy': { }, 'percentBandwidth': { }, 'name': { }, 'actions': [{ 'flowlabel': { }, 'gotoBlock': { }, 'exflows': { }, 'matchBlock': { }, 'id': { }, 'source': { }, 'label': { }, 'type': { }, 'params': { }, 'flowid': { }, 'actionInfo': [{ 'name': { }, 'description': { }, 'realtimeGroup': { }, 'label': { }, 'units': { }, 'choice': [{ 'name': { }, 'description': { }, 'label': { } }] }], 'operations': { 'getActionChoices': [{ }], 
'getActionInfo': [{ 'name': { }, 'description': { }, 'realtimeGroup': { }, 'label': { }, 'units': { }, 'choice': [{ 'name': { }, 'description': { }, 'label': { } }] }] } }], 'contentType': { }, 'operations': { 'search': [{ }], 'addHost': [{ }], 'removeFlow': [{ }], 'addAction': [{ }], 'saveAs': [{ }], 'save': [{ }], 'load': [{ }], 'new': [{ }], 'delete': [{ }], 'removeAction': [{ }], 'addFlow': [{ }] } }, 'testmodel': { 'lastrunby': { }, 'summaryInfo': { 'totalSubnets': { }, 'totalMacAddresses': { }, 'totalUniqueStrikes': { }, 'totalUniqueSuperflows': { }, 'requiredMTU': { } }, 'author': { }, 'lastrun': { }, 'description': { }, 'label': { }, 'sharedComponentSettings': { 'maximumConcurrentFlows': { 'current': { }, 'original': { }, 'content': { } }, 'totalAttacks': { 'current': { }, 'original': { }, 'content': { } }, 'totalBandwidth': { 'current': { }, 'original': { }, 'content': { } }, 'maxFlowCreationRate': { 'current': { }, 'original': { }, 'content': { } }, 'totalAddresses': { 'current': { }, 'original': { }, 'content': { } }, 'samplePeriod': { 'current': { }, 'original': { }, 'content': { } } }, 'createdOn': { }, 'network': { }, 'revision': { }, 'duration': { }, 'result': { }, 'component': [{ 'author': { }, 'originalPreset': { }, 'active': { }, 'originalPresetLabel': { }, 'description': { }, 'label': { }, 'type': { }, '@type:liveappsim': { 'app': { 'removeUnknownTcpUdp': { }, 'replace_streams': { }, 'removeUnknownSSL': { }, 'streamsPerSuperflow': { }, 'removedns': { }, 'fidelity': { } }, 'tcp': { 'disable_ack_piggyback': { }, 'delay_acks': { }, 'mss': { }, 'raw_flags': { }, 'psh_every_segment': { }, 'ecn': { }, 'tcp_window_scale': { }, 'initial_receive_window': { }, 'reset_at_end': { }, 'dynamic_receive_window_size': { }, 'tcp_connect_delay_ms': { }, 'aging_time_data_type': { }, 'tcp_4_way_close': { }, 'shutdown_data': { }, 'tcp_icw': { }, 'tcp_keepalive_timer': { }, 'aging_time': { }, 'add_timestamps': { }, 'retries': { }, 'handshake_data': { }, 'ack_every_n': { }, 'syn_data_padding': { }, 'retry_quantum_ms': { }, 'delay_acks_ms': { } }, 'inflateDeflate': { }, 'rateDist': { 'unit': { }, 'min': { }, 'max': { }, 'unlimited': { }, 'scope': { }, 'type': { } }, 'sessions': { 'openFast': { }, 'closeFast': { }, 'max': { }, 'allocationOverride': { }, 'targetPerSecond': { }, 'target': { }, 'targetMatches': { }, 'maxPerSecond': { }, 'engine': { }, 'statDetail': { }, 'emphasis': { }, 'maxActive': { } }, 'loadprofile': { 'name': { }, 'label': { } }, 'ip': { 'tos': { }, 'ttl': { } }, 'ip6': { 'flowlabel': { }, 'traffic_class': { }, 'hop_limit': { } }, 'srcPortDist': { 'min': { }, 'max': { }, 'type': { } }, 'tputscalefactor': { }, 'rampUpProfile': { 'min': { }, 'max': { }, 'increment': { }, 'interval': { }, 'type': { } }, 'concurrencyscalefactor': { }, 'delayStart': { }, 'rampDist': { 'upBehavior': { }, 'down': { }, 'steadyBehavior': { }, 'downBehavior': { }, 'up': { }, 'synRetryMode': { }, 'steady': { } }, 'sfratescalefactor': { }, 'liveProfile': { } }, '@type:layer3advanced': { 'rateDist': { 'unit': { }, 'min': { }, 'max': { }, 'rate': { }, 'increment': { }, 'type': { }, 'ramptype': { } }, 'bidirectional': { }, 'enableTCP': { }, 'slowStart': { }, 'Templates': { 'TemplateType': { } }, 'slowStartFps': { }, 'duration': { 'disable_nd_probes': { }, 'durationTime': { }, 'durationFrames': { } }, 'enablePerStreamStats': { }, 'tuple_gen_seed': { }, 'payload': { 'data': { }, 'type': { }, 'dataWidth': { } }, 'advancedUDP': { 'lengthVal': { }, 'lengthField': { }, 'checksumVal': { }, 'checksumField': { } }, 
'delayStart': { }, 'payloadAdvanced': { 'udfMode': { }, 'udfLength': { }, 'udfDataWidth': { }, 'udfOffset': { } }, 'sizeDist': { 'increment': { }, 'type': { }, 'min': { }, 'rate': { }, 'mixlen2': { }, 'mixweight6': { }, 'mixlen1': { }, 'mixweight7': { }, 'mixlen4': { }, 'mixweight4': { }, 'mixlen3': { }, 'mixweight5': { }, 'mixlen6': { }, 'mixlen5': { }, 'mixlen8': { }, 'mixweight8': { }, 'mixlen7': { }, 'mixweight9': { }, 'mixlen9': { }, 'mixweight2': { }, 'max': { }, 'mixweight3': { }, 'mixweight1': { }, 'mixlen10': { }, 'mixweight10': { }, 'unit': { } }, 'advancedIPv4': { 'lengthVal': { }, 'optionHeaderField': { }, 'optionHeaderData': { }, 'lengthField': { }, 'checksumVal': { }, 'tos': { }, 'checksumField': { }, 'ttl': { } }, 'advancedIPv6': { 'flowLabel': { }, 'lengthVal': { }, 'extensionHeaderField': { }, 'lengthField': { }, 'nextHeader': { }, 'trafficClass': { }, 'extensionHeaderData': { }, 'hopLimit': { } } }, '@type:appsim': { 'app': { 'replace_streams': { }, 'streamsPerSuperflow': { }, 'removedns': { }, 'fidelity': { } }, 'tcp': { 'disable_ack_piggyback': { }, 'delay_acks': { }, 'mss': { }, 'raw_flags': { }, 'psh_every_segment': { }, 'ecn': { }, 'tcp_window_scale': { }, 'initial_receive_window': { }, 'reset_at_end': { }, 'dynamic_receive_window_size': { }, 'tcp_connect_delay_ms': { }, 'aging_time_data_type': { }, 'tcp_4_way_close': { }, 'shutdown_data': { }, 'tcp_icw': { }, 'tcp_keepalive_timer': { }, 'aging_time': { }, 'add_timestamps': { }, 'retries': { }, 'handshake_data': { }, 'ack_every_n': { }, 'syn_data_padding': { }, 'retry_quantum_ms': { }, 'delay_acks_ms': { } }, 'rateDist': { 'unit': { }, 'min': { }, 'max': { }, 'unlimited': { }, 'scope': { }, 'type': { } }, 'sessions': { 'openFast': { }, 'closeFast': { }, 'max': { }, 'allocationOverride': { }, 'targetPerSecond': { }, 'target': { }, 'targetMatches': { }, 'maxPerSecond': { }, 'engine': { }, 'statDetail': { }, 'emphasis': { }, 'maxActive': { } }, 'loadprofile': { 'name': { }, 'label': { } }, 'profile': { }, 'ip': { 'tos': { }, 'ttl': { } }, 'experimental': { 'tcpSegmentsBurst': { }, 'unify_l4_bufs': { } }, 'ssl': { 'ssl_client_keylog': { }, 'sslReuseType': { }, 'server_record_len': { }, 'client_record_len': { }, 'ssl_keylog_max_entries': { } }, 'ip6': { 'flowlabel': { }, 'traffic_class': { }, 'hop_limit': { } }, 'srcPortDist': { 'min': { }, 'max': { }, 'type': { } }, 'rampUpProfile': { 'min': { }, 'max': { }, 'increment': { }, 'interval': { }, 'type': { } }, 'delayStart': { }, 'rampDist': { 'upBehavior': { }, 'down': { }, 'steadyBehavior': { }, 'downBehavior': { }, 'up': { }, 'synRetryMode': { }, 'steady': { } } }, '@type:security_all': { 'maxConcurrAttacks': { }, 'attackRetries': { }, 'maxPacketsPerSecond': { }, 'attackPlan': { }, 'randomSeed': { }, 'delayStart': { }, 'attackProfile': { }, 'attackPlanIterations': { }, 'attackPlanIterationDelay': { }, 'maxAttacksPerSecond': { } }, '@type:security_np': { 'attackRetries': { }, 'sessions': { 'max': { }, 'maxPerSecond': { } }, 'rateDist': { 'unit': { }, 'min': { }, 'max': { }, 'unlimited': { }, 'scope': { }, 'type': { } }, 'attackPlan': { }, 'randomSeed': { }, 'delayStart': { }, 'attackProfile': { }, 'attackPlanIterations': { }, 'attackPlanIterationDelay': { } }, '@type:layer3': { 'rateDist': { 'unit': { }, 'min': { }, 'max': { }, 'rate': { }, 'increment': { }, 'type': { }, 'ramptype': { } }, 'bidirectional': { }, 'randomizeIP': { }, 'enableTCP': { }, 'slowStart': { }, 'Templates': { 'TemplateType': { } }, 'srcPort': { }, 'slowStartFps': { }, 'duration': { 
'disable_nd_probes': { }, 'durationTime': { }, 'durationFrames': { } }, 'udpSrcPortMode': { }, 'dstPort': { }, 'payload': { 'data': { }, 'type': { }, 'dataWidth': { } }, 'syncIP': { }, 'addrGenMode': { }, 'maxStreams': { }, 'dstPortMask': { }, 'udpDstPortMode': { }, 'advancedUDP': { 'lengthVal': { }, 'lengthField': { }, 'checksumVal': { }, 'checksumField': { } }, 'delayStart': { }, 'payloadAdvanced': { 'udfMode': { }, 'udfLength': { }, 'udfDataWidth': { }, 'udfOffset': { } }, 'sizeDist': { 'increment': { }, 'type': { }, 'min': { }, 'rate': { }, 'mixlen2': { }, 'mixweight6': { }, 'mixlen1': { }, 'mixweight7': { }, 'mixlen4': { }, 'mixweight4': { }, 'mixlen3': { }, 'mixweight5': { }, 'mixlen6': { }, 'mixlen5': { }, 'mixlen8': { }, 'mixweight8': { }, 'mixlen7': { }, 'mixweight9': { }, 'mixlen9': { }, 'mixweight2': { }, 'max': { }, 'mixweight3': { }, 'mixweight1': { }, 'mixlen10': { }, 'mixweight10': { }, 'unit': { } }, 'advancedIPv4': { 'lengthVal': { }, 'optionHeaderField': { }, 'optionHeaderData': { }, 'lengthField': { }, 'checksumVal': { }, 'tos': { }, 'checksumField': { }, 'ttl': { } }, 'srcPortMask': { }, 'advancedIPv6': { 'flowLabel': { }, 'lengthVal': { }, 'extensionHeaderField': { }, 'lengthField': { }, 'nextHeader': { }, 'trafficClass': { }, 'extensionHeaderData': { }, 'hopLimit': { } } }, '@type:layer4': { 'tcp': { 'disable_ack_piggyback': { }, 'delay_acks': { }, 'mss': { }, 'raw_flags': { }, 'psh_every_segment': { }, 'ecn': { }, 'tcp_window_scale': { }, 'initial_receive_window': { }, 'reset_at_end': { }, 'dynamic_receive_window_size': { }, 'tcp_connect_delay_ms': { }, 'aging_time_data_type': { }, 'tcp_4_way_close': { }, 'shutdown_data': { }, 'tcp_icw': { }, 'tcp_keepalive_timer': { }, 'aging_time': { }, 'add_timestamps': { }, 'retries': { }, 'handshake_data': { }, 'ack_every_n': { }, 'syn_data_padding': { }, 'retry_quantum_ms': { }, 'delay_acks_ms': { } }, 'rateDist': { 'unit': { }, 'min': { }, 'max': { }, 'unlimited': { }, 'scope': { }, 'type': { } }, 'sessions': { 'openFast': { }, 'closeFast': { }, 'max': { }, 'allocationOverride': { }, 'targetPerSecond': { }, 'target': { }, 'targetMatches': { }, 'maxPerSecond': { }, 'engine': { }, 'statDetail': { }, 'emphasis': { }, 'maxActive': { } }, 'loadprofile': { 'name': { }, 'label': { } }, 'ip': { 'tos': { }, 'ttl': { } }, 'ip6': { 'flowlabel': { }, 'traffic_class': { }, 'hop_limit': { } }, 'srcPortDist': { 'min': { }, 'max': { }, 'type': { } }, 'rampUpProfile': { 'min': { }, 'max': { }, 'increment': { }, 'interval': { }, 'type': { } }, 'delayStart': { }, 'payload': { 'add_timestamp': { }, 'data': { }, 'http_type': { }, 'transport': { }, 'type': { } }, 'rampDist': { 'upBehavior': { }, 'down': { }, 'steadyBehavior': { }, 'downBehavior': { }, 'up': { }, 'synRetryMode': { }, 'steady': { } }, 'packetsPerSession': { }, 'payloadSizeDist': { 'min': { }, 'max': { }, 'type': { } }, 'dstPortDist': { 'min': { }, 'max': { }, 'type': { } } }, '@type:playback': { 'tcp': { 'disable_ack_piggyback': { }, 'delay_acks': { }, 'mss': { }, 'raw_flags': { }, 'psh_every_segment': { }, 'ecn': { }, 'tcp_window_scale': { }, 'initial_receive_window': { }, 'reset_at_end': { }, 'dynamic_receive_window_size': { }, 'tcp_connect_delay_ms': { }, 'aging_time_data_type': { }, 'tcp_4_way_close': { }, 'shutdown_data': { }, 'tcp_icw': { }, 'tcp_keepalive_timer': { }, 'aging_time': { }, 'add_timestamps': { }, 'retries': { }, 'handshake_data': { }, 'ack_every_n': { }, 'syn_data_padding': { }, 'retry_quantum_ms': { }, 'delay_acks_ms': { } }, 'rateDist': { 'unit': { }, 'min': { 
}, 'max': { }, 'unlimited': { }, 'scope': { }, 'type': { } }, 'sessions': { 'openFast': { }, 'closeFast': { }, 'max': { }, 'allocationOverride': { }, 'targetPerSecond': { }, 'target': { }, 'targetMatches': { }, 'maxPerSecond': { }, 'engine': { }, 'statDetail': { }, 'emphasis': { }, 'maxActive': { } }, 'loadprofile': { 'name': { }, 'label': { } }, 'ip': { 'tos': { }, 'ttl': { } }, 'modification': { 'startpacket': { }, 'originalport': { }, 'newport': { }, 'replay': { }, 'bpfstring': { }, 'single': { }, 'loopcount': { }, 'endpacket': { }, 'independentflows': { }, 'serveripinjection': { } }, 'ip6': { 'flowlabel': { }, 'traffic_class': { }, 'hop_limit': { } }, 'srcPortDist': { 'min': { }, 'max': { }, 'type': { } }, 'rampUpProfile': { 'min': { }, 'max': { }, 'increment': { }, 'interval': { }, 'type': { } }, 'delayStart': { }, 'file': { }, 'rampDist': { 'upBehavior': { }, 'down': { }, 'steadyBehavior': { }, 'downBehavior': { }, 'up': { }, 'synRetryMode': { }, 'steady': { } }, 'behavior': { } }, '@type:layer2': { 'bidirectional': { }, 'maxStreams': { }, 'rateDist': { 'unit': { }, 'min': { }, 'max': { }, 'rate': { }, 'increment': { }, 'type': { }, 'ramptype': { } }, 'advanced': { 'ethTypeField': { }, 'ethTypeVal': { } }, 'slowStart': { }, 'slowStartFps': { }, 'duration': { 'disable_nd_probes': { }, 'durationTime': { }, 'durationFrames': { } }, 'delayStart': { }, 'payloadAdvanced': { 'udfMode': { }, 'udfLength': { }, 'udfDataWidth': { }, 'udfOffset': { } }, 'sizeDist': { 'increment': { }, 'type': { }, 'min': { }, 'rate': { }, 'mixlen2': { }, 'mixweight6': { }, 'mixlen1': { }, 'mixweight7': { }, 'mixlen4': { }, 'mixweight4': { }, 'mixlen3': { }, 'mixweight5': { }, 'mixlen6': { }, 'mixlen5': { }, 'mixlen8': { }, 'mixweight8': { }, 'mixlen7': { }, 'mixweight9': { }, 'mixlen9': { }, 'mixweight2': { }, 'max': { }, 'mixweight3': { }, 'mixweight1': { }, 'mixlen10': { }, 'mixweight10': { }, 'unit': { } }, 'payload': { 'data': { }, 'type': { }, 'dataWidth': { } } }, '@type:stackscrambler': { 'tcp': { 'disable_ack_piggyback': { }, 'delay_acks': { }, 'mss': { }, 'raw_flags': { }, 'psh_every_segment': { }, 'ecn': { }, 'tcp_window_scale': { }, 'initial_receive_window': { }, 'reset_at_end': { }, 'dynamic_receive_window_size': { }, 'tcp_connect_delay_ms': { }, 'aging_time_data_type': { }, 'tcp_4_way_close': { }, 'shutdown_data': { }, 'tcp_icw': { }, 'tcp_keepalive_timer': { }, 'aging_time': { }, 'add_timestamps': { }, 'retries': { }, 'handshake_data': { }, 'ack_every_n': { }, 'syn_data_padding': { }, 'retry_quantum_ms': { }, 'delay_acks_ms': { } }, 'scrambleOptions': { 'maxCorruptions': { }, 'badIPFlags': { }, 'badIPFragOffset': { }, 'badIPLength': { }, 'badUrgentPointer': { }, 'badIPFlowLabel': { }, 'badEthType': { }, 'badTCPOptions': { }, 'badGTPNext': { }, 'handshakeTCP': { }, 'badIPChecksum': { }, 'badSCTPLength': { }, 'badTCPFlags': { }, 'badICMPType': { }, 'badIPTTL': { }, 'badIPProtocol': { }, 'badSCTPFlags': { }, 'badGTPFlags': { }, 'badIPVersion': { }, 'badL4HeaderLength': { }, 'badL4Checksum': { }, 'badIPOptions': { }, 'badSCTPType': { }, 'badSCTPChecksum': { }, 'badGTPNpdu': { }, 'badICMPCode': { }, 'badSCTPVerificationTag': { }, 'badIPTOS': { }, 'badIPTotalLength': { }, 'badGTPLen': { }, 'badGTPType': { }, 'badGTPSeqno': { } }, 'rateDist': { 'unit': { }, 'min': { }, 'max': { }, 'unlimited': { }, 'scope': { }, 'type': { } }, 'sessions': { 'openFast': { }, 'closeFast': { }, 'max': { }, 'allocationOverride': { }, 'targetPerSecond': { }, 'target': { }, 'targetMatches': { }, 'maxPerSecond': { }, 'engine': { 
}, 'statDetail': { }, 'emphasis': { }, 'maxActive': { } }, 'loadprofile': { 'name': { }, 'label': { } }, 'ip': { 'tos': { }, 'ttl': { } }, 'ip6': { 'flowlabel': { }, 'traffic_class': { }, 'hop_limit': { } }, 'prng': { 'seed': { }, 'offset': { } }, 'srcPortDist': { 'min': { }, 'max': { }, 'type': { } }, 'rampUpProfile': { 'min': { }, 'max': { }, 'increment': { }, 'interval': { }, 'type': { } }, 'delayStart': { }, 'payload': { 'data': { }, 'transport': { }, 'type': { } }, 'rampDist': { 'upBehavior': { }, 'down': { }, 'steadyBehavior': { }, 'downBehavior': { }, 'up': { }, 'synRetryMode': { }, 'steady': { } }, 'payloadSizeDist': { 'min': { }, 'max': { }, 'type': { } }, 'dstPortDist': { 'min': { }, 'max': { }, 'type': { } } }, '@type:clientsim': { 'app': { 'replace_streams': { }, 'streamsPerSuperflow': { }, 'removedns': { }, 'fidelity': { } }, 'tcp': { 'disable_ack_piggyback': { }, 'delay_acks': { }, 'mss': { }, 'raw_flags': { }, 'psh_every_segment': { }, 'ecn': { }, 'tcp_window_scale': { }, 'initial_receive_window': { }, 'reset_at_end': { }, 'dynamic_receive_window_size': { }, 'tcp_connect_delay_ms': { }, 'aging_time_data_type': { }, 'tcp_4_way_close': { }, 'shutdown_data': { }, 'tcp_icw': { }, 'tcp_keepalive_timer': { }, 'aging_time': { }, 'add_timestamps': { }, 'retries': { }, 'handshake_data': { }, 'ack_every_n': { }, 'syn_data_padding': { }, 'retry_quantum_ms': { }, 'delay_acks_ms': { } }, 'rateDist': { 'unit': { }, 'min': { }, 'max': { }, 'unlimited': { }, 'scope': { }, 'type': { } }, 'sessions': { 'openFast': { }, 'closeFast': { }, 'max': { }, 'allocationOverride': { }, 'targetPerSecond': { }, 'target': { }, 'targetMatches': { }, 'maxPerSecond': { }, 'engine': { }, 'statDetail': { }, 'emphasis': { }, 'maxActive': { } }, 'loadprofile': { 'name': { }, 'label': { } }, 'ip': { 'tos': { }, 'ttl': { } }, 'ssl': { 'ssl_client_keylog': { }, 'sslReuseType': { }, 'server_record_len': { }, 'client_record_len': { }, 'ssl_keylog_max_entries': { } }, 'ip6': { 'flowlabel': { }, 'traffic_class': { }, 'hop_limit': { } }, 'srcPortDist': { 'min': { }, 'max': { }, 'type': { } }, 'rampUpProfile': { 'min': { }, 'max': { }, 'increment': { }, 'interval': { }, 'type': { } }, 'delayStart': { }, 'rampDist': { 'upBehavior': { }, 'down': { }, 'steadyBehavior': { }, 'downBehavior': { }, 'up': { }, 'synRetryMode': { }, 'steady': { } }, 'superflow': { } }, 'createdOn': { }, 'tags': [{ 'id': { }, 'type': { }, 'domainId': { 'name': { }, 'iface': { }, 'external': { } } }], 'revision': { }, 'lockedBy': { }, 'createdBy': { }, 'reportResults': { }, 'timeline': { 'timesegment': [{ 'label': { }, 'size': { }, 'type': { } }] }, 'id': { }, 'contentType': { }, 'operations': { 'getComponentPresetNames': [{ }] } }], 'lockedBy': { }, 'createdBy': { }, 'name': { }, 'contentType': { }, 'testComponentTypesDescription': [{ 'template': { }, 'name': { }, 'description': { }, 'label': { }, 'type': { } }], 'operations': { 'clone': [{ }], 'importModel': [{ }], 'add': [{ }], 'stopRun': [{ }], 'run': [{ }], 'saveAs': [{ }], 'save': [{ }], 'delete': [{ }], 'search': [{ }], 'exportModel': [{ }], 'load': [{ }], 'new': [{ }], 'realTimeStats': [{ }], 'remove': [{ }] } }, 'administration': { 'atiLicensing': { 'license': [{ 'expires': { }, 'issuedBy': { }, 'name': { }, 'boardserialno': { }, 'issued': { }, 'serialno': { } }], 'operations': { 'importAtiLicense': [{ }] } }, 'systemSettings': { 'strikepackUpdate': { 'password': { }, 'interval': { }, 'check': { }, 'username': { } }, 'author': { }, 'description': { }, 'label': { }, 'guardrailSettings': { 
'enableStrictMode': { }, 'testStop': { }, 'testStatusWarning': { }, 'stopOnLinkdown': { }, 'testStartPrevention': { } }, 'createdOn': { }, 'revision': { }, 'vacuumSettings': { 'vacuumWindowHigh': { }, 'autoVacuum': { }, 'vacuumWindowLow': { }, 'vacuumWindowTZ': { } }, 'lockedBy': { }, 'createdBy': { }, 'softwareUpdate': { 'password': { }, 'interval': { }, 'check': { }, 'username': { } }, 'contentType': { } }, 'userSettings': [{ 'name': { }, 'content': { }, 'operations': { 'changeUserSetting': [{ }] } }], 'operations': { 'logs': [{ }], 'exportAllTests': [{ }] } }, 'results': [{ 'name': { }, 'content': { }, 'datasetvals': { }, 'operations': { 'getHistoricalSeries': [{ }], 'getGroups': [{ 'lockedBy': { }, 'createdBy': { }, 'author': { }, 'description': { }, 'label': { }, 'createdOn': { }, 'contentType': { }, 'revision': { } }], 'getHistoricalResultSize': [{ }] } }], 'statistics': { 'component': [{ 'statNames': [{ 'name': { }, 'description': { }, 'realtimeGroup': { }, 'label': { }, 'units': { }, 'choice': [{ 'name': { }, 'description': { }, 'label': { } }] }], 'type': { }, 'label': { } }] }, 'appProfile': { 'weightType': { }, 'lockedBy': { }, 'createdBy': { }, 'author': { }, 'name': { }, 'superflow': [{ 'percentFlows': { }, 'seed': { }, 'author': { }, 'estimate_bytes': { }, 'estimate_flows': { }, 'weight': { }, 'description': { }, 'label': { }, 'createdOn': { }, 'revision': { }, 'lockedBy': { }, 'generated': { }, 'createdBy': { }, 'percentBandwidth': { }, 'name': { }, 'contentType': { } }], 'description': { }, 'label': { }, 'createdOn': { }, 'contentType': { }, 'revision': { }, 'operations': { 'delete': [{ }], 'importAppProfile': [{ }], 'recompute': [{ }], 'load': [{ }], 'new': [{ }], 'add': [{ }], 'remove': [{ }], 'exportAppProfile': [{ }], 'saveAs': [{ }], 'save': [{ }], 'search': [{ }] } }, 'strikes': { 'severity': { }, 'year': { }, 'variants': { }, 'reference': [{ 'label': { }, 'type': { }, 'value': { } }], 'path': { }, 'protocol': { }, 'fileSize': { }, 'fileExtension': { }, 'name': { }, 'id': { }, 'category': { }, 'keyword': [{ 'name': { } }], 'direction': { }, 'operations': { 'search': [{ }] } }, 'loadProfile': { 'phase': [{ 'duration': { }, 'phaseId': { }, 'type': { }, 'sessions.max': { }, 'sessions.maxPerSecond': { }, 'rateDist.unit': { }, 'rateDist.min': { }, 'rampDist.steadyBehavior': { }, 'rateDist.type': { }, 'rateDist.scope': { } }], 'author': { }, 'regen': { }, 'description': { }, 'label': { }, 'createdOn': { }, 'summaryData': { 'deviceType': { }, 'unknownUdpAppNames': { }, 'unknownSslSuperflowName': { }, 'magicNumber': { }, 'downloadBytesSum': { }, 'version': { }, 'phaseDuration': { }, 'unknownTcpAppNames': { }, 'uploadBytesSum': { }, 'summaryName': { }, 'basisOfRegeneration': { }, 'activeFlowsSum': { }, 'miniSlotDuration': { }, 'unknownSslAppNames': { }, 'dynamicSuperflowName': { }, 'appStat': [{ }], 'startTime': { }, 'endTime': { }, 'dynamicAppNames': { } }, 'revision': { }, 'lockedBy': { }, 'createdBy': { }, 'name': { }, 'contentType': { }, 'presets': [{ 'phase': [{ 'duration': { }, 'phaseId': { }, 'type': { }, 'sessions.max': { }, 'sessions.maxPerSecond': { }, 'rateDist.unit': { }, 'rateDist.min': { }, 'rampDist.steadyBehavior': { }, 'rateDist.type': { }, 'rateDist.scope': { } }], 'author': { }, 'regen': { }, 'description': { }, 'label': { }, 'createdOn': { }, 'summaryData': { 'deviceType': { }, 'unknownUdpAppNames': { }, 'unknownSslSuperflowName': { }, 'magicNumber': { }, 'downloadBytesSum': { }, 'version': { }, 'phaseDuration': { }, 'unknownTcpAppNames': { }, 
'uploadBytesSum': { }, 'summaryName': { }, 'basisOfRegeneration': { }, 'activeFlowsSum': { }, 'miniSlotDuration': { }, 'unknownSslAppNames': { }, 'dynamicSuperflowName': { }, 'appStat': [{ }], 'startTime': { }, 'endTime': { }, 'dynamicAppNames': { } }, 'revision': { }, 'lockedBy': { }, 'createdBy': { }, 'name': { }, 'contentType': { } }], 'operations': { 'load': [{ }], 'createNewCustom': [{ }], 'save': [{ }], 'saveAs': [{ }], 'delete': [{ }] } }, 'strikeList': { 'author': { }, 'description': { }, 'label': { }, 'queryString': { }, 'createdOn': { }, 'revision': { }, 'lockedBy': { }, 'createdBy': { }, 'name': { }, 'contentType': { }, 'numStrikes': { }, 'strikes': [{ 'severity': { }, 'year': { }, 'variants': { }, 'reference': [{ 'label': { }, 'type': { }, 'value': { } }], 'path': { }, 'protocol': { }, 'fileSize': { }, 'fileExtension': { }, 'name': { }, 'id': { }, 'category': { }, 'keyword': [{ 'name': { } }], 'direction': { } }], 'operations': { 'add': [{ }], 'saveAs': [{ }], 'save': [{ }], 'search': [{ }], 'exportStrikeList': [{ }], 'delete': [{ }], 'importStrikeList': [{ }], 'remove': [{ }], 'load': [{ }], 'new': [{ }] } } } @staticmethod def _get_from_model(path): model_data = DataModelMeta._dataModel model_path = "" for path_part in path.split('/'): if len(path_part) == 0: continue if isinstance(model_data, list): model_data = model_data[0] continue if path_part not in model_data: return (None, None) model_data = model_data[path_part] model_path = model_path + "/" + path_part return (model_path, model_data) @staticmethod def _decorate_model_object_operations(data_model, data_model_path, obj): if 'operations' not in data_model: return for operation in data_model['operations']: if obj.__full_path__().replace("/", "") == '': continue method_name = data_model_path.replace("/", "_") + '_operations_' + operation setattr(obj, operation, obj._wrapper.__getattribute__(method_name).__get__(obj)) setattr(getattr(obj, operation).__func__, '__name__', operation) @staticmethod def _decorate_model_object(obj): obj_name = obj._name (data_model_path, data_model) = DataModelMeta._get_from_model(obj.__data_model_path__()) if data_model is None: return obj if isinstance(data_model, list): setattr(obj, '_getitem_', lambda x: DataModelProxy(wrapper=obj._wrapper, name=str(x), path=obj.__full_path__(), model_path=obj.__data_model_path__())) if data_model_path.endswith(obj_name): DataModelMeta._decorate_model_object_operations(data_model[0], data_model_path, obj) return obj else: data_model = data_model[0] DataModelMeta._decorate_model_object_operations(data_model, data_model_path, obj) for key in data_model: if key.startswith("@") or key == 'operations': continue setattr(obj, key, DataModelProxy(wrapper=obj._wrapper, name=key, path=obj.__full_path__(), model_path=obj.__data_model_path__())) if obj_name not in data_model: for key in data_model: if not key.startswith("@") or ":" not in key: continue [fieldName, fieldValue] = key.split(":") fieldName = fieldName.replace("@", "") try: if obj.__cached_get__(fieldName) != fieldValue: continue except: continue for extField in data_model[key]: ext_path = obj.__full_path__() ext_dm_path = obj.__data_model_path__() + "/" + key setattr(obj, extField, DataModelProxy(wrapper=obj._wrapper, name=extField, path=ext_path, model_path=ext_dm_path)) return obj def __call__(cls, *args, **kwds): return DataModelMeta._decorate_model_object(type.__call__(cls, *args, **kwds)) class DataModelProxy(object): __metaclass__ = DataModelMeta def __init__(self, wrapper, name, path='', 
model_path=None): self.__cache = {} self._wrapper = wrapper self._name = name self._path = path if model_path is None: self._model_path = self._path else: self._model_path = model_path def __full_path__(self): return '%s/%s' % (self._path, self._name) def __data_model_path__(self): return '%s/%s' % (self._model_path, self._name) def __url__(self): return 'https://%s/bps/api/v2/core%s' % (self._wrapper.host, self.__full_path__()) def __repr__(self): return 'proxy object for \'%s\' ' % (self.__url__()) def __getitem__(self, item): if type(item) == int: item = '{%s}'%item return self._getitem_(item) def get(self, responseDepth=None, **kwargs): return self._wrapper._get(self._path+'/'+self._name, responseDepth, **kwargs) def __cached_get__(self, field): if field not in self.__cache: self.__cache[field] = self._wrapper._get(self.__data_model_path__()+"/"+field) return self.__cache[field] def patch(self, value): return self._wrapper._patch(self._path+'/'+self._name, value) def set(self, value): return self.patch(value) def put(self, value): return self._wrapper._put(self._path+'/'+self._name, value) def delete(self): return self._wrapper._delete(self._path+'/'+self._name) def help(self): doc_data = self._wrapper._options(self._path+'/'+self._name) if doc_data and 'custom' in doc_data: doc_data = doc_data['custom'] if doc_data and 'description' in doc_data: bps_api_log.info(doc_data['description'])
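# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated wrapper above).
# It assumes a top-level wrapper object -- called `bps` here as a placeholder,
# since the entry point is not shown in this excerpt -- whose `testmodel` and
# `reports` attributes are DataModelProxy instances decorated with the
# operations defined above, and that each operation returns the decoded JSON
# response as a plain dict. Treat every name below as an assumption, not as
# the library's documented entry point.
# ---------------------------------------------------------------------------
import time


def wait_and_export_report(bps, runid, report_path):
    """Poll the 'summary' realtime stats for a run and export its report.

    `bps` is the assumed top-level wrapper described in the comment above.
    """
    while True:
        # A negative numSeconds counts from the end of the run (see the
        # realTimeStats docstring); 'progress' is assumed to reach 100 when
        # the run completes.
        stats = bps.testmodel.realTimeStats(runid, 'summary', -1)
        if stats.get('progress', 0) >= 100:
            break
        time.sleep(5)
    # sectionIds='' exports the entire report; the 'pdf' format string is an
    # assumption, since the docstring above does not enumerate accepted values.
    bps.reports.exportReport(filepath=report_path, runid=runid,
                             reportType='pdf', sectionIds='')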
python
from module import foo, bar from module import foo, \ bar, \ baz from module import (foo, bar) from module import (foo, bar, baz)
python
from jsonrpcserver.sentinels import Sentinel def test_Sentinel(): assert repr(Sentinel("foo")) == "<foo>"
python
# Copyright (c) 2019, CRS4 # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from django.core.management.base import BaseCommand from reviews_manager.models import ReviewsComparison import logging logger = logging.getLogger('promort_commands') class Command(BaseCommand): help = 'check third reviewer\'s worklist and fix it if necessary' def add_arguments(self, parser): parser.add_argument('--keep_reviews', action='store_true', help='Keep reviews performed by third reviewer even if not necessary') def _get_review_comparisons(self): return ReviewsComparison.objects.filter(positive_match=False, positive_quality_control=True) def _delete_review(self, clinical_annotation): if len(clinical_annotation.steps.all()) == 0: clinical_annotation.delete() logger.info('Clinical annotation %s deleted', clinical_annotation.label) def _delete_gs_review_step(self, clinical_annotation_step): fr_ann = clinical_annotation_step.focus_region_annotations.all() logger.info('Deleting %d focus region annotations', len(fr_ann)) fr_ann.delete() c_ann = clinical_annotation_step.core_annotations.all() logger.info('Deleting %d core annotations', len(c_ann)) c_ann.delete() s_ann = clinical_annotation_step.slice_annotations.all() logger.info('Deleting %d slice annotations', len(s_ann)) s_ann.delete() c_ann = clinical_annotation_step.clinical_annotation clinical_annotation_step.delete() logger.info('Clinical annotation step %s deleted', clinical_annotation_step.label) self._delete_review(c_ann) def _check_and_fix(self, rc_object, keep_review): if not rc_object.review_1.rois_review_step.is_positive(): logger.info('### ReviewComparison object %d --- NEED TO FIX! ###', rc_object.id) if rc_object.review_3 is not None and not keep_review: r3_obj = rc_object.review_3 logger.info('-- Clearing reviews step %s --', r3_obj.label) # unlink to prevent delete protection error rc_object.review_3 = None rc_object.save() # delete clinical annotation step self._delete_gs_review_step(r3_obj) rc_object.positive_match = True logger.info('Setting RC object positive_match to True') rc_object.save() def handle(self, *args, **opts): logger.info('Collecting ReviewsComparison objects') r_comp = self._get_review_comparisons() logger.info('Retrieved %d objects', len(r_comp)) for rc in r_comp: self._check_and_fix(rc, opts['keep_reviews'])
python
import numpy as np
from numpy import zeros
from sklearn.tree import _tree


def _interpret_tree(tree, X, n_labels):
    # Tree preprocessing allowing down-top search
    parents = [-1 for _ in range(tree.node_count)]
    to_pursue = [0]
    while len(to_pursue):
        node_i = to_pursue.pop()
        child_l = tree.children_left[node_i]
        if child_l != _tree.TREE_LEAF:
            parents[child_l] = node_i
            child_r = tree.children_right[node_i]
            parents[child_r] = node_i
            to_pursue.append(child_l)
            to_pursue.append(child_r)

    # Values normalization -> probas
    values = tree.value.squeeze(axis=1)
    values /= values.sum(axis=1)[:, np.newaxis]

    n_features = len(X[0])
    f_contribs = [zeros((1, n_labels)) for _ in range(n_features)]
    biases = zeros((1, n_labels))
    f_indices = list(tree.feature)

    # For each sample to test, we check in which leaf it lands
    leaves = tree.apply(X)
    leaves_value = {}
    for leaf in leaves:
        if leaf not in leaves_value:
            l_contribs = [zeros((1, n_labels)) for _ in range(n_features)]
            cur_node = leaf
            while cur_node != -1:
                par_node = parents[cur_node]
                if par_node >= 0:
                    resp_feature = f_indices[par_node]
                    l_contribs[resp_feature] += (values[cur_node] - values[par_node])
                cur_node = par_node
            leaves_value[leaf] = l_contribs, values[leaf]
        l_contribs, l_bias = leaves_value[leaf]
        f_contribs = [f_i + c_i for f_i, c_i in zip(f_contribs, l_contribs)]
        biases += l_bias

    f_contribs = [i / len(X) for i in f_contribs]
    biases /= len(X)
    return f_contribs, biases


def interpret_forest(forest, X, n_labels):
    f_contribs = [zeros((1, n_labels)) for _ in range(len(X[0]))]
    f_biases = 0
    for tree in map(lambda x: x.tree_, forest.estimators_):
        t_contribs, t_biases = _interpret_tree(tree, X, n_labels)
        f_contribs = [x + y / forest.n_estimators for x, y in zip(f_contribs, t_contribs)]
        f_biases += t_biases / forest.n_estimators
    return f_contribs, f_biases
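# Usage sketch for interpret_forest, not part of the original module: the toy data and
# model below are illustrative assumptions only. Note that sklearn's low-level
# Tree.apply() requires float32 input, hence the cast.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier

    X, y = make_classification(n_samples=200, n_features=6, n_informative=4,
                               n_classes=3, random_state=0)
    forest = RandomForestClassifier(n_estimators=20, random_state=0).fit(X, y)

    X_probe = X[:10].astype(np.float32)  # low-level tree API expects float32
    contribs, bias = interpret_forest(forest, X_probe, n_labels=3)

    # One (1, n_labels) array per feature; `bias` accumulates the mean leaf
    # distribution, so it should match the mean predicted probabilities.
    print([c.shape for c in contribs])
    print(np.allclose(bias, forest.predict_proba(X_probe).mean(axis=0)))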
python
from behavioral.interpreter.logic.tokens.token_type import TokenType class Token: def __init__(self, token_type: TokenType, text: str) -> None: self.type = token_type self.text = text def __repr__(self) -> str: return f"Token '{self.type.name}' with value '{self.text}'"
python
import pytest
from .fixtures import *


@pytest.mark.parametrize(["num_partitions", "rows"], [(7, 30), (3, 125), (27, 36)])
def test_update_table_keeps_partition_structure(num_partitions, rows, store):
    fixtures = UpdateFixtures(rows)
    original_df = fixtures.make_df()
    update_df = fixtures.generate_update_values()
    partition_size = get_partition_size(original_df, num_partitions)
    table = store.select_table(TABLE_NAME)
    table.write(original_df, partition_size=partition_size)
    partition_names = table._partition_data.keys()
    partition_data = table._partition_data.read()
    # Act
    table.update(update_df)
    # Assert
    _assert_that_partitions_are_the_same(table, partition_names, partition_data)


def _assert_that_partitions_are_the_same(table, partition_names, partition_data):
    # Check that partitions keep the same structure after update
    df = table.read_arrow(TABLE_NAME)
    index = df['index']
    for partition, partition_name in zip(index.chunks, partition_names):
        metadata = partition_data[partition_name]
        index_start = partition[0].as_py()
        index_end = partition[-1].as_py()
        num_rows = len(partition)
        assert index_start == metadata['min']
        assert index_end == metadata['max']
        assert num_rows == metadata['num_rows']


def test_update_table(store):
    # Arrange
    fixtures = UpdateFixtures()
    original_df = fixtures.make_df()
    update_df = fixtures.generate_update_values()
    expected = fixtures.update_table(update_df)
    partition_size = get_partition_size(original_df, NUMBER_OF_PARTITIONS)
    table = store.select_table(TABLE_NAME)
    table.write(original_df, partition_size=partition_size)
    # Act
    table.update(update_df)
    # Assert
    df = store.read_pandas(TABLE_NAME)
    assert df.equals(expected)
    assert not df.equals(original_df)


@pytest.mark.parametrize(["index", "rows"],
                         [(None, [10, 13, 14, 21]),
                          (hardcoded_string_index, ["row00010", "row00013", "row00014", "row00021"]),
                          (hardcoded_datetime_index, ["2021-01-01", "2021-01-16", "2021-01-07"])
                          ]
                         )
def test_update_table_with_pandas_series(index, rows, store):
    # Arrange
    fixtures = UpdateFixtures(index=index, update_rows=rows, update_cols=['c0'])
    original_df = fixtures.make_df()
    update_series = fixtures.generate_update_values(cols=1)
    expected = fixtures.update_table(update_series)
    table = store.select_table(TABLE_NAME)
    table.write(original_df)
    # Act
    table.update(update_series)
    # Assert
    df = store.read_pandas(TABLE_NAME)
    assert df.equals(expected)
    assert not df.equals(original_df)


class UpdateFixtures:
    def __init__(self, rows=30, index=None, update_rows=(10, 13, 14, 21), update_cols=('c2', 'c0')):
        self.rows = rows
        self.index = index
        self.update_rows = update_rows
        self.update_cols = update_cols

    def make_df(self, cols=5):
        self.df = make_table(index=self.index, rows=self.rows, cols=cols, astype="pandas")
        self.df.index.name = 'index'
        return self.df

    def generate_update_values(self, cols=5, as_series=False):
        update_values = make_table(index=self.index, rows=self.rows, cols=cols, astype='pandas')
        update_values = update_values.loc[self.update_rows, self.update_cols]
        if as_series:
            update_values = update_values.squeeze()
        return update_values

    def update_table(self, values):
        expected = self.df.copy()
        expected.loc[self.update_rows, self.update_cols] = values
        return expected


def _wrong_index_dtype():
    df = make_table(sorted_datetime_index, astype="pandas")
    return df


def _wrong_index_values():
    df = make_table(astype="pandas")
    df = df.head(5)
    df.index = [2, 5, 7, 10, 459]
    return df


def _duplicate_index_values():
    df = make_table(astype="pandas")
    df = df.head(5)
    df.index = [2, 5, 7, 10, 10]
    return df


def _wrong_column_dtype():
    df = 
make_table(sorted_string_index, cols=1, astype="pandas") df = df.reset_index() df.columns = ['c1', 'c2'] df = df.head(5) return df def _wrong_column_names(): df = make_table(cols=2, astype="pandas") df = df.head(5) df.columns = ['c1', 'non-existant_column'] return df def _duplicate_column_names(): df = make_table(cols=2, astype="pandas") df = df.head(5) df.columns = ['c2', 'c2'] return df @pytest.mark.parametrize( ("update_df", "exception"), [ (_wrong_index_dtype(), TypeError), (_wrong_index_values(), ValueError), (_duplicate_index_values(), IndexError), (_wrong_column_dtype(), TypeError), (_wrong_column_names(), IndexError), (_duplicate_column_names(), IndexError), ], ids=[ "_wrong_index_dtype", "_wrong_index_values", "_duplicate_index_values", "_wrong_column_dtype", "_wrong_column_names", "_duplicate_column_names", ], ) def test_can_update_table(update_df, exception, store): # Arrange original_df = make_table(cols=5, astype='pandas') store.write_table(TABLE_NAME, original_df) table = store.select_table(TABLE_NAME) # Act with pytest.raises(exception) as e: table.update(update_df) # Assert assert isinstance(e.type(), exception)
python
"""Ghana specific form helpers.""" from django.forms.fields import Select from .gh_regions import REGIONS class GHRegionSelect(Select): """ A Select widget with option to select a region from list of all regions of Ghana. """ def __init__(self, attrs=None): super().__init__(attrs, choices=REGIONS)
python
from django.conf import settings def pytest_configure(): settings.configure(INSTALLED_APPS=["geoipdb_loader"])
python
import datetime
from typing import Any, Optional

from googleapiclient.discovery import build

from jarvis.plugins.auth.google_auth import GoogleAuth

from .config import GoogleCalendar as GoogleCalendarConfig


class GoogleCalendar:
    def __init__(self, calendar_id: Optional[str] = None) -> None:
        # The config import is aliased above so this class does not shadow it.
        self.calendars: dict = GoogleCalendarConfig.calendars
        self.calendar_service: Any = build('calendar', 'v3', credentials=GoogleAuth().creds)
        self.current_calendar: dict = {calendar_id: self.calendars[calendar_id]} if calendar_id is not None else self.calendars
        self.events: dict = {}

    def list_events(self, min_time: Optional[str] = None, max_results: Optional[int] = 10) -> None:
        """Calendar API List Events"""
        if min_time is None:
            # Evaluate "now" on every call instead of once at import time.
            min_time = datetime.datetime.utcnow().isoformat() + 'Z'
        for index, cal in self.current_calendar.items():
            events_result = self.calendar_service.events().list(
                calendarId=cal, timeMin=min_time, maxResults=max_results, singleEvents=True,
                orderBy='startTime').execute()
            tmp_events = events_result.get('items', [])
            self.events[index] = tmp_events
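# Usage sketch, not part of the original module: it requires valid Google credentials via
# GoogleAuth, and any calendar id passed in must be a key of the config's calendars dict.
if __name__ == '__main__':
    gcal = GoogleCalendar()              # or GoogleCalendar('personal') for one calendar
    gcal.list_events(max_results=5)
    for calendar_key, events in gcal.events.items():
        for event in events:
            # Standard Calendar API v3 event fields: all-day events use 'date' instead of 'dateTime'.
            start = event['start'].get('dateTime', event['start'].get('date'))
            print(calendar_key, start, event.get('summary'))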
python
import sys import dash import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output, State # TODO: fix it sys.path.append("./") from calculus_of_variations import MultidimensionalSolver from web_interface.utils import ( dash_multidimensional_answer, dash_multidimensional_problem, get_argparse, ) external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"] app = dash.Dash(__name__, external_stylesheets=external_stylesheets) app.layout = html.Div( [ dcc.Markdown("# Multidimensional problem"), dcc.Markdown("### Input"), html.Div( [ dcc.Markdown("Enter **L**:"), dcc.Input(id="L", value="x1_diff ** 2 + x2_diff ** 2", type="text"), ] ), html.Br(), html.Div( [dcc.Markdown("Enter **t0**:"), dcc.Input(id="t0", value="0", type="text")] ), html.Br(), html.Div( [dcc.Markdown("Enter **t1**:"), dcc.Input(id="t1", value="1", type="text")] ), html.Br(), html.Div( [ dcc.Markdown("Enter **x1_0**:"), dcc.Input(id="x1_0", value="0", type="text"), ] ), html.Br(), html.Div( [ dcc.Markdown("Enter **x1_1**:"), dcc.Input(id="x1_1", value="1", type="text"), ] ), html.Br(), html.Div( [ dcc.Markdown("Enter **x2_0**:"), dcc.Input(id="x2_0", value="0", type="text"), ] ), html.Br(), html.Div( [ dcc.Markdown("Enter **x2_1**:"), dcc.Input(id="x2_1", value="1", type="text"), ] ), html.Br(), html.Button("solve", id="solve"), html.Br(), html.Div(id="input"), ] ) @app.callback( Output(component_id="input", component_property="children"), [Input("solve", "n_clicks")], [ State("L", "value"), State("t0", "value"), State("t1", "value"), State("x1_0", "value"), State("x1_1", "value"), State("x2_0", "value"), State("x2_1", "value"), ], ) def update_output( n_clicks, L: str, t0: str, t1: str, x1_0: str, x1_1: str, x2_0: str, x2_1: str ): # click "solve" if n_clicks is None: return try: solver = MultidimensionalSolver( L=L, t0=t0, t1=t1, x1_0=x1_0, x1_1=x1_1, x2_0=x2_0, x2_1=x2_1 ) solver.solve() except: to_return = html.Div(dcc.Markdown("### Something went wrong :(")) else: to_return = html.Div( [ dcc.Markdown("### Problem"), dash_multidimensional_problem(solver=solver), dcc.Markdown("### Answer"), dash_multidimensional_answer(solver=solver), ] ) return to_return if __name__ == "__main__": # argparse parser = get_argparse() args = parser.parse_args() # run server app.run_server(host=args.host, port=args.port, debug=args.debug)
python
from datetime import date from nose.tools import eq_ from nose.plugins.attrib import attr from allmychanges.crawler import ( _filter_changelog_files, _extract_version, _parse_item, _extract_date) from allmychanges.utils import get_markup_type, get_change_type from allmychanges.downloaders.utils import normalize_url def test_changelog_finder(): in_ = [ './release.sh', './HISTORY.rst', './docs/RELEASE_NOTES.TXT', './docs/releases.rst', './kiva/agg/freetype2/docs/release', './seed/commands/release.py', './doc/source/manual/AppReleaseNotes.rst', './src/robotide/application/releasenotes.py', './scripts/make-release.py', './pypi_release.sh', './doc/release.rst', './release-process.txt', './docs/release_notes/v0.9.15.rst', './release.sh', './.travis-release-requirements.txt', './mkrelease.sh', './README.rst', ] out = [ './HISTORY.rst', './docs/RELEASE_NOTES.TXT', './docs/releases.rst', './doc/source/manual/AppReleaseNotes.rst', './doc/release.rst', './release-process.txt', './docs/release_notes/v0.9.15.rst', './.travis-release-requirements.txt', './README.rst', ] eq_(out, list(_filter_changelog_files(in_))) def test_extract_version(): def check(v, text=None): if text: eq_(v, _extract_version(text)) else: eq_(v, _extract_version(v)) eq_(v, _extract_version('v' + v)) check(v, '{0} (2013-09-24)'.format(v)) check(v, '{0} (2013.09.24)'.format(v)) check(v, '**{0} (2014-05-16)**'.format(v)) check(v, '**{0} (2014.05.16)**'.format(v)) eq_(v, _extract_version('New version {0}'.format(v))) eq_(v, _extract_version('New version v{0}'.format(v))) eq_(v, _extract_version('2015-03-12 {0}'.format(v))) eq_(v, _extract_version('2015-03-12 v{0}'.format(v))) eq_(v, _extract_version('2015-03-12 ({0})'.format(v))) eq_(v, _extract_version('2015-03-12 (v{0})'.format(v))) # from https://app-updates.agilebits.com/product_history/OPI4 check('5.3.BETA-22') # from http://spark.apache.org/releases/spark-release-1-3-0.html check(None, 'Upgrading to Spark 1.3') # https://archive.apache.org/dist/kafka/0.8.0/RELEASE_NOTES.html check('0.8.0', u'dist/kafka/0.8.0/RELEASE_NOTES.html') # https://github.com/numpy/numpy/tree/master/doc/release check('1.3.0', u'doc/release/1.3.0-notes.rst') # https://github.com/git/git/blob/master/Documentation/RelNotes/2.3.2.txt check(None, u'Fixes since v2.3.1') # this should work because we'll remove stop-words # like "release notes" and "for" check('3.0', u'Release Notes for MongoDB 3.0') # don't consider this a version # from https://bitbucket.org/cthedot/cssutils/src/d572ac8df6bd18cad203dea1bbf58867ff0d0ebe/docs/html/_sources/CHANGELOG.txt check(None, '0.3.x') # from https://github.com/meteor/meteor/blob/devel/History.md#v1032-2015-feb-25 check('1.0.3.2', 'v.1.0.3.2, 2015-Feb-25') # from https://itunes.apple.com/ru/app/chrome-web-browser-by-google/id535886823?l=en&mt=8 check('40.0.2214.73') check('05.10.2014.73') check('3.05.10.2014') # # from https://github.com/inliniac/suricata/blob/master/ChangeLog check('2.0.1rc1') check('2.0beta2') # from https://github.com/textmate/textmate/blob/master/Applications/TextMate/about/Changes.md check('2.0-beta.6.7', '2015-01-19 (v2.0-beta.6.7)') # # from https://github.com/ansible/ansible/blob/devel/CHANGELOG.md check('1.6.8', '1.6.8 "And the Cradle Will Rock" - Jul 22, 2014') check('0.2.1') # this horror is from the https://github.com/Test-More/TB2/blob/master/Changes check('1.005000_003') check('1.005000_003', '1.005000_003 Thu Mar 22 17:48:08 GMT 2012') check('3.0.0-pre', 'v3.0.0-pre (wip)') check('1.0.12') check('2.0.0-beta.1') check(None, 'Just a text with 
some 1 33 nubers') check('1.0') check('0.10.2') check('2.0.0') check('1.5.6') check('0.1.1', 'release-notes/0.1.1.md') check('1.3', 'doc/go1.3.html') check(None, ' some number in the item\'s text 0.1') check(None, 'This is the first version compatible with Django 1.7.') # this text is too long check(None, 'SWIG 3.0 required for programs that use SWIG library') check(None, 'HTTP/1.1 302 Found') check(None, '<script src="https://oss.maxcdn.com/libs/respond.js/1.4.2/respond.min.js"></script>') def test_parse_item(): eq_((True, 0, 'Feature #1155: Log packet payloads in eve alerts'), _parse_item('Feature #1155: Log packet payloads in eve alerts')) eq_((False, 0, None), _parse_item('Some very long feature: doing blah')) eq_((False, 0, None), _parse_item('Blah minor')) eq_((False, 2, 'Blah minor'), _parse_item(' Blah minor')) eq_((True, 2, 'Blah minor'), _parse_item('- Blah minor')) eq_((True, 3, 'Blah minor'), _parse_item(' - Blah minor')) eq_((True, 5, 'Blah minor'), _parse_item(' - Blah minor')) eq_((True, 5, 'Blah minor'), _parse_item(' * Blah minor')) eq_((True, 5, 'Damn Nginx'), _parse_item(' *) Damn Nginx')) def test_extract_date(): # from https://github.com/lepture/mistune/blob/master/CHANGES.rst eq_(date(2014, 12, 5), _extract_date('Released on Dec. 5, 2014.')) eq_(date(2014, 10, 31), _extract_date('31/10/2014')) eq_(date(2013, 3, 13), _extract_date('13th March 2013')) eq_(date(2014, 11, 3), _extract_date('3rd November 2014')) eq_(date(2013, 2, 22), _extract_date('22nd Feb 2013')) eq_(None, _extract_date('')) eq_(None, _extract_date('ejwkjw kjjwk 20')) eq_(None, _extract_date('2009 thouth 15 fne 04')) eq_(None, _extract_date('11')) eq_(None, _extract_date('12.2009')) eq_(None, _extract_date('4.2-3252')) eq_(None, _extract_date('2009-05/23')) # https://github.com/lodash/lodash/wiki/Changelog#aug-17-2012--diff--docs eq_(date(2012, 8, 17), _extract_date('Aug. 
17, 2012')) eq_(date(2009, 5, 23), _extract_date('2009-05-23')) eq_(date(2009, 5, 23), _extract_date('2009-5-23')) eq_(date(2009, 5, 3), _extract_date('2009-05-03')) eq_(date(2014, 5, 17), _extract_date('2014/05/17')) eq_(date(2009, 5, 23), _extract_date('05-23-2009')) eq_(date(2009, 5, 23), _extract_date('05.23.2009')) eq_(date(2009, 5, 23), _extract_date('23.05.2009')) eq_(date(2013, 3, 31), _extract_date('1.2.0 (2013-03-31)')) eq_(date(2009, 5, 23), _extract_date('(2009-05-23)')) eq_(date(2009, 5, 23), _extract_date('v 1.0.0 (2009-05-23)')) eq_(date(2014, 5, 16), _extract_date('**1.5.6 (2014-05-16)**')) eq_(date(2009, 5, 23), _extract_date('in a far far 2009-05-23 there were star wars')) eq_(date(2009, 5, 23), _extract_date('in a far far 23-05-2009 there were star wars')) eq_(date(2009, 5, 23), _extract_date('in a far far 23.05.2009 there were star wars')) # this variant is from Nginx's changelog eq_(date(2014, 4, 24), _extract_date(' 24 Apr 2014')) eq_(date(2014, 4, 28), _extract_date('April 28, 2014')) # from django # these two are from python's click eq_(date(2014, 5, 23), _extract_date('(bugfix release, released on May 23rd 2014)')) eq_(date(2014, 5, 21), _extract_date('(no codename, released on May 21st 2014)')) eq_(date(2014, 8, 13), _extract_date('August 13th 2014')) # like click's but from handlebars.js eq_(date(2014, 9, 1), _extract_date('September 1st, 2014')) # and this one from https://enterprise.github.com/releases eq_(date(2012, 2, 9), _extract_date('February 9, 2012')) eq_(date(2014, 9, 2), _extract_date('September 2, 2014')) # from https://github.com/ingydotnet/boolean-pm/blob/master/Changes # https://github.com/miyagawa/Perlbal-Plugin-PSGI/blob/master/Changes eq_(date(2014, 8, 8), _extract_date('Fri Aug 8 19:12:51 PDT 2014')) # from https://github.com/tadam/Test-Mock-LWP-Dispatch/blob/master/Changes eq_(date(2013, 5, 28), _extract_date('Tue May 28, 2013')) eq_(date(2013, 4, 1), _extract_date('Mon Apr 01, 2013')) eq_(date(2013, 3, 29), _extract_date('Fri Mar 29, 2013')) # from https://github.com/alex/django-taggit/blob/develop/CHANGELOG.txt # we consider that first number is a month # all dates which use day in first position, should be normalized # by sed expressions eq_(date(2014, 10, 8), _extract_date('10.08.2014')) def test_url_normalization(): eq_(('https://github.com/lodash/lodash/wiki/Changelog', None, None), normalize_url('https://github.com/lodash/lodash/wiki/Changelog')) eq_(('git://github.com/svetlyak40wt/blah', 'svetlyak40wt', 'blah'), normalize_url('https://github.com/svetlyak40wt/blah')) eq_(('git://github.com/svetlyak40wt/blah', 'svetlyak40wt', 'blah'), normalize_url('https://github.com/svetlyak40wt/blah/')) eq_(('git://github.com/svetlyak40wt/blah', 'svetlyak40wt', 'blah'), normalize_url('https://github.com/svetlyak40wt/blah.git')) eq_(('git://github.com/svetlyak40wt/blah', 'svetlyak40wt', 'blah'), normalize_url('http://github.com/svetlyak40wt/blah')) eq_(('git://github.com/svetlyak40wt/blah', 'svetlyak40wt', 'blah'), normalize_url('[email protected]:svetlyak40wt/blah.git')) eq_(('https://some-server.com/repo', None, 'repo'), normalize_url('git+https://some-server.com/repo')) eq_(('https://github.com/sass/sass', 'sass', 'sass'), normalize_url('[email protected]:sass/sass.git', for_checkout=False)) eq_(('https://github.com/sass/sass', 'sass', 'sass'), normalize_url('https://github.com/sass/sass/releases', for_checkout=False)) def test_get_markup_type(): eq_('markdown', get_markup_type('README.MD')) eq_('markdown', get_markup_type('README.md')) 
eq_('markdown', get_markup_type('readme.mD')) eq_('markdown', get_markup_type('readme.txt.md')) eq_('markdown', get_markup_type('readme.markdown')) eq_('markdown', get_markup_type('readme.MARKDOWN')) eq_('markdown', get_markup_type('readme.mdown')) eq_('rest', get_markup_type('README.RST')) eq_('rest', get_markup_type('README.rst')) eq_('rest', get_markup_type('README.rSt')) eq_('rest', get_markup_type('readme.txt.rst')) eq_(None, get_markup_type('README')) eq_(None, get_markup_type('readme.rd')) eq_(None, get_markup_type('readme.txt')) eq_(None, get_markup_type('readme.rst.')) def test_get_change_type(): eq_('new', get_change_type('add new feature')) eq_('new', get_change_type('new feature was added')) eq_('fix', get_change_type('fix 100 bags')) eq_('fix', get_change_type('100 bags were fixed')) eq_('fix', get_change_type('change some bugfix')) eq_('fix', get_change_type('some fixes')) eq_('fix', get_change_type('[Fix] Resolved')) eq_('new', get_change_type('change something')) eq_('sec', get_change_type('This issue solves CVE-2014-3556 report')) eq_('dep', get_change_type('pip install --build and pip install --no-clean are now deprecated')) eq_('inc', get_change_type('BACKWARD INCOMPATIBLE Removed the bundle support which was deprecated in 1.4.')) eq_('fix', get_change_type('bug fix: HANDLER-{BIND,CASE} no longer drop into ldb when a clause')) eq_('fix', get_change_type('BUG/MINOR: http: fix typos in previous patch'))
python
# coding=utf-8
__author__ = 'cheng.hu'
import logging

# Step 1: create a logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)  # master switch for the log level

# Step 2: create a handler that writes to a log file
logfile = '/Users/CalvinHu/Documents/python/hurnado/src/test/log.txt'
fh = logging.FileHandler(logfile, mode='w')
fh.setLevel(logging.INFO)  # log level threshold for file output

# Step 3: create another handler that writes to the console
# ch = logging.StreamHandler()
# ch.setLevel(logging.WARNING)  # log level threshold for console output

# Step 4: define the output format for the handlers
formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
fh.setFormatter(formatter)
# ch.setFormatter(formatter)

# Step 5: attach the handlers to the logger
logger.addHandler(fh)
# logger.addHandler(ch)
python
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponse, HttpResponseBadRequest, JsonResponse
from django.shortcuts import render

# Create your views here.
from django.template.loader import render_to_string
from django.urls import reverse_lazy
from django.views.decorators.http import require_http_methods
from django.views.generic import ListView, DeleteView

from sonsuz.news.models import News
from sonsuz.utils.utils import ajax_required, AuthorRequiredMixin


class NewsListView(ListView):
    # model = News
    paginate_by = 10
    template_name = 'news/news_list.html'
    context_object_name = 'news_list'

    def get_queryset(self, *kwargs):
        return News.objects.filter(reply=False).select_related('user').prefetch_related('likers')


class NewsManageView(ListView):
    # model = News
    paginate_by = 10
    template_name = 'news/news_manages.html'
    context_object_name = 'news_manages_list'

    def get_queryset(self, *kwargs):
        return News.objects.filter(reply=False).select_related('user').prefetch_related('likers')


@login_required
@ajax_required
@require_http_methods(["POST"])
def post_news(request):
    """Post a news item, AJAX POST request"""
    newsContent = request.POST['news_content'].strip()
    newsTitle = request.POST['news_title'].strip()
    if newsContent:
        news = News.objects.create(user=request.user, content=newsContent, title=newsTitle)
        html = render_to_string('news/news_single.html', {'news': news, 'request': request})
        return HttpResponse(html)
    else:
        return HttpResponseBadRequest("Content cannot be empty!")


# class NewsDeleteView(LoginRequiredMixin, AuthorRequiredMixin, DeleteView)
class NewsDeleteView(LoginRequiredMixin, DeleteView):
    # class NewsDeleteView(DeleteView):
    """Delete a single news record"""
    model = News
    template_name = 'news/news_confirm_delete.html'
    success_url = reverse_lazy('news:news_manage')  # usable before the project's URLConf is loaded


@login_required
@ajax_required
@require_http_methods(["POST"])
def like(request):
    """Like a news item, responds to an AJAX POST request"""
    news_id = request.POST['newsId']
    news = News.objects.get(pk=news_id)
    # Add or remove the like
    news.switch_like(request.user)
    # Return the number of likers
    return JsonResponse({"likers_count": news.likers_count()})


# @login_required
@ajax_required
@require_http_methods(["POST"])
def contents(request):
    news_id = request.POST['newsId']
    news = News.objects.get(pk=news_id)
    like_flag = "outline"
    if request.user in news.get_likers():
        like_flag = "inline"
    comment_flag = "outline"
    if news.replies_count() != 0:
        comment_flag = "inline"
    return JsonResponse({"news_conent": news.get_content(),
                         "news_title": news.title,
                         "news_like_count": news.likers_count(),
                         "news_like_flag": like_flag,
                         "news_comment_flag": comment_flag,
                         "news_cocmment_count": news.replies_count()
                         })


@login_required
@ajax_required
@require_http_methods(["POST"])
def post_reply(request):
    """Post a reply, AJAX POST request"""
    # replyContent = request.POST['reply-content'].strip()
    replyContent = request.POST['replyContent'].strip()
    parentId = request.POST['newsId']
    parent = News.objects.get(pk=parentId)
    if replyContent:
        parent.reply_this(request.user, replyContent)
        return JsonResponse({'newsid': parent.pk, 'replies_count': parent.replies_count()})
    else:
        return HttpResponseBadRequest("Content cannot be empty!")


# @ajax_required
@require_http_methods(["GET"])
def get_replies(request):
    """Return a news item's comments, AJAX GET request"""
    news_id = request.GET['newsId']
    news = News.objects.get(pk=news_id)
    # render_to_string() loads the template, fills in the data and returns a string
    replies_html = render_to_string("news/reply_list.html", {"replies": news.get_children()})
    # when there are comments
    return JsonResponse({
        "newsid": news_id,
        "replies_html": replies_html,
    })


@login_required
def update_interactions(request):
    """Update interaction counts"""
    data_point = request.GET['id_value']
    news = News.objects.get(pk=data_point)
    return JsonResponse({'likes': news.likers_count(), 'replies': news.replies_count()})
python
# Copyright 2017 University of Maryland. # # This file is part of Sesame. It is subject to the license terms in the file # LICENSE.rst found in the top-level directory of this distribution. import numpy as np from .observables import * from .defects import defectsF def getF(sys, v, efn, efp, veq): ########################################################################### # organization of the right hand side vector # ########################################################################### # A site with coordinates (i,j,k) corresponds to a site number s as follows: # k = s//(Nx*Ny) # j = s - s//Nx # i = s - j*Nx - k*Nx*Ny # # Rows for (efn_s, efp_s, v_s) # ---------------------------- # fn_row = 3*s # fp_row = 3*s+1 # fv_row = 3*s+2 Nx, Ny, Nz = sys.xpts.shape[0], sys.ypts.shape[0], sys.zpts.shape[0] # right hand side vector global vec vec = np.zeros((3*Nx*Ny*Nz,)) def update(fn, fp, fv, sites): global vec vec[3*sites] = fn vec[3*sites+1] = fp vec[3*sites+2] = fv ########################################################################### # For all sites in the system # ########################################################################### # carrier densities n = sys.Nc * np.exp(+sys.bl + efn + v) p = sys.Nv * np.exp(-sys.Eg - sys.bl - efp - v) # equilibrium carrier densities n_eq = sys.Nc * np.exp(+sys.bl + veq) p_eq = sys.Nv * np.exp(-sys.Eg - sys.bl - veq) # bulk charges rho = sys.rho - n + p # recombination rates r = get_bulk_rr(sys, n, p) # charge defects if len(sys.defects_list) != 0: defectsF(sys, sys.defects_list, n, p, rho, r) # charge devided by epsilon rho = rho / sys.epsilon # reshape the array as array[y-indices, x-indices] _sites = np.arange(Nx*Ny*Nz, dtype=int).reshape(Nz, Ny, Nx) def currents(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites): jnx_s, jnx_sm1, jny_s, jny_smN, jnz_s, jnz_smNN = 0, 0, 0, 0, 0, 0 jpx_s, jpx_sm1, jpy_s, jpy_smN, jpz_s, jpz_smNN = 0, 0, 0, 0, 0, 0 if dx.all() != 0: jnx_s = get_jn(sys, efn, v, sites, sites + 1, dx) jpx_s = get_jp(sys, efp, v, sites, sites + 1, dx) if dxm1.all() != 0: jnx_sm1 = get_jn(sys, efn, v, sites - 1, sites, dxm1) jpx_sm1 = get_jp(sys, efp, v, sites - 1, sites, dxm1) if dy.all() != 0: jny_s = get_jn(sys, efn, v, sites, sites + Nx, dy) jpy_s = get_jp(sys, efp, v, sites, sites + Nx, dy) if dym1.all() != 0: jny_smN = get_jn(sys, efn, v, sites - Nx, sites, dym1) jpy_smN = get_jp(sys, efp, v, sites - Nx, sites, dym1) if dz.all() != 0: jnz_s = get_jn(sys, efn, v, sites, sites + Nx*Ny, dz) jpz_s = get_jp(sys, efp, v, sites, sites + Nx*Ny, dz) if dzm1.all() != 0: jnz_smNN = get_jn(sys, efn, v, sites - Nx*Ny, sites, dzm1) jpz_smNN = get_jp(sys, efp, v, sites - Nx*Ny, sites, dzm1) return jnx_s, jnx_sm1, jny_s, jny_smN, jnz_s, jnz_smNN,\ jpx_s, jpx_sm1, jpy_s, jpy_smN, jpz_s, jpz_smNN def ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites): # Drift diffusion Poisson equations that determine fn, fp, fv # lattice distances dxbar = (dx + dxm1) / 2. dybar = (dy + dym1) / 2. dzbar = (dz + dzm1) / 2. 
# compute currents jnx_s, jnx_sm1, jny_s, jny_smN, jnz_s, jnz_smNN,\ jpx_s, jpx_sm1, jpy_s, jpy_smN, jpz_s, jpz_smNN = \ currents(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites) # drift diffusion u = sys.g[sites] - r[sites] fn = (jnx_s - jnx_sm1) / dxbar + (jny_s - jny_smN) / dybar \ + (jnz_s - jnz_smNN) / dzbar + u fp = (jpx_s - jpx_sm1) / dxbar + (jpy_s - jpy_smN) / dybar \ + (jpz_s - jpz_smNN) / dzbar - u # Poisson dv_sm1, dv_sp1, dv_smN, dv_spN, dv_smNN, dv_spNN = 0, 0, 0, 0, 0, 0 v_s = v[sites] if dx.all() != 0: dv_sp1 = (v[sites+1] - v_s) / dx if dxm1.all() != 0: dv_sm1 = (v_s - v[sites-1]) / dxm1 if dy.all() != 0: dv_spN = (v[sites+Nx] - v_s) / dy if dym1.all() != 0: dv_smN = (v_s - v[sites-Nx]) / dym1 if dz.all() != 0: dv_spNN = (v[sites+Nx*Ny] - v_s) / dz if dzm1.all() != 0: dv_smNN = (v_s - v[sites-Nx*Ny]) / dzm1 fv = (dv_sm1 - dv_sp1) / dxbar + (dv_smN - dv_spN) / dybar\ + (dv_smNN - dv_spNN) / dzbar - rho[sites] # update vector update(fn, fp, fv, sites) def right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites): # Boundary conditions on the right contact # lattice distances and sites dx = np.array([0]) dxm1 = sys.dx[-1] dxbar = (dx + dxm1) / 2. dybar = (dy + dym1) / 2. dzbar = (dz + dzm1) / 2. # compute currents _, jnx_sm1, jny_s, jny_smN, jnz_s, jnz_smNN,\ _, jpx_sm1, jpy_s, jpy_smN, jpz_s, jpz_smNN = \ currents(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites) # compute jx_s with continuity equation jnx_s = jnx_sm1 + dxbar * (r[sites] - sys.g[sites] - (jny_s - jny_smN)/dybar\ - (jnz_s - jnz_smNN)/dzbar) jpx_s = jpx_sm1 + dxbar * (sys.g[sites] - r[sites] - (jpy_s - jpy_smN)/dybar\ - (jpz_s - jpz_smNN)/dzbar) # b_n, b_p and b_v values bn = jnx_s + sys.Scn[1] * (n[sites] - n_eq[sites]) bp = jpx_s - sys.Scp[1] * (p[sites] - p_eq[sites]) bv = 0 # Dirichlet BC # update right hand side vector update(bn, bp, bv, sites) ########################################################################### # inside the system: 0 < i < Nx-1, 0 < j < Ny-1, 0 < k < Nz-1 # ########################################################################### # We compute fn, fp, fv on the inner part of the system. 
# list of the sites inside the system sites = _sites[1:Nz-1, 1:Ny-1, 1:Nx-1].flatten() # lattice distances dx = np.tile(sys.dx[1:], (Ny-2)*(Nz-2)) dy = np.repeat(sys.dy[1:], (Nx-2)*(Nz-2)) dz = np.repeat(sys.dz[1:], (Nx-2)*(Ny-2)) dxm1 = np.tile(sys.dx[:-1], (Ny-2)*(Nz-2)) dym1 = np.repeat(sys.dy[:-1], (Nx-2)*(Nz-2)) dzm1 = np.repeat(sys.dz[:-1], (Nx-2)*(Ny-2)) # compute fn, fp, fv and update vector ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites) ########################################################################### # left boundary: i = 0, 0 <= j <= Ny-1, 0 <= k <= Nz-1 # ########################################################################### # list of the sites on the left side sites = _sites[:, :, 0].flatten() # compute the currents jnx = get_jn(sys, efn, v, sites, sites + 1, sys.dx[0]) jpx = get_jp(sys, efp, v, sites, sites + 1, sys.dx[0]) # compute an, ap, av an = jnx - sys.Scn[0] * (n[sites] - n_eq[sites]) ap = jpx + sys.Scp[0] * (p[sites] - p_eq[sites]) av = 0 # to ensure Dirichlet BCs update(an, ap, av, sites) ########################################################################### # right boundaries # ########################################################################### ########################################################################### # right boundary: i = Nx-1, 0 < j < Ny-1, 0 < k < Nz-1 # ########################################################################### # list of the sites on the right side sites = _sites[1:Nz-1, 1:Ny-1, Nx-1].flatten() # lattice distances dy = np.repeat(sys.dy[1:], Nz-2) dym1 = np.repeat(sys.dy[:-1], Nz-2) dz = np.repeat(sys.dz[1:], Ny-2) dzm1 = np.repeat(sys.dz[:-1], Ny-2) # compute the BC and update the right hand side vector right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites) ########################################################################### # right boundary: i = Nx-1, j = Ny-1, 0 < k < Nz-1 # ########################################################################### # list of the sites on the right side sites = _sites[1:Nz-1, Ny-1, Nx-1].flatten() # lattice distances dy = np.array([0]) dym1 = np.repeat(sys.dy[-1], Nz-2) dz = sys.dz[1:] dzm1 = sys.dz[:-1] # compute the BC and update the right hand side vector right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites) ########################################################################### # right boundary: i = Nx-1, j = 0, 0 < k < Nz-1 # ########################################################################### # list of the sites on the right side sites = _sites[1:Nz-1, 0, Nx-1].flatten() # lattice distances dy = np.repeat(sys.dy[-1], Nz-2) dym1 = np.array([0]) dz = sys.dz[1:] dzm1 = sys.dz[:-1] # compute the BC and update the right hand side vector right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites) ########################################################################### # right boundary: i = Nx-1, 0 < j < Ny-1, k = Nz-1 # ########################################################################### # list of the sites on the right side sites = _sites[Nz-1, 1:Ny-1, Nx-1].flatten() # lattice distances dy = sys.dy[1:] dym1 = sys.dy[:-1] dz = np.array([0]) dzm1 = np.repeat(sys.dz[-1], Ny-2) # compute the BC and update the right hand side vector right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites) ########################################################################### # right boundary: i = Nx-1, 0 < j < Ny-1, k = 0 # ########################################################################### # list of the sites on the right side sites = 
_sites[0, 1:Ny-1, Nx-1].flatten() # lattice distances dy = sys.dy[1:] dym1 = sys.dy[:-1] dz = np.repeat(sys.dz[0], Ny-2) dzm1 = np.array([0]) # compute the BC and update the right hand side vector right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites) ########################################################################### # right boundary: i = Nx-1, j = Ny-1, k = 0 # ########################################################################### # list of the sites on the right side sites = _sites[0, Ny-1, Nx-1].flatten() # lattice distances dy = np.array([0]) dym1 = sys.dy[-1] dz = sys.dz[0] dzm1 = np.array([0]) # compute the BC and update the right hand side vector right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites) ########################################################################### # right boundary: i = Nx-1, j = Ny-1, k = Nz-1 # ########################################################################### # list of the sites on the right side sites = _sites[Nz-1, Ny-1, Nx-1].flatten() # lattice distances dy = np.array([0]) dym1 = sys.dy[-1] dz = np.array([0]) dzm1 = sys.dz[-1] # compute the BC and update the right hand side vector right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites) ########################################################################### # right boundary: i = Nx-1, j = 0, k = Nz-1 # ########################################################################### # list of the sites on the right side sites = _sites[Nz-1, 0, Nx-1].flatten() # lattice distances dy = sys.dy[0] dym1 = np.array([0]) dz = np.array([0]) dzm1 = sys.dz[-1] # compute the BC and update the right hand side vector right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites) ########################################################################### # right boundary: i = Nx-1, j = 0, k = 0 # ########################################################################### # list of the sites on the right side sites = _sites[0, 0, Nx-1].flatten() # lattice distances dy = sys.dy[0] dym1 = np.array([0]) dz = sys.dz[0] dzm1 = np.array([0]) # compute the BC and update the right hand side vector right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites) ########################################################################### # faces between contacts: 0 < i < Nx-1, j or k fixed # ########################################################################### # Here we focus on the faces between the contacts. 
########################################################################### # z-face top: 0 < i < Nx-1, 0 < j < Ny-1, k = Nz-1 # ########################################################################### # list of the sites sites = _sites[Nz-1, 1:Ny-1, 1:Nx-1].flatten() # lattice distances dx = np.tile(sys.dx[1:], Ny-2) dy = np.repeat(sys.dy[1:], Nx-2) dz = np.array([0]) dxm1 = np.tile(sys.dx[:-1], Ny-2) dym1 = np.repeat(sys.dy[:-1], Nx-2) dzm1 = np.repeat(sys.dz[-1], (Nx-2)*(Ny-2)) # compute fn, fp, fv and update vector ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites) ########################################################################### # z- face bottom: 0 < i < Nx-1, 0 < j < Ny-1, k = 0 # ########################################################################### # list of the sites sites = _sites[0, 1:Ny-1, 1:Nx-1].flatten() # lattice distances dx = np.tile(sys.dx[1:], Ny-2) dy = np.repeat(sys.dy[1:], Nx-2) dz = np.repeat(sys.dz[0], (Nx-2)*(Ny-2)) dxm1 = np.tile(sys.dx[:-1], Ny-2) dym1 = np.repeat(sys.dy[:-1], Nx-2) dzm1 = np.array([0]) # compute fn, fp, fv and update vector ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites) ########################################################################### # y-face front: 0 < i < Nx-1, j = 0, 0 < k < Nz-1 # ########################################################################### # list of the sites sites = _sites[1:Nz-1, 0, 1:Nx-1].flatten() # lattice distances dx = np.tile(sys.dx[1:], Nz-2) dy = np.repeat(sys.dy[0], (Nx-2)*(Nz-2)) dz = np.repeat(sys.dz[1:], (Nx-2)) dxm1 = np.tile(sys.dx[:-1], Nz-2) dym1 = np.array([0]) dzm1 = np.repeat(sys.dz[:-1], Nx-2) # compute fn, fp, fv and update vector ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites) ########################################################################### # y-face back: 0 < i < Nx-1, j = Ny-1, 0 < k < Nz-1 # ########################################################################### # list of the sites sites = _sites[1:Nz-1, Ny-1, 1:Nx-1].flatten() # lattice distances dx = np.tile(sys.dx[1:], Nz-2) dy = np.array([0]) dz = np.repeat(sys.dz[1:], Nx-2) dxm1 = np.tile(sys.dx[:-1], Nz-2) dym1 = np.repeat(sys.dy[0], (Nx-2)*(Nz-2)) dzm1 = np.repeat(sys.dz[:-1], Nx-2) # compute fn, fp, fv and update vector ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites) ########################################################################### # edges between contacts: 0 < i < Nx-1, j and k fixed # ########################################################################### # Here we focus on the edges between the contacts. 
# lattice distances dx = sys.dx[1:] dxm1 = sys.dx[:-1] ########################################################################### # edge z top // y back: 0 < i < Nx-1, j = Ny-1, k = Nz-1 # ########################################################################### # list of the sites sites = _sites[Nz-1, Ny-1, 1:Nx-1].flatten() # lattice distances dy = np.array([0]) dz = np.array([0]) dym1 = np.repeat(sys.dy[-1], Nx-2) dzm1 = np.repeat(sys.dz[-1], Nx-2) # compute fn, fp, fv and update vector ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites) ########################################################################### # edge z top // y front: 0 < i < Nx-1, j = 0, k = Nz-1 # ########################################################################### # list of the sites sites = _sites[Nz-1, 0, 1:Nx-1].flatten() # lattice distances dy = np.repeat(sys.dy[0], Nx-2) dz = np.array([0]) dym1 = np.array([0]) dzm1 = np.repeat(sys.dz[-1], Nx-2) # compute fn, fp, fv and update vector ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites) ########################################################################### # edge z bottom // y back: 0 < i < Nx-1, j = Ny-1, k = 0 # ########################################################################### # list of the sites sites = _sites[0, Ny-1, 1:Nx-1].flatten() # lattice distances dy = np.array([0]) dz = np.repeat(sys.dz[0], Nx-2) dym1 = np.repeat(sys.dy[-1], Nx-2) dzm1 = np.array([0]) # compute fn, fp, fv and update vector ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites) ########################################################################### # edge z bottom // y front: 0 < i < Nx-1, j = 0, k = 0 # ########################################################################### # list of the sites sites = _sites[0, 0, 1:Nx-1].flatten() # lattice distances dy = np.repeat(sys.dy[0], Nx-2) dz = np.repeat(sys.dz[0], Nx-2) dym1 = np.array([0]) dzm1 = np.array([0]) # compute fn, fp, fv and update vector ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites) return vec
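# Quick self-check (not part of the original module) of the site numbering that getF
# relies on: np.arange(Nx*Ny*Nz).reshape(Nz, Ny, Nx) flattens with x varying fastest,
# then y, then z. The Nx, Ny, Nz values below are arbitrary.
if __name__ == "__main__":
    Nx, Ny, Nz = 4, 3, 2
    sites = np.arange(Nx * Ny * Nz).reshape(Nz, Ny, Nx)
    for s in range(Nx * Ny * Nz):
        k = s // (Nx * Ny)
        j = (s // Nx) % Ny
        i = s % Nx
        assert sites[k, j, i] == s
    # Rows of the right hand side vector for site s: fn -> 3*s, fp -> 3*s+1, fv -> 3*s+2.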
python
import tornado.web import mallory class HeartbeatHandler(tornado.web.RequestHandler): def initialize(self, circuit_breaker): self.circuit_breaker = circuit_breaker @tornado.web.asynchronous @tornado.gen.engine def get(self): if self.circuit_breaker.is_tripped(): self.set_status(503) status_message = "Circuit Breaker Tripped" else: self.set_status(200) status_message = "OK" self.write("Mallory " + mallory.Version + "\n") self.write(status_message) self.finish()
python
from petroleum.conditional_task import ConditionalTask
from petroleum.exceptions import PetroleumException
from petroleum.task import Task


class ExclusiveChoice(Task):
    def __init__(self, name=None, *args, **kwargs):
        self._conditional_tasks = []
        # Pass the given name through instead of hard-coding name=None.
        super().__init__(name=name, *args, **kwargs)

    def get_next_task(self, task_status):
        for conditional_task in self._conditional_tasks:
            result = conditional_task.condition(task_status)
            if not isinstance(result, bool):
                raise PetroleumException(
                    "Condition %s did not return bool" % conditional_task.condition
                )
            if result is True:
                return conditional_task.task
        return getattr(self, "_next_task", None)

    def connect_if(self, task, condition):
        conditional_task = ConditionalTask(task=task, condition=condition)
        self._conditional_tasks.append(conditional_task)
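# Usage sketch, not part of the original module. It assumes petroleum's Task can be
# built with just a name and that task_status is whatever mapping the workflow passes
# to conditions; both are illustrative assumptions, not petroleum's documented API.
if __name__ == "__main__":
    approve = Task(name="approve")
    reject = Task(name="reject")

    gate = ExclusiveChoice(name="review_gate")
    gate.connect_if(approve, lambda task_status: task_status.get("score", 0) >= 0.5)
    gate.connect_if(reject, lambda task_status: task_status.get("score", 0) < 0.5)

    assert gate.get_next_task({"score": 0.8}) is approve
    assert gate.get_next_task({"score": 0.1}) is reject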
python
""" AmberTools utilities. """ __author__ = "Steven Kearnes" __copyright__ = "Copyright 2014, Stanford University" __license__ = "BSD 3-clause" from collections import OrderedDict from cStringIO import StringIO import numpy as np import os import shutil import subprocess import tempfile from rdkit import Chem from vs_utils.utils.pdb_utils import PdbReader class Antechamber(object): """ Wrapper methods for Antechamber functionality. Calculations are carried out in a temporary directory because Antechamber writes out several files to disk. Parameters ---------- charge_type : str, optional (default 'bcc') Antechamber charge type string. Defaults to AM1-BCC charges. """ def __init__(self, charge_type='bcc'): self.charge_type = charge_type # temporary directory self.temp_dir = tempfile.mkdtemp() def __del__(self): """ Cleanup. """ shutil.rmtree(self.temp_dir) def get_charges_and_radii(self, mol): """ Use Antechamber to calculate partial charges and atomic radii. Antechamber requires file inputs and output, so the molecule is written to SDF and Antechamber writes out a modified PDB (mpdb) containing charge and radius information. Note that Antechamber only processes the first molecule or conformer in the input file. Parameters ---------- mol : RDMol Molecule. """ net_charge = self.get_net_charge(mol) # write molecule to temporary file _, input_filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir) writer = Chem.SDWriter(input_filename) writer.write(mol) writer.close() # calculate charges and radii with Antechamber output_fd, output_filename = tempfile.mkstemp(suffix='.mpdb', dir=self.temp_dir) os.close(output_fd) # close temp file args = ['antechamber', '-i', input_filename, '-fi', 'sdf', '-o', output_filename, '-fo', 'mpdb', '-c', self.charge_type, '-nc', str(net_charge)] # all arguments must be strings try: subprocess.check_output(args, cwd=self.temp_dir) except subprocess.CalledProcessError as e: name = '' if mol.HasProp('_Name'): name = mol.GetProp('_Name') print "Antechamber: molecule '{}' failed.".format(name) with open(input_filename) as f: print f.read() raise e # extract charges and radii reader = ModifiedPdbReader() with open(output_filename) as f: charges, radii = reader.get_charges_and_radii(f) return charges, radii @staticmethod def get_net_charge(mol): """ Calculate the net charge on a molecule. Parameters ---------- mol : RDMol Molecule. """ net_charge = 0 for atom in mol.GetAtoms(): net_charge += atom.GetFormalCharge() return net_charge class PBSA(object): """ Wrapper methods for PBSA functionality. Calculations are carried out in a temporary directory because PBSA writes out several files to disk. Parameters ---------- size : float, optional (default 30.) Length of each side of the grid, in Angstroms. Used to calculate PBSA parameters xmin, xmax, etc. resolution : float, optional (default 0.5) Space between grid points, in Angstroms. Corresponds to PBSA space parameter. nb_cutoff : float, optional (default 5.) Cutoff distance for van der Waals interactions. Corresponds to PBSA cutnb parameter. ionic_strength : float, optional (default 150.) Ionic strength of the solvent, in mM. Corresponds to PBSA istrng parameter. """ def __init__(self, size=30., resolution=0.5, nb_cutoff=5., ionic_strength=150.): self.size = float(size) self.resolution = float(resolution) self.nb_cutoff = float(nb_cutoff) self.ionic_strength = float(ionic_strength) # temporary directory self.temp_dir = tempfile.mkdtemp() def __del__(self): """ Cleanup. 
""" shutil.rmtree(self.temp_dir) def get_esp_grid(self, mol, charges, radii, conf_id=None): """ Use PBSA to calculate an electrostatic potential grid for a molecule conformer. Parameters ---------- mol : RDKit Mol Molecule. charges : array_like Atomic partial charges. radii : array_like Atomic radii. conf_id : int, optional Conformer ID. """ # generate a PQR file for this conformer pqr = self.mol_to_pqr(mol, charges, radii, conf_id=conf_id) # get ESP grid grid = self.get_esp_grid_from_pqr(pqr) return grid @staticmethod def mol_to_pqr(mol, charges, radii, conf_id=None): """ Generate a PQR block for a molecule conformer. Parameters ---------- mol : RDKit Mol Molecule. charges : array_like Atomic partial charges. radii : array_like Atomic radii. conf_id : int, optional Conformer ID. """ if conf_id is None: conf_id = -1 pdb = Chem.MolToPDBBlock(mol, confId=conf_id) reader = PdbReader() pqr = reader.pdb_to_pqr(StringIO(pdb), charges, radii) return pqr def get_esp_grid_from_pqr(self, pqr): """ Use PBSA to calculate an electrostatic potential grid for a molecule (one conformer only) in PQR format. The grid is written is ASCII format to pbsa.phi. Parameters ---------- pqr : file_like Input PQR file. """ # write PQR to disk pqr_fd, pqr_filename = tempfile.mkstemp(suffix='.pqr', dir=self.temp_dir) os.close(pqr_fd) # close temp file with open(pqr_filename, 'wb') as f: f.write(pqr) # write PBSA parameter file param_fd, param_filename = tempfile.mkstemp(suffix='.in', dir=self.temp_dir) os.close(param_fd) # close temp file with open(param_filename, 'wb') as f: f.write(self.get_pbsa_parameter_file()) # run PBSA output_fd, output_filename = tempfile.mkstemp(suffix='.out', dir=self.temp_dir) os.close(output_fd) # close temp file os.remove(output_filename) # PBSA won't overwrite existing file args = ['pbsa', '-i', param_filename, '-o', output_filename, '-pqr', pqr_filename] try: subprocess.check_output(args, cwd=self.temp_dir) except subprocess.CalledProcessError as e: with open(output_filename) as f: print f.read() raise e # extract ESP grid with open(os.path.join(self.temp_dir, 'pbsa.phi')) as f: grid, center = self.parse_esp_grid(f) return grid, center def get_pbsa_parameter_file(self): """ Construct a PBSA parameter file. """ params = """ Calculate ESP for a small molecule &cntrl inp=0, ! required for PQR input / &pb npbverb=1, ! be verbose phiout=1, phiform=1, ! write grid to Amber ASCII file istrng={istrng}, ! ionic strength space={space}, ! grid spacing xmin={xmin}, xmax={xmax}, ymin={ymin}, ymax={ymax}, zmin={zmin}, zmax={zmax}, eneopt=1, cutnb={cutnb}, / """ delta = self.size / 2. params = params.format( space=self.resolution, istrng=self.ionic_strength, xmin=-delta, xmax=delta, ymin=-delta, ymax=delta, zmin=-delta, zmax=delta, cutnb=self.nb_cutoff) return params def parse_esp_grid(self, grid): """ Parse PBSA ASCII electrostatic potential grid. Variables used in the ASCII format: * h : grid spacing * (gox, goy, goz) : grid origin * (xm, ym, zm) : grid dimensions * phi : electrostatic potential in kcal/mol-e The mapping between one-based grid points (i, j, k) and phi indices is p_i = i + xm * (j - 1 + ym * (k - 1)). However, since phi is a flattened version of the grid (with Fortran ordering), we can use np.reshape to get the 3D grid. Spatial coordinates (x, y, z) in the grid are given by (gox + h * i, goy + h * j, goz + h * k). The grid center is therefore (gox + h * (xm + 1) / 2, goy + h * (ym + 1) / 2, goz + h * (zm + 1) / 2). Parameters ---------- grid : file_like Amber ASCII format file. 
""" h = gox = goy = goz = None xm = ym = zm = None phi = None for line in grid: line = line.strip() if line.startswith('#'): continue if h is None: h, gox, goy, goz = np.asarray(line.split(), dtype=float) elif xm is None: xm, ym, zm = np.asarray(line.split(), dtype=int) else: phi = np.asarray(line.split(), dtype=float) dim = (xm, ym, zm) grid = np.reshape(phi, dim, order='F') origin = (gox, goy, goz) center = tuple(o + h * (m + 1) / 2. for o, m in zip(origin, dim)) # sanity checks assert h == self.resolution return grid, center class ModifiedPdbReader(PdbReader): """ Handle Amber modified PDB files and generate Amber-style PQR files. """ def _parse_atom_record(self, line): """ Parse optional fields in ATOM and HETATM records. Amber modified PDB files contain charge, radius and atom type information in the fields following the x, y, z coordinates for atoms. Parameters ---------- line : str Amber modified PDB ATOM or HETATM line. """ fields = OrderedDict() charge, radius, amber_type = line[54:].strip().split() fields['charge'] = charge fields['radius'] = radius fields['amber_type'] = amber_type return fields def get_charges_and_radii(self, mpdb): """ Extract atomic charges and radii from an Antechamber modified PDB file. Parameters ---------- mpdb : file_like Antechamber modified PDB file. """ charges = [] radii = [] for line in mpdb: if line.startswith('ATOM') or line.startswith('HETATM'): fields = self.parse_atom_record(line) charges.append(fields['charge']) radii.append(fields['radius']) charges = np.asarray(charges, dtype=float) radii = np.asarray(radii, dtype=float) return charges, radii
python
from matplotlib import pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable import numpy as np from scipy.interpolate import griddata import copy def visualize_source( points, values, ax=None, enlarge_factor=1.1, npixels=100, cmap='jet', ): """ Points is defined as autolens [(y1,x1), (y2,x2), ...] order """ points = np.asarray(points) points = points[:, ::-1] #change to numpy/scipy api format -- [(x1,y2), (x2,y2),...] order half_width = max(np.abs(points.min()), np.abs(points.max())) half_width *= enlarge_factor extent = [-1.0*half_width, half_width, -1.0*half_width, half_width] coordinate_1d, dpix = np.linspace(-1.0*half_width, half_width, npixels, endpoint=True, retstep=True) xgrid, ygrid = np.meshgrid(coordinate_1d, coordinate_1d) extent = [-1.0*half_width-0.5*dpix, half_width+0.5*dpix, -1.0*half_width-0.5*dpix, half_width+0.5*dpix] source_image = griddata(points, values, (xgrid, ygrid), method='linear', fill_value=0.0) im = ax.imshow(source_image, origin='lower', extent=extent, cmap=cmap) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) plt.colorbar(im, cax=cax) def visualize_unmasked_1d_image( unmasked_1d_image, mask, dpix, ax=None, cmap='jet', origin='upper', ): """ mask: the 2d data mask """ cmap = copy.copy(plt.get_cmap(cmap)) cmap.set_bad(color='white') unmasked_2d_image = np.zeros_like(mask, dtype='float') unmasked_2d_image[~mask] = unmasked_1d_image half_width = len(mask)*0.5*dpix extent = [-1.0*half_width, half_width, -1.0*half_width, half_width] unmasked_2d_image = np.ma.masked_array(unmasked_2d_image, mask=mask) im = ax.imshow(unmasked_2d_image, origin=origin, extent=extent, cmap=cmap) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) plt.colorbar(im, cax=cax) coordinate_1d = np.arange(len(mask)) * dpix coordinate_1d = coordinate_1d - np.mean(coordinate_1d) xgrid, ygrid = np.meshgrid(coordinate_1d, coordinate_1d) rgrid = np.sqrt(xgrid**2 + ygrid**2) limit = np.max(rgrid[~mask]) ax.set_xlim(-1.0*limit, limit) ax.set_ylim(-1.0*limit, limit)
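# Usage sketch for visualize_source, not part of the original module: synthetic
# source-plane points with a Gaussian surface brightness, purely for illustration.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    points_yx = rng.uniform(-1.0, 1.0, size=(500, 2))      # autolens-style (y, x) order
    values = np.exp(-np.sum(points_yx ** 2, axis=1) / 0.1)

    fig, ax = plt.subplots()
    visualize_source(points_yx, values, ax=ax, npixels=200)
    plt.show()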
# -*- coding: utf-8 -*-
__author__ = 'S.I. Mimilakis'
__copyright__ = 'MacSeNet'

import torch
import torch.nn as nn
from torch.autograd import Variable


class SkipFiltering(nn.Module):

    def __init__(self, N, l_dim):
        """
        Constructing blocks of the skip-filtering connections.
        Reference:
            - https://arxiv.org/abs/1709.00611
            - https://arxiv.org/abs/1711.01437
        Args:
            N     : (int) Original dimensionality of the input.
            l_dim : (int) Dimensionality of the latent variables.
        """
        super(SkipFiltering, self).__init__()
        print('Constructing Skip-filtering model')

        self._N = N
        self._ldim = l_dim
        self.activation_function = torch.nn.ReLU()

        # Encoder
        self.ih_matrix = nn.Linear(self._N, self._ldim)

        # Decoder
        self.ho_matrix = nn.Linear(self._ldim, self._N)

        # Initialize the weights
        self.initialize_skip_filt()

    def initialize_skip_filt(self):
        """
        Manual weight/bias initialization.
        """
        # Matrices
        nn.init.xavier_normal(self.ih_matrix.weight)
        nn.init.xavier_normal(self.ho_matrix.weight)

        # Biases
        self.ih_matrix.bias.data.zero_()
        self.ho_matrix.bias.data.zero_()

        print('Initialization of the skip-filtering connection(s) model done...')
        return None

    def forward(self, input_x, mask_return=False):
        if torch.has_cudnn:
            x = Variable(torch.from_numpy(input_x).cuda(), requires_grad=True)
        else:
            x = Variable(torch.from_numpy(input_x), requires_grad=True)

        # Encoder
        hl_rep = self.activation_function(self.ih_matrix(x))

        # Decoder
        mask = self.activation_function(self.ho_matrix(hl_rep))

        # Skip-Filtering connection(s)
        y_out = torch.mul(x, mask)

        if mask_return:
            return y_out, x, mask
        else:
            return y_out, x

# EOF
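
def _example_skip_filtering_forward():
    # Hypothetical smoke test, not part of the original module: push a random
    # magnitude-spectrogram batch through SkipFiltering. Shapes are illustrative
    # (4 frames, N = 1025 frequency bins, 256 latent units). The forward pass
    # expects a NumPy array, since it wraps the input in a Variable itself; on a
    # CUDA machine the parameters would also need to be moved with model.cuda(),
    # because forward() only moves the input.
    import numpy as np
    model = SkipFiltering(N=1025, l_dim=256)
    x_np = np.random.rand(4, 1025).astype(np.float32)
    y_out, x_in, mask = model(x_np, mask_return=True)
    print(y_out.size(), mask.size())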
# __init__.py
import logging
import os

from task_manager.views import (
    HomeView,
    ErrorView,
    InfoView,
    LoginView,
    LogoutView,
    ProfileView,
    RegistrationView,
    TaskListView,
    TaskView
)
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.options import options, define
from tornado_sqlalchemy import SQLAlchemy
from tornado.web import Application

SQLALCHEMY_URL = os.environ.get(
    'DATABASE_URL',
    'postgres://postgres:postgres@localhost:5432/task_manager'
)

define('port', default=8888, help='port to listen on')


def main():
    """Construct and serve the tornado application."""
    api_root = '/api/v1'
    app = Application(handlers=[
        (r'/', HomeView),
        (r'/favicon.ico', HomeView),
        (r'/error_500', ErrorView),
        (api_root, InfoView),
        (api_root + r'/login', LoginView),
        (api_root + r'/accounts', RegistrationView),
        (api_root + r'/accounts/([\w]+)', ProfileView),
        (api_root + r'/accounts/([\w]+)/tasks', TaskListView),
        (api_root + r'/accounts/([\w]+)/tasks/([\d]+)', TaskView),
        (api_root + r'/accounts/([\w]+)/logout', LogoutView),
    ],
        db=SQLAlchemy(SQLALCHEMY_URL),
        cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
        **options.group_dict('application'),
        login_url="/api/v1/login",
        xsrf_cookies=True,
        debug=True,
        static_path=os.path.join(os.path.dirname(__file__), "static"),
        template_path=os.path.join(os.path.dirname(__file__), "templates")
    )
    http_server = HTTPServer(app)
    http_server.listen(options.port)
    print('Listening on http://localhost:%d' % options.port)
    logging.info('Listening on http://localhost:%d' % options.port)
    IOLoop.current().start()
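
# A minimal entry-point sketch, assuming this module is meant to be run directly;
# the original package may already provide an equivalent console script elsewhere.
if __name__ == '__main__':
    options.parse_command_line()
    main()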
from dnaweaver import ( CommercialDnaOffer, DnaAssemblyStation, GibsonAssemblyMethod, OligoAssemblyMethod, TmSegmentSelector, FixedSizeSegmentSelector, PerBasepairPricing, SequenceLengthConstraint, ) # OLIGO COMPANY oligo_com = CommercialDnaOffer( name="Oligo vendor", sequence_constraints=[SequenceLengthConstraint(max_length=200)], pricing=PerBasepairPricing(0.10), lead_time=7, ) oligo_assembly_station = DnaAssemblyStation( name="Oligo Assembly Station", assembly_method=OligoAssemblyMethod( overhang_selector=TmSegmentSelector( min_size=15, max_size=25, min_tm=50, max_tm=70 ), min_segment_length=40, max_segment_length=200, sequence_constraints=[SequenceLengthConstraint(max_length=1500)], duration=8, cost=2, ), supplier=oligo_com, coarse_grain=20, fine_grain=False, a_star_factor="auto", ) gibson_blocks_assembly_station = DnaAssemblyStation( name="Gibson Blocks Assembly", assembly_method=GibsonAssemblyMethod( overhang_selector=FixedSizeSegmentSelector(80), min_segment_length=1000, max_segment_length=4000, duration=8, cost=16, ), supplier=oligo_assembly_station, coarse_grain=300, fine_grain=False, memoize=True, a_star_factor="auto", ) chunks_assembly_station = DnaAssemblyStation( name="Chunks assembly (Yeast)", assembly_method=GibsonAssemblyMethod( overhang_selector=FixedSizeSegmentSelector(300), min_segment_length=7000, max_segment_length=15000, duration=8, ), supplier=gibson_blocks_assembly_station, coarse_grain=1000, fine_grain=None, logger="bar", a_star_factor="auto", memoize=True, ) with open("50kb_sequence.txt", "r") as f: sequence = f.read() print("Generating an assembly plan...") chunks_assembly_station.prepare_network_on_sequence(sequence) quote = chunks_assembly_station.get_quote(sequence, with_assembly_plan=True) print(quote.assembly_step_summary()) print("Generating report...") assembly_plan_report = quote.to_assembly_plan_report() assembly_plan_report.write_full_report("report") print("Done! (see 'report' folder)")
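
def _make_dummy_input(path="50kb_sequence.txt", length=50000):
    # Hypothetical convenience helper, not part of the original example: it writes
    # a random 50 kb sequence to `path` so the script above has an input file to
    # read. It would have to run before the open() call above, e.g. as a separate
    # setup step; the file name and seed are illustrative.
    import random
    random.seed(123)
    with open(path, "w") as f:
        f.write("".join(random.choice("ATGC") for _ in range(length)))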